{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Simple Binary Classification with defaults\n",
    "\n",
    "In this notebook we will train a Wide and Deep model and also a simple \"Deep\" model using the well-known adult dataset"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/Users/javierrodriguezzaurin/.pyenv/versions/3.10.13/envs/widedeep310/lib/python3.10/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
      "  from .autonotebook import tqdm as notebook_tqdm\n"
     ]
    }
   ],
   "source": [
    "import numpy as np\n",
    "import pandas as pd\n",
    "import torch\n",
    "\n",
    "from pytorch_widedeep.preprocessing import WidePreprocessor, TabPreprocessor\n",
    "from pytorch_widedeep.training import Trainer\n",
    "from pytorch_widedeep.models import Wide, TabMlp, WideDeep\n",
    "from pytorch_widedeep.metrics import Accuracy, Precision\n",
    "from pytorch_widedeep.datasets import load_adult"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>age</th>\n",
       "      <th>workclass</th>\n",
       "      <th>fnlwgt</th>\n",
       "      <th>education</th>\n",
       "      <th>educational-num</th>\n",
       "      <th>marital-status</th>\n",
       "      <th>occupation</th>\n",
       "      <th>relationship</th>\n",
       "      <th>race</th>\n",
       "      <th>gender</th>\n",
       "      <th>capital-gain</th>\n",
       "      <th>capital-loss</th>\n",
       "      <th>hours-per-week</th>\n",
       "      <th>native-country</th>\n",
       "      <th>income</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>25</td>\n",
       "      <td>Private</td>\n",
       "      <td>226802</td>\n",
       "      <td>11th</td>\n",
       "      <td>7</td>\n",
       "      <td>Never-married</td>\n",
       "      <td>Machine-op-inspct</td>\n",
       "      <td>Own-child</td>\n",
       "      <td>Black</td>\n",
       "      <td>Male</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>40</td>\n",
       "      <td>United-States</td>\n",
       "      <td>&lt;=50K</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>38</td>\n",
       "      <td>Private</td>\n",
       "      <td>89814</td>\n",
       "      <td>HS-grad</td>\n",
       "      <td>9</td>\n",
       "      <td>Married-civ-spouse</td>\n",
       "      <td>Farming-fishing</td>\n",
       "      <td>Husband</td>\n",
       "      <td>White</td>\n",
       "      <td>Male</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>50</td>\n",
       "      <td>United-States</td>\n",
       "      <td>&lt;=50K</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>28</td>\n",
       "      <td>Local-gov</td>\n",
       "      <td>336951</td>\n",
       "      <td>Assoc-acdm</td>\n",
       "      <td>12</td>\n",
       "      <td>Married-civ-spouse</td>\n",
       "      <td>Protective-serv</td>\n",
       "      <td>Husband</td>\n",
       "      <td>White</td>\n",
       "      <td>Male</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>40</td>\n",
       "      <td>United-States</td>\n",
       "      <td>&gt;50K</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>3</th>\n",
       "      <td>44</td>\n",
       "      <td>Private</td>\n",
       "      <td>160323</td>\n",
       "      <td>Some-college</td>\n",
       "      <td>10</td>\n",
       "      <td>Married-civ-spouse</td>\n",
       "      <td>Machine-op-inspct</td>\n",
       "      <td>Husband</td>\n",
       "      <td>Black</td>\n",
       "      <td>Male</td>\n",
       "      <td>7688</td>\n",
       "      <td>0</td>\n",
       "      <td>40</td>\n",
       "      <td>United-States</td>\n",
       "      <td>&gt;50K</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>4</th>\n",
       "      <td>18</td>\n",
       "      <td>?</td>\n",
       "      <td>103497</td>\n",
       "      <td>Some-college</td>\n",
       "      <td>10</td>\n",
       "      <td>Never-married</td>\n",
       "      <td>?</td>\n",
       "      <td>Own-child</td>\n",
       "      <td>White</td>\n",
       "      <td>Female</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>30</td>\n",
       "      <td>United-States</td>\n",
       "      <td>&lt;=50K</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "   age  workclass  fnlwgt     education  educational-num      marital-status  \\\n",
       "0   25    Private  226802          11th                7       Never-married   \n",
       "1   38    Private   89814       HS-grad                9  Married-civ-spouse   \n",
       "2   28  Local-gov  336951    Assoc-acdm               12  Married-civ-spouse   \n",
       "3   44    Private  160323  Some-college               10  Married-civ-spouse   \n",
       "4   18          ?  103497  Some-college               10       Never-married   \n",
       "\n",
       "          occupation relationship   race  gender  capital-gain  capital-loss  \\\n",
       "0  Machine-op-inspct    Own-child  Black    Male             0             0   \n",
       "1    Farming-fishing      Husband  White    Male             0             0   \n",
       "2    Protective-serv      Husband  White    Male             0             0   \n",
       "3  Machine-op-inspct      Husband  Black    Male          7688             0   \n",
       "4                  ?    Own-child  White  Female             0             0   \n",
       "\n",
       "   hours-per-week native-country income  \n",
       "0              40  United-States  <=50K  \n",
       "1              50  United-States  <=50K  \n",
       "2              40  United-States   >50K  \n",
       "3              40  United-States   >50K  \n",
       "4              30  United-States  <=50K  "
      ]
     },
     "execution_count": 2,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "df = load_adult(as_frame=True)\n",
    "df.head()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>age</th>\n",
       "      <th>workclass</th>\n",
       "      <th>fnlwgt</th>\n",
       "      <th>education</th>\n",
       "      <th>educational_num</th>\n",
       "      <th>marital_status</th>\n",
       "      <th>occupation</th>\n",
       "      <th>relationship</th>\n",
       "      <th>race</th>\n",
       "      <th>gender</th>\n",
       "      <th>capital_gain</th>\n",
       "      <th>capital_loss</th>\n",
       "      <th>hours_per_week</th>\n",
       "      <th>native_country</th>\n",
       "      <th>income_label</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>25</td>\n",
       "      <td>Private</td>\n",
       "      <td>226802</td>\n",
       "      <td>11th</td>\n",
       "      <td>7</td>\n",
       "      <td>Never-married</td>\n",
       "      <td>Machine-op-inspct</td>\n",
       "      <td>Own-child</td>\n",
       "      <td>Black</td>\n",
       "      <td>Male</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>40</td>\n",
       "      <td>United-States</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>38</td>\n",
       "      <td>Private</td>\n",
       "      <td>89814</td>\n",
       "      <td>HS-grad</td>\n",
       "      <td>9</td>\n",
       "      <td>Married-civ-spouse</td>\n",
       "      <td>Farming-fishing</td>\n",
       "      <td>Husband</td>\n",
       "      <td>White</td>\n",
       "      <td>Male</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>50</td>\n",
       "      <td>United-States</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>28</td>\n",
       "      <td>Local-gov</td>\n",
       "      <td>336951</td>\n",
       "      <td>Assoc-acdm</td>\n",
       "      <td>12</td>\n",
       "      <td>Married-civ-spouse</td>\n",
       "      <td>Protective-serv</td>\n",
       "      <td>Husband</td>\n",
       "      <td>White</td>\n",
       "      <td>Male</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>40</td>\n",
       "      <td>United-States</td>\n",
       "      <td>1</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>3</th>\n",
       "      <td>44</td>\n",
       "      <td>Private</td>\n",
       "      <td>160323</td>\n",
       "      <td>Some-college</td>\n",
       "      <td>10</td>\n",
       "      <td>Married-civ-spouse</td>\n",
       "      <td>Machine-op-inspct</td>\n",
       "      <td>Husband</td>\n",
       "      <td>Black</td>\n",
       "      <td>Male</td>\n",
       "      <td>7688</td>\n",
       "      <td>0</td>\n",
       "      <td>40</td>\n",
       "      <td>United-States</td>\n",
       "      <td>1</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>4</th>\n",
       "      <td>18</td>\n",
       "      <td>?</td>\n",
       "      <td>103497</td>\n",
       "      <td>Some-college</td>\n",
       "      <td>10</td>\n",
       "      <td>Never-married</td>\n",
       "      <td>?</td>\n",
       "      <td>Own-child</td>\n",
       "      <td>White</td>\n",
       "      <td>Female</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>30</td>\n",
       "      <td>United-States</td>\n",
       "      <td>0</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "   age  workclass  fnlwgt     education  educational_num      marital_status  \\\n",
       "0   25    Private  226802          11th                7       Never-married   \n",
       "1   38    Private   89814       HS-grad                9  Married-civ-spouse   \n",
       "2   28  Local-gov  336951    Assoc-acdm               12  Married-civ-spouse   \n",
       "3   44    Private  160323  Some-college               10  Married-civ-spouse   \n",
       "4   18          ?  103497  Some-college               10       Never-married   \n",
       "\n",
       "          occupation relationship   race  gender  capital_gain  capital_loss  \\\n",
       "0  Machine-op-inspct    Own-child  Black    Male             0             0   \n",
       "1    Farming-fishing      Husband  White    Male             0             0   \n",
       "2    Protective-serv      Husband  White    Male             0             0   \n",
       "3  Machine-op-inspct      Husband  Black    Male          7688             0   \n",
       "4                  ?    Own-child  White  Female             0             0   \n",
       "\n",
       "   hours_per_week native_country  income_label  \n",
       "0              40  United-States             0  \n",
       "1              50  United-States             0  \n",
       "2              40  United-States             1  \n",
       "3              40  United-States             1  \n",
       "4              30  United-States             0  "
      ]
     },
     "execution_count": 3,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# For convenience, we'll replace '-' with '_'\n",
    "df.columns = [c.replace(\"-\", \"_\") for c in df.columns]\n",
    "# binary target\n",
    "df[\"income_label\"] = (df[\"income\"].apply(lambda x: \">50K\" in x)).astype(int)\n",
    "df.drop(\"income\", axis=1, inplace=True)\n",
    "df.head()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "df.drop([\"fnlwgt\", \"educational_num\"], axis=1, inplace=True)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Preparing the data"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Define wide, crossed and deep tabular columns\n",
    "wide_cols = [\n",
    "    \"workclass\",\n",
    "    \"education\",\n",
    "    \"marital_status\",\n",
    "    \"occupation\",\n",
    "    \"relationship\",\n",
    "    \"race\",\n",
    "    \"gender\",\n",
    "    \"native_country\",\n",
    "]\n",
    "crossed_cols = [(\"education\", \"occupation\"), (\"native_country\", \"occupation\")]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "cat_embed_cols = [\n",
    "    \"workclass\",\n",
    "    \"education\",\n",
    "    \"marital_status\",\n",
    "    \"occupation\",\n",
    "    \"relationship\",\n",
    "    \"race\",\n",
    "    \"gender\",\n",
    "    \"capital_gain\",\n",
    "    \"capital_loss\",\n",
    "    \"native_country\",\n",
    "]\n",
    "continuous_cols = [\"age\", \"hours_per_week\"]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "# TARGET\n",
    "target_col = \"income_label\"\n",
    "target = df[target_col].values"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Let's see what the preprocessors do"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
    "# wide\n",
    "wide_preprocessor = WidePreprocessor(wide_cols=wide_cols, crossed_cols=crossed_cols)\n",
    "X_wide = wide_preprocessor.fit_transform(df)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [],
   "source": [
    "# # wide_preprocessor has an attribute called encoding_dict with the encoding dictionary\n",
    "# wide_preprocessor.encoding_dict"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [],
   "source": [
    "# deeptabular\n",
    "tab_preprocessor = TabPreprocessor(\n",
    "    embed_cols=cat_embed_cols,\n",
    "    continuous_cols=continuous_cols,\n",
    "    cols_to_scale=continuous_cols,\n",
    ")\n",
    "X_tab = tab_preprocessor.fit_transform(df)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "[('workclass', 9, 5),\n",
       " ('education', 16, 8),\n",
       " ('marital_status', 7, 5),\n",
       " ('occupation', 15, 7),\n",
       " ('relationship', 6, 4),\n",
       " ('race', 5, 4),\n",
       " ('gender', 2, 2),\n",
       " ('capital_gain', 123, 24),\n",
       " ('capital_loss', 99, 21),\n",
       " ('native_country', 42, 13)]"
      ]
     },
     "execution_count": 11,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# check the docs to understand the useful attributes that the tab_preprocessor has. For example,\n",
    "# as well as an encoding dictionary, tab_preprocessor has an attribute called cat_embed_input\n",
     "# that specifies the categorical columns that will be represented as embeddings, the number\n",
    "# of different categories per feature, and the dimension of the embeddings as defined by some\n",
    "# of the internal rules of thumb that the preprocessor has (have a look to the docs)\n",
    "tab_preprocessor.cat_embed_input"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[[  1  10  26 ...  61 103 328]\n",
      " [  1  11  27 ...  61 104 329]\n",
      " [  2  12  27 ...  61 105 330]\n",
      " ...\n",
      " [  1  11  28 ...  61 115 335]\n",
      " [  1  11  26 ...  61 115 335]\n",
      " [  7  11  27 ...  61 127 336]]\n",
      "(48842, 10)\n"
     ]
    }
   ],
   "source": [
    "print(X_wide)\n",
    "print(X_wide.shape)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[[ 1.          1.          1.         ...  1.         -0.99512893\n",
      "  -0.03408696]\n",
      " [ 1.          2.          2.         ...  1.         -0.04694151\n",
      "   0.77292975]\n",
      " [ 2.          3.          2.         ...  1.         -0.77631645\n",
      "  -0.03408696]\n",
      " ...\n",
      " [ 1.          2.          3.         ...  1.          1.41180837\n",
      "  -0.03408696]\n",
      " [ 1.          2.          1.         ...  1.         -1.21394141\n",
      "  -1.64812038]\n",
      " [ 7.          2.          2.         ...  1.          0.97418341\n",
      "  -0.03408696]]\n",
      "(48842, 12)\n"
     ]
    }
   ],
   "source": [
    "print(X_tab)\n",
    "print(X_tab.shape)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Defining the model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [],
   "source": [
    "wide = Wide(input_dim=np.unique(X_wide).shape[0], pred_dim=1)\n",
    "tab_mlp = TabMlp(\n",
    "    column_idx=tab_preprocessor.column_idx,\n",
    "    cat_embed_input=tab_preprocessor.cat_embed_input,\n",
    "    cat_embed_dropout=0.1,\n",
    "    continuous_cols=continuous_cols,\n",
    "    mlp_hidden_dims=[400, 200],\n",
    "    mlp_dropout=0.5,\n",
    "    mlp_activation=\"leaky_relu\",\n",
    ")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Let's first find out how a linear model performs."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "Wide(\n",
       "  (wide_linear): Embedding(809, 1, padding_idx=0)\n",
       ")"
      ]
     },
     "execution_count": 15,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "wide"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Before being passed to the Trainer, the models need to be \"constructed\" with the ``WideDeep`` constructor class. For the particular case of the wide/linear model, not much really happens"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [],
   "source": [
    "lin_model = WideDeep(wide=wide)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "WideDeep(\n",
       "  (wide): Wide(\n",
       "    (wide_linear): Embedding(809, 1, padding_idx=0)\n",
       "  )\n",
       ")"
      ]
     },
     "execution_count": 17,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "lin_model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {},
   "outputs": [],
   "source": [
    "lin_trainer = Trainer(\n",
    "    model=lin_model,\n",
    "    objective=\"binary\",\n",
    "    optimizers=torch.optim.AdamW(lin_model.parameters(), lr=0.01),\n",
    "    metrics=[Accuracy, Precision],\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "epoch 1: 100%|█████████████████████████████████████████| 306/306 [00:02<00:00, 109.04it/s, loss=0.426, metrics={'acc': 0.7983, 'prec': 0.6152}]\n",
      "valid: 100%|██████████████████████████████████████████████| 77/77 [00:00<00:00, 102.46it/s, loss=0.366, metrics={'acc': 0.832, 'prec': 0.6916}]\n",
      "epoch 2: 100%|█████████████████████████████████████████| 306/306 [00:02<00:00, 130.27it/s, loss=0.364, metrics={'acc': 0.8305, 'prec': 0.6933}]\n",
      "valid: 100%|█████████████████████████████████████████████| 77/77 [00:00<00:00, 150.46it/s, loss=0.361, metrics={'acc': 0.8357, 'prec': 0.6982}]\n",
      "epoch 3: 100%|█████████████████████████████████████████| 306/306 [00:02<00:00, 133.19it/s, loss=0.359, metrics={'acc': 0.8329, 'prec': 0.6994}]\n",
      "valid: 100%|██████████████████████████████████████████████| 77/77 [00:00<00:00, 145.75it/s, loss=0.361, metrics={'acc': 0.836, 'prec': 0.7009}]\n",
      "epoch 4: 100%|█████████████████████████████████████████| 306/306 [00:02<00:00, 130.91it/s, loss=0.358, metrics={'acc': 0.8333, 'prec': 0.7005}]\n",
      "valid: 100%|██████████████████████████████████████████████| 77/77 [00:00<00:00, 155.08it/s, loss=0.361, metrics={'acc': 0.8364, 'prec': 0.702}]\n"
     ]
    }
   ],
   "source": [
    "lin_trainer.fit(X_wide=X_wide, target=target, n_epochs=4, batch_size=128, val_split=0.2)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Bear in mind that `wide` is a linear model where the non-linearities are captured via the crossed columns. For the crossed-columns to be effective one needs proper business knowledge. There is no magic formula to produce them\n",
    "\n",
    "Let's have a look to the tabular model by itself"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "metadata": {},
   "outputs": [],
   "source": [
    "tab_model = WideDeep(deeptabular=tab_mlp)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "WideDeep(\n",
       "  (deeptabular): Sequential(\n",
       "    (0): TabMlp(\n",
       "      (cat_embed): DiffSizeCatEmbeddings(\n",
       "        (embed_layers): ModuleDict(\n",
       "          (emb_layer_workclass): Embedding(10, 5, padding_idx=0)\n",
       "          (emb_layer_education): Embedding(17, 8, padding_idx=0)\n",
       "          (emb_layer_marital_status): Embedding(8, 5, padding_idx=0)\n",
       "          (emb_layer_occupation): Embedding(16, 7, padding_idx=0)\n",
       "          (emb_layer_relationship): Embedding(7, 4, padding_idx=0)\n",
       "          (emb_layer_race): Embedding(6, 4, padding_idx=0)\n",
       "          (emb_layer_gender): Embedding(3, 2, padding_idx=0)\n",
       "          (emb_layer_capital_gain): Embedding(124, 24, padding_idx=0)\n",
       "          (emb_layer_capital_loss): Embedding(100, 21, padding_idx=0)\n",
       "          (emb_layer_native_country): Embedding(43, 13, padding_idx=0)\n",
       "        )\n",
       "        (embedding_dropout): Dropout(p=0.1, inplace=False)\n",
       "      )\n",
       "      (cont_norm): Identity()\n",
       "      (encoder): MLP(\n",
       "        (mlp): Sequential(\n",
       "          (dense_layer_0): Sequential(\n",
       "            (0): Linear(in_features=95, out_features=400, bias=True)\n",
       "            (1): LeakyReLU(negative_slope=0.01, inplace=True)\n",
       "            (2): Dropout(p=0.5, inplace=False)\n",
       "          )\n",
       "          (dense_layer_1): Sequential(\n",
       "            (0): Linear(in_features=400, out_features=200, bias=True)\n",
       "            (1): LeakyReLU(negative_slope=0.01, inplace=True)\n",
       "            (2): Dropout(p=0.5, inplace=False)\n",
       "          )\n",
       "        )\n",
       "      )\n",
       "    )\n",
       "    (1): Linear(in_features=200, out_features=1, bias=True)\n",
       "  )\n",
       ")"
      ]
     },
     "execution_count": 21,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "tab_model"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "You can see how the `WideDeep` class has added a final prediction layer that collects the activations from the last layer of the model and plugs them into the output neuron. If this was a multiclass classification problem, the prediction dimension (i.e. the size of that final layer) needs to be specified via the `pred_dim` when instantiating the `WideDeep` class, as we will see later"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "metadata": {},
   "outputs": [],
   "source": [
    "tab_trainer = Trainer(\n",
    "    model=tab_model,\n",
    "    objective=\"binary\",\n",
    "    optimizers=torch.optim.AdamW(tab_model.parameters(), lr=0.001),\n",
    "    metrics=[Accuracy, Precision],\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "epoch 1: 100%|███████████████████████████████████████████| 306/306 [00:03<00:00, 97.00it/s, loss=0.37, metrics={'acc': 0.8267, 'prec': 0.7037}]\n",
      "valid: 100%|█████████████████████████████████████████████| 77/77 [00:00<00:00, 134.91it/s, loss=0.313, metrics={'acc': 0.8588, 'prec': 0.7577}]\n",
      "epoch 2: 100%|███████████████████████████████████████████| 306/306 [00:03<00:00, 86.86it/s, loss=0.319, metrics={'acc': 0.8514, 'prec': 0.761}]\n",
      "valid: 100%|██████████████████████████████████████████████| 77/77 [00:01<00:00, 73.13it/s, loss=0.296, metrics={'acc': 0.8675, 'prec': 0.7685}]\n",
      "epoch 3: 100%|██████████████████████████████████████████| 306/306 [00:03<00:00, 79.07it/s, loss=0.305, metrics={'acc': 0.8574, 'prec': 0.7646}]\n",
      "valid: 100%|█████████████████████████████████████████████| 77/77 [00:00<00:00, 130.11it/s, loss=0.289, metrics={'acc': 0.8696, 'prec': 0.7765}]\n",
      "epoch 4: 100%|██████████████████████████████████████████| 306/306 [00:03<00:00, 87.39it/s, loss=0.296, metrics={'acc': 0.8622, 'prec': 0.7769}]\n",
      "valid: 100%|██████████████████████████████████████████████| 77/77 [00:00<00:00, 90.63it/s, loss=0.285, metrics={'acc': 0.8697, 'prec': 0.7741}]\n"
     ]
    }
   ],
   "source": [
    "tab_trainer.fit(X_tab=X_tab, target=target, n_epochs=4, batch_size=128, val_split=0.2)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "The best result I ever obtained with `LightGBM` on this dataset is 0.8782...so we are pretty close.\n",
    "\n",
    "Let's combine the `wide` and `tab_mlp` components and see if it helps"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "metadata": {},
   "outputs": [],
   "source": [
    "wide = Wide(input_dim=np.unique(X_wide).shape[0], pred_dim=1)\n",
    "tab_mlp = TabMlp(\n",
    "    column_idx=tab_preprocessor.column_idx,\n",
    "    cat_embed_input=tab_preprocessor.cat_embed_input,\n",
    "    cat_embed_dropout=0.1,\n",
    "    continuous_cols=continuous_cols,\n",
    "    mlp_hidden_dims=[400, 200],\n",
    "    mlp_dropout=0.5,\n",
    "    mlp_activation=\"leaky_relu\",\n",
    ")\n",
    "wd_model = WideDeep(wide=wide, deeptabular=tab_mlp)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 25,
   "metadata": {},
   "outputs": [],
   "source": [
    "wd_trainer = Trainer(\n",
    "    model=wd_model,\n",
    "    objective=\"binary\",\n",
    "    optimizers=torch.optim.AdamW(wd_model.parameters(), lr=0.001),\n",
    "    metrics=[Accuracy, Precision],\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 26,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "epoch 1: 100%|██████████████████████████████████████████| 306/306 [00:03<00:00, 77.48it/s, loss=0.418, metrics={'acc': 0.8047, 'prec': 0.6154}]\n",
      "valid: 100%|█████████████████████████████████████████████| 77/77 [00:00<00:00, 110.51it/s, loss=0.321, metrics={'acc': 0.8521, 'prec': 0.7059}]\n",
      "epoch 2: 100%|██████████████████████████████████████████| 306/306 [00:03<00:00, 82.70it/s, loss=0.333, metrics={'acc': 0.8428, 'prec': 0.7141}]\n",
      "valid: 100%|██████████████████████████████████████████████| 77/77 [00:00<00:00, 112.52it/s, loss=0.299, metrics={'acc': 0.866, 'prec': 0.7447}]\n",
      "epoch 3: 100%|██████████████████████████████████████████| 306/306 [00:04<00:00, 74.34it/s, loss=0.312, metrics={'acc': 0.8533, 'prec': 0.7404}]\n",
      "valid: 100%|███████████████████████████████████████████████| 77/77 [00:00<00:00, 89.86it/s, loss=0.29, metrics={'acc': 0.8683, 'prec': 0.7496}]\n",
      "epoch 4: 100%|██████████████████████████████████████████| 306/306 [00:04<00:00, 65.32it/s, loss=0.301, metrics={'acc': 0.8591, 'prec': 0.7542}]\n",
      "valid: 100%|██████████████████████████████████████████████| 77/77 [00:00<00:00, 86.81it/s, loss=0.286, metrics={'acc': 0.8712, 'prec': 0.7552}]\n"
     ]
    }
   ],
   "source": [
    "wd_trainer.fit(\n",
    "    X_wide=X_wide, X_tab=X_tab, target=target, n_epochs=4, batch_size=128, val_split=0.2\n",
    ")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
     "For this particular case, the combination of both did not lead to better results than using just the tab_mlp model, when using only 4 epochs. \n",
    "\n",
     "Note that we have used a `TabMlp` model, but we could use any other model in the library using the same syntax"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 27,
   "metadata": {},
   "outputs": [],
   "source": [
    "from pytorch_widedeep.models import TabTransformer"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
     "The parameters for the `TabTransformer` are as follows:\n",
    "\n",
    "```\n",
    "column_idx: Dict[str, int],\n",
    "cat_embed_input: Optional[List[Tuple[str, int]]] = None,\n",
    "cat_embed_dropout: Optional[float] = None,\n",
    "use_cat_bias: Optional[bool] = None,\n",
    "cat_embed_activation: Optional[str] = None,\n",
    "shared_embed: Optional[bool] = None,\n",
    "add_shared_embed: Optional[bool] = None,\n",
    "frac_shared_embed: Optional[float] = None,\n",
    "continuous_cols: Optional[List[str]] = None,\n",
    "cont_norm_layer: Optional[Literal[\"batchnorm\", \"layernorm\"]] = None,\n",
    "embed_continuous: Optional[bool] = None,\n",
    "embed_continuous_method: Optional[Literal[\"standard\", \"piecewise\", \"periodic\"]] = None,\n",
    "cont_embed_dropout: Optional[float] = None,\n",
    "cont_embed_activation: Optional[str] = None,\n",
    "quantization_setup: Optional[Dict[str, List[float]]] = None,\n",
    "n_frequencies: Optional[int] = None,\n",
    "sigma: Optional[float] = None,\n",
    "share_last_layer: Optional[bool] = None,\n",
    "full_embed_dropout: Optional[bool] = None,\n",
    "input_dim: int = 32,\n",
    "n_heads: int = 8,\n",
    "use_qkv_bias: bool = False,\n",
    "n_blocks: int = 4,\n",
    "attn_dropout: float = 0.2,\n",
    "ff_dropout: float = 0.1,\n",
    "ff_factor: int = 4,\n",
    "transformer_activation: str = \"gelu\",\n",
    "use_linear_attention: bool = False,\n",
    "use_flash_attention: bool = False,\n",
    "mlp_hidden_dims: Optional[List[int]] = None,\n",
    "mlp_activation: str = \"relu\",\n",
    "mlp_dropout: float = 0.1,\n",
    "mlp_batchnorm: bool = False,\n",
    "mlp_batchnorm_last: bool = False,\n",
    "mlp_linear_first: bool = True,\n",
    "```\n",
    "\n",
    "Please, see the documentation for details on each one of them, for now let's see how one could use a `TabTransformer` model in a few lines of code"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 28,
   "metadata": {},
   "outputs": [],
   "source": [
    "tab_transformer = TabTransformer(\n",
    "    column_idx=tab_preprocessor.column_idx,\n",
    "    cat_embed_input=tab_preprocessor.cat_embed_input,\n",
    "    cat_embed_dropout=0.1,\n",
    "    continuous_cols=continuous_cols,\n",
    "    embed_continuous_method=\"standard\",\n",
    "    cont_norm_layer=\"layernorm\",\n",
    "    cont_embed_dropout=0.2,\n",
    "    cont_embed_activation=\"leaky_relu\",\n",
    "    n_heads=4,\n",
    "    ff_dropout=0.2,\n",
    "    mlp_dropout=0.5,\n",
    "    mlp_activation=\"leaky_relu\",\n",
    "    mlp_linear_first=\"True\",\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 29,
   "metadata": {},
   "outputs": [],
   "source": [
    "tab_model = WideDeep(deeptabular=tab_transformer)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 30,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "WideDeep(\n",
       "  (deeptabular): Sequential(\n",
       "    (0): TabTransformer(\n",
       "      (cat_embed): SameSizeCatEmbeddings(\n",
       "        (embed): Embedding(325, 32, padding_idx=0)\n",
       "        (dropout): Dropout(p=0.1, inplace=False)\n",
       "      )\n",
       "      (cont_norm): LayerNorm((2,), eps=1e-05, elementwise_affine=True)\n",
       "      (cont_embed): ContEmbeddings(\n",
       "        INFO: [ContLinear = weight(n_cont_cols, embed_dim) + bias(n_cont_cols, embed_dim)]\n",
       "        (linear): ContLinear(n_cont_cols=2, embed_dim=32, embed_dropout=0.2)\n",
       "        (activation_fn): LeakyReLU(negative_slope=0.01, inplace=True)\n",
       "        (dropout): Dropout(p=0.2, inplace=False)\n",
       "      )\n",
       "      (encoder): Sequential(\n",
       "        (transformer_block0): TransformerEncoder(\n",
       "          (attn): MultiHeadedAttention(\n",
       "            (dropout): Dropout(p=0.2, inplace=False)\n",
       "            (q_proj): Linear(in_features=32, out_features=32, bias=False)\n",
       "            (kv_proj): Linear(in_features=32, out_features=64, bias=False)\n",
       "            (out_proj): Linear(in_features=32, out_features=32, bias=False)\n",
       "          )\n",
       "          (ff): FeedForward(\n",
       "            (w_1): Linear(in_features=32, out_features=128, bias=True)\n",
       "            (w_2): Linear(in_features=128, out_features=32, bias=True)\n",
       "            (dropout): Dropout(p=0.2, inplace=False)\n",
       "            (activation): GELU(approximate='none')\n",
       "          )\n",
       "          (attn_addnorm): AddNorm(\n",
       "            (dropout): Dropout(p=0.2, inplace=False)\n",
       "            (ln): LayerNorm((32,), eps=1e-05, elementwise_affine=True)\n",
       "          )\n",
       "          (ff_addnorm): AddNorm(\n",
       "            (dropout): Dropout(p=0.2, inplace=False)\n",
       "            (ln): LayerNorm((32,), eps=1e-05, elementwise_affine=True)\n",
       "          )\n",
       "        )\n",
       "        (transformer_block1): TransformerEncoder(\n",
       "          (attn): MultiHeadedAttention(\n",
       "            (dropout): Dropout(p=0.2, inplace=False)\n",
       "            (q_proj): Linear(in_features=32, out_features=32, bias=False)\n",
       "            (kv_proj): Linear(in_features=32, out_features=64, bias=False)\n",
       "            (out_proj): Linear(in_features=32, out_features=32, bias=False)\n",
       "          )\n",
       "          (ff): FeedForward(\n",
       "            (w_1): Linear(in_features=32, out_features=128, bias=True)\n",
       "            (w_2): Linear(in_features=128, out_features=32, bias=True)\n",
       "            (dropout): Dropout(p=0.2, inplace=False)\n",
       "            (activation): GELU(approximate='none')\n",
       "          )\n",
       "          (attn_addnorm): AddNorm(\n",
       "            (dropout): Dropout(p=0.2, inplace=False)\n",
       "            (ln): LayerNorm((32,), eps=1e-05, elementwise_affine=True)\n",
       "          )\n",
       "          (ff_addnorm): AddNorm(\n",
       "            (dropout): Dropout(p=0.2, inplace=False)\n",
       "            (ln): LayerNorm((32,), eps=1e-05, elementwise_affine=True)\n",
       "          )\n",
       "        )\n",
       "        (transformer_block2): TransformerEncoder(\n",
       "          (attn): MultiHeadedAttention(\n",
       "            (dropout): Dropout(p=0.2, inplace=False)\n",
       "            (q_proj): Linear(in_features=32, out_features=32, bias=False)\n",
       "            (kv_proj): Linear(in_features=32, out_features=64, bias=False)\n",
       "            (out_proj): Linear(in_features=32, out_features=32, bias=False)\n",
       "          )\n",
       "          (ff): FeedForward(\n",
       "            (w_1): Linear(in_features=32, out_features=128, bias=True)\n",
       "            (w_2): Linear(in_features=128, out_features=32, bias=True)\n",
       "            (dropout): Dropout(p=0.2, inplace=False)\n",
       "            (activation): GELU(approximate='none')\n",
       "          )\n",
       "          (attn_addnorm): AddNorm(\n",
       "            (dropout): Dropout(p=0.2, inplace=False)\n",
       "            (ln): LayerNorm((32,), eps=1e-05, elementwise_affine=True)\n",
       "          )\n",
       "          (ff_addnorm): AddNorm(\n",
       "            (dropout): Dropout(p=0.2, inplace=False)\n",
       "            (ln): LayerNorm((32,), eps=1e-05, elementwise_affine=True)\n",
       "          )\n",
       "        )\n",
       "        (transformer_block3): TransformerEncoder(\n",
       "          (attn): MultiHeadedAttention(\n",
       "            (dropout): Dropout(p=0.2, inplace=False)\n",
       "            (q_proj): Linear(in_features=32, out_features=32, bias=False)\n",
       "            (kv_proj): Linear(in_features=32, out_features=64, bias=False)\n",
       "            (out_proj): Linear(in_features=32, out_features=32, bias=False)\n",
       "          )\n",
       "          (ff): FeedForward(\n",
       "            (w_1): Linear(in_features=32, out_features=128, bias=True)\n",
       "            (w_2): Linear(in_features=128, out_features=32, bias=True)\n",
       "            (dropout): Dropout(p=0.2, inplace=False)\n",
       "            (activation): GELU(approximate='none')\n",
       "          )\n",
       "          (attn_addnorm): AddNorm(\n",
       "            (dropout): Dropout(p=0.2, inplace=False)\n",
       "            (ln): LayerNorm((32,), eps=1e-05, elementwise_affine=True)\n",
       "          )\n",
       "          (ff_addnorm): AddNorm(\n",
       "            (dropout): Dropout(p=0.2, inplace=False)\n",
       "            (ln): LayerNorm((32,), eps=1e-05, elementwise_affine=True)\n",
       "          )\n",
       "        )\n",
       "      )\n",
       "    )\n",
       "    (1): Linear(in_features=384, out_features=1, bias=True)\n",
       "  )\n",
       ")"
      ]
     },
     "execution_count": 30,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "tab_model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 31,
   "metadata": {},
   "outputs": [],
   "source": [
    "tab_trainer = Trainer(\n",
    "    model=tab_model,\n",
    "    objective=\"binary\",\n",
    "    optimizers=torch.optim.AdamW(tab_model.parameters(), lr=0.001),\n",
    "    metrics=[Accuracy, Precision],\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 32,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "epoch 1: 100%|██████████████████████████████████████████| 306/306 [00:11<00:00, 27.57it/s, loss=0.359, metrics={'acc': 0.8334, 'prec': 0.7082}]\n",
      "valid: 100%|███████████████████████████████████████████████| 77/77 [00:01<00:00, 57.89it/s, loss=0.33, metrics={'acc': 0.8536, 'prec': 0.7152}]\n"
     ]
    }
   ],
   "source": [
    "tab_trainer.fit(X_tab=X_tab, target=target, n_epochs=1, batch_size=128, val_split=0.2)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.13"
  },
  "vscode": {
   "interpreter": {
    "hash": "3b99005fd577fa40f3cce433b2b92303885900e634b2b5344c07c59d06c8792d"
   }
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
