{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "a7b1aa10",
   "metadata": {},
   "source": [
     "## Using a Hugging Face model\n",
    "\n",
     "In this notebook we will show how to use an \"external\" Hugging Face model along with any other model in the library. In particular we will show how to combine it with a tabular DL model.\n",
    "\n",
    "Since we are here, we will also compare the performance of a few models on a text classification problem.\n",
    "\n",
    "The notebook will go as follows:\n",
    "\n",
    "1. Text classification using tf-idf + LightGBM\n",
    "2. Text classification using a basic RNN  \n",
    "3. Text classification using Distilbert\n",
    "\n",
    "In all 3 cases we will add some tabular features to see if these help. \n",
    "\n",
     "In general, I would not pay much attention to the results since I have put no effort into getting the best possible results (i.e. no hyperparameter optimization or trying different architectures, for example).\n",
    "\n",
    "Let's go"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "e75756db",
   "metadata": {},
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "import torch\n",
    "import lightgbm as lgb\n",
    "from lightgbm import Dataset as lgbDataset\n",
    "from scipy.sparse import hstack, csr_matrix\n",
    "from sklearn.metrics import (\n",
    "    f1_score,\n",
    "    recall_score,\n",
    "    accuracy_score,\n",
    "    precision_score,\n",
    "    confusion_matrix,\n",
    ")\n",
    "from sklearn.model_selection import train_test_split\n",
    "from sklearn.feature_extraction.text import TfidfVectorizer\n",
    "\n",
    "from torch import Tensor, nn\n",
    "from transformers import DistilBertModel, DistilBertTokenizer\n",
    "from pytorch_widedeep import Trainer\n",
    "from pytorch_widedeep.models import TabMlp, BasicRNN, WideDeep\n",
    "from pytorch_widedeep.metrics import F1Score, Accuracy\n",
    "from pytorch_widedeep.utils import Tokenizer, LabelEncoder\n",
    "from pytorch_widedeep.preprocessing import TextPreprocessor, TabPreprocessor\n",
    "from pytorch_widedeep.datasets import load_womens_ecommerce\n",
    "from pytorch_widedeep.utils.fastai_transforms import (\n",
    "    fix_html,\n",
    "    spec_add_spaces,\n",
    "    rm_useless_spaces,\n",
    ")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "adc4b786",
   "metadata": {},
   "source": [
    "Let's load the data and have a look:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "ca6f1166",
   "metadata": {},
   "outputs": [],
   "source": [
    "df = load_womens_ecommerce(as_frame=True)\n",
    "\n",
    "df.columns = [c.replace(\" \", \"_\").lower() for c in df.columns]\n",
    "\n",
    "# classes from [0,num_class)\n",
    "df[\"rating\"] = (df[\"rating\"] - 1).astype(\"int64\")\n",
    "\n",
    "# group reviews with 1 and 2 scores into one class\n",
    "df.loc[df.rating == 0, \"rating\"] = 1\n",
    "\n",
    "# and back again to [0,num_class)\n",
    "df[\"rating\"] = (df[\"rating\"] - 1).astype(\"int64\")\n",
    "\n",
    "# drop short reviews\n",
    "df = df[~df.review_text.isna()]\n",
    "df[\"review_length\"] = df.review_text.apply(lambda x: len(x.split(\" \")))\n",
    "df = df[df.review_length >= 5]\n",
    "df = df.drop(\"review_length\", axis=1).reset_index(drop=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "f518da68",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>clothing_id</th>\n",
       "      <th>age</th>\n",
       "      <th>title</th>\n",
       "      <th>review_text</th>\n",
       "      <th>rating</th>\n",
       "      <th>recommended_ind</th>\n",
       "      <th>positive_feedback_count</th>\n",
       "      <th>division_name</th>\n",
       "      <th>department_name</th>\n",
       "      <th>class_name</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>767</td>\n",
       "      <td>33</td>\n",
       "      <td>None</td>\n",
       "      <td>Absolutely wonderful - silky and sexy and comf...</td>\n",
       "      <td>2</td>\n",
       "      <td>1</td>\n",
       "      <td>0</td>\n",
       "      <td>Initmates</td>\n",
       "      <td>Intimate</td>\n",
       "      <td>Intimates</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>1080</td>\n",
       "      <td>34</td>\n",
       "      <td>None</td>\n",
       "      <td>Love this dress!  it's sooo pretty.  i happene...</td>\n",
       "      <td>3</td>\n",
       "      <td>1</td>\n",
       "      <td>4</td>\n",
       "      <td>General</td>\n",
       "      <td>Dresses</td>\n",
       "      <td>Dresses</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>1077</td>\n",
       "      <td>60</td>\n",
       "      <td>Some major design flaws</td>\n",
       "      <td>I had such high hopes for this dress and reall...</td>\n",
       "      <td>1</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>General</td>\n",
       "      <td>Dresses</td>\n",
       "      <td>Dresses</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>3</th>\n",
       "      <td>1049</td>\n",
       "      <td>50</td>\n",
       "      <td>My favorite buy!</td>\n",
       "      <td>I love, love, love this jumpsuit. it's fun, fl...</td>\n",
       "      <td>3</td>\n",
       "      <td>1</td>\n",
       "      <td>0</td>\n",
       "      <td>General Petite</td>\n",
       "      <td>Bottoms</td>\n",
       "      <td>Pants</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>4</th>\n",
       "      <td>847</td>\n",
       "      <td>47</td>\n",
       "      <td>Flattering shirt</td>\n",
       "      <td>This shirt is very flattering to all due to th...</td>\n",
       "      <td>3</td>\n",
       "      <td>1</td>\n",
       "      <td>6</td>\n",
       "      <td>General</td>\n",
       "      <td>Tops</td>\n",
       "      <td>Blouses</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "   clothing_id  age                    title  \\\n",
       "0          767   33                     None   \n",
       "1         1080   34                     None   \n",
       "2         1077   60  Some major design flaws   \n",
       "3         1049   50         My favorite buy!   \n",
       "4          847   47         Flattering shirt   \n",
       "\n",
       "                                         review_text  rating  recommended_ind  \\\n",
       "0  Absolutely wonderful - silky and sexy and comf...       2                1   \n",
       "1  Love this dress!  it's sooo pretty.  i happene...       3                1   \n",
       "2  I had such high hopes for this dress and reall...       1                0   \n",
       "3  I love, love, love this jumpsuit. it's fun, fl...       3                1   \n",
       "4  This shirt is very flattering to all due to th...       3                1   \n",
       "\n",
       "   positive_feedback_count   division_name department_name class_name  \n",
       "0                        0       Initmates        Intimate  Intimates  \n",
       "1                        4         General         Dresses    Dresses  \n",
       "2                        0         General         Dresses    Dresses  \n",
       "3                        0  General Petite         Bottoms      Pants  \n",
       "4                        6         General            Tops    Blouses  "
      ]
     },
     "execution_count": 3,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "df.head()"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "ce2cc2c8",
   "metadata": {},
   "source": [
     "So, we will use the `review_text` column to predict the `rating`. Later on, we will try to combine it with some other columns (like `division_name` and `age`) to see if these help.\n",
    "\n",
    "Let's first have a look to the distribution of ratings"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "ba88f4a7",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "rating\n",
       "3    12515\n",
       "2     4904\n",
       "1     2820\n",
       "0     2369\n",
       "Name: count, dtype: int64"
      ]
     },
     "execution_count": 4,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "df.rating.value_counts()"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "197e55b1",
   "metadata": {},
   "source": [
    "This shows that we could have perhaps grouped rating scores of 1, 2 and 3 into 1...but anyway, let's just move on with those 4 classes.\n",
    "\n",
     "We are not going to carry out any hyperparameter optimization here, so we will only need a train and a test set (i.e. no need for a validation set for the example in this notebook)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "df254871",
   "metadata": {},
   "outputs": [],
   "source": [
    "train, test = train_test_split(df, train_size=0.8, random_state=1, stratify=df.rating)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "d08e3fc1",
   "metadata": {},
   "source": [
    "Let's see what we have to beat. What metrics would we obtain if we always predict the most common rating (3)?"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "428ad14e",
   "metadata": {},
   "outputs": [],
   "source": [
    "most_common_pred = [train.rating.value_counts().index[0]] * len(test)\n",
    "\n",
    "most_common_acc = accuracy_score(test.rating, most_common_pred)\n",
    "most_common_f1 = f1_score(test.rating, most_common_pred, average=\"weighted\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "0f116d7a",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Accuracy: 0.553516143299425. F1 Score: 0.3944344218301668\n"
     ]
    }
   ],
   "source": [
    "print(f\"Accuracy: {most_common_acc}. F1 Score: {most_common_f1}\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "45a0a6a2",
   "metadata": {},
   "source": [
    "ok, these are our \"baseline\" metrics. \n",
    "\n",
    "Let's start by using simply tf-idf + lightGBM"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "86addfa7",
   "metadata": {},
   "source": [
    "### 1. Text classification using tf-idf + LightGBM\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "516bc7b9",
   "metadata": {},
   "outputs": [],
   "source": [
    "# ?Tokenizer"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "5b144f1a",
   "metadata": {},
   "outputs": [],
   "source": [
    "# this Tokenizer is part of our utils module but of course, any valid tokenizer can be used here.\n",
    "\n",
    "# When using notebooks there seems to be an issue related with multiprocessing (and sometimes tqdm)\n",
    "# that can only be solved by using only one CPU\n",
    "tok = Tokenizer(n_cpus=1)\n",
    "tok_reviews_tr = tok.process_all(train.review_text.tolist())\n",
    "tok_reviews_te = tok.process_all(test.review_text.tolist())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "id": "aaa94f8e",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/Users/javierrodriguezzaurin/.pyenv/versions/3.10.13/envs/widedeep310/lib/python3.10/site-packages/sklearn/feature_extraction/text.py:525: UserWarning: The parameter 'token_pattern' will not be used since 'tokenizer' is not None'\n",
      "  warnings.warn(\n"
     ]
    }
   ],
   "source": [
    "vectorizer = TfidfVectorizer(\n",
    "    max_features=5000, preprocessor=lambda x: x, tokenizer=lambda x: x, min_df=5\n",
    ")\n",
    "\n",
    "X_text_tr = vectorizer.fit_transform(tok_reviews_tr)\n",
    "X_text_te = vectorizer.transform(tok_reviews_te)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "id": "5809e741",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "<18086x4566 sparse matrix of type '<class 'numpy.float64'>'\n",
       "\twith 884074 stored elements in Compressed Sparse Row format>"
      ]
     },
     "execution_count": 11,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "X_text_tr"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "c93cb754",
   "metadata": {},
   "source": [
    "We now move our matrices to lightGBM `Dataset` format"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "id": "59c88b27",
   "metadata": {},
   "outputs": [],
   "source": [
    "lgbtrain_text = lgbDataset(\n",
    "    X_text_tr,\n",
    "    train.rating.values,\n",
    "    free_raw_data=False,\n",
    ")\n",
    "\n",
    "lgbtest_text = lgbDataset(\n",
    "    X_text_te,\n",
    "    test.rating.values,\n",
    "    reference=lgbtrain_text,\n",
    "    free_raw_data=False,\n",
    ")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "bfeb88ed",
   "metadata": {},
   "source": [
     "and off we go. By the way, as we run the next cell, we should appreciate how fast lightGBM runs. Yes, the input is a sparse matrix, but still, it trains on an 18086x4566 matrix in a matter of seconds"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "582c6efc",
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "lgb_text_model = lgb.train(\n",
    "    {\"objective\": \"multiclass\", \"num_classes\": 4},\n",
    "    lgbtrain_text,\n",
    "    valid_sets=[lgbtest_text, lgbtrain_text],\n",
    "    valid_names=[\"test\", \"train\"],\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "id": "40e5462e",
   "metadata": {},
   "outputs": [],
   "source": [
    "preds_text = lgb_text_model.predict(X_text_te)\n",
    "pred_text_class = np.argmax(preds_text, 1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "id": "143f61bc",
   "metadata": {},
   "outputs": [],
   "source": [
    "acc_text = accuracy_score(lgbtest_text.label, pred_text_class)\n",
    "f1_text = f1_score(lgbtest_text.label, pred_text_class, average=\"weighted\")\n",
    "cm_text = confusion_matrix(lgbtest_text.label, pred_text_class)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "id": "8e53b5f7",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "LightGBM Accuracy: 0.6444051304732419. LightGBM F1 Score: 0.617154488246181\n"
     ]
    }
   ],
   "source": [
    "print(f\"LightGBM Accuracy: {acc_text}. LightGBM F1 Score: {f1_text}\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "id": "15397539",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "LightGBM Confusion Matrix: \n",
      " [[ 199  135   61   79]\n",
      " [ 123  169  149  123]\n",
      " [  30   94  279  578]\n",
      " [  16   30  190 2267]]\n"
     ]
    }
   ],
   "source": [
    "print(f\"LightGBM Confusion Matrix: \\n {cm_text}\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "5ec73328",
   "metadata": {},
   "source": [
    "Ok, so, **with no hyperparameter optimization lightGBM gets an accuracy of 0.64 and a F1 score of 0.62**.\n",
    "This is significantly better than predicting always the most popular. \n",
    "\n",
    "Let's see if in this implementation, some additional features, like `age` or `class_name` are of any help"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "id": "7bdc37d6",
   "metadata": {},
   "outputs": [],
   "source": [
    "tab_cols = [\n",
    "    \"age\",\n",
    "    \"division_name\",\n",
    "    \"department_name\",\n",
    "    \"class_name\",\n",
    "]\n",
    "\n",
    "for tab_df in [train, test]:\n",
    "    for c in [\"division_name\", \"department_name\", \"class_name\"]:\n",
    "        tab_df[c] = tab_df[c].str.lower()\n",
     "        tab_df[c] = tab_df[c].fillna(\"missing\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "id": "b17aa479",
   "metadata": {},
   "outputs": [],
   "source": [
    "# This is our LabelEncoder. A class that is designed to work with the models in this library but\n",
    "# can be used for general purposes\n",
    "le = LabelEncoder(columns_to_encode=[\"division_name\", \"department_name\", \"class_name\"])\n",
    "train_tab_le = le.fit_transform(train)\n",
    "test_tab_le = le.transform(test)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "id": "2e55e42f",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>clothing_id</th>\n",
       "      <th>age</th>\n",
       "      <th>title</th>\n",
       "      <th>review_text</th>\n",
       "      <th>rating</th>\n",
       "      <th>recommended_ind</th>\n",
       "      <th>positive_feedback_count</th>\n",
       "      <th>division_name</th>\n",
       "      <th>department_name</th>\n",
       "      <th>class_name</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>4541</th>\n",
       "      <td>836</td>\n",
       "      <td>35</td>\n",
       "      <td>None</td>\n",
       "      <td>Bought this on sale in my reg size- 10. im 5'9...</td>\n",
       "      <td>2</td>\n",
       "      <td>1</td>\n",
       "      <td>2</td>\n",
       "      <td>1</td>\n",
       "      <td>1</td>\n",
       "      <td>1</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>18573</th>\n",
       "      <td>1022</td>\n",
       "      <td>25</td>\n",
       "      <td>Look like \"mom jeans\"</td>\n",
       "      <td>Maybe i just have the wrong body type for thes...</td>\n",
       "      <td>1</td>\n",
       "      <td>0</td>\n",
       "      <td>0</td>\n",
       "      <td>2</td>\n",
       "      <td>2</td>\n",
       "      <td>2</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1058</th>\n",
       "      <td>815</td>\n",
       "      <td>39</td>\n",
       "      <td>Ig brought me here</td>\n",
       "      <td>Love the way this top layers under my jackets ...</td>\n",
       "      <td>2</td>\n",
       "      <td>1</td>\n",
       "      <td>0</td>\n",
       "      <td>1</td>\n",
       "      <td>1</td>\n",
       "      <td>1</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>12132</th>\n",
       "      <td>984</td>\n",
       "      <td>47</td>\n",
       "      <td>Runs small especially the arms</td>\n",
       "      <td>I love this jacket. it's the prettiest and mos...</td>\n",
       "      <td>3</td>\n",
       "      <td>1</td>\n",
       "      <td>0</td>\n",
       "      <td>1</td>\n",
       "      <td>3</td>\n",
       "      <td>3</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>20756</th>\n",
       "      <td>1051</td>\n",
       "      <td>42</td>\n",
       "      <td>True red, true beauty.</td>\n",
       "      <td>These pants are gorgeous--the fabric has a sat...</td>\n",
       "      <td>3</td>\n",
       "      <td>1</td>\n",
       "      <td>0</td>\n",
       "      <td>2</td>\n",
       "      <td>2</td>\n",
       "      <td>4</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "       clothing_id  age                           title  \\\n",
       "4541           836   35                            None   \n",
       "18573         1022   25           Look like \"mom jeans\"   \n",
       "1058           815   39              Ig brought me here   \n",
       "12132          984   47  Runs small especially the arms   \n",
       "20756         1051   42          True red, true beauty.   \n",
       "\n",
       "                                             review_text  rating  \\\n",
       "4541   Bought this on sale in my reg size- 10. im 5'9...       2   \n",
       "18573  Maybe i just have the wrong body type for thes...       1   \n",
       "1058   Love the way this top layers under my jackets ...       2   \n",
       "12132  I love this jacket. it's the prettiest and mos...       3   \n",
       "20756  These pants are gorgeous--the fabric has a sat...       3   \n",
       "\n",
       "       recommended_ind  positive_feedback_count  division_name  \\\n",
       "4541                 1                        2              1   \n",
       "18573                0                        0              2   \n",
       "1058                 1                        0              1   \n",
       "12132                1                        0              1   \n",
       "20756                1                        0              2   \n",
       "\n",
       "       department_name  class_name  \n",
       "4541                 1           1  \n",
       "18573                2           2  \n",
       "1058                 1           1  \n",
       "12132                3           3  \n",
       "20756                2           4  "
      ]
     },
     "execution_count": 20,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "train_tab_le.head()"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "a326c0de",
   "metadata": {},
   "source": [
    "let's for example have a look to the encodings for the categorical feature `class_name`"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "id": "de0cb90f",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "{'blouses': 1,\n",
       " 'jeans': 2,\n",
       " 'jackets': 3,\n",
       " 'pants': 4,\n",
       " 'knits': 5,\n",
       " 'dresses': 6,\n",
       " 'skirts': 7,\n",
       " 'sweaters': 8,\n",
       " 'fine gauge': 9,\n",
       " 'legwear': 10,\n",
       " 'lounge': 11,\n",
       " 'shorts': 12,\n",
       " 'outerwear': 13,\n",
       " 'intimates': 14,\n",
       " 'swim': 15,\n",
       " 'trend': 16,\n",
       " 'sleep': 17,\n",
       " 'layering': 18,\n",
       " 'missing': 19,\n",
       " 'casual bottoms': 20,\n",
       " 'chemises': 21}"
      ]
     },
     "execution_count": 21,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "le.encoding_dict[\"class_name\"]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "id": "68b365c7",
   "metadata": {},
   "outputs": [],
   "source": [
    "# tabular training and test sets\n",
    "X_tab_tr = csr_matrix(train_tab_le[tab_cols].values)\n",
    "X_tab_te = csr_matrix(test_tab_le[tab_cols].values)\n",
    "\n",
    "# text + tabular training and test sets\n",
    "X_tab_text_tr = hstack((X_tab_tr, X_text_tr))\n",
    "X_tab_text_te = hstack((X_tab_te, X_text_te))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "id": "1c074b85",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "<18086x4 sparse matrix of type '<class 'numpy.int64'>'\n",
       "\twith 72344 stored elements in Compressed Sparse Row format>"
      ]
     },
     "execution_count": 23,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "X_tab_tr"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "id": "a0f35351",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "<18086x4570 sparse matrix of type '<class 'numpy.float64'>'\n",
       "\twith 956418 stored elements in Compressed Sparse Row format>"
      ]
     },
     "execution_count": 24,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "X_tab_text_tr"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 25,
   "id": "d396b91a",
   "metadata": {},
   "outputs": [],
   "source": [
    "lgbtrain_tab_text = lgbDataset(\n",
    "    X_tab_text_tr,\n",
    "    train.rating.values,\n",
    "    categorical_feature=[0, 1, 2, 3],\n",
    "    free_raw_data=False,\n",
    ")\n",
    "\n",
    "lgbtest_tab_text = lgbDataset(\n",
    "    X_tab_text_te,\n",
    "    test.rating.values,\n",
    "    reference=lgbtrain_tab_text,\n",
    "    free_raw_data=False,\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 26,
   "id": "6a36cd21",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/opt/conda/envs/wd38/lib/python3.8/site-packages/lightgbm/basic.py:2065: UserWarning: Using categorical_feature in Dataset.\n",
      "  _log_warning('Using categorical_feature in Dataset.')\n",
      "/opt/conda/envs/wd38/lib/python3.8/site-packages/lightgbm/basic.py:2068: UserWarning: categorical_feature in Dataset is overridden.\n",
      "New categorical_feature is [0, 1, 2, 3]\n",
      "  _log_warning('categorical_feature in Dataset is overridden.\\n'\n",
      "/opt/conda/envs/wd38/lib/python3.8/site-packages/lightgbm/engine.py:239: UserWarning: 'verbose_eval' argument is deprecated and will be removed in a future release of LightGBM. Pass 'log_evaluation()' callback via 'callbacks' argument instead.\n",
      "  _log_warning(\"'verbose_eval' argument is deprecated and will be removed in a future release of LightGBM. \"\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[LightGBM] [Warning] Auto-choosing col-wise multi-threading, the overhead of testing was 0.138280 seconds.\n",
      "You can set `force_col_wise=true` to remove the overhead.\n",
      "[LightGBM] [Info] Total Bins 143432\n",
      "[LightGBM] [Info] Number of data points in the train set: 18086, number of used features: 2289\n",
      "[LightGBM] [Info] Start training from score -2.255919\n",
      "[LightGBM] [Info] Start training from score -2.081545\n",
      "[LightGBM] [Info] Start training from score -1.528281\n",
      "[LightGBM] [Info] Start training from score -0.591354\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/opt/conda/envs/wd38/lib/python3.8/site-packages/lightgbm/basic.py:1780: UserWarning: Overriding the parameters from Reference Dataset.\n",
      "  _log_warning('Overriding the parameters from Reference Dataset.')\n",
      "/opt/conda/envs/wd38/lib/python3.8/site-packages/lightgbm/basic.py:1513: UserWarning: categorical_column in param dict is overridden.\n",
      "  _log_warning(f'{cat_alias} in param dict is overridden.')\n"
     ]
    }
   ],
   "source": [
    "lgb_tab_text_model = lgb.train(\n",
    "    {\"objective\": \"multiclass\", \"num_classes\": 4},\n",
    "    lgbtrain_tab_text,\n",
    "    valid_sets=[lgbtrain_tab_text, lgbtest_tab_text],\n",
     "    valid_names=[\"train\", \"test\"],\n",
    "    verbose_eval=False,\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 27,
   "id": "fbaf5d8f",
   "metadata": {},
   "outputs": [],
   "source": [
    "preds_tab_text = lgb_tab_text_model.predict(X_tab_text_te)\n",
    "preds_tab_text_class = np.argmax(preds_tab_text, 1)\n",
    "\n",
    "acc_tab_text = accuracy_score(lgbtest_tab_text.label, preds_tab_text_class)\n",
    "f1_tab_text = f1_score(lgbtest_tab_text.label, preds_tab_text_class, average=\"weighted\")\n",
    "cm_tab_text = confusion_matrix(lgbtest_tab_text.label, preds_tab_text_class)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 28,
   "id": "cf9bd76d",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "LightGBM text + tabular Accuracy: 0.6382131800088456. LightGBM text + tabular F1 Score: 0.6080251307242649\n"
     ]
    }
   ],
   "source": [
    "print(\n",
    "    f\"LightGBM text + tabular Accuracy: {acc_tab_text}. LightGBM text + tabular F1 Score: {f1_tab_text}\"\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 29,
   "id": "29bc8ed2",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "LightGBM text + tabular Confusion Matrix:\n",
      " [[ 193  123   68   90]\n",
      " [ 123  146  157  138]\n",
      " [  37   90  272  582]\n",
      " [  16   37  175 2275]]\n"
     ]
    }
   ],
   "source": [
    "print(f\"LightGBM text + tabular Confusion Matrix:\\n {cm_tab_text}\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "bc093906",
   "metadata": {},
   "source": [
     "So, in this setup, **the addition of tabular columns does not help performance**.\n",
    "\n",
    "### 2. Text classification using pytorch-widedeep's built-in models (a basic RNN)\n",
    "\n",
    "Moving on now to fully using `pytorch-widedeep` in this dataset, let's have a look on how one could use a simple RNN to predict the ratings with the library."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 30,
   "id": "0f7b9793",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "The vocabulary contains 4328 tokens\n"
     ]
    }
   ],
   "source": [
    "text_preprocessor = TextPreprocessor(\n",
    "    text_col=\"review_text\", max_vocab=5000, min_freq=5, maxlen=90, n_cpus=1\n",
    ")\n",
    "\n",
    "wd_X_text_tr = text_preprocessor.fit_transform(train)\n",
    "wd_X_text_te = text_preprocessor.transform(test)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 31,
   "id": "8d03daae",
   "metadata": {},
   "outputs": [],
   "source": [
    "basic_rnn = BasicRNN(\n",
    "    vocab_size=len(text_preprocessor.vocab.itos),\n",
    "    embed_dim=300,\n",
    "    hidden_dim=64,\n",
    "    n_layers=3,\n",
    "    rnn_dropout=0.2,\n",
    "    head_hidden_dims=[32],\n",
    ")\n",
    "\n",
    "\n",
    "wd_text_model = WideDeep(deeptext=basic_rnn, pred_dim=4)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 32,
   "id": "b22d4c70",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "WideDeep(\n",
       "  (deeptext): Sequential(\n",
       "    (0): BasicRNN(\n",
       "      (word_embed): Embedding(4328, 300, padding_idx=1)\n",
       "      (rnn): LSTM(300, 64, num_layers=3, batch_first=True, dropout=0.2)\n",
       "      (rnn_mlp): MLP(\n",
       "        (mlp): Sequential(\n",
       "          (dense_layer_0): Sequential(\n",
       "            (0): Linear(in_features=64, out_features=32, bias=True)\n",
       "            (1): ReLU(inplace=True)\n",
       "          )\n",
       "        )\n",
       "      )\n",
       "    )\n",
       "    (1): Linear(in_features=32, out_features=4, bias=True)\n",
       "  )\n",
       ")"
      ]
     },
     "execution_count": 32,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "wd_text_model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 33,
   "id": "84b14833",
   "metadata": {},
   "outputs": [],
   "source": [
    "text_trainer = Trainer(\n",
    "    wd_text_model,\n",
    "    objective=\"multiclass\",\n",
    "    metrics=[Accuracy, F1Score(average=True)],\n",
    "    num_workers=0,  # As in the case of the tokenizer, in notebook I need to set this to 0 for the Trainer to work\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 34,
   "id": "90d162b5",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "epoch 1: 100%|███████████████████████████████████████████████| 71/71 [00:01<00:00, 52.39it/s, loss=1.16, metrics={'acc': 0.5349, 'f1': 0.2011}]\n",
      "epoch 2: 100%|██████████████████████████████████████████████| 71/71 [00:01<00:00, 70.35it/s, loss=0.964, metrics={'acc': 0.5827, 'f1': 0.3005}]\n",
      "epoch 3: 100%|██████████████████████████████████████████████| 71/71 [00:01<00:00, 70.33it/s, loss=0.845, metrics={'acc': 0.6252, 'f1': 0.4133}]\n",
      "epoch 4: 100%|██████████████████████████████████████████████| 71/71 [00:01<00:00, 69.99it/s, loss=0.765, metrics={'acc': 0.6575, 'f1': 0.4875}]\n",
      "epoch 5: 100%|██████████████████████████████████████████████| 71/71 [00:01<00:00, 69.55it/s, loss=0.709, metrics={'acc': 0.6879, 'f1': 0.5423}]\n"
     ]
    }
   ],
   "source": [
    "text_trainer.fit(\n",
    "    X_text=wd_X_text_tr,\n",
    "    target=train.rating.values,\n",
    "    n_epochs=5,\n",
    "    batch_size=256,\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 35,
   "id": "732aee39",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "predict: 100%|████████████████████████████████████████████████████████████████████████████████████████████████| 18/18 [00:00<00:00, 211.51it/s]\n"
     ]
    }
   ],
   "source": [
    "wd_pred_text = text_trainer.predict_proba(X_text=wd_X_text_te)\n",
    "wd_pred_text_class = np.argmax(wd_pred_text, 1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 36,
   "id": "bcf7602f",
   "metadata": {},
   "outputs": [],
   "source": [
    "wd_acc_text = accuracy_score(test.rating, wd_pred_text_class)\n",
    "wd_f1_text = f1_score(test.rating, wd_pred_text_class, average=\"weighted\")\n",
    "wd_cm_text = confusion_matrix(test.rating, wd_pred_text_class)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 37,
   "id": "b3845176",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Basic RNN Accuracy: 0.6076957098628926. Basic RNN F1 Score: 0.6017335854471788\n"
     ]
    }
   ],
   "source": [
    "print(f\"Basic RNN Accuracy: {wd_acc_text}. Basic RNN F1 Score: {wd_f1_text}\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 38,
   "id": "3e11ba6a",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Basic RNN Confusion Matrix:\n",
      " [[ 327   76   62    9]\n",
      " [ 285  115  117   47]\n",
      " [ 131  122  315  413]\n",
      " [  42   69  401 1991]]\n"
     ]
    }
   ],
   "source": [
    "print(f\"Basic RNN Confusion Matrix:\\n {wd_cm_text}\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "b000a39e",
   "metadata": {},
   "source": [
    "The performance is very similar to that of simply using tf-idf and lightgbm. Let's see if adding tabular features helps when using `pytorch-widedeep`"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 39,
   "id": "5d3c7bba",
   "metadata": {},
   "outputs": [],
   "source": [
    "# ?TabPreprocessor"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 40,
   "id": "6bd0bfa6",
   "metadata": {},
   "outputs": [],
   "source": [
    "tab_preprocessor = TabPreprocessor(cat_embed_cols=tab_cols)\n",
    "\n",
    "wd_X_tab_tr = tab_preprocessor.fit_transform(train)\n",
    "wd_X_tab_te = tab_preprocessor.transform(test)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 41,
   "id": "6fe8392b",
   "metadata": {},
   "outputs": [],
   "source": [
    "# ?TabMlp"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 42,
   "id": "e9a50846",
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "tab_model = TabMlp(\n",
    "    column_idx=tab_preprocessor.column_idx,\n",
    "    cat_embed_input=tab_preprocessor.cat_embed_input,\n",
    "    mlp_hidden_dims=[100, 50],\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 43,
   "id": "80287e9b",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "TabMlp(\n",
       "  (cat_and_cont_embed): DiffSizeCatAndContEmbeddings(\n",
       "    (cat_embed): DiffSizeCatEmbeddings(\n",
       "      (embed_layers): ModuleDict(\n",
       "        (emb_layer_age): Embedding(78, 18, padding_idx=0)\n",
       "        (emb_layer_division_name): Embedding(5, 3, padding_idx=0)\n",
       "        (emb_layer_department_name): Embedding(8, 5, padding_idx=0)\n",
       "        (emb_layer_class_name): Embedding(22, 9, padding_idx=0)\n",
       "      )\n",
       "      (embedding_dropout): Dropout(p=0.1, inplace=False)\n",
       "    )\n",
       "  )\n",
       "  (encoder): MLP(\n",
       "    (mlp): Sequential(\n",
       "      (dense_layer_0): Sequential(\n",
       "        (0): Dropout(p=0.1, inplace=False)\n",
       "        (1): Linear(in_features=35, out_features=100, bias=True)\n",
       "        (2): ReLU(inplace=True)\n",
       "      )\n",
       "      (dense_layer_1): Sequential(\n",
       "        (0): Dropout(p=0.1, inplace=False)\n",
       "        (1): Linear(in_features=100, out_features=50, bias=True)\n",
       "        (2): ReLU(inplace=True)\n",
       "      )\n",
       "    )\n",
       "  )\n",
       ")"
      ]
     },
     "execution_count": 43,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "tab_model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 44,
   "id": "58e58c4b",
   "metadata": {},
   "outputs": [],
   "source": [
    "text_model = BasicRNN(\n",
    "    vocab_size=len(text_preprocessor.vocab.itos),\n",
    "    embed_dim=300,\n",
    "    hidden_dim=64,\n",
    "    n_layers=3,\n",
    "    rnn_dropout=0.2,\n",
    "    head_hidden_dims=[32],\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 45,
   "id": "24dc01d0",
   "metadata": {},
   "outputs": [],
   "source": [
    "wd_tab_and_text_model = WideDeep(deeptabular=tab_model, deeptext=text_model, pred_dim=4)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 46,
   "id": "b5ed8459",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "WideDeep(\n",
       "  (deeptabular): Sequential(\n",
       "    (0): TabMlp(\n",
       "      (cat_and_cont_embed): DiffSizeCatAndContEmbeddings(\n",
       "        (cat_embed): DiffSizeCatEmbeddings(\n",
       "          (embed_layers): ModuleDict(\n",
       "            (emb_layer_age): Embedding(78, 18, padding_idx=0)\n",
       "            (emb_layer_division_name): Embedding(5, 3, padding_idx=0)\n",
       "            (emb_layer_department_name): Embedding(8, 5, padding_idx=0)\n",
       "            (emb_layer_class_name): Embedding(22, 9, padding_idx=0)\n",
       "          )\n",
       "          (embedding_dropout): Dropout(p=0.1, inplace=False)\n",
       "        )\n",
       "      )\n",
       "      (encoder): MLP(\n",
       "        (mlp): Sequential(\n",
       "          (dense_layer_0): Sequential(\n",
       "            (0): Dropout(p=0.1, inplace=False)\n",
       "            (1): Linear(in_features=35, out_features=100, bias=True)\n",
       "            (2): ReLU(inplace=True)\n",
       "          )\n",
       "          (dense_layer_1): Sequential(\n",
       "            (0): Dropout(p=0.1, inplace=False)\n",
       "            (1): Linear(in_features=100, out_features=50, bias=True)\n",
       "            (2): ReLU(inplace=True)\n",
       "          )\n",
       "        )\n",
       "      )\n",
       "    )\n",
       "    (1): Linear(in_features=50, out_features=4, bias=True)\n",
       "  )\n",
       "  (deeptext): Sequential(\n",
       "    (0): BasicRNN(\n",
       "      (word_embed): Embedding(4328, 300, padding_idx=1)\n",
       "      (rnn): LSTM(300, 64, num_layers=3, batch_first=True, dropout=0.2)\n",
       "      (rnn_mlp): MLP(\n",
       "        (mlp): Sequential(\n",
       "          (dense_layer_0): Sequential(\n",
       "            (0): Linear(in_features=64, out_features=32, bias=True)\n",
       "            (1): ReLU(inplace=True)\n",
       "          )\n",
       "        )\n",
       "      )\n",
       "    )\n",
       "    (1): Linear(in_features=32, out_features=4, bias=True)\n",
       "  )\n",
       ")"
      ]
     },
     "execution_count": 46,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "wd_tab_and_text_model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 47,
   "id": "f427819a",
   "metadata": {},
   "outputs": [],
   "source": [
    "tab_and_text_trainer = Trainer(\n",
    "    wd_tab_and_text_model,\n",
    "    objective=\"multiclass\",\n",
    "    metrics=[Accuracy, F1Score(average=True)],\n",
    "    num_workers=0,  # As in the case of the tokenizer, in notebook I need to set this to 0 for the Trainer to work\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 48,
   "id": "1603fa31",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "epoch 1: 100%|████████████████████████████████████████████████| 71/71 [00:01<00:00, 52.04it/s, loss=1.13, metrics={'acc': 0.538, 'f1': 0.1911}]\n",
      "epoch 2: 100%|██████████████████████████████████████████████| 71/71 [00:01<00:00, 52.28it/s, loss=0.936, metrics={'acc': 0.5887, 'f1': 0.3507}]\n",
      "epoch 3: 100%|██████████████████████████████████████████████| 71/71 [00:01<00:00, 52.26it/s, loss=0.825, metrics={'acc': 0.6394, 'f1': 0.4545}]\n",
      "epoch 4: 100%|██████████████████████████████████████████████| 71/71 [00:01<00:00, 51.33it/s, loss=0.757, metrics={'acc': 0.6696, 'f1': 0.5214}]\n",
      "epoch 5: 100%|██████████████████████████████████████████████| 71/71 [00:01<00:00, 50.39it/s, loss=0.702, metrics={'acc': 0.6963, 'f1': 0.5654}]\n"
     ]
    }
   ],
   "source": [
    "tab_and_text_trainer.fit(\n",
    "    X_tab=wd_X_tab_tr,\n",
    "    X_text=wd_X_text_tr,\n",
    "    target=train.rating.values,\n",
    "    n_epochs=5,\n",
    "    batch_size=256,\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 49,
   "id": "504cc3de",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "predict: 100%|████████████████████████████████████████████████████████████████████████████████████████████████| 18/18 [00:00<00:00, 136.94it/s]\n"
     ]
    }
   ],
   "source": [
    "wd_pred_tab_and_text = tab_and_text_trainer.predict_proba(\n",
    "    X_tab=wd_X_tab_te, X_text=wd_X_text_te\n",
    ")\n",
    "wd_pred_tab_and_text_class = np.argmax(wd_pred_tab_and_text, 1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 50,
   "id": "d1106a8e",
   "metadata": {},
   "outputs": [],
   "source": [
    "wd_acc_tab_and_text = accuracy_score(test.rating, wd_pred_tab_and_text_class)\n",
    "wd_f1_tab_and_text = f1_score(\n",
    "    test.rating, wd_pred_tab_and_text_class, average=\"weighted\"\n",
    ")\n",
    "wd_cm_tab_and_text = confusion_matrix(test.rating, wd_pred_tab_and_text_class)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 51,
   "id": "185272e8",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Basic RNN + Tabular  Accuracy: 0.6333480760725343. Basic RNN + TabularF1 Score: 0.6332310089593208\n",
      "Basic RNN + Tabular  Confusion Matrix:\n",
      " [[ 267  132   65   10]\n",
      " [ 198  168  159   39]\n",
      " [  57  113  410  401]\n",
      " [  12   58  414 2019]]\n"
     ]
    }
   ],
   "source": [
    "print(\n",
    "    f\"Basic RNN + Tabular Accuracy: {wd_acc_tab_and_text}. Basic RNN + Tabular F1 Score: {wd_f1_tab_and_text}\"\n",
    ")\n",
    "print(f\"Basic RNN + Tabular Confusion Matrix:\\n {wd_cm_tab_and_text}\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "20cfc359",
   "metadata": {},
   "source": [
    "\n",
    "### 3. Text classification using a Hugging Face model as a custom model in pytorch-widedeep"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "5e834cdd",
   "metadata": {},
   "source": [
    "We are going to \"manually\" code the Tokenizer and the model and see how they can be used as part of the process along with the `pytorch-widedeep` library.\n",
    "\n",
    "Tokenizer:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 52,
   "id": "2065719f",
   "metadata": {},
   "outputs": [],
   "source": [
    "class BertTokenizer(object):\n",
    "    def __init__(\n",
    "        self,\n",
    "        pretrained_tokenizer=\"distilbert-base-uncased\",\n",
    "        do_lower_case=True,\n",
    "        max_length=90,\n",
    "    ):\n",
    "        super(BertTokenizer, self).__init__()\n",
    "        self.pretrained_tokenizer = pretrained_tokenizer\n",
    "        self.do_lower_case = do_lower_case\n",
    "        self.max_length = max_length\n",
    "\n",
    "    def fit(self, texts):\n",
    "        self.tokenizer = DistilBertTokenizer.from_pretrained(\n",
    "            self.pretrained_tokenizer, do_lower_case=self.do_lower_case\n",
    "        )\n",
    "\n",
    "        return self\n",
    "\n",
    "    def transform(self, texts):\n",
    "        input_ids = []\n",
    "        for text in texts:\n",
    "            encoded_sent = self.tokenizer.encode_plus(\n",
    "                text=self._pre_rules(text),\n",
    "                add_special_tokens=True,\n",
    "                max_length=self.max_length,\n",
    "                padding=\"max_length\",\n",
    "                truncation=True,\n",
    "            )\n",
    "\n",
    "            input_ids.append(encoded_sent.get(\"input_ids\"))\n",
    "        return np.stack(input_ids)\n",
    "\n",
    "    def fit_transform(self, texts):\n",
    "        return self.fit(texts).transform(texts)\n",
    "\n",
    "    @staticmethod\n",
    "    def _pre_rules(text):\n",
    "        return fix_html(rm_useless_spaces(spec_add_spaces(text)))"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "d12ef03c",
   "metadata": {},
   "source": [
    "Model:"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 53,
   "id": "98a192a4",
   "metadata": {},
   "outputs": [],
   "source": [
    "class BertModel(nn.Module):\n",
    "    def __init__(\n",
    "        self,\n",
    "        model_name: str = \"distilbert-base-uncased\",\n",
    "        freeze_bert: bool = False,\n",
    "    ):\n",
    "        super(BertModel, self).__init__()\n",
    "\n",
    "        self.bert = DistilBertModel.from_pretrained(\n",
    "            model_name,\n",
    "        )\n",
    "\n",
    "        if freeze_bert:\n",
    "            for param in self.bert.parameters():\n",
    "                param.requires_grad = False\n",
    "\n",
    "    def forward(self, X_inp: Tensor) -> Tensor:\n",
    "        attn_mask = (X_inp != 0).type(torch.int8)\n",
    "        outputs = self.bert(input_ids=X_inp, attention_mask=attn_mask)\n",
    "        return outputs[0][:, 0, :]\n",
    "\n",
    "    @property\n",
    "    def output_dim(self) -> int:\n",
    "        # This is THE ONLY requirement for any model to work with pytorch-widedeep. It must\n",
    "        # have an 'output_dim' property so the WideDeep class knows the incoming dims\n",
    "        # from the custom model. In this case it is hardcoded to DistilBERT's hidden size\n",
    "        return 768"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 54,
   "id": "8086cfaa",
   "metadata": {},
   "outputs": [],
   "source": [
    "bert_tokenizer = BertTokenizer()\n",
    "X_bert_tr = bert_tokenizer.fit_transform(train[\"review_text\"].tolist())\n",
    "X_bert_te = bert_tokenizer.transform(test[\"review_text\"].tolist())"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "913db509",
   "metadata": {},
   "source": [
    "As I mentioned a number of times in the documentation and examples, `pytorch-widedeep` is designed for flexibility. For any of the data modes (tabular, text and images) there are components/models available in the library. However, the user can choose to use any model they want, with the only requirement that such a model must have an `output_dim` property. \n",
    "\n",
    "With that in mind, the `BertModel` class defined above can be used by `pytorch-widedeep` as any other of the internal components. In other words, simply...pass it to the `WideDeep` class. In this case we are going to add a FC-head as part of the classifier."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 55,
   "id": "8a0c14e1",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Some weights of the model checkpoint at distilbert-base-uncased were not used when initializing DistilBertModel: ['vocab_projector.bias', 'vocab_layer_norm.bias', 'vocab_transform.weight', 'vocab_transform.bias', 'vocab_projector.weight', 'vocab_layer_norm.weight']\n",
      "- This IS expected if you are initializing DistilBertModel from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n",
      "- This IS NOT expected if you are initializing DistilBertModel from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n"
     ]
    }
   ],
   "source": [
    "bert_model = BertModel(freeze_bert=True)\n",
    "wd_bert_model = WideDeep(\n",
    "    deeptext=bert_model,\n",
    "    head_hidden_dims=[256, 128, 64],\n",
    "    pred_dim=4,\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 56,
   "id": "e353a3b9",
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "WideDeep(\n",
       "  (deeptext): BertModel(\n",
       "    (bert): DistilBertModel(\n",
       "      (embeddings): Embeddings(\n",
       "        (word_embeddings): Embedding(30522, 768, padding_idx=0)\n",
       "        (position_embeddings): Embedding(512, 768)\n",
       "        (LayerNorm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n",
       "        (dropout): Dropout(p=0.1, inplace=False)\n",
       "      )\n",
       "      (transformer): Transformer(\n",
       "        (layer): ModuleList(\n",
       "          (0): TransformerBlock(\n",
       "            (attention): MultiHeadSelfAttention(\n",
       "              (dropout): Dropout(p=0.1, inplace=False)\n",
       "              (q_lin): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (k_lin): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (v_lin): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (out_lin): Linear(in_features=768, out_features=768, bias=True)\n",
       "            )\n",
       "            (sa_layer_norm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n",
       "            (ffn): FFN(\n",
       "              (dropout): Dropout(p=0.1, inplace=False)\n",
       "              (lin1): Linear(in_features=768, out_features=3072, bias=True)\n",
       "              (lin2): Linear(in_features=3072, out_features=768, bias=True)\n",
       "              (activation): GELUActivation()\n",
       "            )\n",
       "            (output_layer_norm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n",
       "          )\n",
       "          (1): TransformerBlock(\n",
       "            (attention): MultiHeadSelfAttention(\n",
       "              (dropout): Dropout(p=0.1, inplace=False)\n",
       "              (q_lin): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (k_lin): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (v_lin): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (out_lin): Linear(in_features=768, out_features=768, bias=True)\n",
       "            )\n",
       "            (sa_layer_norm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n",
       "            (ffn): FFN(\n",
       "              (dropout): Dropout(p=0.1, inplace=False)\n",
       "              (lin1): Linear(in_features=768, out_features=3072, bias=True)\n",
       "              (lin2): Linear(in_features=3072, out_features=768, bias=True)\n",
       "              (activation): GELUActivation()\n",
       "            )\n",
       "            (output_layer_norm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n",
       "          )\n",
       "          (2): TransformerBlock(\n",
       "            (attention): MultiHeadSelfAttention(\n",
       "              (dropout): Dropout(p=0.1, inplace=False)\n",
       "              (q_lin): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (k_lin): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (v_lin): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (out_lin): Linear(in_features=768, out_features=768, bias=True)\n",
       "            )\n",
       "            (sa_layer_norm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n",
       "            (ffn): FFN(\n",
       "              (dropout): Dropout(p=0.1, inplace=False)\n",
       "              (lin1): Linear(in_features=768, out_features=3072, bias=True)\n",
       "              (lin2): Linear(in_features=3072, out_features=768, bias=True)\n",
       "              (activation): GELUActivation()\n",
       "            )\n",
       "            (output_layer_norm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n",
       "          )\n",
       "          (3): TransformerBlock(\n",
       "            (attention): MultiHeadSelfAttention(\n",
       "              (dropout): Dropout(p=0.1, inplace=False)\n",
       "              (q_lin): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (k_lin): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (v_lin): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (out_lin): Linear(in_features=768, out_features=768, bias=True)\n",
       "            )\n",
       "            (sa_layer_norm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n",
       "            (ffn): FFN(\n",
       "              (dropout): Dropout(p=0.1, inplace=False)\n",
       "              (lin1): Linear(in_features=768, out_features=3072, bias=True)\n",
       "              (lin2): Linear(in_features=3072, out_features=768, bias=True)\n",
       "              (activation): GELUActivation()\n",
       "            )\n",
       "            (output_layer_norm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n",
       "          )\n",
       "          (4): TransformerBlock(\n",
       "            (attention): MultiHeadSelfAttention(\n",
       "              (dropout): Dropout(p=0.1, inplace=False)\n",
       "              (q_lin): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (k_lin): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (v_lin): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (out_lin): Linear(in_features=768, out_features=768, bias=True)\n",
       "            )\n",
       "            (sa_layer_norm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n",
       "            (ffn): FFN(\n",
       "              (dropout): Dropout(p=0.1, inplace=False)\n",
       "              (lin1): Linear(in_features=768, out_features=3072, bias=True)\n",
       "              (lin2): Linear(in_features=3072, out_features=768, bias=True)\n",
       "              (activation): GELUActivation()\n",
       "            )\n",
       "            (output_layer_norm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n",
       "          )\n",
       "          (5): TransformerBlock(\n",
       "            (attention): MultiHeadSelfAttention(\n",
       "              (dropout): Dropout(p=0.1, inplace=False)\n",
       "              (q_lin): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (k_lin): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (v_lin): Linear(in_features=768, out_features=768, bias=True)\n",
       "              (out_lin): Linear(in_features=768, out_features=768, bias=True)\n",
       "            )\n",
       "            (sa_layer_norm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n",
       "            (ffn): FFN(\n",
       "              (dropout): Dropout(p=0.1, inplace=False)\n",
       "              (lin1): Linear(in_features=768, out_features=3072, bias=True)\n",
       "              (lin2): Linear(in_features=3072, out_features=768, bias=True)\n",
       "              (activation): GELUActivation()\n",
       "            )\n",
       "            (output_layer_norm): LayerNorm((768,), eps=1e-12, elementwise_affine=True)\n",
       "          )\n",
       "        )\n",
       "      )\n",
       "    )\n",
       "  )\n",
       "  (deephead): Sequential(\n",
       "    (0): MLP(\n",
       "      (mlp): Sequential(\n",
       "        (dense_layer_0): Sequential(\n",
       "          (0): Linear(in_features=768, out_features=256, bias=True)\n",
       "          (1): ReLU(inplace=True)\n",
       "          (2): Dropout(p=0.1, inplace=False)\n",
       "        )\n",
       "        (dense_layer_1): Sequential(\n",
       "          (0): Linear(in_features=256, out_features=128, bias=True)\n",
       "          (1): ReLU(inplace=True)\n",
       "          (2): Dropout(p=0.1, inplace=False)\n",
       "        )\n",
       "        (dense_layer_2): Sequential(\n",
       "          (0): Linear(in_features=128, out_features=64, bias=True)\n",
       "          (1): ReLU(inplace=True)\n",
       "          (2): Dropout(p=0.1, inplace=False)\n",
       "        )\n",
       "      )\n",
       "    )\n",
       "    (1): Linear(in_features=64, out_features=4, bias=True)\n",
       "  )\n",
       ")"
      ]
     },
     "execution_count": 56,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "wd_bert_model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 57,
   "id": "769019e3",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "epoch 1: 100%|████████████████████████████████████████████| 283/283 [00:14<00:00, 19.68it/s, loss=0.968, metrics={'acc': 0.5879, 'f1': 0.3591}]\n",
      "epoch 2: 100%|████████████████████████████████████████████| 283/283 [00:14<00:00, 19.63it/s, loss=0.884, metrics={'acc': 0.6178, 'f1': 0.4399}]\n",
      "epoch 3: 100%|█████████████████████████████████████████████| 283/283 [00:14<00:00, 19.55it/s, loss=0.87, metrics={'acc': 0.6234, 'f1': 0.4527}]\n"
     ]
    }
   ],
   "source": [
    "wd_bert_trainer = Trainer(\n",
    "    wd_bert_model,\n",
    "    objective=\"multiclass\",\n",
    "    metrics=[Accuracy, F1Score(average=True)],\n",
    "    num_workers=0,  # As in the case of the tokenizer, in notebook I need to set this to 0 for the Trainer to work\n",
    ")\n",
    "\n",
    "wd_bert_trainer.fit(\n",
    "    X_text=X_bert_tr,\n",
    "    target=train.rating.values,\n",
    "    n_epochs=3,\n",
    "    batch_size=64,\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 58,
   "id": "8af9d539",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "predict: 100%|█████████████████████████████████████████████████████████████████████████████████████████████████| 71/71 [00:03<00:00, 21.97it/s]\n"
     ]
    }
   ],
   "source": [
    "wd_bert_pred_text = wd_bert_trainer.predict_proba(X_text=X_bert_te)\n",
    "wd_bert_pred_text_class = np.argmax(wd_bert_pred_text, 1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 59,
   "id": "278fd3e2",
   "metadata": {},
   "outputs": [],
   "source": [
    "wd_bert_acc = accuracy_score(test.rating, wd_bert_pred_text_class)\n",
    "wd_bert_f1 = f1_score(test.rating, wd_bert_pred_text_class, average=\"weighted\")\n",
    "wd_bert_cm = confusion_matrix(test.rating, wd_bert_pred_text_class)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 60,
   "id": "e2cb468b",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Distilbert Accuracy: 0.6326846528084918. Distilbert F1 Score: 0.5796652991272998\n",
      "Distilbert Confusion Matrix:\n",
      " [[ 287   75   22   90]\n",
      " [ 197  136   62  169]\n",
      " [  68  119  123  671]\n",
      " [  40   64   84 2315]]\n"
     ]
    }
   ],
   "source": [
    "print(f\"Distilbert Accuracy: {wd_bert_acc}. Distilbert F1 Score: {wd_bert_f1}\")\n",
    "print(f\"Distilbert Confusion Matrix:\\n {wd_bert_cm}\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "4c71ac33",
   "metadata": {},
   "source": [
    "Now, adding a tabular model follows the exact same process as the one described in section 2. "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 61,
   "id": "3dcda54c",
   "metadata": {},
   "outputs": [],
   "source": [
    "tab_model = TabMlp(\n",
    "    column_idx=tab_preprocessor.column_idx,\n",
    "    cat_embed_input=tab_preprocessor.cat_embed_input,\n",
    "    mlp_hidden_dims=[100, 50],\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 62,
   "id": "3fbc1bdc",
   "metadata": {},
   "outputs": [],
   "source": [
    "wd_tab_bert_model = WideDeep(\n",
    "    deeptabular=tab_model,\n",
    "    deeptext=bert_model,\n",
    "    head_hidden_dims=[256, 128, 64],\n",
    "    pred_dim=4,\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 63,
   "id": "88d4c372",
   "metadata": {},
   "outputs": [],
   "source": [
    "wd_tab_bert_trainer = Trainer(\n",
    "    wd_tab_bert_model,\n",
    "    objective=\"multiclass\",\n",
    "    metrics=[Accuracy, F1Score(average=True)],\n",
    "    num_workers=0,  # As in the case of the tokenizer, in notebook I need to set this to 0 for the Trainer to work\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 64,
   "id": "f4cfc3e8",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "epoch 1: 100%|████████████████████████████████████████████| 283/283 [00:15<00:00, 18.15it/s, loss=0.974, metrics={'acc': 0.5838, 'f1': 0.3404}]\n",
      "epoch 2: 100%|█████████████████████████████████████████████| 283/283 [00:15<00:00, 18.38it/s, loss=0.885, metrics={'acc': 0.618, 'f1': 0.4378}]\n",
      "epoch 3: 100%|████████████████████████████████████████████| 283/283 [00:15<00:00, 18.40it/s, loss=0.868, metrics={'acc': 0.6252, 'f1': 0.4575}]\n"
     ]
    }
   ],
   "source": [
    "wd_tab_bert_trainer.fit(\n",
    "    X_tab=wd_X_tab_tr,\n",
    "    X_text=X_bert_tr,\n",
    "    target=train.rating.values,\n",
    "    n_epochs=3,\n",
    "    batch_size=64,\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 65,
   "id": "189635ec",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "predict: 100%|█████████████████████████████████████████████████████████████████████████████████████████████████| 71/71 [00:03<00:00, 21.32it/s]\n"
     ]
    }
   ],
   "source": [
    "wd_tab_bert_pred_text = wd_tab_bert_trainer.predict_proba(\n",
    "    X_tab=wd_X_tab_te, X_text=X_bert_te\n",
    ")\n",
    "wd_tab_bert_pred_text_class = np.argmax(wd_tab_bert_pred_text, 1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 66,
   "id": "e04b1180",
   "metadata": {},
   "outputs": [],
   "source": [
    "wd_tab_bert_acc = accuracy_score(test.rating, wd_tab_bert_pred_text_class)\n",
    "wd_tab_bert_f1 = f1_score(test.rating, wd_tab_bert_pred_text_class, average=\"weighted\")\n",
    "wd_tab_bert_cm = confusion_matrix(test.rating, wd_tab_bert_pred_text_class)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 67,
   "id": "9234f0ff",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Distilbert + Tabular Accuracy: 0.6242812914639541. Distilbert + Tabular F1 Score: 0.5508351761564895\n",
      "Distilbert + Tabular Confusion Matrix:\n",
      " [[ 297   56   11  110]\n",
      " [ 229   91   38  206]\n",
      " [  86   90   71  734]\n",
      " [  49   48   42 2364]]\n"
     ]
    }
   ],
   "source": [
    "print(\n",
    "    f\"Distilbert + Tabular Accuracy: {wd_tab_bert_acc}. Distilbert + Tabular F1 Score: {wd_tab_bert_f1}\"\n",
    ")\n",
    "print(f\"Distilbert + Tabular Confusion Matrix:\\n {wd_tab_bert_cm}\")"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.13"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
