{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "(shared_functions)=\n",
    "# Shared functions\n",
    "\n",
    "This notebook contains functions which are commonly reused in the book, for loading and saving data, fitting and assessing prediction models, or plotting results. \n",
    "\n",
    "The notebook can be downloaded from GitHub with\n",
    "\n",
    "```\n",
    "!curl -O https://raw.githubusercontent.com/Fraud-Detection-Handbook/fraud-detection-handbook/main/Chapter_References/shared_functions.ipynb\n",
    "\n",
    "```\n",
    "\n",
    "The notebook can be included in other notebooks using\n",
    "\n",
    "```\n",
    "%run shared_functions\n",
    "```\n",
    "\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "##  General imports"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# General\n",
    "import os\n",
    "import pandas as pd\n",
    "import numpy as np\n",
    "import math\n",
    "import sys\n",
    "import time\n",
    "import pickle\n",
    "import json\n",
    "import datetime\n",
    "import random\n",
    "\n",
    "#import sklearn\n",
    "import sklearn\n",
    "from sklearn import *\n",
    "\n",
    "%matplotlib inline\n",
    "\n",
    "import matplotlib\n",
    "import matplotlib.pyplot as plt\n",
    "import seaborn as sns\n",
    "\n",
    "sns.set_style('darkgrid', {'axes.facecolor': '0.9'})\n",
    "\n",
    "import graphviz\n",
    "import xgboost\n",
    "\n",
    "# For imbalanced learning\n",
    "import imblearn\n",
    "\n",
    "import warnings\n",
    "warnings.filterwarnings('ignore')\n",
    "\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Loading and saving data"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### read_from_files\n",
    "\n",
    "First use in [Chapter 3, Baseline Feature Transformation](Baseline_Feature_Transformation)."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Load a set of pickle files, put them together in a single DataFrame, and order them by time\n",
    "# It takes as input the folder DIR_INPUT where the files are stored, and the BEGIN_DATE and END_DATE\n",
    "def read_from_files(DIR_INPUT, BEGIN_DATE, END_DATE):\n",
    "    \n",
    "    files = [os.path.join(DIR_INPUT, f) for f in os.listdir(DIR_INPUT) if f>=BEGIN_DATE+'.pkl' and f<=END_DATE+'.pkl']\n",
    "\n",
    "    frames = []\n",
    "    for f in files:\n",
    "        df = pd.read_pickle(f)\n",
    "        frames.append(df)\n",
    "        del df\n",
    "    df_final = pd.concat(frames)\n",
    "    \n",
    "    df_final=df_final.sort_values('TRANSACTION_ID')\n",
    "    df_final.reset_index(drop=True,inplace=True)\n",
    "    #  Note: -1 are missing values for real world data \n",
    "    df_final=df_final.replace([-1],0)\n",
    "    \n",
    "    return df_final\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### save_object\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "#Save oject as pickle file\n",
    "def save_object(obj, filename):\n",
    "    with open(filename, 'wb') as output:\n",
    "        pickle.dump(obj, output, pickle.HIGHEST_PROTOCOL)\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Data preprocessing"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### scaleData\n",
    "\n",
    "First use in [Chapter 3, Baseline Fraud Detection System](Baseline_FDS)."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def scaleData(train,test,features):\n",
    "    scaler = sklearn.preprocessing.StandardScaler()\n",
    "    scaler.fit(train[features])\n",
    "    train[features]=scaler.transform(train[features])\n",
    "    test[features]=scaler.transform(test[features])\n",
    "    \n",
    "    return (train,test)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Train/Test splitting strategies"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### get_train_test_set\n",
    "\n",
    "First use in [Chapter 3, Baseline Fraud Detection System](Baseline_FDS).\n",
    "Sampling ratio added in [Chapter 5, Validation Strategies](Validation_Strategies)."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_train_test_set(transactions_df,\n",
    "                       start_date_training,\n",
    "                       delta_train=7,delta_delay=7,delta_test=7,\n",
    "                       sampling_ratio=1.0,\n",
    "                       random_state=0):\n",
    "    \n",
    "    # Get the training set data\n",
    "    train_df = transactions_df[(transactions_df.TX_DATETIME>=start_date_training) &\n",
    "                               (transactions_df.TX_DATETIME<start_date_training+datetime.timedelta(days=delta_train))]\n",
    "    \n",
    "    # Get the test set data\n",
    "    test_df = []\n",
    "    \n",
    "    # Note: Cards known to be compromised after the delay period are removed from the test set\n",
    "    # That is, for each test day, all frauds known at (test_day-delay_period) are removed\n",
    "    \n",
    "    # First, get known defrauded customers from the training set\n",
    "    known_defrauded_customers = set(train_df[train_df.TX_FRAUD==1].CUSTOMER_ID)\n",
    "    \n",
    "    # Get the relative starting day of training set (easier than TX_DATETIME to collect test data)\n",
    "    start_tx_time_days_training = train_df.TX_TIME_DAYS.min()\n",
    "    \n",
    "    # Then, for each day of the test set\n",
    "    for day in range(delta_test):\n",
    "    \n",
    "        # Get test data for that day\n",
    "        test_df_day = transactions_df[transactions_df.TX_TIME_DAYS==start_tx_time_days_training+\n",
    "                                                                    delta_train+delta_delay+\n",
    "                                                                    day]\n",
    "        \n",
    "        # Compromised cards from that test day, minus the delay period, are added to the pool of known defrauded customers\n",
    "        test_df_day_delay_period = transactions_df[transactions_df.TX_TIME_DAYS==start_tx_time_days_training+\n",
    "                                                                                delta_train+\n",
    "                                                                                day-1]\n",
    "        \n",
    "        new_defrauded_customers = set(test_df_day_delay_period[test_df_day_delay_period.TX_FRAUD==1].CUSTOMER_ID)\n",
    "        known_defrauded_customers = known_defrauded_customers.union(new_defrauded_customers)\n",
    "        \n",
    "        test_df_day = test_df_day[~test_df_day.CUSTOMER_ID.isin(known_defrauded_customers)]\n",
    "        \n",
    "        test_df.append(test_df_day)\n",
    "        \n",
    "    test_df = pd.concat(test_df)\n",
    "    \n",
    "    # If subsample\n",
    "    if sampling_ratio<1:\n",
    "        \n",
    "        train_df_frauds=train_df[train_df.TX_FRAUD==1].sample(frac=sampling_ratio, random_state=random_state)\n",
    "        train_df_genuine=train_df[train_df.TX_FRAUD==0].sample(frac=sampling_ratio, random_state=random_state)\n",
    "        train_df=pd.concat([train_df_frauds,train_df_genuine])\n",
    "        \n",
    "    # Sort data sets by ascending order of transaction ID\n",
    "    train_df=train_df.sort_values('TRANSACTION_ID')\n",
    "    test_df=test_df.sort_values('TRANSACTION_ID')\n",
    "    \n",
    "    return (train_df, test_df)\n",
    "                               \n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_train_delay_test_set(transactions_df,\n",
    "                             start_date_training,\n",
    "                             delta_train=7,delta_delay=7,delta_test=7,\n",
    "                             sampling_ratio=1.0,\n",
    "                             random_state=0):\n",
    "    \n",
    "    # Get the training set data\n",
    "    train_df = transactions_df[(transactions_df.TX_DATETIME>=start_date_training) &\n",
    "                               (transactions_df.TX_DATETIME<start_date_training+datetime.timedelta(days=delta_train))]\n",
    "    \n",
    "    # Get the delay set data\n",
    "    delay_df = transactions_df[(transactions_df.TX_DATETIME>=start_date_training+datetime.timedelta(days=delta_train)) &\n",
    "                               (transactions_df.TX_DATETIME<start_date_training+datetime.timedelta(days=delta_train)+\n",
    "                                                                               +datetime.timedelta(days=delta_delay))]\n",
    "    \n",
    "    # Get the test set data\n",
    "    test_df = []\n",
    "    \n",
    "    # Note: Cards known to be compromised after the delay period are removed from the test set\n",
    "    # That is, for each test day, all frauds known at (test_day-delay_period) are removed\n",
    "    \n",
    "    # First, get known defrauded customers from the training set\n",
    "    known_defrauded_customers = set(train_df[train_df.TX_FRAUD==1].CUSTOMER_ID)\n",
    "    \n",
    "    # Get the relative starting day of training set (easier than TX_DATETIME to collect test data)\n",
    "    start_tx_time_days_training = train_df.TX_TIME_DAYS.min()\n",
    "    \n",
    "    # Then, for each day of the test set\n",
    "    for day in range(delta_test):\n",
    "    \n",
    "        # Get test data for that day\n",
    "        test_df_day = transactions_df[transactions_df.TX_TIME_DAYS==start_tx_time_days_training+\n",
    "                                                                    delta_train+delta_delay+\n",
    "                                                                    day]\n",
    "        \n",
    "        # Compromised cards from that test day, minus the delay period, are added to the pool of known defrauded customers\n",
    "        test_df_day_delay_period = transactions_df[transactions_df.TX_TIME_DAYS==start_tx_time_days_training+\n",
    "                                                                                delta_train+\n",
    "                                                                                day-1]\n",
    "        \n",
    "        new_defrauded_customers = set(test_df_day_delay_period[test_df_day_delay_period.TX_FRAUD==1].CUSTOMER_ID)\n",
    "        known_defrauded_customers = known_defrauded_customers.union(new_defrauded_customers)\n",
    "        \n",
    "        test_df_day = test_df_day[~test_df_day.CUSTOMER_ID.isin(known_defrauded_customers)]\n",
    "        \n",
    "        test_df.append(test_df_day)\n",
    "        \n",
    "    test_df = pd.concat(test_df)\n",
    "    \n",
    "    # If subsample\n",
    "    if sampling_ratio<1:\n",
    "        \n",
    "        train_df_frauds=train_df[train_df.TX_FRAUD==1].sample(frac=sampling_ratio, random_state=random_state)\n",
    "        train_df_genuine=train_df[train_df.TX_FRAUD==0].sample(frac=sampling_ratio, random_state=random_state)\n",
    "        train_df=pd.concat([train_df_frauds,train_df_genuine])\n",
    "        \n",
    "    # Sort data sets by ascending order of transaction ID\n",
    "    train_df=train_df.sort_values('TRANSACTION_ID')\n",
    "    test_df=test_df.sort_values('TRANSACTION_ID')\n",
    "    \n",
    "    return (train_df, delay_df, test_df)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### prequentialSplit\n",
    "\n",
    "First use in [Chapter 5, Validation Strategies](Validation_Strategies)."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def prequentialSplit(transactions_df,\n",
    "                     start_date_training, \n",
    "                     n_folds=4, \n",
    "                     delta_train=7,\n",
    "                     delta_delay=7,\n",
    "                     delta_assessment=7):\n",
    "    \n",
    "    prequential_split_indices=[]\n",
    "        \n",
    "    # For each fold\n",
    "    for fold in range(n_folds):\n",
    "        \n",
    "        # Shift back start date for training by the fold index times the assessment period (delta_assessment)\n",
    "        # (See Fig. 5)\n",
    "        start_date_training_fold = start_date_training-datetime.timedelta(days=fold*delta_assessment)\n",
    "        \n",
    "        # Get the training and test (assessment) sets\n",
    "        (train_df, test_df)=get_train_test_set(transactions_df,\n",
    "                                               start_date_training=start_date_training_fold,\n",
    "                                               delta_train=delta_train,delta_delay=delta_delay,delta_test=delta_assessment)\n",
    "    \n",
    "        # Get the indices from the two sets, and add them to the list of prequential splits\n",
    "        indices_train=list(train_df.index)\n",
    "        indices_test=list(test_df.index)\n",
    "        \n",
    "        prequential_split_indices.append((indices_train,indices_test))\n",
    "    \n",
    "    return prequential_split_indices\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Predictions functions"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### fit_model_and_get_predictions\n",
    "\n",
    "First use in [Chapter 3, Baseline Fraud Detection System](Baseline_FDS)."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def fit_model_and_get_predictions(classifier, train_df, test_df, \n",
    "                                  input_features, output_feature=\"TX_FRAUD\",scale=True):\n",
    "\n",
    "    # By default, scales input data\n",
    "    if scale:\n",
    "        (train_df, test_df)=scaleData(train_df,test_df,input_features)\n",
    "    \n",
    "    # We first train the classifier using the `fit` method, and pass as arguments the input and output features\n",
    "    start_time=time.time()\n",
    "    classifier.fit(train_df[input_features], train_df[output_feature])\n",
    "    training_execution_time=time.time()-start_time\n",
    "\n",
    "    # We then get the predictions on the training and test data using the `predict_proba` method\n",
    "    # The predictions are returned as a numpy array, that provides the probability of fraud for each transaction \n",
    "    start_time=time.time()\n",
    "    predictions_test=classifier.predict_proba(test_df[input_features])[:,1]\n",
    "    prediction_execution_time=time.time()-start_time\n",
    "    \n",
    "    predictions_train=classifier.predict_proba(train_df[input_features])[:,1]\n",
    "\n",
    "    # The result is returned as a dictionary containing the fitted models, \n",
    "    # and the predictions on the training and test sets\n",
    "    model_and_predictions_dictionary = {'classifier': classifier,\n",
    "                                        'predictions_test': predictions_test,\n",
    "                                        'predictions_train': predictions_train,\n",
    "                                        'training_execution_time': training_execution_time,\n",
    "                                        'prediction_execution_time': prediction_execution_time\n",
    "                                       }\n",
    "    \n",
    "    return model_and_predictions_dictionary"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Performance assessment"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### card_precision_top_k_day\n",
    "\n",
    "First use in [Chapter 3, Baseline Fraud Detection System](Baseline_FDS).\n",
    "Detailed in [Chapter 4, Precision_top_K_Metrics](Precision_Top_K_Metrics)."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def card_precision_top_k_day(df_day,top_k):\n",
    "    \n",
    "    # This takes the max of the predictions AND the max of label TX_FRAUD for each CUSTOMER_ID, \n",
    "    # and sorts by decreasing order of fraudulent prediction\n",
    "    df_day = df_day.groupby('CUSTOMER_ID').max().sort_values(by=\"predictions\", ascending=False).reset_index(drop=False)\n",
    "            \n",
    "    # Get the top k most suspicious cards\n",
    "    df_day_top_k=df_day.head(top_k)\n",
    "    list_detected_compromised_cards=list(df_day_top_k[df_day_top_k.TX_FRAUD==1].CUSTOMER_ID)\n",
    "    \n",
    "    # Compute precision top k\n",
    "    card_precision_top_k = len(list_detected_compromised_cards) / top_k\n",
    "    \n",
    "    return list_detected_compromised_cards, card_precision_top_k\n",
    "\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### card_precision_top_k\n",
    "\n",
    "First use in [Chapter 3, Baseline Fraud Detection System](Baseline_FDS).\n",
    "Detailed in [Chapter 4, Precision_top_K_Metrics](Precision_Top_K_Metrics)."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def card_precision_top_k(predictions_df, top_k, remove_detected_compromised_cards=True):\n",
    "\n",
    "    # Sort days by increasing order\n",
    "    list_days=list(predictions_df['TX_TIME_DAYS'].unique())\n",
    "    list_days.sort()\n",
    "    \n",
    "    # At first, the list of detected compromised cards is empty\n",
    "    list_detected_compromised_cards = []\n",
    "    \n",
    "    card_precision_top_k_per_day_list = []\n",
    "    nb_compromised_cards_per_day = []\n",
    "    \n",
    "    # For each day, compute precision top k\n",
    "    for day in list_days:\n",
    "        \n",
    "        df_day = predictions_df[predictions_df['TX_TIME_DAYS']==day]\n",
    "        df_day = df_day[['predictions', 'CUSTOMER_ID', 'TX_FRAUD']]\n",
    "        \n",
    "        # Let us remove detected compromised cards from the set of daily transactions\n",
    "        df_day = df_day[df_day.CUSTOMER_ID.isin(list_detected_compromised_cards)==False]\n",
    "        \n",
    "        nb_compromised_cards_per_day.append(len(df_day[df_day.TX_FRAUD==1].CUSTOMER_ID.unique()))\n",
    "        \n",
    "        detected_compromised_cards, card_precision_top_k = card_precision_top_k_day(df_day,top_k)\n",
    "        \n",
    "        card_precision_top_k_per_day_list.append(card_precision_top_k)\n",
    "        \n",
    "        # Let us update the list of detected compromised cards\n",
    "        if remove_detected_compromised_cards:\n",
    "            list_detected_compromised_cards.extend(detected_compromised_cards)\n",
    "        \n",
    "    # Compute the mean\n",
    "    mean_card_precision_top_k = np.array(card_precision_top_k_per_day_list).mean()\n",
    "    \n",
    "    # Returns precision top k per day as a list, and resulting mean\n",
    "    return nb_compromised_cards_per_day,card_precision_top_k_per_day_list,mean_card_precision_top_k\n",
    "\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### card_precision_top_k_custom\n",
    "\n",
    "First use in [Chapter 5, Validation Strategies](Validation_Strategies)."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def card_precision_top_k_custom(y_true, y_pred, top_k, transactions_df):\n",
    "    \n",
    "    # Let us create a predictions_df DataFrame, that contains all transactions matching the indices of the current fold\n",
    "    # (indices of the y_true vector)\n",
    "    predictions_df=transactions_df.iloc[y_true.index.values].copy()\n",
    "    predictions_df['predictions']=y_pred\n",
    "    \n",
    "    # Compute the CP@k using the function implemented in Chapter 4, Section 4.2\n",
    "    nb_compromised_cards_per_day,card_precision_top_k_per_day_list,mean_card_precision_top_k=\\\n",
    "        card_precision_top_k(predictions_df, top_k)\n",
    "    \n",
    "    # Return the mean_card_precision_top_k\n",
    "    return mean_card_precision_top_k\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### performance_assessment\n",
    "\n",
    "First use in [Chapter 3, Baseline Fraud Detection System](Baseline_FDS)."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def performance_assessment(predictions_df, output_feature='TX_FRAUD', \n",
    "                           prediction_feature='predictions', top_k_list=[100],\n",
    "                           rounded=True):\n",
    "    \n",
    "    AUC_ROC = metrics.roc_auc_score(predictions_df[output_feature], predictions_df[prediction_feature])\n",
    "    AP = metrics.average_precision_score(predictions_df[output_feature], predictions_df[prediction_feature])\n",
    "    \n",
    "    performances = pd.DataFrame([[AUC_ROC, AP]], \n",
    "                           columns=['AUC ROC','Average precision'])\n",
    "    \n",
    "    for top_k in top_k_list:\n",
    "    \n",
    "        _, _, mean_card_precision_top_k = card_precision_top_k(predictions_df, top_k)\n",
    "        performances['Card Precision@'+str(top_k)]=mean_card_precision_top_k\n",
    "        \n",
    "    if rounded:\n",
    "        performances = performances.round(3)\n",
    "    \n",
    "    return performances\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### performance_assessment_model_collection\n",
    "\n",
    "First use in [Chapter 3, Baseline Fraud Detection System](Baseline_FDS)."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def performance_assessment_model_collection(fitted_models_and_predictions_dictionary, \n",
    "                                            transactions_df, \n",
    "                                            type_set='test',\n",
    "                                            top_k_list=[100]):\n",
    "\n",
    "    performances=pd.DataFrame() \n",
    "    \n",
    "    for classifier_name, model_and_predictions in fitted_models_and_predictions_dictionary.items():\n",
    "    \n",
    "        predictions_df=transactions_df\n",
    "            \n",
    "        predictions_df['predictions']=model_and_predictions['predictions_'+type_set]\n",
    "        \n",
    "        performances_model=performance_assessment(predictions_df, output_feature='TX_FRAUD', \n",
    "                                                   prediction_feature='predictions', top_k_list=top_k_list)\n",
    "        performances_model.index=[classifier_name]\n",
    "        \n",
    "        performances=performances.append(performances_model)\n",
    "        \n",
    "    return performances"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### execution_times_model_collection\n",
    "\n",
    "First use in [Chapter 3, Baseline Fraud Detection System](Baseline_FDS)."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def execution_times_model_collection(fitted_models_and_predictions_dictionary):\n",
    "\n",
    "    execution_times=pd.DataFrame() \n",
    "    \n",
    "    for classifier_name, model_and_predictions in fitted_models_and_predictions_dictionary.items():\n",
    "    \n",
    "        execution_times_model=pd.DataFrame() \n",
    "        execution_times_model['Training execution time']=[model_and_predictions['training_execution_time']]\n",
    "        execution_times_model['Prediction execution time']=[model_and_predictions['prediction_execution_time']]\n",
    "        execution_times_model.index=[classifier_name]\n",
    "        \n",
    "        execution_times=execution_times.append(execution_times_model)\n",
    "        \n",
    "    return execution_times"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### get_class_from_fraud_probability\n",
    "\n",
    "First use in [Chapter 4, Threshold Based Metrics](Threshold_Based_Metrics)."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Getting classes from a vector of fraud probabilities and a threshold\n",
    "def get_class_from_fraud_probability(fraud_probabilities, threshold=0.5):\n",
    "    \n",
    "    predicted_classes = [0 if fraud_probability<threshold else 1 \n",
    "                         for fraud_probability in fraud_probabilities]\n",
    "\n",
    "    return predicted_classes\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### threshold_based_metrics\n",
    "\n",
    "First use in [Chapter 4, Threshold Based Metrics](Threshold_Based_Metrics)."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def threshold_based_metrics(fraud_probabilities, true_label, thresholds_list):\n",
    "    \n",
    "    results = []\n",
    "    \n",
    "    for threshold in thresholds_list:\n",
    "    \n",
    "        predicted_classes = get_class_from_fraud_probability(fraud_probabilities, threshold=threshold)\n",
    "    \n",
    "        (TN, FP, FN, TP) = metrics.confusion_matrix(true_label, predicted_classes).ravel()\n",
    "    \n",
    "        MME = (FP+FN)/(TN+FP+FN+TP)\n",
    "    \n",
    "        TPR = TP/(TP+FN)\n",
    "        TNR = TN/(TN+FP)\n",
    "    \n",
    "        FPR = FP/(TN+FP)\n",
    "        FNR = FN/(TP+FN)\n",
    "        \n",
    "        BER = 1/2*(FPR+FNR)\n",
    "        \n",
    "        Gmean = np.sqrt(TPR*TNR)\n",
    "    \n",
    "        precision = 1 # 1 if TP+FP=0\n",
    "        FDR = 1 # 1 if TP+FP=0\n",
    "        \n",
    "        if TP+FP>0:\n",
    "            precision = TP/(TP+FP)\n",
    "            FDR=FP/(TP+FP)\n",
    "        \n",
    "        NPV = 1 # 1 if TN+FN=0\n",
    "        FOR = 1 # 1 if TN+FN=0\n",
    "        \n",
    "        if TN+FN>0:\n",
    "            NPV = TN/(TN+FN)\n",
    "            FOR = FN/(TN+FN)\n",
    "            \n",
    "        \n",
    "        F1_score = 2*(precision*TPR)/(precision+TPR)\n",
    "    \n",
    "        results.append([threshold, MME, TPR, TNR, FPR, FNR, BER, Gmean, precision, NPV, FDR, FOR, F1_score])\n",
    "        \n",
    "    results_df = pd.DataFrame(results,columns=['Threshold' ,'MME', 'TPR', 'TNR', 'FPR', 'FNR', 'BER', 'G-mean', 'Precision', 'NPV', 'FDR', 'FOR', 'F1 Score'])\n",
    "    \n",
    "    return results_df"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### get_summary_performances\n",
    "\n",
    "First use in [Chapter 5, Model Selection](Model_Selection)."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_summary_performances(performances_df, parameter_column_name=\"Parameters summary\"):\n",
    "\n",
    "    metrics = ['AUC ROC','Average precision','Card Precision@100']\n",
    "    performances_results=pd.DataFrame(columns=metrics)\n",
    "    \n",
    "    performances_df.reset_index(drop=True,inplace=True)\n",
    "\n",
    "    best_estimated_parameters = []\n",
    "    validation_performance = []\n",
    "    test_performance = []\n",
    "    \n",
    "    for metric in metrics:\n",
    "    \n",
    "        index_best_validation_performance = performances_df.index[np.argmax(performances_df[metric+' Validation'].values)]\n",
    "    \n",
    "        best_estimated_parameters.append(performances_df[parameter_column_name].iloc[index_best_validation_performance])\n",
    "        \n",
    "        validation_performance.append(\n",
    "                str(round(performances_df[metric+' Validation'].iloc[index_best_validation_performance],3))+\n",
    "                '+/-'+\n",
    "                str(round(performances_df[metric+' Validation'+' Std'].iloc[index_best_validation_performance],2))\n",
    "        )\n",
    "        \n",
    "        test_performance.append(\n",
    "                str(round(performances_df[metric+' Test'].iloc[index_best_validation_performance],3))+\n",
    "                '+/-'+\n",
    "                str(round(performances_df[metric+' Test'+' Std'].iloc[index_best_validation_performance],2))\n",
    "        )\n",
    "    \n",
    "    performances_results.loc[\"Best estimated parameters\"]=best_estimated_parameters\n",
    "    performances_results.loc[\"Validation performance\"]=validation_performance\n",
    "    performances_results.loc[\"Test performance\"]=test_performance\n",
    "\n",
    "    optimal_test_performance = []\n",
    "    optimal_parameters = []\n",
    "\n",
    "    for metric in ['AUC ROC Test','Average precision Test','Card Precision@100 Test']:\n",
    "    \n",
    "        index_optimal_test_performance = performances_df.index[np.argmax(performances_df[metric].values)]\n",
    "    \n",
    "        optimal_parameters.append(performances_df[parameter_column_name].iloc[index_optimal_test_performance])\n",
    "    \n",
    "        optimal_test_performance.append(\n",
    "                str(round(performances_df[metric].iloc[index_optimal_test_performance],3))+\n",
    "                '+/-'+\n",
    "                str(round(performances_df[metric+' Std'].iloc[index_optimal_test_performance],2))\n",
    "        )\n",
    "\n",
    "    performances_results.loc[\"Optimal parameter(s)\"]=optimal_parameters\n",
    "    performances_results.loc[\"Optimal test performance\"]=optimal_test_performance\n",
    "    \n",
    "    return performances_results"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### model_selection_performances\n",
    "\n",
    "First use in [Chapter 5, Model Selection](Model_Selection)."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def model_selection_performances(performances_df_dictionary,\n",
    "                                 performance_metric='AUC ROC'):\n",
    "    \n",
    "    # For each model class in performances_df_dictionary, collect the mean test\n",
    "    # performance and its standard deviation for three parameter choices:\n",
    "    # the default parameters, the best parameters found on the validation set,\n",
    "    # and the optimal (oracle) parameters on the test set.\n",
    "    \n",
    "    # Note: max_depth of 50 is similar to None\n",
    "    # Fixed: key was misspelled 'Logstic Regression', which raised a KeyError\n",
    "    # when the dictionary contained a 'Logistic Regression' entry\n",
    "    default_parameters_dictionary={\n",
    "        \"Decision Tree\": 50,\n",
    "        \"Logistic Regression\": 1,\n",
    "        \"Random Forest\": \"100/50\",\n",
    "        \"XGBoost\": \"100/0.1/2\"\n",
    "    }\n",
    "    \n",
    "    mean_performances_dictionary={\n",
    "        \"Default parameters\": [],\n",
    "        \"Best validation parameters\": [],\n",
    "        \"Optimal parameters\": []\n",
    "    }\n",
    "    \n",
    "    std_performances_dictionary={\n",
    "        \"Default parameters\": [],\n",
    "        \"Best validation parameters\": [],\n",
    "        \"Optimal parameters\": []\n",
    "    }\n",
    "    \n",
    "    # For each model class\n",
    "    for model_class, performances_df in performances_df_dictionary.items():\n",
    "        \n",
    "        # Get the performances for the default parameters\n",
    "        default_performances=performances_df[performances_df['Parameters summary']==default_parameters_dictionary[model_class]]\n",
    "        default_performances=default_performances.round(decimals=3)\n",
    "        \n",
    "        mean_performances_dictionary[\"Default parameters\"].append(default_performances[performance_metric+\" Test\"].values[0])\n",
    "        std_performances_dictionary[\"Default parameters\"].append(default_performances[performance_metric+\" Test Std\"].values[0])\n",
    "        \n",
    "        # Get the performances for the best estimated parameters\n",
    "        # (get_summary_performances is defined elsewhere in this notebook;\n",
    "        # its cells report values as 'mean+/-std' strings, split here)\n",
    "        performances_summary=get_summary_performances(performances_df, parameter_column_name=\"Parameters summary\")\n",
    "        mean_std_performances=performances_summary.loc[[\"Test performance\"]][performance_metric].values[0]\n",
    "        mean_std_performances=mean_std_performances.split(\"+/-\")\n",
    "        mean_performances_dictionary[\"Best validation parameters\"].append(float(mean_std_performances[0]))\n",
    "        std_performances_dictionary[\"Best validation parameters\"].append(float(mean_std_performances[1]))\n",
    "        \n",
    "        # Get the performances for the optimal parameters\n",
    "        mean_std_performances=performances_summary.loc[[\"Optimal test performance\"]][performance_metric].values[0]\n",
    "        mean_std_performances=mean_std_performances.split(\"+/-\")\n",
    "        mean_performances_dictionary[\"Optimal parameters\"].append(float(mean_std_performances[0]))\n",
    "        std_performances_dictionary[\"Optimal parameters\"].append(float(mean_std_performances[1]))\n",
    "        \n",
    "    # Return the mean performances and their standard deviations    \n",
    "    return (mean_performances_dictionary,std_performances_dictionary)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def model_selection_performances(performances_df_dictionary,\n",
    "                                 performance_metric='AUC ROC',\n",
    "                                 model_classes=['Decision Tree', \n",
    "                                                'Logistic Regression', \n",
    "                                                'Random Forest', \n",
    "                                                'XGBoost'],\n",
    "                                 default_parameters_dictionary={\n",
    "                                                \"Decision Tree\": 50,\n",
    "                                                \"Logistic Regression\": 1,\n",
    "                                                \"Random Forest\": \"100/50\",\n",
    "                                                \"XGBoost\": \"100/0.1/3\"\n",
    "                                            }):\n",
    "    \n",
    "    # For each model class in model_classes, collect the mean test performance\n",
    "    # and its standard deviation for three parameter choices: the default\n",
    "    # parameters, the best parameters found on the validation set, and the\n",
    "    # optimal (oracle) parameters on the test set.\n",
    "    # NOTE: the mutable default arguments (lists/dict) are only read, never\n",
    "    # modified, so the usual mutable-default pitfall does not apply here.\n",
    "    \n",
    "    mean_performances_dictionary={\n",
    "        \"Default parameters\": [],\n",
    "        \"Best validation parameters\": [],\n",
    "        \"Optimal parameters\": []\n",
    "    }\n",
    "    \n",
    "    std_performances_dictionary={\n",
    "        \"Default parameters\": [],\n",
    "        \"Best validation parameters\": [],\n",
    "        \"Optimal parameters\": []\n",
    "    }\n",
    "    \n",
    "    # For each model class\n",
    "    for model_class in model_classes:\n",
    "        \n",
    "        performances_df=performances_df_dictionary[model_class]\n",
    "        \n",
    "        # Get the performances for the default parameters\n",
    "        default_performances=performances_df[performances_df['Parameters summary']==default_parameters_dictionary[model_class]]\n",
    "        default_performances=default_performances.round(decimals=3)\n",
    "        \n",
    "        mean_performances_dictionary[\"Default parameters\"].append(default_performances[performance_metric+\" Test\"].values[0])\n",
    "        std_performances_dictionary[\"Default parameters\"].append(default_performances[performance_metric+\" Test Std\"].values[0])\n",
    "        \n",
    "        # Get the performances for the best estimated parameters\n",
    "        # (summary cells are 'mean+/-std' strings, split into two floats)\n",
    "        performances_summary=get_summary_performances(performances_df, parameter_column_name=\"Parameters summary\")\n",
    "        mean_std_performances=performances_summary.loc[[\"Test performance\"]][performance_metric].values[0]\n",
    "        mean_std_performances=mean_std_performances.split(\"+/-\")\n",
    "        mean_performances_dictionary[\"Best validation parameters\"].append(float(mean_std_performances[0]))\n",
    "        std_performances_dictionary[\"Best validation parameters\"].append(float(mean_std_performances[1]))\n",
    "        \n",
    "        # Get the performances for the optimal parameters\n",
    "        mean_std_performances=performances_summary.loc[[\"Optimal test performance\"]][performance_metric].values[0]\n",
    "        mean_std_performances=mean_std_performances.split(\"+/-\")\n",
    "        mean_performances_dictionary[\"Optimal parameters\"].append(float(mean_std_performances[0]))\n",
    "        std_performances_dictionary[\"Optimal parameters\"].append(float(mean_std_performances[1]))\n",
    "        \n",
    "    # Return the mean performances and their standard deviations    \n",
    "    return (mean_performances_dictionary,std_performances_dictionary)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Model selection"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### prequential_grid_search\n",
    "\n",
    "First use in [Chapter 5, Validation Strategies](Validation_Strategies)."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def prequential_grid_search(transactions_df, \n",
    "                            classifier, \n",
    "                            input_features, output_feature, \n",
    "                            parameters, scoring, \n",
    "                            start_date_training, \n",
    "                            n_folds=4,\n",
    "                            expe_type='Test',\n",
    "                            delta_train=7, \n",
    "                            delta_delay=7, \n",
    "                            delta_assessment=7,\n",
    "                            performance_metrics_list_grid=['roc_auc'],\n",
    "                            performance_metrics_list=['AUC ROC'],\n",
    "                            n_jobs=-1):\n",
    "    \n",
    "    # Grid search over `parameters` using prequential validation folds.\n",
    "    # The classifier is wrapped in a StandardScaler+classifier pipeline,\n",
    "    # so `parameters` presumably uses the 'clf__' step prefix for its keys\n",
    "    # — confirm against the calling notebooks.\n",
    "    # Returns a DataFrame with one row per parameter combination: the\n",
    "    # mean/std of each metric (column names suffixed with `expe_type`),\n",
    "    # the parameter dict, and the mean fit time.\n",
    "    \n",
    "    estimators = [('scaler', sklearn.preprocessing.StandardScaler()), ('clf', classifier)]\n",
    "    pipe = sklearn.pipeline.Pipeline(estimators)\n",
    "    \n",
    "    # prequentialSplit is defined elsewhere in this notebook; it returns the\n",
    "    # (train_indices, test_indices) pairs used as the GridSearchCV `cv` argument\n",
    "    prequential_split_indices=prequentialSplit(transactions_df,\n",
    "                                               start_date_training=start_date_training, \n",
    "                                               n_folds=n_folds, \n",
    "                                               delta_train=delta_train, \n",
    "                                               delta_delay=delta_delay, \n",
    "                                               delta_assessment=delta_assessment)\n",
    "    \n",
    "    # refit=False: only cross-validated scores are needed, no final refit\n",
    "    grid_search = sklearn.model_selection.GridSearchCV(pipe, parameters, scoring=scoring, cv=prequential_split_indices, refit=False, n_jobs=n_jobs)\n",
    "    \n",
    "    X=transactions_df[input_features]\n",
    "    y=transactions_df[output_feature]\n",
    "\n",
    "    grid_search.fit(X, y)\n",
    "    \n",
    "    performances_df=pd.DataFrame()\n",
    "    \n",
    "    # performance_metrics_list_grid holds sklearn scorer names;\n",
    "    # performance_metrics_list holds the matching human-readable column names\n",
    "    for i in range(len(performance_metrics_list_grid)):\n",
    "        performances_df[performance_metrics_list[i]+' '+expe_type]=grid_search.cv_results_['mean_test_'+performance_metrics_list_grid[i]]\n",
    "        performances_df[performance_metrics_list[i]+' '+expe_type+' Std']=grid_search.cv_results_['std_test_'+performance_metrics_list_grid[i]]\n",
    "\n",
    "    performances_df['Parameters']=grid_search.cv_results_['params']\n",
    "    performances_df['Execution time']=grid_search.cv_results_['mean_fit_time']\n",
    "    \n",
    "    return performances_df\n",
    "\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### model_selection_wrapper\n",
    "\n",
    "First use in [Chapter 5, Model Selection](Model_Selection)."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def model_selection_wrapper(transactions_df, \n",
    "                            classifier, \n",
    "                            input_features, output_feature,\n",
    "                            parameters, \n",
    "                            scoring, \n",
    "                            start_date_training_for_valid,\n",
    "                            start_date_training_for_test,\n",
    "                            n_folds=4,\n",
    "                            delta_train=7, \n",
    "                            delta_delay=7, \n",
    "                            delta_assessment=7,\n",
    "                            performance_metrics_list_grid=['roc_auc'],\n",
    "                            performance_metrics_list=['AUC ROC'],\n",
    "                            n_jobs=-1):\n",
    "\n",
    "    # Run prequential_grid_search twice — once with folds starting at\n",
    "    # start_date_training_for_valid (validation) and once starting at\n",
    "    # start_date_training_for_test (test) — and return the two results\n",
    "    # bound column-wise into a single DataFrame.\n",
    "\n",
    "    # Get performances on the validation set using prequential validation\n",
    "    performances_df_validation=prequential_grid_search(transactions_df, classifier, \n",
    "                            input_features, output_feature,\n",
    "                            parameters, scoring, \n",
    "                            start_date_training=start_date_training_for_valid,\n",
    "                            n_folds=n_folds,\n",
    "                            expe_type='Validation',\n",
    "                            delta_train=delta_train, \n",
    "                            delta_delay=delta_delay, \n",
    "                            delta_assessment=delta_assessment,\n",
    "                            performance_metrics_list_grid=performance_metrics_list_grid,\n",
    "                            performance_metrics_list=performance_metrics_list,\n",
    "                            n_jobs=n_jobs)\n",
    "    \n",
    "    # Get performances on the test set using prequential validation\n",
    "    performances_df_test=prequential_grid_search(transactions_df, classifier, \n",
    "                            input_features, output_feature,\n",
    "                            parameters, scoring, \n",
    "                            start_date_training=start_date_training_for_test,\n",
    "                            n_folds=n_folds,\n",
    "                            expe_type='Test',\n",
    "                            delta_train=delta_train, \n",
    "                            delta_delay=delta_delay, \n",
    "                            delta_assessment=delta_assessment,\n",
    "                            performance_metrics_list_grid=performance_metrics_list_grid,\n",
    "                            performance_metrics_list=performance_metrics_list,\n",
    "                            n_jobs=n_jobs)\n",
    "    \n",
    "    # Bind the two resulting DataFrames\n",
    "    # Drop the columns duplicated in both results ('Parameters', 'Execution\n",
    "    # time') from the validation frame before concatenating; both frames\n",
    "    # share the same parameter-grid row order\n",
    "    performances_df_validation.drop(columns=['Parameters','Execution time'], inplace=True)\n",
    "    performances_df=pd.concat([performances_df_test,performances_df_validation],axis=1)\n",
    "\n",
    "    # And return as a single DataFrame\n",
    "    return performances_df\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### kfold_cv_with_classifier\n",
    "\n",
    "First use in [Chapter 6, Cost-sensitive learning](Cost_Sensitive_Learning)."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def kfold_cv_with_classifier(classifier,\n",
    "                             X,\n",
    "                             y,\n",
    "                             n_splits=5,\n",
    "                             strategy_name=\"Baseline classifier\"):\n",
    "    \n",
    "    # Assess `classifier` with stratified k-fold cross-validation. Returns:\n",
    "    # - a one-row DataFrame of 'mean+/-std' strings (fit/score times,\n",
    "    #   AUC ROC, Average Precision, Balanced accuracy), indexed by strategy_name\n",
    "    # - the estimator fitted on the first fold\n",
    "    # - train/test DataFrames for the first fold; assumes X is a 2D numpy\n",
    "    #   array whose first two columns are the features X1 and X2\n",
    "    # Fixed: default label typo 'Basline classifier' -> 'Baseline classifier'.\n",
    "    \n",
    "    cv = sklearn.model_selection.StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=0)\n",
    "    \n",
    "    cv_results_=sklearn.model_selection.cross_validate(classifier,X,y,cv=cv,\n",
    "                                                       scoring=['roc_auc',\n",
    "                                                                'average_precision',\n",
    "                                                                'balanced_accuracy'],\n",
    "                                                       return_estimator=True)\n",
    "    \n",
    "    results=round(pd.DataFrame(cv_results_),3)\n",
    "    results_mean=list(results.mean().values)\n",
    "    results_std=list(results.std().values)\n",
    "    # Iterate over the aggregated columns, not the folds: range(len(results))\n",
    "    # was only correct by coincidence when n_splits equalled the number of\n",
    "    # reported columns (5)\n",
    "    results_df=pd.DataFrame([[str(round(results_mean[i],3))+'+/-'+\n",
    "                              str(round(results_std[i],3)) for i in range(len(results_mean))]],\n",
    "                            columns=['Fit time (s)','Score time (s)',\n",
    "                                     'AUC ROC','Average Precision','Balanced accuracy'])\n",
    "    results_df.rename(index={0:strategy_name}, inplace=True)\n",
    "    \n",
    "    # Estimator fitted on the first fold, returned for later inspection/plots\n",
    "    classifier_0=cv_results_['estimator'][0]\n",
    "    \n",
    "    (train_index, test_index) = next(cv.split(X, y))\n",
    "    train_df=pd.DataFrame({'X1':X[train_index,0],'X2':X[train_index,1], 'Y':y[train_index]})\n",
    "    test_df=pd.DataFrame({'X1':X[test_index,0],'X2':X[test_index,1], 'Y':y[test_index]})\n",
    "    \n",
    "    return (results_df, classifier_0, train_df, test_df)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Plotting"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### get_tx_stats\n",
    "\n",
    "First use in [Chapter 3, Baseline Fraud Detection System](Baseline_FDS)."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Compute the number of transactions per day, fraudulent transactions per day and fraudulent cards per day\n",
    "\n",
    "def get_tx_stats(transactions_df, start_date_df=\"2018-04-01\"):\n",
    "    \n",
    "    # Returns a DataFrame indexed by TX_TIME_DAYS with daily counts and a\n",
    "    # calendar date column. Assumes TX_TIME_DAYS counts days elapsed since\n",
    "    # start_date_df — confirm against the data-generation notebook.\n",
    "    \n",
    "    #Number of transactions per day\n",
    "    nb_tx_per_day=transactions_df.groupby(['TX_TIME_DAYS'])['CUSTOMER_ID'].count()\n",
    "    #Number of fraudulent transactions per day\n",
    "    nb_fraudulent_transactions_per_day=transactions_df.groupby(['TX_TIME_DAYS'])['TX_FRAUD'].sum()\n",
    "    #Number of fraudulent cards per day (distinct customers with >=1 fraud that day)\n",
    "    nb_compromised_card_per_day=transactions_df[transactions_df['TX_FRAUD']==1].groupby(['TX_TIME_DAYS']).CUSTOMER_ID.nunique()\n",
    "    \n",
    "    tx_stats=pd.DataFrame({\"nb_tx_per_day\":nb_tx_per_day,\n",
    "                           \"nb_fraudulent_transactions_per_day\":nb_fraudulent_transactions_per_day,\n",
    "                           \"nb_compromised_cards_per_day\":nb_compromised_card_per_day})\n",
    "\n",
    "    tx_stats=tx_stats.reset_index()\n",
    "    \n",
    "    # Convert the day offsets into calendar dates\n",
    "    start_date = datetime.datetime.strptime(start_date_df, \"%Y-%m-%d\")\n",
    "    tx_date=start_date+tx_stats['TX_TIME_DAYS'].apply(datetime.timedelta)\n",
    "    \n",
    "    tx_stats['tx_date']=tx_date\n",
    "    \n",
    "    return tx_stats\n",
    "\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### get_template_tx_stats\n",
    "\n",
    "First use in [Chapter 3, Baseline Fraud Detection System](Baseline_FDS)."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Plot the number of transactions per day, fraudulent transactions per day and fraudulent cards per day\n",
    "\n",
    "def get_template_tx_stats(ax ,fs,\n",
    "                          start_date_training,\n",
    "                          title='',\n",
    "                          delta_train=7,\n",
    "                          delta_delay=7,\n",
    "                          delta_test=7,\n",
    "                          ylim=300):\n",
    "    \n",
    "    # Decorate an existing axis with title/labels and mark the training,\n",
    "    # delay and test periods with vertical lines and text annotations.\n",
    "    # `fs` is the base font size; `start_date_training` is a datetime.\n",
    "    \n",
    "    ax.set_title(title, fontsize=fs*1.5)\n",
    "    ax.set_ylim([0, ylim])\n",
    "    \n",
    "    ax.set_xlabel('Date', fontsize=fs)\n",
    "    ax.set_ylabel('Number', fontsize=fs)\n",
    "    \n",
    "    plt.yticks(fontsize=fs*0.7) \n",
    "    plt.xticks(fontsize=fs*0.7)    \n",
    "\n",
    "    # Vertical lines at the training/delay and delay/test boundaries\n",
    "    ax.axvline(start_date_training+datetime.timedelta(days=delta_train), 0,ylim, color=\"black\")\n",
    "    ax.axvline(start_date_training+datetime.timedelta(days=delta_train+delta_delay), 0, ylim, color=\"black\")\n",
    "    \n",
    "    # Period labels placed 2 days into each period, near the top of the axis\n",
    "    ax.text(start_date_training+datetime.timedelta(days=2), ylim-20,'Training period', fontsize=fs)\n",
    "    ax.text(start_date_training+datetime.timedelta(days=delta_train+2), ylim-20,'Delay period', fontsize=fs)\n",
    "    ax.text(start_date_training+datetime.timedelta(days=delta_train+delta_delay+2), ylim-20,'Test period', fontsize=fs)\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### get_template_roc_curve\n",
    "\n",
    "First use in [Chapter 4, Threshold Free Metrics](Threshold_Free_Metrics)."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_template_roc_curve(ax, title,fs,random=True):\n",
    "    \n",
    "    # Prepare an axis for plotting a ROC curve: title, unit-square limits,\n",
    "    # axis labels, and (optionally) the diagonal of a random classifier.\n",
    "    \n",
    "    ax.set_title(title, fontsize=fs)\n",
    "    ax.set_xlim([-0.01, 1.01])\n",
    "    ax.set_ylim([-0.01, 1.01])\n",
    "    \n",
    "    ax.set_xlabel('False Positive Rate', fontsize=fs)\n",
    "    ax.set_ylabel('True Positive Rate', fontsize=fs)\n",
    "    \n",
    "    if random:\n",
    "        # Dashed red diagonal: performance of a random classifier (AUC=0.5)\n",
    "        ax.plot([0, 1], [0, 1],'r--',label=\"AUC ROC Random = 0.5\")\n",
    "\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### get_template_pr_curve\n",
    "\n",
    "First use in [Chapter 4, Threshold Free Metrics](Threshold_Free_Metrics)."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_template_pr_curve(ax, title,fs, baseline=0.5):\n",
    "    # Prepare an axis for plotting a precision-recall curve: title,\n",
    "    # unit-square limits, axis labels, and the horizontal baseline of a\n",
    "    # random classifier (`baseline` is its average precision).\n",
    "    ax.set_title(title, fontsize=fs)\n",
    "    ax.set_xlim([-0.01, 1.01])\n",
    "    ax.set_ylim([-0.01, 1.01])\n",
    "    \n",
    "    ax.set_xlabel('Recall (True Positive Rate)', fontsize=fs)\n",
    "    ax.set_ylabel('Precision', fontsize=fs)\n",
    "    \n",
    "    ax.plot([0, 1], [baseline, baseline],'r--',label='AP Random = {0:0.3f}'.format(baseline))\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### get_performance_plot\n",
    "\n",
    "First use in [Chapter 5, Validation Strategies](Validation_Strategies)."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Get the performance plot for a single performance metric\n",
    "def get_performance_plot(performances_df, \n",
    "                         ax, \n",
    "                         performance_metric, \n",
    "                         expe_type_list=['Test','Train'], \n",
    "                         expe_type_color_list=['#008000','#2F4D7E'],\n",
    "                         parameter_name=\"Tree maximum depth\",\n",
    "                         summary_performances=None):\n",
    "    \n",
    "    # Plot performance_metric as a function of 'Parameters summary' on `ax`,\n",
    "    # one curve per experiment type, with +/-2 std confidence bands when\n",
    "    # the corresponding 'Std' columns are present.\n",
    "    \n",
    "    # expe_type_list is the list of type of experiments, typically containing 'Test', 'Train', or 'Valid'\n",
    "    # For all types of experiments\n",
    "    for i in range(len(expe_type_list)):\n",
    "    \n",
    "        # Column in performances_df for which to retrieve the data \n",
    "        performance_metric_expe_type=performance_metric+' '+expe_type_list[i]\n",
    "    \n",
    "        # Plot data on graph\n",
    "        ax.plot(performances_df['Parameters summary'], performances_df[performance_metric_expe_type], \n",
    "                color=expe_type_color_list[i], label = expe_type_list[i])\n",
    "        \n",
    "        # If performances_df contains confidence intervals, add them to the graph\n",
    "        if performance_metric_expe_type+' Std' in performances_df.columns:\n",
    "        \n",
    "            conf_min = performances_df[performance_metric_expe_type]\\\n",
    "                        -2*performances_df[performance_metric_expe_type+' Std']\n",
    "            conf_max = performances_df[performance_metric_expe_type]\\\n",
    "                        +2*performances_df[performance_metric_expe_type+' Std']\n",
    "    \n",
    "            ax.fill_between(performances_df['Parameters summary'], conf_min, conf_max, color=expe_type_color_list[i], alpha=.1)\n",
    "\n",
    "    # If summary_performances table is present, adds vertical dashed bar for best estimated parameter \n",
    "    # (summary cells are 'mean+/-std' strings; only the mean is used here)\n",
    "    if summary_performances is not None:\n",
    "        best_estimated_parameter=summary_performances[performance_metric][['Best estimated parameters']].values[0]\n",
    "        best_estimated_performance=float(summary_performances[performance_metric][['Validation performance']].values[0].split(\"+/-\")[0])\n",
    "        ymin, ymax = ax.get_ylim()\n",
    "        ax.vlines(best_estimated_parameter, ymin, best_estimated_performance,\n",
    "                  linestyles=\"dashed\")\n",
    "    \n",
    "    # Set title, and x and y axes labels\n",
    "    ax.set_title(performance_metric+'\\n', fontsize=14)\n",
    "    ax.set(xlabel = parameter_name, ylabel=performance_metric)\n",
    "    "
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### get_performances_plots\n",
    "\n",
    "First use in [Chapter 5, Validation Strategies](Validation_Strategies)."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Get the performance plots for a set of performance metric\n",
    "def get_performances_plots(performances_df, \n",
    "                           performance_metrics_list=['AUC ROC', 'Average precision', 'Card Precision@100'], \n",
    "                           expe_type_list=['Test','Train'], expe_type_color_list=['#008000','#2F4D7E'],\n",
    "                           parameter_name=\"Tree maximum depth\",\n",
    "                           summary_performances=None):\n",
    "    \n",
    "    # Draw one subplot per metric (via get_performance_plot) on a single row,\n",
    "    # with a shared legend placed outside the last subplot.\n",
    "    \n",
    "    # Create as many graphs as there are performance metrics to display\n",
    "    n_performance_metrics = len(performance_metrics_list)\n",
    "    fig, ax = plt.subplots(1, n_performance_metrics, figsize=(5*n_performance_metrics,4))\n",
    "    \n",
    "    # Plot performance metric for each metric in performance_metrics_list\n",
    "    for i in range(n_performance_metrics):\n",
    "    \n",
    "        get_performance_plot(performances_df, ax[i], performance_metric=performance_metrics_list[i], \n",
    "                             expe_type_list=expe_type_list, \n",
    "                             expe_type_color_list=expe_type_color_list,\n",
    "                             parameter_name=parameter_name,\n",
    "                             summary_performances=summary_performances)\n",
    "    \n",
    "    # Single legend, anchored to the right of the last subplot\n",
    "    ax[n_performance_metrics-1].legend(loc='upper left', \n",
    "                                       labels=expe_type_list, \n",
    "                                       bbox_to_anchor=(1.05, 1),\n",
    "                                       title=\"Type set\")\n",
    "\n",
    "    plt.subplots_adjust(wspace=0.5, \n",
    "                        hspace=0.8)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### get_execution_times_plot\n",
    "\n",
    "First use in [Chapter 5, Validation Strategies](Validation_Strategies)."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Plot the mean execution (fit) time as a function of the parameter values\n",
    "def get_execution_times_plot(performances_df,\n",
    "                             title=\"\",\n",
    "                             parameter_name=\"Tree maximum depth\"):\n",
    "    \n",
    "    # Expects the 'Parameters summary' and 'Execution time' columns produced\n",
    "    # by prequential_grid_search\n",
    "    fig, ax = plt.subplots(1,1, figsize=(5,4))\n",
    "    \n",
    "    # Plot data on graph\n",
    "    ax.plot(performances_df['Parameters summary'], performances_df[\"Execution time\"], \n",
    "            color=\"black\")\n",
    "        \n",
    "    # Set title, and x and y axes labels\n",
    "    ax.set_title(title, fontsize=14)\n",
    "    ax.set(xlabel = parameter_name, ylabel=\"Execution time (seconds)\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### get_model_selection_performance_plot\n",
    "\n",
    "First use in [Chapter 5, Model Selection](Model_Selection)."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Get the performance plot for a single performance metric\n",
    "def get_model_selection_performance_plot(performances_df_dictionary, \n",
    "                                         ax, \n",
    "                                         performance_metric,\n",
    "                                         ylim=[0,1],\n",
    "                                         model_classes=['Decision Tree', \n",
    "                                                        'Logistic Regression', \n",
    "                                                        'Random Forest', \n",
    "                                                        'XGBoost']):\n",
    "    \n",
    "    # Grouped bar plot: for each model class, the mean performance (with std\n",
    "    # error bars) obtained with default / best validation / optimal parameters.\n",
    "    # Fixed: model_classes is now forwarded to model_selection_performances so\n",
    "    # that a custom list stays consistent with the bar positions and labels.\n",
    "    (mean_performances_dictionary,std_performances_dictionary) = \\\n",
    "        model_selection_performances(performances_df_dictionary=performances_df_dictionary,\n",
    "                                     performance_metric=performance_metric,\n",
    "                                     model_classes=model_classes)\n",
    "    \n",
    "    \n",
    "    # width of the bars\n",
    "    barWidth = 0.3\n",
    "    # The x position of bars\n",
    "    r1 = np.arange(len(model_classes))\n",
    "    r2 = r1+barWidth\n",
    "    r3 = r1+2*barWidth\n",
    "    \n",
    "    # Create Default parameters bars (orange #CA8035)\n",
    "    ax.bar(r1, mean_performances_dictionary['Default parameters'], \n",
    "           width = barWidth, color = '#CA8035', edgecolor = 'black', \n",
    "           yerr=std_performances_dictionary['Default parameters'], capsize=7, label='Default parameters')\n",
    " \n",
    "    # Create Best validation parameters bars (green #008000)\n",
    "    ax.bar(r2, mean_performances_dictionary['Best validation parameters'], \n",
    "           width = barWidth, color = '#008000', edgecolor = 'black', \n",
    "           yerr=std_performances_dictionary['Best validation parameters'], capsize=7, label='Best validation parameters')\n",
    "\n",
    "    # Create Optimal parameters bars (blue #2F4D7E)\n",
    "    ax.bar(r3, mean_performances_dictionary['Optimal parameters'], \n",
    "           width = barWidth, color = '#2F4D7E', edgecolor = 'black', \n",
    "           yerr=std_performances_dictionary['Optimal parameters'], capsize=7, label='Optimal parameters')\n",
    " \n",
    "\n",
    "    # Set title, and x and y axes labels\n",
    "    ax.set_ylim(ylim[0],ylim[1])\n",
    "    # Center the tick under the middle (r2) bar of each group\n",
    "    ax.set_xticks(r2+barWidth/2)\n",
    "    ax.set_xticklabels(model_classes, rotation = 45, ha=\"right\", fontsize=12)\n",
    "    ax.set_title(performance_metric+'\\n', fontsize=18)\n",
    "    ax.set_xlabel(\"Model class\", fontsize=16)\n",
    "    ax.set_ylabel(performance_metric, fontsize=15)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### get_model_selection_performances_plots\n",
    "\n",
    "First use in [Chapter 5, Model Selection](Model_Selection)."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_model_selection_performances_plots(performances_df_dictionary, \n",
    "                                           performance_metrics_list=['AUC ROC', 'Average precision', 'Card Precision@100'],\n",
    "                                           ylim_list=[[0.6,0.9],[0.2,0.8],[0.2,0.35]],\n",
    "                                           model_classes=['Decision Tree', \n",
    "                                                          'Logistic Regression', \n",
    "                                                          'Random Forest', \n",
    "                                                          'XGBoost']):\n",
    "    \n",
    "    # Draw one grouped-bar subplot per metric (via\n",
    "    # get_model_selection_performance_plot), each with its own y-limits from\n",
    "    # ylim_list, and a shared legend outside the last subplot.\n",
    "    \n",
    "    # Create as many graphs as there are performance metrics to display\n",
    "    n_performance_metrics = len(performance_metrics_list)\n",
    "    fig, ax = plt.subplots(1, n_performance_metrics, figsize=(5*n_performance_metrics,4))\n",
    "    \n",
    "    parameter_types=['Default parameters','Best validation parameters','Optimal parameters']\n",
    "    \n",
    "    # Plot performance metric for each metric in performance_metrics_list\n",
    "    for i in range(n_performance_metrics):\n",
    "    \n",
    "        get_model_selection_performance_plot(performances_df_dictionary, \n",
    "                                             ax[i], \n",
    "                                             performance_metrics_list[i],\n",
    "                                             ylim=ylim_list[i],\n",
    "                                             model_classes=model_classes\n",
    "                                            )\n",
    "    \n",
    "    # Single legend, anchored to the right of the last subplot\n",
    "    ax[n_performance_metrics-1].legend(loc='upper left', \n",
    "                                       labels=parameter_types, \n",
    "                                       bbox_to_anchor=(1.05, 1),\n",
    "                                       title=\"Parameter type\",\n",
    "                                       prop={'size': 12},\n",
    "                                       title_fontsize=12)\n",
    "\n",
    "    plt.subplots_adjust(wspace=0.5, \n",
    "                        hspace=0.8)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### plot_decision_boundary_classifier\n",
    "\n",
    "First use in [Chapter 6, Cost-sensitive learning](Cost_Sensitive_Learning)."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def plot_decision_boundary_classifier(ax, \n",
    "                                      classifier,\n",
    "                                      train_df,\n",
    "                                      input_features=['X1','X2'],\n",
    "                                      output_feature='Y',\n",
    "                                      title=\"\",\n",
    "                                      fs=14,\n",
    "                                      plot_training_data=True):\n",
    "\n",
    "    # Draw the 2D decision surface of a fitted classifier (probability of the\n",
    "    # positive class on a regular grid), optionally overlaying the data points.\n",
    "    # `classifier` must implement predict_proba over the two input features.\n",
    "    plot_colors = [\"tab:blue\",\"tab:orange\"]\n",
    "\n",
    "    # Grid extends one unit beyond the data range in each direction\n",
    "    x1_min, x1_max = train_df[input_features[0]].min() - 1, train_df[input_features[0]].max() + 1\n",
    "    x2_min, x2_max = train_df[input_features[1]].min() - 1, train_df[input_features[1]].max() + 1\n",
    "    \n",
    "    plot_step=0.1\n",
    "    xx, yy = np.meshgrid(np.arange(x1_min, x1_max, plot_step),\n",
    "                         np.arange(x2_min, x2_max, plot_step))\n",
    "\n",
    "    # Probability of class 1 at each grid point, reshaped back to the grid\n",
    "    Z = classifier.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:,1]\n",
    "    Z = Z.reshape(xx.shape)\n",
    "    ax.contourf(xx, yy, Z, cmap=plt.cm.RdYlBu_r,alpha=0.3)\n",
    "\n",
    "    if plot_training_data:\n",
    "        # Plot the training points\n",
    "        groups = train_df.groupby(output_feature)\n",
    "        for name, group in groups:\n",
    "            ax.scatter(group[input_features[0]], group[input_features[1]], edgecolors='black', label=name)\n",
    "        \n",
    "    ax.set_title(title, fontsize=fs)\n",
    "    ax.set_xlabel(input_features[0], fontsize=fs)\n",
    "    ax.set_ylabel(input_features[1], fontsize=fs)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### plot_decision_boundary\n",
    "\n",
    "First use in [Chapter 6, Cost-sensitive learning](Cost_Sensitive_Learning)."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def plot_decision_boundary(classifier_0,\n",
    "                           train_df, \n",
    "                           test_df):\n",
    "    \"\"\"Plot the decision surface of `classifier_0` in a 1x3 figure:\n",
    "    with training data, without data, and with test data.\n",
    "\n",
    "    Returns the Matplotlib figure.\n",
    "    \"\"\"\n",
    "    \n",
    "    fig_decision_boundary, ax = plt.subplots(1, 3, figsize=(5*3,5))\n",
    "\n",
    "    plot_decision_boundary_classifier(ax[0], classifier_0,\n",
    "                                  train_df,\n",
    "                                  title=\"Decision surface of the decision tree\\n With training data\",\n",
    "                                  plot_training_data=True)\n",
    "\n",
    "    plot_decision_boundary_classifier(ax[1], classifier_0,\n",
    "                                  train_df,\n",
    "                                  title=\"Decision surface of the decision tree\\n\",\n",
    "                                  plot_training_data=False)\n",
    "\n",
    "\n",
    "    plot_decision_boundary_classifier(ax[2], classifier_0,\n",
    "                                  test_df,\n",
    "                                  title=\"Decision surface of the decision tree\\n With test data\",\n",
    "                                  plot_training_data=True)\n",
    "\n",
    "    # Single legend for the whole figure, placed outside the last panel\n",
    "    ax[-1].legend(loc='upper left', \n",
    "              #labels=[0,1], \n",
    "              bbox_to_anchor=(1.05, 1),\n",
    "              title=\"Class\")\n",
    "\n",
    "    # Shared colorbar mapping [0, 1] probabilities to the contour colormap\n",
    "    sm = plt.cm.ScalarMappable(cmap=plt.cm.RdYlBu_r, norm=plt.Normalize(vmin=0, vmax=1))\n",
    "    cax = fig_decision_boundary.add_axes([0.93, 0.15, 0.02, 0.5])\n",
    "    fig_decision_boundary.colorbar(sm, cax=cax, alpha=0.3, boundaries=np.linspace(0, 1, 11))\n",
    "    \n",
    "    return fig_decision_boundary"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Deep Learning functions\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch\n",
    "import torch.nn.functional as F"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### seed_everything\n",
    "\n",
    "First use in [Chapter 7, Feed-forward neural network](FeedForwardNeuralNetworks)."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def seed_everything(seed):\n",
    "    \"\"\"Seed all random number generators for reproducible results.\n",
    "\n",
    "    Parameters\n",
    "    ----------\n",
    "    seed: int\n",
    "        Seed for the Python, NumPy and PyTorch (CPU and CUDA) RNGs.\n",
    "    \"\"\"\n",
    "    random.seed(seed)\n",
    "    os.environ['PYTHONHASHSEED'] = str(seed)\n",
    "    np.random.seed(seed)\n",
    "    torch.manual_seed(seed)\n",
    "    torch.cuda.manual_seed(seed)\n",
    "    # cuDNN must use deterministic algorithms, and auto-tuning must be\n",
    "    # disabled: benchmark=True lets cuDNN pick algorithms at run time,\n",
    "    # which makes results non-reproducible across runs.\n",
    "    torch.backends.cudnn.deterministic = True\n",
    "    torch.backends.cudnn.benchmark = False"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Utils for data management, training and evaluation\n",
    "\n",
    "First use in [Chapter 7, Feed-forward neural network](FeedForwardNeuralNetworks)."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "class FraudDataset(torch.utils.data.Dataset):\n",
    "    \"\"\"Torch Dataset wrapping pre-built feature tensors `x` and labels `y`.\n",
    "\n",
    "    If `y` is None (unlabeled data), __getitem__ returns the features only.\n",
    "    \"\"\"\n",
    "    \n",
    "    def __init__(self, x, y):\n",
    "        'Initialization'\n",
    "        self.x = x\n",
    "        self.y = y\n",
    "\n",
    "    def __len__(self):\n",
    "        'Denotes the total number of samples'\n",
    "        return len(self.x)\n",
    "\n",
    "    def __getitem__(self, index):\n",
    "        'Generates one sample of data'\n",
    "        # Select sample index\n",
    "        if self.y is not None:\n",
    "            return self.x[index], self.y[index]\n",
    "        else:\n",
    "            return self.x[index]\n",
    "\n",
    "def prepare_generators(training_set,valid_set,batch_size=64):\n",
    "    \"\"\"Build (training, validation) DataLoaders; only training data is shuffled.\"\"\"\n",
    "    \n",
    "    train_loader_params = {'batch_size': batch_size,\n",
    "              'shuffle': True,\n",
    "              'num_workers': 0}\n",
    "    valid_loader_params = {'batch_size': batch_size,\n",
    "              'num_workers': 0}\n",
    "    \n",
    "    training_generator = torch.utils.data.DataLoader(training_set, **train_loader_params)\n",
    "    valid_generator = torch.utils.data.DataLoader(valid_set, **valid_loader_params)\n",
    "    \n",
    "    return training_generator,valid_generator\n",
    "\n",
    "def evaluate_model(model,generator,criterion):\n",
    "    \"\"\"Return the mean loss of `model` over all batches of `generator`.\"\"\"\n",
    "    model.eval()\n",
    "    batch_losses = []\n",
    "    # Gradients are not needed for evaluation: no_grad saves memory and time\n",
    "    with torch.no_grad():\n",
    "        for x_batch, y_batch in generator:\n",
    "            # Forward pass\n",
    "            y_pred = model(x_batch)\n",
    "            # Compute Loss\n",
    "            loss = criterion(y_pred.squeeze(), y_batch)\n",
    "            batch_losses.append(loss.item())\n",
    "    mean_loss = np.mean(batch_losses)    \n",
    "    return mean_loss\n",
    "\n",
    "class EarlyStopping:\n",
    "    \"\"\"Stop training when the validation loss has not improved for more\n",
    "    than `patience` consecutive epochs.\"\"\"\n",
    "    \n",
    "    def __init__(self, patience=2, verbose=False):\n",
    "        self.patience = patience\n",
    "        self.verbose = verbose\n",
    "        self.counter = 0\n",
    "        # np.inf: the `np.Inf` alias was removed in NumPy 2.0\n",
    "        self.best_score = np.inf\n",
    "    \n",
    "    def continue_training(self,current_score):\n",
    "        # Returns True as long as the number of epochs without improvement\n",
    "        # does not exceed the patience\n",
    "        if self.best_score > current_score:\n",
    "            self.best_score = current_score\n",
    "            self.counter = 0\n",
    "            if self.verbose:\n",
    "                print(\"New best score:\", current_score)\n",
    "        else:\n",
    "            self.counter+=1\n",
    "            if self.verbose:\n",
    "                print(self.counter, \" iterations since best score.\")\n",
    "                \n",
    "        return self.counter <= self.patience  \n",
    "    \n",
    "def training_loop(model,training_generator,valid_generator,optimizer,criterion,max_epochs=100,apply_early_stopping=True,patience=2,verbose=False):\n",
    "    \"\"\"Train `model` on `training_generator`, evaluating on `valid_generator`\n",
    "    after each epoch, optionally stopping early when the validation loss\n",
    "    stops improving for more than `patience` epochs.\n",
    "\n",
    "    Returns (model, training execution time in seconds,\n",
    "    per-epoch training losses, per-epoch validation losses).\n",
    "    \"\"\"\n",
    "    #Setting the model in training mode\n",
    "    model.train()\n",
    "\n",
    "    if apply_early_stopping:\n",
    "        early_stopping = EarlyStopping(verbose=verbose,patience=patience)\n",
    "    \n",
    "    all_train_losses = []\n",
    "    all_valid_losses = []\n",
    "    \n",
    "    #Training loop\n",
    "    start_time=time.time()\n",
    "    for epoch in range(max_epochs):\n",
    "        # evaluate_model switches to eval mode, so switch back each epoch\n",
    "        model.train()\n",
    "        train_loss=[]\n",
    "        for x_batch, y_batch in training_generator:\n",
    "            optimizer.zero_grad()\n",
    "            # Forward pass\n",
    "            y_pred = model(x_batch)\n",
    "            # Compute Loss\n",
    "            loss = criterion(y_pred.squeeze(), y_batch)\n",
    "            # Backward pass\n",
    "            loss.backward()\n",
    "            optimizer.step()   \n",
    "            train_loss.append(loss.item())\n",
    "        \n",
    "        #showing last training loss after each epoch\n",
    "        all_train_losses.append(np.mean(train_loss))\n",
    "        if verbose:\n",
    "            print('')\n",
    "            print('Epoch {}: train loss: {}'.format(epoch, np.mean(train_loss)))\n",
    "        #evaluating the model on the test set after each epoch    \n",
    "        valid_loss = evaluate_model(model,valid_generator,criterion)\n",
    "        all_valid_losses.append(valid_loss)\n",
    "        if verbose:\n",
    "            print('valid loss: {}'.format(valid_loss))\n",
    "        if apply_early_stopping:\n",
    "            if not early_stopping.continue_training(valid_loss):\n",
    "                if verbose:\n",
    "                    print(\"Early stopping\")\n",
    "                break\n",
    "        \n",
    "    training_execution_time=time.time()-start_time\n",
    "    return model,training_execution_time,all_train_losses,all_valid_losses\n",
    "\n",
    "def per_sample_mse(model,generator):\n",
    "    \"\"\"Return the per-sample MSE of `model` over `generator`, as a flat list\n",
    "    with one value per sample, in the order produced by the generator.\"\"\"\n",
    "    model.eval()\n",
    "    criterion = torch.nn.MSELoss(reduction=\"none\")\n",
    "    batch_losses = []\n",
    "    # Gradients are not needed for evaluation: no_grad saves memory and time\n",
    "    with torch.no_grad():\n",
    "        for x_batch, y_batch in generator:\n",
    "            # Forward pass\n",
    "            y_pred = model(x_batch)\n",
    "            # Compute Loss\n",
    "            loss = criterion(y_pred.squeeze(), y_batch)\n",
    "            # Average element-wise losses over the feature dimension\n",
    "            loss_app = list(torch.mean(loss,axis=1).detach().numpy())\n",
    "            batch_losses.extend(loss_app)\n",
    "    return batch_losses\n",
    "\n",
    "class FraudDatasetForPipe(torch.utils.data.Dataset):\n",
    "    \"\"\"Torch Dataset converting its inputs to tensors so it can be used\n",
    "    inside scikit-learn-style pipelines.\n",
    "\n",
    "    `x` is converted to FloatTensor; `y` (read through .values, so presumably\n",
    "    a pandas Series) to LongTensor. If `y` is None, __getitem__ returns the\n",
    "    dummy label -1 instead.\n",
    "    \"\"\"\n",
    "    \n",
    "    def __init__(self, x, y):\n",
    "        'Initialization'\n",
    "        self.x = torch.FloatTensor(x)\n",
    "        self.y = None\n",
    "        if y is not None:\n",
    "            self.y = torch.LongTensor(y.values)\n",
    "        \n",
    "\n",
    "    def __len__(self):\n",
    "        'Denotes the total number of samples'\n",
    "        return len(self.x)\n",
    "\n",
    "    def __getitem__(self, index):\n",
    "        'Generates one sample of data'\n",
    "        # Select sample index\n",
    "        if self.y is not None:\n",
    "            return self.x[index], self.y[index]\n",
    "        else:\n",
    "            return self.x[index], -1  \n",
    "        \n",
    "def rolling_window(array, window):\n",
    "    # Returns, for each position i of `array`, the window of the `window`\n",
    "    # values ending at i, i.e. [array[i-window+1], ..., array[i]], as a 2D\n",
    "    # int array. The input is left-padded with -1 so the first windows are\n",
    "    # complete.\n",
    "    a = np.concatenate([np.ones((window-1,))*-1,array])\n",
    "    shape = a.shape[:-1] + (a.shape[-1] - window + 1, window)\n",
    "    strides = a.strides + (a.strides[-1],)\n",
    "    # as_strided builds a zero-copy sliding-window view over `a`\n",
    "    return np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides).astype(int)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### FraudDatasetUnsupervised\n",
    "\n",
    "First use in [Chapter 7, Autoencoders and anomaly detection](Autoencoders)."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "class FraudDatasetUnsupervised(torch.utils.data.Dataset):\n",
    "    \"\"\"Torch Dataset for autoencoder training: when `output` is True,\n",
    "    each sample is returned as (input, target) with target == input.\n",
    "    \"\"\"\n",
    "    \n",
    "    def __init__(self, x,output=True):\n",
    "        'Initialization'\n",
    "        self.x = x\n",
    "        self.output = output\n",
    "\n",
    "    def __len__(self):\n",
    "        'Denotes the total number of samples'\n",
    "        return len(self.x)\n",
    "\n",
    "    def __getitem__(self, index):\n",
    "        'Generates one sample of data'\n",
    "        # Select sample index\n",
    "        if self.output:\n",
    "            return self.x[index], self.x[index]\n",
    "        else:\n",
    "            return self.x[index]"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### SimpleFraudMLPWithDropout and FraudMLP modules\n",
    "\n",
    "First use in [Chapter 7, Feed-forward neural network](FeedForwardNeuralNetworks)."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "class SimpleFraudMLPWithDropout(torch.nn.Module):\n",
    "    \n",
    "        \"\"\"One-hidden-layer perceptron with dropout and a sigmoid output,\n",
    "        for binary (fraud) classification.\"\"\"\n",
    "\n",
    "        def __init__(self, input_size, hidden_size,p):\n",
    "            super(SimpleFraudMLPWithDropout, self).__init__()\n",
    "            # parameters\n",
    "            self.input_size = input_size\n",
    "            self.hidden_size  = hidden_size\n",
    "            self.p = p  # dropout probability\n",
    "            \n",
    "            #input to hidden\n",
    "            self.fc1 = torch.nn.Linear(self.input_size, self.hidden_size)\n",
    "            self.relu = torch.nn.ReLU()\n",
    "            #hidden to output\n",
    "            self.fc2 = torch.nn.Linear(self.hidden_size, 1)\n",
    "            self.sigmoid = torch.nn.Sigmoid()\n",
    "            \n",
    "            self.dropout = torch.nn.Dropout(self.p)\n",
    "            \n",
    "        def forward(self, x):\n",
    "            \n",
    "            hidden = self.fc1(x)\n",
    "            hidden = self.relu(hidden)\n",
    "            \n",
    "            # Dropout is only active in training mode (after model.train())\n",
    "            hidden = self.dropout(hidden)\n",
    "            \n",
    "            output = self.fc2(hidden)\n",
    "            output = self.sigmoid(output)\n",
    "            \n",
    "            return output\n",
    "        \n",
    "class FraudMLP(torch.nn.Module):\n",
    "    \n",
    "        \"\"\"Feed-forward network with a configurable number of hidden layers,\n",
    "        dropout, and a 2-class softmax output.\"\"\"\n",
    "\n",
    "        def __init__(self, input_size,hidden_size=100,num_layers=1,p=0):\n",
    "            super(FraudMLP, self).__init__()\n",
    "            # parameters\n",
    "            self.input_size = input_size\n",
    "            self.hidden_size  = hidden_size\n",
    "            self.p = p\n",
    "            \n",
    "            #input to hidden\n",
    "            self.fc1 = torch.nn.Linear(self.input_size, self.hidden_size)\n",
    "            self.relu = torch.nn.ReLU()\n",
    "            \n",
    "            # ModuleList (rather than a plain Python list) is required so the\n",
    "            # hidden layers are registered as sub-modules: their parameters\n",
    "            # are then visible to optimizers via model.parameters() and are\n",
    "            # moved by model.to(device)\n",
    "            self.fc_hidden=torch.nn.ModuleList()\n",
    "            for i in range(num_layers-1):\n",
    "                self.fc_hidden.append(torch.nn.Linear(self.hidden_size, self.hidden_size))\n",
    "                self.fc_hidden.append(torch.nn.ReLU())\n",
    "                \n",
    "            #hidden to output\n",
    "            self.fc2 = torch.nn.Linear(self.hidden_size, 2)\n",
    "            # Explicit dim avoids the implicit-dimension deprecation warning\n",
    "            self.softmax = torch.nn.Softmax(dim=1)\n",
    "            \n",
    "            self.dropout = torch.nn.Dropout(self.p)\n",
    "            \n",
    "        def forward(self, x):\n",
    "            \n",
    "            hidden = self.fc1(x)\n",
    "            hidden = self.relu(hidden)             \n",
    "            hidden = self.dropout(hidden)\n",
    "            \n",
    "            for layer in self.fc_hidden:\n",
    "                hidden=layer(hidden)\n",
    "                hidden = self.dropout(hidden)\n",
    "            \n",
    "            output = self.fc2(hidden)\n",
    "            output = self.softmax(output)\n",
    "            \n",
    "            return output"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### SimpleAutoencoder module\n",
    "\n",
    "First use in [Chapter 7, Autoencoders and anomaly detection](Autoencoders)."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "class SimpleAutoencoder(torch.nn.Module):\n",
    "    \n",
    "        \"\"\"Fully-connected autoencoder:\n",
    "        input -> intermediate -> code -> intermediate -> input.\"\"\"\n",
    "\n",
    "        def __init__(self, input_size, intermediate_size, code_size):\n",
    "            super(SimpleAutoencoder, self).__init__()\n",
    "            # parameters\n",
    "            self.input_size = input_size\n",
    "            self.intermediate_size = intermediate_size           \n",
    "            self.code_size  = code_size\n",
    "            \n",
    "            self.relu = torch.nn.ReLU()   \n",
    "            \n",
    "            #encoder\n",
    "            self.fc1 = torch.nn.Linear(self.input_size, self.intermediate_size)\n",
    "            self.fc2 = torch.nn.Linear(self.intermediate_size, self.code_size)\n",
    "            \n",
    "            #decoder \n",
    "            self.fc3 = torch.nn.Linear(self.code_size, self.intermediate_size)            \n",
    "            self.fc4 = torch.nn.Linear(self.intermediate_size, self.input_size)\n",
    "            \n",
    "            \n",
    "        def forward(self, x):\n",
    "            \n",
    "            hidden = self.fc1(x)\n",
    "            hidden = self.relu(hidden)\n",
    "            \n",
    "            code = self.fc2(hidden)\n",
    "            code = self.relu(code)\n",
    " \n",
    "            hidden = self.fc3(code)\n",
    "            hidden = self.relu(hidden)\n",
    "            \n",
    "            output = self.fc4(hidden)\n",
    "            # linear activation in the final layer\n",
    "            \n",
    "            return output"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Attention module\n",
    "\n",
    "First use in [Chapter 7, Sequential models and representation learning](SequentialModeling)."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# source : https://github.com/IBM/pytorch-seq2seq/blob/master/seq2seq/models/attention.py\n",
    "\n",
    "class Attention(torch.nn.Module):\n",
    "    r\"\"\"\n",
    "    Applies an attention mechanism on the output features from the decoder.\n",
    "    .. math::\n",
    "            \\begin{array}{ll}\n",
    "            x = context*output \\\\\n",
    "            attn = exp(x_i) / sum_j exp(x_j) \\\\\n",
    "            output = \\tanh(w * (attn * context) + b * output)\n",
    "            \\end{array}\n",
    "    Args:\n",
    "        dim(int): The number of expected features in the output\n",
    "    Inputs: output, context\n",
    "        - **output** (batch, output_len, dimensions): tensor containing the output features from the decoder.\n",
    "        - **context** (batch, input_len, dimensions): tensor containing features of the encoded input sequence.\n",
    "    Outputs: output, attn\n",
    "        - **output** (batch, output_len, dimensions): tensor containing the attended output features from the decoder.\n",
    "        - **attn** (batch, output_len, input_len): tensor containing attention weights.\n",
    "    Attributes:\n",
    "        linear_out (torch.nn.Linear): applies a linear transformation to the incoming data: :math:`y = Ax + b`.\n",
    "        mask (torch.Tensor, optional): applies a :math:`-inf` to the indices specified in the `Tensor`.\n",
    "    Examples::\n",
    "         >>> attention = seq2seq.models.Attention(256)\n",
    "         >>> context = Variable(torch.randn(5, 3, 256))\n",
    "         >>> output = Variable(torch.randn(5, 5, 256))\n",
    "         >>> output, attn = attention(output, context)\n",
    "    \"\"\"\n",
    "    def __init__(self, dim):\n",
    "        super(Attention, self).__init__()\n",
    "        self.linear_out = torch.nn.Linear(dim*2, dim)\n",
    "        self.mask = None\n",
    "\n",
    "    def set_mask(self, mask):\n",
    "        \"\"\"\n",
    "        Sets indices to be masked\n",
    "        Args:\n",
    "            mask (torch.Tensor): tensor containing indices to be masked\n",
    "        \"\"\"\n",
    "        self.mask = mask\n",
    "\n",
    "    def forward(self, output, context):\n",
    "        batch_size = output.size(0)\n",
    "        hidden_size = output.size(2)\n",
    "        input_size = context.size(1)\n",
    "        # (batch, out_len, dim) * (batch, in_len, dim) -> (batch, out_len, in_len)\n",
    "        attn = torch.bmm(output, context.transpose(1, 2))\n",
    "        if self.mask is not None:\n",
    "            attn.data.masked_fill_(self.mask, -float('inf'))\n",
    "        attn = F.softmax(attn.view(-1, input_size), dim=1).view(batch_size, -1, input_size)\n",
    "\n",
    "        # (batch, out_len, in_len) * (batch, in_len, dim) -> (batch, out_len, dim)\n",
    "        mix = torch.bmm(attn, context)\n",
    "\n",
    "        # concat -> (batch, out_len, 2*dim)\n",
    "        combined = torch.cat((mix, output), dim=2)\n",
    "        # output -> (batch, out_len, dim)\n",
    "        # torch.tanh: torch.nn.functional.tanh is deprecated\n",
    "        output = torch.tanh(self.linear_out(combined.view(-1, 2 * hidden_size))).view(batch_size, -1, hidden_size)\n",
    "\n",
    "        return output, attn\n"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.7"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
