{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "import pandas as pd\n",
    "import gc\n",
    "import time\n",
    "from contextlib import contextmanager\n",
    "import lightgbm as lgb\n",
    "import xgboost as xgb\n",
    "import category_encoders as ce\n",
    "from sklearn.feature_selection import VarianceThreshold, SelectKBest, chi2\n",
    "from sklearn.metrics import roc_auc_score, roc_curve\n",
    "from sklearn.model_selection import KFold, StratifiedKFold\n",
    "from sklearn.decomposition import truncated_svd\n",
    "from scipy.cluster.vq import kmeans2, whiten\n",
    "import matplotlib.pyplot as plt\n",
    "import seaborn as sns\n",
    "import warnings\n",
    "warnings.simplefilter(action='ignore', category=FutureWarning)\n",
    "\n",
    "# Row limit for all CSV readers below; None = read every row\n",
    "num_rows = None"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "def descretize(x, n):\n",
    "    \"\"\"Quantile-bin a 1-D numeric array into up to n buckets, returned as interval strings.\"\"\"\n",
    "    # duplicates='drop' tolerates skewed data where quantile edges coincide\n",
    "    return list(map(str, list(pd.qcut(x, n, duplicates='drop'))))\n",
    "\n",
    "# One-hot encoding for categorical columns with get_dummies\n",
    "def one_hot_encoder(df, nan_as_category=True):\n",
    "    \"\"\"One-hot encode all object-dtype columns of df.\n",
    "\n",
    "    Returns the encoded frame and the list of newly created column names.\n",
    "    \"\"\"\n",
    "    original_columns = list(df.columns)\n",
    "    categorical_columns = [col for col in df.columns if df[col].dtype == 'object']\n",
    "    df = pd.get_dummies(df, columns=categorical_columns, dummy_na=nan_as_category)\n",
    "    new_columns = [c for c in df.columns if c not in original_columns]\n",
    "    return df, new_columns\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Preprocess application_train.csv and application_test.csv\n",
    "def application_train_test(num_rows = None, nan_as_category=False):\n",
    "    # Read data and merge\n",
    "    df = pd.read_csv('/media/limbo/Home-Credit/data/application_train.csv.zip', nrows= num_rows)\n",
    "    \n",
    "    n_train = df.shape[0]\n",
    "    \n",
    "    test_df = pd.read_csv('/media/limbo/Home-Credit/data/application_test.csv.zip', nrows= num_rows)\n",
    "    print(\"Train samples: {}, test samples: {}\".format(len(df), len(test_df)))\n",
    "    # DataFrame.append is deprecated (removed in pandas 2.x); pd.concat is equivalent here\n",
    "    df = pd.concat([df, test_df]).reset_index()\n",
    "    \n",
    "    \n",
    "    df['CODE_GENDER'].replace('XNA', np.nan, inplace=True)\n",
    "    # 365243 is the dataset's sentinel for 'no employment' -> treat as missing\n",
    "    df['DAYS_EMPLOYED'].replace(365243, np.nan, inplace=True)\n",
    "    df['NAME_FAMILY_STATUS'].replace('Unknown', np.nan, inplace=True)\n",
    "    df['ORGANIZATION_TYPE'].replace('XNA', np.nan, inplace=True)\n",
    "    \n",
    "    # (CODE_GENDER 'XNA' rows were already mapped to NaN above, so no row filter is needed)\n",
    "    docs = [_f for _f in df.columns if 'FLAG_DOC' in _f]\n",
    "    live = [_f for _f in df.columns if ('FLAG_' in _f) & ('FLAG_DOC' not in _f) & ('_FLAG_' not in _f)]\n",
    "\n",
    "    inc_by_org = df[['AMT_INCOME_TOTAL', 'ORGANIZATION_TYPE']].groupby('ORGANIZATION_TYPE').median()['AMT_INCOME_TOTAL']\n",
    "\n",
    "    df['NEW_CREDIT_TO_ANNUITY_RATIO'] = df['AMT_CREDIT'] / df['AMT_ANNUITY']\n",
    "    df['NEW_AMT_INCOME_TOTAL_RATIO'] = df['AMT_CREDIT'] / df['AMT_INCOME_TOTAL']\n",
    "    df['NEW_CREDIT_TO_GOODS_RATIO'] = df['AMT_CREDIT'] / df['AMT_GOODS_PRICE']\n",
    "    df['NEW_DOC_IND_AVG'] = df[docs].mean(axis=1)\n",
    "    df['NEW_DOC_IND_STD'] = df[docs].std(axis=1)\n",
    "    df['NEW_DOC_IND_KURT'] = df[docs].kurtosis(axis=1)\n",
    "    df['NEW_LIVE_IND_SUM'] = df[live].sum(axis=1)\n",
    "    df['NEW_LIVE_IND_STD'] = df[live].std(axis=1)\n",
    "    df['NEW_LIVE_IND_KURT'] = df[live].kurtosis(axis=1)\n",
    "    df['NEW_INC_PER_CHLD'] = df['AMT_INCOME_TOTAL'] / (1 + df['CNT_CHILDREN'])\n",
    "    df['NEW_INC_BY_ORG'] = df['ORGANIZATION_TYPE'].map(inc_by_org)\n",
    "    df['NEW_EMPLOY_TO_BIRTH_RATIO'] = df['DAYS_EMPLOYED'] / df['DAYS_BIRTH']\n",
    "    df['NEW_ANNUITY_TO_INCOME_RATIO'] = df['AMT_ANNUITY'] / (1 + df['AMT_INCOME_TOTAL'])\n",
    "    df['NEW_SOURCES_PROD'] = df['EXT_SOURCE_1'] * df['EXT_SOURCE_2'] * df['EXT_SOURCE_3']\n",
    "    df['NEW_EXT_SOURCES_MEAN'] = df[['EXT_SOURCE_1', 'EXT_SOURCE_2', 'EXT_SOURCE_3']].mean(axis=1)\n",
    "    df['NEW_SCORES_STD'] = df[['EXT_SOURCE_1', 'EXT_SOURCE_2', 'EXT_SOURCE_3']].std(axis=1)\n",
    "    df['NEW_SCORES_STD'] = df['NEW_SCORES_STD'].fillna(df['NEW_SCORES_STD'].mean())\n",
    "    df['NEW_CAR_TO_BIRTH_RATIO'] = df['OWN_CAR_AGE'] / df['DAYS_BIRTH']\n",
    "    df['NEW_CAR_TO_EMPLOY_RATIO'] = df['OWN_CAR_AGE'] / df['DAYS_EMPLOYED']\n",
    "    df['NEW_PHONE_TO_BIRTH_RATIO'] = df['DAYS_LAST_PHONE_CHANGE'] / df['DAYS_BIRTH']\n",
    "    df['NEW_PHONE_TO_EMPLOY_RATIO'] = df['DAYS_LAST_PHONE_CHANGE'] / df['DAYS_EMPLOYED']\n",
    "    df['NEW_CREDIT_TO_INCOME_RATIO'] = df['AMT_CREDIT'] / df['AMT_INCOME_TOTAL']\n",
    "    \n",
    "    df['children_ratio'] = df['CNT_CHILDREN'] / df['CNT_FAM_MEMBERS']\n",
    "    \n",
    "    \n",
    "    df['NEW_EXT_SOURCES_MEDIAN'] = df[['EXT_SOURCE_1', 'EXT_SOURCE_2', 'EXT_SOURCE_3']].median(axis=1)\n",
    "    \n",
    "    df['NEW_DOC_IND_SKEW'] = df[docs].skew(axis=1)\n",
    "    df['NEW_LIVE_IND_SKEW'] = df[live].skew(axis=1)\n",
    "    \n",
    "    \n",
    "    df['ind_0'] = df['DAYS_EMPLOYED'] - df['DAYS_EMPLOYED'].replace([np.inf, -np.inf], np.nan).fillna(df['DAYS_EMPLOYED'].dropna().median()).mean()\n",
    "    df['ind_1'] = df['DAYS_EMPLOYED'] - df['DAYS_EMPLOYED'].replace([np.inf, -np.inf], np.nan).fillna(df['DAYS_EMPLOYED'].dropna().median()).median()\n",
    "    \n",
    "    df['ind_2'] = df['DAYS_BIRTH'] - df['DAYS_BIRTH'].replace([np.inf, -np.inf], np.nan).fillna(df['DAYS_BIRTH'].dropna().median()).mean()\n",
    "    df['ind_3'] = df['DAYS_BIRTH'] - df['DAYS_BIRTH'].replace([np.inf, -np.inf], np.nan).fillna(df['DAYS_BIRTH'].dropna().median()).median()\n",
    "    \n",
    "    \n",
    "    df['ind_4'] = df['AMT_INCOME_TOTAL'] - df['AMT_INCOME_TOTAL'].replace([np.inf, -np.inf], np.nan).fillna(df['AMT_INCOME_TOTAL'].dropna().median()).mean()\n",
    "    df['ind_5'] = df['AMT_INCOME_TOTAL'] - df['AMT_INCOME_TOTAL'].replace([np.inf, -np.inf], np.nan).fillna(df['AMT_INCOME_TOTAL'].dropna().median()).median() \n",
    "   \n",
    "    \n",
    "    df['ind_6'] = df['AMT_CREDIT'] - df['AMT_CREDIT'].replace([np.inf, -np.inf], np.nan).fillna(df['AMT_CREDIT'].dropna().median()).mean()\n",
    "    df['ind_7'] = df['AMT_CREDIT'] - df['AMT_CREDIT'].replace([np.inf, -np.inf], np.nan).fillna(df['AMT_CREDIT'].dropna().median()).median() \n",
    "   \n",
    "    df['ind_8'] = df['AMT_ANNUITY'] - df['AMT_ANNUITY'].replace([np.inf, -np.inf], np.nan).fillna(df['AMT_ANNUITY'].dropna().median()).mean()\n",
    "    df['ind_9'] = df['AMT_ANNUITY'] - df['AMT_ANNUITY'].replace([np.inf, -np.inf], np.nan).fillna(df['AMT_ANNUITY'].dropna().median()).median() \n",
    "    \n",
    "    df['ind_10'] = df['AMT_CREDIT'] - df['AMT_INCOME_TOTAL'].replace([np.inf, -np.inf], np.nan).fillna(df['AMT_INCOME_TOTAL'].dropna().median()).mean()\n",
    "    df['ind_11'] = df['AMT_CREDIT'] - df['AMT_INCOME_TOTAL'].replace([np.inf, -np.inf], np.nan).fillna(df['AMT_INCOME_TOTAL'].dropna().median()).median() \n",
    "    \n",
    "    \n",
    "    AGGREGATION_RECIPIES = [\n",
    "    (['CODE_GENDER', 'NAME_EDUCATION_TYPE'], [('AMT_ANNUITY', 'max'),\n",
    "                                              ('AMT_CREDIT', 'max'),\n",
    "                                              ('EXT_SOURCE_1', 'mean'),\n",
    "                                              ('EXT_SOURCE_2', 'mean'),\n",
    "                                              ('OWN_CAR_AGE', 'max'),\n",
    "                                              ('OWN_CAR_AGE', 'sum')]),\n",
    "    (['CODE_GENDER', 'ORGANIZATION_TYPE'], [('AMT_ANNUITY', 'mean'),\n",
    "                                            ('AMT_INCOME_TOTAL', 'mean'),\n",
    "                                            ('DAYS_REGISTRATION', 'mean'),\n",
    "                                            ('EXT_SOURCE_1', 'mean'),\n",
    "                                            ('NEW_CREDIT_TO_ANNUITY_RATIO', 'mean')]),\n",
    "    (['CODE_GENDER', 'REG_CITY_NOT_WORK_CITY'], [('AMT_ANNUITY', 'mean'),\n",
    "                                                 ('CNT_CHILDREN', 'mean'),\n",
    "                                                 ('DAYS_ID_PUBLISH', 'mean')]),\n",
    "    (['CODE_GENDER', 'NAME_EDUCATION_TYPE', 'OCCUPATION_TYPE', 'REG_CITY_NOT_WORK_CITY'], [('EXT_SOURCE_1', 'mean'),\n",
    "                                                                                           ('EXT_SOURCE_2', 'mean')]),\n",
    "    (['NAME_EDUCATION_TYPE', 'OCCUPATION_TYPE'], [('AMT_CREDIT', 'mean'),\n",
    "                                                  ('AMT_REQ_CREDIT_BUREAU_YEAR', 'mean'),\n",
    "                                                  ('APARTMENTS_AVG', 'mean'),\n",
    "                                                  ('BASEMENTAREA_AVG', 'mean'),\n",
    "                                                  ('EXT_SOURCE_1', 'mean'),\n",
    "                                                  ('EXT_SOURCE_2', 'mean'),\n",
    "                                                  ('EXT_SOURCE_3', 'mean'),\n",
    "                                                  ('NONLIVINGAREA_AVG', 'mean'),\n",
    "                                                  ('OWN_CAR_AGE', 'mean')]),\n",
    "    (['NAME_EDUCATION_TYPE', 'OCCUPATION_TYPE', 'REG_CITY_NOT_WORK_CITY'], [('ELEVATORS_AVG', 'mean'),\n",
    "                                                                            ('EXT_SOURCE_1', 'mean')]),\n",
    "    (['OCCUPATION_TYPE'], [('AMT_ANNUITY', 'median'),\n",
    "                           ('CNT_CHILDREN', 'median'),\n",
    "                           ('CNT_FAM_MEMBERS', 'median'),\n",
    "                           ('DAYS_BIRTH', 'median'),\n",
    "                           ('DAYS_EMPLOYED', 'median'),\n",
    "                           ('NEW_CREDIT_TO_ANNUITY_RATIO', 'median'),\n",
    "                           ('DAYS_REGISTRATION', 'median'),\n",
    "                           ('EXT_SOURCE_1', 'median'),\n",
    "                           ('EXT_SOURCE_2', 'median'),\n",
    "                           ('EXT_SOURCE_3', 'median')]),\n",
    "]\n",
    "\n",
    "    \n",
    "    for groupby_cols, specs in AGGREGATION_RECIPIES:\n",
    "        group_object = df.groupby(groupby_cols)\n",
    "        for select, agg in specs:\n",
    "            groupby_aggregate_name = '{}_{}_{}'.format('_'.join(groupby_cols), agg, select)\n",
    "            df = df.merge(group_object[select]\n",
    "                              .agg(agg)\n",
    "                              .reset_index()\n",
    "                              .rename(index=str,\n",
    "                                      columns={select: groupby_aggregate_name})\n",
    "                              [groupby_cols + [groupby_aggregate_name]],\n",
    "                              on=groupby_cols,\n",
    "                              how='left')\n",
    "    \n",
    "    df['retirement_age'] = (df['DAYS_BIRTH'] > -14000).astype(int)\n",
    "    df['long_employment'] = (df['DAYS_EMPLOYED'] > -2000).astype(int)\n",
    "    df['cnt_non_child'] = df['CNT_FAM_MEMBERS'] - df['CNT_CHILDREN']\n",
    "    df['child_to_non_child_ratio'] = df['CNT_CHILDREN'] / df['cnt_non_child']\n",
    "    df['income_per_non_child'] = df['AMT_INCOME_TOTAL'] / df['cnt_non_child']\n",
    "    df['credit_per_person'] = df['AMT_CREDIT'] / df['CNT_FAM_MEMBERS']\n",
    "    df['credit_per_child'] = df['AMT_CREDIT'] / (1 + df['CNT_CHILDREN'])\n",
    "    df['credit_per_non_child'] = df['AMT_CREDIT'] / df['cnt_non_child']\n",
    "    \n",
    "    \n",
    "    df['p_0'] = descretize(df['credit_per_non_child'].values, 2 ** 5)\n",
    "    df['p_1'] = descretize(df['credit_per_person'].values, 2 ** 5)\n",
    "    df['p_2'] = descretize(df['credit_per_child'].values, 2 ** 5)\n",
    "    df['p_3'] = descretize(df['retirement_age'].values, 2 ** 5)\n",
    "    df['p_4'] = descretize(df['income_per_non_child'].values, 2 ** 5)\n",
    "    df['p_5'] = descretize(df['child_to_non_child_ratio'].values, 2 ** 5)\n",
    "    \n",
    "    df['p_6'] = descretize(df['NEW_CREDIT_TO_ANNUITY_RATIO'].values, 2 ** 5)\n",
    "    df['p_7'] = descretize(df['NEW_CREDIT_TO_ANNUITY_RATIO'].values, 2 ** 6)\n",
    "    df['p_8'] = descretize(df['NEW_CREDIT_TO_ANNUITY_RATIO'].values, 2 ** 7)\n",
    "    \n",
    "    \n",
    "    df['pe_0'] = descretize(df['credit_per_non_child'].values, 2 ** 6)\n",
    "    df['pe_1'] = descretize(df['credit_per_person'].values, 2 ** 6)\n",
    "    df['pe_2'] = descretize(df['credit_per_child'].values, 2 ** 6)\n",
    "    df['pe_3'] = descretize(df['retirement_age'].values, 2 ** 6)\n",
    "    df['pe_4'] = descretize(df['income_per_non_child'].values, 2 ** 6)\n",
    "    df['pe_5'] = descretize(df['child_to_non_child_ratio'].values, 2 ** 6)\n",
    "          \n",
    "    \n",
    "    c = df['NEW_CREDIT_TO_ANNUITY_RATIO'].replace([np.inf, -np.inf], np.nan).fillna(999).values\n",
    "    # Cluster log1p(credit/annuity) at increasing granularities; the label vector\n",
    "    # for k clusters becomes feature x_i (same call order as the original unrolled\n",
    "    # code, so the global-RNG draws and resulting labels are unchanged)\n",
    "    for i, k in enumerate([2, 4, 8, 16, 32, 64, 128, 150, 256, 512, 1024]):\n",
    "        _, labels = kmeans2(np.log1p(c), k, iter=333)\n",
    "        df['x_{}'.format(i)] = labels\n",
    "\n",
    "    \n",
    "    # Categorical features with Binary encode (0 or 1; two categories)\n",
    "    for bin_feature in ['CODE_GENDER', 'FLAG_OWN_CAR', 'FLAG_OWN_REALTY']:\n",
    "        df[bin_feature], uniques = pd.factorize(df[bin_feature])\n",
    "            \n",
    "    # Categorical features with One-Hot encode\n",
    "    df, cat_cols = one_hot_encoder(df, nan_as_category)\n",
    "    \n",
    "    del test_df\n",
    "    gc.collect()\n",
    "    return df"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Train samples: 307511, test samples: 48744\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/kain/Workstation/PyEnv/lib/python3.5/site-packages/numpy/lib/function_base.py:4033: RuntimeWarning: All-NaN slice encountered\n",
      "  r = func(a, **kwargs)\n",
      "/home/kain/Workstation/PyEnv/lib/python3.5/site-packages/scipy/cluster/vq.py:523: UserWarning: One of the clusters is empty. Re-run kmeans with a different initialization.\n",
      "  warnings.warn(\"One of the clusters is empty. \"\n"
     ]
    }
   ],
   "source": [
    "df = application_train_test(num_rows=None, nan_as_category=False)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "14"
      ]
     },
     "execution_count": 5,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "gc.collect()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "def bureau_and_balance(num_rows = None, nan_as_category = True):\n",
    "    bureau = pd.read_csv('../data/bureau.csv', nrows = num_rows)\n",
    "    bb = pd.read_csv('../data/bureau_balance.csv', nrows = num_rows)\n",
    "    bb, bb_cat = one_hot_encoder(bb, nan_as_category)\n",
    "    bureau, bureau_cat = one_hot_encoder(bureau, nan_as_category)\n",
    "    \n",
    "    # Bureau balance: Perform aggregations and merge with bureau.csv\n",
    "    bb_aggregations = {'MONTHS_BALANCE': ['min', 'max', 'size', 'median']}\n",
    "    for col in bb_cat:\n",
    "        bb_aggregations[col] = ['median']\n",
    "    bb_agg = bb.groupby('SK_ID_BUREAU').agg(bb_aggregations)\n",
    "    bb_agg.columns = pd.Index([e[0] + \"_\" + e[1].upper() for e in bb_agg.columns.tolist()])\n",
    "    bureau = bureau.join(bb_agg, how='left', on='SK_ID_BUREAU')\n",
    "    bureau.drop(['SK_ID_BUREAU'], axis=1, inplace= True)\n",
    "    del bb, bb_agg\n",
    "    gc.collect()\n",
    "    \n",
    "    # Bureau and bureau_balance numeric features\n",
    "    num_aggregations = {\n",
    "        'DAYS_CREDIT': ['min', 'max', 'median', 'var'],\n",
    "        'DAYS_CREDIT_ENDDATE': ['min', 'max', 'median'],\n",
    "        'DAYS_CREDIT_UPDATE': ['median'],\n",
    "        'CREDIT_DAY_OVERDUE': ['max', 'median'],\n",
    "        'AMT_CREDIT_MAX_OVERDUE': ['median'],\n",
    "        'AMT_CREDIT_SUM': ['max', 'median', 'sum'],\n",
    "        'AMT_CREDIT_SUM_DEBT': ['max', 'median', 'sum'],\n",
    "        'AMT_CREDIT_SUM_OVERDUE': ['median'],\n",
    "        'AMT_CREDIT_SUM_LIMIT': ['median', 'sum'],\n",
    "        'AMT_ANNUITY': ['max', 'median'],\n",
    "        'CNT_CREDIT_PROLONG': ['sum'],\n",
    "        'MONTHS_BALANCE_MIN': ['min', 'median'],\n",
    "        'MONTHS_BALANCE_MAX': ['max', 'median'],\n",
    "        'MONTHS_BALANCE_SIZE': ['median', 'sum']\n",
    "    }\n",
    "    # Bureau and bureau_balance categorical features\n",
    "    cat_aggregations = {}\n",
    "    for cat in bureau_cat: cat_aggregations[cat] = ['median']\n",
    "    for cat in bb_cat: cat_aggregations[cat + \"_MEDIAN\"] = ['median']\n",
    "    \n",
    "    bureau_agg = bureau.groupby('SK_ID_CURR').agg({**num_aggregations, **cat_aggregations})\n",
    "    bureau_agg.columns = pd.Index(['BURO_' + e[0] + \"_\" + e[1].upper() for e in bureau_agg.columns.tolist()])\n",
    "    # Bureau: Active credits - using only numerical aggregations\n",
    "    active = bureau[bureau['CREDIT_ACTIVE_Active'] == 1]\n",
    "    active_agg = active.groupby('SK_ID_CURR').agg(num_aggregations)\n",
    "    cols = active_agg.columns.tolist()\n",
    "    active_agg.columns = pd.Index(['ACTIVE_' + e[0] + \"_\" + e[1].upper() for e in active_agg.columns.tolist()])\n",
    "    bureau_agg = bureau_agg.join(active_agg, how='left', on='SK_ID_CURR')\n",
    "    del active, active_agg\n",
    "    gc.collect()\n",
    "    # Bureau: Closed credits - using only numerical aggregations\n",
    "    closed = bureau[bureau['CREDIT_ACTIVE_Closed'] == 1]\n",
    "    closed_agg = closed.groupby('SK_ID_CURR').agg(num_aggregations)\n",
    "    closed_agg.columns = pd.Index(['CLOSED_' + e[0] + \"_\" + e[1].upper() for e in closed_agg.columns.tolist()])\n",
    "    bureau_agg = bureau_agg.join(closed_agg, how='left', on='SK_ID_CURR')\n",
    "    \n",
    "    for e in cols:\n",
    "        bureau_agg['NEW_RATIO_BURO_' + e[0] + \"_\" + e[1].upper()] = bureau_agg['ACTIVE_' + e[0] + \"_\" + e[1].upper()] / bureau_agg['CLOSED_' + e[0] + \"_\" + e[1].upper()]\n",
    "    \n",
    "    del closed, closed_agg, bureau\n",
    "    gc.collect()\n",
    "    return bureau_agg"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "bureau = bureau_and_balance(num_rows)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "7"
      ]
     },
     "execution_count": 8,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "df = df.join(bureau, how='left', on='SK_ID_CURR')\n",
    "del bureau\n",
    "gc.collect()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [],
   "source": [
    "def previous_applications(num_rows=None, nan_as_category=True):\n",
    "    prev = pd.read_csv('../data/previous_application.csv', nrows = num_rows)\n",
    "    prev, cat_cols = one_hot_encoder(prev, nan_as_category= True)\n",
    "    # Days 365.243 values -> nan\n",
    "    prev['DAYS_FIRST_DRAWING'].replace(365243, np.nan, inplace= True)\n",
    "    prev['DAYS_FIRST_DUE'].replace(365243, np.nan, inplace= True)\n",
    "    prev['DAYS_LAST_DUE_1ST_VERSION'].replace(365243, np.nan, inplace= True)\n",
    "    prev['DAYS_LAST_DUE'].replace(365243, np.nan, inplace= True)\n",
    "    prev['DAYS_TERMINATION'].replace(365243, np.nan, inplace= True)\n",
    "    # Add feature: value ask / value received percentage\n",
    "    prev['APP_CREDIT_PERC'] = prev['AMT_APPLICATION'] / prev['AMT_CREDIT']\n",
    "    # Previous applications numeric features\n",
    "    num_aggregations = {\n",
    "        'AMT_ANNUITY': ['min', 'max', 'median'],\n",
    "        'AMT_APPLICATION': ['min', 'max', 'median'],\n",
    "        'AMT_CREDIT': ['min', 'max', 'median'],\n",
    "        'APP_CREDIT_PERC': ['min', 'max', 'median', 'var'],\n",
    "        'AMT_DOWN_PAYMENT': ['min', 'max', 'median'],\n",
    "        'AMT_GOODS_PRICE': ['min', 'max', 'median'],\n",
    "        'HOUR_APPR_PROCESS_START': ['min', 'max', 'median'],\n",
    "        'RATE_DOWN_PAYMENT': ['min', 'max', 'median'],\n",
    "        'DAYS_DECISION': ['min', 'max', 'median'],\n",
    "        'CNT_PAYMENT': ['median', 'sum'],\n",
    "    }\n",
    "    # Previous applications categorical features\n",
    "    cat_aggregations = {}\n",
    "    for cat in cat_cols:\n",
    "        cat_aggregations[cat] = ['median']\n",
    "    \n",
    "    prev_agg = prev.groupby('SK_ID_CURR').agg({**num_aggregations, **cat_aggregations})\n",
    "    prev_agg.columns = pd.Index(['PREV_' + e[0] + \"_\" + e[1].upper() for e in prev_agg.columns.tolist()])\n",
    "    # Previous Applications: Approved Applications - only numerical features\n",
    "    approved = prev[prev['NAME_CONTRACT_STATUS_Approved'] == 1]\n",
    "    approved_agg = approved.groupby('SK_ID_CURR').agg(num_aggregations)\n",
    "    cols = approved_agg.columns.tolist()\n",
    "    approved_agg.columns = pd.Index(['APPROVED_' + e[0] + \"_\" + e[1].upper() for e in approved_agg.columns.tolist()])\n",
    "    prev_agg = prev_agg.join(approved_agg, how='left', on='SK_ID_CURR')\n",
    "    # Previous Applications: Refused Applications - only numerical features\n",
    "    refused = prev[prev['NAME_CONTRACT_STATUS_Refused'] == 1]\n",
    "    refused_agg = refused.groupby('SK_ID_CURR').agg(num_aggregations)\n",
    "    refused_agg.columns = pd.Index(['REFUSED_' + e[0] + \"_\" + e[1].upper() for e in refused_agg.columns.tolist()])\n",
    "    prev_agg = prev_agg.join(refused_agg, how='left', on='SK_ID_CURR')\n",
    "    del refused, refused_agg, approved, approved_agg, prev\n",
    "    \n",
    "    for e in cols:\n",
    "        prev_agg['NEW_RATIO_PREV_' + e[0] + \"_\" + e[1].upper()] = prev_agg['APPROVED_' + e[0] + \"_\" + e[1].upper()] / prev_agg['REFUSED_' + e[0] + \"_\" + e[1].upper()]\n",
    "    \n",
    "    gc.collect()\n",
    "    return prev_agg\n",
    "\n",
    "# Preprocess POS_CASH_balance.csv\n",
    "def pos_cash(num_rows = None, nan_as_category = True):\n",
    "    pos = pd.read_csv('../data/POS_CASH_balance.csv', nrows = num_rows)\n",
    "    pos, cat_cols = one_hot_encoder(pos, nan_as_category= True)\n",
    "    # Features\n",
    "    aggregations = {\n",
    "        'MONTHS_BALANCE': ['max', 'median', 'size'],\n",
    "        'SK_DPD': ['max', 'median'],\n",
    "        'SK_DPD_DEF': ['max', 'median']\n",
    "    }\n",
    "    for cat in cat_cols:\n",
    "        aggregations[cat] = ['median']\n",
    "    \n",
    "    pos_agg = pos.groupby('SK_ID_CURR').agg(aggregations)\n",
    "    pos_agg.columns = pd.Index(['POS_' + e[0] + \"_\" + e[1].upper() for e in pos_agg.columns.tolist()])\n",
    "    # Count pos cash accounts\n",
    "    pos_agg['POS_COUNT'] = pos.groupby('SK_ID_CURR').size()\n",
    "    del pos\n",
    "    gc.collect()\n",
    "    return pos_agg\n",
    "    \n",
    "# Preprocess installments_payments.csv\n",
    "def installments_payments(num_rows = None, nan_as_category = True):\n",
    "    ins = pd.read_csv('/media/limbo/Home-Credit/data/installments_payments.csv', nrows = num_rows)\n",
    "    ins, cat_cols = one_hot_encoder(ins, nan_as_category= True)\n",
    "    # Percentage and difference paid in each installment (amount paid and installment value)\n",
    "    ins['PAYMENT_PERC'] = ins['AMT_PAYMENT'] / ins['AMT_INSTALMENT']\n",
    "    ins['PAYMENT_DIFF'] = ins['AMT_INSTALMENT'] - ins['AMT_PAYMENT']\n",
    "    \n",
    "    # NOTE(review): the lowercase *_median and uppercase *_MEDIAN columns below are\n",
    "    # computed identically (both subtract the median); given the ind_0/ind_1 mean/median\n",
    "    # pattern used elsewhere, one of each pair was probably meant to subtract the mean -- confirm intent\n",
    "    ins['PAYMENT_PERC_median'] = ins['PAYMENT_PERC'] - ins['PAYMENT_PERC'].replace([np.inf, -np.inf], np.nan).fillna(ins['PAYMENT_PERC'].dropna().median()).median()\n",
    "    ins['PAYMENT_PERC_MEDIAN'] = ins['PAYMENT_PERC'] - ins['PAYMENT_PERC'].replace([np.inf, -np.inf], np.nan).fillna(ins['PAYMENT_PERC'].dropna().median()).median()\n",
    "    \n",
    "    ins['PAYMENT_DIFF_median'] = ins['PAYMENT_DIFF'] - ins['PAYMENT_DIFF'].replace([np.inf, -np.inf], np.nan).fillna(ins['PAYMENT_DIFF'].dropna().median()).median()\n",
    "    ins['PAYMENT_DIFF_MEDIAN'] = ins['PAYMENT_DIFF'] - ins['PAYMENT_DIFF'].replace([np.inf, -np.inf], np.nan).fillna(ins['PAYMENT_DIFF'].dropna().median()).median()\n",
    "    \n",
    "    \n",
    "    ins['pay_0'] = descretize(ins['PAYMENT_PERC'].values, 2 ** 6)\n",
    "    ins['pay_1'] = descretize(ins['PAYMENT_DIFF'].values, 2 ** 6)\n",
    "    # NOTE(review): pay_2 and pay_3 both bin PAYMENT_PERC_MEDIAN (pay_3 likely intended\n",
    "    # PAYMENT_PERC_median), so they are duplicates as written -- confirm\n",
    "    ins['pay_2'] = descretize(ins['PAYMENT_PERC_MEDIAN'].values, 2 ** 6)\n",
    "    ins['pay_3'] = descretize(ins['PAYMENT_PERC_MEDIAN'].values, 2 ** 6)\n",
    "    ins['pay_4'] = descretize(ins['PAYMENT_DIFF_median'].values, 2 ** 6)\n",
    "    ins['pay_5'] = descretize(ins['PAYMENT_DIFF_MEDIAN'].values, 2 ** 6)\n",
    "    \n",
    "    # Days past due and days before due (no negative values)\n",
    "    ins['DPD'] = ins['DAYS_ENTRY_PAYMENT'] - ins['DAYS_INSTALMENT']\n",
    "    ins['DBD'] = ins['DAYS_INSTALMENT'] - ins['DAYS_ENTRY_PAYMENT']\n",
    "    ins['DPD'] = ins['DPD'].apply(lambda x: x if x > 0 else 0)\n",
    "    ins['DBD'] = ins['DBD'].apply(lambda x: x if x > 0 else 0)\n",
    "    \n",
    "    \n",
    "    ins['day_0'] = descretize(ins['DAYS_ENTRY_PAYMENT'].values, 2 ** 6)\n",
    "    ins['day_1'] = descretize(ins['DAYS_INSTALMENT'].values, 2 ** 6)\n",
    "    ins['day_2'] = descretize(ins['DBD'].values, 2 ** 6)\n",
    "    \n",
    "    \n",
    "    # Features: Perform aggregations\n",
    "    aggregations = {\n",
    "        'NUM_INSTALMENT_VERSION': ['nunique'],\n",
    "        'DPD': ['max', 'median', 'sum'],\n",
    "        'DBD': ['max', 'median', 'sum'],\n",
    "        'PAYMENT_PERC': ['max', 'median', 'sum', 'var'],\n",
    "        'PAYMENT_DIFF': ['max', 'median', 'sum', 'var'],\n",
    "        'AMT_INSTALMENT': ['max', 'median', 'sum'],\n",
    "        'AMT_PAYMENT': ['min', 'max', 'median', 'sum'],\n",
    "        'DAYS_ENTRY_PAYMENT': ['max', 'median', 'sum']\n",
    "    }\n",
    "    for cat in cat_cols:\n",
    "        aggregations[cat] = ['median']\n",
    "    ins_agg = ins.groupby('SK_ID_CURR').agg(aggregations)\n",
    "    ins_agg.columns = pd.Index(['INSTAL_' + e[0] + \"_\" + e[1].upper() for e in ins_agg.columns.tolist()])\n",
    "    # Count installments accounts\n",
    "    ins_agg['INSTAL_COUNT'] = ins.groupby('SK_ID_CURR').size()\n",
    "    del ins\n",
    "    gc.collect()\n",
    "    return ins_agg\n",
    "\n",
    "\n",
    "# Preprocess credit_card_balance.csv\n",
    "def credit_card_balance(num_rows = None, nan_as_category = True):\n",
    "    cc = pd.read_csv('../data/credit_card_balance.csv', nrows = num_rows)\n",
    "    cc, cat_cols = one_hot_encoder(cc, nan_as_category= True)\n",
    "    # General aggregations\n",
    "    cc.drop(['SK_ID_PREV'], axis= 1, inplace = True)\n",
    "    cc_agg = cc.groupby('SK_ID_CURR').agg(['min', 'max', 'mean', 'sum', 'var'])\n",
    "    cc_agg.columns = pd.Index(['CC_' + e[0] + \"_\" + e[1].upper() for e in cc_agg.columns.tolist()])\n",
    "    # Count credit card lines\n",
    "    cc_agg['CC_COUNT'] = cc.groupby('SK_ID_CURR').size()\n",
    "    del cc\n",
    "    gc.collect()\n",
    "    return cc_agg\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Previous applications df shape: (338857, 279)\n",
      "Pos-cash balance df shape: (337252, 18)\n",
      "Installments payments df shape: (339587, 26)\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "7"
      ]
     },
     "execution_count": 10,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "prev = previous_applications(num_rows)\n",
    "print(\"Previous applications df shape:\", prev.shape)\n",
    "df = df.join(prev, how='left', on='SK_ID_CURR')\n",
    "del prev\n",
    "gc.collect()\n",
    "\n",
    "pos = pos_cash(num_rows)\n",
    "print(\"Pos-cash balance df shape:\", pos.shape)\n",
    "df = df.join(pos, how='left', on='SK_ID_CURR')\n",
    "del pos\n",
    "gc.collect()\n",
    "\n",
    "ins = installments_payments(num_rows)\n",
    "print(\"Installments payments df shape:\", ins.shape)\n",
    "df = df.join(ins, how='left', on='SK_ID_CURR')\n",
    "del ins\n",
    "gc.collect()\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Credit card balance df shape: (103558, 141)\n",
      "(356255, 1518)\n"
     ]
    }
   ],
   "source": [
    "cc = credit_card_balance(num_rows)\n",
    "print(\"Credit card balance df shape:\", cc.shape)\n",
    "df = df.join(cc, how='left', on='SK_ID_CURR')\n",
    "del cc\n",
    "gc.collect()\n",
    "\n",
    "print(df.shape)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>index</th>\n",
       "      <th>AMT_ANNUITY</th>\n",
       "      <th>AMT_CREDIT</th>\n",
       "      <th>AMT_GOODS_PRICE</th>\n",
       "      <th>AMT_INCOME_TOTAL</th>\n",
       "      <th>AMT_REQ_CREDIT_BUREAU_DAY</th>\n",
       "      <th>AMT_REQ_CREDIT_BUREAU_HOUR</th>\n",
       "      <th>AMT_REQ_CREDIT_BUREAU_MON</th>\n",
       "      <th>AMT_REQ_CREDIT_BUREAU_QRT</th>\n",
       "      <th>AMT_REQ_CREDIT_BUREAU_WEEK</th>\n",
       "      <th>...</th>\n",
       "      <th>CC_NAME_CONTRACT_STATUS_Signed_MAX</th>\n",
       "      <th>CC_NAME_CONTRACT_STATUS_Signed_MEAN</th>\n",
       "      <th>CC_NAME_CONTRACT_STATUS_Signed_SUM</th>\n",
       "      <th>CC_NAME_CONTRACT_STATUS_Signed_VAR</th>\n",
       "      <th>CC_NAME_CONTRACT_STATUS_nan_MIN</th>\n",
       "      <th>CC_NAME_CONTRACT_STATUS_nan_MAX</th>\n",
       "      <th>CC_NAME_CONTRACT_STATUS_nan_MEAN</th>\n",
       "      <th>CC_NAME_CONTRACT_STATUS_nan_SUM</th>\n",
       "      <th>CC_NAME_CONTRACT_STATUS_nan_VAR</th>\n",
       "      <th>CC_COUNT</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>0</td>\n",
       "      <td>24700.5</td>\n",
       "      <td>406597.5</td>\n",
       "      <td>351000.0</td>\n",
       "      <td>202500.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>...</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>1</td>\n",
       "      <td>35698.5</td>\n",
       "      <td>1293502.5</td>\n",
       "      <td>1129500.0</td>\n",
       "      <td>270000.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>...</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>2</td>\n",
       "      <td>6750.0</td>\n",
       "      <td>135000.0</td>\n",
       "      <td>135000.0</td>\n",
       "      <td>67500.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>...</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>3</th>\n",
       "      <td>3</td>\n",
       "      <td>29686.5</td>\n",
       "      <td>312682.5</td>\n",
       "      <td>297000.0</td>\n",
       "      <td>135000.0</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "      <td>...</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>6.0</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>4</th>\n",
       "      <td>4</td>\n",
       "      <td>21865.5</td>\n",
       "      <td>513000.0</td>\n",
       "      <td>513000.0</td>\n",
       "      <td>121500.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>0.0</td>\n",
       "      <td>...</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "      <td>NaN</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "<p>5 rows × 1518 columns</p>\n",
       "</div>"
      ],
      "text/plain": [
       "   index  AMT_ANNUITY  AMT_CREDIT  AMT_GOODS_PRICE  AMT_INCOME_TOTAL  \\\n",
       "0      0      24700.5    406597.5         351000.0          202500.0   \n",
       "1      1      35698.5   1293502.5        1129500.0          270000.0   \n",
       "2      2       6750.0    135000.0         135000.0           67500.0   \n",
       "3      3      29686.5    312682.5         297000.0          135000.0   \n",
       "4      4      21865.5    513000.0         513000.0          121500.0   \n",
       "\n",
       "   AMT_REQ_CREDIT_BUREAU_DAY  AMT_REQ_CREDIT_BUREAU_HOUR  \\\n",
       "0                        0.0                         0.0   \n",
       "1                        0.0                         0.0   \n",
       "2                        0.0                         0.0   \n",
       "3                        NaN                         NaN   \n",
       "4                        0.0                         0.0   \n",
       "\n",
       "   AMT_REQ_CREDIT_BUREAU_MON  AMT_REQ_CREDIT_BUREAU_QRT  \\\n",
       "0                        0.0                        0.0   \n",
       "1                        0.0                        0.0   \n",
       "2                        0.0                        0.0   \n",
       "3                        NaN                        NaN   \n",
       "4                        0.0                        0.0   \n",
       "\n",
       "   AMT_REQ_CREDIT_BUREAU_WEEK    ...     CC_NAME_CONTRACT_STATUS_Signed_MAX  \\\n",
       "0                         0.0    ...                                    NaN   \n",
       "1                         0.0    ...                                    NaN   \n",
       "2                         0.0    ...                                    NaN   \n",
       "3                         NaN    ...                                    0.0   \n",
       "4                         0.0    ...                                    NaN   \n",
       "\n",
       "   CC_NAME_CONTRACT_STATUS_Signed_MEAN  CC_NAME_CONTRACT_STATUS_Signed_SUM  \\\n",
       "0                                  NaN                                 NaN   \n",
       "1                                  NaN                                 NaN   \n",
       "2                                  NaN                                 NaN   \n",
       "3                                  0.0                                 0.0   \n",
       "4                                  NaN                                 NaN   \n",
       "\n",
       "   CC_NAME_CONTRACT_STATUS_Signed_VAR  CC_NAME_CONTRACT_STATUS_nan_MIN  \\\n",
       "0                                 NaN                              NaN   \n",
       "1                                 NaN                              NaN   \n",
       "2                                 NaN                              NaN   \n",
       "3                                 0.0                              0.0   \n",
       "4                                 NaN                              NaN   \n",
       "\n",
       "   CC_NAME_CONTRACT_STATUS_nan_MAX  CC_NAME_CONTRACT_STATUS_nan_MEAN  \\\n",
       "0                              NaN                               NaN   \n",
       "1                              NaN                               NaN   \n",
       "2                              NaN                               NaN   \n",
       "3                              0.0                               0.0   \n",
       "4                              NaN                               NaN   \n",
       "\n",
       "   CC_NAME_CONTRACT_STATUS_nan_SUM  CC_NAME_CONTRACT_STATUS_nan_VAR  CC_COUNT  \n",
       "0                              NaN                              NaN       NaN  \n",
       "1                              NaN                              NaN       NaN  \n",
       "2                              NaN                              NaN       NaN  \n",
       "3                              0.0                              0.0       6.0  \n",
       "4                              NaN                              NaN       NaN  \n",
       "\n",
       "[5 rows x 1518 columns]"
      ]
     },
     "execution_count": 12,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Preview the merged feature frame (train + test rows, 1518 columns).\n",
    "df.head()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Destinations for the level-1 stacking outputs (test predictions and\n",
    "# out-of-fold validation predictions) and the number of CV folds.\n",
    "test_file_path = \"Level_1_stack/test_xgb-0.csv\"\n",
    "validation_file_path = 'Level_1_stack/validation_xgb-0.csv'\n",
    "num_folds = 5"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Re-read the raw training table only to recover the train/test split point\n",
    "# (n_train) used to slice the merged frame in the modelling cell below.\n",
    "# NOTE(review): hard-coded absolute local path -- consider a configurable\n",
    "# DATA_DIR so the notebook runs on other machines.\n",
    "train = pd.read_csv('/media/limbo/Home-Credit/data/application_train.csv.zip', nrows= num_rows)\n",
    "n_train = train.shape[0]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 25,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Starting LightGBM. Train shape: (307511, 1518), test shape: (48744, 1518)\n",
      "(307511, 1515)\n",
      "(246008, 1515) (61503, 1515) (48744, 1515)\n",
      "Fold  1 AUC : 0.788798\n",
      "(246009, 1515) (61502, 1515) (48744, 1515)\n",
      "Fold  2 AUC : 0.789005\n",
      "(246009, 1515) (61502, 1515) (48744, 1515)\n",
      "Fold  3 AUC : 0.781536\n",
      "(246009, 1515) (61502, 1515) (48744, 1515)\n",
      "Fold  4 AUC : 0.783756\n",
      "(246009, 1515) (61502, 1515) (48744, 1515)\n",
      "Fold  5 AUC : 0.787710\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "14"
      ]
     },
     "execution_count": 25,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Level-1 XGBoost model: K-fold CV over the merged feature frame.\n",
    "# Per fold the categoricals are one-hot encoded with an encoder fit on the\n",
    "# training split only (avoids leaking validation/test statistics), a booster\n",
    "# is trained with early stopping, and out-of-fold / averaged test predictions\n",
    "# are written out as stacking inputs.\n",
    "encoding = 'ohe'\n",
    "\n",
    "# The merged frame holds the train rows first, then the test rows.\n",
    "train_df = df.iloc[0:n_train]\n",
    "test_df = df.iloc[n_train:]\n",
    "\n",
    "print(\"Starting XGBoost. Train shape: {}, test shape: {}\".format(train_df.shape, test_df.shape))\n",
    "gc.collect()\n",
    "# Cross validation model\n",
    "folds = KFold(n_splits=num_folds, shuffle=True, random_state=1001)\n",
    "# Arrays to accumulate out-of-fold and averaged test predictions\n",
    "oof_preds = np.zeros(train_df.shape[0])\n",
    "sub_preds = np.zeros(test_df.shape[0])\n",
    "feature_importance_df = pd.DataFrame()\n",
    "# Exclude identifiers and the target from the model features.\n",
    "feats = [f for f in train_df.columns if f not in ['TARGET','SK_ID_CURR','SK_ID_BUREAU','SK_ID_PREV','index']]\n",
    "\n",
    "# Loop-invariant: the set of categorical columns does not change per fold,\n",
    "# so compute it once instead of inside the CV loop.\n",
    "categorical_columns = [col for col in df.columns if df[col].dtype == 'object']\n",
    "\n",
    "print(train_df[feats].shape)\n",
    "for n_fold, (train_idx, valid_idx) in enumerate(folds.split(train_df[feats], train_df['TARGET'])):\n",
    "\n",
    "        if encoding == 'ohe':\n",
    "            # Fit the encoder on the training split only.\n",
    "            # NOTE(review): impute_missing was removed in newer\n",
    "            # category_encoders releases -- pin the version or drop the kwarg.\n",
    "            enc = ce.OneHotEncoder(impute_missing=True, cols=categorical_columns).fit(train_df[feats].iloc[train_idx],\n",
    "                                                                                       train_df['TARGET'].iloc[train_idx])\n",
    "            x_train = enc.transform(train_df[feats].iloc[train_idx])\n",
    "            x_valid = enc.transform(train_df[feats].iloc[valid_idx])\n",
    "            x_test = enc.transform(test_df[feats])\n",
    "            print(x_train.shape, x_valid.shape, x_test.shape)\n",
    "\n",
    "            dtest = xgb.DMatrix(x_test.values)\n",
    "            dtrain = xgb.DMatrix(x_train.values,\n",
    "                         label=train_df['TARGET'].iloc[train_idx].values)\n",
    "            dvalid = xgb.DMatrix(x_valid.values,\n",
    "                         label=train_df['TARGET'].iloc[valid_idx].values)\n",
    "        else:\n",
    "            # Fail loudly instead of hitting a NameError on dtrain below.\n",
    "            raise ValueError('unsupported encoding: {}'.format(encoding))\n",
    "\n",
    "        # NOTE(review): no 'objective' is set, so the library default applies;\n",
    "        # AUC is rank-based so the scores still work for stacking, but\n",
    "        # consider 'binary:logistic' for calibrated probabilities.\n",
    "        params = dict(\n",
    "          booster=\"gbtree\",\n",
    "          eval_metric = \"auc\",\n",
    "          nthread=4,\n",
    "          eta=0.05,\n",
    "          max_depth=6,\n",
    "          min_child_weight = 30,\n",
    "          gamma=0,\n",
    "          subsample = 0.85,\n",
    "          colsample_bytree = 0.7,\n",
    "          colsample_bylevel = 0.632,\n",
    "          alpha=0)\n",
    "\n",
    "        clf = xgb.train(\n",
    "            params,\n",
    "            dtrain,\n",
    "            num_boost_round=30000,\n",
    "            evals=[(dtrain, 'train'), (dvalid, 'valid')],\n",
    "            early_stopping_rounds=100,\n",
    "            verbose_eval=False\n",
    "        )\n",
    "\n",
    "        # NOTE(review): predict() without an explicit iteration limit may use\n",
    "        # all trained rounds rather than the early-stopping optimum on older\n",
    "        # XGBoost versions -- verify against the installed version.\n",
    "        oof_preds[valid_idx] = clf.predict(dvalid)\n",
    "        sub_preds += clf.predict(dtest) / folds.n_splits\n",
    "\n",
    "        print('Fold %2d AUC : %.6f' % (n_fold + 1, roc_auc_score(train_df['TARGET'].iloc[valid_idx].values, oof_preds[valid_idx])))\n",
    "        # Free per-fold DMatrices; dtest was previously rebuilt every fold\n",
    "        # without being released.\n",
    "        del clf, dtrain, dvalid, dtest\n",
    "        gc.collect()\n",
    "\n",
    "# Write the averaged test predictions for the stacker.\n",
    "sub_df = test_df[['SK_ID_CURR']].copy()\n",
    "sub_df['TARGET'] = sub_preds\n",
    "sub_df[['SK_ID_CURR', 'TARGET']].to_csv(test_file_path, index= False)\n",
    "\n",
    "# Write the out-of-fold predictions for the stacker; no need to copy the\n",
    "# real TARGET column just to overwrite it.\n",
    "val_df = train_df[['SK_ID_CURR']].copy()\n",
    "val_df['TARGET'] = oof_preds\n",
    "val_df[['SK_ID_CURR', 'TARGET']].to_csv(validation_file_path, index= False)\n",
    "\n",
    "gc.collect()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Manual garbage collection between experiments (scratch cell).\n",
    "gc.collect()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.5.3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
