{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import pandas as pd\n",
    "import numpy as np\n",
    "from sklearn.base import BaseEstimator, TransformerMixin, RegressorMixin\n",
    "import lightgbm as lgb\n",
    "from sklearn.pipeline import Pipeline\n",
    "from sklearn.model_selection import train_test_split, StratifiedKFold, RandomizedSearchCV\n",
    "from sklearn.metrics import f1_score, make_scorer\n",
    "from scipy.stats import randint as sp_randint\n",
    "from scipy.stats import uniform as sp_uniform"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Load the competition data; paths follow Kaggle's ../input layout.\n",
    "# sample_submission keeps Id as the index so predictions can be written\n",
    "# back by position later.\n",
    "train_data = pd.read_csv(\"../input/train.csv\")\n",
    "test_data = pd.read_csv(\"../input/test.csv\")\n",
    "sub_orig = pd.read_csv(\"../input/sample_submission.csv\", index_col = 0)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Keep the raw poverty label aside; the pipeline receives it as y.\n",
    "target = train_data['Target']"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(9557, 143)"
      ]
     },
     "execution_count": 4,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Sanity check: train has one more column than test (the Target label).\n",
    "train_data.shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(23856, 142)"
      ]
     },
     "execution_count": 5,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Test set: same features as train minus the Target column.\n",
    "test_data.shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "# rez_esc == 99 appears to be a sentinel in the test set; cap it at 5,\n",
    "# presumably the maximum legitimate value - TODO confirm against train's range.\n",
    "test_data.loc[test_data['rez_esc'] == 99.0 , 'rez_esc'] = 5"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "class MissingValuesImputer(BaseEstimator, TransformerMixin):\n",
    "    \"\"\"Impute missing values.\n",
    "\n",
    "    Columns listed in ``impute_zero_columns`` are filled with 0. A missing\n",
    "    ``meaneduc`` is replaced by the mean ``escolari`` of the rows of the\n",
    "    same household (``idhogar``) that are also missing ``meaneduc``, and\n",
    "    ``SQBmeaned`` is kept consistent as its square.\n",
    "\n",
    "    Note: ``X`` is modified in place and also returned.\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, impute_zero_columns):\n",
    "        self.impute_zero_columns = impute_zero_columns\n",
    "\n",
    "    def fit(self, X, y = None):\n",
    "        print(\"Missing Values Imputer\")\n",
    "        return self\n",
    "\n",
    "    def transform(self, X, y = None):\n",
    "\n",
    "        # Fill missing values for v18q1, v2a1 and rez_esc\n",
    "        for column in self.impute_zero_columns:\n",
    "            X[column] = X[column].fillna(0)\n",
    "\n",
    "        # For meaneduc we use the average schooling of household adults.\n",
    "        # Use locals rather than instance attributes so transform() stays\n",
    "        # stateless and idempotent (the original cached these on self,\n",
    "        # leaking per-call state into the estimator).\n",
    "        rows_missing_meaneduc = X[pd.isnull(X['meaneduc'])]\n",
    "        mean_escolari_by_household = dict(\n",
    "            rows_missing_meaneduc.groupby('idhogar')['escolari'].apply(np.mean))\n",
    "        for row_index in rows_missing_meaneduc.index:\n",
    "            row_idhogar = X.at[row_index, 'idhogar']\n",
    "            household_mean = mean_escolari_by_household[row_idhogar]\n",
    "            X.at[row_index, 'meaneduc'] = household_mean\n",
    "            X.at[row_index, 'SQBmeaned'] = np.square(household_mean)\n",
    "        return X"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
    "class RemoveObjectTransformer(BaseEstimator, TransformerMixin):\n",
    "    \"\"\"Rebuild object-dtype columns numerically from their squared twins.\n",
    "\n",
    "    ``dependency`` is reconstructed as sqrt(SQBdependency) and the source\n",
    "    column is then dropped. ``target``/``source`` are parallel lists so\n",
    "    further pairs can be added later.\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self):\n",
    "        self.target = ['dependency']\n",
    "        self.source = ['SQBdependency']\n",
    "\n",
    "    def fit(self, X, y = None):\n",
    "        print(\"Remove Object Imputer\")\n",
    "        return self\n",
    "\n",
    "    def transform(self, X, y = None):\n",
    "        for target_column, source_column in zip(self.target, self.source):\n",
    "            X[target_column] = np.sqrt(X[source_column])\n",
    "        # Drop all source columns once, after every target is rebuilt.\n",
    "        # (The original dropped the whole source list inside the loop,\n",
    "        # which would raise KeyError on the second iteration if more than\n",
    "        # one target/source pair were ever configured.)\n",
    "        X.drop(self.source, axis=1, inplace=True)\n",
    "        return X"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [],
   "source": [
    "def calculate_edu(row):\n",
    "    \"\"\"Collapse edjefe/edjefa into one numeric head-of-household education.\n",
    "\n",
    "    The raw columns mix numeric strings with the flags 'yes' and 'no';\n",
    "    the combinations below mirror how those flags are decoded.\n",
    "    \"\"\"\n",
    "    jefe = row['edjefe']\n",
    "    jefa = row['edjefa']\n",
    "    # Exactly one head flagged 'yes' -> education level of 1.\n",
    "    if (jefe, jefa) in (('yes', 'no'), ('no', 'yes')):\n",
    "        return 1\n",
    "    # Neither head has any schooling recorded.\n",
    "    if (jefe, jefa) == ('no', 'no'):\n",
    "        return 0\n",
    "    # Male-head column is just a flag, so the female-head column is numeric.\n",
    "    if jefe in ('yes', 'no'):\n",
    "        return pd.to_numeric(jefa)\n",
    "    # Otherwise the male-head column itself holds the number.\n",
    "    return pd.to_numeric(jefe)\n",
    "\n",
    "\n",
    "class CategoricalVariableTransformer(BaseEstimator, TransformerMixin):\n",
    "    \"\"\"Fold the edjefe/edjefa text columns into numeric house_holder_edu.\"\"\"\n",
    "\n",
    "    def __init__(self):\n",
    "        pass\n",
    "\n",
    "    def fit(self, X, y = None):\n",
    "        print(\"Categorical Variables Transformer\")\n",
    "        return self\n",
    "\n",
    "    def transform(self, X, y = None):\n",
    "        household_edu = X.apply(calculate_edu, axis=1)\n",
    "        X['house_holder_edu'] = household_edu.values\n",
    "        X.drop(['edjefe', 'edjefa'], axis=1, inplace=True)\n",
    "        return X"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 26,
   "metadata": {},
   "outputs": [],
   "source": [
    "class UnnecessaryColumnsRemoverTransformer(BaseEstimator, TransformerMixin):\n",
    "    \"\"\"Drop the author-selected redundant columns plus every SQB* feature.\n",
    "\n",
    "    The static list is fixed at construction; the SQB* (squared) columns\n",
    "    are discovered from the data at fit time.\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, axis = 1):\n",
    "        print(\"Unnecessary Columns Remover Transformer\")\n",
    "        self.axis = axis\n",
    "        self.unnecessary_columns = [\n",
    "            'r4t3', 'tamhog', 'tamviv', 'hogar_total', 'v18q', 'v14a',\n",
    "            'mobilephone', 'energcocinar1', 'sanitario6', 'Id',\n",
    "            'estadocivil7', 'lugar1', 'area1', 'female', 'agesq',\n",
    "        ]\n",
    "\n",
    "    def fit(self, X, y = None):\n",
    "        # Rebuild the full drop list from the static base on every call so\n",
    "        # that refitting does not keep appending duplicate SQB* names (the\n",
    "        # original extended self.unnecessary_columns in place each fit).\n",
    "        self.columns_to_drop_ = list(self.unnecessary_columns)\n",
    "        self.columns_to_drop_.extend(\n",
    "            col for col in X.columns.tolist() if 'SQB' in col)\n",
    "        return self\n",
    "\n",
    "    def transform(self, X, y = None):\n",
    "        X = X.drop(self.columns_to_drop_, axis = self.axis)\n",
    "        return X"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 27,
   "metadata": {},
   "outputs": [],
   "source": [
    "class FeatureEngineeringTransformer(BaseEstimator, TransformerMixin):\n",
    "    \"\"\"Derive household-level ratio and aggregate features.\n",
    "\n",
    "    Adds per-room / per-bedroom / per-person ratios and percentage\n",
    "    features, plus household-level aggregations (mean for boolean\n",
    "    individual features; mean/max/min/sum for ordered ones) joined back\n",
    "    on idhogar. Finally drops idhogar and most parentesco* columns;\n",
    "    parentesco1 is kept because LGBClassifierCV uses it to select\n",
    "    household heads.\n",
    "\n",
    "    NOTE(review): several denominators (rooms, bedrooms, adult, escolari,\n",
    "    r4h3, r4m3, r4t1, r4t2, r4t3, age) can be 0, producing inf/NaN in the\n",
    "    ratio columns - confirm the downstream model tolerates these.\n",
    "    \"\"\"\n",
    "    \n",
    "    def __init__(self, axis = 1):\n",
    "        self.axis = axis\n",
    "        \n",
    "        # individual level boolean features\n",
    "        self.aggr_mean_list = ['rez_esc', 'dis', 'male', 'female', 'estadocivil1', 'estadocivil2',\n",
    "                               'estadocivil3', 'estadocivil4', 'estadocivil5', 'estadocivil6',\n",
    "                               'estadocivil7', 'parentesco2', 'parentesco3', 'parentesco4',\n",
    "                               'parentesco5', 'parentesco6', 'parentesco7', 'parentesco8',\n",
    "                               'parentesco9', 'parentesco10', 'parentesco11', 'parentesco12',\n",
    "                               'instlevel1', 'instlevel2', 'instlevel3', 'instlevel4', 'instlevel5',\n",
    "                               'instlevel6', 'instlevel7', 'instlevel8', 'instlevel9']\n",
    "\n",
    "        # individual level ordered features\n",
    "        # ('escolari_age' does not exist yet; transform() creates it\n",
    "        # before the aggregation below uses this list)\n",
    "        self.individual_ordered_features = ['escolari', 'age', 'escolari_age']\n",
    "        \n",
    "    def fit(self, X, y = None):\n",
    "        print(\"Feature Engineering Transformer\")\n",
    "        self.more_columns_to_drop = [\n",
    "            [col for col in X.columns.tolist() if 'parentesco' in col and 'parentesco1' not in col],\n",
    "            ['idhogar']\n",
    "        ]\n",
    "        self.aggregate_features = (['mean', 'max', 'min', 'sum'])\n",
    "        return self\n",
    "    \n",
    "    def transform(self, X, y = None):\n",
    "        # Household composition counts and the derived dependency ratio.\n",
    "        X['adult'] = X['hogar_adul'] - X['hogar_mayor']\n",
    "        X['dependency_count'] = X['hogar_nin'] + X['hogar_mayor']\n",
    "        \n",
    "        X['dependency'] = X['dependency_count'] / X['adult']\n",
    "        X['dependency'] = X['dependency'].fillna(0)\n",
    "        \n",
    "        X['child_percent'] = X['hogar_nin'] / X['hogar_total']\n",
    "        X['elder_percent'] = X['hogar_mayor'] / X['hogar_total']\n",
    "        X['adult_percent'] = X['hogar_adul'] / X['hogar_total']\n",
    "\n",
    "        X['rent_per_adult'] = X['v2a1'] / X['hogar_adul']\n",
    "        X['rent_per_person'] = X['v2a1'] / X['hhsize']\n",
    "\n",
    "        X['overcrowding_room_and_bedroom'] = (X['hacdor'] + X['hacapo']) / 2\n",
    "        X['no_appliances'] = X['refrig'] + X['computer'] + X['television']\n",
    "\n",
    "        X['r4h1_percent_in_male'] = X['r4h1'] / X['r4h3']\n",
    "        X['r4m1_percent_in_female'] = X['r4m1'] / X['r4m3']\n",
    "        X['r4h1_percent_in_total'] = X['r4h1'] / X['hhsize']\n",
    "        X['r4m1_percent_in_total'] = X['r4m1'] / X['hhsize']\n",
    "        X['r4t1_percent_in_total'] = X['r4t1'] / X['hhsize']\n",
    "\n",
    "        # Per-room densities.\n",
    "        X['rent_per_room'] = X['v2a1'] / X['rooms']\n",
    "        X['bedroom_per_room'] = X['bedrooms'] / X['rooms']\n",
    "        X['elder_per_room'] = X['hogar_mayor'] / X['rooms']\n",
    "        X['adults_per_room'] = X['adult'] / X['rooms']\n",
    "        X['child_per_room'] = X['hogar_nin'] / X['rooms']\n",
    "        X['male_per_room'] = X['r4h3'] / X['rooms']\n",
    "        X['female_per_room'] = X['r4m3'] / X['rooms']\n",
    "        X['room_per_person_household'] = X['hhsize'] / X['rooms']\n",
    "\n",
    "        # Per-bedroom densities.\n",
    "        X['rent_per_bedroom'] = X['v2a1'] / X['bedrooms']\n",
    "        X['edler_per_bedroom'] = X['hogar_mayor'] / X['bedrooms']\n",
    "        X['adults_per_bedroom'] = X['adult'] / X['bedrooms']\n",
    "        X['child_per_bedroom'] = X['hogar_nin'] / X['bedrooms']\n",
    "        X['male_per_bedroom'] = X['r4h3'] / X['bedrooms']\n",
    "        X['female_per_bedroom'] = X['r4m3'] / X['bedrooms']\n",
    "        X['bedrooms_per_person_household'] = X['hhsize'] / X['bedrooms']\n",
    "\n",
    "        X['tablet_per_person_household'] = X['v18q1'] / X['hhsize']\n",
    "        X['phone_per_person_household'] = X['qmobilephone'] / X['hhsize']\n",
    "\n",
    "        X['age_12_19'] = X['hogar_nin'] - X['r4t1']\n",
    "\n",
    "        # Created here so the household aggregation below can use it.\n",
    "        X['escolari_age'] = X['escolari'] / X['age']\n",
    "\n",
    "        X['rez_esc_escolari'] = X['rez_esc'] / X['escolari']\n",
    "        X['rez_esc_r4t1'] = X['rez_esc'] / X['r4t1']\n",
    "        X['rez_esc_r4t2'] = X['rez_esc'] / X['r4t2']\n",
    "        X['rez_esc_r4t3'] = X['rez_esc'] / X['r4t3']\n",
    "        X['rez_esc_age'] = X['rez_esc'] / X['age']\n",
    "        \n",
    "        # Create individual-level mean features\n",
    "        grouped_mean_df = X.groupby('idhogar')[self.aggr_mean_list]\n",
    "        grouped_mean_df = grouped_mean_df.agg((['mean']))\n",
    "        \n",
    "        # Create individual-level ordered features\n",
    "        grouped_ordered_df = X.groupby('idhogar')[self.individual_ordered_features]\n",
    "        grouped_ordered_df = grouped_ordered_df.agg(self.aggregate_features)\n",
    "        \n",
    "        # Joining frames with MultiIndex columns yields tuple column names\n",
    "        # like ('escolari', 'mean') and triggers pandas' 'merging between\n",
    "        # different levels' UserWarning seen in this notebook's output.\n",
    "        X = X.join(grouped_mean_df, on = 'idhogar')\n",
    "        X = X.join(grouped_ordered_df, on = 'idhogar')\n",
    "        \n",
    "        # Finally remove the other parentesco columns since we are only going to use only heads of\n",
    "        # households for our scoring\n",
    "        # NOTE(review): the substring test in fit() ('parentesco1' not in col)\n",
    "        # also keeps parentesco10/11/12 because they contain 'parentesco1';\n",
    "        # use col != 'parentesco1' if those should be dropped as well.\n",
    "        for col in self.more_columns_to_drop:\n",
    "            X = X.drop(col, axis = self.axis) \n",
    "        \n",
    "        return X"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 28,
   "metadata": {},
   "outputs": [],
   "source": [
    "class CorrelationOutputer(BaseEstimator, TransformerMixin):\n",
    "    \"\"\"Diagnostic pass-through: report Target correlations, then drop Target.\n",
    "\n",
    "    When 'Target' is present (training pass) the 20 most positive and\n",
    "    most negative correlations are printed and the column is removed;\n",
    "    the frame's shape is always reported.\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self):\n",
    "        pass\n",
    "\n",
    "    def fit(self, X, y = None):\n",
    "        return self\n",
    "\n",
    "    def transform(self, X, y = None):\n",
    "        if 'Target' in X.columns.values:\n",
    "            target_correlation = X.corr()['Target'].sort_values(ascending=False)\n",
    "            print(f'The most 20 positive feature: \\n{target_correlation.head(20)}')\n",
    "            print(f'The most 20 negative feature: \\n{target_correlation.tail(20)}')\n",
    "            X.drop(['Target'], axis=1, inplace=True)\n",
    "\n",
    "        print(f'X has {X.shape[0]} rows, and {X.shape[1]} features')\n",
    "        return X"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 29,
   "metadata": {},
   "outputs": [],
   "source": [
    "class LGBClassifierCV(BaseEstimator, RegressorMixin):\n",
    "    \"\"\"Cross-validated LightGBM classifier used as the final pipeline step.\n",
    "\n",
    "    fit() keeps only heads of households (parentesco1 == 1), shifts the\n",
    "    1-4 Target down to 0-3, and then - depending on the constructor\n",
    "    flags - runs a randomized parameter search, a single train/validation\n",
    "    split, and/or a stratified K-fold CV. Every fitted LGBMClassifier is\n",
    "    stored in self.estimators_; predict() averages their predicted class\n",
    "    labels.\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self,\n",
    "                 axis = 0,\n",
    "                 lgb_params = None,\n",
    "                 fit_params = None,\n",
    "                 cv = 3,\n",
    "                 perform_random_search = False,\n",
    "                 use_train_test_split = False,\n",
    "                 use_kfold_split = True):\n",
    "        self.axis = axis\n",
    "        self.lgb_params = lgb_params\n",
    "        self.fit_params = fit_params\n",
    "        self.cv = cv\n",
    "        self.perform_random_search = perform_random_search\n",
    "        self.use_train_test_split = use_train_test_split\n",
    "        self.use_kfold_split = use_kfold_split\n",
    "\n",
    "    @property\n",
    "    def feature_importances_(self):\n",
    "        # Mean importance over all fitted estimators.\n",
    "        feature_importances = []\n",
    "        for estimator in self.estimators_:\n",
    "            feature_importances.append(\n",
    "                estimator.feature_importances_\n",
    "            )\n",
    "        return np.mean(feature_importances, axis = 0)\n",
    "\n",
    "    @property\n",
    "    def evals_result_(self):\n",
    "        # Per-estimator evaluation histories.\n",
    "        evals_result = []\n",
    "        for estimator in self.estimators_:\n",
    "            evals_result.append(\n",
    "                estimator.evals_result_\n",
    "            )\n",
    "        return np.array(evals_result)\n",
    "\n",
    "    @property\n",
    "    def best_scores_(self):\n",
    "        # Validation macro-F1 of each estimator at its best iteration.\n",
    "        best_scores = []\n",
    "        for estimator in self.estimators_:\n",
    "            best_scores.append(\n",
    "                estimator.best_score_['validation']['macroF1']\n",
    "            )\n",
    "        return np.array(best_scores)\n",
    "\n",
    "    @property\n",
    "    def cv_scores_(self):\n",
    "        return self.best_scores_\n",
    "\n",
    "    @property\n",
    "    def cv_score_(self):\n",
    "        # Summary number: mean validation macro-F1 across estimators.\n",
    "        return np.mean(self.best_scores_)\n",
    "\n",
    "    @property\n",
    "    def best_iterations_(self):\n",
    "        best_iterations = []\n",
    "        for estimator in self.estimators_:\n",
    "            best_iterations.append(\n",
    "                estimator.best_iteration_\n",
    "            )\n",
    "        return np.array(best_iterations)\n",
    "\n",
    "    @property\n",
    "    def best_iteration_(self):\n",
    "        return np.round(np.mean(self.best_iterations_))\n",
    "\n",
    "    def find_best_params_(self, X, y):\n",
    "        \"\"\"Randomized search over LightGBM hyper-parameters.\n",
    "\n",
    "        Returns the best parameter dict and also pickles it to disk.\n",
    "        \"\"\"\n",
    "        # Local stdlib import: the notebook never imports pickle at the\n",
    "        # top, so the dump below raised NameError whenever\n",
    "        # perform_random_search was enabled.\n",
    "        import pickle\n",
    "\n",
    "        # Define a search space for the parameters\n",
    "        lgb_search_params = {\n",
    "            'num_leaves': sp_randint(20, 100),\n",
    "            'min_child_samples': sp_randint(40, 100), \n",
    "            'min_child_weight': [1e-5, 1e-3, 1e-2, 1e-1, 1, 1e1, 1e2, 1e3, 1e4],\n",
    "            'subsample': sp_uniform(loc = 0.75, scale = 0.25), \n",
    "            'colsample_bytree': sp_uniform(loc = 0.8, scale = 0.15),\n",
    "            'reg_alpha': [0, 1e-3, 1e-1, 1, 10, 50, 100],\n",
    "            'reg_lambda': [0, 1e-3, 1e-1, 1, 10, 50, 100]\n",
    "        }\n",
    "\n",
    "        x_train, x_val, y_train, y_val = train_test_split(X, y, test_size = 0.10, random_state = 42, stratify = y)\n",
    "        F1_scorer = make_scorer(f1_score, greater_is_better = True, average = 'macro')\n",
    "\n",
    "        lgb_model = lgb.LGBMClassifier(**self.lgb_params)\n",
    "        self.fit_params[\"eval_set\"] = [(x_train, y_train), (x_val, y_val)]\n",
    "        self.fit_params[\"verbose\"] = 200\n",
    "\n",
    "        # NOTE(review): the fit_params= constructor argument was removed in\n",
    "        # newer scikit-learn (pass them to rs.fit(...) instead) - confirm\n",
    "        # against the installed version before enabling this path.\n",
    "        rs = RandomizedSearchCV(estimator = lgb_model, \n",
    "                                param_distributions = lgb_search_params, \n",
    "                                n_iter = 100,\n",
    "                                scoring = F1_scorer,\n",
    "                                cv = 5,\n",
    "                                refit = True,\n",
    "                                random_state = 314,\n",
    "                                verbose = False,\n",
    "                                fit_params = self.fit_params)\n",
    "        \n",
    "        # Fit the random search\n",
    "        _ = rs.fit(x_train, y_train)\n",
    "        \n",
    "        print(\"Optimal LGB parameters:\")\n",
    "        print(rs.best_params_)\n",
    "        with open(\"lgb_best_params.pickle\", \"wb\") as lgb_best_params:\n",
    "            pickle.dump(rs.best_params_, lgb_best_params)\n",
    "        \n",
    "        return rs.best_params_\n",
    "\n",
    "    def fit(self, X, y, **fit_params):\n",
    "        \"\"\"Fit one or more LGBMClassifiers on household heads only.\n",
    "\n",
    "        ``**fit_params`` is accepted for Pipeline compatibility but is not\n",
    "        used; per-model fit arguments come from self.fit_params.\n",
    "        \"\"\"\n",
    "        print(\"LGBClassifierCV\")\n",
    "        \n",
    "        # Use only heads of households for scoring\n",
    "        X.insert(0, 'Target', y)\n",
    "        X = X.query('parentesco1 == 1')\n",
    "        y = X['Target'] - 1\n",
    "        # keyword axis=1: the positional form is deprecated in pandas\n",
    "        X = X.drop(['Target', 'parentesco1'], axis = 1)\n",
    "        print(\"Number of columns in train - \" + str(X.shape[1]))\n",
    "        \n",
    "        self.estimators_ = []\n",
    "        \n",
    "        # Use the best parameters to fit a model to whole data\n",
    "        if self.perform_random_search:\n",
    "            self.lgb_optimal_params = self.find_best_params_(X, y)\n",
    "            \n",
    "        # Use a simple train-test split. I have found that this gives a better local CV score than K folds.\n",
    "        if self.use_train_test_split:\n",
    "            x_train, x_val, y_train, y_val = train_test_split(X, y, test_size = 0.1, random_state = 0)\n",
    "            \n",
    "            lgb_model = lgb.LGBMClassifier(**self.lgb_params)\n",
    "            if self.perform_random_search:\n",
    "                lgb_model.set_params(**self.lgb_optimal_params)\n",
    "            \n",
    "            lgb_model.fit(\n",
    "                    x_train, y_train,\n",
    "                    eval_set = [(x_train, y_train), (x_val, y_val)],\n",
    "                    **self.fit_params\n",
    "            )\n",
    "            print(\"Train F1 - \"\n",
    "                  + str(lgb_model.best_score_['train']['macroF1'])\n",
    "                  + \"   \"\n",
    "                  + \"Validation F1 - \"\n",
    "                  + str(lgb_model.best_score_['validation']['macroF1']))\n",
    "            self.estimators_.append(lgb_model)\n",
    "            \n",
    "        # When not using random search to tune parameters, proceed with a simple Stratified Kfold CV\n",
    "        if self.use_kfold_split:\n",
    "            # NOTE(review): no random_state on the shuffled folds, so\n",
    "            # repeated runs produce different splits - confirm if intended.\n",
    "            kf = StratifiedKFold(n_splits = self.cv, shuffle = True)\n",
    "            for fold_index, (train, valid) in enumerate(kf.split(X, y)):\n",
    "                print(\"Train Fold Index - \" + str(fold_index))\n",
    "\n",
    "                lgb_model = lgb.LGBMClassifier(**self.lgb_params)\n",
    "                if self.perform_random_search:\n",
    "                    lgb_model.set_params(**self.lgb_optimal_params)\n",
    "\n",
    "                lgb_model.fit(\n",
    "                        X.iloc[train], y.iloc[train],\n",
    "                        eval_set = [(X.iloc[train], y.iloc[train]), (X.iloc[valid], y.iloc[valid])],\n",
    "                        **self.fit_params\n",
    "                )\n",
    "                print(\"Train F1 - \"\n",
    "                      + str(lgb_model.best_score_['train']['macroF1'])\n",
    "                      + \"   \"\n",
    "                      + \"Validation F1 - \"\n",
    "                      + str(lgb_model.best_score_['validation']['macroF1']))\n",
    "\n",
    "                self.estimators_.append(lgb_model)\n",
    "        return self\n",
    "\n",
    "    def predict(self, X):\n",
    "        \"\"\"Predict class labels (0-3) averaged over all fitted estimators.\"\"\"\n",
    "        # Remove this column since we are using only heads of households for scoring\n",
    "        X = X.drop('parentesco1', axis = 1)\n",
    "        \n",
    "        # When not using random search, use voting to get predictions from all CV estimators.\n",
    "        # NOTE(review): np.mean over class labels is not majority voting; it\n",
    "        # is exact with a single estimator, but with several it can yield\n",
    "        # averaged labels that truncate oddly - consider scipy.stats.mode.\n",
    "        y_pred = []\n",
    "        for estimator_index, estimator in enumerate(self.estimators_):\n",
    "            print(\"Estimator Index - \" + str(estimator_index))\n",
    "            y_pred.append(estimator.predict(X))\n",
    "        return np.mean(y_pred, axis = self.axis).astype(int)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 30,
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_lgb_params():\n",
    "    \"\"\"Build (lgb_params, fit_params) for LGBMClassifier and its fit().\"\"\"\n",
    "    \n",
    "    def evaluate_macroF1_lgb(truth, predictions):  \n",
    "        # Custom LightGBM eval metric. The raw multiclass scores are\n",
    "        # assumed to arrive as a flat class-major array, hence the\n",
    "        # reshape to (n_classes, n_rows) and argmax over the class axis.\n",
    "        pred_labels = predictions.reshape(len(np.unique(truth)), -1).argmax(axis = 0)\n",
    "        f1 = f1_score(truth, pred_labels, average = 'macro')\n",
    "        # (name, value, is_higher_better) per LightGBM's feval contract.\n",
    "        return ('macroF1', f1, True)\n",
    "\n",
    "    def learning_rate_power_0997(current_iter):\n",
    "        # Exponential learning-rate decay with a floor.\n",
    "        # NOTE(review): not referenced by fit_params below (no 'callbacks'\n",
    "        # entry), so this is currently unused; the decay factor is .995\n",
    "        # although the name says 0997 - confirm which was intended.\n",
    "        base_learning_rate = 0.1\n",
    "        min_learning_rate = 0.02\n",
    "        lr = base_learning_rate  * np.power(.995, current_iter)\n",
    "        return max(lr, min_learning_rate)\n",
    "\n",
    "    lgb_params = {'boosting_type': 'dart',\n",
    "                  'class_weight': 'balanced',\n",
    "                  \"objective\": 'multiclassova',\n",
    "                  'colsample_bytree': 0.932999339566722,\n",
    "                  'min_child_samples': 49,\n",
    "                  'min_child_weight': 0.01,\n",
    "                  'num_leaves': 92,\n",
    "                  'reg_alpha': 0.001,\n",
    "                  'reg_lambda': 0,\n",
    "                  'subsample': 0.7588178065029635,\n",
    "                  'metric': None,\n",
    "                  'silent': True,\n",
    "                  'random_state': 0,\n",
    "                  'n_jobs': -1}\n",
    "\n",
    "    fit_params = {\"early_stopping_rounds\": 400, \n",
    "                  \"eval_metric\" : evaluate_macroF1_lgb, \n",
    "                  'eval_names': ['train', 'validation'],\n",
    "                  'verbose': False,\n",
    "                  'categorical_feature': 'auto'}\n",
    "    \n",
    "    return lgb_params, fit_params"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 31,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Unnecessary Columns Remover Transformer\n",
      "Missing Values Imputer\n",
      "Remove Object Imputer\n",
      "Categorical Variables Transformer\n",
      "Feature Engineering Transformer\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "C:\\Users\\Meng\\Tools\\Anaconda3\\envs\\ml\\lib\\site-packages\\pandas\\core\\reshape\\merge.py:544: UserWarning: merging between different levels can give an unintended result (1 levels on the left, 2 on the right)\n",
      "  warnings.warn(msg, UserWarning)\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "The most 20 positive feature: \n",
      "Target                        1.000000\n",
      "(escolari, mean)              0.449353\n",
      "(escolari, max)               0.388633\n",
      "meaneduc                      0.335072\n",
      "house_holder_edu              0.329950\n",
      "adult_percent                 0.320388\n",
      "(instlevel8, mean)            0.306825\n",
      "(escolari, min)               0.305241\n",
      "cielorazo                     0.304421\n",
      "escolari                      0.302305\n",
      "phone_per_person_household    0.298884\n",
      "eviv3                         0.294222\n",
      "epared3                       0.292451\n",
      "pisomoscer                    0.280284\n",
      "(escolari_age, min)           0.271018\n",
      "(escolari, sum)               0.268812\n",
      "(escolari_age, mean)          0.263443\n",
      "paredblolad                   0.261274\n",
      "etecho3                       0.257378\n",
      "no_appliances                 0.243289\n",
      "Name: Target, dtype: float64\n",
      "The most 20 negative feature: \n",
      "r4h1                            -0.229889\n",
      "r4h1_percent_in_male            -0.248590\n",
      "r4m1                            -0.253163\n",
      "(estadocivil1, mean)            -0.260641\n",
      "(instlevel2, mean)              -0.265139\n",
      "room_per_person_household       -0.267415\n",
      "female_per_room                 -0.273809\n",
      "female_per_bedroom              -0.274250\n",
      "(instlevel1, mean)              -0.278413\n",
      "bedrooms_per_person_household   -0.281959\n",
      "overcrowding                    -0.289110\n",
      "r4t1_percent_in_total           -0.289337\n",
      "r4t1                            -0.316745\n",
      "child_percent                   -0.320388\n",
      "hogar_nin                       -0.328199\n",
      "dependency_count                -0.339183\n",
      "child_per_room                  -0.370620\n",
      "child_per_bedroom               -0.374951\n",
      "dependency                      -0.376901\n",
      "elimbasu5                             NaN\n",
      "Name: Target, dtype: float64\n",
      "X has 9557 rows, and 190 features\n",
      "LGBClassifierCV\n",
      "Number of columns in train - 189\n",
      "Train F1 - 0.8557356069179175   Validation F1 - 0.43620992534036007\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "C:\\Users\\Meng\\Tools\\Anaconda3\\envs\\ml\\lib\\site-packages\\pandas\\core\\reshape\\merge.py:544: UserWarning: merging between different levels can give an unintended result (1 levels on the left, 2 on the right)\n",
      "  warnings.warn(msg, UserWarning)\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "X has 23856 rows, and 190 features\n",
      "Estimator Index - 0\n",
      "Local CV Score - 0.43620992534036007\n",
      "              Target\n",
      "Id                  \n",
      "ID_2f6873615       4\n",
      "ID_1c78846d2       4\n",
      "ID_e5442cf6a       4\n",
      "ID_a8db26a79       4\n",
      "ID_a62966799       4\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "C:\\Users\\Meng\\Tools\\Anaconda3\\envs\\ml\\lib\\site-packages\\sklearn\\preprocessing\\label.py:151: DeprecationWarning: The truth value of an empty array is ambiguous. Returning False, but in future this will result in an error. Use `array.size > 0` to check that an array is not empty.\n",
      "  if diff:\n"
     ]
    }
   ],
   "source": [
    "lgb_params, lgb_fit_params = get_lgb_params()\n",
    "\n",
    "# End-to-end pipeline: impute -> object-column conversion -> feature\n",
    "# engineering -> column pruning -> correlation report -> LightGBM model.\n",
    "pipeline = Pipeline([\n",
    "    ('na_imputer', MissingValuesImputer(impute_zero_columns = ['v18q1', 'v2a1', 'rez_esc'])),\n",
    "    ('remove_imputer', RemoveObjectTransformer()),\n",
    "    ('cat_transformer', CategoricalVariableTransformer()),\n",
    "    ('feature_engineering_transformer', FeatureEngineeringTransformer()),\n",
    "    ('unnecessary_columns_remover_transformer', UnnecessaryColumnsRemoverTransformer()),\n",
    "    ('correlation', CorrelationOutputer()),\n",
    "    ('lgb', LGBClassifierCV(lgb_params = lgb_params,\n",
    "                            fit_params = lgb_fit_params,\n",
    "                            cv = 5,\n",
    "                            perform_random_search = False,\n",
    "                            use_train_test_split = True,\n",
    "                            use_kfold_split = False)\n",
    "    )\n",
    "])\n",
    "\n",
    "\n",
    "# .copy() so the transformers' in-place edits leave the raw frames intact.\n",
    "pipeline.fit(train_data.copy(), target)\n",
    "pred = pipeline.predict(test_data.copy())\n",
    "print(\"Local CV Score - \" + str(pipeline.named_steps['lgb'].cv_score_))\n",
    "# The model was trained on 0-3 labels; shift back to the 1-4 Target scale.\n",
    "sub_orig['Target'] = pred + 1\n",
    "sub_orig.to_csv('Pipeline_Base_LGB_'+ str(pipeline.named_steps['lgb'].cv_score_) + '.csv')\n",
    "print(sub_orig.head())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.0"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
