{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {
    "ExecuteTime": {
     "end_time": "2021-09-05T04:51:16.663066Z",
     "start_time": "2021-09-05T04:51:11.533773Z"
    },
    "_cell_guid": "b1076dfc-b9ad-4769-8c92-a6c4dae69d19",
    "_uuid": "8f2839f25d086af736a60e9eeb907d3b93b6e0e5"
   },
   "outputs": [
    {
     "ename": "ModuleNotFoundError",
     "evalue": "No module named 'graphviz'",
     "output_type": "error",
     "traceback": [
      "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[1;31mModuleNotFoundError\u001b[0m                       Traceback (most recent call last)",
      "\u001b[1;32m<ipython-input-1-59c25f7a3213>\u001b[0m in \u001b[0;36m<module>\u001b[1;34m\u001b[0m\n\u001b[0;32m      4\u001b[0m \u001b[1;32mimport\u001b[0m \u001b[0mmatplotlib\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mpyplot\u001b[0m \u001b[1;32mas\u001b[0m \u001b[0mplt\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m      5\u001b[0m \u001b[1;32mimport\u001b[0m \u001b[0mseaborn\u001b[0m \u001b[1;32mas\u001b[0m \u001b[0msns\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m----> 6\u001b[1;33m \u001b[1;32mimport\u001b[0m \u001b[0mgraphviz\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m      7\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m      8\u001b[0m \u001b[1;31m#from imblearn.over_sampling import RandomOverSampler\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;31mModuleNotFoundError\u001b[0m: No module named 'graphviz'"
     ]
    }
   ],
   "source": [
    "import pandas as pd\n",
    "import numpy as np\n",
    "import time\n",
    "import matplotlib.pyplot as plt\n",
    "import seaborn as sns\n",
    "import graphviz\n",
    "\n",
    "#from imblearn.over_sampling import RandomOverSampler\n",
    "#from imblearn.pipeline import Pipeline\n",
    "\n",
    "from sklearn.preprocessing import RobustScaler, StandardScaler\n",
    "from sklearn.model_selection import GridSearchCV, train_test_split\n",
    "\n",
    "from sklearn.linear_model import LogisticRegression, Ridge, Lasso, ElasticNet\n",
    "import lightgbm as lgb\n",
    "import xgboost as xgb\n",
    "import catboost as cat\n",
    "\n",
    "from sklearn.svm import SVC\n",
    "from sklearn.tree import DecisionTreeClassifier\n",
    "from sklearn.ensemble import RandomForestClassifier, VotingClassifier, BaggingClassifier, ExtraTreesClassifier\n",
    "\n",
    "from sklearn.metrics import roc_curve, auc, confusion_matrix, roc_auc_score\n",
    "from sklearn.tree import export_graphviz\n",
    "import warnings \n",
    "warnings.filterwarnings('ignore')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def plot_roc_curve(fprs, tprs):\n",
    "    \"\"\"Plot per-fold ROC curves plus the mean ROC with a +/- 1 std-dev band.\n",
    "\n",
    "    fprs, tprs : lists of array-likes, one entry per CV fold, as\n",
    "    returned by sklearn.metrics.roc_curve.\n",
    "    \"\"\"\n",
    "    tprs_interp = []\n",
    "    aucs = []\n",
    "    mean_fpr = np.linspace(0, 1, 100)\n",
    "    f, ax = plt.subplots(figsize=(8, 8))\n",
    "\n",
    "    # Plot the ROC of each fold and collect its AUC; interpolate each TPR\n",
    "    # onto a common FPR grid so the curves can be averaged afterwards.\n",
    "    for i, (fpr, tpr) in enumerate(zip(fprs, tprs), 1):\n",
    "        tprs_interp.append(np.interp(mean_fpr, fpr, tpr))\n",
    "        tprs_interp[-1][0] = 0.0\n",
    "        roc_auc = auc(fpr, tpr)\n",
    "        aucs.append(roc_auc)\n",
    "        ax.plot(fpr, tpr, lw=1, alpha=0.3, label='ROC Fold {} (AUC = {:.3f})'.format(i, roc_auc))\n",
    "\n",
    "    # Diagonal reference line: random guessing\n",
    "    plt.plot([0, 1], [0, 1], linestyle='--', lw=2, color='r', alpha=0.8, label='Random Guessing')\n",
    "\n",
    "    mean_tpr = np.mean(tprs_interp, axis=0)\n",
    "    mean_tpr[-1] = 1.0\n",
    "    mean_auc = auc(mean_fpr, mean_tpr)\n",
    "    std_auc = np.std(aucs)\n",
    "\n",
    "    # Mean ROC across folds. Raw strings are required here: '\\p' in\n",
    "    # '$\\pm$' is an invalid escape sequence in a normal string literal.\n",
    "    ax.plot(mean_fpr, mean_tpr, color='b', label=r'Mean ROC (AUC = {:.3f} $\\pm$ {:.3f})'.format(mean_auc, std_auc), lw=2, alpha=0.8)\n",
    "\n",
    "    # Shade +/- 1 standard deviation around the mean ROC curve\n",
    "    std_tpr = np.std(tprs_interp, axis=0)\n",
    "    tprs_upper = np.minimum(mean_tpr + std_tpr, 1)\n",
    "    tprs_lower = np.maximum(mean_tpr - std_tpr, 0)\n",
    "    ax.fill_between(mean_fpr, tprs_lower, tprs_upper, color='grey', alpha=.2, label=r'$\\pm$ 1 std. dev.')\n",
    "\n",
    "    ax.set_xlabel('False Positive Rate', size=15, labelpad=20)\n",
    "    ax.set_ylabel('True Positive Rate', size=15, labelpad=20)\n",
    "    ax.tick_params(axis='x', labelsize=15)\n",
    "    ax.tick_params(axis='y', labelsize=15)\n",
    "    ax.set_xlim([-0.05, 1.05])\n",
    "    ax.set_ylim([-0.05, 1.05])\n",
    "\n",
    "    ax.set_title('ROC Curves of Folds', size=20, y=1.02)\n",
    "    ax.legend(loc='lower right', prop={'size': 13})\n",
    "    \n",
    "    plt.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def plot_feature_importances(feature_importances, title, feature_names):\n",
    "    \"\"\"Bar-plot feature importances, rescaled so the largest equals 100.\n",
    "\n",
    "    feature_importances : array-like of raw importance scores\n",
    "    title : str, figure title\n",
    "    feature_names : pandas Index of labels (must support fancy indexing)\n",
    "    \"\"\"\n",
    "    # Normalize to percent of the strongest feature\n",
    "    feature_importances = 100.0*(feature_importances/max(feature_importances))\n",
    "    # Column indices sorted by descending importance\n",
    "    index_sorted = np.flipud(np.argsort(feature_importances))\n",
    "    pos = np.arange(index_sorted.shape[0])+0.5\n",
    "    \n",
    "    fig, ax = plt.subplots(figsize=(16,4))\n",
    "    plt.bar(pos,feature_importances[index_sorted])\n",
    "    for tick in ax.get_xticklabels():\n",
    "        tick.set_rotation(90)\n",
    "    plt.xticks(pos,feature_names[index_sorted])\n",
    "    plt.ylabel('Relative Importance')\n",
    "    plt.title(title)\n",
    "    plt.show() "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "_cell_guid": "79c7e3d0-c299-4dcb-8224-4455121ee9b0",
    "_uuid": "d629ff2d2480ee46fbb7e2d37f6b5fab8052498a"
   },
   "outputs": [],
   "source": [
    "df_train = pd.read_csv('/kaggle/input/rs6-attrition-predict/train.csv')\n",
    "df_test = pd.read_csv('/kaggle/input/rs6-attrition-predict/test.csv')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def extract_features(df, is_train=False):\n",
    "    \"\"\"Integer-encode the categorical columns and split off the target/id.\n",
    "\n",
    "    NOTE: mutates ``df`` in place (columns are overwritten and dropped).\n",
    "    The lambda-based maps raise KeyError on unseen category values.\n",
    "\n",
    "    Returns (y, df): ``y`` is the 0/1 Attrition target when ``is_train``\n",
    "    is True, otherwise the ``user_id`` column (kept for the submission).\n",
    "    \"\"\"\n",
    "    # target\n",
    "    y = pd.DataFrame()\n",
    "    if is_train:\n",
    "        attrition_dict = {'No':0,'Yes':1}\n",
    "        df['Attrition'] = df['Attrition'].map(lambda x: attrition_dict[x])\n",
    "        y = df.Attrition\n",
    "        df.drop(['Attrition'], axis=1, inplace=True)\n",
    "    else:\n",
    "        y = df.user_id\n",
    "    df.drop(['user_id'], inplace=True, axis=1)\n",
    "    # BusinessTravel\n",
    "    businesstravel_dict = {'Non-Travel':0, 'Travel_Rarely':1, 'Travel_Frequently':2}\n",
    "    df['BusinessTravel'] = df['BusinessTravel'].map(lambda x: businesstravel_dict[x])\n",
    "    # Department\n",
    "    department_dict = {'Sales':0, 'Research & Development':1, 'Human Resources':2}\n",
    "    df['Department'] = df['Department'].map(lambda x: department_dict[x])\n",
    "    # EducationField\n",
    "    educationfield_dict = {'Life Sciences':0, 'Medical':1, 'Marketing':2, 'Technical Degree':3, 'Human Resources':4, 'Other':5}\n",
    "    df['EducationField'] = df['EducationField'].map(lambda x: educationfield_dict[x])\n",
    "    # Gender\n",
    "    gender_dict = {'Male':0, 'Female': 1}\n",
    "    df['Gender'] = df['Gender'].map(lambda x: gender_dict[x])\n",
    "    # JobRole\n",
    "    jobrole_dict = {'Sales Executive':0, \n",
    "                    'Research Scientist':1, \n",
    "                    'Laboratory Technician':2, \n",
    "                    'Manufacturing Director':3, \n",
    "                    'Healthcare Representative':4,\n",
    "                    'Manager':5, \n",
    "                    'Sales Representative':6,\n",
    "                    'Research Director':7,\n",
    "                    'Human Resources':8\n",
    "                   }\n",
    "    df['JobRole'] = df['JobRole'].map(lambda x: jobrole_dict[x])\n",
    "    # MaritalStatus\n",
    "    maritalstatus_dict = {'Single':0, 'Married':1, 'Divorced':2}\n",
    "    df['MaritalStatus'] = df['MaritalStatus'].map(lambda x: maritalstatus_dict[x])\n",
    "    # Over18\n",
    "    df.drop(['Over18'], inplace=True, axis=1)\n",
    "    # EmployeeNumber\n",
    "    df.drop(['EmployeeNumber'], inplace=True, axis=1)\n",
    "    # OverTime\n",
    "    overtime_dict = {'Yes':0, 'No':1}\n",
    "    df['OverTime'] = df['OverTime'].map(lambda x: overtime_dict[x])\n",
    "    return y, df"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "target, train = extract_features(df_train, True)\n",
    "user_id, test = extract_features(df_test, False)\n",
    "del df_train\n",
    "del df_test"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_optimizer_params(model, model_params, train=train, test=test):\n",
    "    \"\"\"Grid-search ``model`` over ``model_params`` (5-fold CV, ROC AUC).\n",
    "\n",
    "    Prints the best CV score and searched parameters, then returns the\n",
    "    full parameter dict of the best estimator (``get_params()``).\n",
    "    ``test`` is unused but kept for interface compatibility.\n",
    "    \"\"\"\n",
    "    gridsearch = GridSearchCV(model, model_params, scoring='roc_auc', cv=5)\n",
    "    gridsearch.fit(train, target)\n",
    "    best_score = gridsearch.best_score_\n",
    "    print(\"Best score: %0.3f\" % best_score)\n",
    "    print(\"Best parameters set:\")\n",
    "    best_parameters = gridsearch.best_estimator_.get_params()\n",
    "    for param_name in sorted(gridsearch.param_grid.keys()):\n",
    "        print(\"\\t%s: %r\" % (param_name, best_parameters[param_name]))\n",
    "    return best_parameters\n",
    "\n",
    "def get_model_result(model, model_params, model_name, scaler=None, test_size=0.5, train=train, test=test, feature_importance=False, gridsearch=True):\n",
    "    \"\"\"Tune (optionally), fit and evaluate ``model``; write a submission CSV.\n",
    "\n",
    "    Returns (validation AUC, fitted model parameter dict).\n",
    "    \"\"\"\n",
    "    if scaler is not None:\n",
    "        train = scaler.fit_transform(train)\n",
    "        # BUGFIX: apply the scaler fitted on train. The original called\n",
    "        # fit_transform(test), which re-fits on the test set and scales\n",
    "        # the two sets inconsistently (leaking test statistics).\n",
    "        test = scaler.transform(test)\n",
    "\n",
    "    if gridsearch is True:\n",
    "        best_params = get_optimizer_params(model, model_params)\n",
    "        model.set_params(**best_params)\n",
    "    X_train, X_val, y_train, y_val = train_test_split(train, target, test_size=test_size, random_state=2020, stratify=target)\n",
    "    model.fit(X_train, y_train)\n",
    "    # Prefer class-1 probabilities when available, raw predictions otherwise\n",
    "    prob_y_val = model.predict_proba(X_val)[:,1] if hasattr(model, 'predict_proba') else model.predict(X_val)\n",
    "    trn_fpr, trn_tpr, trn_thresholds = roc_curve(y_val, prob_y_val)\n",
    "    best_score = auc(trn_fpr, trn_tpr)\n",
    "    best_parameters = model.get_params()\n",
    "    plot_roc_curve([list(trn_fpr)], [list(trn_tpr)])\n",
    "\n",
    "    if feature_importance is True:\n",
    "        # NOTE: requires ``train`` to still be a DataFrame, i.e. scaler=None\n",
    "        plot_feature_importances(model.feature_importances_, 'Importance of Features', train.columns)\n",
    "    result = pd.DataFrame()\n",
    "    result['user_id'] = user_id\n",
    "    result['Attrition'] = pd.DataFrame(model.predict_proba(test)[:,1] if hasattr(model, 'predict_proba') else model.predict(test))\n",
    "    result[['user_id', 'Attrition']].to_csv(f'result-{model_name}.csv', index=False, float_format='%.8f')\n",
    "    print('result of predict:\\n', result.head())\n",
    "    return best_score, best_parameters"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "data = pd.concat([train, test]).corr() ** 2\n",
    "data = np.tril(data, k=-1)\n",
    "data[data==0] = np.nan"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "figure, ax = plt.subplots(figsize=(10, 10))\n",
    "sns.heatmap(np.sqrt(data), annot=False, cmap='viridis', ax=ax)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "data = train.corrwith(target).agg('square')\n",
    "\n",
    "figure, ax = plt.subplots(figsize=(10, 10))\n",
    "data.agg('sqrt').plot.bar(ax=ax)\n",
    "del data"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Linear models with Regularization\n",
    "**Normal equation**\n",
    "Given an input sample $x=[x_1, x_2, \\cdots, x_n]$, linear regression predicts $y$ using the following equation:\n",
    "$$\\bar{y}=\\theta_0+\\theta_1x_1+\\theta_2x_2+\\cdots+\\theta_n x_n$$\n",
    "Given $m$ training samples $x^{(1)}, x^{(2)}, \\cdots,x^{(m)}$, linear regression finds $\\theta$ that minimize the Mean Square Error(MSE) between $\\bar{y}$ and $y$:\n",
    "$$MSE(\\theta)=\\frac{1}{m}\\sum_{i=1}^m(\\bar{y}^{(i)}-y^{(i)})^2=\\frac{1}{m}\\sum_{i=1}^m(\\theta \\cdot x^{(i)} - y^{(i)})^2$$\n",
    "where $y$ is the true value and $\\bar{y}$ is the predicted value. The solution of this minimization problem is given by $\\frac{\\partial MSE}{\\partial \\theta}=0$, which can be calculated as\n",
    "$$\\theta=(X^T\\cdot X)^{-1}\\cdot X^T \\cdot y$$\n",
    "where $X$ is the input data matrix of size $m \\times (n + 1)$. Each row of $X$ corresponds to a sample, each column corresponds to a feature. There are $(n+1)$ columns since a column of $1$s is added to the $n$ features, corresponding to $\\theta_0$. $y$ is the vector of true target values of size $m \\times 1$, and $\\theta$ is a vector of size $(n+1)\\times 1$. This $\\theta$ is the set of parameters that minimizes the cost function of linear regression $MSE(\\theta)$.\n",
    "\n",
    "$$\n",
    "\\begin{equation}\n",
    "\\begin{aligned}\n",
    "\\frac{\\partial MSE}{\\partial \\theta} &= \\frac{\\partial \\frac{1}{m} \\sum_{i=1}^m ((\\theta \\cdot x^{(i)})^2 + y^2 - \\theta \\cdot x^{(i)}y)}{\\partial \\theta}\\\\\n",
    "&= \\frac{\\partial(\\frac{1}{m}\\theta^T \\theta \\sum_{i=1}^m \\sum_{j=1}^m x^{(i)}x^{(j)})}{\\partial \\theta} + \\frac{y^Ty}{\\partial \\theta} - \\frac{\\partial \\frac{2y}{m}\\sum_{i=1}^mx^{(i)}}{\\partial \\theta} \\\\\n",
    "&= \\frac{2}{m}(X^TX \\theta - X^Ty)\n",
    "\\end{aligned}\n",
    "\\end{equation}\n",
    "$$\n",
    "GD(Gradient Descent) is most useful when the cost function (here it is the MSE) does not have a clean and nice analytical solution. But it can also accelerate the training when you have a lot of input features or a large training set. So the gradient of $MSE(\\theta)$ over $\\theta$ is :\n",
    "$$\\nabla MSE(\\theta)= \\frac{2}{m}X^T(X \\cdot \\theta - y)$$\n",
    "The pseudo algorithm for gradient descent is:\n",
    "- Initialize parameter vector $\\theta$\n",
    "- Choose learning rate $\\mu$\n",
    "- For each step, update the parameters with a sample (means Stochastic Gradient Descent), a random selected sub-sample (means Mini-batch Gradient Descent), using all $m$ samples (means Batch gradient descent).\n",
    "- Iterate until certain number of steps has passed, or when the decrease of $MSE(\\theta)$ is smaller than a tolerance (means early stopping).\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "\n",
    "**Ridge Regression**\n",
    "\n",
    "Ridge regression applies a regularization term proportional to the squared **l2-norm** of the feature weights (not including the intercept). A common expression is:\n",
    "$$J(\\theta)=MSE(\\theta)+\\alpha(\\theta_1^2 + \\theta_2^2 + \\cdots + \\theta_n^2)$$"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "model = Ridge()\n",
    "# NOTE: the wide grid below is immediately overwritten by the best values\n",
    "# found in an earlier run, so Restart-and-Run-All stays fast. Comment out\n",
    "# the second assignment to redo the full search.\n",
    "model_params = {'alpha': [0.01, 0.05, 0.1, 0.5, 1, 2, 5, 7], 'tol': [0.0001, 0.001, 0.01, 0.1]}\n",
    "model_params = {'alpha': [2], 'tol': [0.0001]}\n",
    "start = time.time()\n",
    "best_score_ridge, best_params_ridge = get_model_result(model, model_params, 'ridge', StandardScaler())\n",
    "print(time.time()-start)\n",
    "del model"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "**Lasso Regression**\n",
    "\n",
    "LASSO(Least Absolute Shrinkage and Selection Operator Regression)\n",
    "cost function:\n",
    "$$J(\\theta)=MSE(\\theta)+ \\alpha(|\\theta_1| + |\\theta_2| + \\cdots + |\\theta_n|)$$\n",
    "The penalty is proportional to the *l1-norm* of theta."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "model = Lasso()\n",
    "model_params = {'alpha': np.logspace(-10, -6, 50)}\n",
    "model_params = {'alpha': [1e-10]}\n",
    "start = time.time()\n",
    "best_score_lasso, best_params_lasso = get_model_result(model, model_params, 'lasso', StandardScaler())\n",
    "print(time.time() - start)\n",
    "del model"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "**Elastic Net**\n",
    "\n",
    "Elastic net is somewhere between ridge regression and lasso regression. The cost function is:\n",
    "$$J(\\theta)=MSE(\\theta) + r\\, lasso\\_penalty + (1-r)\\, ridge\\_penalty$$\n",
    "in sklearn,\n",
    "$$J(\\theta)=MSE(\\theta) + \\alpha r\\, lasso\\_penalty + \\alpha (1-r)\\, ridge\\_penalty$$"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "model = ElasticNet()\n",
    "model_params = {'alpha': np.logspace(-10, -4, 10), 'l1_ratio': np.logspace(-10, -4, 10)}\n",
    "model_params = {'alpha': [0.0001], 'l1_ratio': [1e-10]}\n",
    "start = time.time()\n",
    "best_score_elasticnet, best_params_elasticnet = get_model_result(model, model_params, 'elasticnet', StandardScaler())\n",
    "print(time.time() - start)\n",
    "del model "
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Logistic Regression\n",
    "The function $g$ of **Logisitic Regression** named sigmoid function, i.e., $\\frac{1}{1+e^{-z}}$. \n",
    "\n",
    "Hypothesis from linear regression: $z_{\\theta}(x)=\\theta_0x_0 + \\theta_1 x_1 + \\cdots + \\theta_n x_n = \\theta^T x$, so Logistic Hypothesis from composition of sigmoid/logistic function and linear hypothesis:\n",
    "$$h_{\\theta}(x)=\\frac{1}{1+e^{-z_{\\theta}(x)}} = \\frac{1}{1+e^{-\\theta^Tx}}$$\n",
    "\n",
    "The probability of $x$ being $1$ for the given $\\theta$, i.e.,\n",
    "$$h_{\\theta}(x)=\\frac{1}{1+e^{-z_{\\theta}(x)}}=\\frac{1}{1+e^{-\\theta^Tx}}$$\n",
    "And the probability of $x$ being $0$ for the given $\\theta$, i,e.,\n",
    "$$h_{\\theta}(x)=\\frac{e^{-z_{\\theta}(x)}}{1+e^{-z_{\\theta}(x)}}=\\frac{e^{-\\theta^Tx}}{1+e^{-\\theta^Tx}}$$\n",
    "\n",
    "Usually, logistic regression predicts $\\bar{y}=1$ if $\\bar{h} \\geq 0.5$, and $y = 0$ if $\\bar{h} < 0.5$. The **decision boundary** is given by $\\theta^T \\cdot x=0$. While training, the cost function is the **log loss**:\n",
    "$$J(\\theta)=-\\frac{1}{m} \\sum_{i=1}^m[y^{(i)}log(h_{\\theta}^{(i)})+(1-y^{(i)})log(1-h_{\\theta}^{(i)})]$$\n",
    "The partial derivatives of log loss is:\n",
    "$$\\frac{\\partial J(\\theta)}{\\partial \\theta_j}=\\frac{1}{m}\\sum_{i=1}^m(h^{(i)}-y^{(i)})x_j^{(i)}$$\n",
    "\n",
    "The following trick will come in handy:\n",
    "$$\n",
    "\\begin{equation}\n",
    "\\begin{aligned}\n",
    "h^{\\prime} &= (\\frac{1}{1+e^{-\\theta^Tx}})^{\\prime} \\\\\n",
    "& = - \\frac{1}{(1+e^{-\\theta^Tx})^2} \\cdot (1+e^{-\\theta^Tx})^{\\prime} \\\\\n",
    "& = - \\frac{1}{(1+e^{-\\theta^Tx})^2} \\cdot e^{-\\theta^Tx} \\cdot (-\\theta^Tx)^{\\prime} \\\\\n",
    "& = - \\frac{xe^{-\\theta^Tx}}{(1+e^{\\theta^Tx})^2} \\\\\n",
    "& = - \\frac{1}{1+e^{-\\theta^Tx}} \\cdot \\frac{e^{-\\theta^Tx}}{1+e^{-\\theta^Tx}} \\cdot x \\\\\n",
    "& = h(1-h)x\n",
    "\\end{aligned}\n",
    "\\end{equation}\n",
    "$$\n",
    "\n",
    "$$\n",
    "\\begin{equation}\n",
    "\\begin{aligned}\n",
    "J(\\theta)^{\\prime} &= \\sum_{i=1}^m(y_i log^{\\prime}(h) + (1-y_i)log^{\\prime}(1-h))) \\\\\n",
    "& = \\sum ((y_i\\frac{1}{h}h^{\\prime})+(1-y_i)\\frac{1}{1-h}(1-h)^{\\prime}) \\\\\n",
    "& = \\sum (y_i (1-h)x_i - (1-y_i)hx_i )\\\\\n",
    "& = \\sum (y_i-h)x_i\n",
    "\\end{aligned}\n",
    "\\end{equation}\n",
    "$$\n",
    "where $i$ means $i$-th sample, and $j$ means $j$-th feature."
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "**Softmax Regression**\n",
    "Softmax regression is used when there are more than two classes ($0$ and $1$) to classify. For each class $k$, there is a vector of parameters $\\theta_k$. The softmax score for a sample $x$ is $s_k(x) = \\theta_k^T \\cdot x$. The probability of sample $x$ being in class $k$ is:\n",
    "$$h_k = \\frac{e^{s_k(x)}}{\\sum_{j=1}^K e^{s_j(x)}}$$\n",
    "where $K$ is the total number of possible classes. Softmax Regression takes the class that has the highest probability $p$ as the predicted class. When training, the cost function is the **cross entropy**,\n",
    "$$J(\\theta)=-\\frac{1}{m}\\sum_{i=1}^m\\sum_{k=1}^K y_k^{(i)}log(h_k^{(i)})$$\n",
    "where $y_k^{(i)}=1$ if sample $i$ belongs to class $k$, and $y_k^{(i)}=0$ otherwise. \n",
    "\n",
    "The gradient vector of cross entropy for class $k$ is:\n",
    "$$J(\\theta)^{\\prime}=\\frac{1}{m}\\sum_{i=1}^m(h_k^{(i)}-y_k^{(i)})x^{(i)}$$\n",
    "note that $\\theta$ is a matrix, i.e., multi-vector."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "model = LogisticRegression(class_weight='balanced')\n",
    "model_params = {\n",
    "    'penalty': ['l1', 'l2'], \n",
    "    'C': [0.4, 0.5, 0.6],\n",
    "    'solver':['liblinear', 'lbfgs', 'newton-cg', 'sag', 'saga']\n",
    "}\n",
    "model_params = {\n",
    "    'penalty': ['l1'], \n",
    "    'C': [0.5],\n",
    "    'solver':['liblinear']\n",
    "}\n",
    "start = time.time()\n",
    "best_score_lgr,best_params_lgr = get_model_result(model, model_params,'logisticregression', StandardScaler()) \n",
    "print(time.time() - start)\n",
    "del model "
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Support Vector Machine\n",
    "\n",
    "SVM(Support Vector Machine) is one of the most popular classification techniques in machine learning. Use the logistic regression cost with l$2$ regularization to define the loss function of svm.\n",
    "$$J(\\theta) = \\frac{1}{m} \\sum_{i=1}^m[y^{(i)}(-log(h^{(i)})) + (1-y^{(i)}) (-log(1-h^{(i)}))] + \\frac{\\lambda}{2m} \\sum_{j=1}^n \\theta_j^2$$\n",
    "where $h^{(i)}=\\sigma(\\theta^T \\cdot x^{(i)}) = \\frac{1}{1+e^{-\\theta^T \\cdot x^{(i)}}}$, \n",
    "其中一种解释是，若$y^{(i)}=1$, 对于损失函数$J(\\theta)$的贡献是$-log(h^{(i)})$，则，若$y^{(i)}=0$，对于损失函数$J(\\theta)$的贡献是$-log(1-h^{(i)})$，在SVM中，使用$max(0, 1- \\theta^T \\cdot x^{(i)})$来替代$-log(h^{(i)})$，并且用$max(0, 1 + \\theta^T \\cdot x^{(i)})$来替代$-log(1-h^{(i)})$。定义$z=\\theta^T \\cdot x^{(i)}$则"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def sigmoid(x):\n",
    "    \"\"\"Logistic function 1 / (1 + exp(-x)).\"\"\"\n",
    "    return 1/(1+np.exp(-x))\n",
    "# Compare the logistic loss with the SVM hinge loss for y=1 (left) and y=0 (right)\n",
    "fig,ax = plt.subplots(nrows = 1, ncols = 2, figsize = (12,4))\n",
    "z = np.linspace(-3,3,num=100)\n",
    "ax[0].plot(z,-np.log(sigmoid(z)),label='logistic',lw=2, color='b')\n",
    "ax[1].plot(z,-np.log(1-sigmoid(z)),label='logistic',lw=2,color='b')\n",
    "ax[0].plot(z,np.maximum(0,1-z),label='SVM',lw=2, color='r',linestyle='--')\n",
    "ax[1].plot(z,np.maximum(0,1+z),label='SVM',lw=2,color='r',linestyle='--')\n",
    "ax[0].set_title('y=1')\n",
    "ax[1].set_title('y=0')\n",
    "ax[0].set_xlabel('z')\n",
    "ax[1].set_xlabel('z')\n",
    "ax[0].set_ylabel('individual loss')\n",
    "ax[1].set_ylabel('individual loss')\n",
    "ax[0].legend()\n",
    "ax[1].legend()\n",
    "plt.show()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "**Linear SVM**\n",
    "SVM的cost function为：\n",
    "$$J(\\theta)=\\frac{1}{m} \\sum_{i=1}^m [y^{(i)}(max(0,1-\\theta^T \\cdot x^{(i)}))+ (1-y^{(i)})(max(0, 1+\\theta^T \\cdot x^{(i)}))] + \\frac{\\lambda}{2m}\\sum_{j=1}^n \\theta^2_j$$\n",
    "由于常系数$\\frac{1}{m}$并不会影响最终的结果，因此上式可以写成，\n",
    "$$J(\\theta)=\\sum_{i=1}^m max(0, 1-t^{(i)}(\\theta^T \\cdot x^{(i)})) + \\frac{\\lambda}{2} \\sum_{j=1}^n \\theta^2_j$$\n",
    "其中，当$y^{(i)}=1$时$t^{(i)}=1$，当$y^{(i)}=0$时，$t^{(i)}=-1$。为了方便讨论，其中基于$l2$的惩罚项，通过利用参数$C$来替代参数$\\lambda$，并且使用$w^T\\cdot x^{(i)}$来替代$\\theta^T \\cdot x^{(i)}$，即\n",
    "$$J(w,b)=C\\sum_{i=1}^m max(0, 1-t^{(i)}(w^T \\cdot x^{(i)} + b)) + \\frac{1}{2} \\sum_{j=1}^n w_j^2$$\n",
    "其中$C$等价于$\\frac{1}{\\lambda}$。较大的$C$会导致较低的偏差、较高的方差（可能导致过拟合），较小的$C$导致较高的偏差、较小的方差（可能导致欠拟合）。\n",
    "函数$$l(w^T \\cdot x^{(i)} + b) = max(0, 1-t^{(i)}(w^T \\cdot x^{(i)} + b))$$称之为*hinge loss*。把基于$l_2$的代价函数$J$称之为*soft margin SVM*。\n",
    "在SVM中的两个margins定义如下：\n",
    "$$w^T \\cdot x^{(i)} + b = 1$$\n",
    "$$w^T \\cdot x^{(i)} + b = -1$$\n",
    "Hinge loss 惩罚称之为margin violations，当$y=1$时，样本通过$w^T \\cdot x + b =1$margin，hinge loss变成非0值并且可以度量从$w^Tx+b$到$1$之间有多远。另一方面，假如$y=0$样本通过$w^T \\cdot x+ b= -1$Margin时，hinge loss依然为非0元素，此时要度量的是$w^T \\cdot x + b$到$-1$之间的距离。\n",
    "\n",
    "正则项$\\frac{1}{2} \\sum_{j=1}^n w_j^2$可以起到扩大两个margin的作用。如假设$wx_0+b=0$,$wx_1+b=1$,和$wx_{-1}+b=-1$这三条之前，则\n",
    "$$x_1-x_0=\\frac{1}{w}$$\n",
    "$$x_0 -x_{-1} = \\frac{1}{w}$$\n",
    "由上可知，较小的$w$，意味者两个margin之间较大的距离，即$x_1-x_{-1}=\\frac{2}{w}$。\n",
    "当在参数学习过程中，是降低$J$的值，即降低$||w||_2^2=\\sum_{j=1}^n w_j^2$，此时提升两个margin之间的距离。因此，SVM又称作“large margin classifier”。\n",
    "在sklearn中的SVC就是利用了soft margin。\n",
    "\n",
    "当参数$C$非常大(趋向于无穷的时候)的时候，margin将会变窄， margin violations将不被允许，此时称之为hard margin SVM，其目标函数为\n",
    "$$minimize \\ J(w,b)=\\frac{1}{2} \\sum_{j=1}^n w_j^2 $$\n",
    "$$subject\\ to\\ t^{(i)}(w^T \\cdot x^{(i)} +b) \\geq 1$$\n",
    "其中Soft margin SVM平衡的是margin widening和margin violations（即在两个margin之间的值）之间的平衡。\n",
    "\n",
    "**Not-Linear SVM**\n",
    "- 多项式特征， 通过增加C的值在一定程度上可以解决这个问题\n",
    "- 相似特征\n",
    "另一个构造非线性SVM的方式通过计算每个数据点$x$和landmarks $l$之间的相似度来添加新的功能。相似度函数（核函数）RBF(gaussian radial basis function)定义如下：\n",
    "$$\\phi_r(x,l)=e^{-\\lambda ||x-l||^2}$$\n",
    "其中$||x-l||^2=(x_1-l_1)^2+(x_2-l_2)^2 + \\cdots + (x_n - l_n)^2$，例如"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "x = np.linspace(-4,4,num=100)\n",
    "l = 0\n",
    "gamma1=0.5\n",
    "f1 = np.exp(-gamma1*(x-l)*(x-l))\n",
    "gamma2=5\n",
    "f2 = np.exp(-gamma2*(x-l)*(x-l))\n",
    "plt.plot(x,f1,label=r'$\\gamma = 0.5$')\n",
    "plt.plot(x,f2,label=r'$\\gamma = 5$')\n",
    "plt.legend(fontsize = 14)\n",
    "plt.xlabel('x',fontsize = 14)\n",
    "plt.ylabel('similarity', fontsize = 14)\n",
    "plt.arrow(0,0.2,0,-0.18, head_width=0.2, head_length=0.05,lw=1,color='indianred')\n",
    "plt.text(-0.7,0.22,'landmark', color='indianred', fontsize=14)\n",
    "plt.show()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "其中从上图中可以看出：\n",
    "- similarity(核)函数的范围为0到1，相似度最高为1，此时landmark和sample值相同，以此为界限，远离此界限的similarity越来越小\n",
    "- $\\gamma$控制着$||x-l||^2$对相似度的影响，较大的$\\gamma$对应着较小的$||x-l||^2$，同理反之。\n",
    "创建landmark的方式是，利用$x$创建landmark $l$，即\n",
    "$$l^{(1)}=x^{(1)}, l^{(2)}=x^{(2)}, \\cdots, l^{(m)}=x^{(m)}$$\n",
    "则样本$x^{(i)}$被转为：\n",
    "$$f_1^{(i)}=\\phi_\\lambda (x^{(i)}, l^{(1)})$$\n",
    "$$f_2^{(i)}=\\phi_\\lambda (x^{(i)}, l^{(2)})$$\n",
    "$$\\cdots$$\n",
    "$$f_m^{(i)}=\\phi_\\lambda (x^{(i)}, l^{(m)})$$\n",
    "其中$f_j^{(i)}$表示的是样本$x^{(i)}$和landmark $l^{(j)}$之间的相似度，定义如下\n",
    "$$f^{(i)}=[f_1^{(i)}, f_2^{(i)}, \\cdots, f_m^{(i)}]^T$$\n",
    "此时用$w^T_f \\cdot f^{(i)} + b$来取代$w_x^T \\cdot x^{(i)} + b$ "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# model = SVC()\n",
    "# model_params = {\n",
    "#     'C':[0.1,1,5,10,50,100, 200],\n",
    "#     'kernel': ['linear', 'poly', 'rbf', 'sigmoid'],\n",
    "#     'gamma':[1,0.1,0.01,0.001]\n",
    "# }\n",
    "# best_score_svc, best_params_svc = get_model_result(model, model_params, 'SVC', StandardScaler())"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Random Forests\n",
    "- Decision Tree\n",
    "- Random Forest\n",
    "- CART"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Fit a shallow (depth-2) tree and render it with graphviz to inspect\n",
    "# the top split features; the last expression displays the graph inline.\n",
    "tree = DecisionTreeClassifier(max_depth=2, random_state=2020)\n",
    "tree.fit(train, target)\n",
    "dot_data = export_graphviz(tree,\n",
    "                out_file=None,\n",
    "                feature_names=train.columns,\n",
    "                class_names=['Yes', 'No'],\n",
    "                rounded=True,\n",
    "                filled=True)\n",
    "\n",
    "graph = graphviz.Source(dot_data)\n",
    "graph.render() \n",
    "graph"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "由上可知：\n",
    "- TotalWorkingYears/(MaritalStatus/OverTime)为分割点\n",
    "- gini系数\n",
    "- samples表示有多少样本满足当前分割条件\n",
    "- values总和为sample，其中Yes的样本数为988,No的样本数为188\n",
    "- class为哪个类在此节点中占大多数\n",
    "\n",
    "为了预测，一直沿着树的路径，直到达到叶节点为止。每个类别的概率是叶节点中每个类别的训练样本的分数，如左叶节点以$\\frac{12}{12+27}$，此属性可以通过DecisionTreeClassifier的predict_proba()函数获得。\n",
    "\n",
    "**Gini**\n",
    "$$G=1-\\sum_{k=1}^np_k^2$$\n",
    "其中$p_k$是类别$k$的比率，则\n",
    "$$G=1-(\\frac{12}{12+27})^2-(\\frac{27}{12+27})^2$$\n",
    "\n",
    "**Training**\n",
    "- Decision Tree Classifier\n",
    "CART算法把一个节点分为两个孩子节点，分割的标准包含一个特征$k$和一个与特征相关的阈值$t_k$，其中基于Gini的损失函数定义如下：\n",
    "$$J(k,t_k)=G_{left} \\times \\frac{m_{left}}{m} + G_{right} \\times \\frac{m_{right}}{m}$$\n",
    "其中$G_{right}$和$G_{left}$为左右孩子的Gini熵，$m_{left}$和$m_{right}$为左右孩子的样本数，则parent的节点数$m=m_{left}+m_{right}$。算法停止的条件可以为\n",
    "$$G_{left} \\times \\frac{m_{left}}{m} + G_{right} \\times \\frac{m_{right}}{m} \\geq G_{parent}$$\n",
    "或者使用树的最大深度作为停止条件。\n",
    "- CART算法是一种贪婪算法，它搜索的是一个局部最优决策。优化分裂时无需要考虑此时的分割树结构是否是全局最优的。\n",
    "- 在使用算法默认值的时候，一定的训练集情况下，可以得到确定的结构。但是有时候会显示一定的随机性，这是由于算法本身将会把输入的特征进行重排导致的。\n",
    "- 在sklearn中可以设置max_features设置为小于总的features数，在这种情况下，算法将随机采样特征子集，然后从中搜索最佳的分割点。\n",
    "- 处理连续值。在lightbgm中采用bins的算法，而一般的决策树处理连续变量却是只将连续变量做离散二值化。若特征有n个值${a_1, a_2, \\cdots, a_n}$，则有n-1个候选划分节点$T_a={\\frac{a_i+a_{i+1}}{2}|1\\leq i \\leq n-1}$，这样，可以在连续取值中选择一个最佳分裂节点。与离散属性不同，若当前结点划分属性为连续值，该属性还可以作为其后代节点的划分属性。\n",
    "- 缺失值处理。若样本$x$在划分属性$a$上的取值未知，则将$x$同时划入所有子节点，且样本权值在与属性值$a_v$对应的子节点中调整为$r_v \\cdot Ent(\\cdot)$，直观的看，就是让同一个样本以不同的概率划分到不同的子节点中去。其中在周志华书中提到，对于属性$a$,$l$表示缺失值所占的比例，$p_k$表示无缺失值样本中$k$类所占的比例，$r_v$则表示无缺失值样本在属性$a$上取值$a_v$的样本所占的比例，显然，$\\sum_{k=1}^{|y|}p_k=1, \\sum_{v=1}^V r_v=1$。依据上述定义则，信息增益的计算为\n",
    "$$Gain(D,a) = l \\times Gain(D,a)=l \\times (Ent(D)-\\sum_{v=1}^V r_v Ent(D_v))$$\n",
    "其中，$Ent(D)=-\\sum_{k=1}^{|y|}p_klog_2p_k$\n",
    "- Decision Tree Regression\n",
    "与分类不同的是，决策回归损失函数在基于特征$k$的情况下沿着阈值$t_k$进行分割，具体定义如下:\n",
    "$$J(k,t_k)=MSE_{left} \\times \\frac{m_{left}}{m} + MSE_{right} \\times \\frac{m_{right}}{m}$$\n",
    "其中，$MSE_{left(right)}=\\frac{1}{m_{left(right)}} \\sum_{i \\in left(right)}(\\bar{y}_{left(right)}- y^{(i)})^2$，$\\bar{y}_{left(right)} = \\frac{1}{m_{left(right)}} \\sum_{i \\in left(right)}y^{(i)}$。\n",
    "- 首先，算法按照特征$k$和特征阈值$t_k$划分左右孩子\n",
    "- 计算左右孩子的均值$\\bar{y}_{left(right)}$\n",
    "- 计算均方误差$MSE_{left}$和$MSE_{right}$\n",
    "- 遍历所有的$(k, t_k)$，使得$J(k, t_k)$最小的结构"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "model = DecisionTreeClassifier(random_state=2020)\n",
    "model_params = {\n",
    "    'max_features': [0.8, 1.0], \n",
    "    'max_depth': [8, 9, 10],\n",
    "    'min_samples_leaf':[30, 40, 50]\n",
    "} \n",
    "model_params = {\n",
    "    'max_features': [1.0], \n",
    "    'max_depth': [9],\n",
    "    'min_samples_leaf':[30]\n",
    "}\n",
    "start = time.time()\n",
    "best_score_dtc, best_params_dtc = get_model_result(model, model_params, 'dtc', None, feature_importance=True)\n",
    "print(time.time() - start)\n",
    "del model  "
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "**RandomForest**\n",
    "随机森林是决策树的集合，在随机森林中，每个决策树都在训练集的一个随机子集上训练，该子集通常是替换采样的，样本大小等于训练集大小。在分类的情况下，选择所有树中平均概率最高的类别。在回归的情况下，预测的值是所有树上的预测值的平均值。\n",
    "- 随机森林相对单个决策树而言，由于随机森林本身是求的平均值，其方差也会减小，通常更多情况下，方差减少的效果会补偿偏差的增大，因此会产生更好的模型。\n",
    "- sklearn中指出，特征分割的时候，rf只会在随机子集中进行搜索。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "model = RandomForestClassifier(random_state=2020)\n",
    "model_params = {\n",
    "    'n_estimators':[500],\n",
    "    'n_jobs':[-1],\n",
    "    'max_features': [0.5, 0.6], \n",
    "    'max_depth': [8, 9, 10],\n",
    "    'min_samples_leaf':[8, 10],\n",
    "#     'random_state':[2020]\n",
    "}\n",
    "model_params = {\n",
    "    'n_estimators':[200],\n",
    "    'max_features': [0.6], \n",
    "    'max_depth': [10],\n",
    "    'min_samples_leaf':[10],\n",
    "}\n",
    "start = time.time()\n",
    "best_score_rfc, best_params_rfc = get_model_result(model, model_params, 'rfc', feature_importance=True)\n",
    "print(time.time() - start)\n",
    "del model "
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "**CART**\n",
    "一棵树的基本定义如下：\n",
    "$$f(x)=\\sum_{j=1}^T w_jI(x \\in R_j)$$\n",
    "其中$w_j$是$j$-th叶子节点的权重，${R_j}(j=1,2,\\cdots, T)$称为数的结构，$T$表示树结构中的叶子节点数。$I(\\cdot)$是一个指示函数，若$x$属于叶子区域$R_j$，则$I$为1，反之同理。$f(x)$的目的就是在样本$x$落在区域$R_j$上时预测$w_j$。\n",
    "\n",
    "在训练此树之前，需要定义价值函数：\n",
    "$$J(f)=\\sum_{i=1}^n L(y_i, f(x_i))=\\sum_{i=1}^nL(y_i, \\sum_{j=1}^T w_jI(x\\in R_j))$$\n",
    "- 在一个固定结构下学习权重\n",
    "在给定区域$R_j$的情况下，即$I(\\cdot)$是确定的，则此时的损失函数可以写作为\n",
    "$$J(f)=\\sum_{j=1}^T \\sum_{x_j\\in R_j}L(y_i, w_j)$$\n",
    "最小化$J(f)$，其实就是对于每个区域$j$最小化$\\sum_{x_i \\in R_j} L(y_i, w_j)$，进一步，我们可以得到\n",
    "$$w_j^{*}=argmin_w\\sum_{x_i \\in R_j}L(y_i, w)$$\n",
    "当使用平方损失函数时，则$w_j^{*}=argmin\\sum_{x_i \\in R_j}(y_i -w)^2$，为了求解最优值，令一次导数为0，即$\\frac{\\partial \\sum_{x_i \\in R_j}(y_i - w)^2}{\\partial w}=0$，则\n",
    "$$w_j^{*}=\\frac{\\sum_{x_i \\in R_j}y_i}{n_j}$$\n",
    "其中，$n_j$表示在区域（叶子）$R_j$的样本数，当使用平方损失时，在区域$R_j$中，评估权重$w_j$为此区域的平均值，当使用$L(y_i, w)=|y_i-w|$即绝对值损失时，此时的$w_j^*$为区域$R_j$的中位数。\n",
    "- 在已知权重下学习结构\n",
    "若权重$w_j^{*}$被给出，则价值函数能够被写为\n",
    "$$J(f)=\\sum_{j=1}^T \\sum_{x_j \\in R_j} \\sum_{x_i \\in R_j}L(y_i, w_j^{*})=\\sum_{j=1}^TL_j^{*}$$\n",
    "其中$L_j^*$为叶子节点$j$的累加损失。若训练的树结构在叶子节点$k$处，进行分割，则此时的损失为\n",
    "$$J_{before}=\\sum_{j\\neq k}L_j^{*} + L_k^{*}$$\n",
    "按照节点$k$进行分割，可以得到\n",
    "$$J_{after}=\\sum_{j\\neq k} L_j^* + L_L^* + L_L^*$$\n",
    "则分割节点的增益为\n",
    "$$Gain = J_{before}- J_{after}= L_k^{*} - (L_L^*+L_R^*)$$\n",
    "增益越大，代价函数$J$的下降越大。 在树训练的每个步骤中，计算每个可能的分割（节点分割，特征分割，特征阈值）的增益，然后选择使增益最大化的分割。\n",
    "由于节点$k$已经确定分割，则$L_k^{*}$是一个常数，此时为了Gain最大化，即最小化$(L_L^*+L_R^*)$。\n",
    "在平方损失函数下，则可以得到\n",
    "$$L_L^*+L_R^*=\\sum_{x_i \\in left} (y_i - \\bar{y}_{left})^2 + \\sum_{x_i \\in right}(y_i -\\bar{y}_{right})^2=n_{left}MSE_{left}+n_{right}MSE_{right}$$\n",
    "其中$n_{left(right)}$是left(right)节点中训练样本的数量。 $\\bar{y}_{left(right)}$只是平方损失下左（右）节点的估计权重，如上一步所示。 要最小化的数量与我们在决策树回归部分中定义的成本函数$J(k, t_K)$成比例。 也就是说，在该部分中定义的所有内容仅是将普通CART算法应用于平方损失的特定情况。\n",
    "- 剪枝"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Ensemble Learning\n",
    "\n",
    "**Ensemble with different classifiers**\n",
    "- voting"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# rfc = RandomForestClassifier(best_params_rfc)\n",
    "# svm = SVC(best_params_svc)\n",
    "# lr = LogisticRegression(best_params_logisticregression)\n",
    "# best_score_voting, best_params_voting = get_model_result(VotingClassifier(estimators = [('rf',rfc), ('svm',svm), ('log', lr)], voting='soft'), 'voting', StandardScaler(), gridsearch=False)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "**Ensemble with same classifiers**\n",
    "- Randomly sample trainset samples for each classifier\n",
    "    - bagging, random sampling of trainset\n",
    "    - pasting, the method of randomly sampling training instances without replacement\n",
    "- Randomly sample features that are used to train each classifier\n",
    "- Specifically for decision trees, using random threshold for each feature"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "best_score_bagging, best_params_bagging = get_model_result(BaggingClassifier(LogisticRegression(**best_params_lgr),n_estimators=500, random_state=2020), 'bagging', StandardScaler(), gridsearch=False)\n",
    "best_score_pasting, best_params_pasting = get_model_result(BaggingClassifier(LogisticRegression(**best_params_lgr),n_estimators=500, bootstrap_features=True, max_features=1.0, random_state=2020), 'pasting', StandardScaler(), gridsearch=False)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "model = ExtraTreesClassifier(random_state=2020)\n",
    "model_params = {\n",
    "    'n_estimators':[500],\n",
    "    'n_jobs':[-1], \n",
    "    'max_features': [0.5,0.6,0.7,0.8,0.9,1.0], \n",
    "    'max_depth': [10,11,12,13,14],\n",
    "    'min_samples_leaf':[1,10,100],\n",
    "#     'random_state':[0]\n",
    "} \n",
    "model_params = {\n",
    "    'n_estimators':[500],\n",
    "    'max_features': [0.5], \n",
    "    'max_depth': [12],\n",
    "    'min_samples_leaf':[10],  \n",
    "}\n",
    "start = time.time()\n",
    "best_score_etf, best_params_etf = get_model_result(model, model_params, 'etc',feature_importance=True)\n",
    "print(time.time() - start)\n",
    "del model"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Boosting"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "model = xgb.XGBClassifier(random_state=2020,tree_method='gpu_hist', silent=1, booster='gbtree', objective='binary:logistic')\n",
    "model_params = {\n",
    "    'booster':['gbtree'],\n",
    "    'colsample_bytree': [0.5, 0.8],\n",
    "    'subsample': [0,3, 0.5],\n",
    "    'learning_rate': [0.075, 0.01],\n",
    "    'objective': ['binary:logistic'],\n",
    "    'max_depth': [ 7, 8, 9],\n",
    "    'num_parallel_tree': [0.1, 1, 10],\n",
    "    'min_child_weight': [0.2, 0.8],\n",
    "}\n",
    "model_params = {\n",
    "    'colsample_bytree': [0.5],\n",
    "    'subsample': [0.5],\n",
    "    'learning_rate': [0.075],\n",
    "    'max_depth': [9],\n",
    "    'num_parallel_tree': [1],\n",
    "    'min_child_weight': [0.2],\n",
    "}\n",
    "\n",
    "start = time.time()\n",
    "best_score_xgboost, best_params_xgboost = get_model_result(model, model_params, 'xgboost')\n",
    "print(time.time()-start)\n",
    "del model  "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "model = lgb.LGBMClassifier(random_state=2020, device='gpu', gpu_platform_id=0, gpu_device_id=0, silent=1)\n",
    "model_params = {\n",
    "    'n_estimators': [200, 300, 400],\n",
    "    'learning_rate': [0.01, 0.1, 0.5],\n",
    "    'num_leaves':[10,100,400],\n",
    "    'colsample_bytree':[0.5,0.8, 1.0],\n",
    "    'subsample':[0.3,0.5,0.9],\n",
    "    'max_depth':[7, 10, 15],\n",
    "    'reg_alpha':[0.01, 0.2, 0.5],\n",
    "    'reg_lambda':[0.01, 0.3, 0.8],\n",
    "    'min_split_gain':[0.01, 0.1],\n",
    "    'min_child_weight': [1,2,4],\n",
    "}\n",
    "model_params = {\n",
    "    'n_estimators': [600],\n",
    "    'learning_rate': [0.1],\n",
    "    'num_leaves':[120],\n",
    "    'colsample_bytree':[0.5],\n",
    "    'subsample':[0.9],\n",
    "    'max_depth':[15],\n",
    "    'reg_alpha':[0.01, 0.2],\n",
    "    'reg_lambda':[0.4],\n",
    "    'min_split_gain':[0.01],\n",
    "}\n",
    "start = time.time()\n",
    "best_score_lightgbm, best_params_lightgbm = get_model_result(model, model_params, 'lightgbm')\n",
    "print(time.time()-start)\n",
    "del model "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "model = cat.CatBoostClassifier(random_state=2020, task_type='GPU', allow_writing_files=False, silent=True, eval_metric='AUC', bootstrap_type='Bernoulli')\n",
    "model_params = {\n",
    "    'iterations': [200,600],\n",
    "    'learning_rate': [0.05, 0.3, 0.5],\n",
    "    'depth': [6, 7, 9, 10],\n",
    "    'l2_leaf_reg': [30, 40, 50],\n",
    "    'bootstrap_type': ['Bernoulli'],\n",
    "    'subsample': [0.5, 0.7, 1.0],\n",
    "    'scale_pos_weight': [4, 5, 10],\n",
    "}\n",
    "model_params = {\n",
    "    'iterations': [600],\n",
    "    'learning_rate': [ 0.5],\n",
    "    'depth': [10],\n",
    "    'l2_leaf_reg': [40],\n",
    "    'subsample': [0.5],\n",
    "    'scale_pos_weight': [8],\n",
    "}\n",
    "start = time.time()\n",
    "best_score_catboost, best_params_catboost = get_model_result(model, model_params, 'catboost')\n",
    "print(time.time() - start)\n",
    "del model "
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Different General ML"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "auc_value = [best_score_ridge, best_score_elasticnet, best_score_lgr, best_score_dtc, best_score_rfc, best_score_etf, best_score_xgboost, best_score_lightgbm, best_score_catboost, best_score_bagging, best_score_pasting]\n",
    "auc_label = ['ridge', 'elasticnet', 'lgr', 'dtc', 'rfc', 'etf', 'xgboost', 'lightgbm', 'catboost', 'bagging', 'pasting']\n",
    "# auc_time = [best_score_ridge, best_score_elasticnet, best_score_lgr, best_score_dtc, best_score_rfc, best_score_etf, best_score_xgboost, best_score_lightgbm, best_score_catboost]\n",
    "figure, ax = plt.subplots(figsize=(16,4))\n",
    "\n",
    "plt.bar(range(len(auc_value)), auc_value, tick_label=auc_label)\n",
    "for tick in ax.get_xticklabels():\n",
    "    tick.set_rotation(90)\n",
    "plt.title('Different AUC in Dataset by ML')\n",
    "plt.show()"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.5rc1"
  },
  "toc": {
   "base_numbering": 1,
   "nav_menu": {},
   "number_sections": true,
   "sideBar": true,
   "skip_h1_title": false,
   "title_cell": "Table of Contents",
   "title_sidebar": "Contents",
   "toc_cell": false,
   "toc_position": {},
   "toc_section_display": true,
   "toc_window_display": false
  }
 },
 "nbformat": 4,
 "nbformat_minor": 1
}
