UsmanGohar committed on
Commit
986d325
1 Parent(s): e609040

Upload 318 files

This view is limited to 50 files because it contains too many changes. See raw diff
Files changed (50)
  1. .gitattributes +2 -0
  2. AdultNoteBook/Data/adult.csv +0 -0
  3. AdultNoteBook/Kernels/AdaBoost/.ipynb_checkpoints/0-income-prediction-84-369-accuracy-checkpoint.ipynb +0 -0
  4. AdultNoteBook/Kernels/AdaBoost/.ipynb_checkpoints/1-adult-income-checkpoint.ipynb +3 -0
  5. AdultNoteBook/Kernels/AdaBoost/.ipynb_checkpoints/2-boosting-algorithms-model-for-adult-census-income-checkpoint.ipynb +2165 -0
  6. AdultNoteBook/Kernels/AdaBoost/.ipynb_checkpoints/3-income-prediction-xgbclassifier-auc-0-926-checkpoint.ipynb +0 -0
  7. AdultNoteBook/Kernels/AdaBoost/.ipynb_checkpoints/4-deep-analysis-and-90-accuracy-checkpoint.ipynb +0 -0
  8. AdultNoteBook/Kernels/AdaBoost/.ipynb_checkpoints/5-income-classification-using-meta-learning-checkpoint.ipynb +0 -0
  9. AdultNoteBook/Kernels/AdaBoost/.ipynb_checkpoints/6-income-prediction-eda-to-visuals-0-98-auc-checkpoint.ipynb +0 -0
  10. AdultNoteBook/Kernels/AdaBoost/.ipynb_checkpoints/7-adult-census-income-eda-and-prediction-87-35-checkpoint.ipynb +0 -0
  11. AdultNoteBook/Kernels/AdaBoost/1-adult-income.ipynb +3 -0
  12. AdultNoteBook/Kernels/AdaBoost/1-adult-income.py +638 -0
  13. AdultNoteBook/Kernels/AdaBoost/2-boosting-algorithms-model-for-adult-census-income.ipynb +2199 -0
  14. AdultNoteBook/Kernels/AdaBoost/2-boosting-algorithms-model-for-adult-census-income.py +276 -0
  15. AdultNoteBook/Kernels/AdaBoost/4-deep-analysis-and-90-accuracy.ipynb +0 -0
  16. AdultNoteBook/Kernels/AdaBoost/4-deep-analysis-and-90-accuracy.py +994 -0
  17. AdultNoteBook/Kernels/AdaBoost/5-income-classification-using-meta-learning.ipynb +0 -0
  18. AdultNoteBook/Kernels/AdaBoost/5-income-classification-using-meta-learning.py +632 -0
  19. AdultNoteBook/Kernels/AdaBoost/6-income-prediction-eda-to-visuals-0-98-auc.ipynb +0 -0
  20. AdultNoteBook/Kernels/AdaBoost/6-income-prediction-eda-to-visuals-0-98-auc.py +682 -0
  21. AdultNoteBook/Kernels/AdaBoost/7-adult-census-income-eda-and-prediction-87-35.ipynb +0 -0
  22. AdultNoteBook/Kernels/AdaBoost/7-adult-census-income-eda-and-prediction-87-35.py +451 -0
  23. AdultNoteBook/Kernels/Adult_Fairness.xlsx +0 -0
  24. AdultNoteBook/Kernels/ExtraTrees/.ipynb_checkpoints/1-income-prediction-84-369-accuracy-checkpoint.ipynb +0 -0
  25. AdultNoteBook/Kernels/ExtraTrees/.ipynb_checkpoints/10-accurate-predictions-with-20-test-data-checkpoint.ipynb +0 -0
  26. AdultNoteBook/Kernels/ExtraTrees/.ipynb_checkpoints/11-adult-checkpoint.ipynb +0 -0
  27. AdultNoteBook/Kernels/ExtraTrees/.ipynb_checkpoints/12-compare-all-the-classification-models-checkpoint.ipynb +0 -0
  28. AdultNoteBook/Kernels/ExtraTrees/.ipynb_checkpoints/2-multiple-ml-techniques-and-analysis-of-dataset-checkpoint.ipynb +0 -0
  29. AdultNoteBook/Kernels/ExtraTrees/.ipynb_checkpoints/3-income-classification-using-meta-learning-checkpoint.ipynb +0 -0
  30. AdultNoteBook/Kernels/ExtraTrees/.ipynb_checkpoints/4-a-simple-knn-application-checkpoint.ipynb +0 -0
  31. AdultNoteBook/Kernels/ExtraTrees/.ipynb_checkpoints/5-adult-census-income-eda-and-prediction-87-35-checkpoint.ipynb +0 -0
  32. AdultNoteBook/Kernels/ExtraTrees/1-income-prediction-84-369-accuracy.ipynb +0 -0
  33. AdultNoteBook/Kernels/ExtraTrees/1-income-prediction-84-369-accuracy.py +340 -0
  34. AdultNoteBook/Kernels/ExtraTrees/10-accurate-predictions-with-20-test-data.ipynb +0 -0
  35. AdultNoteBook/Kernels/ExtraTrees/10-accurate-predictions-with-20-test-data.py +195 -0
  36. AdultNoteBook/Kernels/ExtraTrees/12-compare-all-the-classification-models.ipynb +0 -0
  37. AdultNoteBook/Kernels/ExtraTrees/12-compare-all-the-classification-models.py +1690 -0
  38. AdultNoteBook/Kernels/ExtraTrees/3-income-classification-using-meta-learning.ipynb +0 -0
  39. AdultNoteBook/Kernels/ExtraTrees/3-income-classification-using-meta-learning.py +632 -0
  40. AdultNoteBook/Kernels/ExtraTrees/4-a-simple-knn-application.ipynb +0 -0
  41. AdultNoteBook/Kernels/ExtraTrees/4-a-simple-knn-application.py +203 -0
  42. AdultNoteBook/Kernels/ExtraTrees/5-adult-census-income-eda-and-prediction-87-35.ipynb +0 -0
  43. AdultNoteBook/Kernels/ExtraTrees/5-adult-census-income-eda-and-prediction-87-35.py +451 -0
  44. AdultNoteBook/Kernels/GBC/.ipynb_checkpoints/0-income-prediction-84-369-accuracy-checkpoint.ipynb +1164 -0
  45. AdultNoteBook/Kernels/GBC/.ipynb_checkpoints/1-multiple-ml-techniques-and-analysis-of-dataset-checkpoint.ipynb +1476 -0
  46. AdultNoteBook/Kernels/GBC/.ipynb_checkpoints/10-adultincomeprediction-checkpoint.ipynb +0 -0
  47. AdultNoteBook/Kernels/GBC/.ipynb_checkpoints/11-ml-adult-income-checkpoint.ipynb +1050 -0
  48. AdultNoteBook/Kernels/GBC/.ipynb_checkpoints/2-income-census-prediction-gradient-boosting-algos-checkpoint.ipynb +0 -0
  49. AdultNoteBook/Kernels/GBC/.ipynb_checkpoints/3-income-prediction-xgbclassifier-auc-0-926-checkpoint.ipynb +0 -0
  50. AdultNoteBook/Kernels/GBC/.ipynb_checkpoints/4-deep-analysis-and-90-accuracy-checkpoint.ipynb +0 -0
.gitattributes CHANGED
@@ -52,3 +52,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.jpg filter=lfs diff=lfs merge=lfs -text
  *.jpeg filter=lfs diff=lfs merge=lfs -text
  *.webp filter=lfs diff=lfs merge=lfs -text
+ AdultNoteBook/Kernels/AdaBoost/.ipynb_checkpoints/1-adult-income-checkpoint.ipynb filter=lfs diff=lfs merge=lfs -text
+ AdultNoteBook/Kernels/AdaBoost/1-adult-income.ipynb filter=lfs diff=lfs merge=lfs -text
AdultNoteBook/Data/adult.csv ADDED
The diff for this file is too large to render. See raw diff
 
AdultNoteBook/Kernels/AdaBoost/.ipynb_checkpoints/0-income-prediction-84-369-accuracy-checkpoint.ipynb ADDED
The diff for this file is too large to render. See raw diff
 
AdultNoteBook/Kernels/AdaBoost/.ipynb_checkpoints/1-adult-income-checkpoint.ipynb ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:eda7055d163932c8aba0b99ad0452eb0bf5e43234a9bdc3ca785e7fea7620a3c
+ size 17587341
AdultNoteBook/Kernels/AdaBoost/.ipynb_checkpoints/2-boosting-algorithms-model-for-adult-census-income-checkpoint.ipynb ADDED
@@ -0,0 +1,2165 @@
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "code",
5
+ "execution_count": 1,
6
+ "metadata": {
7
+ "execution": {
8
+ "iopub.execute_input": "2021-03-19T05:38:00.573511Z",
9
+ "iopub.status.busy": "2021-03-19T05:38:00.572716Z",
10
+ "iopub.status.idle": "2021-03-19T05:38:02.804511Z",
11
+ "shell.execute_reply": "2021-03-19T05:38:02.803821Z"
12
+ },
13
+ "papermill": {
14
+ "duration": 2.263268,
15
+ "end_time": "2021-03-19T05:38:02.804744",
16
+ "exception": false,
17
+ "start_time": "2021-03-19T05:38:00.541476",
18
+ "status": "completed"
19
+ },
20
+ "tags": []
21
+ },
22
+ "outputs": [],
23
+ "source": [
24
+ "import pandas as pd\n",
25
+ "import numpy as np\n",
26
+ "import seaborn as sns\n",
27
+ "import matplotlib.pyplot as plt\n",
28
+ "import warnings\n",
29
+ "warnings.filterwarnings(\"ignore\")\n",
30
+ "\n",
31
+ "from sklearn.pipeline import Pipeline\n",
32
+ "from sklearn.preprocessing import OneHotEncoder\n",
33
+ "import category_encoders as ce\n",
34
+ "from sklearn.impute import SimpleImputer\n",
35
+ "from sklearn.compose import ColumnTransformer\n",
36
+ "\n",
37
+ "from sklearn.model_selection import train_test_split, RandomizedSearchCV, StratifiedKFold, cross_val_score\n",
38
+ "\n",
39
+ "from sklearn.tree import DecisionTreeClassifier\n",
40
+ "from sklearn.ensemble import AdaBoostClassifier, GradientBoostingClassifier\n",
41
+ "from xgboost.sklearn import XGBClassifier\n",
42
+ "from sklearn.metrics import classification_report, f1_score, plot_roc_curve"
43
+ ]
44
+ },
45
+ {
46
+ "cell_type": "code",
47
+ "execution_count": 2,
48
+ "metadata": {},
49
+ "outputs": [],
50
+ "source": [
51
+ "from aif360.datasets import StandardDataset\n",
52
+ "from aif360.metrics import BinaryLabelDatasetMetric, ClassificationMetric\n",
53
+ "import matplotlib.patches as patches\n",
54
+ "from aif360.algorithms.preprocessing import Reweighing\n",
55
+ "#from packages import *\n",
56
+ "#from ml_fairness import *\n",
57
+ "import matplotlib.pyplot as plt\n",
58
+ "import seaborn as sns\n",
59
+ "\n",
60
+ "\n",
61
+ "\n",
62
+ "from IPython.display import Markdown, display"
63
+ ]
64
+ },
65
+ {
66
+ "cell_type": "code",
67
+ "execution_count": 3,
68
+ "metadata": {
69
+ "execution": {
70
+ "iopub.execute_input": "2021-03-19T05:38:02.853355Z",
71
+ "iopub.status.busy": "2021-03-19T05:38:02.852585Z",
72
+ "iopub.status.idle": "2021-03-19T05:38:03.027399Z",
73
+ "shell.execute_reply": "2021-03-19T05:38:03.026828Z"
74
+ },
75
+ "papermill": {
76
+ "duration": 0.200945,
77
+ "end_time": "2021-03-19T05:38:03.027547",
78
+ "exception": false,
79
+ "start_time": "2021-03-19T05:38:02.826602",
80
+ "status": "completed"
81
+ },
82
+ "tags": []
83
+ },
84
+ "outputs": [
85
+ {
86
+ "data": {
87
+ "text/html": [
88
+ "<div>\n",
89
+ "<style scoped>\n",
90
+ " .dataframe tbody tr th:only-of-type {\n",
91
+ " vertical-align: middle;\n",
92
+ " }\n",
93
+ "\n",
94
+ " .dataframe tbody tr th {\n",
95
+ " vertical-align: top;\n",
96
+ " }\n",
97
+ "\n",
98
+ " .dataframe thead th {\n",
99
+ " text-align: right;\n",
100
+ " }\n",
101
+ "</style>\n",
102
+ "<table border=\"1\" class=\"dataframe\">\n",
103
+ " <thead>\n",
104
+ " <tr style=\"text-align: right;\">\n",
105
+ " <th></th>\n",
106
+ " <th>age</th>\n",
107
+ " <th>workclass</th>\n",
108
+ " <th>fnlwgt</th>\n",
109
+ " <th>education</th>\n",
110
+ " <th>education.num</th>\n",
111
+ " <th>marital.status</th>\n",
112
+ " <th>occupation</th>\n",
113
+ " <th>relationship</th>\n",
114
+ " <th>race</th>\n",
115
+ " <th>sex</th>\n",
116
+ " <th>capital.gain</th>\n",
117
+ " <th>capital.loss</th>\n",
118
+ " <th>hours.per.week</th>\n",
119
+ " <th>native.country</th>\n",
120
+ " <th>income</th>\n",
121
+ " </tr>\n",
122
+ " </thead>\n",
123
+ " <tbody>\n",
124
+ " <tr>\n",
125
+ " <th>24505</th>\n",
126
+ " <td>51</td>\n",
127
+ " <td>Private</td>\n",
128
+ " <td>173987</td>\n",
129
+ " <td>9th</td>\n",
130
+ " <td>5</td>\n",
131
+ " <td>Married-civ-spouse</td>\n",
132
+ " <td>Sales</td>\n",
133
+ " <td>Husband</td>\n",
134
+ " <td>White</td>\n",
135
+ " <td>Male</td>\n",
136
+ " <td>0</td>\n",
137
+ " <td>0</td>\n",
138
+ " <td>40</td>\n",
139
+ " <td>United-States</td>\n",
140
+ " <td>&lt;=50K</td>\n",
141
+ " </tr>\n",
142
+ " </tbody>\n",
143
+ "</table>\n",
144
+ "</div>"
145
+ ],
146
+ "text/plain": [
147
+ " age workclass fnlwgt education education.num marital.status \\\n",
148
+ "24505 51 Private 173987 9th 5 Married-civ-spouse \n",
149
+ "\n",
150
+ " occupation relationship race sex capital.gain capital.loss \\\n",
151
+ "24505 Sales Husband White Male 0 0 \n",
152
+ "\n",
153
+ " hours.per.week native.country income \n",
154
+ "24505 40 United-States <=50K "
155
+ ]
156
+ },
157
+ "execution_count": 3,
158
+ "metadata": {},
159
+ "output_type": "execute_result"
160
+ }
161
+ ],
162
+ "source": [
163
+ "adult = pd.read_csv('../../Data/adult.csv')\n",
164
+ "adult.sample()"
165
+ ]
166
+ },
167
+ {
168
+ "cell_type": "code",
169
+ "execution_count": 4,
170
+ "metadata": {
171
+ "execution": {
172
+ "iopub.execute_input": "2021-03-19T05:38:03.108677Z",
173
+ "iopub.status.busy": "2021-03-19T05:38:03.108008Z",
174
+ "iopub.status.idle": "2021-03-19T05:38:03.125920Z",
175
+ "shell.execute_reply": "2021-03-19T05:38:03.125335Z"
176
+ },
177
+ "papermill": {
178
+ "duration": 0.076114,
179
+ "end_time": "2021-03-19T05:38:03.126077",
180
+ "exception": false,
181
+ "start_time": "2021-03-19T05:38:03.049963",
182
+ "status": "completed"
183
+ },
184
+ "tags": []
185
+ },
186
+ "outputs": [
187
+ {
188
+ "name": "stdout",
189
+ "output_type": "stream",
190
+ "text": [
191
+ "<class 'pandas.core.frame.DataFrame'>\n",
192
+ "RangeIndex: 32561 entries, 0 to 32560\n",
193
+ "Data columns (total 15 columns):\n",
194
+ " # Column Non-Null Count Dtype \n",
195
+ "--- ------ -------------- ----- \n",
196
+ " 0 age 32561 non-null int64 \n",
197
+ " 1 workclass 32561 non-null object\n",
198
+ " 2 fnlwgt 32561 non-null int64 \n",
199
+ " 3 education 32561 non-null object\n",
200
+ " 4 education.num 32561 non-null int64 \n",
201
+ " 5 marital.status 32561 non-null object\n",
202
+ " 6 occupation 32561 non-null object\n",
203
+ " 7 relationship 32561 non-null object\n",
204
+ " 8 race 32561 non-null object\n",
205
+ " 9 sex 32561 non-null object\n",
206
+ " 10 capital.gain 32561 non-null int64 \n",
207
+ " 11 capital.loss 32561 non-null int64 \n",
208
+ " 12 hours.per.week 32561 non-null int64 \n",
209
+ " 13 native.country 32561 non-null object\n",
210
+ " 14 income 32561 non-null object\n",
211
+ "dtypes: int64(6), object(9)\n",
212
+ "memory usage: 3.7+ MB\n"
213
+ ]
214
+ }
215
+ ],
216
+ "source": [
217
+ "adult.info()"
218
+ ]
219
+ },
220
+ {
221
+ "cell_type": "markdown",
222
+ "metadata": {
223
+ "papermill": {
224
+ "duration": 0.022213,
225
+ "end_time": "2021-03-19T05:38:03.171196",
226
+ "exception": false,
227
+ "start_time": "2021-03-19T05:38:03.148983",
228
+ "status": "completed"
229
+ },
230
+ "tags": []
231
+ },
232
+ "source": [
233
+ "*In this info detail, indicate that there is no missing value at all. But if you see the whole data carefully, you will find **missing value with '?'**.*"
234
+ ]
235
+ },
236
+ {
237
+ "cell_type": "markdown",
238
+ "metadata": {
239
+ "papermill": {
240
+ "duration": 0.023501,
241
+ "end_time": "2021-03-19T05:38:03.217098",
242
+ "exception": false,
243
+ "start_time": "2021-03-19T05:38:03.193597",
244
+ "status": "completed"
245
+ },
246
+ "tags": []
247
+ },
248
+ "source": [
249
+ "# PreProcessing"
250
+ ]
251
+ },
252
+ {
253
+ "cell_type": "markdown",
254
+ "metadata": {
255
+ "papermill": {
256
+ "duration": 0.022005,
257
+ "end_time": "2021-03-19T05:38:03.261561",
258
+ "exception": false,
259
+ "start_time": "2021-03-19T05:38:03.239556",
260
+ "status": "completed"
261
+ },
262
+ "tags": []
263
+ },
264
+ "source": [
265
+ "*Preprocessing scheme:*\n",
266
+ "* Encode all columns\n",
267
+ "* Drop education because it's already encoded on education.num\n",
268
+ "* Drop fnlwgt because it's unique"
269
+ ]
270
+ },
271
+ {
272
+ "cell_type": "markdown",
273
+ "metadata": {
274
+ "papermill": {
275
+ "duration": 0.022184,
276
+ "end_time": "2021-03-19T05:38:03.306152",
277
+ "exception": false,
278
+ "start_time": "2021-03-19T05:38:03.283968",
279
+ "status": "completed"
280
+ },
281
+ "tags": []
282
+ },
283
+ "source": [
284
+ "*Handling Missing Value In Pipeline*"
285
+ ]
286
+ },
287
+ {
288
+ "cell_type": "code",
289
+ "execution_count": 5,
290
+ "metadata": {
291
+ "execution": {
292
+ "iopub.execute_input": "2021-03-19T05:38:03.356440Z",
293
+ "iopub.status.busy": "2021-03-19T05:38:03.355434Z",
294
+ "iopub.status.idle": "2021-03-19T05:38:03.361557Z",
295
+ "shell.execute_reply": "2021-03-19T05:38:03.362164Z"
296
+ },
297
+ "papermill": {
298
+ "duration": 0.033769,
299
+ "end_time": "2021-03-19T05:38:03.362348",
300
+ "exception": false,
301
+ "start_time": "2021-03-19T05:38:03.328579",
302
+ "status": "completed"
303
+ },
304
+ "tags": []
305
+ },
306
+ "outputs": [],
307
+ "source": [
308
+ "binary_encoder_pipe = Pipeline([\n",
309
+ " ('imputer', SimpleImputer(strategy = 'constant', fill_value = 'NC', missing_values = '?')),\n",
310
+ " ('binary', ce.BinaryEncoder())\n",
311
+ "])\n",
312
+ "\n",
313
+ "transformer = ColumnTransformer([\n",
314
+ " ('one hot', OneHotEncoder(drop = 'first'), ['relationship', 'race', 'sex']),\n",
315
+ " ('binary', binary_encoder_pipe, ['workclass', 'marital.status', 'occupation', 'native.country'])],\n",
316
+ " remainder = 'passthrough')"
317
+ ]
318
+ },
319
+ {
320
+ "cell_type": "markdown",
321
+ "metadata": {
322
+ "papermill": {
323
+ "duration": 0.022265,
324
+ "end_time": "2021-03-19T05:38:03.407584",
325
+ "exception": false,
326
+ "start_time": "2021-03-19T05:38:03.385319",
327
+ "status": "completed"
328
+ },
329
+ "tags": []
330
+ },
331
+ "source": [
332
+ "*Splitting Data*"
333
+ ]
334
+ },
335
+ {
336
+ "cell_type": "code",
337
+ "execution_count": 6,
338
+ "metadata": {
339
+ "execution": {
340
+ "iopub.execute_input": "2021-03-19T05:38:03.456571Z",
341
+ "iopub.status.busy": "2021-03-19T05:38:03.455612Z",
342
+ "iopub.status.idle": "2021-03-19T05:38:03.470867Z",
343
+ "shell.execute_reply": "2021-03-19T05:38:03.471412Z"
344
+ },
345
+ "papermill": {
346
+ "duration": 0.041355,
347
+ "end_time": "2021-03-19T05:38:03.471590",
348
+ "exception": false,
349
+ "start_time": "2021-03-19T05:38:03.430235",
350
+ "status": "completed"
351
+ },
352
+ "tags": []
353
+ },
354
+ "outputs": [
355
+ {
356
+ "data": {
357
+ "text/plain": [
358
+ "<=50K 24720\n",
359
+ ">50K 7841\n",
360
+ "Name: income, dtype: int64"
361
+ ]
362
+ },
363
+ "execution_count": 6,
364
+ "metadata": {},
365
+ "output_type": "execute_result"
366
+ }
367
+ ],
368
+ "source": [
369
+ "adult['income'].value_counts()"
370
+ ]
371
+ },
372
+ {
373
+ "cell_type": "markdown",
374
+ "metadata": {
375
+ "papermill": {
376
+ "duration": 0.023083,
377
+ "end_time": "2021-03-19T05:38:03.517599",
378
+ "exception": false,
379
+ "start_time": "2021-03-19T05:38:03.494516",
380
+ "status": "completed"
381
+ },
382
+ "tags": []
383
+ },
384
+ "source": [
385
+ "Income is the target data and **indicated with imbalance data**. I define **income with 1 if income is >50K and 0 if income is <50K**."
386
+ ]
387
+ },
388
+ {
389
+ "cell_type": "code",
390
+ "execution_count": 7,
391
+ "metadata": {
392
+ "execution": {
393
+ "iopub.execute_input": "2021-03-19T05:38:03.568392Z",
394
+ "iopub.status.busy": "2021-03-19T05:38:03.567403Z",
395
+ "iopub.status.idle": "2021-03-19T05:38:03.580593Z",
396
+ "shell.execute_reply": "2021-03-19T05:38:03.581142Z"
397
+ },
398
+ "papermill": {
399
+ "duration": 0.04025,
400
+ "end_time": "2021-03-19T05:38:03.581329",
401
+ "exception": false,
402
+ "start_time": "2021-03-19T05:38:03.541079",
403
+ "status": "completed"
404
+ },
405
+ "tags": []
406
+ },
407
+ "outputs": [],
408
+ "source": [
409
+ "X = adult.drop(['fnlwgt', 'education', 'income'], axis = 1)\n",
410
+ "y = np.where(adult['income'] == '>50K', 1, 0)"
411
+ ]
412
+ },
413
+ {
414
+ "cell_type": "code",
415
+ "execution_count": 8,
416
+ "metadata": {
417
+ "execution": {
418
+ "iopub.execute_input": "2021-03-19T05:38:03.632000Z",
419
+ "iopub.status.busy": "2021-03-19T05:38:03.631007Z",
420
+ "iopub.status.idle": "2021-03-19T05:38:03.637002Z",
421
+ "shell.execute_reply": "2021-03-19T05:38:03.637530Z"
422
+ },
423
+ "papermill": {
424
+ "duration": 0.033104,
425
+ "end_time": "2021-03-19T05:38:03.637822",
426
+ "exception": false,
427
+ "start_time": "2021-03-19T05:38:03.604718",
428
+ "status": "completed"
429
+ },
430
+ "tags": []
431
+ },
432
+ "outputs": [
433
+ {
434
+ "data": {
435
+ "text/plain": [
436
+ "(32561, 12)"
437
+ ]
438
+ },
439
+ "execution_count": 8,
440
+ "metadata": {},
441
+ "output_type": "execute_result"
442
+ }
443
+ ],
444
+ "source": [
445
+ "X.shape"
446
+ ]
447
+ },
448
+ {
449
+ "cell_type": "code",
450
+ "execution_count": 9,
451
+ "metadata": {
452
+ "execution": {
453
+ "iopub.execute_input": "2021-03-19T05:38:03.689040Z",
454
+ "iopub.status.busy": "2021-03-19T05:38:03.688033Z",
455
+ "iopub.status.idle": "2021-03-19T05:38:03.726523Z",
456
+ "shell.execute_reply": "2021-03-19T05:38:03.725847Z"
457
+ },
458
+ "papermill": {
459
+ "duration": 0.065514,
460
+ "end_time": "2021-03-19T05:38:03.726708",
461
+ "exception": false,
462
+ "start_time": "2021-03-19T05:38:03.661194",
463
+ "status": "completed"
464
+ },
465
+ "tags": []
466
+ },
467
+ "outputs": [],
468
+ "source": [
469
+ "X_train, X_test, y_train, y_test = train_test_split(X, y, stratify = y,\n",
470
+ " test_size = 0.3, random_state = 1212)"
471
+ ]
472
+ },
473
+ {
474
+ "cell_type": "markdown",
475
+ "metadata": {
476
+ "papermill": {
477
+ "duration": 0.023919,
478
+ "end_time": "2021-03-19T05:38:03.775037",
479
+ "exception": false,
480
+ "start_time": "2021-03-19T05:38:03.751118",
481
+ "status": "completed"
482
+ },
483
+ "tags": []
484
+ },
485
+ "source": [
486
+ "I use 0.3 as default score for test_size and X.shape for random_state so the data will be devided equally."
487
+ ]
488
+ },
489
+ {
490
+ "cell_type": "markdown",
491
+ "metadata": {
492
+ "papermill": {
493
+ "duration": 0.023793,
494
+ "end_time": "2021-03-19T05:38:03.822815",
495
+ "exception": false,
496
+ "start_time": "2021-03-19T05:38:03.799022",
497
+ "status": "completed"
498
+ },
499
+ "tags": []
500
+ },
501
+ "source": [
502
+ "# Define Model"
503
+ ]
504
+ },
505
+ {
506
+ "cell_type": "markdown",
507
+ "metadata": {
508
+ "papermill": {
509
+ "duration": 0.023731,
510
+ "end_time": "2021-03-19T05:38:03.871290",
511
+ "exception": false,
512
+ "start_time": "2021-03-19T05:38:03.847559",
513
+ "status": "completed"
514
+ },
515
+ "tags": []
516
+ },
517
+ "source": [
518
+ "I use 3 Boosting Algorithms Models:\n",
519
+ "* Ada Boost Classifier\n",
520
+ "* Gradient Boosting Classifier\n",
521
+ "* XGB Classifier"
522
+ ]
523
+ },
524
+ {
525
+ "cell_type": "code",
526
+ "execution_count": 10,
527
+ "metadata": {
528
+ "execution": {
529
+ "iopub.execute_input": "2021-03-19T05:38:03.927028Z",
530
+ "iopub.status.busy": "2021-03-19T05:38:03.926229Z",
531
+ "iopub.status.idle": "2021-03-19T05:38:03.929154Z",
532
+ "shell.execute_reply": "2021-03-19T05:38:03.929616Z"
533
+ },
534
+ "papermill": {
535
+ "duration": 0.034442,
536
+ "end_time": "2021-03-19T05:38:03.929828",
537
+ "exception": false,
538
+ "start_time": "2021-03-19T05:38:03.895386",
539
+ "status": "completed"
540
+ },
541
+ "tags": []
542
+ },
543
+ "outputs": [],
544
+ "source": [
545
+ "adaboost = AdaBoostClassifier(DecisionTreeClassifier(), random_state = 1212)\n",
546
+ "pipe_ada = Pipeline([\n",
547
+ " ('transformer', transformer),\n",
548
+ " ('adaboost', adaboost)])\n",
549
+ "\n",
550
+ "gradboost = GradientBoostingClassifier(random_state = 1212)\n",
551
+ "pipe_grad = Pipeline([\n",
552
+ " ('transformer', transformer),\n",
553
+ " ('gradboost', gradboost)])\n",
554
+ "\n",
555
+ "XGBOOST = XGBClassifier(random_state = 1212)\n",
556
+ "pipe_XGB = Pipeline([\n",
557
+ " ('transformer', transformer),\n",
558
+ " ('XGBOOST', XGBOOST)])"
559
+ ]
560
+ },
561
+ {
562
+ "cell_type": "markdown",
563
+ "metadata": {
564
+ "papermill": {
565
+ "duration": 0.023691,
566
+ "end_time": "2021-03-19T05:38:03.977709",
567
+ "exception": false,
568
+ "start_time": "2021-03-19T05:38:03.954018",
569
+ "status": "completed"
570
+ },
571
+ "tags": []
572
+ },
573
+ "source": [
574
+ "# Cross Validation"
575
+ ]
576
+ },
577
+ {
578
+ "cell_type": "markdown",
579
+ "metadata": {
580
+ "papermill": {
581
+ "duration": 0.023661,
582
+ "end_time": "2021-03-19T05:38:04.025361",
583
+ "exception": false,
584
+ "start_time": "2021-03-19T05:38:04.001700",
585
+ "status": "completed"
586
+ },
587
+ "tags": []
588
+ },
589
+ "source": [
590
+ "*Model Evaluation*"
591
+ ]
592
+ },
593
+ {
594
+ "cell_type": "code",
595
+ "execution_count": 12,
596
+ "metadata": {
597
+ "execution": {
598
+ "iopub.execute_input": "2021-03-19T05:38:04.081401Z",
599
+ "iopub.status.busy": "2021-03-19T05:38:04.080271Z",
600
+ "iopub.status.idle": "2021-03-19T05:38:44.672230Z",
601
+ "shell.execute_reply": "2021-03-19T05:38:44.672845Z"
602
+ },
603
+ "papermill": {
604
+ "duration": 40.623782,
605
+ "end_time": "2021-03-19T05:38:44.673057",
606
+ "exception": false,
607
+ "start_time": "2021-03-19T05:38:04.049275",
608
+ "status": "completed"
609
+ },
610
+ "tags": []
611
+ },
612
+ "outputs": [],
613
+ "source": [
614
+ "def model_evaluation(model, metric):\n",
615
+ " skfold = StratifiedKFold(n_splits = 5)\n",
616
+ " model_cv = cross_val_score(model, X_train, y_train, cv = skfold, scoring = metric)\n",
617
+ " return model_cv\n",
618
+ "\n",
619
+ "pipe_ada_cv = model_evaluation(pipe_ada, 'f1')\n",
620
+ "pipe_grad_cv = model_evaluation(pipe_grad, 'f1')\n",
621
+ "pipe_XGB_cv = model_evaluation(pipe_XGB, 'f1')"
622
+ ]
623
+ },
624
+ {
625
+ "cell_type": "markdown",
626
+ "metadata": {
627
+ "papermill": {
628
+ "duration": 0.025351,
629
+ "end_time": "2021-03-19T05:38:44.725538",
630
+ "exception": false,
631
+ "start_time": "2021-03-19T05:38:44.700187",
632
+ "status": "completed"
633
+ },
634
+ "tags": []
635
+ },
636
+ "source": [
637
+ "*Fitting Data*"
638
+ ]
639
+ },
640
+ {
641
+ "cell_type": "code",
642
+ "execution_count": null,
643
+ "metadata": {
644
+ "execution": {
645
+ "iopub.execute_input": "2021-03-19T05:38:44.789205Z",
646
+ "iopub.status.busy": "2021-03-19T05:38:44.788134Z",
647
+ "iopub.status.idle": "2021-03-19T05:38:53.398606Z",
648
+ "shell.execute_reply": "2021-03-19T05:38:53.398036Z"
649
+ },
650
+ "papermill": {
651
+ "duration": 8.647612,
652
+ "end_time": "2021-03-19T05:38:53.398853",
653
+ "exception": false,
654
+ "start_time": "2021-03-19T05:38:44.751241",
655
+ "status": "completed"
656
+ },
657
+ "tags": []
658
+ },
659
+ "outputs": [],
660
+ "source": [
661
+ "for model in [pipe_ada, pipe_grad, pipe_XGB]:\n",
662
+ " model.fit(X_train, y_train)"
663
+ ]
664
+ },
665
+ {
666
+ "cell_type": "markdown",
667
+ "metadata": {
668
+ "papermill": {
669
+ "duration": 0.025784,
670
+ "end_time": "2021-03-19T05:38:53.451009",
671
+ "exception": false,
672
+ "start_time": "2021-03-19T05:38:53.425225",
673
+ "status": "completed"
674
+ },
675
+ "tags": []
676
+ },
677
+ "source": [
678
+ "*Summary*"
679
+ ]
680
+ },
681
+ {
682
+ "cell_type": "code",
683
+ "execution_count": null,
684
+ "metadata": {
685
+ "execution": {
686
+ "iopub.execute_input": "2021-03-19T05:38:53.527446Z",
687
+ "iopub.status.busy": "2021-03-19T05:38:53.526733Z",
688
+ "iopub.status.idle": "2021-03-19T05:38:53.815303Z",
689
+ "shell.execute_reply": "2021-03-19T05:38:53.815805Z"
690
+ },
691
+ "papermill": {
692
+ "duration": 0.33887,
693
+ "end_time": "2021-03-19T05:38:53.815995",
694
+ "exception": false,
695
+ "start_time": "2021-03-19T05:38:53.477125",
696
+ "status": "completed"
697
+ },
698
+ "tags": []
699
+ },
700
+ "outputs": [],
701
+ "source": [
702
+ "score_mean = [pipe_ada_cv.mean(), pipe_grad_cv.mean(), pipe_XGB_cv.mean()]\n",
703
+ "score_std = [pipe_ada_cv.std(), pipe_grad_cv.std(), pipe_XGB_cv.std()]\n",
704
+ "score_f1 = [f1_score(y_test, pipe_ada.predict(X_test)),\n",
705
+ " f1_score(y_test, pipe_grad.predict(X_test)), \n",
706
+ " f1_score(y_test, pipe_XGB.predict(X_test))]\n",
707
+ "method_name = ['Ada Boost Classifier', 'Gradient Boost Classifier ',\n",
708
+ " 'XGB Classifier']\n",
709
+ "summary = pd.DataFrame({'method': method_name, 'mean score': score_mean,\n",
710
+ " 'std score': score_std, 'f1 score': score_f1})\n",
711
+ "summary"
712
+ ]
713
+ },
714
+ {
715
+ "cell_type": "markdown",
716
+ "metadata": {
717
+ "papermill": {
718
+ "duration": 0.026318,
719
+ "end_time": "2021-03-19T05:38:53.869169",
720
+ "exception": false,
721
+ "start_time": "2021-03-19T05:38:53.842851",
722
+ "status": "completed"
723
+ },
724
+ "tags": []
725
+ },
726
+ "source": [
727
+ "From these scores, **XGB Classifier is the best one** with the highest f1 score and mean score, also the lowest std score. Let's cross-check with the important features, see if the model is correct."
728
+ ]
729
+ },
730
+ {
731
+ "cell_type": "code",
732
+ "execution_count": null,
733
+ "metadata": {
734
+ "execution": {
735
+ "iopub.execute_input": "2021-03-19T05:38:53.944904Z",
736
+ "iopub.status.busy": "2021-03-19T05:38:53.929491Z",
737
+ "iopub.status.idle": "2021-03-19T05:38:54.176548Z",
738
+ "shell.execute_reply": "2021-03-19T05:38:54.175387Z"
739
+ },
740
+ "papermill": {
741
+ "duration": 0.28086,
742
+ "end_time": "2021-03-19T05:38:54.176735",
743
+ "exception": false,
744
+ "start_time": "2021-03-19T05:38:53.895875",
745
+ "status": "completed"
746
+ },
747
+ "tags": []
748
+ },
749
+ "outputs": [],
750
+ "source": [
751
+ "plot_roc_curve(pipe_XGB, X_test, y_test)"
752
+ ]
753
+ },
754
+ {
755
+ "cell_type": "markdown",
756
+ "metadata": {
757
+ "papermill": {
758
+ "duration": 0.027942,
759
+ "end_time": "2021-03-19T05:38:54.233278",
760
+ "exception": false,
761
+ "start_time": "2021-03-19T05:38:54.205336",
762
+ "status": "completed"
763
+ },
764
+ "tags": []
765
+ },
766
+ "source": [
767
+ "# Importance Features"
768
+ ]
769
+ },
770
+ {
771
+ "cell_type": "code",
772
+ "execution_count": null,
773
+ "metadata": {
774
+ "execution": {
775
+ "iopub.execute_input": "2021-03-19T05:38:54.295503Z",
776
+ "iopub.status.busy": "2021-03-19T05:38:54.294857Z",
777
+ "iopub.status.idle": "2021-03-19T05:38:54.297765Z",
778
+ "shell.execute_reply": "2021-03-19T05:38:54.297092Z"
779
+ },
780
+ "papermill": {
781
+ "duration": 0.036542,
782
+ "end_time": "2021-03-19T05:38:54.297927",
783
+ "exception": false,
784
+ "start_time": "2021-03-19T05:38:54.261385",
785
+ "status": "completed"
786
+ },
787
+ "tags": []
788
+ },
789
+ "outputs": [],
790
+ "source": [
791
+ "features = list(pipe_ada[0].transformers_[0][1].get_feature_names()) + pipe_ada[0].transformers_[1][1][1].get_feature_names() + ['age', 'education.num', 'capital.gain', 'capital.loss', 'hours.per.week']"
792
+ ]
793
+ },
794
+ {
795
+ "cell_type": "code",
796
+ "execution_count": null,
797
+ "metadata": {
798
+ "execution": {
799
+ "iopub.execute_input": "2021-03-19T05:38:54.358245Z",
800
+ "iopub.status.busy": "2021-03-19T05:38:54.357625Z",
801
+ "iopub.status.idle": "2021-03-19T05:38:54.792959Z",
802
+ "shell.execute_reply": "2021-03-19T05:38:54.792423Z"
803
+ },
804
+ "papermill": {
805
+ "duration": 0.466676,
806
+ "end_time": "2021-03-19T05:38:54.793113",
807
+ "exception": false,
808
+ "start_time": "2021-03-19T05:38:54.326437",
809
+ "status": "completed"
810
+ },
811
+ "tags": []
812
+ },
813
+ "outputs": [],
814
+ "source": [
815
+ "imptab_ada = pd.DataFrame(pipe_ada[1].feature_importances_, columns = ['imp'], index = features)\n",
816
+ "imptab_ada.sort_values('imp').plot(kind = 'barh', figsize = (15,8))\n",
817
+ "plt.title('Importance Table For Ada Boost Classifier Model')\n",
818
+ "plt.show()"
819
+ ]
820
+ },
821
+ {
822
+ "cell_type": "code",
823
+ "execution_count": null,
824
+ "metadata": {
825
+ "execution": {
826
+ "iopub.execute_input": "2021-03-19T05:38:54.859604Z",
827
+ "iopub.status.busy": "2021-03-19T05:38:54.858939Z",
828
+ "iopub.status.idle": "2021-03-19T05:38:55.286891Z",
829
+ "shell.execute_reply": "2021-03-19T05:38:55.286335Z"
830
+ },
831
+ "papermill": {
832
+ "duration": 0.464119,
833
+ "end_time": "2021-03-19T05:38:55.287039",
834
+ "exception": false,
835
+ "start_time": "2021-03-19T05:38:54.822920",
836
+ "status": "completed"
837
+ },
838
+ "tags": []
839
+ },
840
+ "outputs": [],
841
+ "source": [
842
+ "imptab_grad = pd.DataFrame(pipe_grad[1].feature_importances_, columns = ['imp'], index = features)\n",
843
+ "imptab_grad.sort_values('imp').plot(kind = 'barh', figsize = (15,8))\n",
844
+ "plt.title('Importance Table For Gradient Boost Classifier Model')\n",
845
+ "plt.show()"
846
+ ]
847
+ },
848
+ {
849
+ "cell_type": "code",
850
+ "execution_count": null,
851
+ "metadata": {
852
+ "execution": {
853
+ "iopub.execute_input": "2021-03-19T05:38:55.356257Z",
854
+ "iopub.status.busy": "2021-03-19T05:38:55.355583Z",
855
+ "iopub.status.idle": "2021-03-19T05:38:55.939126Z",
856
+ "shell.execute_reply": "2021-03-19T05:38:55.938530Z"
857
+ },
858
+ "papermill": {
859
+ "duration": 0.62115,
860
+ "end_time": "2021-03-19T05:38:55.939279",
861
+ "exception": false,
862
+ "start_time": "2021-03-19T05:38:55.318129",
863
+ "status": "completed"
864
+ },
865
+ "tags": []
866
+ },
867
+ "outputs": [],
868
+ "source": [
869
+ "imptab_XGB = pd.DataFrame(pipe_XGB[1].feature_importances_, columns = ['imp'], index = features)\n",
870
+ "imptab_XGB.sort_values('imp').plot(kind = 'barh', figsize = (15,8))\n",
871
+ "plt.title('Importance Table For XGB Classifier Model')\n",
872
+ "plt.show()"
873
+ ]
874
+ },
875
+ {
876
+ "cell_type": "markdown",
877
+ "metadata": {
878
+ "papermill": {
879
+ "duration": 0.031945,
880
+ "end_time": "2021-03-19T05:38:56.003841",
881
+ "exception": false,
882
+ "start_time": "2021-03-19T05:38:55.971896",
883
+ "status": "completed"
884
+ },
885
+ "tags": []
886
+ },
887
+ "source": [
888
+ "From Importance Features Table, the **XGB Classifier can boost almost all the features**. It's has a consistency with the cross validation result. Now, see if the HyperParameter Tuning process can boost until getting the maximum score."
889
+ ]
890
+ },
891
+ {
892
+ "cell_type": "markdown",
893
+ "metadata": {
894
+ "papermill": {
895
+ "duration": 0.03221,
896
+ "end_time": "2021-03-19T05:38:56.068184",
897
+ "exception": false,
898
+ "start_time": "2021-03-19T05:38:56.035974",
899
+ "status": "completed"
900
+ },
901
+ "tags": []
902
+ },
903
+ "source": [
904
+ "# HyperParameter Tuning"
905
+ ]
906
+ },
907
+ {
908
+ "cell_type": "code",
909
+ "execution_count": null,
910
+ "metadata": {
911
+ "execution": {
912
+ "iopub.execute_input": "2021-03-19T05:38:56.141374Z",
913
+ "iopub.status.busy": "2021-03-19T05:38:56.140625Z",
914
+ "iopub.status.idle": "2021-03-19T06:01:37.865510Z",
915
+ "shell.execute_reply": "2021-03-19T06:01:37.864282Z"
916
+ },
917
+ "papermill": {
918
+ "duration": 1361.765454,
919
+ "end_time": "2021-03-19T06:01:37.865698",
920
+ "exception": false,
921
+ "start_time": "2021-03-19T05:38:56.100244",
922
+ "status": "completed"
923
+ },
924
+ "tags": []
925
+ },
926
+ "outputs": [],
927
+ "source": [
928
+ "XGBOOST = XGBClassifier(random_state = 1212)\n",
929
+ "estimator = Pipeline([('transformer', transformer), ('XGBOOST', XGBOOST)])\n",
930
+ "\n",
931
+ "hyperparam_space = {\n",
932
+ " 'XGBOOST__learning_rate': [0.1, 0.05, 0.01, 0.005],\n",
933
+ " 'XGBOOST__n_estimators': [50, 100, 150, 200],\n",
934
+ " 'XGBOOST__max_depth': [3, 5, 7, 9]\n",
935
+ "}\n",
936
+ "\n",
937
+ "random = RandomizedSearchCV(\n",
938
+ " estimator,\n",
939
+ " param_distributions = hyperparam_space,\n",
940
+ " cv = StratifiedKFold(n_splits = 5),\n",
941
+ " scoring = 'f1',\n",
942
+ " n_iter = 10,\n",
943
+ " n_jobs = -1)\n",
944
+ "\n",
945
+ "random.fit(X_train, y_train)"
946
+ ]
947
+ },
948
+ {
949
+ "cell_type": "code",
950
+ "execution_count": 19,
951
+ "metadata": {
952
+ "execution": {
953
+ "iopub.execute_input": "2021-03-19T06:01:37.939505Z",
954
+ "iopub.status.busy": "2021-03-19T06:01:37.938761Z",
955
+ "iopub.status.idle": "2021-03-19T06:01:37.942876Z",
956
+ "shell.execute_reply": "2021-03-19T06:01:37.942253Z"
957
+ },
958
+ "papermill": {
959
+ "duration": 0.043411,
960
+ "end_time": "2021-03-19T06:01:37.943017",
961
+ "exception": false,
962
+ "start_time": "2021-03-19T06:01:37.899606",
963
+ "status": "completed"
964
+ },
965
+ "tags": []
966
+ },
967
+ "outputs": [
968
+ {
969
+ "name": "stdout",
970
+ "output_type": "stream",
971
+ "text": [
972
+ "best score 0.7017567140030995\n",
973
+ "best param {'XGBOOST__n_estimators': 200, 'XGBOOST__max_depth': 9, 'XGBOOST__learning_rate': 0.05}\n"
974
+ ]
975
+ }
976
+ ],
977
+ "source": [
978
+ "print('best score', random.best_score_)\n",
979
+ "print('best param', random.best_params_)"
980
+ ]
981
+ },
982
+ {
983
+ "cell_type": "markdown",
984
+ "metadata": {
985
+ "papermill": {
986
+ "duration": 0.034769,
987
+ "end_time": "2021-03-19T06:01:38.011326",
988
+ "exception": false,
989
+ "start_time": "2021-03-19T06:01:37.976557",
990
+ "status": "completed"
991
+ },
992
+ "tags": []
993
+ },
994
+ "source": [
995
+ "After HyperParameter Tuning, the best score is 0.6996, which getting lower. N estimator is 150, Max depth is 5, and Learning rate is 0.1. Let's compare the result."
996
+ ]
997
+ },
998
+ {
999
+ "cell_type": "markdown",
1000
+ "metadata": {
1001
+ "papermill": {
1002
+ "duration": 0.033061,
1003
+ "end_time": "2021-03-19T06:01:38.078046",
1004
+ "exception": false,
1005
+ "start_time": "2021-03-19T06:01:38.044985",
1006
+ "status": "completed"
1007
+ },
1008
+ "tags": []
1009
+ },
1010
+ "source": [
1011
+ "# Before VS After Tuning Comparison"
1012
+ ]
1013
+ },
1014
+ {
1015
+ "cell_type": "code",
1016
+ "execution_count": 20,
1017
+ "metadata": {
1018
+ "execution": {
1019
+ "iopub.execute_input": "2021-03-19T06:01:38.173068Z",
1020
+ "iopub.status.busy": "2021-03-19T06:01:38.164508Z",
1021
+ "iopub.status.idle": "2021-03-19T06:01:39.914461Z",
1022
+ "shell.execute_reply": "2021-03-19T06:01:39.915204Z"
1023
+ },
1024
+ "papermill": {
1025
+ "duration": 1.803827,
1026
+ "end_time": "2021-03-19T06:01:39.915379",
1027
+ "exception": false,
1028
+ "start_time": "2021-03-19T06:01:38.111552",
1029
+ "status": "completed"
1030
+ },
1031
+ "tags": []
1032
+ },
1033
+ "outputs": [
1034
+ {
1035
+ "name": "stdout",
1036
+ "output_type": "stream",
1037
+ "text": [
1038
+ "[06:01:38] WARNING: ../src/learner.cc:1061: Starting in XGBoost 1.3.0, the default evaluation metric used with the objective 'binary:logistic' was changed from 'error' to 'logloss'. Explicitly set eval_metric if you'd like to restore the old behavior.\n",
1039
+ " precision recall f1-score support\n",
1040
+ "\n",
1041
+ " 0 0.90 0.94 0.92 7417\n",
1042
+ " 1 0.77 0.66 0.71 2352\n",
1043
+ "\n",
1044
+ " accuracy 0.87 9769\n",
1045
+ " macro avg 0.83 0.80 0.82 9769\n",
1046
+ "weighted avg 0.87 0.87 0.87 9769\n",
1047
+ "\n"
1048
+ ]
1049
+ }
1050
+ ],
1051
+ "source": [
1052
+ "estimator.fit(X_train, y_train)\n",
1053
+ "y_pred_estimator = estimator.predict(X_test)\n",
1054
+ "print(classification_report(y_test, y_pred_estimator))"
1055
+ ]
1056
+ },
1057
+ {
1058
+ "cell_type": "code",
1059
+ "execution_count": 21,
1060
+ "metadata": {
1061
+ "execution": {
1062
+ "iopub.execute_input": "2021-03-19T06:01:39.991027Z",
1063
+ "iopub.status.busy": "2021-03-19T06:01:39.990364Z",
1064
+ "iopub.status.idle": "2021-03-19T06:01:45.312598Z",
1065
+ "shell.execute_reply": "2021-03-19T06:01:45.311942Z"
1066
+ },
1067
+ "papermill": {
1068
+ "duration": 5.363188,
1069
+ "end_time": "2021-03-19T06:01:45.312764",
1070
+ "exception": false,
1071
+ "start_time": "2021-03-19T06:01:39.949576",
1072
+ "status": "completed"
1073
+ },
1074
+ "tags": []
1075
+ },
1076
+ "outputs": [
1077
+ {
1078
+ "name": "stdout",
1079
+ "output_type": "stream",
1080
+ "text": [
1081
+ "[06:01:40] WARNING: ../src/learner.cc:1061: Starting in XGBoost 1.3.0, the default evaluation metric used with the objective 'binary:logistic' was changed from 'error' to 'logloss'. Explicitly set eval_metric if you'd like to restore the old behavior.\n",
1082
+ " precision recall f1-score support\n",
1083
+ "\n",
1084
+ " 0 0.89 0.94 0.92 7417\n",
1085
+ " 1 0.78 0.64 0.70 2352\n",
1086
+ "\n",
1087
+ " accuracy 0.87 9769\n",
1088
+ " macro avg 0.84 0.79 0.81 9769\n",
1089
+ "weighted avg 0.86 0.87 0.87 9769\n",
1090
+ "\n"
1091
+ ]
1092
+ }
1093
+ ],
1094
+ "source": [
1095
+ "random.best_estimator_.fit(X_train, y_train)\n",
1096
+ "y_pred_random = random.best_estimator_.predict(X_test)\n",
1097
+ "print(classification_report(y_test, y_pred_random))"
1098
+ ]
1099
+ },
1100
+ {
1101
+ "cell_type": "code",
1102
+ "execution_count": 22,
1103
+ "metadata": {
1104
+ "execution": {
1105
+ "iopub.execute_input": "2021-03-19T06:01:45.389702Z",
1106
+ "iopub.status.busy": "2021-03-19T06:01:45.388729Z",
1107
+ "iopub.status.idle": "2021-03-19T06:01:45.407492Z",
1108
+ "shell.execute_reply": "2021-03-19T06:01:45.406952Z"
1109
+ },
1110
+ "papermill": {
1111
+ "duration": 0.05942,
1112
+ "end_time": "2021-03-19T06:01:45.407632",
1113
+ "exception": false,
1114
+ "start_time": "2021-03-19T06:01:45.348212",
1115
+ "status": "completed"
1116
+ },
1117
+ "tags": []
1118
+ },
1119
+ "outputs": [
1120
+ {
1121
+ "data": {
1122
+ "text/html": [
1123
+ "<div>\n",
1124
+ "<style scoped>\n",
1125
+ " .dataframe tbody tr th:only-of-type {\n",
1126
+ " vertical-align: middle;\n",
1127
+ " }\n",
1128
+ "\n",
1129
+ " .dataframe tbody tr th {\n",
1130
+ " vertical-align: top;\n",
1131
+ " }\n",
1132
+ "\n",
1133
+ " .dataframe thead th {\n",
1134
+ " text-align: right;\n",
1135
+ " }\n",
1136
+ "</style>\n",
1137
+ "<table border=\"1\" class=\"dataframe\">\n",
1138
+ " <thead>\n",
1139
+ " <tr style=\"text-align: right;\">\n",
1140
+ " <th></th>\n",
1141
+ " <th>method</th>\n",
1142
+ " <th>f1 score</th>\n",
1143
+ " </tr>\n",
1144
+ " </thead>\n",
1145
+ " <tbody>\n",
1146
+ " <tr>\n",
1147
+ " <th>0</th>\n",
1148
+ " <td>XGB Classifier Before Tuning</td>\n",
1149
+ " <td>0.713175</td>\n",
1150
+ " </tr>\n",
1151
+ " <tr>\n",
1152
+ " <th>1</th>\n",
1153
+ " <td>XGB Classifier After Tuning</td>\n",
1154
+ " <td>0.703169</td>\n",
1155
+ " </tr>\n",
1156
+ " </tbody>\n",
1157
+ "</table>\n",
1158
+ "</div>"
1159
+ ],
1160
+ "text/plain": [
1161
+ " method f1 score\n",
1162
+ "0 XGB Classifier Before Tuning 0.713175\n",
1163
+ "1 XGB Classifier After Tuning 0.703169"
1164
+ ]
1165
+ },
1166
+ "execution_count": 22,
1167
+ "metadata": {},
1168
+ "output_type": "execute_result"
1169
+ }
1170
+ ],
1171
+ "source": [
1172
+ "score_list = [f1_score(y_test, y_pred_estimator), f1_score(y_test, y_pred_random)]\n",
1173
+ "method_name = ['XGB Classifier Before Tuning', 'XGB Classifier After Tuning']\n",
1174
+ "best_summary = pd.DataFrame({\n",
1175
+ " 'method': method_name,\n",
1176
+ " 'f1 score': score_list\n",
1177
+ "})\n",
1178
+ "best_summary"
1179
+ ]
1180
+ },
1181
+ {
1182
+ "cell_type": "markdown",
1183
+ "metadata": {
1184
+ "papermill": {
1185
+ "duration": 0.035589,
1186
+ "end_time": "2021-03-19T06:01:45.478797",
1187
+ "exception": false,
1188
+ "start_time": "2021-03-19T06:01:45.443208",
1189
+ "status": "completed"
1190
+ },
1191
+ "tags": []
1192
+ },
1193
+ "source": [
1194
+ "After all, HyperParameter Tuning doesn't work good in this data. So if I have to choose, I pick the **XGB Classifier score Before Tuning, which is 0.71**. I know the number isn't good enough either because the data is imbalance and I don't process any resampling on it."
1195
+ ]
1196
+ },
1197
+ {
1198
+ "cell_type": "markdown",
1199
+ "metadata": {},
1200
+ "source": [
1201
+ "## Fairness"
1202
+ ]
1203
+ },
1204
+ {
1205
+ "cell_type": "code",
1206
+ "execution_count": 13,
1207
+ "metadata": {},
1208
+ "outputs": [],
1209
+ "source": [
1210
+ "# This DataFrame is created to stock differents models and fair metrics that we produce in this notebook\n",
1211
+ "algo_metrics = pd.DataFrame(columns=['model', 'fair_metrics', 'prediction', 'probs'])\n",
1212
+ "\n",
1213
+ "def add_to_df_algo_metrics(algo_metrics, model, fair_metrics, preds, probs, name):\n",
1214
+ " return algo_metrics.append(pd.DataFrame(data=[[model, fair_metrics, preds, probs]], columns=['model', 'fair_metrics', 'prediction', 'probs'], index=[name]))"
1215
+ ]
1216
+ },
1217
+ {
1218
+ "cell_type": "code",
1219
+ "execution_count": 14,
1220
+ "metadata": {},
1221
+ "outputs": [],
1222
+ "source": [
1223
+ "def fair_metrics(dataset, pred, pred_is_dataset=False):\n",
1224
+ " if pred_is_dataset:\n",
1225
+ " dataset_pred = pred\n",
1226
+ " else:\n",
1227
+ " dataset_pred = dataset.copy()\n",
1228
+ " dataset_pred.labels = pred\n",
1229
+ " \n",
1230
+ " cols = ['statistical_parity_difference', 'equal_opportunity_difference', 'average_abs_odds_difference', 'disparate_impact', 'theil_index']\n",
1231
+ " obj_fairness = [[0,0,0,1,0]]\n",
1232
+ " \n",
1233
+ " fair_metrics = pd.DataFrame(data=obj_fairness, index=['objective'], columns=cols)\n",
1234
+ " \n",
1235
+ " for attr in dataset_pred.protected_attribute_names:\n",
1236
+ " idx = dataset_pred.protected_attribute_names.index(attr)\n",
1237
+ " privileged_groups = [{attr:dataset_pred.privileged_protected_attributes[idx][0]}] \n",
1238
+ " unprivileged_groups = [{attr:dataset_pred.unprivileged_protected_attributes[idx][0]}] \n",
1239
+ " \n",
1240
+ " classified_metric = ClassificationMetric(dataset, \n",
1241
+ " dataset_pred,\n",
1242
+ " unprivileged_groups=unprivileged_groups,\n",
1243
+ " privileged_groups=privileged_groups)\n",
1244
+ "\n",
1245
+ " metric_pred = BinaryLabelDatasetMetric(dataset_pred,\n",
1246
+ " unprivileged_groups=unprivileged_groups,\n",
1247
+ " privileged_groups=privileged_groups)\n",
1248
+ "\n",
1249
+ " acc = classified_metric.accuracy()\n",
1250
+ "\n",
1251
+ " row = pd.DataFrame([[metric_pred.mean_difference(),\n",
1252
+ " classified_metric.equal_opportunity_difference(),\n",
1253
+ " classified_metric.average_abs_odds_difference(),\n",
1254
+ " metric_pred.disparate_impact(),\n",
1255
+ " classified_metric.theil_index()]],\n",
1256
+ " columns = cols,\n",
1257
+ " index = [attr]\n",
1258
+ " )\n",
1259
+ " fair_metrics = fair_metrics.append(row) \n",
1260
+ " \n",
1261
+ " fair_metrics = fair_metrics.replace([-np.inf, np.inf], 2)\n",
1262
+ " \n",
1263
+ " return fair_metrics\n",
1264
+ "\n",
1265
+ "def plot_fair_metrics(fair_metrics):\n",
1266
+ " fig, ax = plt.subplots(figsize=(20,4), ncols=5, nrows=1)\n",
1267
+ "\n",
1268
+ " plt.subplots_adjust(\n",
1269
+ " left = 0.125, \n",
1270
+ " bottom = 0.1, \n",
1271
+ " right = 0.9, \n",
1272
+ " top = 0.9, \n",
1273
+ " wspace = .5, \n",
1274
+ " hspace = 1.1\n",
1275
+ " )\n",
1276
+ "\n",
1277
+ " y_title_margin = 1.2\n",
1278
+ "\n",
1279
+ " plt.suptitle(\"Fairness metrics\", y = 1.09, fontsize=20)\n",
1280
+ " sns.set(style=\"dark\")\n",
1281
+ "\n",
1282
+ " cols = fair_metrics.columns.values\n",
1283
+ " obj = fair_metrics.loc['objective']\n",
1284
+ " size_rect = [0.2,0.2,0.2,0.4,0.25]\n",
1285
+ " rect = [-0.1,-0.1,-0.1,0.8,0]\n",
1286
+ " bottom = [-1,-1,-1,0,0]\n",
1287
+ " top = [1,1,1,2,1]\n",
1288
+ " bound = [[-0.1,0.1],[-0.1,0.1],[-0.1,0.1],[0.8,1.2],[0,0.25]]\n",
1289
+ "\n",
1290
+ " display(Markdown(\"### Check bias metrics :\"))\n",
1291
+ " display(Markdown(\"A model can be considered bias if just one of these five metrics show that this model is biased.\"))\n",
1292
+ " for attr in fair_metrics.index[1:len(fair_metrics)].values:\n",
1293
+ " display(Markdown(\"#### For the %s attribute :\"%attr))\n",
1294
+ " check = [bound[i][0] < fair_metrics.loc[attr][i] < bound[i][1] for i in range(0,5)]\n",
1295
+ " display(Markdown(\"With default thresholds, bias against unprivileged group detected in **%d** out of 5 metrics\"%(5 - sum(check))))\n",
1296
+ "\n",
1297
+ " for i in range(0,5):\n",
1298
+ " plt.subplot(1, 5, i+1)\n",
1299
+ " ax = sns.barplot(x=fair_metrics.index[1:len(fair_metrics)], y=fair_metrics.iloc[1:len(fair_metrics)][cols[i]])\n",
1300
+ " \n",
1301
+ " for j in range(0,len(fair_metrics)-1):\n",
1302
+ " a, val = ax.patches[j], fair_metrics.iloc[j+1][cols[i]]\n",
1303
+ " marg = -0.2 if val < 0 else 0.1\n",
1304
+ " ax.text(a.get_x()+a.get_width()/5, a.get_y()+a.get_height()+marg, round(val, 3), fontsize=15,color='black')\n",
1305
+ "\n",
1306
+ " plt.ylim(bottom[i], top[i])\n",
1307
+ " plt.setp(ax.patches, linewidth=0)\n",
1308
+ " ax.add_patch(patches.Rectangle((-5,rect[i]), 10, size_rect[i], alpha=0.3, facecolor=\"green\", linewidth=1, linestyle='solid'))\n",
1309
+ " plt.axhline(obj[i], color='black', alpha=0.3)\n",
1310
+ " plt.title(cols[i])\n",
1311
+ " ax.set_ylabel('') \n",
1312
+ " ax.set_xlabel('')"
1313
+ ]
1314
+ },
1315
+ {
1316
+ "cell_type": "code",
1317
+ "execution_count": 15,
1318
+ "metadata": {},
1319
+ "outputs": [],
1320
+ "source": [
1321
+ "def get_fair_metrics_and_plot(data, model, plot=False, model_aif=False):\n",
1322
+ " pred = model.predict(data).labels if model_aif else model.predict(data.features)\n",
1323
+ " # fair_metrics function available in the metrics.py file\n",
1324
+ " fair = fair_metrics(data, pred)\n",
1325
+ "\n",
1326
+ " if plot:\n",
1327
+ " # plot_fair_metrics function available in the visualisations.py file\n",
1328
+ " # The visualisation of this function is inspired by the dashboard on the demo of IBM aif360 \n",
1329
+ " plot_fair_metrics(fair)\n",
1330
+ " display(fair)\n",
1331
+ " \n",
1332
+ " return fair"
1333
+ ]
1334
+ },
1335
+ {
1336
+ "cell_type": "code",
1337
+ "execution_count": 16,
1338
+ "metadata": {},
1339
+ "outputs": [
1340
+ {
1341
+ "data": {
1342
+ "text/html": [
1343
+ "<div>\n",
1344
+ "<style scoped>\n",
1345
+ " .dataframe tbody tr th:only-of-type {\n",
1346
+ " vertical-align: middle;\n",
1347
+ " }\n",
1348
+ "\n",
1349
+ " .dataframe tbody tr th {\n",
1350
+ " vertical-align: top;\n",
1351
+ " }\n",
1352
+ "\n",
1353
+ " .dataframe thead th {\n",
1354
+ " text-align: right;\n",
1355
+ " }\n",
1356
+ "</style>\n",
1357
+ "<table border=\"1\" class=\"dataframe\">\n",
1358
+ " <thead>\n",
1359
+ " <tr style=\"text-align: right;\">\n",
1360
+ " <th></th>\n",
1361
+ " <th>age</th>\n",
1362
+ " <th>workclass</th>\n",
1363
+ " <th>education.num</th>\n",
1364
+ " <th>marital.status</th>\n",
1365
+ " <th>occupation</th>\n",
1366
+ " <th>relationship</th>\n",
1367
+ " <th>race</th>\n",
1368
+ " <th>sex</th>\n",
1369
+ " <th>capital.gain</th>\n",
1370
+ " <th>capital.loss</th>\n",
1371
+ " <th>hours.per.week</th>\n",
1372
+ " <th>native.country</th>\n",
1373
+ " <th>income</th>\n",
1374
+ " </tr>\n",
1375
+ " </thead>\n",
1376
+ " <tbody>\n",
1377
+ " <tr>\n",
1378
+ " <th>0</th>\n",
1379
+ " <td>90</td>\n",
1380
+ " <td>NaN</td>\n",
1381
+ " <td>9</td>\n",
1382
+ " <td>Widowed</td>\n",
1383
+ " <td>NaN</td>\n",
1384
+ " <td>Not-in-family</td>\n",
1385
+ " <td>White</td>\n",
1386
+ " <td>0</td>\n",
1387
+ " <td>0</td>\n",
1388
+ " <td>4356</td>\n",
1389
+ " <td>40</td>\n",
1390
+ " <td>United-States</td>\n",
1391
+ " <td>0</td>\n",
1392
+ " </tr>\n",
1393
+ " <tr>\n",
1394
+ " <th>1</th>\n",
1395
+ " <td>82</td>\n",
1396
+ " <td>Private</td>\n",
1397
+ " <td>9</td>\n",
1398
+ " <td>Widowed</td>\n",
1399
+ " <td>Exec-managerial</td>\n",
1400
+ " <td>Not-in-family</td>\n",
1401
+ " <td>White</td>\n",
1402
+ " <td>0</td>\n",
1403
+ " <td>0</td>\n",
1404
+ " <td>4356</td>\n",
1405
+ " <td>18</td>\n",
1406
+ " <td>United-States</td>\n",
1407
+ " <td>0</td>\n",
1408
+ " </tr>\n",
1409
+ " <tr>\n",
1410
+ " <th>2</th>\n",
1411
+ " <td>66</td>\n",
1412
+ " <td>NaN</td>\n",
1413
+ " <td>10</td>\n",
1414
+ " <td>Widowed</td>\n",
1415
+ " <td>NaN</td>\n",
1416
+ " <td>Unmarried</td>\n",
1417
+ " <td>Black</td>\n",
1418
+ " <td>0</td>\n",
1419
+ " <td>0</td>\n",
1420
+ " <td>4356</td>\n",
1421
+ " <td>40</td>\n",
1422
+ " <td>United-States</td>\n",
1423
+ " <td>0</td>\n",
1424
+ " </tr>\n",
1425
+ " <tr>\n",
1426
+ " <th>3</th>\n",
1427
+ " <td>54</td>\n",
1428
+ " <td>Private</td>\n",
1429
+ " <td>4</td>\n",
1430
+ " <td>Divorced</td>\n",
1431
+ " <td>Machine-op-inspct</td>\n",
1432
+ " <td>Unmarried</td>\n",
1433
+ " <td>White</td>\n",
1434
+ " <td>0</td>\n",
1435
+ " <td>0</td>\n",
1436
+ " <td>3900</td>\n",
1437
+ " <td>40</td>\n",
1438
+ " <td>United-States</td>\n",
1439
+ " <td>0</td>\n",
1440
+ " </tr>\n",
1441
+ " <tr>\n",
1442
+ " <th>4</th>\n",
1443
+ " <td>41</td>\n",
1444
+ " <td>Private</td>\n",
1445
+ " <td>10</td>\n",
1446
+ " <td>Separated</td>\n",
1447
+ " <td>Prof-specialty</td>\n",
1448
+ " <td>Own-child</td>\n",
1449
+ " <td>White</td>\n",
1450
+ " <td>0</td>\n",
1451
+ " <td>0</td>\n",
1452
+ " <td>3900</td>\n",
1453
+ " <td>40</td>\n",
1454
+ " <td>United-States</td>\n",
1455
+ " <td>0</td>\n",
1456
+ " </tr>\n",
1457
+ " </tbody>\n",
1458
+ "</table>\n",
1459
+ "</div>"
1460
+ ],
1461
+ "text/plain": [
1462
+ " age workclass education.num marital.status occupation \\\n",
1463
+ "0 90 NaN 9 Widowed NaN \n",
1464
+ "1 82 Private 9 Widowed Exec-managerial \n",
1465
+ "2 66 NaN 10 Widowed NaN \n",
1466
+ "3 54 Private 4 Divorced Machine-op-inspct \n",
1467
+ "4 41 Private 10 Separated Prof-specialty \n",
1468
+ "\n",
1469
+ " relationship race sex capital.gain capital.loss hours.per.week \\\n",
1470
+ "0 Not-in-family White 0 0 4356 40 \n",
1471
+ "1 Not-in-family White 0 0 4356 18 \n",
1472
+ "2 Unmarried Black 0 0 4356 40 \n",
1473
+ "3 Unmarried White 0 0 3900 40 \n",
1474
+ "4 Own-child White 0 0 3900 40 \n",
1475
+ "\n",
1476
+ " native.country income \n",
1477
+ "0 United-States 0 \n",
1478
+ "1 United-States 0 \n",
1479
+ "2 United-States 0 \n",
1480
+ "3 United-States 0 \n",
1481
+ "4 United-States 0 "
1482
+ ]
1483
+ },
1484
+ "execution_count": 16,
1485
+ "metadata": {},
1486
+ "output_type": "execute_result"
1487
+ }
1488
+ ],
1489
+ "source": [
1490
+ "##train['Sex'] = train['Sex'].map( {'female': 1, 'male': 0} ).astype(int)\n",
1491
+ "adult_df = adult.drop(['fnlwgt', 'education'], axis = 1)\n",
1492
+ "adult_df[\"income\"]=adult_df[\"income\"].map({\"<=50K\":0,\">50K\":1})\n",
1493
+ "adult_df[\"sex\"] = adult_df[\"sex\"].map({\"Male\":1,\"Female\":0})\n",
1494
+ "adult_df[\"workclass\"] = adult_df[\"workclass\"].replace(\"?\",np.nan)\n",
1495
+ "adult_df[\"occupation\"] = adult_df[\"occupation\"].replace(\"?\",np.nan)\n",
1496
+ "adult_df[\"native.country\"] = adult_df[\"native.country\"].replace(\"?\",np.nan)\n",
1497
+ "adult_df.head()\n",
1498
+ "#features = [\"Pclass\", \"Sex\", \"SibSp\", \"Parch\", \"Survived\"]\n",
1499
+ "#X = pd.get_dummies(train_data[features])"
1500
+ ]
1501
+ },
1502
+ {
1503
+ "cell_type": "code",
1504
+ "execution_count": 17,
1505
+ "metadata": {},
1506
+ "outputs": [],
1507
+ "source": [
1508
+ "adult_df[\"workclass\"] = adult_df[\"workclass\"].fillna(adult_df[\"workclass\"].mode()[0])\n",
1509
+ "adult_df[\"occupation\"] = adult_df[\"occupation\"].fillna(adult_df[\"occupation\"].mode()[0])\n",
1510
+ "adult_df[\"native.country\"] = adult_df[\"native.country\"].fillna(adult_df[\"native.country\"].mode()[0])"
1511
+ ]
1512
+ },
1513
+ {
1514
+ "cell_type": "code",
1515
+ "execution_count": 18,
1516
+ "metadata": {},
1517
+ "outputs": [
1518
+ {
1519
+ "name": "stdout",
1520
+ "output_type": "stream",
1521
+ "text": [
1522
+ "workclass\n",
1523
+ "marital.status\n",
1524
+ "occupation\n",
1525
+ "relationship\n",
1526
+ "race\n",
1527
+ "native.country\n"
1528
+ ]
1529
+ }
1530
+ ],
1531
+ "source": [
1532
+ "from sklearn import preprocessing\n",
1533
+ "\n",
1534
+ "categorical = ['workclass', 'marital.status', 'occupation', 'relationship','race','native.country',]\n",
1535
+ "for feature in categorical:\n",
1536
+ " print(feature)\n",
1537
+ " le = preprocessing.LabelEncoder()\n",
1538
+ " adult_df[feature] = le.fit_transform(adult_df[feature])\n",
1539
+ " #X_test[feature] = le.transform(X_test[feature])"
1540
+ ]
1541
+ },
1542
+ {
1543
+ "cell_type": "code",
1544
+ "execution_count": 19,
1545
+ "metadata": {},
1546
+ "outputs": [
1547
+ {
1548
+ "data": {
1549
+ "text/html": [
1550
+ "<div>\n",
1551
+ "<style scoped>\n",
1552
+ " .dataframe tbody tr th:only-of-type {\n",
1553
+ " vertical-align: middle;\n",
1554
+ " }\n",
1555
+ "\n",
1556
+ " .dataframe tbody tr th {\n",
1557
+ " vertical-align: top;\n",
1558
+ " }\n",
1559
+ "\n",
1560
+ " .dataframe thead th {\n",
1561
+ " text-align: right;\n",
1562
+ " }\n",
1563
+ "</style>\n",
1564
+ "<table border=\"1\" class=\"dataframe\">\n",
1565
+ " <thead>\n",
1566
+ " <tr style=\"text-align: right;\">\n",
1567
+ " <th></th>\n",
1568
+ " <th>age</th>\n",
1569
+ " <th>workclass</th>\n",
1570
+ " <th>education.num</th>\n",
1571
+ " <th>marital.status</th>\n",
1572
+ " <th>occupation</th>\n",
1573
+ " <th>relationship</th>\n",
1574
+ " <th>race</th>\n",
1575
+ " <th>sex</th>\n",
1576
+ " <th>capital.gain</th>\n",
1577
+ " <th>capital.loss</th>\n",
1578
+ " <th>hours.per.week</th>\n",
1579
+ " <th>native.country</th>\n",
1580
+ " <th>income</th>\n",
1581
+ " </tr>\n",
1582
+ " </thead>\n",
1583
+ " <tbody>\n",
1584
+ " <tr>\n",
1585
+ " <th>0</th>\n",
1586
+ " <td>90</td>\n",
1587
+ " <td>3</td>\n",
1588
+ " <td>9</td>\n",
1589
+ " <td>6</td>\n",
1590
+ " <td>9</td>\n",
1591
+ " <td>1</td>\n",
1592
+ " <td>4</td>\n",
1593
+ " <td>0</td>\n",
1594
+ " <td>0</td>\n",
1595
+ " <td>4356</td>\n",
1596
+ " <td>40</td>\n",
1597
+ " <td>38</td>\n",
1598
+ " <td>0</td>\n",
1599
+ " </tr>\n",
1600
+ " <tr>\n",
1601
+ " <th>1</th>\n",
1602
+ " <td>82</td>\n",
1603
+ " <td>3</td>\n",
1604
+ " <td>9</td>\n",
1605
+ " <td>6</td>\n",
1606
+ " <td>3</td>\n",
1607
+ " <td>1</td>\n",
1608
+ " <td>4</td>\n",
1609
+ " <td>0</td>\n",
1610
+ " <td>0</td>\n",
1611
+ " <td>4356</td>\n",
1612
+ " <td>18</td>\n",
1613
+ " <td>38</td>\n",
1614
+ " <td>0</td>\n",
1615
+ " </tr>\n",
1616
+ " <tr>\n",
1617
+ " <th>2</th>\n",
1618
+ " <td>66</td>\n",
1619
+ " <td>3</td>\n",
1620
+ " <td>10</td>\n",
1621
+ " <td>6</td>\n",
1622
+ " <td>9</td>\n",
1623
+ " <td>4</td>\n",
1624
+ " <td>2</td>\n",
1625
+ " <td>0</td>\n",
1626
+ " <td>0</td>\n",
1627
+ " <td>4356</td>\n",
1628
+ " <td>40</td>\n",
1629
+ " <td>38</td>\n",
1630
+ " <td>0</td>\n",
1631
+ " </tr>\n",
1632
+ " <tr>\n",
1633
+ " <th>3</th>\n",
1634
+ " <td>54</td>\n",
1635
+ " <td>3</td>\n",
1636
+ " <td>4</td>\n",
1637
+ " <td>0</td>\n",
1638
+ " <td>6</td>\n",
1639
+ " <td>4</td>\n",
1640
+ " <td>4</td>\n",
1641
+ " <td>0</td>\n",
1642
+ " <td>0</td>\n",
1643
+ " <td>3900</td>\n",
1644
+ " <td>40</td>\n",
1645
+ " <td>38</td>\n",
1646
+ " <td>0</td>\n",
1647
+ " </tr>\n",
1648
+ " <tr>\n",
1649
+ " <th>4</th>\n",
1650
+ " <td>41</td>\n",
1651
+ " <td>3</td>\n",
1652
+ " <td>10</td>\n",
1653
+ " <td>5</td>\n",
1654
+ " <td>9</td>\n",
1655
+ " <td>3</td>\n",
1656
+ " <td>4</td>\n",
1657
+ " <td>0</td>\n",
1658
+ " <td>0</td>\n",
1659
+ " <td>3900</td>\n",
1660
+ " <td>40</td>\n",
1661
+ " <td>38</td>\n",
1662
+ " <td>0</td>\n",
1663
+ " </tr>\n",
1664
+ " </tbody>\n",
1665
+ "</table>\n",
1666
+ "</div>"
1667
+ ],
1668
+ "text/plain": [
1669
+ " age workclass education.num marital.status occupation relationship \\\n",
1670
+ "0 90 3 9 6 9 1 \n",
1671
+ "1 82 3 9 6 3 1 \n",
1672
+ "2 66 3 10 6 9 4 \n",
1673
+ "3 54 3 4 0 6 4 \n",
1674
+ "4 41 3 10 5 9 3 \n",
1675
+ "\n",
1676
+ " race sex capital.gain capital.loss hours.per.week native.country \\\n",
1677
+ "0 4 0 0 4356 40 38 \n",
1678
+ "1 4 0 0 4356 18 38 \n",
1679
+ "2 2 0 0 4356 40 38 \n",
1680
+ "3 4 0 0 3900 40 38 \n",
1681
+ "4 4 0 0 3900 40 38 \n",
1682
+ "\n",
1683
+ " income \n",
1684
+ "0 0 \n",
1685
+ "1 0 \n",
1686
+ "2 0 \n",
1687
+ "3 0 \n",
1688
+ "4 0 "
1689
+ ]
1690
+ },
1691
+ "execution_count": 19,
1692
+ "metadata": {},
1693
+ "output_type": "execute_result"
1694
+ }
1695
+ ],
1696
+ "source": [
1697
+ "adult_df.head()"
1698
+ ]
1699
+ },
1700
+ {
1701
+ "cell_type": "code",
1702
+ "execution_count": 20,
1703
+ "metadata": {},
1704
+ "outputs": [],
1705
+ "source": [
1706
+ "privileged_groups = [{'sex': 1}]\n",
1707
+ "unprivileged_groups = [{'sex': 0}]\n",
1708
+ "dataset_orig = StandardDataset(adult_df,\n",
1709
+ " label_name='income',\n",
1710
+ " protected_attribute_names=['sex'],\n",
1711
+ " favorable_classes=[1],\n",
1712
+ " privileged_classes=[[1]])"
1713
+ ]
1714
+ },
1715
+ {
1716
+ "cell_type": "code",
1717
+ "execution_count": 21,
1718
+ "metadata": {},
1719
+ "outputs": [
1720
+ {
1721
+ "data": {
1722
+ "text/markdown": [
1723
+ "#### Original training dataset"
1724
+ ],
1725
+ "text/plain": [
1726
+ "<IPython.core.display.Markdown object>"
1727
+ ]
1728
+ },
1729
+ "metadata": {},
1730
+ "output_type": "display_data"
1731
+ },
1732
+ {
1733
+ "name": "stdout",
1734
+ "output_type": "stream",
1735
+ "text": [
1736
+ "Difference in mean outcomes between unprivileged and privileged groups = -0.196276\n"
1737
+ ]
1738
+ }
1739
+ ],
1740
+ "source": [
1741
+ "metric_orig_train = BinaryLabelDatasetMetric(dataset_orig, \n",
1742
+ " unprivileged_groups=unprivileged_groups,\n",
1743
+ " privileged_groups=privileged_groups)\n",
1744
+ "display(Markdown(\"#### Original training dataset\"))\n",
1745
+ "print(\"Difference in mean outcomes between unprivileged and privileged groups = %f\" % metric_orig_train.mean_difference())"
1746
+ ]
1747
+ },
1748
+ {
1749
+ "cell_type": "code",
1750
+ "execution_count": 22,
1751
+ "metadata": {},
1752
+ "outputs": [],
1753
+ "source": [
1754
+ "import ipynbname\n",
1755
+ "nb_fname = ipynbname.name()\n",
1756
+ "nb_path = ipynbname.path()\n",
1757
+ "\n",
1758
+ "from sklearn.ensemble import AdaBoostClassifier\n",
1759
+ "import pickle\n",
1760
+ "\n",
1761
+ "data_orig_train, data_orig_test = dataset_orig.split([0.7], shuffle=True)\n",
1762
+ "X_train = data_orig_train.features\n",
1763
+ "y_train = data_orig_train.labels.ravel()\n",
1764
+ "\n",
1765
+ "X_test = data_orig_test.features\n",
1766
+ "y_test = data_orig_test.labels.ravel()\n",
1767
+ "num_estimators = 100\n",
1768
+ "\n",
1769
+ "model = AdaBoostClassifier(DecisionTreeClassifier(), random_state = 1212, n_estimators= 1)\n",
1770
+ "\n",
1771
+ "mdl = model.fit(X_train, y_train)\n",
1772
+ "with open('../../Results/AdaBoost/' + nb_fname + '.pkl', 'wb') as f:\n",
1773
+ " pickle.dump(mdl, f)\n",
1774
+ "\n",
1775
+ "with open('../../Results/AdaBoost/' + nb_fname + '_Train' + '.pkl', 'wb') as f:\n",
1776
+ " pickle.dump(data_orig_train, f) \n",
1777
+ " \n",
1778
+ "with open('../../Results/AdaBoost/' + nb_fname + '_Test' + '.pkl', 'wb') as f:\n",
1779
+ " pickle.dump(data_orig_test, f) "
1780
+ ]
1781
+ },
1782
+ {
1783
+ "cell_type": "code",
1784
+ "execution_count": 23,
1785
+ "metadata": {},
1786
+ "outputs": [],
1787
+ "source": [
1788
+ "from csv import writer\n",
1789
+ "from sklearn.metrics import accuracy_score, f1_score\n",
1790
+ "\n",
1791
+ "final_metrics = []\n",
1792
+ "accuracy = []\n",
1793
+ "f1= []\n",
1794
+ "\n",
1795
+ "for i in range(1,num_estimators+1):\n",
1796
+ " \n",
1797
+ " model = AdaBoostClassifier(DecisionTreeClassifier(), random_state = 1212, n_estimators= i)\n",
1798
+ " \n",
1799
+ " mdl = model.fit(X_train, y_train)\n",
1800
+ " yy = mdl.predict(X_test)\n",
1801
+ " accuracy.append(accuracy_score(y_test, yy))\n",
1802
+ " f1.append(f1_score(y_test, yy))\n",
1803
+ " fair = get_fair_metrics_and_plot(data_orig_test, mdl) \n",
1804
+ " fair_list = fair.iloc[1].tolist()\n",
1805
+ " #fair_list.insert(0, i)\n",
1806
+ " final_metrics.append(fair_list)\n"
1807
+ ]
1808
+ },
1809
+ {
1810
+ "cell_type": "code",
1811
+ "execution_count": 24,
1812
+ "metadata": {},
1813
+ "outputs": [
1814
+ {
1815
+ "name": "stdout",
1816
+ "output_type": "stream",
1817
+ "text": [
1818
+ " 0 1 2 3 4\n",
1819
+ "0 -0.183168 -0.076254 0.087144 0.380655 0.131560\n",
1820
+ "1 -0.173759 -0.102515 0.092808 0.343206 0.134372\n",
1821
+ "2 -0.187010 -0.061932 0.081091 0.377050 0.126129\n",
1822
+ "3 -0.188524 -0.076409 0.087742 0.346592 0.126292\n",
1823
+ "4 -0.186375 -0.066125 0.081767 0.352659 0.125458\n",
1824
+ ".. ... ... ... ... ...\n",
1825
+ "95 -0.205228 -0.191345 0.148209 0.268040 0.132411\n",
1826
+ "96 -0.205689 -0.189028 0.147491 0.268399 0.132296\n",
1827
+ "97 -0.205074 -0.188520 0.146859 0.268187 0.132236\n",
1828
+ "98 -0.205533 -0.194170 0.149477 0.264534 0.132262\n",
1829
+ "99 -0.204459 -0.191345 0.147658 0.268777 0.132264\n",
1830
+ "\n",
1831
+ "[100 rows x 5 columns]\n"
1832
+ ]
1833
+ },
1834
+ {
1835
+ "data": {
1836
+ "text/html": [
1837
+ "<div>\n",
1838
+ "<style scoped>\n",
1839
+ " .dataframe tbody tr th:only-of-type {\n",
1840
+ " vertical-align: middle;\n",
1841
+ " }\n",
1842
+ "\n",
1843
+ " .dataframe tbody tr th {\n",
1844
+ " vertical-align: top;\n",
1845
+ " }\n",
1846
+ "\n",
1847
+ " .dataframe thead th {\n",
1848
+ " text-align: right;\n",
1849
+ " }\n",
1850
+ "</style>\n",
1851
+ "<table border=\"1\" class=\"dataframe\">\n",
1852
+ " <thead>\n",
1853
+ " <tr style=\"text-align: right;\">\n",
1854
+ " <th></th>\n",
1855
+ " <th>classifier</th>\n",
1856
+ " <th>T0</th>\n",
1857
+ " <th>T1</th>\n",
1858
+ " <th>T2</th>\n",
1859
+ " <th>T3</th>\n",
1860
+ " <th>T4</th>\n",
1861
+ " <th>T5</th>\n",
1862
+ " <th>T6</th>\n",
1863
+ " <th>T7</th>\n",
1864
+ " <th>T8</th>\n",
1865
+ " <th>...</th>\n",
1866
+ " <th>T90</th>\n",
1867
+ " <th>T91</th>\n",
1868
+ " <th>T92</th>\n",
1869
+ " <th>T93</th>\n",
1870
+ " <th>T94</th>\n",
1871
+ " <th>T95</th>\n",
1872
+ " <th>T96</th>\n",
1873
+ " <th>T97</th>\n",
1874
+ " <th>T98</th>\n",
1875
+ " <th>T99</th>\n",
1876
+ " </tr>\n",
1877
+ " </thead>\n",
1878
+ " <tbody>\n",
1879
+ " <tr>\n",
1880
+ " <th>accuracy</th>\n",
1881
+ " <td>0.825468</td>\n",
1882
+ " <td>0.812263</td>\n",
1883
+ " <td>0.825366</td>\n",
1884
+ " <td>0.818098</td>\n",
1885
+ " <td>0.825366</td>\n",
1886
+ " <td>0.826492</td>\n",
1887
+ " <td>0.826901</td>\n",
1888
+ " <td>0.825878</td>\n",
1889
+ " <td>0.829563</td>\n",
1890
+ " <td>0.831201</td>\n",
1891
+ " <td>...</td>\n",
1892
+ " <td>0.825775</td>\n",
1893
+ " <td>0.826287</td>\n",
1894
+ " <td>0.826901</td>\n",
1895
+ " <td>0.826083</td>\n",
1896
+ " <td>0.824547</td>\n",
1897
+ " <td>0.824956</td>\n",
1898
+ " <td>0.824752</td>\n",
1899
+ " <td>0.825264</td>\n",
1900
+ " <td>0.825775</td>\n",
1901
+ " <td>0.825468</td>\n",
1902
+ " </tr>\n",
1903
+ " <tr>\n",
1904
+ " <th>f1</th>\n",
1905
+ " <td>0.611529</td>\n",
1906
+ " <td>0.602686</td>\n",
1907
+ " <td>0.607094</td>\n",
1908
+ " <td>0.617603</td>\n",
1909
+ " <td>0.623233</td>\n",
1910
+ " <td>0.625745</td>\n",
1911
+ " <td>0.625636</td>\n",
1912
+ " <td>0.623089</td>\n",
1913
+ " <td>0.619255</td>\n",
1914
+ " <td>0.622568</td>\n",
1915
+ " <td>...</td>\n",
1916
+ " <td>0.611416</td>\n",
1917
+ " <td>0.612292</td>\n",
1918
+ " <td>0.613132</td>\n",
1919
+ " <td>0.611480</td>\n",
1920
+ " <td>0.610455</td>\n",
1921
+ " <td>0.610833</td>\n",
1922
+ " <td>0.610909</td>\n",
1923
+ " <td>0.611427</td>\n",
1924
+ " <td>0.611770</td>\n",
1925
+ " <td>0.611529</td>\n",
1926
+ " </tr>\n",
1927
+ " <tr>\n",
1928
+ " <th>statistical_parity_difference</th>\n",
1929
+ " <td>-0.204459</td>\n",
1930
+ " <td>-0.183168</td>\n",
1931
+ " <td>-0.173759</td>\n",
1932
+ " <td>-0.187010</td>\n",
1933
+ " <td>-0.188524</td>\n",
1934
+ " <td>-0.186375</td>\n",
1935
+ " <td>-0.186373</td>\n",
1936
+ " <td>-0.184378</td>\n",
1937
+ " <td>-0.173918</td>\n",
1938
+ " <td>-0.174685</td>\n",
1939
+ " <td>...</td>\n",
1940
+ " <td>-0.201235</td>\n",
1941
+ " <td>-0.201235</td>\n",
1942
+ " <td>-0.199853</td>\n",
1943
+ " <td>-0.200160</td>\n",
1944
+ " <td>-0.202466</td>\n",
1945
+ " <td>-0.205228</td>\n",
1946
+ " <td>-0.205689</td>\n",
1947
+ " <td>-0.205074</td>\n",
1948
+ " <td>-0.205533</td>\n",
1949
+ " <td>-0.204459</td>\n",
1950
+ " </tr>\n",
1951
+ " <tr>\n",
1952
+ " <th>equal_opportunity_difference</th>\n",
1953
+ " <td>-0.191345</td>\n",
1954
+ " <td>-0.076254</td>\n",
1955
+ " <td>-0.102515</td>\n",
1956
+ " <td>-0.061932</td>\n",
1957
+ " <td>-0.076409</td>\n",
1958
+ " <td>-0.066125</td>\n",
1959
+ " <td>-0.064094</td>\n",
1960
+ " <td>-0.050543</td>\n",
1961
+ " <td>-0.064137</td>\n",
1962
+ " <td>-0.063850</td>\n",
1963
+ " <td>...</td>\n",
1964
+ " <td>-0.189822</td>\n",
1965
+ " <td>-0.190330</td>\n",
1966
+ " <td>-0.190330</td>\n",
1967
+ " <td>-0.192139</td>\n",
1968
+ " <td>-0.181855</td>\n",
1969
+ " <td>-0.191345</td>\n",
1970
+ " <td>-0.189028</td>\n",
1971
+ " <td>-0.188520</td>\n",
1972
+ " <td>-0.194170</td>\n",
1973
+ " <td>-0.191345</td>\n",
1974
+ " </tr>\n",
1975
+ " <tr>\n",
1976
+ " <th>average_abs_odds_difference</th>\n",
1977
+ " <td>0.147658</td>\n",
1978
+ " <td>0.087144</td>\n",
1979
+ " <td>0.092808</td>\n",
1980
+ " <td>0.081091</td>\n",
1981
+ " <td>0.087742</td>\n",
1982
+ " <td>0.081767</td>\n",
1983
+ " <td>0.080999</td>\n",
1984
+ " <td>0.074554</td>\n",
1985
+ " <td>0.075044</td>\n",
1986
+ " <td>0.074928</td>\n",
1987
+ " <td>...</td>\n",
1988
+ " <td>0.145107</td>\n",
1989
+ " <td>0.145203</td>\n",
1990
+ " <td>0.144259</td>\n",
1991
+ " <td>0.145433</td>\n",
1992
+ " <td>0.142604</td>\n",
1993
+ " <td>0.148209</td>\n",
1994
+ " <td>0.147491</td>\n",
1995
+ " <td>0.146859</td>\n",
1996
+ " <td>0.149477</td>\n",
1997
+ " <td>0.147658</td>\n",
1998
+ " </tr>\n",
1999
+ " <tr>\n",
2000
+ " <th>disparate_impact</th>\n",
2001
+ " <td>-1.313875</td>\n",
2002
+ " <td>-0.965861</td>\n",
2003
+ " <td>-1.069424</td>\n",
2004
+ " <td>-0.975379</td>\n",
2005
+ " <td>-1.059607</td>\n",
2006
+ " <td>-1.042253</td>\n",
2007
+ " <td>-1.050134</td>\n",
2008
+ " <td>-1.037243</td>\n",
2009
+ " <td>-1.048319</td>\n",
2010
+ " <td>-1.057598</td>\n",
2011
+ " <td>...</td>\n",
2012
+ " <td>-1.290512</td>\n",
2013
+ " <td>-1.293428</td>\n",
2014
+ " <td>-1.285519</td>\n",
2015
+ " <td>-1.286630</td>\n",
2016
+ " <td>-1.283391</td>\n",
2017
+ " <td>-1.316618</td>\n",
2018
+ " <td>-1.315281</td>\n",
2019
+ " <td>-1.316070</td>\n",
2020
+ " <td>-1.329787</td>\n",
2021
+ " <td>-1.313875</td>\n",
2022
+ " </tr>\n",
2023
+ " <tr>\n",
2024
+ " <th>theil_index</th>\n",
2025
+ " <td>0.132264</td>\n",
2026
+ " <td>0.131560</td>\n",
2027
+ " <td>0.134372</td>\n",
2028
+ " <td>0.126129</td>\n",
2029
+ " <td>0.126292</td>\n",
2030
+ " <td>0.125458</td>\n",
2031
+ " <td>0.125693</td>\n",
2032
+ " <td>0.126586</td>\n",
2033
+ " <td>0.130037</td>\n",
2034
+ " <td>0.129040</td>\n",
2035
+ " <td>...</td>\n",
2036
+ " <td>0.132435</td>\n",
2037
+ " <td>0.132200</td>\n",
2038
+ " <td>0.132021</td>\n",
2039
+ " <td>0.132519</td>\n",
2040
+ " <td>0.132442</td>\n",
2041
+ " <td>0.132411</td>\n",
2042
+ " <td>0.132296</td>\n",
2043
+ " <td>0.132236</td>\n",
2044
+ " <td>0.132262</td>\n",
2045
+ " <td>0.132264</td>\n",
2046
+ " </tr>\n",
2047
+ " </tbody>\n",
2048
+ "</table>\n",
2049
+ "<p>7 rows × 101 columns</p>\n",
2050
+ "</div>"
2051
+ ],
2052
+ "text/plain": [
2053
+ " classifier T0 T1 T2 \\\n",
2054
+ "accuracy 0.825468 0.812263 0.825366 0.818098 \n",
2055
+ "f1 0.611529 0.602686 0.607094 0.617603 \n",
2056
+ "statistical_parity_difference -0.204459 -0.183168 -0.173759 -0.187010 \n",
2057
+ "equal_opportunity_difference -0.191345 -0.076254 -0.102515 -0.061932 \n",
2058
+ "average_abs_odds_difference 0.147658 0.087144 0.092808 0.081091 \n",
2059
+ "disparate_impact -1.313875 -0.965861 -1.069424 -0.975379 \n",
2060
+ "theil_index 0.132264 0.131560 0.134372 0.126129 \n",
2061
+ "\n",
2062
+ " T3 T4 T5 T6 \\\n",
2063
+ "accuracy 0.825366 0.826492 0.826901 0.825878 \n",
2064
+ "f1 0.623233 0.625745 0.625636 0.623089 \n",
2065
+ "statistical_parity_difference -0.188524 -0.186375 -0.186373 -0.184378 \n",
2066
+ "equal_opportunity_difference -0.076409 -0.066125 -0.064094 -0.050543 \n",
2067
+ "average_abs_odds_difference 0.087742 0.081767 0.080999 0.074554 \n",
2068
+ "disparate_impact -1.059607 -1.042253 -1.050134 -1.037243 \n",
2069
+ "theil_index 0.126292 0.125458 0.125693 0.126586 \n",
2070
+ "\n",
2071
+ " T7 T8 ... T90 T91 \\\n",
2072
+ "accuracy 0.829563 0.831201 ... 0.825775 0.826287 \n",
2073
+ "f1 0.619255 0.622568 ... 0.611416 0.612292 \n",
2074
+ "statistical_parity_difference -0.173918 -0.174685 ... -0.201235 -0.201235 \n",
2075
+ "equal_opportunity_difference -0.064137 -0.063850 ... -0.189822 -0.190330 \n",
2076
+ "average_abs_odds_difference 0.075044 0.074928 ... 0.145107 0.145203 \n",
2077
+ "disparate_impact -1.048319 -1.057598 ... -1.290512 -1.293428 \n",
2078
+ "theil_index 0.130037 0.129040 ... 0.132435 0.132200 \n",
2079
+ "\n",
2080
+ " T92 T93 T94 T95 \\\n",
2081
+ "accuracy 0.826901 0.826083 0.824547 0.824956 \n",
2082
+ "f1 0.613132 0.611480 0.610455 0.610833 \n",
2083
+ "statistical_parity_difference -0.199853 -0.200160 -0.202466 -0.205228 \n",
2084
+ "equal_opportunity_difference -0.190330 -0.192139 -0.181855 -0.191345 \n",
2085
+ "average_abs_odds_difference 0.144259 0.145433 0.142604 0.148209 \n",
2086
+ "disparate_impact -1.285519 -1.286630 -1.283391 -1.316618 \n",
2087
+ "theil_index 0.132021 0.132519 0.132442 0.132411 \n",
2088
+ "\n",
2089
+ " T96 T97 T98 T99 \n",
2090
+ "accuracy 0.824752 0.825264 0.825775 0.825468 \n",
2091
+ "f1 0.610909 0.611427 0.611770 0.611529 \n",
2092
+ "statistical_parity_difference -0.205689 -0.205074 -0.205533 -0.204459 \n",
2093
+ "equal_opportunity_difference -0.189028 -0.188520 -0.194170 -0.191345 \n",
2094
+ "average_abs_odds_difference 0.147491 0.146859 0.149477 0.147658 \n",
2095
+ "disparate_impact -1.315281 -1.316070 -1.329787 -1.313875 \n",
2096
+ "theil_index 0.132296 0.132236 0.132262 0.132264 \n",
2097
+ "\n",
2098
+ "[7 rows x 101 columns]"
2099
+ ]
2100
+ },
2101
+ "execution_count": 24,
2102
+ "metadata": {},
2103
+ "output_type": "execute_result"
2104
+ }
2105
+ ],
2106
+ "source": [
2107
+ "import numpy as np\n",
2108
+ "final_result = pd.DataFrame(final_metrics)\n",
2109
+ "print(final_result)\n",
2110
+ "final_result[3] = np.log(final_result[3])\n",
2111
+ "final_result = final_result.transpose()\n",
2112
+ "acc_f1 = pd.DataFrame(accuracy)\n",
2113
+ "acc_f1['f1'] = f1\n",
2114
+ "acc_f1 = pd.DataFrame(acc_f1).transpose()\n",
2115
+ "acc = acc_f1.rename(index={0: 'accuracy', 1: 'f1'})\n",
2116
+ "final_result = final_result.rename(index={0: 'statistical_parity_difference', 1: 'equal_opportunity_difference', 2: 'average_abs_odds_difference', 3: 'disparate_impact', 4: 'theil_index'})\n",
2117
+ "final_result = pd.concat([acc,final_result])\n",
2118
+ "final_result.columns = ['T' + str(col) for col in final_result.columns]\n",
2119
+ "final_result.insert(0, \"classifier\", final_result['T' + str(num_estimators - 1)]) ##Add final metrics add the beginning of the df\n",
2120
+ "final_result.to_csv('../../Results/AdaBoost/' + nb_fname + '.csv')\n",
2121
+ "final_result"
2122
+ ]
2123
+ },
2124
+ {
2125
+ "cell_type": "code",
2126
+ "execution_count": null,
2127
+ "metadata": {},
2128
+ "outputs": [],
2129
+ "source": []
2130
+ }
2131
+ ],
2132
+ "metadata": {
2133
+ "kernelspec": {
2134
+ "display_name": "Python 3",
2135
+ "language": "python",
2136
+ "name": "python3"
2137
+ },
2138
+ "language_info": {
2139
+ "codemirror_mode": {
2140
+ "name": "ipython",
2141
+ "version": 3
2142
+ },
2143
+ "file_extension": ".py",
2144
+ "mimetype": "text/x-python",
2145
+ "name": "python",
2146
+ "nbconvert_exporter": "python",
2147
+ "pygments_lexer": "ipython3",
2148
+ "version": "3.8.5"
2149
+ },
2150
+ "papermill": {
2151
+ "default_parameters": {},
2152
+ "duration": 1432.173477,
2153
+ "end_time": "2021-03-19T06:01:46.424650",
2154
+ "environment_variables": {},
2155
+ "exception": null,
2156
+ "input_path": "__notebook__.ipynb",
2157
+ "output_path": "__notebook__.ipynb",
2158
+ "parameters": {},
2159
+ "start_time": "2021-03-19T05:37:54.251173",
2160
+ "version": "2.2.2"
2161
+ }
2162
+ },
2163
+ "nbformat": 4,
2164
+ "nbformat_minor": 4
2165
+ }
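A minimal illustrative sketch (not part of the uploaded notebook) of what the "Difference in mean outcomes" value printed above measures: the statistical parity difference, i.e. the favorable-outcome rate of the unprivileged group minus that of the privileged group. It assumes the adult_df dataframe built in the cells above, with sex encoded as 1 = Male (privileged), 0 = Female (unprivileged) and income encoded as 0/1.

    # Hypothetical check, assuming adult_df from the notebook above
    p_priv = adult_df.loc[adult_df["sex"] == 1, "income"].mean()    # P(income = 1 | privileged group)
    p_unpriv = adult_df.loc[adult_df["sex"] == 0, "income"].mean()  # P(income = 1 | unprivileged group)
    print(p_unpriv - p_priv)  # a negative value means the favorable outcome is rarer in the unprivileged group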
AdultNoteBook/Kernels/AdaBoost/.ipynb_checkpoints/3-income-prediction-xgbclassifier-auc-0-926-checkpoint.ipynb ADDED
The diff for this file is too large to render. See raw diff
 
AdultNoteBook/Kernels/AdaBoost/.ipynb_checkpoints/4-deep-analysis-and-90-accuracy-checkpoint.ipynb ADDED
The diff for this file is too large to render. See raw diff
 
AdultNoteBook/Kernels/AdaBoost/.ipynb_checkpoints/5-income-classification-using-meta-learning-checkpoint.ipynb ADDED
The diff for this file is too large to render. See raw diff
 
AdultNoteBook/Kernels/AdaBoost/.ipynb_checkpoints/6-income-prediction-eda-to-visuals-0-98-auc-checkpoint.ipynb ADDED
The diff for this file is too large to render. See raw diff
 
AdultNoteBook/Kernels/AdaBoost/.ipynb_checkpoints/7-adult-census-income-eda-and-prediction-87-35-checkpoint.ipynb ADDED
The diff for this file is too large to render. See raw diff
 
AdultNoteBook/Kernels/AdaBoost/1-adult-income.ipynb ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:56b26b047c35cd5c6fb4d43768b35b1f6ed4f7bffbdd7e03346c923e78e5ae4b
3
+ size 17588574
AdultNoteBook/Kernels/AdaBoost/1-adult-income.py ADDED
@@ -0,0 +1,638 @@
1
+ #!/usr/bin/env python
2
+ # coding: utf-8
3
+
4
+ # <p id="part0"></p>
5
+ #
6
+ # <p style="font-family: Arials; line-height: 2; font-size: 24px; font-weight: bold; letter-spacing: 2px; text-align: center; color: #FF8C00">Adult Income 💸🤑💰 </p>
7
+ #
8
+ # <img src="https://miro.medium.com/max/1000/1*08ltbgXFxujakJZSJswp1Q.png" width="100%" align="center" hspace="5%" vspace="5%"/>
9
+ #
10
+ # <p style = "font-family: Inter, sans-serif; font-size: 14px; color: rgba(0,0,0,.7)"> An individual’s annual income results from various factors. Intuitively, it is influenced by the individual’s education level, age, gender, occupation, and etc.</p>
11
+ #
12
+ #
13
+ # <p style="font-family: Arials; font-size: 20px; font-style: normal; font-weight: bold; letter-spacing: 3px; color: #808080; line-height:1.0">TABLE OF CONTENT</p>
14
+ #
15
+ # <p style="font-family: Arials; font-size: 16px; font-style: normal; font-weight: bold; letter-spacing: 2px; color: #808080; line-height:1.3"><a href="#part1" style="color:#808080">0 PROLOGUE</a></p>
16
+ #
17
+ # <p style="font-family: Arials; font-size: 16px; font-style: normal; font-weight: bold; letter-spacing: 2px; color: #808080; line-height:1.3"><a href="#part2" style="color:#808080">1 IMPORTING LIBRARIES</a></p>
18
+ #
19
+ # <p style="font-family: Arials; font-size: 16px; font-style: normal; font-weight: bold; letter-spacing: 2px; color: #808080; line-height:1.3"><a href="#part3" style="color:#808080">2 DATA DESCRIPTION AND DATA CLEANING</a></p>
20
+ #
21
+ # <p style="text-indent: 1vw; font-family: Arials; font-size: 14px; font-style: normal; font-weight: bold; letter-spacing: 2px; color: #808080; line-height:1.3">
22
+ # <a href="#part4" style="color:#808080">2.1 Import Data</a></p>
23
+ #
24
+ # <p style="text-indent: 1vw; font-family: Arials; font-size: 14px; font-style: normal; font-weight: bold; letter-spacing: 2px; color: #808080; line-height:1.3">
25
+ # <a href="#part5" style="color:#808080">2.2 Data types</a></p>
26
+ #
27
+ # <p style="text-indent: 1vw; font-family: Arials; font-size: 14px; font-style: normal; font-weight: bold; letter-spacing: 2px; color: #808080; line-height:1.3">
28
+ # <a href="#part6" style="color:#808080">2.3 Missing values</a></p>
29
+ #
30
+ # <p style="text-indent: 1vw; font-family: Arials; font-size: 14px; font-style: normal; font-weight: bold; letter-spacing: 2px; color: #808080; line-height:1.3">
31
+ # <a href="#part7" style="color:#808080">2.4 Duplicates</a></p>
32
+ #
33
+ #
34
+ # <p style="font-family: Arials; font-size: 16px; font-style: normal; font-weight: bold; letter-spacing: 2px; color: #808080; line-height:1.3"><a href="#part8" style="color:#808080">3 ANALYSIS</a></p>
35
+ #
36
+ # <p style="text-indent: 1vw; font-family: Arials; font-size: 14px; font-style: normal; font-weight: bold; letter-spacing: 2px; color: #808080; line-height:1.3">
37
+ # <a href="#part9" style="color:#808080">3.1 Uni-Vriate Analysis:</a></p>
38
+ #
39
+ # <p style="text-indent: 1vw; font-family: Arials; font-size: 14px; font-style: normal; font-weight: bold; letter-spacing: 2px; color: #808080; line-height:1.3">
40
+ # <a href="#part10" style="color:#808080">3.2 Bi-Vriate Analysis:</a></p>
41
+ #
42
+ # <p style="text-indent: 1vw; font-family: Arials; font-size: 14px; font-style: normal; font-weight: bold; letter-spacing: 2px; color: #808080; line-height:1.3">
43
+ # <a href="#part11" style="color:#808080">3.3 Multi-Vriate Analysis:</a></p>
44
+ #
45
+ #
46
+ # <p style="font-family: Arials; font-size: 16px; font-style: normal; font-weight: bold; letter-spacing: 2px; color: #808080; line-height:1.3"><a href="#part12" style="color:#808080">4 FINAL CONCLUSIONS</a></p>
47
+ #
48
+ # <p style="font-family: Arials; font-size: 16px; font-style: normal; font-weight: bold; letter-spacing: 2px; color: #808080; line-height:1.3"><a href="#part13" style="color:#808080">5 MODELLING</a></p>
49
+
50
+ # <p id="part1"></p>
51
+ #
52
+ # <p style="font-family: Arials; font-size: 16px; font-style: bold; font-weight: bold; letter-spacing: 3px; color: #FF8C00">0 PROLOGUE</p>
53
+ # <hr style="height: 0.5px; border: 0; background-color: #808080">
54
+ #
55
+ #
56
+ #
57
+ # <p style="font-family: Arials, sans-serif; font-size: 14px; color: rgba(0,0,0,.7)"><strong>FEATURES:</strong></p>
58
+ #
59
+ # <ol style="font-family: Arials, sans-serif; font-size: 14px; line-height:1.5; color: rgba(0,0,0,.7)">
60
+ # <li><strong>AGE</strong> -continuous. </li>
61
+ # <p></p>
62
+ # <li><strong> workclass</strong> -Private, Self-emp-not-inc, Self-emp-inc, Federal-gov, Local-gov, State-gov, Without-pay, Never-worked.</li>
63
+ # <p></p>
64
+ # <li><strong>fnlwgt</strong> -continuous.</li>
65
+ # <p></p>
66
+ # <li><strong>education</strong> - Bachelors, Some-college, 11th, HS-grad, Prof-school, Assoc-acdm, Assoc-voc, 9th, 7th-8th, 12th, Masters, 1st-4th, 10th, Doctorate, 5th-6th, Preschool.</li>
67
+ # <p></p>
68
+ # <li><strong>education-num</strong> - continuous.</li>
69
+ # <p></p>
70
+ # <li><strong>marital-status</strong> -Married-civ-spouse, Divorced, Never-married, Separated, Widowed, Married-spouse-absent, Married-AF-spouse.</li>
71
+ # <p></p>
72
+ # <li><strong>occupation</strong> -Tech-support, Craft-repair, Other-service, Sales, Exec-managerial, Prof-specialty, Handlers-cleaners, Machine-op-inspct, Adm-clerical, Farming-fishing, Transport-moving, Priv-house-serv, Protective-serv, Armed-Forces.</li>
73
+ # <p></p>
74
+ # <li><strong>relationship</strong> -Wife, Own-child, Husband, Not-in-family, Other-relative, Unmarried.</li>
75
+ # <p></p>
76
+ # <li><strong>race</strong> -White, Asian-Pac-Islander, Amer-Indian-Eskimo, Other, Black.</li>
77
+ # <p></p>
78
+ # <li><strong>sex</strong> -Female, Male.</li>
79
+ # <p></p>
80
+ # <li><strong>capital-gain</strong> -continuous.</li>
81
+ # <p></p>
82
+ # <li><strong>capital-loss</strong> -continuous.</li>
83
+ # <p></p>
84
+ # <li><strong>hours-per-week</strong> -continuous.</li>
85
+ # <p></p>
86
+ # <li><strong>native-country</strong> -United-States, Cambodia, England, Puerto-Rico, Canada, Germany, Outlying-US(Guam-USVI-etc), India, Japan, Greece, South, China, Cuba, Iran, Honduras, Philippines, Italy, Poland, Jamaica, Vietnam, Mexico, Portugal, Ireland, France, Dominican-Republic, Laos, Ecuador, Taiwan, Haiti, Columbia, Hungary, Guatemala, Nicaragua, Scotland, Thailand, Yugoslavia, El-Salvador, Trinadad&Tobago, Peru, Hong, Holand-Netherlands.</li>
87
+ # <p></p>
88
+ # <li><strong>class</strong> - >50K, <=50K</li>
89
+ # <p></p>
90
+ #
91
+ # </ol>
92
+ #
93
+ #
94
+ #
95
+ #
96
+
97
+ # <p style="font-family: Arials; line-height: 1.5; font-size: 16px; font-weight: bold; letter-spacing: 2px; text-align: center; color: #FF8C00">If you liked this notebook, please upvote.</p>
98
+ # <p style="text-align: center">😊😊😊</p>
99
+
100
+ # <p id="part2"></p>
101
+ #
102
+ # # <span style="font-family: Arials; font-size: 16px; font-style: normal; font-weight: bold; letter-spacing: 3px; color: #FF8C00">1 IMPORTING LIBRARIES</span>
103
+ # <hr style="height: 0.5px; border: 0; background-color: #808080">
104
+ #
105
+ # <p style="font-family: Arials, sans-serif; font-size: 14px; line-height:1.0; color: rgba(0,0,0,.7)"><strong>LIBRARIES:</strong></p>
106
+ #
107
+ # <ol style="font-family: Arials, sans-serif; font-size: 14px; line-height:1.5; color: rgba(0,0,0,.7)">
108
+ # <li>Library <strong>pandas</strong> will be required to work with data in tabular representation.</li>
109
+ # <p></p>
110
+ # <li>Library <strong>numpy</strong> will be required to round the data in the correlation matrix.</li>
111
+ # <p></p>
112
+ # <li>Library <strong>missingno</strong> will be required to visualize missing values in the data.</li>
113
+ # <p></p>
114
+ # <li>Library <strong>matplotlib, seaborn, plotly</strong> required for data visualization.</li>
115
+ # <p></p>
116
+ # </ol>
117
+
118
+ # In[1]:
119
+
120
+
121
+ ## for eda and visuls:
122
+ import pandas as pd
123
+ import numpy as np
124
+ import seaborn as sns
125
+ import plotly.express as px
126
+ import missingno
127
+ import matplotlib.pyplot as plt
128
+ from plotly.subplots import make_subplots
129
+ import plotly.graph_objects as go
130
+ import plotly.figure_factory as ff
131
+
132
+
133
+
134
+ # <p id="part3"></p>
135
+ #
136
+ # # <span style="font-family: Arials; font-size: 16px; font-style: normal; font-weight: bold; letter-spacing: 3px; color: #FF8C00">2 DATA DESCRIPTION AND DATA CLEANING</span>
137
+ # <hr style="height: 0.5px; border: 0; background-color: #808080">
138
+ #
139
+ # <p style="font-family: Arials, sans-serif; font-size: 14px; color: rgba(0,0,0,.7)">In this block, cleaning part will be carried out, data types, missing values, duplicates.</p>
140
+
141
+ # <p id="part4"></p>
142
+ #
143
+ # <p style="font-family: Arials; font-size: 16px; font-style: normal; font-weight: normal; letter-spacing: 3px; color: #FF8C00; line-height:1.0">2.1 Import Data</p>
144
+
145
+ # In[2]:
146
+
147
+
148
+ # Reading Data:
149
+ df=pd.read_csv("/kaggle/input/adult-census-income/adult.csv")
150
+ df.head() #Loading the First Five Rows:
151
+
152
+
153
+ # In[3]:
154
+
155
+
156
+ # Let's Look The Dimensions Of The Data:
157
+ print(f'The Data-Set Contains {df.shape[0]} Rows and {df.shape[1]} Columns')
158
+
159
+
160
+ # <p id="part5"></p>
161
+ #
162
+ # <p style="font-family: Arials; font-size: 16px; font-style: normal; font-weight: normal; letter-spacing: 3px; color: #FF8C00; line-height:1.0">2.2 Data Types</p>
163
+
164
+ # In[4]:
165
+
166
+
167
+ #Check Data Types
168
+ df.dtypes
169
+
170
+
171
+ # <p id="part6"></p>
172
+ #
173
+ # <p style="font-family: Arials; font-size: 16px; font-style: normal; font-weight: normal; letter-spacing: 3px; color: #FF8C00; line-height:1.0">2.3 Missing values</p>
174
+
175
+ # <p style = "font-family: Inter, sans-serif; font-size: 14px; color: rgba(0,0,0,.7)"> Let's calculate the percentage of blanks and filled values for all columns.</p>
176
+
177
+ # In[5]:
178
+
179
+
180
+ # loop through the columns and check the missing values
181
+ for col in df.columns:
182
+ pct_missing = df[col].isnull().mean()  # fraction of missing values in this column
183
+ print(f'{col} - {pct_missing :.1%}')
184
+
185
+
186
+ # In[6]:
187
+
188
+
189
+ # Build a matrix of missing values
190
+ missingno.matrix(df, fontsize = 16)
191
+ plt.show()
192
+
193
+
194
+ # <div style="background: #DCDCDC"><p style="font-family: Arials, sans-serif; font-size: 16px; color: #000000"><strong>CONCLUSION:</strong> The data has no missing values, so no further transformations are required.</p></div>
195
+
196
+ # <p id="part7"></p>
197
+ #
198
+ # <p style="font-family: Arials; font-size: 16px; font-style: normal; font-weight: normal; letter-spacing: 3px; color: #FF8C00; line-height:1.0">2.4 Duplicates</p>
199
+
200
+ # In[7]:
201
+
202
+
203
+ #Check The Duplicates In the Data-Set:
204
+ df.duplicated().sum()
205
+
206
+
207
+ # <p style = "font-family: Inter, sans-serif; font-size: 14px; color: rgba(0,0,0,.7)">There are 24 Duplicate Value Present in the Data-set.</p>
208
+
209
+ # In[8]:
210
+
211
+
212
+ # We will drop the Duplicate value:
213
+ df=df.drop_duplicates(keep="first")
214
+
215
+
216
+ # ### Some columns contain '?' placeholders (null values); let's handle those as well:
217
+
218
+ # In[9]:
219
+
220
+
221
+ df["workclass"]=df["workclass"].replace("?",np.nan)
222
+ df["occupation"]=df["occupation"].replace("?",np.nan)
223
+ df["native.country"]=df["native.country"].replace("?",np.nan)
224
+
225
+
226
+ # In[10]:
227
+
228
+
229
+ df.isna().sum()
230
+
231
+
232
+ # In[11]:
233
+
234
+
235
+ df["workclass"]=df["workclass"].fillna(df["workclass"].mode()[0])
236
+ df["occupation"]=df["occupation"].fillna(df["occupation"].mode()[0])
237
+ df["native.country"]=df["native.country"].fillna(df["native.country"].mode()[0])
238
+
239
+
240
+ # <div style="background: #DCDCDC"><p style="font-family: Arials, sans-serif; font-size: 16px; color: #000000"><strong>CONCLUSION:</strong>Now our Data is Clean We can do Further Analysis.</p></div>
241
+
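+ # A small alternative sketch (an assumption, not part of the original script): pandas
+ # can flag the '?' placeholders as missing values directly while reading the CSV, so
+ # the replace()/fillna() steps above can start from real NaNs.
+ df_alt = pd.read_csv("/kaggle/input/adult-census-income/adult.csv", na_values="?")
+ df_alt.isna().sum()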
242
+ # <p id="part8"></p>
243
+ #
244
+ # <p style="font-family: Arials; font-size: 16px; font-style: normal; font-weight: normal; letter-spacing: 3px; color: #FF8C00; line-height:1.0">3. Analysis:</p>
245
+
246
+ # <p id="part9"></p>
247
+ #
248
+ # <p style="font-family: Arials; font-size: 16px; font-style: normal; font-weight: normal; letter-spacing: 3px; color: #FF8C00; line-height:1.0">3.1 Uni-variate Analysis:</p>
249
+
250
+ # In[12]:
251
+
252
+
253
+ fig = plt.figure(figsize = (15,6))
254
+ fig.patch.set_facecolor('#f5f6f6')
255
+
256
+
257
+
258
+ gs = fig.add_gridspec(2,3)
259
+ gs.update(wspace=0.2,hspace= 0.2)
260
+
261
+ ax0 = fig.add_subplot(gs[0,0])
262
+ ax1 = fig.add_subplot(gs[0,1])
263
+ ax2 = fig.add_subplot(gs[0,2])
264
+ ax3 = fig.add_subplot(gs[1,0])
265
+ ax4 = fig.add_subplot(gs[1,1])
266
+ ax5 = fig.add_subplot(gs[1,2])
267
+
268
+ axes=[ax0,ax1,ax2,ax3,ax4,ax5]
269
+ for ax in axes:
270
+ ax.set_facecolor('#f5f6f6')
271
+ ax.tick_params(axis='x',
272
+ labelsize = 12, which = 'major',
273
+ direction = 'out',pad = 2,
274
+ length = 1.5)
275
+ ax.tick_params(axis='y', colors= 'black')
276
+ ax.axes.get_yaxis().set_visible(False)
277
+
278
+ for loc in ['left', 'right', 'top', 'bottom']:
279
+ ax.spines[loc].set_visible(False)
280
+
281
+
282
+
283
+ cols = df.select_dtypes(exclude = 'object').columns
284
+
285
+ sns.kdeplot(x = df[cols[0]],color="green",fill=True,ax = ax0)
286
+ sns.kdeplot(x = df[cols[1]],color="red",fill=True,ax = ax1)
287
+ sns.kdeplot(x = df[cols[2]],color="blue",fill=True,ax = ax2)
288
+ sns.kdeplot(x = df[cols[3]],color="black",fill=True,ax = ax3)
289
+ sns.kdeplot(x = df[cols[4]],color="pink",fill=True,ax = ax4)
290
+ sns.kdeplot(x = df[cols[5]],color="green",fill=True,ax = ax5)
291
+
292
+ fig.text(0.2,0.98,"Univariate Analysis on Numerical Columns:",**{'font':'serif', 'size':18,'weight':'bold'}, alpha = 1)
293
+ fig.text(0.1,0.90,"Most of the adults are range of 20-45 and on average an adult spend around 40hrs per week on work\n Also as we can see there is so much otliers present in the numerical columns:",**{'font':'serif', 'size':12,'weight':'bold'}, alpha = 1)
294
+
295
+
296
+ # In[13]:
297
+
298
+
299
+ df.select_dtypes(include="object").columns
300
+
301
+
302
+ # In[14]:
303
+
304
+
305
+ income=df["income"].reset_index()
306
+ px.pie(values=income["index"],names=income["income"], color_discrete_sequence=px.colors.sequential.RdBu,
307
+ title='Income of the Adults')
308
+
309
+
310
+ # In[15]:
311
+
312
+
313
+ sex=df["sex"].reset_index()
314
+ px.pie(values=sex["index"],names=sex["sex"],title='%AGE OF MALE AND FEMALE', hole=.3)
315
+
316
+
317
+ # In[16]:
318
+
319
+
320
+ race=df["race"].reset_index()
321
+ px.pie(values=race["index"],names=race["race"])
322
+
323
+
324
+ # In[17]:
325
+
326
+
327
+ relationship=df["relationship"].reset_index()
328
+ px.pie(values=relationship["index"],names=relationship["relationship"])
329
+
330
+
331
+ # In[18]:
332
+
333
+
334
+ occupation=df["occupation"].reset_index()
335
+ px.pie(values=occupation["index"],names=occupation["occupation"])
336
+
337
+
338
+ # In[19]:
339
+
340
+
341
+ marital_status=df["marital.status"].reset_index()
342
+ px.pie(values=marital_status["index"],names=marital_status["marital.status"])
343
+
344
+
345
+ # In[20]:
346
+
347
+
348
+ education=df["education"].reset_index()
349
+ px.pie(values=education["index"],names=education["education"])
350
+
351
+
352
+ # In[21]:
353
+
354
+
355
+
356
+ fig=plt.figure(figsize=(10,6))
357
+ ax=sns.countplot(df["workclass"])
358
+ plt.title("COUNT OF WORK CLASS")
359
+
360
+ for loc in ['left', 'right', 'top', 'bottom']:
361
+ ax.spines[loc].set_visible(False)
362
+
363
+
364
+ fig.show()
365
+
366
+ #workcls=df["workclass"].reset_index()
367
+ #px.pie(values=workcls["index"],names=workcls["workclass"])
368
+
369
+
370
+ # <p id="part10"></p>
371
+ #
372
+ # <p style="font-family: Arials; font-size: 16px; font-style: normal; font-weight: normal; letter-spacing: 3px; color: #FF8C00; line-height:1.0">3.2 Bi-Variate Analysis:</p>
373
+
374
+ # In[22]:
375
+
376
+
377
+ df.head(1)
378
+
379
+
380
+ # In[23]:
381
+
382
+
383
+ fig=plt.figure(figsize=(10,6))
384
+ ax=sns.kdeplot(x=df["age"],hue=df["income"],fill=True)
385
+ ax.set_facecolor('#f5f6f6')
386
+ for loc in ['left', 'right', 'top', 'bottom']:
387
+ ax.spines[loc].set_visible(False)
388
+
389
+ fig.text(0.4,1,"Distribution of income with age:",**{'font':'serif', 'size':18,'weight':'bold'}, alpha = 1)
390
+ fig.text(0.1,0.90,"First of all most of the adults have income less than 50k \n But With increasing in age Income is also increasing :",**{'font':'serif', 'size':12,}, alpha = 1)
391
+
392
+
393
+ fig.show()
394
+
395
+
396
+ # In[24]:
397
+
398
+
399
+ fig=plt.figure(figsize=(10,6))
400
+ ax=sns.kdeplot(x=df["education.num"],hue=df["income"],fill=True,)
401
+ ax.set_facecolor('#f5f6f6')
402
+ for loc in ['left', 'right', 'top', 'bottom']:
403
+ ax.spines[loc].set_visible(False)
404
+
405
+ fig.text(0.2,1,"Distribution of Number of years of Education with Income:",**{'font':'serif', 'size':18,'weight':'bold'}, alpha = 1)
406
+ fig.text(0.1,0.90,"With increasing in years of education Income is also increasing :",**{'font':'serif', 'size':12,'weight':'bold'}, alpha = 1)
407
+
408
+
409
+ fig.show()
410
+
411
+
412
+ # <p id="part11"></p>
413
+ #
414
+ # <p style="font-family: Arials; font-size: 16px; font-style: normal; font-weight: normal; letter-spacing: 3px; color: #FF8C00; line-height:1.0">3.3 Multi-Variate Analysis:</p>
415
+
416
+ # <p id="part13"></p>
417
+ #
418
+ # <p style="font-family: Arials; font-size: 16px; font-style: bold; font-weight: bold; letter-spacing: 3px; color: #FF8C00; line-height:1.0">5. Modelling:</p>
419
+ #
420
+ #
421
+
422
+ # <p style="font-family: Arials; font-size: 16px; font-style: bold; font-weight: bold; letter-spacing: 3px; color: #FF8C00; line-height:1.0">5.0 Make data ready for Modelling:</p>
423
+
424
+ # In[25]:
425
+
426
+
427
+ df.income.unique()
428
+
429
+
430
+ # In[26]:
431
+
432
+
433
+ df["income"]=df["income"].map({"<=50K":0,">50K":1})
434
+
435
+
436
+ # In[27]:
437
+
438
+
439
+ X = df.drop(['income'], axis=1)
440
+ y = df['income']
441
+
442
+
443
+ # In[28]:
444
+
445
+
446
+ from sklearn.model_selection import train_test_split
447
+
448
+ X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.3, random_state = 42)
449
+
450
+
451
+ # In[29]:
452
+
453
+
454
+ from sklearn import preprocessing
455
+
456
+ categorical = ['workclass','education', 'marital.status', 'occupation', 'relationship','race', 'sex','native.country',]
457
+ for feature in categorical:
458
+ le = preprocessing.LabelEncoder()
459
+ X_train[feature] = le.fit_transform(X_train[feature])
460
+ X_test[feature] = le.transform(X_test[feature])
461
+
462
+
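+ # A hedged alternative sketch (not part of the original script): one-hot encode the
+ # categorical columns instead of the per-column LabelEncoder loop above, so that a
+ # category present only in the test split does not raise an error at transform time.
+ # Shown for illustration only; the cells below keep using the label-encoded features.
+ from sklearn.preprocessing import OneHotEncoder
+
+ ohe = OneHotEncoder(handle_unknown='ignore')
+ X_train_ohe = ohe.fit_transform(X_train[categorical])
+ X_test_ohe = ohe.transform(X_test[categorical])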
463
+ # In[30]:
464
+
465
+
466
+
467
+ from sklearn.preprocessing import StandardScaler
468
+
469
+ scaler = StandardScaler()
470
+
471
+ X_train = pd.DataFrame(scaler.fit_transform(X_train), columns = X.columns)
472
+
473
+ X_test = pd.DataFrame(scaler.transform(X_test), columns = X.columns)
474
+
475
+
476
+ # ## Random forest
477
+
478
+ # In[31]:
479
+
480
+
481
+ from sklearn.ensemble import RandomForestClassifier
482
+ random_forest = RandomForestClassifier(n_estimators=100)
483
+ random_forest.fit(X_train, y_train)
484
+ y_pred = random_forest.predict(X_test)
485
+
486
+ from sklearn import metrics
487
+ print(metrics.classification_report(y_test,y_pred))
488
+
489
+
490
+ # ## knn
491
+
492
+ # from sklearn.neighbors import KNeighborsClassifier
493
+ # neig = np.arange(1, 25)
494
+ # train_accuracy = []
495
+ # test_accuracy = []
496
+ # # Loop over different values of k
497
+ # for i, k in enumerate(neig):
498
+ # # k from 1 to 25(exclude)
499
+ # knn = KNeighborsClassifier(n_neighbors=k)
500
+ # # Fit with knn
501
+ # knn.fit(X_train,y_train)
502
+ # #train accuracy
503
+ # train_accuracy.append(knn.score(X_train, y_train))
504
+ # # test accuracy
505
+ # test_accuracy.append(knn.score(X_test, y_test))
506
+ #
507
+ # # Plot
508
+ # plt.figure(figsize=[13,8])
509
+ # plt.plot(neig, test_accuracy, label = 'Testing Accuracy')
510
+ # plt.plot(neig, train_accuracy, label = 'Training Accuracy')
511
+ # plt.legend()
512
+ # plt.title('k-value VS Accuracy')
513
+ # plt.xlabel('Number of Neighbors')
514
+ # plt.ylabel('Accuracy')
515
+ # plt.xticks(neig)
516
+ # plt.savefig('graph.png')
517
+ # plt.show()
518
+ # print("Best accuracy is {} with K = {}".format(np.max(test_accuracy),1+test_accuracy.index(np.max(test_accuracy))))
519
+
520
+ # In[32]:
521
+
522
+
523
+
524
+ from sklearn.linear_model import LogisticRegression
525
+ model=LogisticRegression()
526
+ model.fit(X_train,y_train)
527
+
528
+ y_test_pred=model.predict(X_test)
529
+ from sklearn import metrics
530
+ print(metrics.classification_report(y_test,y_test_pred))
531
+
532
+
533
+ # In[33]:
534
+
535
+
536
+ from sklearn.metrics import accuracy_score
537
+
538
+
539
+ # In[34]:
540
+
541
+
542
+ test_score = accuracy_score(y_test, model.predict(X_test)) * 100
543
+ train_score = accuracy_score(y_train, model.predict(X_train)) * 100
544
+
545
+ results_df = pd.DataFrame(data=[["Logistic Regression", train_score, test_score]],
546
+ columns=['Model', 'Training Accuracy %', 'Testing Accuracy %'])
547
+ results_df
548
+
549
+
550
+ # In[35]:
551
+
552
+
553
+ from sklearn.tree import DecisionTreeClassifier
554
+ model=DecisionTreeClassifier()
555
+ model.fit(X_train,y_train)
556
+ dt=model.predict(X_test)
557
+ from sklearn import metrics
558
+ print(metrics.classification_report(y_test,dt))
559
+
560
+
561
+ # In[36]:
562
+
563
+
564
+ from sklearn.svm import SVC
565
+ model=SVC(kernel="rbf")
566
+ model.fit(X_train,y_train)
567
+ sv=model.predict(X_test)
568
+ from sklearn import metrics
569
+ print(metrics.classification_report(y_test,sv))
570
+
571
+
572
+ # In[37]:
573
+
574
+
575
+ from sklearn.ensemble import AdaBoostClassifier
576
+
577
+ model=AdaBoostClassifier(learning_rate= 0.15,n_estimators= 25)
578
+ model.fit(X_train,y_train)
579
+ ab=model.predict(X_test)
580
+ from sklearn import metrics
581
+ print(metrics.classification_report(y_test,ab))
582
+
583
+
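+ # A short illustrative addition (not in the original script): gather the test accuracy
+ # of each model fitted above next to the logistic-regression row already in results_df.
+ # Training accuracies were not recomputed here, so they are left as NaN.
+ extra_rows = pd.DataFrame(
+     data=[["Decision Tree", np.nan, accuracy_score(y_test, dt) * 100],
+           ["SVC (rbf)", np.nan, accuracy_score(y_test, sv) * 100],
+           ["AdaBoost", np.nan, accuracy_score(y_test, ab) * 100]],
+     columns=['Model', 'Training Accuracy %', 'Testing Accuracy %'])
+ results_df = pd.concat([results_df, extra_rows], ignore_index=True)
+ results_df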
584
+ # ## Hyperparameter tuning:
585
+
586
+ # In[38]:
587
+
588
+
589
+ from sklearn.model_selection import GridSearchCV
590
+
591
+
592
+ # In[39]:
593
+
594
+
595
+ clf = DecisionTreeClassifier()
596
+ # Hyperparameter Optimization
597
+ parameters = {'max_features': ['log2', 'sqrt','auto'],
598
+ 'criterion': ['entropy', 'gini'],
599
+ 'max_depth': [2, 3, 5, 10, 50],
600
+ 'min_samples_split': [2, 3, 50, 100],
601
+ 'min_samples_leaf': [1, 5, 8, 10]
602
+ }
603
+
604
+ # Run the grid search
605
+ grid_obj = GridSearchCV(clf, parameters)
606
+ grid_obj = grid_obj.fit(X_train, y_train)
607
+
608
+ # Set the clf to the best combination of parameters
609
+ clf = grid_obj.best_estimator_
610
+
611
+ # Train the model using the training sets
612
+ clf.fit(X_train, y_train)
613
+
614
+
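+ # Illustrative addition (not in the original script): inspect the best hyper-parameter
+ # combination found by the grid search and its mean cross-validated score.
+ print(grid_obj.best_params_)
+ print(grid_obj.best_score_)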
615
+ # In[40]:
616
+
617
+
618
+ y_pred = clf.predict(X_test)
619
+
620
+
621
+ # In[41]:
622
+
623
+
624
+ # Calculating the accuracy
625
+ acc_dt = round( metrics.accuracy_score(y_test, y_pred) * 100, 2 )
626
+ print( 'Accuracy of Decision Tree model : ', acc_dt )
627
+
628
+
629
+ # <p style="font-family: Arials; line-height: 1.5; font-size: 16px; font-weight: bold; letter-spacing: 2px; text-align: center; color: #FF8C00">Thank you for reading this work!
630
+ # Any feedback on this work would be very grateful.
631
+ # If you liked this notebook, Upvote.</p>
632
+ # <p style="text-align: center">😊😊😊</p>
633
+
634
+ # In[ ]:
635
+
636
+
637
+
638
+
AdultNoteBook/Kernels/AdaBoost/2-boosting-algorithms-model-for-adult-census-income.ipynb ADDED
@@ -0,0 +1,2199 @@
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "code",
5
+ "execution_count": 1,
6
+ "metadata": {
7
+ "execution": {
8
+ "iopub.execute_input": "2021-03-19T05:38:00.573511Z",
9
+ "iopub.status.busy": "2021-03-19T05:38:00.572716Z",
10
+ "iopub.status.idle": "2021-03-19T05:38:02.804511Z",
11
+ "shell.execute_reply": "2021-03-19T05:38:02.803821Z"
12
+ },
13
+ "papermill": {
14
+ "duration": 2.263268,
15
+ "end_time": "2021-03-19T05:38:02.804744",
16
+ "exception": false,
17
+ "start_time": "2021-03-19T05:38:00.541476",
18
+ "status": "completed"
19
+ },
20
+ "tags": []
21
+ },
22
+ "outputs": [],
23
+ "source": [
24
+ "import pandas as pd\n",
25
+ "import numpy as np\n",
26
+ "import seaborn as sns\n",
27
+ "import matplotlib.pyplot as plt\n",
28
+ "import warnings\n",
29
+ "warnings.filterwarnings(\"ignore\")\n",
30
+ "\n",
31
+ "from sklearn.pipeline import Pipeline\n",
32
+ "from sklearn.preprocessing import OneHotEncoder\n",
33
+ "import category_encoders as ce\n",
34
+ "from sklearn.impute import SimpleImputer\n",
35
+ "from sklearn.compose import ColumnTransformer\n",
36
+ "\n",
37
+ "from sklearn.model_selection import train_test_split, RandomizedSearchCV, StratifiedKFold, cross_val_score\n",
38
+ "\n",
39
+ "from sklearn.tree import DecisionTreeClassifier\n",
40
+ "from sklearn.ensemble import AdaBoostClassifier, GradientBoostingClassifier\n",
41
+ "from xgboost.sklearn import XGBClassifier\n",
42
+ "from sklearn.metrics import classification_report, f1_score, plot_roc_curve"
43
+ ]
44
+ },
45
+ {
46
+ "cell_type": "code",
47
+ "execution_count": 2,
48
+ "metadata": {},
49
+ "outputs": [],
50
+ "source": [
51
+ "from aif360.datasets import StandardDataset\n",
52
+ "from aif360.metrics import BinaryLabelDatasetMetric, ClassificationMetric\n",
53
+ "import matplotlib.patches as patches\n",
54
+ "from aif360.algorithms.preprocessing import Reweighing\n",
55
+ "#from packages import *\n",
56
+ "#from ml_fairness import *\n",
57
+ "import matplotlib.pyplot as plt\n",
58
+ "import seaborn as sns\n",
59
+ "\n",
60
+ "\n",
61
+ "\n",
62
+ "from IPython.display import Markdown, display"
63
+ ]
64
+ },
65
+ {
66
+ "cell_type": "code",
67
+ "execution_count": 3,
68
+ "metadata": {
69
+ "execution": {
70
+ "iopub.execute_input": "2021-03-19T05:38:02.853355Z",
71
+ "iopub.status.busy": "2021-03-19T05:38:02.852585Z",
72
+ "iopub.status.idle": "2021-03-19T05:38:03.027399Z",
73
+ "shell.execute_reply": "2021-03-19T05:38:03.026828Z"
74
+ },
75
+ "papermill": {
76
+ "duration": 0.200945,
77
+ "end_time": "2021-03-19T05:38:03.027547",
78
+ "exception": false,
79
+ "start_time": "2021-03-19T05:38:02.826602",
80
+ "status": "completed"
81
+ },
82
+ "tags": []
83
+ },
84
+ "outputs": [
85
+ {
86
+ "data": {
87
+ "text/html": [
88
+ "<div>\n",
89
+ "<style scoped>\n",
90
+ " .dataframe tbody tr th:only-of-type {\n",
91
+ " vertical-align: middle;\n",
92
+ " }\n",
93
+ "\n",
94
+ " .dataframe tbody tr th {\n",
95
+ " vertical-align: top;\n",
96
+ " }\n",
97
+ "\n",
98
+ " .dataframe thead th {\n",
99
+ " text-align: right;\n",
100
+ " }\n",
101
+ "</style>\n",
102
+ "<table border=\"1\" class=\"dataframe\">\n",
103
+ " <thead>\n",
104
+ " <tr style=\"text-align: right;\">\n",
105
+ " <th></th>\n",
106
+ " <th>age</th>\n",
107
+ " <th>workclass</th>\n",
108
+ " <th>fnlwgt</th>\n",
109
+ " <th>education</th>\n",
110
+ " <th>education.num</th>\n",
111
+ " <th>marital.status</th>\n",
112
+ " <th>occupation</th>\n",
113
+ " <th>relationship</th>\n",
114
+ " <th>race</th>\n",
115
+ " <th>sex</th>\n",
116
+ " <th>capital.gain</th>\n",
117
+ " <th>capital.loss</th>\n",
118
+ " <th>hours.per.week</th>\n",
119
+ " <th>native.country</th>\n",
120
+ " <th>income</th>\n",
121
+ " </tr>\n",
122
+ " </thead>\n",
123
+ " <tbody>\n",
124
+ " <tr>\n",
125
+ " <th>5898</th>\n",
126
+ " <td>35</td>\n",
127
+ " <td>Private</td>\n",
128
+ " <td>28160</td>\n",
129
+ " <td>Bachelors</td>\n",
130
+ " <td>13</td>\n",
131
+ " <td>Married-spouse-absent</td>\n",
132
+ " <td>Exec-managerial</td>\n",
133
+ " <td>Unmarried</td>\n",
134
+ " <td>White</td>\n",
135
+ " <td>Female</td>\n",
136
+ " <td>0</td>\n",
137
+ " <td>0</td>\n",
138
+ " <td>40</td>\n",
139
+ " <td>United-States</td>\n",
140
+ " <td>&lt;=50K</td>\n",
141
+ " </tr>\n",
142
+ " </tbody>\n",
143
+ "</table>\n",
144
+ "</div>"
145
+ ],
146
+ "text/plain": [
147
+ " age workclass fnlwgt education education.num marital.status \\\n",
148
+ "5898 35 Private 28160 Bachelors 13 Married-spouse-absent \n",
149
+ "\n",
150
+ " occupation relationship race sex capital.gain capital.loss \\\n",
151
+ "5898 Exec-managerial Unmarried White Female 0 0 \n",
152
+ "\n",
153
+ " hours.per.week native.country income \n",
154
+ "5898 40 United-States <=50K "
155
+ ]
156
+ },
157
+ "execution_count": 3,
158
+ "metadata": {},
159
+ "output_type": "execute_result"
160
+ }
161
+ ],
162
+ "source": [
163
+ "adult = pd.read_csv('../../Data/adult.csv')\n",
164
+ "adult.sample()"
165
+ ]
166
+ },
167
+ {
168
+ "cell_type": "code",
169
+ "execution_count": 4,
170
+ "metadata": {
171
+ "execution": {
172
+ "iopub.execute_input": "2021-03-19T05:38:03.108677Z",
173
+ "iopub.status.busy": "2021-03-19T05:38:03.108008Z",
174
+ "iopub.status.idle": "2021-03-19T05:38:03.125920Z",
175
+ "shell.execute_reply": "2021-03-19T05:38:03.125335Z"
176
+ },
177
+ "papermill": {
178
+ "duration": 0.076114,
179
+ "end_time": "2021-03-19T05:38:03.126077",
180
+ "exception": false,
181
+ "start_time": "2021-03-19T05:38:03.049963",
182
+ "status": "completed"
183
+ },
184
+ "tags": []
185
+ },
186
+ "outputs": [
187
+ {
188
+ "name": "stdout",
189
+ "output_type": "stream",
190
+ "text": [
191
+ "<class 'pandas.core.frame.DataFrame'>\n",
192
+ "RangeIndex: 32561 entries, 0 to 32560\n",
193
+ "Data columns (total 15 columns):\n",
194
+ " # Column Non-Null Count Dtype \n",
195
+ "--- ------ -------------- ----- \n",
196
+ " 0 age 32561 non-null int64 \n",
197
+ " 1 workclass 32561 non-null object\n",
198
+ " 2 fnlwgt 32561 non-null int64 \n",
199
+ " 3 education 32561 non-null object\n",
200
+ " 4 education.num 32561 non-null int64 \n",
201
+ " 5 marital.status 32561 non-null object\n",
202
+ " 6 occupation 32561 non-null object\n",
203
+ " 7 relationship 32561 non-null object\n",
204
+ " 8 race 32561 non-null object\n",
205
+ " 9 sex 32561 non-null object\n",
206
+ " 10 capital.gain 32561 non-null int64 \n",
207
+ " 11 capital.loss 32561 non-null int64 \n",
208
+ " 12 hours.per.week 32561 non-null int64 \n",
209
+ " 13 native.country 32561 non-null object\n",
210
+ " 14 income 32561 non-null object\n",
211
+ "dtypes: int64(6), object(9)\n",
212
+ "memory usage: 3.7+ MB\n"
213
+ ]
214
+ }
215
+ ],
216
+ "source": [
217
+ "adult.info()"
218
+ ]
219
+ },
220
+ {
221
+ "cell_type": "markdown",
222
+ "metadata": {
223
+ "papermill": {
224
+ "duration": 0.022213,
225
+ "end_time": "2021-03-19T05:38:03.171196",
226
+ "exception": false,
227
+ "start_time": "2021-03-19T05:38:03.148983",
228
+ "status": "completed"
229
+ },
230
+ "tags": []
231
+ },
232
+ "source": [
233
+ "*In this info detail, indicate that there is no missing value at all. But if you see the whole data carefully, you will find **missing value with '?'**.*"
234
+ ]
235
+ },
236
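+   {
+    "cell_type": "markdown",
+    "metadata": {},
+    "source": [
+     "*A minimal added sketch (for illustration, not part of the original run): counting how many '?' placeholders each column actually contains.*"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "# Illustrative sketch: count the '?' placeholders per column.\n",
+     "# Numeric columns simply compare unequal to '?', so they report 0.\n",
+     "question_marks = (adult == '?').sum()\n",
+     "question_marks[question_marks > 0]"
+    ]
+   },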
+ {
237
+ "cell_type": "markdown",
238
+ "metadata": {
239
+ "papermill": {
240
+ "duration": 0.023501,
241
+ "end_time": "2021-03-19T05:38:03.217098",
242
+ "exception": false,
243
+ "start_time": "2021-03-19T05:38:03.193597",
244
+ "status": "completed"
245
+ },
246
+ "tags": []
247
+ },
248
+ "source": [
249
+ "# PreProcessing"
250
+ ]
251
+ },
252
+ {
253
+ "cell_type": "markdown",
254
+ "metadata": {
255
+ "papermill": {
256
+ "duration": 0.022005,
257
+ "end_time": "2021-03-19T05:38:03.261561",
258
+ "exception": false,
259
+ "start_time": "2021-03-19T05:38:03.239556",
260
+ "status": "completed"
261
+ },
262
+ "tags": []
263
+ },
264
+ "source": [
265
+ "*Preprocessing scheme:*\n",
266
+ "* Encode all columns\n",
267
+ "* Drop education because it's already encoded on education.num\n",
268
+ "* Drop fnlwgt because it's unique"
269
+ ]
270
+ },
271
+ {
272
+ "cell_type": "markdown",
273
+ "metadata": {
274
+ "papermill": {
275
+ "duration": 0.022184,
276
+ "end_time": "2021-03-19T05:38:03.306152",
277
+ "exception": false,
278
+ "start_time": "2021-03-19T05:38:03.283968",
279
+ "status": "completed"
280
+ },
281
+ "tags": []
282
+ },
283
+ "source": [
284
+ "*Handling Missing Value In Pipeline*"
285
+ ]
286
+ },
287
+ {
288
+ "cell_type": "code",
289
+ "execution_count": 5,
290
+ "metadata": {
291
+ "execution": {
292
+ "iopub.execute_input": "2021-03-19T05:38:03.356440Z",
293
+ "iopub.status.busy": "2021-03-19T05:38:03.355434Z",
294
+ "iopub.status.idle": "2021-03-19T05:38:03.361557Z",
295
+ "shell.execute_reply": "2021-03-19T05:38:03.362164Z"
296
+ },
297
+ "papermill": {
298
+ "duration": 0.033769,
299
+ "end_time": "2021-03-19T05:38:03.362348",
300
+ "exception": false,
301
+ "start_time": "2021-03-19T05:38:03.328579",
302
+ "status": "completed"
303
+ },
304
+ "tags": []
305
+ },
306
+ "outputs": [],
307
+ "source": [
308
+ "binary_encoder_pipe = Pipeline([\n",
309
+ " ('imputer', SimpleImputer(strategy = 'constant', fill_value = 'NC', missing_values = '?')),\n",
310
+ " ('binary', ce.BinaryEncoder())\n",
311
+ "])\n",
312
+ "\n",
313
+ "transformer = ColumnTransformer([\n",
314
+ " ('one hot', OneHotEncoder(drop = 'first'), ['relationship', 'race', 'sex']),\n",
315
+ " ('binary', binary_encoder_pipe, ['workclass', 'marital.status', 'occupation', 'native.country'])],\n",
316
+ " remainder = 'passthrough')"
317
+ ]
318
+ },
319
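+   {
+    "cell_type": "markdown",
+    "metadata": {},
+    "source": [
+     "*An added sketch (not in the original notebook): applying `binary_encoder_pipe` to a tiny toy column to show that a '?' is first replaced by the constant 'NC' and then binary-encoded as its own category.*"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "# Illustrative sketch: the imputer turns '?' into 'NC', then BinaryEncoder encodes it.\n",
+     "toy = pd.DataFrame({'workclass': ['Private', '?', 'State-gov']})\n",
+     "binary_encoder_pipe.fit_transform(toy)"
+    ]
+   },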
+ {
320
+ "cell_type": "markdown",
321
+ "metadata": {
322
+ "papermill": {
323
+ "duration": 0.022265,
324
+ "end_time": "2021-03-19T05:38:03.407584",
325
+ "exception": false,
326
+ "start_time": "2021-03-19T05:38:03.385319",
327
+ "status": "completed"
328
+ },
329
+ "tags": []
330
+ },
331
+ "source": [
332
+ "*Splitting Data*"
333
+ ]
334
+ },
335
+ {
336
+ "cell_type": "code",
337
+ "execution_count": 6,
338
+ "metadata": {
339
+ "execution": {
340
+ "iopub.execute_input": "2021-03-19T05:38:03.456571Z",
341
+ "iopub.status.busy": "2021-03-19T05:38:03.455612Z",
342
+ "iopub.status.idle": "2021-03-19T05:38:03.470867Z",
343
+ "shell.execute_reply": "2021-03-19T05:38:03.471412Z"
344
+ },
345
+ "papermill": {
346
+ "duration": 0.041355,
347
+ "end_time": "2021-03-19T05:38:03.471590",
348
+ "exception": false,
349
+ "start_time": "2021-03-19T05:38:03.430235",
350
+ "status": "completed"
351
+ },
352
+ "tags": []
353
+ },
354
+ "outputs": [
355
+ {
356
+ "data": {
357
+ "text/plain": [
358
+ "<=50K 24720\n",
359
+ ">50K 7841\n",
360
+ "Name: income, dtype: int64"
361
+ ]
362
+ },
363
+ "execution_count": 6,
364
+ "metadata": {},
365
+ "output_type": "execute_result"
366
+ }
367
+ ],
368
+ "source": [
369
+ "adult['income'].value_counts()"
370
+ ]
371
+ },
372
+ {
373
+ "cell_type": "markdown",
374
+ "metadata": {
375
+ "papermill": {
376
+ "duration": 0.023083,
377
+ "end_time": "2021-03-19T05:38:03.517599",
378
+ "exception": false,
379
+ "start_time": "2021-03-19T05:38:03.494516",
380
+ "status": "completed"
381
+ },
382
+ "tags": []
383
+ },
384
+ "source": [
385
+ "Income is the target data and **indicated with imbalance data**. I define **income with 1 if income is >50K and 0 if income is <50K**."
386
+ ]
387
+ },
388
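+   {
+    "cell_type": "markdown",
+    "metadata": {},
+    "source": [
+     "*Added sketch: expressing the class counts above as proportions, to make the roughly 76% / 24% imbalance explicit.*"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "# Illustrative sketch: relative frequency of each income class.\n",
+     "adult['income'].value_counts(normalize = True)"
+    ]
+   },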
+ {
389
+ "cell_type": "code",
390
+ "execution_count": 7,
391
+ "metadata": {
392
+ "execution": {
393
+ "iopub.execute_input": "2021-03-19T05:38:03.568392Z",
394
+ "iopub.status.busy": "2021-03-19T05:38:03.567403Z",
395
+ "iopub.status.idle": "2021-03-19T05:38:03.580593Z",
396
+ "shell.execute_reply": "2021-03-19T05:38:03.581142Z"
397
+ },
398
+ "papermill": {
399
+ "duration": 0.04025,
400
+ "end_time": "2021-03-19T05:38:03.581329",
401
+ "exception": false,
402
+ "start_time": "2021-03-19T05:38:03.541079",
403
+ "status": "completed"
404
+ },
405
+ "tags": []
406
+ },
407
+ "outputs": [],
408
+ "source": [
409
+ "X = adult.drop(['fnlwgt', 'education', 'income'], axis = 1)\n",
410
+ "y = np.where(adult['income'] == '>50K', 1, 0)"
411
+ ]
412
+ },
413
+ {
414
+ "cell_type": "code",
415
+ "execution_count": 8,
416
+ "metadata": {
417
+ "execution": {
418
+ "iopub.execute_input": "2021-03-19T05:38:03.632000Z",
419
+ "iopub.status.busy": "2021-03-19T05:38:03.631007Z",
420
+ "iopub.status.idle": "2021-03-19T05:38:03.637002Z",
421
+ "shell.execute_reply": "2021-03-19T05:38:03.637530Z"
422
+ },
423
+ "papermill": {
424
+ "duration": 0.033104,
425
+ "end_time": "2021-03-19T05:38:03.637822",
426
+ "exception": false,
427
+ "start_time": "2021-03-19T05:38:03.604718",
428
+ "status": "completed"
429
+ },
430
+ "tags": []
431
+ },
432
+ "outputs": [
433
+ {
434
+ "data": {
435
+ "text/plain": [
436
+ "(32561, 12)"
437
+ ]
438
+ },
439
+ "execution_count": 8,
440
+ "metadata": {},
441
+ "output_type": "execute_result"
442
+ }
443
+ ],
444
+ "source": [
445
+ "X.shape"
446
+ ]
447
+ },
448
+ {
449
+ "cell_type": "code",
450
+ "execution_count": 9,
451
+ "metadata": {
452
+ "execution": {
453
+ "iopub.execute_input": "2021-03-19T05:38:03.689040Z",
454
+ "iopub.status.busy": "2021-03-19T05:38:03.688033Z",
455
+ "iopub.status.idle": "2021-03-19T05:38:03.726523Z",
456
+ "shell.execute_reply": "2021-03-19T05:38:03.725847Z"
457
+ },
458
+ "papermill": {
459
+ "duration": 0.065514,
460
+ "end_time": "2021-03-19T05:38:03.726708",
461
+ "exception": false,
462
+ "start_time": "2021-03-19T05:38:03.661194",
463
+ "status": "completed"
464
+ },
465
+ "tags": []
466
+ },
467
+ "outputs": [],
468
+ "source": [
469
+ "X_train, X_test, y_train, y_test = train_test_split(X, y, stratify = y,\n",
470
+ " test_size = 0.3, random_state = 1212)"
471
+ ]
472
+ },
473
+ {
474
+ "cell_type": "markdown",
475
+ "metadata": {
476
+ "papermill": {
477
+ "duration": 0.023919,
478
+ "end_time": "2021-03-19T05:38:03.775037",
479
+ "exception": false,
480
+ "start_time": "2021-03-19T05:38:03.751118",
481
+ "status": "completed"
482
+ },
483
+ "tags": []
484
+ },
485
+ "source": [
486
+ "I use 0.3 as default score for test_size and X.shape for random_state so the data will be devided equally."
487
+ ]
488
+ },
489
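+   {
+    "cell_type": "markdown",
+    "metadata": {},
+    "source": [
+     "*Added sketch: verifying that stratification keeps the positive-class rate essentially identical in the train and test splits.*"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "# Illustrative sketch: the positive-class rate should match (about 0.24) in both splits.\n",
+     "y_train.mean(), y_test.mean()"
+    ]
+   },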
+ {
490
+ "cell_type": "markdown",
491
+ "metadata": {
492
+ "papermill": {
493
+ "duration": 0.023793,
494
+ "end_time": "2021-03-19T05:38:03.822815",
495
+ "exception": false,
496
+ "start_time": "2021-03-19T05:38:03.799022",
497
+ "status": "completed"
498
+ },
499
+ "tags": []
500
+ },
501
+ "source": [
502
+ "# Define Model"
503
+ ]
504
+ },
505
+ {
506
+ "cell_type": "markdown",
507
+ "metadata": {
508
+ "papermill": {
509
+ "duration": 0.023731,
510
+ "end_time": "2021-03-19T05:38:03.871290",
511
+ "exception": false,
512
+ "start_time": "2021-03-19T05:38:03.847559",
513
+ "status": "completed"
514
+ },
515
+ "tags": []
516
+ },
517
+ "source": [
518
+ "I use 3 Boosting Algorithms Models:\n",
519
+ "* Ada Boost Classifier\n",
520
+ "* Gradient Boosting Classifier\n",
521
+ "* XGB Classifier"
522
+ ]
523
+ },
524
+ {
525
+ "cell_type": "code",
526
+ "execution_count": 10,
527
+ "metadata": {
528
+ "execution": {
529
+ "iopub.execute_input": "2021-03-19T05:38:03.927028Z",
530
+ "iopub.status.busy": "2021-03-19T05:38:03.926229Z",
531
+ "iopub.status.idle": "2021-03-19T05:38:03.929154Z",
532
+ "shell.execute_reply": "2021-03-19T05:38:03.929616Z"
533
+ },
534
+ "papermill": {
535
+ "duration": 0.034442,
536
+ "end_time": "2021-03-19T05:38:03.929828",
537
+ "exception": false,
538
+ "start_time": "2021-03-19T05:38:03.895386",
539
+ "status": "completed"
540
+ },
541
+ "tags": []
542
+ },
543
+ "outputs": [],
544
+ "source": [
545
+ "adaboost = AdaBoostClassifier(DecisionTreeClassifier(), random_state = 1212)\n",
546
+ "pipe_ada = Pipeline([\n",
547
+ " ('transformer', transformer),\n",
548
+ " ('adaboost', adaboost)])\n",
549
+ "\n",
550
+ "gradboost = GradientBoostingClassifier(random_state = 1212)\n",
551
+ "pipe_grad = Pipeline([\n",
552
+ " ('transformer', transformer),\n",
553
+ " ('gradboost', gradboost)])\n",
554
+ "\n",
555
+ "XGBOOST = XGBClassifier(random_state = 1212)\n",
556
+ "pipe_XGB = Pipeline([\n",
557
+ " ('transformer', transformer),\n",
558
+ " ('XGBOOST', XGBOOST)])"
559
+ ]
560
+ },
561
+ {
562
+ "cell_type": "markdown",
563
+ "metadata": {
564
+ "papermill": {
565
+ "duration": 0.023691,
566
+ "end_time": "2021-03-19T05:38:03.977709",
567
+ "exception": false,
568
+ "start_time": "2021-03-19T05:38:03.954018",
569
+ "status": "completed"
570
+ },
571
+ "tags": []
572
+ },
573
+ "source": [
574
+ "# Cross Validation"
575
+ ]
576
+ },
577
+ {
578
+ "cell_type": "markdown",
579
+ "metadata": {
580
+ "papermill": {
581
+ "duration": 0.023661,
582
+ "end_time": "2021-03-19T05:38:04.025361",
583
+ "exception": false,
584
+ "start_time": "2021-03-19T05:38:04.001700",
585
+ "status": "completed"
586
+ },
587
+ "tags": []
588
+ },
589
+ "source": [
590
+ "*Model Evaluation*"
591
+ ]
592
+ },
593
+ {
594
+ "cell_type": "code",
595
+ "execution_count": 12,
596
+ "metadata": {
597
+ "execution": {
598
+ "iopub.execute_input": "2021-03-19T05:38:04.081401Z",
599
+ "iopub.status.busy": "2021-03-19T05:38:04.080271Z",
600
+ "iopub.status.idle": "2021-03-19T05:38:44.672230Z",
601
+ "shell.execute_reply": "2021-03-19T05:38:44.672845Z"
602
+ },
603
+ "papermill": {
604
+ "duration": 40.623782,
605
+ "end_time": "2021-03-19T05:38:44.673057",
606
+ "exception": false,
607
+ "start_time": "2021-03-19T05:38:04.049275",
608
+ "status": "completed"
609
+ },
610
+ "tags": []
611
+ },
612
+ "outputs": [],
613
+ "source": [
614
+ "def model_evaluation(model, metric):\n",
615
+ " skfold = StratifiedKFold(n_splits = 5)\n",
616
+ " model_cv = cross_val_score(model, X_train, y_train, cv = skfold, scoring = metric)\n",
617
+ " return model_cv\n",
618
+ "\n",
619
+ "pipe_ada_cv = model_evaluation(pipe_ada, 'f1')\n",
620
+ "pipe_grad_cv = model_evaluation(pipe_grad, 'f1')\n",
621
+ "pipe_XGB_cv = model_evaluation(pipe_XGB, 'f1')"
622
+ ]
623
+ },
624
+ {
625
+ "cell_type": "markdown",
626
+ "metadata": {
627
+ "papermill": {
628
+ "duration": 0.025351,
629
+ "end_time": "2021-03-19T05:38:44.725538",
630
+ "exception": false,
631
+ "start_time": "2021-03-19T05:38:44.700187",
632
+ "status": "completed"
633
+ },
634
+ "tags": []
635
+ },
636
+ "source": [
637
+ "*Fitting Data*"
638
+ ]
639
+ },
640
+ {
641
+ "cell_type": "code",
642
+ "execution_count": null,
643
+ "metadata": {
644
+ "execution": {
645
+ "iopub.execute_input": "2021-03-19T05:38:44.789205Z",
646
+ "iopub.status.busy": "2021-03-19T05:38:44.788134Z",
647
+ "iopub.status.idle": "2021-03-19T05:38:53.398606Z",
648
+ "shell.execute_reply": "2021-03-19T05:38:53.398036Z"
649
+ },
650
+ "papermill": {
651
+ "duration": 8.647612,
652
+ "end_time": "2021-03-19T05:38:53.398853",
653
+ "exception": false,
654
+ "start_time": "2021-03-19T05:38:44.751241",
655
+ "status": "completed"
656
+ },
657
+ "tags": []
658
+ },
659
+ "outputs": [],
660
+ "source": [
661
+ "for model in [pipe_ada, pipe_grad, pipe_XGB]:\n",
662
+ " model.fit(X_train, y_train)"
663
+ ]
664
+ },
665
+ {
666
+ "cell_type": "markdown",
667
+ "metadata": {
668
+ "papermill": {
669
+ "duration": 0.025784,
670
+ "end_time": "2021-03-19T05:38:53.451009",
671
+ "exception": false,
672
+ "start_time": "2021-03-19T05:38:53.425225",
673
+ "status": "completed"
674
+ },
675
+ "tags": []
676
+ },
677
+ "source": [
678
+ "*Summary*"
679
+ ]
680
+ },
681
+ {
682
+ "cell_type": "code",
683
+ "execution_count": null,
684
+ "metadata": {
685
+ "execution": {
686
+ "iopub.execute_input": "2021-03-19T05:38:53.527446Z",
687
+ "iopub.status.busy": "2021-03-19T05:38:53.526733Z",
688
+ "iopub.status.idle": "2021-03-19T05:38:53.815303Z",
689
+ "shell.execute_reply": "2021-03-19T05:38:53.815805Z"
690
+ },
691
+ "papermill": {
692
+ "duration": 0.33887,
693
+ "end_time": "2021-03-19T05:38:53.815995",
694
+ "exception": false,
695
+ "start_time": "2021-03-19T05:38:53.477125",
696
+ "status": "completed"
697
+ },
698
+ "tags": []
699
+ },
700
+ "outputs": [],
701
+ "source": [
702
+ "score_mean = [pipe_ada_cv.mean(), pipe_grad_cv.mean(), pipe_XGB_cv.mean()]\n",
703
+ "score_std = [pipe_ada_cv.std(), pipe_grad_cv.std(), pipe_XGB_cv.std()]\n",
704
+ "score_f1 = [f1_score(y_test, pipe_ada.predict(X_test)),\n",
705
+ " f1_score(y_test, pipe_grad.predict(X_test)), \n",
706
+ " f1_score(y_test, pipe_XGB.predict(X_test))]\n",
707
+ "method_name = ['Ada Boost Classifier', 'Gradient Boost Classifier ',\n",
708
+ " 'XGB Classifier']\n",
709
+ "summary = pd.DataFrame({'method': method_name, 'mean score': score_mean,\n",
710
+ " 'std score': score_std, 'f1 score': score_f1})\n",
711
+ "summary"
712
+ ]
713
+ },
714
+ {
715
+ "cell_type": "markdown",
716
+ "metadata": {
717
+ "papermill": {
718
+ "duration": 0.026318,
719
+ "end_time": "2021-03-19T05:38:53.869169",
720
+ "exception": false,
721
+ "start_time": "2021-03-19T05:38:53.842851",
722
+ "status": "completed"
723
+ },
724
+ "tags": []
725
+ },
726
+ "source": [
727
+ "From these scores, **XGB Classifier is the best one** with the highest f1 score and mean score, also the lowest std score. Let's cross-check with the important features, see if the model is correct."
728
+ ]
729
+ },
730
+ {
731
+ "cell_type": "code",
732
+ "execution_count": null,
733
+ "metadata": {
734
+ "execution": {
735
+ "iopub.execute_input": "2021-03-19T05:38:53.944904Z",
736
+ "iopub.status.busy": "2021-03-19T05:38:53.929491Z",
737
+ "iopub.status.idle": "2021-03-19T05:38:54.176548Z",
738
+ "shell.execute_reply": "2021-03-19T05:38:54.175387Z"
739
+ },
740
+ "papermill": {
741
+ "duration": 0.28086,
742
+ "end_time": "2021-03-19T05:38:54.176735",
743
+ "exception": false,
744
+ "start_time": "2021-03-19T05:38:53.895875",
745
+ "status": "completed"
746
+ },
747
+ "tags": []
748
+ },
749
+ "outputs": [],
750
+ "source": [
751
+ "plot_roc_curve(pipe_XGB, X_test, y_test)"
752
+ ]
753
+ },
754
+ {
755
+ "cell_type": "markdown",
756
+ "metadata": {
757
+ "papermill": {
758
+ "duration": 0.027942,
759
+ "end_time": "2021-03-19T05:38:54.233278",
760
+ "exception": false,
761
+ "start_time": "2021-03-19T05:38:54.205336",
762
+ "status": "completed"
763
+ },
764
+ "tags": []
765
+ },
766
+ "source": [
767
+ "# Importance Features"
768
+ ]
769
+ },
770
+ {
771
+ "cell_type": "code",
772
+ "execution_count": null,
773
+ "metadata": {
774
+ "execution": {
775
+ "iopub.execute_input": "2021-03-19T05:38:54.295503Z",
776
+ "iopub.status.busy": "2021-03-19T05:38:54.294857Z",
777
+ "iopub.status.idle": "2021-03-19T05:38:54.297765Z",
778
+ "shell.execute_reply": "2021-03-19T05:38:54.297092Z"
779
+ },
780
+ "papermill": {
781
+ "duration": 0.036542,
782
+ "end_time": "2021-03-19T05:38:54.297927",
783
+ "exception": false,
784
+ "start_time": "2021-03-19T05:38:54.261385",
785
+ "status": "completed"
786
+ },
787
+ "tags": []
788
+ },
789
+ "outputs": [],
790
+ "source": [
791
+ "features = list(pipe_ada[0].transformers_[0][1].get_feature_names()) + pipe_ada[0].transformers_[1][1][1].get_feature_names() + ['age', 'education.num', 'capital.gain', 'capital.loss', 'hours.per.week']"
792
+ ]
793
+ },
794
+ {
795
+ "cell_type": "code",
796
+ "execution_count": null,
797
+ "metadata": {
798
+ "execution": {
799
+ "iopub.execute_input": "2021-03-19T05:38:54.358245Z",
800
+ "iopub.status.busy": "2021-03-19T05:38:54.357625Z",
801
+ "iopub.status.idle": "2021-03-19T05:38:54.792959Z",
802
+ "shell.execute_reply": "2021-03-19T05:38:54.792423Z"
803
+ },
804
+ "papermill": {
805
+ "duration": 0.466676,
806
+ "end_time": "2021-03-19T05:38:54.793113",
807
+ "exception": false,
808
+ "start_time": "2021-03-19T05:38:54.326437",
809
+ "status": "completed"
810
+ },
811
+ "tags": []
812
+ },
813
+ "outputs": [],
814
+ "source": [
815
+ "imptab_ada = pd.DataFrame(pipe_ada[1].feature_importances_, columns = ['imp'], index = features)\n",
816
+ "imptab_ada.sort_values('imp').plot(kind = 'barh', figsize = (15,8))\n",
817
+ "plt.title('Importance Table For Ada Boost Classifier Model')\n",
818
+ "plt.show()"
819
+ ]
820
+ },
821
+ {
822
+ "cell_type": "code",
823
+ "execution_count": null,
824
+ "metadata": {
825
+ "execution": {
826
+ "iopub.execute_input": "2021-03-19T05:38:54.859604Z",
827
+ "iopub.status.busy": "2021-03-19T05:38:54.858939Z",
828
+ "iopub.status.idle": "2021-03-19T05:38:55.286891Z",
829
+ "shell.execute_reply": "2021-03-19T05:38:55.286335Z"
830
+ },
831
+ "papermill": {
832
+ "duration": 0.464119,
833
+ "end_time": "2021-03-19T05:38:55.287039",
834
+ "exception": false,
835
+ "start_time": "2021-03-19T05:38:54.822920",
836
+ "status": "completed"
837
+ },
838
+ "tags": []
839
+ },
840
+ "outputs": [],
841
+ "source": [
842
+ "imptab_grad = pd.DataFrame(pipe_grad[1].feature_importances_, columns = ['imp'], index = features)\n",
843
+ "imptab_grad.sort_values('imp').plot(kind = 'barh', figsize = (15,8))\n",
844
+ "plt.title('Importance Table For Gradient Boost Classifier Model')\n",
845
+ "plt.show()"
846
+ ]
847
+ },
848
+ {
849
+ "cell_type": "code",
850
+ "execution_count": null,
851
+ "metadata": {
852
+ "execution": {
853
+ "iopub.execute_input": "2021-03-19T05:38:55.356257Z",
854
+ "iopub.status.busy": "2021-03-19T05:38:55.355583Z",
855
+ "iopub.status.idle": "2021-03-19T05:38:55.939126Z",
856
+ "shell.execute_reply": "2021-03-19T05:38:55.938530Z"
857
+ },
858
+ "papermill": {
859
+ "duration": 0.62115,
860
+ "end_time": "2021-03-19T05:38:55.939279",
861
+ "exception": false,
862
+ "start_time": "2021-03-19T05:38:55.318129",
863
+ "status": "completed"
864
+ },
865
+ "tags": []
866
+ },
867
+ "outputs": [],
868
+ "source": [
869
+ "imptab_XGB = pd.DataFrame(pipe_XGB[1].feature_importances_, columns = ['imp'], index = features)\n",
870
+ "imptab_XGB.sort_values('imp').plot(kind = 'barh', figsize = (15,8))\n",
871
+ "plt.title('Importance Table For XGB Classifier Model')\n",
872
+ "plt.show()"
873
+ ]
874
+ },
875
+ {
876
+ "cell_type": "markdown",
877
+ "metadata": {
878
+ "papermill": {
879
+ "duration": 0.031945,
880
+ "end_time": "2021-03-19T05:38:56.003841",
881
+ "exception": false,
882
+ "start_time": "2021-03-19T05:38:55.971896",
883
+ "status": "completed"
884
+ },
885
+ "tags": []
886
+ },
887
+ "source": [
888
+ "From Importance Features Table, the **XGB Classifier can boost almost all the features**. It's has a consistency with the cross validation result. Now, see if the HyperParameter Tuning process can boost until getting the maximum score."
889
+ ]
890
+ },
891
+ {
892
+ "cell_type": "markdown",
893
+ "metadata": {
894
+ "papermill": {
895
+ "duration": 0.03221,
896
+ "end_time": "2021-03-19T05:38:56.068184",
897
+ "exception": false,
898
+ "start_time": "2021-03-19T05:38:56.035974",
899
+ "status": "completed"
900
+ },
901
+ "tags": []
902
+ },
903
+ "source": [
904
+ "# HyperParameter Tuning"
905
+ ]
906
+ },
907
+ {
908
+ "cell_type": "code",
909
+ "execution_count": null,
910
+ "metadata": {
911
+ "execution": {
912
+ "iopub.execute_input": "2021-03-19T05:38:56.141374Z",
913
+ "iopub.status.busy": "2021-03-19T05:38:56.140625Z",
914
+ "iopub.status.idle": "2021-03-19T06:01:37.865510Z",
915
+ "shell.execute_reply": "2021-03-19T06:01:37.864282Z"
916
+ },
917
+ "papermill": {
918
+ "duration": 1361.765454,
919
+ "end_time": "2021-03-19T06:01:37.865698",
920
+ "exception": false,
921
+ "start_time": "2021-03-19T05:38:56.100244",
922
+ "status": "completed"
923
+ },
924
+ "tags": []
925
+ },
926
+ "outputs": [],
927
+ "source": [
928
+ "XGBOOST = XGBClassifier(random_state = 1212)\n",
929
+ "estimator = Pipeline([('transformer', transformer), ('XGBOOST', XGBOOST)])\n",
930
+ "\n",
931
+ "hyperparam_space = {\n",
932
+ " 'XGBOOST__learning_rate': [0.1, 0.05, 0.01, 0.005],\n",
933
+ " 'XGBOOST__n_estimators': [50, 100, 150, 200],\n",
934
+ " 'XGBOOST__max_depth': [3, 5, 7, 9]\n",
935
+ "}\n",
936
+ "\n",
937
+ "random = RandomizedSearchCV(\n",
938
+ " estimator,\n",
939
+ " param_distributions = hyperparam_space,\n",
940
+ " cv = StratifiedKFold(n_splits = 5),\n",
941
+ " scoring = 'f1',\n",
942
+ " n_iter = 10,\n",
943
+ " n_jobs = -1)\n",
944
+ "\n",
945
+ "random.fit(X_train, y_train)"
946
+ ]
947
+ },
948
+ {
949
+ "cell_type": "code",
950
+ "execution_count": 19,
951
+ "metadata": {
952
+ "execution": {
953
+ "iopub.execute_input": "2021-03-19T06:01:37.939505Z",
954
+ "iopub.status.busy": "2021-03-19T06:01:37.938761Z",
955
+ "iopub.status.idle": "2021-03-19T06:01:37.942876Z",
956
+ "shell.execute_reply": "2021-03-19T06:01:37.942253Z"
957
+ },
958
+ "papermill": {
959
+ "duration": 0.043411,
960
+ "end_time": "2021-03-19T06:01:37.943017",
961
+ "exception": false,
962
+ "start_time": "2021-03-19T06:01:37.899606",
963
+ "status": "completed"
964
+ },
965
+ "tags": []
966
+ },
967
+ "outputs": [
968
+ {
969
+ "name": "stdout",
970
+ "output_type": "stream",
971
+ "text": [
972
+ "best score 0.7017567140030995\n",
973
+ "best param {'XGBOOST__n_estimators': 200, 'XGBOOST__max_depth': 9, 'XGBOOST__learning_rate': 0.05}\n"
974
+ ]
975
+ }
976
+ ],
977
+ "source": [
978
+ "print('best score', random.best_score_)\n",
979
+ "print('best param', random.best_params_)"
980
+ ]
981
+ },
982
+ {
983
+ "cell_type": "markdown",
984
+ "metadata": {
985
+ "papermill": {
986
+ "duration": 0.034769,
987
+ "end_time": "2021-03-19T06:01:38.011326",
988
+ "exception": false,
989
+ "start_time": "2021-03-19T06:01:37.976557",
990
+ "status": "completed"
991
+ },
992
+ "tags": []
993
+ },
994
+ "source": [
995
+ "After HyperParameter Tuning, the best score is 0.6996, which getting lower. N estimator is 150, Max depth is 5, and Learning rate is 0.1. Let's compare the result."
996
+ ]
997
+ },
998
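+   {
+    "cell_type": "markdown",
+    "metadata": {},
+    "source": [
+     "*Added sketch: inspecting `random.cv_results_` to see how the sampled parameter combinations compare, rather than relying only on `best_score_`.*"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "# Illustrative sketch: rank the sampled hyperparameter settings by mean CV f1.\n",
+     "cv_results = pd.DataFrame(random.cv_results_)\n",
+     "cv_results[['params', 'mean_test_score', 'std_test_score', 'rank_test_score']].sort_values('rank_test_score').head()"
+    ]
+   },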
+ {
999
+ "cell_type": "markdown",
1000
+ "metadata": {
1001
+ "papermill": {
1002
+ "duration": 0.033061,
1003
+ "end_time": "2021-03-19T06:01:38.078046",
1004
+ "exception": false,
1005
+ "start_time": "2021-03-19T06:01:38.044985",
1006
+ "status": "completed"
1007
+ },
1008
+ "tags": []
1009
+ },
1010
+ "source": [
1011
+ "# Before VS After Tuning Comparison"
1012
+ ]
1013
+ },
1014
+ {
1015
+ "cell_type": "code",
1016
+ "execution_count": 20,
1017
+ "metadata": {
1018
+ "execution": {
1019
+ "iopub.execute_input": "2021-03-19T06:01:38.173068Z",
1020
+ "iopub.status.busy": "2021-03-19T06:01:38.164508Z",
1021
+ "iopub.status.idle": "2021-03-19T06:01:39.914461Z",
1022
+ "shell.execute_reply": "2021-03-19T06:01:39.915204Z"
1023
+ },
1024
+ "papermill": {
1025
+ "duration": 1.803827,
1026
+ "end_time": "2021-03-19T06:01:39.915379",
1027
+ "exception": false,
1028
+ "start_time": "2021-03-19T06:01:38.111552",
1029
+ "status": "completed"
1030
+ },
1031
+ "tags": []
1032
+ },
1033
+ "outputs": [
1034
+ {
1035
+ "name": "stdout",
1036
+ "output_type": "stream",
1037
+ "text": [
1038
+ "[06:01:38] WARNING: ../src/learner.cc:1061: Starting in XGBoost 1.3.0, the default evaluation metric used with the objective 'binary:logistic' was changed from 'error' to 'logloss'. Explicitly set eval_metric if you'd like to restore the old behavior.\n",
1039
+ " precision recall f1-score support\n",
1040
+ "\n",
1041
+ " 0 0.90 0.94 0.92 7417\n",
1042
+ " 1 0.77 0.66 0.71 2352\n",
1043
+ "\n",
1044
+ " accuracy 0.87 9769\n",
1045
+ " macro avg 0.83 0.80 0.82 9769\n",
1046
+ "weighted avg 0.87 0.87 0.87 9769\n",
1047
+ "\n"
1048
+ ]
1049
+ }
1050
+ ],
1051
+ "source": [
1052
+ "estimator.fit(X_train, y_train)\n",
1053
+ "y_pred_estimator = estimator.predict(X_test)\n",
1054
+ "print(classification_report(y_test, y_pred_estimator))"
1055
+ ]
1056
+ },
1057
+ {
1058
+ "cell_type": "code",
1059
+ "execution_count": 21,
1060
+ "metadata": {
1061
+ "execution": {
1062
+ "iopub.execute_input": "2021-03-19T06:01:39.991027Z",
1063
+ "iopub.status.busy": "2021-03-19T06:01:39.990364Z",
1064
+ "iopub.status.idle": "2021-03-19T06:01:45.312598Z",
1065
+ "shell.execute_reply": "2021-03-19T06:01:45.311942Z"
1066
+ },
1067
+ "papermill": {
1068
+ "duration": 5.363188,
1069
+ "end_time": "2021-03-19T06:01:45.312764",
1070
+ "exception": false,
1071
+ "start_time": "2021-03-19T06:01:39.949576",
1072
+ "status": "completed"
1073
+ },
1074
+ "tags": []
1075
+ },
1076
+ "outputs": [
1077
+ {
1078
+ "name": "stdout",
1079
+ "output_type": "stream",
1080
+ "text": [
1081
+ "[06:01:40] WARNING: ../src/learner.cc:1061: Starting in XGBoost 1.3.0, the default evaluation metric used with the objective 'binary:logistic' was changed from 'error' to 'logloss'. Explicitly set eval_metric if you'd like to restore the old behavior.\n",
1082
+ " precision recall f1-score support\n",
1083
+ "\n",
1084
+ " 0 0.89 0.94 0.92 7417\n",
1085
+ " 1 0.78 0.64 0.70 2352\n",
1086
+ "\n",
1087
+ " accuracy 0.87 9769\n",
1088
+ " macro avg 0.84 0.79 0.81 9769\n",
1089
+ "weighted avg 0.86 0.87 0.87 9769\n",
1090
+ "\n"
1091
+ ]
1092
+ }
1093
+ ],
1094
+ "source": [
1095
+ "random.best_estimator_.fit(X_train, y_train)\n",
1096
+ "y_pred_random = random.best_estimator_.predict(X_test)\n",
1097
+ "print(classification_report(y_test, y_pred_random))"
1098
+ ]
1099
+ },
1100
+ {
1101
+ "cell_type": "code",
1102
+ "execution_count": 22,
1103
+ "metadata": {
1104
+ "execution": {
1105
+ "iopub.execute_input": "2021-03-19T06:01:45.389702Z",
1106
+ "iopub.status.busy": "2021-03-19T06:01:45.388729Z",
1107
+ "iopub.status.idle": "2021-03-19T06:01:45.407492Z",
1108
+ "shell.execute_reply": "2021-03-19T06:01:45.406952Z"
1109
+ },
1110
+ "papermill": {
1111
+ "duration": 0.05942,
1112
+ "end_time": "2021-03-19T06:01:45.407632",
1113
+ "exception": false,
1114
+ "start_time": "2021-03-19T06:01:45.348212",
1115
+ "status": "completed"
1116
+ },
1117
+ "tags": []
1118
+ },
1119
+ "outputs": [
1120
+ {
1121
+ "data": {
1122
+ "text/html": [
1123
+ "<div>\n",
1124
+ "<style scoped>\n",
1125
+ " .dataframe tbody tr th:only-of-type {\n",
1126
+ " vertical-align: middle;\n",
1127
+ " }\n",
1128
+ "\n",
1129
+ " .dataframe tbody tr th {\n",
1130
+ " vertical-align: top;\n",
1131
+ " }\n",
1132
+ "\n",
1133
+ " .dataframe thead th {\n",
1134
+ " text-align: right;\n",
1135
+ " }\n",
1136
+ "</style>\n",
1137
+ "<table border=\"1\" class=\"dataframe\">\n",
1138
+ " <thead>\n",
1139
+ " <tr style=\"text-align: right;\">\n",
1140
+ " <th></th>\n",
1141
+ " <th>method</th>\n",
1142
+ " <th>f1 score</th>\n",
1143
+ " </tr>\n",
1144
+ " </thead>\n",
1145
+ " <tbody>\n",
1146
+ " <tr>\n",
1147
+ " <th>0</th>\n",
1148
+ " <td>XGB Classifier Before Tuning</td>\n",
1149
+ " <td>0.713175</td>\n",
1150
+ " </tr>\n",
1151
+ " <tr>\n",
1152
+ " <th>1</th>\n",
1153
+ " <td>XGB Classifier After Tuning</td>\n",
1154
+ " <td>0.703169</td>\n",
1155
+ " </tr>\n",
1156
+ " </tbody>\n",
1157
+ "</table>\n",
1158
+ "</div>"
1159
+ ],
1160
+ "text/plain": [
1161
+ " method f1 score\n",
1162
+ "0 XGB Classifier Before Tuning 0.713175\n",
1163
+ "1 XGB Classifier After Tuning 0.703169"
1164
+ ]
1165
+ },
1166
+ "execution_count": 22,
1167
+ "metadata": {},
1168
+ "output_type": "execute_result"
1169
+ }
1170
+ ],
1171
+ "source": [
1172
+ "score_list = [f1_score(y_test, y_pred_estimator), f1_score(y_test, y_pred_random)]\n",
1173
+ "method_name = ['XGB Classifier Before Tuning', 'XGB Classifier After Tuning']\n",
1174
+ "best_summary = pd.DataFrame({\n",
1175
+ " 'method': method_name,\n",
1176
+ " 'f1 score': score_list\n",
1177
+ "})\n",
1178
+ "best_summary"
1179
+ ]
1180
+ },
1181
+ {
1182
+ "cell_type": "markdown",
1183
+ "metadata": {
1184
+ "papermill": {
1185
+ "duration": 0.035589,
1186
+ "end_time": "2021-03-19T06:01:45.478797",
1187
+ "exception": false,
1188
+ "start_time": "2021-03-19T06:01:45.443208",
1189
+ "status": "completed"
1190
+ },
1191
+ "tags": []
1192
+ },
1193
+ "source": [
1194
+ "After all, HyperParameter Tuning doesn't work good in this data. So if I have to choose, I pick the **XGB Classifier score Before Tuning, which is 0.71**. I know the number isn't good enough either because the data is imbalance and I don't process any resampling on it."
1195
+ ]
1196
+ },
1197
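+   {
+    "cell_type": "markdown",
+    "metadata": {},
+    "source": [
+     "*Added sketch (a possible next step, not something the original notebook ran): one cheap way to account for the class imbalance without resampling is XGBoost's `scale_pos_weight`, set to the negative/positive ratio.*"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "# Illustrative sketch: weight the positive class by the negative/positive ratio (~24720/7841, about 3.15).\n",
+     "pos_weight = (y_train == 0).sum() / (y_train == 1).sum()\n",
+     "pipe_XGB_weighted = Pipeline([\n",
+     "    ('transformer', transformer),\n",
+     "    ('XGBOOST', XGBClassifier(random_state = 1212, scale_pos_weight = pos_weight))])\n",
+     "pipe_XGB_weighted.fit(X_train, y_train)\n",
+     "f1_score(y_test, pipe_XGB_weighted.predict(X_test))"
+    ]
+   },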
+ {
1198
+ "cell_type": "markdown",
1199
+ "metadata": {},
1200
+ "source": [
1201
+ "## Fairness"
1202
+ ]
1203
+ },
1204
+ {
1205
+ "cell_type": "code",
1206
+ "execution_count": 7,
1207
+ "metadata": {},
1208
+ "outputs": [],
1209
+ "source": [
1210
+ "# This DataFrame is created to stock differents models and fair metrics that we produce in this notebook\n",
1211
+ "algo_metrics = pd.DataFrame(columns=['model', 'fair_metrics', 'prediction', 'probs'])\n",
1212
+ "\n",
1213
+ "def add_to_df_algo_metrics(algo_metrics, model, fair_metrics, preds, probs, name):\n",
1214
+ " return algo_metrics.append(pd.DataFrame(data=[[model, fair_metrics, preds, probs]], columns=['model', 'fair_metrics', 'prediction', 'probs'], index=[name]))"
1215
+ ]
1216
+ },
1217
+ {
1218
+ "cell_type": "code",
1219
+ "execution_count": 8,
1220
+ "metadata": {},
1221
+ "outputs": [],
1222
+ "source": [
1223
+ "def fair_metrics(dataset, pred, pred_is_dataset=False):\n",
1224
+ " if pred_is_dataset:\n",
1225
+ " dataset_pred = pred\n",
1226
+ " else:\n",
1227
+ " dataset_pred = dataset.copy()\n",
1228
+ " dataset_pred.labels = pred\n",
1229
+ " \n",
1230
+ " cols = ['statistical_parity_difference', 'equal_opportunity_difference', 'average_abs_odds_difference', 'disparate_impact', 'theil_index']\n",
1231
+ " obj_fairness = [[0,0,0,1,0]]\n",
1232
+ " \n",
1233
+ " fair_metrics = pd.DataFrame(data=obj_fairness, index=['objective'], columns=cols)\n",
1234
+ " \n",
1235
+ " for attr in dataset_pred.protected_attribute_names:\n",
1236
+ " idx = dataset_pred.protected_attribute_names.index(attr)\n",
1237
+ " privileged_groups = [{attr:dataset_pred.privileged_protected_attributes[idx][0]}] \n",
1238
+ " unprivileged_groups = [{attr:dataset_pred.unprivileged_protected_attributes[idx][0]}] \n",
1239
+ " \n",
1240
+ " classified_metric = ClassificationMetric(dataset, \n",
1241
+ " dataset_pred,\n",
1242
+ " unprivileged_groups=unprivileged_groups,\n",
1243
+ " privileged_groups=privileged_groups)\n",
1244
+ "\n",
1245
+ " metric_pred = BinaryLabelDatasetMetric(dataset_pred,\n",
1246
+ " unprivileged_groups=unprivileged_groups,\n",
1247
+ " privileged_groups=privileged_groups)\n",
1248
+ "\n",
1249
+ " acc = classified_metric.accuracy()\n",
1250
+ "\n",
1251
+ " row = pd.DataFrame([[metric_pred.mean_difference(),\n",
1252
+ " classified_metric.equal_opportunity_difference(),\n",
1253
+ " classified_metric.average_abs_odds_difference(),\n",
1254
+ " metric_pred.disparate_impact(),\n",
1255
+ " classified_metric.theil_index()]],\n",
1256
+ " columns = cols,\n",
1257
+ " index = [attr]\n",
1258
+ " )\n",
1259
+ " fair_metrics = fair_metrics.append(row) \n",
1260
+ " \n",
1261
+ " fair_metrics = fair_metrics.replace([-np.inf, np.inf], 2)\n",
1262
+ " \n",
1263
+ " return fair_metrics\n",
1264
+ "\n",
1265
+ "def plot_fair_metrics(fair_metrics):\n",
1266
+ " fig, ax = plt.subplots(figsize=(20,4), ncols=5, nrows=1)\n",
1267
+ "\n",
1268
+ " plt.subplots_adjust(\n",
1269
+ " left = 0.125, \n",
1270
+ " bottom = 0.1, \n",
1271
+ " right = 0.9, \n",
1272
+ " top = 0.9, \n",
1273
+ " wspace = .5, \n",
1274
+ " hspace = 1.1\n",
1275
+ " )\n",
1276
+ "\n",
1277
+ " y_title_margin = 1.2\n",
1278
+ "\n",
1279
+ " plt.suptitle(\"Fairness metrics\", y = 1.09, fontsize=20)\n",
1280
+ " sns.set(style=\"dark\")\n",
1281
+ "\n",
1282
+ " cols = fair_metrics.columns.values\n",
1283
+ " obj = fair_metrics.loc['objective']\n",
1284
+ " size_rect = [0.2,0.2,0.2,0.4,0.25]\n",
1285
+ " rect = [-0.1,-0.1,-0.1,0.8,0]\n",
1286
+ " bottom = [-1,-1,-1,0,0]\n",
1287
+ " top = [1,1,1,2,1]\n",
1288
+ " bound = [[-0.1,0.1],[-0.1,0.1],[-0.1,0.1],[0.8,1.2],[0,0.25]]\n",
1289
+ "\n",
1290
+ " display(Markdown(\"### Check bias metrics :\"))\n",
1291
+ " display(Markdown(\"A model can be considered bias if just one of these five metrics show that this model is biased.\"))\n",
1292
+ " for attr in fair_metrics.index[1:len(fair_metrics)].values:\n",
1293
+ " display(Markdown(\"#### For the %s attribute :\"%attr))\n",
1294
+ " check = [bound[i][0] < fair_metrics.loc[attr][i] < bound[i][1] for i in range(0,5)]\n",
1295
+ " display(Markdown(\"With default thresholds, bias against unprivileged group detected in **%d** out of 5 metrics\"%(5 - sum(check))))\n",
1296
+ "\n",
1297
+ " for i in range(0,5):\n",
1298
+ " plt.subplot(1, 5, i+1)\n",
1299
+ " ax = sns.barplot(x=fair_metrics.index[1:len(fair_metrics)], y=fair_metrics.iloc[1:len(fair_metrics)][cols[i]])\n",
1300
+ " \n",
1301
+ " for j in range(0,len(fair_metrics)-1):\n",
1302
+ " a, val = ax.patches[j], fair_metrics.iloc[j+1][cols[i]]\n",
1303
+ " marg = -0.2 if val < 0 else 0.1\n",
1304
+ " ax.text(a.get_x()+a.get_width()/5, a.get_y()+a.get_height()+marg, round(val, 3), fontsize=15,color='black')\n",
1305
+ "\n",
1306
+ " plt.ylim(bottom[i], top[i])\n",
1307
+ " plt.setp(ax.patches, linewidth=0)\n",
1308
+ " ax.add_patch(patches.Rectangle((-5,rect[i]), 10, size_rect[i], alpha=0.3, facecolor=\"green\", linewidth=1, linestyle='solid'))\n",
1309
+ " plt.axhline(obj[i], color='black', alpha=0.3)\n",
1310
+ " plt.title(cols[i])\n",
1311
+ " ax.set_ylabel('') \n",
1312
+ " ax.set_xlabel('')"
1313
+ ]
1314
+ },
1315
+ {
1316
+ "cell_type": "code",
1317
+ "execution_count": 9,
1318
+ "metadata": {},
1319
+ "outputs": [],
1320
+ "source": [
1321
+ "def get_fair_metrics_and_plot(data, model, plot=False, model_aif=False):\n",
1322
+ " pred = model.predict(data).labels if model_aif else model.predict(data.features)\n",
1323
+ " # fair_metrics function available in the metrics.py file\n",
1324
+ " fair = fair_metrics(data, pred)\n",
1325
+ "\n",
1326
+ " if plot:\n",
1327
+ " # plot_fair_metrics function available in the visualisations.py file\n",
1328
+ " # The visualisation of this function is inspired by the dashboard on the demo of IBM aif360 \n",
1329
+ " plot_fair_metrics(fair)\n",
1330
+ " display(fair)\n",
1331
+ " \n",
1332
+ " return fair"
1333
+ ]
1334
+ },
1335
+ {
1336
+ "cell_type": "code",
1337
+ "execution_count": 13,
1338
+ "metadata": {},
1339
+ "outputs": [
1340
+ {
1341
+ "data": {
1342
+ "text/html": [
1343
+ "<div>\n",
1344
+ "<style scoped>\n",
1345
+ " .dataframe tbody tr th:only-of-type {\n",
1346
+ " vertical-align: middle;\n",
1347
+ " }\n",
1348
+ "\n",
1349
+ " .dataframe tbody tr th {\n",
1350
+ " vertical-align: top;\n",
1351
+ " }\n",
1352
+ "\n",
1353
+ " .dataframe thead th {\n",
1354
+ " text-align: right;\n",
1355
+ " }\n",
1356
+ "</style>\n",
1357
+ "<table border=\"1\" class=\"dataframe\">\n",
1358
+ " <thead>\n",
1359
+ " <tr style=\"text-align: right;\">\n",
1360
+ " <th></th>\n",
1361
+ " <th>age</th>\n",
1362
+ " <th>workclass</th>\n",
1363
+ " <th>education.num</th>\n",
1364
+ " <th>marital.status</th>\n",
1365
+ " <th>occupation</th>\n",
1366
+ " <th>relationship</th>\n",
1367
+ " <th>race</th>\n",
1368
+ " <th>sex</th>\n",
1369
+ " <th>capital.gain</th>\n",
1370
+ " <th>capital.loss</th>\n",
1371
+ " <th>hours.per.week</th>\n",
1372
+ " <th>native.country</th>\n",
1373
+ " <th>income</th>\n",
1374
+ " </tr>\n",
1375
+ " </thead>\n",
1376
+ " <tbody>\n",
1377
+ " <tr>\n",
1378
+ " <th>0</th>\n",
1379
+ " <td>90</td>\n",
1380
+ " <td>NaN</td>\n",
1381
+ " <td>9</td>\n",
1382
+ " <td>Widowed</td>\n",
1383
+ " <td>NaN</td>\n",
1384
+ " <td>Not-in-family</td>\n",
1385
+ " <td>White</td>\n",
1386
+ " <td>0</td>\n",
1387
+ " <td>0</td>\n",
1388
+ " <td>4356</td>\n",
1389
+ " <td>40</td>\n",
1390
+ " <td>United-States</td>\n",
1391
+ " <td>0</td>\n",
1392
+ " </tr>\n",
1393
+ " <tr>\n",
1394
+ " <th>1</th>\n",
1395
+ " <td>82</td>\n",
1396
+ " <td>Private</td>\n",
1397
+ " <td>9</td>\n",
1398
+ " <td>Widowed</td>\n",
1399
+ " <td>Exec-managerial</td>\n",
1400
+ " <td>Not-in-family</td>\n",
1401
+ " <td>White</td>\n",
1402
+ " <td>0</td>\n",
1403
+ " <td>0</td>\n",
1404
+ " <td>4356</td>\n",
1405
+ " <td>18</td>\n",
1406
+ " <td>United-States</td>\n",
1407
+ " <td>0</td>\n",
1408
+ " </tr>\n",
1409
+ " <tr>\n",
1410
+ " <th>2</th>\n",
1411
+ " <td>66</td>\n",
1412
+ " <td>NaN</td>\n",
1413
+ " <td>10</td>\n",
1414
+ " <td>Widowed</td>\n",
1415
+ " <td>NaN</td>\n",
1416
+ " <td>Unmarried</td>\n",
1417
+ " <td>Black</td>\n",
1418
+ " <td>0</td>\n",
1419
+ " <td>0</td>\n",
1420
+ " <td>4356</td>\n",
1421
+ " <td>40</td>\n",
1422
+ " <td>United-States</td>\n",
1423
+ " <td>0</td>\n",
1424
+ " </tr>\n",
1425
+ " <tr>\n",
1426
+ " <th>3</th>\n",
1427
+ " <td>54</td>\n",
1428
+ " <td>Private</td>\n",
1429
+ " <td>4</td>\n",
1430
+ " <td>Divorced</td>\n",
1431
+ " <td>Machine-op-inspct</td>\n",
1432
+ " <td>Unmarried</td>\n",
1433
+ " <td>White</td>\n",
1434
+ " <td>0</td>\n",
1435
+ " <td>0</td>\n",
1436
+ " <td>3900</td>\n",
1437
+ " <td>40</td>\n",
1438
+ " <td>United-States</td>\n",
1439
+ " <td>0</td>\n",
1440
+ " </tr>\n",
1441
+ " <tr>\n",
1442
+ " <th>4</th>\n",
1443
+ " <td>41</td>\n",
1444
+ " <td>Private</td>\n",
1445
+ " <td>10</td>\n",
1446
+ " <td>Separated</td>\n",
1447
+ " <td>Prof-specialty</td>\n",
1448
+ " <td>Own-child</td>\n",
1449
+ " <td>White</td>\n",
1450
+ " <td>0</td>\n",
1451
+ " <td>0</td>\n",
1452
+ " <td>3900</td>\n",
1453
+ " <td>40</td>\n",
1454
+ " <td>United-States</td>\n",
1455
+ " <td>0</td>\n",
1456
+ " </tr>\n",
1457
+ " </tbody>\n",
1458
+ "</table>\n",
1459
+ "</div>"
1460
+ ],
1461
+ "text/plain": [
1462
+ " age workclass education.num marital.status occupation \\\n",
1463
+ "0 90 NaN 9 Widowed NaN \n",
1464
+ "1 82 Private 9 Widowed Exec-managerial \n",
1465
+ "2 66 NaN 10 Widowed NaN \n",
1466
+ "3 54 Private 4 Divorced Machine-op-inspct \n",
1467
+ "4 41 Private 10 Separated Prof-specialty \n",
1468
+ "\n",
1469
+ " relationship race sex capital.gain capital.loss hours.per.week \\\n",
1470
+ "0 Not-in-family White 0 0 4356 40 \n",
1471
+ "1 Not-in-family White 0 0 4356 18 \n",
1472
+ "2 Unmarried Black 0 0 4356 40 \n",
1473
+ "3 Unmarried White 0 0 3900 40 \n",
1474
+ "4 Own-child White 0 0 3900 40 \n",
1475
+ "\n",
1476
+ " native.country income \n",
1477
+ "0 United-States 0 \n",
1478
+ "1 United-States 0 \n",
1479
+ "2 United-States 0 \n",
1480
+ "3 United-States 0 \n",
1481
+ "4 United-States 0 "
1482
+ ]
1483
+ },
1484
+ "execution_count": 13,
1485
+ "metadata": {},
1486
+ "output_type": "execute_result"
1487
+ }
1488
+ ],
1489
+ "source": [
1490
+ "##train['Sex'] = train['Sex'].map( {'female': 1, 'male': 0} ).astype(int)\n",
1491
+ "adult_df = adult.drop(['fnlwgt', 'education'], axis = 1)\n",
1492
+ "adult_df[\"income\"]=adult_df[\"income\"].map({\"<=50K\":0,\">50K\":1})\n",
1493
+ "adult_df[\"sex\"] = adult_df[\"sex\"].map({\"Male\":1,\"Female\":0})\n",
1494
+ "adult_df[\"workclass\"] = adult_df[\"workclass\"].replace(\"?\",np.nan)\n",
1495
+ "adult_df[\"occupation\"] = adult_df[\"occupation\"].replace(\"?\",np.nan)\n",
1496
+ "adult_df[\"native.country\"] = adult_df[\"native.country\"].replace(\"?\",np.nan)\n",
1497
+ "adult_df.head()\n",
1498
+ "#features = [\"Pclass\", \"Sex\", \"SibSp\", \"Parch\", \"Survived\"]\n",
1499
+ "#X = pd.get_dummies(train_data[features])"
1500
+ ]
1501
+ },
1502
+ {
1503
+ "cell_type": "code",
1504
+ "execution_count": 14,
1505
+ "metadata": {},
1506
+ "outputs": [],
1507
+ "source": [
1508
+ "adult_df[\"workclass\"] = adult_df[\"workclass\"].fillna(adult_df[\"workclass\"].mode()[0])\n",
1509
+ "adult_df[\"occupation\"] = adult_df[\"occupation\"].fillna(adult_df[\"occupation\"].mode()[0])\n",
1510
+ "adult_df[\"native.country\"] = adult_df[\"native.country\"].fillna(adult_df[\"native.country\"].mode()[0])"
1511
+ ]
1512
+ },
1513
+ {
1514
+ "cell_type": "code",
1515
+ "execution_count": 15,
1516
+ "metadata": {},
1517
+ "outputs": [
1518
+ {
1519
+ "name": "stdout",
1520
+ "output_type": "stream",
1521
+ "text": [
1522
+ "workclass\n",
1523
+ "marital.status\n",
1524
+ "occupation\n",
1525
+ "relationship\n",
1526
+ "race\n",
1527
+ "native.country\n"
1528
+ ]
1529
+ }
1530
+ ],
1531
+ "source": [
1532
+ "from sklearn import preprocessing\n",
1533
+ "\n",
1534
+ "categorical = ['workclass', 'marital.status', 'occupation', 'relationship','race','native.country',]\n",
1535
+ "for feature in categorical:\n",
1536
+ " print(feature)\n",
1537
+ " le = preprocessing.LabelEncoder()\n",
1538
+ " adult_df[feature] = le.fit_transform(adult_df[feature])\n",
1539
+ " #X_test[feature] = le.transform(X_test[feature])"
1540
+ ]
1541
+ },
1542
+ {
1543
+ "cell_type": "code",
1544
+ "execution_count": 16,
1545
+ "metadata": {},
1546
+ "outputs": [
1547
+ {
1548
+ "data": {
1549
+ "text/html": [
1550
+ "<div>\n",
1551
+ "<style scoped>\n",
1552
+ " .dataframe tbody tr th:only-of-type {\n",
1553
+ " vertical-align: middle;\n",
1554
+ " }\n",
1555
+ "\n",
1556
+ " .dataframe tbody tr th {\n",
1557
+ " vertical-align: top;\n",
1558
+ " }\n",
1559
+ "\n",
1560
+ " .dataframe thead th {\n",
1561
+ " text-align: right;\n",
1562
+ " }\n",
1563
+ "</style>\n",
1564
+ "<table border=\"1\" class=\"dataframe\">\n",
1565
+ " <thead>\n",
1566
+ " <tr style=\"text-align: right;\">\n",
1567
+ " <th></th>\n",
1568
+ " <th>age</th>\n",
1569
+ " <th>workclass</th>\n",
1570
+ " <th>education.num</th>\n",
1571
+ " <th>marital.status</th>\n",
1572
+ " <th>occupation</th>\n",
1573
+ " <th>relationship</th>\n",
1574
+ " <th>race</th>\n",
1575
+ " <th>sex</th>\n",
1576
+ " <th>capital.gain</th>\n",
1577
+ " <th>capital.loss</th>\n",
1578
+ " <th>hours.per.week</th>\n",
1579
+ " <th>native.country</th>\n",
1580
+ " <th>income</th>\n",
1581
+ " </tr>\n",
1582
+ " </thead>\n",
1583
+ " <tbody>\n",
1584
+ " <tr>\n",
1585
+ " <th>0</th>\n",
1586
+ " <td>90</td>\n",
1587
+ " <td>3</td>\n",
1588
+ " <td>9</td>\n",
1589
+ " <td>6</td>\n",
1590
+ " <td>9</td>\n",
1591
+ " <td>1</td>\n",
1592
+ " <td>4</td>\n",
1593
+ " <td>0</td>\n",
1594
+ " <td>0</td>\n",
1595
+ " <td>4356</td>\n",
1596
+ " <td>40</td>\n",
1597
+ " <td>38</td>\n",
1598
+ " <td>0</td>\n",
1599
+ " </tr>\n",
1600
+ " <tr>\n",
1601
+ " <th>1</th>\n",
1602
+ " <td>82</td>\n",
1603
+ " <td>3</td>\n",
1604
+ " <td>9</td>\n",
1605
+ " <td>6</td>\n",
1606
+ " <td>3</td>\n",
1607
+ " <td>1</td>\n",
1608
+ " <td>4</td>\n",
1609
+ " <td>0</td>\n",
1610
+ " <td>0</td>\n",
1611
+ " <td>4356</td>\n",
1612
+ " <td>18</td>\n",
1613
+ " <td>38</td>\n",
1614
+ " <td>0</td>\n",
1615
+ " </tr>\n",
1616
+ " <tr>\n",
1617
+ " <th>2</th>\n",
1618
+ " <td>66</td>\n",
1619
+ " <td>3</td>\n",
1620
+ " <td>10</td>\n",
1621
+ " <td>6</td>\n",
1622
+ " <td>9</td>\n",
1623
+ " <td>4</td>\n",
1624
+ " <td>2</td>\n",
1625
+ " <td>0</td>\n",
1626
+ " <td>0</td>\n",
1627
+ " <td>4356</td>\n",
1628
+ " <td>40</td>\n",
1629
+ " <td>38</td>\n",
1630
+ " <td>0</td>\n",
1631
+ " </tr>\n",
1632
+ " <tr>\n",
1633
+ " <th>3</th>\n",
1634
+ " <td>54</td>\n",
1635
+ " <td>3</td>\n",
1636
+ " <td>4</td>\n",
1637
+ " <td>0</td>\n",
1638
+ " <td>6</td>\n",
1639
+ " <td>4</td>\n",
1640
+ " <td>4</td>\n",
1641
+ " <td>0</td>\n",
1642
+ " <td>0</td>\n",
1643
+ " <td>3900</td>\n",
1644
+ " <td>40</td>\n",
1645
+ " <td>38</td>\n",
1646
+ " <td>0</td>\n",
1647
+ " </tr>\n",
1648
+ " <tr>\n",
1649
+ " <th>4</th>\n",
1650
+ " <td>41</td>\n",
1651
+ " <td>3</td>\n",
1652
+ " <td>10</td>\n",
1653
+ " <td>5</td>\n",
1654
+ " <td>9</td>\n",
1655
+ " <td>3</td>\n",
1656
+ " <td>4</td>\n",
1657
+ " <td>0</td>\n",
1658
+ " <td>0</td>\n",
1659
+ " <td>3900</td>\n",
1660
+ " <td>40</td>\n",
1661
+ " <td>38</td>\n",
1662
+ " <td>0</td>\n",
1663
+ " </tr>\n",
1664
+ " </tbody>\n",
1665
+ "</table>\n",
1666
+ "</div>"
1667
+ ],
1668
+ "text/plain": [
1669
+ " age workclass education.num marital.status occupation relationship \\\n",
1670
+ "0 90 3 9 6 9 1 \n",
1671
+ "1 82 3 9 6 3 1 \n",
1672
+ "2 66 3 10 6 9 4 \n",
1673
+ "3 54 3 4 0 6 4 \n",
1674
+ "4 41 3 10 5 9 3 \n",
1675
+ "\n",
1676
+ " race sex capital.gain capital.loss hours.per.week native.country \\\n",
1677
+ "0 4 0 0 4356 40 38 \n",
1678
+ "1 4 0 0 4356 18 38 \n",
1679
+ "2 2 0 0 4356 40 38 \n",
1680
+ "3 4 0 0 3900 40 38 \n",
1681
+ "4 4 0 0 3900 40 38 \n",
1682
+ "\n",
1683
+ " income \n",
1684
+ "0 0 \n",
1685
+ "1 0 \n",
1686
+ "2 0 \n",
1687
+ "3 0 \n",
1688
+ "4 0 "
1689
+ ]
1690
+ },
1691
+ "execution_count": 16,
1692
+ "metadata": {},
1693
+ "output_type": "execute_result"
1694
+ }
1695
+ ],
1696
+ "source": [
1697
+ "adult_df.head()"
1698
+ ]
1699
+ },
1700
+ {
1701
+ "cell_type": "code",
1702
+ "execution_count": 17,
1703
+ "metadata": {},
1704
+ "outputs": [
1705
+ {
1706
+ "data": {
1707
+ "text/plain": [
1708
+ "(0.21598015058404135, 0.0)"
1709
+ ]
1710
+ },
1711
+ "execution_count": 17,
1712
+ "metadata": {},
1713
+ "output_type": "execute_result"
1714
+ }
1715
+ ],
1716
+ "source": [
1717
+ "from scipy.stats import pearsonr\n",
1718
+ "\n",
1719
+ "pearsonr(adult_df['income'], adult_df['sex'])\n",
1720
+ "\n",
1721
+ "\n"
1722
+ ]
1723
+ },
1724
+ {
1725
+ "cell_type": "code",
1726
+ "execution_count": 20,
1727
+ "metadata": {},
1728
+ "outputs": [],
1729
+ "source": [
1730
+ "privileged_groups = [{'sex': 1}]\n",
1731
+ "unprivileged_groups = [{'sex': 0}]\n",
1732
+ "dataset_orig = StandardDataset(adult_df,\n",
1733
+ " label_name='income',\n",
1734
+ " protected_attribute_names=['sex'],\n",
1735
+ " favorable_classes=[1],\n",
1736
+ " privileged_classes=[[1]])"
1737
+ ]
1738
+ },
1739
+ {
1740
+ "cell_type": "code",
1741
+ "execution_count": 21,
1742
+ "metadata": {},
1743
+ "outputs": [
1744
+ {
1745
+ "data": {
1746
+ "text/markdown": [
1747
+ "#### Original training dataset"
1748
+ ],
1749
+ "text/plain": [
1750
+ "<IPython.core.display.Markdown object>"
1751
+ ]
1752
+ },
1753
+ "metadata": {},
1754
+ "output_type": "display_data"
1755
+ },
1756
+ {
1757
+ "name": "stdout",
1758
+ "output_type": "stream",
1759
+ "text": [
1760
+ "Difference in mean outcomes between unprivileged and privileged groups = -0.196276\n"
1761
+ ]
1762
+ }
1763
+ ],
1764
+ "source": [
1765
+ "metric_orig_train = BinaryLabelDatasetMetric(dataset_orig, \n",
1766
+ " unprivileged_groups=unprivileged_groups,\n",
1767
+ " privileged_groups=privileged_groups)\n",
1768
+ "display(Markdown(\"#### Original training dataset\"))\n",
1769
+ "print(\"Difference in mean outcomes between unprivileged and privileged groups = %f\" % metric_orig_train.mean_difference())"
1770
+ ]
1771
+ },
1772
+ {
1773
+ "cell_type": "code",
1774
+ "execution_count": 10,
1775
+ "metadata": {},
1776
+ "outputs": [],
1777
+ "source": [
1778
+ "data_orig_train = pd.read_pickle('../../Results/AdaBoost/2-boosting-algorithms-model-for-adult-census-income_Train.pkl')\n",
1779
+ "data_orig_test = pd.read_pickle('../../Results/AdaBoost/2-boosting-algorithms-model-for-adult-census-income_Test.pkl')"
1780
+ ]
1781
+ },
1782
+ {
1783
+ "cell_type": "code",
1784
+ "execution_count": 11,
1785
+ "metadata": {},
1786
+ "outputs": [],
1787
+ "source": [
1788
+ "import ipynbname\n",
1789
+ "nb_fname = ipynbname.name()\n",
1790
+ "nb_path = ipynbname.path()\n",
1791
+ "\n",
1792
+ "from sklearn.ensemble import AdaBoostClassifier\n",
1793
+ "import pickle\n",
1794
+ "\n",
1795
+ "#data_orig_train, data_orig_test = dataset_orig.split([0.7], shuffle=True)\n",
1796
+ "X_train = data_orig_train.features\n",
1797
+ "y_train = data_orig_train.labels.ravel()\n",
1798
+ "\n",
1799
+ "X_test = data_orig_test.features\n",
1800
+ "y_test = data_orig_test.labels.ravel()\n",
1801
+ "num_estimators = 100\n",
1802
+ "\n",
1803
+ "model = AdaBoostClassifier(DecisionTreeClassifier(), random_state = 1212, n_estimators= 1)\n",
1804
+ "\n",
1805
+ "mdl = model.fit(X_train, y_train)\n",
1806
+ "#with open('../../Results/AdaBoost/' + nb_fname + '.pkl', 'wb') as f:\n",
1807
+ "# pickle.dump(mdl, f)\n",
1808
+ "\n",
1809
+ "#with open('../../Results/AdaBoost/' + nb_fname + '_Train' + '.pkl', 'wb') as f:\n",
1810
+ "# pickle.dump(data_orig_train, f) \n",
1811
+ " \n",
1812
+ "#with open('../../Results/AdaBoost/' + nb_fname + '_Test' + '.pkl', 'wb') as f:\n",
1813
+ "# pickle.dump(data_orig_test, f) "
1814
+ ]
1815
+ },
1816
+ {
1817
+ "cell_type": "code",
1818
+ "execution_count": 26,
1819
+ "metadata": {},
1820
+ "outputs": [],
1821
+ "source": [
1822
+ "from csv import writer\n",
1823
+ "from sklearn.metrics import accuracy_score, f1_score\n",
1824
+ "\n",
1825
+ "final_metrics = []\n",
1826
+ "accuracy = []\n",
1827
+ "f1= []\n",
1828
+ "\n",
1829
+ "for i in range(1,num_estimators+1):\n",
1830
+ " \n",
1831
+ " model = AdaBoostClassifier(DecisionTreeClassifier(criterion='entropy', splitter = 'random', max_depth = 1), random_state = 1212, n_estimators= i)\n",
1832
+ " \n",
1833
+ " mdl = model.fit(X_train, y_train)\n",
1834
+ " yy = mdl.predict(X_test)\n",
1835
+ " accuracy.append(accuracy_score(y_test, yy))\n",
1836
+ " f1.append(f1_score(y_test, yy))\n",
1837
+ " fair = get_fair_metrics_and_plot(data_orig_test, mdl) \n",
1838
+ " fair_list = fair.iloc[1].tolist()\n",
1839
+ " #fair_list.insert(0, i)\n",
1840
+ " final_metrics.append(fair_list)\n"
1841
+ ]
1842
+ },
1843
+ {
1844
+ "cell_type": "code",
1845
+ "execution_count": 27,
1846
+ "metadata": {},
1847
+ "outputs": [
1848
+ {
1849
+ "name": "stdout",
1850
+ "output_type": "stream",
1851
+ "text": [
1852
+ " 0 1 2 3 4\n",
1853
+ "0 0.000000 0.000000 0.000000 NaN 0.271671\n",
1854
+ "1 0.000000 0.000000 0.000000 NaN 0.271671\n",
1855
+ "2 -0.435397 -0.718274 0.515449 0.000000 0.140487\n",
1856
+ "3 -0.021355 -0.012690 0.018903 0.000000 0.274012\n",
1857
+ "4 -0.456752 -0.730964 0.534352 0.000000 0.139671\n",
1858
+ ".. ... ... ... ... ...\n",
1859
+ "95 -0.182016 -0.143454 0.111036 0.269581 0.126545\n",
1860
+ "96 -0.182169 -0.145264 0.112051 0.268516 0.126926\n",
1861
+ "97 -0.183398 -0.147294 0.113555 0.268094 0.126696\n",
1862
+ "98 -0.183859 -0.147294 0.113837 0.266706 0.126696\n",
1863
+ "99 -0.184166 -0.145485 0.113153 0.267274 0.126415\n",
1864
+ "\n",
1865
+ "[100 rows x 5 columns]\n"
1866
+ ]
1867
+ },
1868
+ {
1869
+ "data": {
1870
+ "text/html": [
1871
+ "<div>\n",
1872
+ "<style scoped>\n",
1873
+ " .dataframe tbody tr th:only-of-type {\n",
1874
+ " vertical-align: middle;\n",
1875
+ " }\n",
1876
+ "\n",
1877
+ " .dataframe tbody tr th {\n",
1878
+ " vertical-align: top;\n",
1879
+ " }\n",
1880
+ "\n",
1881
+ " .dataframe thead th {\n",
1882
+ " text-align: right;\n",
1883
+ " }\n",
1884
+ "</style>\n",
1885
+ "<table border=\"1\" class=\"dataframe\">\n",
1886
+ " <thead>\n",
1887
+ " <tr style=\"text-align: right;\">\n",
1888
+ " <th></th>\n",
1889
+ " <th>classifier</th>\n",
1890
+ " <th>T0</th>\n",
1891
+ " <th>T1</th>\n",
1892
+ " <th>T2</th>\n",
1893
+ " <th>T3</th>\n",
1894
+ " <th>T4</th>\n",
1895
+ " <th>T5</th>\n",
1896
+ " <th>T6</th>\n",
1897
+ " <th>T7</th>\n",
1898
+ " <th>T8</th>\n",
1899
+ " <th>...</th>\n",
1900
+ " <th>T90</th>\n",
1901
+ " <th>T91</th>\n",
1902
+ " <th>T92</th>\n",
1903
+ " <th>T93</th>\n",
1904
+ " <th>T94</th>\n",
1905
+ " <th>T95</th>\n",
1906
+ " <th>T96</th>\n",
1907
+ " <th>T97</th>\n",
1908
+ " <th>T98</th>\n",
1909
+ " <th>T99</th>\n",
1910
+ " </tr>\n",
1911
+ " </thead>\n",
1912
+ " <tbody>\n",
1913
+ " <tr>\n",
1914
+ " <th>accuracy</th>\n",
1915
+ " <td>0.845941</td>\n",
1916
+ " <td>0.762105</td>\n",
1917
+ " <td>0.762105</td>\n",
1918
+ " <td>0.761695</td>\n",
1919
+ " <td>0.752994</td>\n",
1920
+ " <td>0.752585</td>\n",
1921
+ " <td>0.808169</td>\n",
1922
+ " <td>0.809499</td>\n",
1923
+ " <td>0.814208</td>\n",
1924
+ " <td>0.812673</td>\n",
1925
+ " <td>...</td>\n",
1926
+ " <td>0.846351</td>\n",
1927
+ " <td>0.845839</td>\n",
1928
+ " <td>0.845737</td>\n",
1929
+ " <td>0.845225</td>\n",
1930
+ " <td>0.845737</td>\n",
1931
+ " <td>0.846555</td>\n",
1932
+ " <td>0.846146</td>\n",
1933
+ " <td>0.845839</td>\n",
1934
+ " <td>0.845839</td>\n",
1935
+ " <td>0.845941</td>\n",
1936
+ " </tr>\n",
1937
+ " <tr>\n",
1938
+ " <th>f1</th>\n",
1939
+ " <td>0.639866</td>\n",
1940
+ " <td>0.000000</td>\n",
1941
+ " <td>0.000000</td>\n",
1942
+ " <td>0.548662</td>\n",
1943
+ " <td>0.020300</td>\n",
1944
+ " <td>0.543704</td>\n",
1945
+ " <td>0.443917</td>\n",
1946
+ " <td>0.449897</td>\n",
1947
+ " <td>0.503148</td>\n",
1948
+ " <td>0.482173</td>\n",
1949
+ " <td>...</td>\n",
1950
+ " <td>0.641509</td>\n",
1951
+ " <td>0.640057</td>\n",
1952
+ " <td>0.639560</td>\n",
1953
+ " <td>0.639485</td>\n",
1954
+ " <td>0.639215</td>\n",
1955
+ " <td>0.640096</td>\n",
1956
+ " <td>0.638962</td>\n",
1957
+ " <td>0.639195</td>\n",
1958
+ " <td>0.639195</td>\n",
1959
+ " <td>0.639866</td>\n",
1960
+ " </tr>\n",
1961
+ " <tr>\n",
1962
+ " <th>statistical_parity_difference</th>\n",
1963
+ " <td>-0.184166</td>\n",
1964
+ " <td>0.000000</td>\n",
1965
+ " <td>0.000000</td>\n",
1966
+ " <td>-0.435397</td>\n",
1967
+ " <td>-0.021355</td>\n",
1968
+ " <td>-0.456752</td>\n",
1969
+ " <td>-0.160701</td>\n",
1970
+ " <td>-0.161317</td>\n",
1971
+ " <td>-0.202337</td>\n",
1972
+ " <td>-0.173927</td>\n",
1973
+ " <td>...</td>\n",
1974
+ " <td>-0.184475</td>\n",
1975
+ " <td>-0.184474</td>\n",
1976
+ " <td>-0.184934</td>\n",
1977
+ " <td>-0.185090</td>\n",
1978
+ " <td>-0.183399</td>\n",
1979
+ " <td>-0.182016</td>\n",
1980
+ " <td>-0.182169</td>\n",
1981
+ " <td>-0.183398</td>\n",
1982
+ " <td>-0.183859</td>\n",
1983
+ " <td>-0.184166</td>\n",
1984
+ " </tr>\n",
1985
+ " <tr>\n",
1986
+ " <th>equal_opportunity_difference</th>\n",
1987
+ " <td>-0.145485</td>\n",
1988
+ " <td>0.000000</td>\n",
1989
+ " <td>0.000000</td>\n",
1990
+ " <td>-0.718274</td>\n",
1991
+ " <td>-0.012690</td>\n",
1992
+ " <td>-0.730964</td>\n",
1993
+ " <td>-0.379695</td>\n",
1994
+ " <td>-0.376297</td>\n",
1995
+ " <td>-0.453168</td>\n",
1996
+ " <td>-0.365838</td>\n",
1997
+ " <td>...</td>\n",
1998
+ " <td>-0.145198</td>\n",
1999
+ " <td>-0.146500</td>\n",
2000
+ " <td>-0.152149</td>\n",
2001
+ " <td>-0.147515</td>\n",
2002
+ " <td>-0.144469</td>\n",
2003
+ " <td>-0.143454</td>\n",
2004
+ " <td>-0.145264</td>\n",
2005
+ " <td>-0.147294</td>\n",
2006
+ " <td>-0.147294</td>\n",
2007
+ " <td>-0.145485</td>\n",
2008
+ " </tr>\n",
2009
+ " <tr>\n",
2010
+ " <th>average_abs_odds_difference</th>\n",
2011
+ " <td>0.113153</td>\n",
2012
+ " <td>0.000000</td>\n",
2013
+ " <td>0.000000</td>\n",
2014
+ " <td>0.515449</td>\n",
2015
+ " <td>0.018903</td>\n",
2016
+ " <td>0.534352</td>\n",
2017
+ " <td>0.222674</td>\n",
2018
+ " <td>0.220975</td>\n",
2019
+ " <td>0.271748</td>\n",
2020
+ " <td>0.220662</td>\n",
2021
+ " <td>...</td>\n",
2022
+ " <td>0.112947</td>\n",
2023
+ " <td>0.113709</td>\n",
2024
+ " <td>0.116423</td>\n",
2025
+ " <td>0.114533</td>\n",
2026
+ " <td>0.112363</td>\n",
2027
+ " <td>0.111036</td>\n",
2028
+ " <td>0.112051</td>\n",
2029
+ " <td>0.113555</td>\n",
2030
+ " <td>0.113837</td>\n",
2031
+ " <td>0.113153</td>\n",
2032
+ " </tr>\n",
2033
+ " <tr>\n",
2034
+ " <th>disparate_impact</th>\n",
2035
+ " <td>-1.319479</td>\n",
2036
+ " <td>NaN</td>\n",
2037
+ " <td>NaN</td>\n",
2038
+ " <td>-inf</td>\n",
2039
+ " <td>-inf</td>\n",
2040
+ " <td>-inf</td>\n",
2041
+ " <td>-inf</td>\n",
2042
+ " <td>-5.172173</td>\n",
2043
+ " <td>-5.111415</td>\n",
2044
+ " <td>-3.127099</td>\n",
2045
+ " <td>...</td>\n",
2046
+ " <td>-1.314049</td>\n",
2047
+ " <td>-1.317365</td>\n",
2048
+ " <td>-1.325888</td>\n",
2049
+ " <td>-1.313182</td>\n",
2050
+ " <td>-1.313088</td>\n",
2051
+ " <td>-1.310885</td>\n",
2052
+ " <td>-1.314845</td>\n",
2053
+ " <td>-1.316418</td>\n",
2054
+ " <td>-1.321608</td>\n",
2055
+ " <td>-1.319479</td>\n",
2056
+ " </tr>\n",
2057
+ " <tr>\n",
2058
+ " <th>theil_index</th>\n",
2059
+ " <td>0.126415</td>\n",
2060
+ " <td>0.271671</td>\n",
2061
+ " <td>0.271671</td>\n",
2062
+ " <td>0.140487</td>\n",
2063
+ " <td>0.274012</td>\n",
2064
+ " <td>0.139671</td>\n",
2065
+ " <td>0.188861</td>\n",
2066
+ " <td>0.187256</td>\n",
2067
+ " <td>0.172201</td>\n",
2068
+ " <td>0.178419</td>\n",
2069
+ " <td>...</td>\n",
2070
+ " <td>0.125786</td>\n",
2071
+ " <td>0.126283</td>\n",
2072
+ " <td>0.126481</td>\n",
2073
+ " <td>0.126316</td>\n",
2074
+ " <td>0.126646</td>\n",
2075
+ " <td>0.126545</td>\n",
2076
+ " <td>0.126926</td>\n",
2077
+ " <td>0.126696</td>\n",
2078
+ " <td>0.126696</td>\n",
2079
+ " <td>0.126415</td>\n",
2080
+ " </tr>\n",
2081
+ " </tbody>\n",
2082
+ "</table>\n",
2083
+ "<p>7 rows × 101 columns</p>\n",
2084
+ "</div>"
2085
+ ],
2086
+ "text/plain": [
2087
+ " classifier T0 T1 T2 \\\n",
2088
+ "accuracy 0.845941 0.762105 0.762105 0.761695 \n",
2089
+ "f1 0.639866 0.000000 0.000000 0.548662 \n",
2090
+ "statistical_parity_difference -0.184166 0.000000 0.000000 -0.435397 \n",
2091
+ "equal_opportunity_difference -0.145485 0.000000 0.000000 -0.718274 \n",
2092
+ "average_abs_odds_difference 0.113153 0.000000 0.000000 0.515449 \n",
2093
+ "disparate_impact -1.319479 NaN NaN -inf \n",
2094
+ "theil_index 0.126415 0.271671 0.271671 0.140487 \n",
2095
+ "\n",
2096
+ " T3 T4 T5 T6 \\\n",
2097
+ "accuracy 0.752994 0.752585 0.808169 0.809499 \n",
2098
+ "f1 0.020300 0.543704 0.443917 0.449897 \n",
2099
+ "statistical_parity_difference -0.021355 -0.456752 -0.160701 -0.161317 \n",
2100
+ "equal_opportunity_difference -0.012690 -0.730964 -0.379695 -0.376297 \n",
2101
+ "average_abs_odds_difference 0.018903 0.534352 0.222674 0.220975 \n",
2102
+ "disparate_impact -inf -inf -inf -5.172173 \n",
2103
+ "theil_index 0.274012 0.139671 0.188861 0.187256 \n",
2104
+ "\n",
2105
+ " T7 T8 ... T90 T91 \\\n",
2106
+ "accuracy 0.814208 0.812673 ... 0.846351 0.845839 \n",
2107
+ "f1 0.503148 0.482173 ... 0.641509 0.640057 \n",
2108
+ "statistical_parity_difference -0.202337 -0.173927 ... -0.184475 -0.184474 \n",
2109
+ "equal_opportunity_difference -0.453168 -0.365838 ... -0.145198 -0.146500 \n",
2110
+ "average_abs_odds_difference 0.271748 0.220662 ... 0.112947 0.113709 \n",
2111
+ "disparate_impact -5.111415 -3.127099 ... -1.314049 -1.317365 \n",
2112
+ "theil_index 0.172201 0.178419 ... 0.125786 0.126283 \n",
2113
+ "\n",
2114
+ " T92 T93 T94 T95 \\\n",
2115
+ "accuracy 0.845737 0.845225 0.845737 0.846555 \n",
2116
+ "f1 0.639560 0.639485 0.639215 0.640096 \n",
2117
+ "statistical_parity_difference -0.184934 -0.185090 -0.183399 -0.182016 \n",
2118
+ "equal_opportunity_difference -0.152149 -0.147515 -0.144469 -0.143454 \n",
2119
+ "average_abs_odds_difference 0.116423 0.114533 0.112363 0.111036 \n",
2120
+ "disparate_impact -1.325888 -1.313182 -1.313088 -1.310885 \n",
2121
+ "theil_index 0.126481 0.126316 0.126646 0.126545 \n",
2122
+ "\n",
2123
+ " T96 T97 T98 T99 \n",
2124
+ "accuracy 0.846146 0.845839 0.845839 0.845941 \n",
2125
+ "f1 0.638962 0.639195 0.639195 0.639866 \n",
2126
+ "statistical_parity_difference -0.182169 -0.183398 -0.183859 -0.184166 \n",
2127
+ "equal_opportunity_difference -0.145264 -0.147294 -0.147294 -0.145485 \n",
2128
+ "average_abs_odds_difference 0.112051 0.113555 0.113837 0.113153 \n",
2129
+ "disparate_impact -1.314845 -1.316418 -1.321608 -1.319479 \n",
2130
+ "theil_index 0.126926 0.126696 0.126696 0.126415 \n",
2131
+ "\n",
2132
+ "[7 rows x 101 columns]"
2133
+ ]
2134
+ },
2135
+ "execution_count": 27,
2136
+ "metadata": {},
2137
+ "output_type": "execute_result"
2138
+ }
2139
+ ],
2140
+ "source": [
2141
+ "import numpy as np\n",
2142
+ "final_result = pd.DataFrame(final_metrics)\n",
2143
+ "print(final_result)\n",
2144
+ "final_result[3] = np.log(final_result[3])\n",
2145
+ "final_result = final_result.transpose()\n",
2146
+ "acc_f1 = pd.DataFrame(accuracy)\n",
2147
+ "acc_f1['f1'] = f1\n",
2148
+ "acc_f1 = pd.DataFrame(acc_f1).transpose()\n",
2149
+ "acc = acc_f1.rename(index={0: 'accuracy', 1: 'f1'})\n",
2150
+ "final_result = final_result.rename(index={0: 'statistical_parity_difference', 1: 'equal_opportunity_difference', 2: 'average_abs_odds_difference', 3: 'disparate_impact', 4: 'theil_index'})\n",
2151
+ "final_result = pd.concat([acc,final_result])\n",
2152
+ "final_result.columns = ['T' + str(col) for col in final_result.columns]\n",
2153
+ "final_result.insert(0, \"classifier\", final_result['T' + str(num_estimators - 1)]) ##Add final metrics add the beginning of the df\n",
2154
+ "#final_result.to_csv('../../Results/AdaBoost/' + nb_fname + '.csv')\n",
2155
+ "final_result"
2156
+ ]
2157
+ },
2158
+ {
2159
+ "cell_type": "code",
2160
+ "execution_count": null,
2161
+ "metadata": {},
2162
+ "outputs": [],
2163
+ "source": []
2164
+ }
2165
+ ],
2166
+ "metadata": {
2167
+ "kernelspec": {
2168
+ "display_name": "Python 3",
2169
+ "language": "python",
2170
+ "name": "python3"
2171
+ },
2172
+ "language_info": {
2173
+ "codemirror_mode": {
2174
+ "name": "ipython",
2175
+ "version": 3
2176
+ },
2177
+ "file_extension": ".py",
2178
+ "mimetype": "text/x-python",
2179
+ "name": "python",
2180
+ "nbconvert_exporter": "python",
2181
+ "pygments_lexer": "ipython3",
2182
+ "version": "3.8.5"
2183
+ },
2184
+ "papermill": {
2185
+ "default_parameters": {},
2186
+ "duration": 1432.173477,
2187
+ "end_time": "2021-03-19T06:01:46.424650",
2188
+ "environment_variables": {},
2189
+ "exception": null,
2190
+ "input_path": "__notebook__.ipynb",
2191
+ "output_path": "__notebook__.ipynb",
2192
+ "parameters": {},
2193
+ "start_time": "2021-03-19T05:37:54.251173",
2194
+ "version": "2.2.2"
2195
+ }
2196
+ },
2197
+ "nbformat": 4,
2198
+ "nbformat_minor": 4
2199
+ }
AdultNoteBook/Kernels/AdaBoost/2-boosting-algorithms-model-for-adult-census-income.py ADDED
@@ -0,0 +1,276 @@
1
+ #!/usr/bin/env python
2
+ # coding: utf-8
3
+
4
+ # In[1]:
5
+
6
+
7
+ import pandas as pd
8
+ import numpy as np
9
+ import seaborn as sns
10
+ import matplotlib.pyplot as plt
11
+ import warnings
12
+ warnings.filterwarnings("ignore")
13
+
14
+ from sklearn.pipeline import Pipeline
15
+ from sklearn.preprocessing import OneHotEncoder
16
+ import category_encoders as ce
17
+ from sklearn.impute import SimpleImputer
18
+ from sklearn.compose import ColumnTransformer
19
+
20
+ from sklearn.model_selection import train_test_split, RandomizedSearchCV, StratifiedKFold, cross_val_score
21
+
22
+ from sklearn.tree import DecisionTreeClassifier
23
+ from sklearn.ensemble import AdaBoostClassifier, GradientBoostingClassifier
24
+ from xgboost.sklearn import XGBClassifier
25
+ from sklearn.metrics import classification_report, f1_score, plot_roc_curve
26
+
27
+
28
+ # In[2]:
29
+
30
+
31
+ adult = pd.read_csv('../input/adult-census-income/adult.csv')
32
+ adult.sample()
33
+
34
+
35
+ # In[3]:
36
+
37
+
38
+ adult.info()
39
+
40
+
41
+ # *The info summary suggests there are no missing values at all, but if you inspect the data carefully you will find **missing values encoded as '?'**.*
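+ # (Added note, illustrative only, not part of the original notebook:) a quick check such as
+ # adult.isin(['?']).sum() shows how many '?' placeholders each column contains.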
42
+
43
+ # # PreProcessing
44
+
45
+ # *Preprocessing scheme:*
46
+ # * Encode all columns
47
+ # * Drop education because it's already encoded on education.num
48
+ # * Drop fnlwgt because it is nearly unique for every row
49
+
50
+ # *Handling Missing Value In Pipeline*
51
+
52
+ # In[4]:
53
+
54
+
55
+ binary_encoder_pipe = Pipeline([
56
+ ('imputer', SimpleImputer(strategy = 'constant', fill_value = 'NC', missing_values = '?')),
57
+ ('binary', ce.BinaryEncoder())
58
+ ])
59
+
60
+ transformer = ColumnTransformer([
61
+ ('one hot', OneHotEncoder(drop = 'first'), ['relationship', 'race', 'sex']),
62
+ ('binary', binary_encoder_pipe, ['workclass', 'marital.status', 'occupation', 'native.country'])],
63
+ remainder = 'passthrough')
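+ # (Added annotation) The transformer one-hot encodes the low-cardinality columns, imputes '?' as 'NC'
+ # before binary-encoding the high-cardinality ones, and remainder = 'passthrough' keeps the remaining
+ # numeric columns (age, education.num, capital.gain, capital.loss, hours.per.week) unchanged.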
64
+
65
+
66
+ # *Splitting Data*
67
+
68
+ # In[5]:
69
+
70
+
71
+ adult['income'].value_counts()
72
+
73
+
74
+ # Income is the target column and it is **imbalanced**. I encode **income as 1 if it is >50K and 0 if it is <=50K**.
75
+
76
+ # In[6]:
77
+
78
+
79
+ X = adult.drop(['fnlwgt', 'education', 'income'], axis = 1)
80
+ y = np.where(adult['income'] == '>50K', 1, 0)
81
+
82
+
83
+ # In[7]:
84
+
85
+
86
+ X.shape
87
+
88
+
89
+ # In[8]:
90
+
91
+
92
+ X_train, X_test, y_train, y_test = train_test_split(X, y, stratify = y,
93
+ test_size = 0.3, random_state = 1212)
94
+
95
+
96
+ # I use 0.3 as the test_size and a fixed random_state, with stratify = y so the classes are divided proportionally between train and test.
97
+
98
+ # # Define Model
99
+
100
+ # I use 3 Boosting Algorithms Models:
101
+ # * Ada Boost Classifier
102
+ # * Gradient Boosting Classifier
103
+ # * XGB Classifier
104
+
105
+ # In[9]:
106
+
107
+
108
+ adaboost = AdaBoostClassifier(DecisionTreeClassifier(), random_state = 1212)
109
+ pipe_ada = Pipeline([
110
+ ('transformer', transformer),
111
+ ('adaboost', adaboost)])
112
+
113
+ gradboost = GradientBoostingClassifier(random_state = 1212)
114
+ pipe_grad = Pipeline([
115
+ ('transformer', transformer),
116
+ ('gradboost', gradboost)])
117
+
118
+ XGBOOST = XGBClassifier(random_state = 1212)
119
+ pipe_XGB = Pipeline([
120
+ ('transformer', transformer),
121
+ ('XGBOOST', XGBOOST)])
122
+
123
+
124
+ # # Cross Validation
125
+
126
+ # *Model Evaluation*
127
+
128
+ # In[10]:
129
+
130
+
131
+ def model_evaluation(model, metric):
132
+ skfold = StratifiedKFold(n_splits = 5)
133
+ model_cv = cross_val_score(model, X_train, y_train, cv = skfold, scoring = metric)
134
+ return model_cv
135
+
136
+ pipe_ada_cv = model_evaluation(pipe_ada, 'f1')
137
+ pipe_grad_cv = model_evaluation(pipe_grad, 'f1')
138
+ pipe_XGB_cv = model_evaluation(pipe_XGB, 'f1')
139
+
140
+
141
+ # *Fitting Data*
142
+
143
+ # In[11]:
144
+
145
+
146
+ for model in [pipe_ada, pipe_grad, pipe_XGB]:
147
+ model.fit(X_train, y_train)
148
+
149
+
150
+ # *Summary*
151
+
152
+ # In[12]:
153
+
154
+
155
+ score_mean = [pipe_ada_cv.mean(), pipe_grad_cv.mean(), pipe_XGB_cv.mean()]
156
+ score_std = [pipe_ada_cv.std(), pipe_grad_cv.std(), pipe_XGB_cv.std()]
157
+ score_f1 = [f1_score(y_test, pipe_ada.predict(X_test)),
158
+ f1_score(y_test, pipe_grad.predict(X_test)),
159
+ f1_score(y_test, pipe_XGB.predict(X_test))]
160
+ method_name = ['Ada Boost Classifier', 'Gradient Boost Classifier ',
161
+ 'XGB Classifier']
162
+ summary = pd.DataFrame({'method': method_name, 'mean score': score_mean,
163
+ 'std score': score_std, 'f1 score': score_f1})
164
+ summary
165
+
166
+
167
+ # From these scores, the **XGB Classifier is the best one**, with the highest f1 score and mean score and the lowest std. Let's cross-check with the feature importances to see whether the model behaves sensibly.
168
+
169
+ # In[13]:
170
+
171
+
172
+ plot_roc_curve(pipe_XGB, X_test, y_test)
173
+
174
+
175
+ # # Importance Features
176
+
177
+ # In[14]:
178
+
179
+
180
+ features = list(pipe_ada[0].transformers_[0][1].get_feature_names()) + pipe_ada[0].transformers_[1][1][1].get_feature_names() + ['age', 'education.num', 'capital.gain', 'capital.loss', 'hours.per.week']
181
+
182
+
183
+ # In[15]:
184
+
185
+
186
+ imptab_ada = pd.DataFrame(pipe_ada[1].feature_importances_, columns = ['imp'], index = features)
187
+ imptab_ada.sort_values('imp').plot(kind = 'barh', figsize = (15,8))
188
+ plt.title('Importance Table For Ada Boost Classifier Model')
189
+ plt.show()
190
+
191
+
192
+ # In[16]:
193
+
194
+
195
+ imptab_grad = pd.DataFrame(pipe_grad[1].feature_importances_, columns = ['imp'], index = features)
196
+ imptab_grad.sort_values('imp').plot(kind = 'barh', figsize = (15,8))
197
+ plt.title('Importance Table For Gradient Boost Classifier Model')
198
+ plt.show()
199
+
200
+
201
+ # In[17]:
202
+
203
+
204
+ imptab_XGB = pd.DataFrame(pipe_XGB[1].feature_importances_, columns = ['imp'], index = features)
205
+ imptab_XGB.sort_values('imp').plot(kind = 'barh', figsize = (15,8))
206
+ plt.title('Importance Table For XGB Classifier Model')
207
+ plt.show()
208
+
209
+
210
+ # From the feature importance tables, the **XGB Classifier makes use of almost all the features**, which is consistent with the cross-validation result. Now let's see whether hyperparameter tuning can push the score any higher.
211
+
212
+ # # HyperParameter Tuning
213
+
214
+ # In[18]:
215
+
216
+
217
+ XGBOOST = XGBClassifier(random_state = 1212)
218
+ estimator = Pipeline([('transformer', transformer), ('XGBOOST', XGBOOST)])
219
+
220
+ hyperparam_space = {
221
+ 'XGBOOST__learning_rate': [0.1, 0.05, 0.01, 0.005],
222
+ 'XGBOOST__n_estimators': [50, 100, 150, 200],
223
+ 'XGBOOST__max_depth': [3, 5, 7, 9]
224
+ }
225
+
226
+ random = RandomizedSearchCV(
227
+ estimator,
228
+ param_distributions = hyperparam_space,
229
+ cv = StratifiedKFold(n_splits = 5),
230
+ scoring = 'f1',
231
+ n_iter = 10,
232
+ n_jobs = -1)
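+ # (Added annotation) With n_iter = 10, RandomizedSearchCV tries 10 of the 4*4*4 = 64 possible
+ # parameter combinations, scoring each with 5-fold stratified cross-validation on f1.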
233
+
234
+ random.fit(X_train, y_train)
235
+
236
+
237
+ # In[19]:
238
+
239
+
240
+ print('best score', random.best_score_)
241
+ print('best param', random.best_params_)
242
+
243
+
244
+ # After hyperparameter tuning, the best cross-validation score is 0.6996, which is actually lower. The best parameters are n_estimators = 150, max_depth = 5, and learning_rate = 0.1. Let's compare the results.
245
+
246
+ # # Before VS After Tuning Comparison
247
+
248
+ # In[20]:
249
+
250
+
251
+ estimator.fit(X_train, y_train)
252
+ y_pred_estimator = estimator.predict(X_test)
253
+ print(classification_report(y_test, y_pred_estimator))
254
+
255
+
256
+ # In[21]:
257
+
258
+
259
+ random.best_estimator_.fit(X_train, y_train)
260
+ y_pred_random = random.best_estimator_.predict(X_test)
261
+ print(classification_report(y_test, y_pred_random))
262
+
263
+
264
+ # In[22]:
265
+
266
+
267
+ score_list = [f1_score(y_test, y_pred_estimator), f1_score(y_test, y_pred_random)]
268
+ method_name = ['XGB Classifier Before Tuning', 'XGB Classifier After Tuning']
269
+ best_summary = pd.DataFrame({
270
+ 'method': method_name,
271
+ 'f1 score': score_list
272
+ })
273
+ best_summary
274
+
275
+
276
+ # In the end, hyperparameter tuning does not help much on this data. So if I have to choose, I pick the **XGB Classifier score before tuning, which is 0.71**. That number still isn't great, because the data is imbalanced and I did not apply any resampling.
AdultNoteBook/Kernels/AdaBoost/4-deep-analysis-and-90-accuracy.ipynb ADDED
The diff for this file is too large to render. See raw diff
 
AdultNoteBook/Kernels/AdaBoost/4-deep-analysis-and-90-accuracy.py ADDED
@@ -0,0 +1,994 @@
1
+ #!/usr/bin/env python
2
+ # coding: utf-8
3
+
4
+ # In[2]:
5
+
6
+
7
+ import pandas as pd
8
+ import numpy as np
9
+ import matplotlib.pyplot as plt
10
+ import plotly.express as px
11
+ import seaborn as sns
12
+ import warnings
13
+ warnings.filterwarnings('ignore')
14
+
15
+
16
+ # In[3]:
17
+
18
+
19
+ df=pd.read_csv('../input/adult-census-income/adult.csv')
20
+
21
+
22
+ # # EDA
23
+
24
+ # In[4]:
25
+
26
+
27
+ df.head()
28
+
29
+
30
+ # In[5]:
31
+
32
+
33
+ df.shape
34
+
35
+
36
+ # Dataset has 32561 rows and 15 columns.
37
+
38
+ # In[6]:
39
+
40
+
41
+ df.nunique()
42
+
43
+
44
+ # The label column has only two categories, so this is a classification problem. There are no constant columns, nor is there an identifier column.
45
+
46
+ # In[7]:
47
+
48
+
49
+ df.isnull().sum()
50
+
51
+
52
+ # There are no null values in the dataset
53
+
54
+ # In[8]:
55
+
56
+
57
+ df.dtypes
58
+
59
+
60
+ # There are 8 object-type features; the rest are integer type.
61
+
62
+ # In[9]:
63
+
64
+
65
+ df.skew()
66
+
67
+
68
+ # There is skewness present in the data which needs to be removed.
69
+
70
+ # In[10]:
71
+
72
+
73
+ df['income'].value_counts()
74
+
75
+
76
+ # Dataset is imbalanced.
77
+
78
+ # In[11]:
79
+
80
+
81
+ df.describe()
82
+
83
+
84
+ # Not all columns appear here because object-type columns are excluded from describe(). The count of each column is 32561, confirming there are no null values. The mean is much greater than the median for capital.gain and capital.loss, indicating strong right skew, and both columns also show high variance. The gaps between the min, max, and interquartile range suggest outliers in the data.
85
+
86
+ # ### Univariate Analysis
87
+
88
+ # In[12]:
89
+
90
+
91
+ plt.figure(figsize=(15,8))
92
+ plt.subplot(1,2,1)
93
+ df['income'].value_counts().plot.pie(autopct='%1.1f%%')
94
+ plt.subplot(1,2,2)
95
+ sns.countplot(x='income',data=df)
96
+ plt.ylabel('No. of People')
97
+ df['income'].value_counts()
98
+
99
+
100
+ # The dataset is highly imbalanced: less than 25% of records fall in the >50K income category while more than 75% are <=50K.
101
+
102
+ # In[13]:
103
+
104
+
105
+ #Separating categorical and continuous variables
106
+ cat=[feature for feature in df.columns if df[feature].nunique()<45]
107
+ cont=[feature for feature in df.columns if df[feature].nunique()>45]
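+ # (Added annotation) The nunique()<45 rule treats the low-cardinality columns (workclass, education,
+ # education.num, marital.status, occupation, relationship, race, sex, native.country, income) as
+ # categorical, and the rest (age, fnlwgt, capital.gain, capital.loss, hours.per.week) as continuous.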
108
+
109
+
110
+ # In[14]:
111
+
112
+
113
+ plt.figure(figsize=(15,15))
114
+ plt.subplot(2,1,1)
115
+ df['workclass'].value_counts().plot.pie(autopct='%1.1f%%')
116
+ plt.ylabel('')
117
+ plt.subplot(2,1,2)
118
+ sns.countplot(x='workclass',data=df)
119
+ plt.ylabel('No. of Individuals')
120
+ df['workclass'].value_counts()
121
+
122
+
123
+ # There are 9 workclass values in total, including Never-worked and one unknown category (?). Most individuals work in the private sector, and very few have never worked or work without pay. There are 3 categories of government jobs (state, federal and local), among which local government employs the most people.
124
+
125
+ # In[15]:
126
+
127
+
128
+ plt.figure(figsize=(15,15))
129
+ plt.subplot(2,1,1)
130
+ df['education'].value_counts().plot.pie(autopct='%1.1f%%')
131
+ plt.ylabel('')
132
+ plt.subplot(2,1,2)
133
+ sns.countplot(x='education',data=df)
134
+ plt.xticks(rotation=45)
135
+ plt.ylabel('No. of Individuals')
136
+ df['education'].value_counts()
137
+
138
+
139
+ # Most of the people are high-school graduates. Few have completed a masters or doctorate, and the number who only attended preschool or 1st-4th grade is the smallest.
140
+
141
+ # In[16]:
142
+
143
+
144
+ plt.figure(figsize=(15,15))
145
+ plt.subplot(2,1,1)
146
+ df['education.num'].value_counts().plot.pie(autopct='%1.1f%%')
147
+ plt.ylabel('')
148
+ plt.subplot(2,1,2)
149
+ sns.countplot(x='education.num',data=df)
150
+ plt.ylabel('No. of Individuals')
151
+ df['education.num'].value_counts()
152
+
153
+
154
+ # The majority of individuals fall in education numbers 9 and 10, a little above the median education number. People with the lowest and highest education levels are very few.
155
+
156
+ # In[17]:
157
+
158
+
159
+ plt.figure(figsize=(15,15))
160
+ plt.subplot(2,1,1)
161
+ df['marital.status'].value_counts().plot.pie(autopct='%1.1f%%')
162
+ plt.ylabel('')
163
+ plt.subplot(2,1,2)
164
+ sns.countplot(x='marital.status',data=df)
165
+ plt.ylabel('No. of Individuals')
166
+ df['marital.status'].value_counts()
167
+
168
+
169
+ # Most people are married to a civilian spouse or were never married; the fewest are married to someone in the armed forces. From the marital-status data we can also see that there are fewer young people in the workforce compared to older ones.
170
+
171
+ # In[18]:
172
+
173
+
174
+ plt.figure(figsize=(20,15))
175
+ plt.subplot(2,1,1)
176
+ df['occupation'].value_counts().plot.pie(autopct='%1.1f%%')
177
+ plt.ylabel('')
178
+ plt.subplot(2,1,2)
179
+ sns.countplot(x='occupation',data=df)
180
+ plt.xticks(rotation=45)
181
+ plt.ylabel('No. of Individuals')
182
+ df['occupation'].value_counts()
183
+
184
+
185
+ # We can observe that Prof-specialty has more people than any other occupation, followed by Craft-repair. The smallest occupation category is the Armed-Forces with only 9 people. There is an unknown occupation category (?) as well.
186
+
187
+ # In[19]:
188
+
189
+
190
+ plt.figure(figsize=(20,8))
191
+ plt.subplot(1,2,1)
192
+ df['relationship'].value_counts().plot.pie(autopct='%1.1f%%')
193
+ plt.ylabel('')
194
+ plt.subplot(1,2,2)
195
+ sns.countplot(x='relationship',data=df)
196
+ plt.ylabel('No. of Individuals')
197
+ df['relationship'].value_counts()
198
+
199
+
200
+ # There are many more husbands in the data than wives. About 25% of working individuals fall in the Not-in-family category.
201
+
202
+ # In[20]:
203
+
204
+
205
+ plt.figure(figsize=(20,8))
206
+ plt.subplot(1,2,1)
207
+ df['race'].value_counts().plot.pie(autopct='%1.1f%%')
208
+ plt.subplot(1,2,2)
209
+ sns.countplot(x='race',data=df)
210
+ plt.xticks(rotation=45)
211
+ plt.ylabel('No. of Individuals')
212
+ df['race'].value_counts()
213
+
214
+
215
+ # Since the data is drawn mostly from the United States, most of the individuals are White. There is also an Other category covering minority races.
216
+
217
+ # In[21]:
218
+
219
+
220
+ plt.figure(figsize=(15,7))
221
+ plt.subplot(1,2,1)
222
+ df['sex'].value_counts().plot.pie(autopct='%1.1f%%')
223
+ plt.subplot(1,2,2)
224
+ sns.countplot(x='sex',data=df)
225
+ plt.ylabel('No. of Individuals')
226
+ df['sex'].value_counts()
227
+
228
+
229
+ # There are more than twice as many working men as women.
230
+
231
+ # In[22]:
232
+
233
+
234
+ plt.figure(figsize=(15,8))
235
+ sns.countplot(x='native.country',data=df)
236
+ plt.xticks(rotation=90)
237
+ plt.ylabel('No. of Individuals')
238
+ df['native.country'].value_counts()
239
+
240
+
241
+ # The majority of the people are native to the U.S.; people from other countries do appear, but their numbers are very low. The second-largest group is from Mexico, a neighboring country.
242
+
243
+ # In[23]:
244
+
245
+
246
+ plt.figure(figsize=(8,6))
247
+ sns.histplot(df['age'],kde=True,color='r')
248
+ plt.ylabel('No. of Individuals')
249
+ print('Minimum',df['age'].min())
250
+ print('Maximum',df['age'].max())
251
+
252
+
253
+ # The minimum age of a working individual is 17 and the maximum is 90, well past retirement, but the majority of working people are aged 25 to 45. The data is skewed to the right.
254
+
255
+ # In[24]:
256
+
257
+
258
+ plt.figure(figsize=(8,6))
259
+ sns.histplot(df['fnlwgt'],kde=True,color='k')
260
+ plt.ylabel('No. of Individuals')
261
+ print('Minimum',df['fnlwgt'].min())
262
+ print('Maximum',df['fnlwgt'].max())
263
+
264
+
265
+ # fnlwgt is assigned from a combination of features and peaks around 0.2*1e6. The data does not follow a normal distribution and is right-skewed.
266
+
267
+ # In[25]:
268
+
269
+
270
+ plt.figure(figsize=(15,12))
271
+ sns.distplot(df['capital.gain'],color='m', kde_kws={"color": "k"})
272
+ print('Minimum',df['capital.gain'].min())
273
+ print('Maximum',df['capital.gain'].max())
274
+
275
+
276
+ # The minimum capital gain is 0 and the range extends to 99999, but most people have a gain of around 1000 or less. The data is highly skewed with a very long tail due to large outliers; those outliers are the very few wealthy people with very large capital gains.
277
+
278
+ # In[26]:
279
+
280
+
281
+ plt.figure(figsize=(15,12))
282
+ sns.distplot(df['capital.loss'],color='g', kde_kws={"color": "k"})
283
+ print('Minimum',df['capital.loss'].min())
284
+ print('Maximum',df['capital.loss'].max())
285
+
286
+
287
+ # The minimum capital loss is 0 and the range extends above 4000, but most people have a loss of around 1000 or less, with a slight second peak near 2000. The data is highly skewed with a long tail to the right.
288
+
289
+ # In[27]:
290
+
291
+
292
+ plt.figure(figsize=(15,12))
293
+ sns.distplot(df['hours.per.week'],color='b', kde_kws={"color": "k"})
294
+ print('Minimum',df['hours.per.week'].min())
295
+ print('Maximum',df['hours.per.week'].max())
296
+
297
+
298
+ # Most people work 40 hours a week, and there is a good chance they belong to the private sector. Some work as little as 1 hour a week and others as many as 99 hours a week, which may well be armed-forces personnel. This column shows less skewness than the other features in the dataframe.
299
+
300
+ # In[28]:
301
+
302
+
303
+ for i in cont:
304
+ sns.boxplot(df[i])
305
+ plt.figure()
306
+
307
+
308
+ # There are outliers in all the features, and capital.gain and capital.loss have a very large number of them.
309
+
310
+ # ### Bivariate Analysis
311
+
312
+ # In[29]:
313
+
314
+
315
+ plt.figure(figsize=(8,6))
316
+ sns.stripplot(x='income',y='workclass',data=df)
317
+
318
+
319
+ # Individuals from every workclass earn >50K except Never-worked and Without-pay, and even those two categories have few people in the <=50K class.
320
+
321
+ # In[30]:
322
+
323
+
324
+ plt.figure(figsize=(8,6))
325
+ sns.stripplot(x='income',y='education',data=df)
326
+
327
+
328
+ # No individual with only preschool education earns >50K, while a few earn >50K even with just 1st-4th or 5th-6th grade education. It is also worth noticing that some doctorates and Prof-school graduates earn <=50K despite such high education.
329
+
330
+ # In[31]:
331
+
332
+
333
+ plt.figure(figsize=(8,6))
334
+ sns.stripplot(x='income',y='education.num',data=df)
335
+
336
+
337
+ # It is clearly seen that as the education number increases, the chance of earning >50K also increases.
338
+
339
+ # In[32]:
340
+
341
+
342
+ plt.figure(figsize=(8,6))
343
+ sns.stripplot(x='income',y='marital.status',data=df)
344
+
345
+
346
+ # There are few individuals with a Married-AF-spouse status, which is why that count is low in both categories, while people with Married-spouse-absent status are comparatively fewer in the >50K income category.
347
+
348
+ # In[33]:
349
+
350
+
351
+ plt.figure(figsize=(8,6))
352
+ sns.stripplot(x='income',y='occupation',data=df)
353
+
354
+
355
+ # Very few people in the Armed-Forces and Priv-house-serv occupations earn >50K, while all the other occupation categories are distributed fairly evenly across both income classes.
356
+
357
+ # In[34]:
358
+
359
+
360
+ plt.figure(figsize=(8,15))
361
+ sns.stripplot(x='income',y='native.country',data=df)
362
+
363
+
364
+ # The graph seems to show that people from other countries have a lower chance of earning >50K, but that is misleading: the number of individuals from countries other than the U.S. is very small. Still, among them there are more people in the <=50K category than in >50K.
365
+
366
+ # In[35]:
367
+
368
+
369
+ plt.figure(figsize=(6,8))
370
+ sns.boxenplot(x='income',y='age',data=df,palette="Dark2")
371
+
372
+
373
+ # People earning >50K have a higher mean age, although some individuals still earn <=50K even at a very high age.
374
+
375
+ # In[36]:
376
+
377
+
378
+ plt.figure(figsize=(6,8))
379
+ sns.boxenplot(x='income',y='fnlwgt',data=df,palette="Dark2_r")
380
+
381
+
382
+ # People are fairly evenly distributed with respect to fnlwgt across the income categories, though individuals with very high fnlwgt tend to fall into the <=50K category.
383
+
384
+ # In[37]:
385
+
386
+
387
+ plt.figure(figsize=(6,8))
388
+ sns.boxenplot(x='income',y='capital.gain',data=df,palette="crest")
389
+
390
+
391
+ # As capital gain increases, more people fall into the >50K category, while the mean of both categories remains close to zero capital gain.
392
+
393
+ # In[38]:
394
+
395
+
396
+ plt.figure(figsize=(6,8))
397
+ sns.boxenplot(x='income',y='capital.loss',data=df,palette="ocean")
398
+
399
+
400
+ # The density in the >50K income category increases with capital loss, while the mean of both categories remains close to zero capital loss.
401
+
402
+ # In[39]:
403
+
404
+
405
+ plt.figure(figsize=(6,8))
406
+ sns.boxenplot(x='income',y='hours.per.week',data=df,palette="rocket")
407
+
408
+
409
+ # People earning >50K work more hours per week on average than those earning <=50K, although people in both categories span the full range from the minimum to the maximum hours per week.
410
+
411
+ # In[40]:
412
+
413
+
414
+ #age vs Categorical features
415
+ fig,ax=plt.subplots(5,2,figsize=(15,55))
416
+ r=0
417
+ c=0
418
+ for i,n in enumerate(cat):
419
+ if i%2==0 and i>0:
420
+ r+=1
421
+ c=0
422
+ graph=sns.stripplot(x=n,y='age',data=df,ax=ax[r,c])
423
+ if n=='native.country' or n=='occupation' or n=='education':
424
+ graph.set_xticklabels(graph.get_xticklabels(),rotation = 90)
425
+ else:
426
+ graph.set_xticklabels(graph.get_xticklabels(),rotation = 45)
427
+ if n!='education.num':
428
+ graph.set(xlabel=None)
429
+ c+=1
430
+
431
+
432
+ # Individuals working in the government sector are at most around 70 to 80 years old, with few outliers, which is presumably their retirement age. Nobody in the Never-worked category is older than about 30. There are no individuals above 70 in the Preschool education category, while Doctorates and Prof-school graduates only appear from their late 20s, since reaching that level of education takes more years of study. The same holds for education.num: as the education number increases, so does age. There is almost nobody above 50 in the Married-AF-spouse category, with just a few outliers. The Widowed category grows as age increases, and there are very few widows at an early age. Fewer people of high age belong to races other than White, there are more working men than women at higher ages, and very few people from other countries are of high age.
433
+
434
+ # In[41]:
435
+
436
+
437
+ #Hours per week vs categorical Feature
438
+ fig,ax=plt.subplots(5,2,figsize=(15,55))
439
+ r=0
440
+ c=0
441
+ for i,n in enumerate(cat):
442
+ if i%2==0 and i>0:
443
+ r+=1
444
+ c=0
445
+ graph=sns.violinplot(x=n,y='hours.per.week',data=df,ax=ax[r,c])
446
+ if n=='native.country' or n=='occupation' or n=='education':
447
+ graph.set_xticklabels(graph.get_xticklabels(),rotation = 90)
448
+ else:
449
+ graph.set_xticklabels(graph.get_xticklabels(),rotation = 45)
450
+ if n!='education.num':
451
+ graph.set(xlabel=None)
452
+ c+=1
453
+
454
+
455
+ # Government employees rarely work more than 80 hours a week. People with less education tend to work more hours per week, which is quite logical. Nobody in the Armed-Forces works more than 60 hours a week, while farmers and transport movers have a higher mean of working hours than other occupations. Individuals whose relationship is Own-child show a high density at only 20 hours a week. Females work fewer hours than men.
456
+
457
+ # In[42]:
458
+
459
+
460
+ #Capital gain vs categorical Feature
461
+ fig,ax=plt.subplots(5,2,figsize=(15,55))
462
+ r=0
463
+ c=0
464
+ for i,n in enumerate(cat):
465
+ if i%2==0 and i>0:
466
+ r+=1
467
+ c=0
468
+ graph=sns.boxplot(x=n,y='capital.gain',data=df,ax=ax[r,c])
469
+ if n=='native.country' or n=='occupation' or n=='education':
470
+ graph.set_xticklabels(graph.get_xticklabels(),rotation = 90)
471
+ else:
472
+ graph.set_xticklabels(graph.get_xticklabels(),rotation = 45)
473
+ if n!='education.num':
474
+ graph.set(xlabel=None)
475
+ c+=1
476
+
477
+
478
+ # The highest capital gains are seen in individuals from the Private or self-employed workclasses. Prof-school has more people with the highest capital gains than any other education category, although even individuals with only preschool education have capital gains above 40000. As the education level increases, capital gain also increases. People from the Armed-Forces have the lowest capital gains, while the most prominent gains are found in the Sales occupation. Whites have higher capital gains than any other race, and men also seem to have higher capital gains than females. There are many people in the <=50K income category with capital gains above 10000.
479
+
480
+ # ### Multivariate Analysis
481
+
482
+ # In[43]:
483
+
484
+
485
+ data=df.groupby(['age','income']).apply(lambda x:x['hours.per.week'].count()).reset_index(name='Hours')
486
+ px.line(data,x='age',y='Hours',color='income',title='age of individuals by Hours of work in the income category ')
487
+
488
+
489
+ # People earning <=50K tend to work many hours at a young age; the hours decrease as age increases, but they still work more hours at a later age than people earning >50K.
490
+
491
+ # In[44]:
492
+
493
+
494
+ plt.figure(figsize=(6,8))
495
+ sns.barplot(x='income',y='age',hue='sex',data=df)
496
+ plt.ylabel('Average age')
497
+
498
+
499
+ # As the age increases people are paid more but males are paid more than females.
500
+
501
+ # In[45]:
502
+
503
+
504
+ sns.factorplot(x='workclass',y='education.num',hue='income',data=df)
505
+ plt.xticks(rotation=90)
506
+
507
+
508
+ # Education requirements differ between workclasses, but whatever the workclass, within the same workclass people with a higher education level earn more. It is also worth noticing that nobody from the Without-pay or Never-worked workclass earns more than 50K, which is logical.
509
+
510
+ # In[46]:
511
+
512
+
513
+ sns.factorplot(x='sex',y='education.num',hue='income',data=df)
514
+ plt.xticks(rotation=90)
515
+
516
+
517
+ # Females with a higher education level earn the same as men with a lower education level, in whichever income category they fall.
518
+
519
+ # In[47]:
520
+
521
+
522
+ sns.factorplot(x='race',y='education.num',hue='income',data=df)
523
+ plt.xticks(rotation=90)
524
+
525
+
526
+ # The Asian-Pac-Islander race has comparatively more education than people of other races earning the same amount. Amer-Indians and some other races earn >50K with the lowest education levels.
527
+
528
+ # In[48]:
529
+
530
+
531
+ sns.factorplot(x='occupation',y='education.num',hue='income',data=df)
532
+ plt.xticks(rotation=90)
533
+
534
+
535
+ # People with the highest education level are in the Armed-Forces, yet people with quite low education in the Handlers-cleaners and Transport-moving occupations earn as much as they do; the same holds for Prof-specialty. In Priv-house-serv the education gap between those earning >50K and <=50K is the largest, while in Prof-specialty it is the smallest.
536
+
537
+ # In[49]:
538
+
539
+
540
+ plt.figure(figsize=(8,8))
541
+ sns.scatterplot(x='age',y='hours.per.week',hue='income',data=df)
542
+
543
+
544
+ # From the scatterplot between age, hours.per.week and income, we observe that a person needs to be >30 to be earning more than 50K, else needs to work at least 60 hours.per.week to earn >50K.
545
+
546
+ # In[50]:
547
+
548
+
549
+ plt.figure(figsize=(10,10))
550
+ sns.heatmap(df.corr(),annot=True)
551
+
552
+
553
+ # Only a few features appear in the heat map because most columns are of object type. From it we can see that the independent features do not have much correlation with each other, i.e. there is no multicollinearity.
554
+
555
+ # # Feature Engineering
556
+
557
+ # ###### Encoding
558
+
559
+ # In[51]:
560
+
561
+
562
+ from sklearn.preprocessing import LabelEncoder, OrdinalEncoder
563
+ l=LabelEncoder()
564
+ o=OrdinalEncoder()
565
+
566
+
567
+ # In[52]:
568
+
569
+
570
+ #We use ordinal encoder to Encode Independent features
571
+ for i in df.columns:
572
+ if df[i].dtypes=='O' and i!='income':
573
+ df[i]=o.fit_transform(df[i].values.reshape(-1,1))
574
+
575
+
576
+ # In[53]:
577
+
578
+
579
+ #We use label encoder to encode label
580
+ df['income']=l.fit_transform(df['income'])
581
+
582
+
583
+ # ##### Removing Outliers
584
+
585
+ # In[54]:
586
+
587
+
588
+ from scipy.stats import zscore
589
+
590
+
591
+ # In[55]:
592
+
593
+
594
+ #Method to find optimum threshold
595
+ def threshold():
596
+ for i in np.arange(3,5,0.2):
597
+ data=df.copy()
598
+ data=data[(z<i).all(axis=1)]
599
+ loss=(df.shape[0]-data.shape[0])/df.shape[0]*100
600
+ print('With threshold {} data loss is {}%'.format(np.round(i,1),np.round(loss,2)))
601
+
602
+
603
+ # In[56]:
604
+
605
+
606
+ z=np.abs(zscore(df))
607
+ threshold()
608
+
609
+
610
+ # From the above we choose a threshold of 4.2, as the data is precious and we cannot afford to lose more than 8% of it.
611
+
612
+ # In[57]:
613
+
614
+
615
+ df=df[(z<4.2).all(axis=1)]
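+ # (Added annotation) This keeps only the rows whose absolute z-score is below 4.2 in every column,
+ # i.e. rows where no feature lies more than 4.2 standard deviations from its mean.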
616
+
617
+
618
+ # ###### Removing Skewness
619
+
620
+ # In[58]:
621
+
622
+
623
+ #using Power transformer to remove skewness
624
+ from sklearn.preprocessing import PowerTransformer
625
+ pt=PowerTransformer()
626
+
627
+
628
+ # In[59]:
629
+
630
+
631
+ for i in cont:
632
+ if np.abs(df[i].skew())>0.5:
633
+ df[i]=pt.fit_transform(df[i].values.reshape(-1,1))
634
+
635
+
636
+ # In[60]:
637
+
638
+
639
+ for i in cont:
640
+ sns.distplot(df[i])
641
+ plt.figure()
642
+
643
+
644
+ # A lot of the skewness has been reduced, but we cannot remove more than this.
645
+
646
+ # In[61]:
647
+
648
+
649
+ #Separating dependent and independent features.
650
+ x=df.copy()
651
+ x.drop('income',axis=1,inplace=True)
652
+ y=df['income']
653
+
654
+
655
+ # ##### Handling Imbalanced Data
656
+
657
+ # In[62]:
658
+
659
+
660
+ #Oversampling using Smote
661
+ from imblearn.over_sampling import SMOTE
662
+ over=SMOTE()
663
+
664
+
665
+ # In[63]:
666
+
667
+
668
+ x,y=over.fit_resample(x,y)
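+ # (Added annotation) SMOTE balances the classes by synthesising new minority-class (>50K) rows,
+ # interpolating between existing minority samples and their nearest minority-class neighbours.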
669
+
670
+
671
+ # In[64]:
672
+
673
+
674
+ plt.figure(figsize=(15,5))
675
+ plt.subplot(1,2,1)
676
+ y.value_counts().plot.pie(autopct='%1.1f%%')
677
+ plt.subplot(1,2,2)
678
+ sns.countplot(y)
679
+ y.value_counts()
680
+
681
+
682
+ # The data is balanced now; both income categories hold 50% of the samples.
683
+
684
+ # ##### Scaling the data
685
+
686
+ # In[65]:
687
+
688
+
689
+ #Scaling the data using min max scaler
690
+ from sklearn.preprocessing import MinMaxScaler
691
+ scaler=MinMaxScaler()
692
+
693
+
694
+ # In[66]:
695
+
696
+
697
+ xd=scaler.fit_transform(x)
698
+ x=pd.DataFrame(xd,columns=x.columns)
699
+
700
+
701
+ # # Modelling Phase
702
+
703
+ # In[67]:
704
+
705
+
706
+ #We import Classification Models
707
+ from sklearn.naive_bayes import GaussianNB
708
+ from sklearn.neighbors import KNeighborsClassifier
709
+ from sklearn.linear_model import LogisticRegression
710
+ from sklearn.tree import DecisionTreeClassifier
711
+ from sklearn.ensemble import RandomForestClassifier
712
+ from sklearn.ensemble import AdaBoostClassifier
713
+ from sklearn.ensemble import GradientBoostingClassifier
714
+ from xgboost import XGBClassifier
715
+
716
+
717
+ # In[68]:
718
+
719
+
720
+ from sklearn.model_selection import train_test_split, cross_val_score
721
+
722
+
723
+ # In[69]:
724
+
725
+
726
+ from sklearn.metrics import accuracy_score,confusion_matrix,classification_report,roc_auc_score,roc_curve
727
+
728
+
729
+ # In[70]:
730
+
731
+
732
+ #Function to find the best random state
733
+ def randomstate(x,y):
734
+ maxx=0
735
+ model=LogisticRegression()
736
+ for i in range(1,201):
737
+ xtrain,xtest,ytrain,ytest=train_test_split(x,y,test_size=0.25,random_state=i)
738
+ model.fit(xtrain,ytrain)
739
+ p=model.predict(xtest)
740
+ accu=accuracy_score(p,ytest)
741
+ if accu>maxx:
742
+ maxx=accu
743
+ j=i
744
+ return j
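+ # (Added annotation) This tries random_state values 1 to 200 and returns the seed whose train/test
+ # split gives the highest logistic-regression accuracy; note that this tunes the split itself, so the
+ # test scores reported later can be slightly optimistic.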
745
+
746
+
747
+ # In[71]:
748
+
749
+
750
+ #To evaluate the performance of all the models
751
+ def performance(p,ytest,m,xtest,s):
752
+ print('------------------------------------',m,'------------------------------------')
753
+ print('Accuracy',np.round(accuracy_score(p,ytest),4))
754
+ print('----------------------------------------------------------')
755
+ print('Mean of Cross Validation Score',np.round(s.mean(),4))
756
+ print('----------------------------------------------------------')
757
+ print('AUC_ROC Score',np.round(roc_auc_score(ytest,m.predict_proba(xtest)[:,1]),4))
758
+ print('----------------------------------------------------------')
759
+ print('Confusion Matrix')
760
+ print(confusion_matrix(p,ytest))
761
+ print('----------------------------------------------------------')
762
+ print('Classification Report')
763
+ print(classification_report(p,ytest))
764
+
765
+
766
+ # In[72]:
767
+
768
+
769
+ #Creating a list of models which will be created one by one
770
+ models=[GaussianNB(),KNeighborsClassifier(),LogisticRegression(),DecisionTreeClassifier(),
771
+ RandomForestClassifier(),AdaBoostClassifier(),GradientBoostingClassifier(),XGBClassifier(verbosity=0)]
772
+
773
+
774
+ # In[73]:
775
+
776
+
777
+ #Creates and trains model from the models list
778
+ def createmodel(x,y):
779
+ xtrain,xtest,ytrain,ytest=train_test_split(x,y,test_size=0.25,random_state=randomstate(x,y))
780
+ for i in models:
781
+ model=i
782
+ model.fit(xtrain,ytrain)
783
+ p=model.predict(xtest)
784
+ score=cross_val_score(model,x,y,cv=10)
785
+ performance(p,ytest,model,xtest,score)
786
+
787
+
788
+ # In[74]:
789
+
790
+
791
+ createmodel(x,y)
792
+
793
+
794
+ # Random Forest, Gradient Boost, Xtreme Gradient Boost give us the best performance, so we further try hyperparameter tuning on them
795
+
796
+ # # Hyperparameter Tuning
797
+
798
+ # In[75]:
799
+
800
+
801
+ from sklearn.model_selection import GridSearchCV
802
+
803
+
804
+ # In[76]:
805
+
806
+
807
+ xtrain,xtest,ytrain,ytest=train_test_split(x,y,test_size=0.25,random_state=randomstate(x,y))
808
+
809
+
810
+ # ##### Random Forest
811
+
812
+ # In[77]:
813
+
814
+
815
+ params={'n_estimators':[100,300,500],
816
+ 'criterion':['gini','entropy'],
817
+ 'max_depth':[None,1,2,3,4,5,6,7,8,9,10],
818
+ 'max_features':['sqrt','auto','log2']}
819
+
820
+
821
+ # In[78]:
822
+
823
+
824
+ g=GridSearchCV(RandomForestClassifier(),params,cv=10)
825
+
826
+
827
+ # In[79]:
828
+
829
+
830
+ g.fit(xtrain,ytrain)
831
+
832
+
833
+ # In[80]:
834
+
835
+
836
+ print(g.best_params_)
837
+ print(g.best_estimator_)
838
+ print(g.best_score_)
839
+
840
+
841
+ # In[81]:
842
+
843
+
844
+ m=RandomForestClassifier(max_features='log2', n_estimators=500)
845
+ m.fit(xtrain,ytrain)
846
+ p=m.predict(xtest)
847
+ score=cross_val_score(m,x,y,cv=10)
848
+ performance(p,ytest,m,xtest,score)
849
+
850
+
851
+ # ##### Gradient Boost
852
+
853
+ # In[82]:
854
+
855
+
856
+ from sklearn.model_selection import RandomizedSearchCV
857
+
858
+
859
+ # In[83]:
860
+
861
+
862
+ params={'n_estimators':[100,300,500],
863
+ 'learning_rate':[0.001,0.01,0.10,],
864
+ 'subsample':[0.5,1],
865
+ 'max_depth':[1,2,3,4,5,6,7,8,9,10,None]}
866
+
867
+
868
+ # In[84]:
869
+
870
+
871
+ g=RandomizedSearchCV(GradientBoostingClassifier(),params,cv=10)
872
+
873
+
874
+ # In[85]:
875
+
876
+
877
+ g.fit(xtrain,ytrain)
878
+
879
+
880
+ # In[86]:
881
+
882
+
883
+ print(g.best_params_)
884
+ print(g.best_estimator_)
885
+ print(g.best_score_)
886
+
887
+
888
+ # In[87]:
889
+
890
+
891
+ m=GradientBoostingClassifier(max_depth=8, subsample=0.5)
892
+ m.fit(xtrain,ytrain)
893
+ p=m.predict(xtest)
894
+ score=cross_val_score(m,x,y,cv=10)
895
+ performance(p,ytest,m,xtest,score)
896
+
897
+
898
+ # ##### Xtreme Gradient Boost
899
+
900
+ # In[88]:
901
+
902
+
903
+ params={
904
+ "learning_rate" : [0.01,0.05, 0.10, 0.15, ] ,
905
+ "max_depth" : [ 3, 4, 5, 6, 8, 10, 12, 15],
906
+ "min_child_weight" : [ 1, 3, 5, 7 ],
907
+ "gamma" : [ 0.0, 0.1, 0.2 , 0.3, 0.4 ],
908
+ "colsample_bytree" : [ 0.3, 0.4, 0.5 , 0.7 ]
909
+ }
910
+
911
+
912
+ # In[89]:
913
+
914
+
915
+ g=RandomizedSearchCV(XGBClassifier(),params,cv=10)
916
+
917
+
918
+ # In[90]:
919
+
920
+
921
+ g.fit(xtrain,ytrain)
922
+
923
+
924
+ # In[91]:
925
+
926
+
927
+ print(g.best_params_)
928
+ print(g.best_estimator_)
929
+ print(g.best_score_)
930
+
931
+
932
+ # In[92]:
933
+
934
+
935
+ m=XGBClassifier(colsample_bytree=0.3, gamma= 0.1, learning_rate= 0.15, max_depth= 10, min_child_weight= 5)
936
+ m.fit(xtrain,ytrain)
937
+ p=m.predict(xtest)
938
+ score=cross_val_score(m,x,y,cv=10)
939
+ performance(p,ytest,m,xtest,score)
940
+
941
+
942
+ # We choose random forest as our final model because it gives the highest cross-validation score and the smallest gap between its accuracy score and its cross-validation score.
943
+
944
+ # # Finalizing the model
945
+
946
+ # In[93]:
947
+
948
+
949
+ model=RandomForestClassifier(max_features='log2', n_estimators=500)
950
+ model.fit(xtrain,ytrain)
951
+ p=model.predict(xtest)
952
+ score=cross_val_score(model,x,y,cv=10)
953
+
954
+
955
+ # # Evaluation Metrics
956
+
957
+ # In[94]:
958
+
959
+
960
+ performance(p,ytest,model,xtest,score)
961
+
962
+
963
+ # In[95]:
964
+
965
+
966
+ fpred=pd.Series(model.predict_proba(xtest)[:,1])
967
+ fpr,tpr,threshold=roc_curve(ytest,fpred)
968
+
969
+
970
+ # In[96]:
971
+
972
+
973
+ plt.plot(fpr,tpr,color='k',label='ROC')
974
+ plt.plot([0,1],[0,1],color='b',linestyle='--')
975
+ plt.xlabel('False Positive Rate')
976
+ plt.ylabel('True Positive Rate')
977
+ plt.title('ROC-AUC curve')
978
+ plt.legend()
979
+
980
+
981
+ # # Saving the model
982
+
983
+ # In[97]:
984
+
985
+
986
+ import joblib
987
+ joblib.dump(model,'census_income.obj')
988
+
989
+
990
+ # In[ ]:
991
+
992
+
993
+
994
+
AdultNoteBook/Kernels/AdaBoost/5-income-classification-using-meta-learning.ipynb ADDED
The diff for this file is too large to render. See raw diff
 
AdultNoteBook/Kernels/AdaBoost/5-income-classification-using-meta-learning.py ADDED
@@ -0,0 +1,632 @@
1
+ #!/usr/bin/env python
2
+ # coding: utf-8
3
+
4
+ # # Adult Census Income Classification using Meta Learning
5
+
6
+ # In[1]:
7
+
8
+
9
+ #importing the required libraries
10
+ import numpy as np
11
+ import pandas as pd
12
+ import matplotlib.pyplot as plt
13
+ import seaborn as sns
14
+ plt.style.use('ggplot')
15
+
16
+ from imblearn.over_sampling import RandomOverSampler
17
+ from sklearn.model_selection import train_test_split
18
+
19
+ from numpy import mean, std
20
+
21
+
22
+ # In[2]:
23
+
24
+
25
+ #reading the dataset and converting it to dataframe
26
+ df = pd.read_csv("../input/adult-census-income/adult.csv")
27
+
28
+
29
+ # In[3]:
30
+
31
+
32
+ #Viewing the top 5 rows of our dataset
33
+ df.head()
34
+
35
+
36
+ # ## Exploratory Data Analysis
37
+
38
+ # **Income - Target column**
39
+
40
+ # In[4]:
41
+
42
+
43
+ sns.countplot(df.income)
44
+
45
+
46
+ # *As we can see, there is a **class imbalance**: the ">50K" class is much smaller. So, we will do **Random Over-Sampling** during preprocessing.*
47
+ #
48
+
49
+ # **Age**
50
+
51
+ # In[5]:
52
+
53
+
54
+ sns.distplot(df[df.income=='<=50K'].age, color='g')
55
+ sns.distplot(df[df.income=='>50K'].age, color='r')
56
+
57
+
58
+ # *We can observe a rough margin **around 30**. We will divide age into 2 parts, i.e. under 30 and over 30. We need to check whether this is useful for our model during testing.*
59
+
60
+ # **Workclass**
61
+
62
+ # In[6]:
63
+
64
+
65
+ plt.xticks(rotation=90)
66
+ sns.countplot(df.workclass, hue=df.income, palette='tab10')
67
+
68
+
69
+ # *The majority of the data falls under **Private**. So, we will convert this column into Private vs. not-Private.*
70
+
71
+ # **fnlwgt**
72
+
73
+ # In[7]:
74
+
75
+
76
+ sns.distplot(df[df.income=='<=50K'].fnlwgt, color='r')
77
+ sns.distplot(df[df.income=='>50K'].fnlwgt, color='g')
78
+
79
+
80
+ # *This is a very **ambiguous** attribute. Will check during testing.*
81
+
82
+ # **Education**
83
+
84
+ # In[8]:
85
+
86
+
87
+ plt.xticks(rotation=90)
88
+ sns.countplot(df.education, hue=df.income, palette='muted')
89
+
90
+
91
+ # **education.num**
92
+
93
+ # In[9]:
94
+
95
+
96
+ sns.countplot(df["education.num"], hue=df.income)
97
+
98
+
99
+ # **marital.status**
100
+
101
+ # In[10]:
102
+
103
+
104
+ plt.xticks(rotation=90)
105
+ sns.countplot(df['marital.status'], hue=df.income)
106
+
107
+
108
+ # *We observe that the majority of the ">50K" class is **Married-civ-spouse**. So we'll encode it as 1 and the others as 0.*
109
+
110
+ # **occupation**
111
+
112
+ # In[11]:
113
+
114
+
115
+ plt.xticks(rotation=90)
116
+ sns.countplot(df.occupation, hue=df.income, palette='rocket')
117
+
118
+
119
+ # **relationship**
120
+
121
+ # In[12]:
122
+
123
+
124
+ plt.xticks(rotation=90)
125
+ sns.countplot(df.relationship, hue=df.income, palette='muted')
126
+
127
+
128
+ # **race**
129
+
130
+ # In[13]:
131
+
132
+
133
+ plt.xticks(rotation=90)
134
+ sns.countplot(df.race, hue=df.income, palette='Set2')
135
+
136
+
137
+ # **sex**
138
+
139
+ # In[14]:
140
+
141
+
142
+ plt.xticks(rotation=90)
143
+ sns.countplot(df.sex, hue=df.income)
144
+
145
+
146
+ # **capital.gain**
147
+
148
+ # In[15]:
149
+
150
+
151
+ df['capital.gain'].value_counts()
152
+
153
+
154
+ # **capital.loss**
155
+
156
+ # In[16]:
157
+
158
+
159
+ df['capital.loss'].value_counts()
160
+
161
+
162
+ # **hours.per.week**
163
+
164
+ # In[17]:
165
+
166
+
167
+ sns.distplot(df[df.income=='<=50K']['hours.per.week'], color='b')
168
+ sns.distplot(df[df.income=='>50K']['hours.per.week'], color='r')
169
+
170
+
171
+ # **native.country**
172
+
173
+ # In[18]:
174
+
175
+
176
+ df['native.country'].value_counts()
177
+
178
+
179
+ # ## Preprocessing
180
+
181
+ # ### Finding and Handling Missing Data
182
+ #
183
+ # *Observing the dataset, I found that missing values are marked as "?". So, we will now convert them to numpy.nan (null values).*
184
+
185
+ # In[19]:
186
+
187
+
188
+ df[df.select_dtypes("object") =="?"] = np.nan
189
+ nans = df.isnull().sum()
190
+ if len(nans[nans>0]):
191
+ print("Missing values detected.\n")
192
+ print(nans[nans>0])
193
+ else:
194
+ print("No missing values. You are good to go.")
195
+
196
+
197
+ # In[20]:
198
+
199
+
200
+ #majority of the values are "Private". Lets fill the missing values as "Private".
201
+ df.workclass.fillna("Private", inplace=True)
202
+
203
+ df.occupation.fillna(method='bfill', inplace=True)
204
+
205
+ #majority of the values are "United-States". Lets fill the missing values as "United-States".
206
+ df['native.country'].fillna("United-States", inplace=True)
207
+
208
+ print("Handled missing values successfully.")
209
+
210
+
211
+ # In[21]:
212
+
213
+
214
+ from sklearn.preprocessing import LabelEncoder
215
+ from sklearn.utils import column_or_1d
216
+
217
+ class MyLabelEncoder(LabelEncoder):
218
+
219
+ def fit(self, y, arr=[]):
220
+ y = column_or_1d(y, warn=True)
221
+ if arr == []:
222
+ arr=y
223
+ self.classes_ = pd.Series(arr).unique()
224
+ return self
225
+
226
+ le = MyLabelEncoder()
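+ # (Added annotation) MyLabelEncoder.fit accepts an explicit class order via `arr`, so ordered categories
+ # such as the education levels below are encoded in a meaningful order rather than alphabetically; when
+ # no order is supplied it falls back to the order of first appearance in the column.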
227
+
228
+
229
+ # ### Feature Engineering and Encoding the columns
230
+
231
+ # In[22]:
232
+
233
+
234
+ # age_enc = pd.cut(df.age, bins=(0,25,45,65,100), labels=(0,1,2,3))
235
+ df['age_enc'] = df.age.apply(lambda x: 1 if x > 30 else 0)
236
+
237
+ def prep_workclass(x):
238
+ if x == 'Never-worked' or x == 'Without-pay':
239
+ return 0
240
+ elif x == 'Private':
241
+ return 1
242
+ elif x == 'State-gov' or x == 'Local-gov' or x == 'Federal-gov':
243
+ return 2
244
+ elif x == 'Self-emp-not-inc':
245
+ return 3
246
+ else:
247
+ return 4
248
+
249
+ df['workclass_enc'] = df.workclass.apply(prep_workclass)
250
+
251
+ df['fnlwgt_enc'] = df.fnlwgt.apply(lambda x: 0 if x>200000 else 1)
252
+
253
+ le.fit(df.education, arr=['Preschool', '1st-4th', '5th-6th', '7th-8th', '9th','10th', '11th', '12th',
254
+ 'HS-grad', 'Prof-school', 'Assoc-acdm', 'Assoc-voc', 'Some-college', 'Bachelors', 'Masters', 'Doctorate'])
255
+ df['education_enc'] = le.transform(df.education)
256
+
257
+
258
+ df['education.num_enc'] = df['education.num'].apply(lambda x: 1 if x>=9 else 0)
259
+
260
+ df['marital.status_enc'] = df['marital.status'].apply(lambda x: 1 if x=='Married-civ-spouse' or x == 'Married-AF-spouse' else 0)
261
+
262
+ def prep_occupation(x):
263
+ if x in ['Prof-specialty', 'Exec-managerial', 'Tech-support', 'Protective-serv']:
264
+ return 2
265
+ elif x in ['Sales', 'Craft-repair']:
266
+ return 1
267
+ else:
268
+ return 0
269
+
270
+ df['occupation_enc'] = df.occupation.apply(prep_occupation)
271
+
272
+ df['relationship_enc'] = df.relationship.apply(lambda x: 1 if x in ['Husband', 'Wife'] else 0)
273
+
274
+ df['race_enc'] = df.race.apply(lambda x: 1 if x=='White' else 0)
275
+
276
+ df['sex_enc'] = df.sex.apply(lambda x: 1 if x=='Male' else 0)
277
+
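+ # (Descriptive note: the bins below split capital.gain into 0 = no gain,
+ # 1 = a positive gain up to the median of the non-zero gains, and 2 = above
+ # that median; capital.loss is binned the same way.)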
278
+ df['capital.gain_enc'] = pd.cut(df["capital.gain"],
279
+ bins=[-1,0,df[df["capital.gain"]>0]["capital.gain"].median(), df["capital.gain"].max()], labels=(0,1,2)).astype('int64')
280
+
281
+ df['capital.loss_enc'] = pd.cut(df["capital.loss"],
282
+ bins=[-1,0,df[df["capital.loss"]>0]["capital.loss"].median(), df["capital.loss"].max()], labels=(0,1,2)).astype('int64')
283
+
284
+ # hpw_enc = pd.cut(df['hours.per.week'], bins= (0,30,40,53,168), labels=(0,1,2,3))
285
+ df['hours.per.week_enc'] = pd.qcut(df['hours.per.week'], q=5, labels=(0,1,2,3), duplicates='drop').astype('int64')
286
+
287
+ df['native.country_enc'] = df['native.country'].apply(lambda x: 1 if x=='United-States' else 0)
288
+
289
+ df['income_enc'] = df.income.apply(lambda x: 1 if x==">50K" else 0)
290
+
291
+ print("Encoding complete.")
292
+
293
+
294
+ # In[23]:
295
+
296
+
297
+ df.select_dtypes("object").info()
298
+
299
+
300
+ # In[24]:
301
+
302
+
303
+ # dropping the original columns that have now been encoded - education, sex, income
304
+ df.drop(['education', 'sex', 'income'], 1, inplace=True)
305
+
306
+
307
+ # ### Label Encoding without Feature Engineering
308
+
309
+ # In[25]:
310
+
311
+
312
+ for feature in df.select_dtypes("object").columns:
313
+ df[feature]=le.fit_transform(df[feature])
314
+
315
+
316
+ # ### Feature Selection
317
+
318
+ # In[26]:
319
+
320
+
321
+ df.info()
322
+
323
+
324
+ # In[27]:
325
+
326
+
327
+ #Visualizing the pearson correlation with the target class
328
+ pcorr = df.drop('income_enc',1).corrwith(df.income_enc)
329
+ plt.figure(figsize=(10,6))
330
+ plt.title("Pearson Correlation of Features with Income")
331
+ plt.xlabel("Features")
332
+ plt.ylabel("Correlation Coeff")
333
+ plt.xticks(rotation=90)
334
+ plt.bar(pcorr.index, list(map(abs,pcorr.values)))
335
+
336
+
337
+ # From the Pearson correlation plot, we can see that a few columns have very **low** correlation with the target column, so we'll drop them.
338
+
339
+ # In[28]:
340
+
341
+
342
+ df.drop(['workclass', 'fnlwgt','occupation', 'race', 'native.country', 'fnlwgt_enc', 'race_enc', 'native.country_enc'], 1, inplace=True)
343
+
344
+
345
+ # In[29]:
346
+
347
+
348
+ sns.heatmap(df.corr().apply(abs))
349
+
350
+
351
+ # **Dropping redundant features**
352
+
353
+ # We can see that **education_enc, education.num_enc and education.num** as well as **relationship_enc and marital.status_enc** have **high correlation**. So, we will only keep one of them based on their correlation with income_enc.
354
+ #
355
+ # We also have some redundant features, since we have engineered new features from them (age, capital.gain, etc.).
356
+
357
+ # In[30]:
358
+
359
+
360
+ df.drop(['age', 'education.num_enc', 'education_enc', 'marital.status_enc', 'capital.gain', 'capital.loss', 'hours.per.week'], 1, inplace = True)
361
+
362
+
363
+ # In[31]:
364
+
365
+
366
+ df.info()
367
+
368
+
369
+ # In[32]:
370
+
371
+
372
+ X = df.drop('income_enc', 1)
373
+ y = df.income_enc
374
+
375
+
376
+ # ### Train Test Split (3:1)
377
+
378
+ # In[33]:
379
+
380
+
381
+ X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=42, stratify=y)
382
+
383
+
384
+ # In[34]:
385
+
386
+
387
+ print("No. of rows in training data:",X_train.shape[0])
388
+ print("No. of rows in testing data:",X_test.shape[0])
389
+
390
+
391
+ # ### Random Over Sampling
392
+
393
+ # *We can see the class imbalance in our target. This results in models with poor predictive performance, specifically for the minority class, so we apply random over-sampling.*
394
+
395
+ # In[35]:
396
+
397
+
398
+ oversample = RandomOverSampler(sampling_strategy=0.5)  # resample the minority class up to half the size of the majority class
399
+ X_over, y_over = oversample.fit_resample(X_train, y_train)
400
+
401
+
402
+ # In[36]:
403
+
404
+
405
+ y_over.value_counts()
406
+
407
+
408
+ # ## Model Preparation
409
+
410
+ # In[37]:
411
+
412
+
413
+ #Model Imports
414
+ from sklearn.model_selection import StratifiedKFold, cross_val_score
415
+ from sklearn.preprocessing import StandardScaler
416
+ from sklearn.pipeline import make_pipeline
417
+ from sklearn.metrics import classification_report, confusion_matrix
418
+
419
+ from sklearn.svm import SVC
420
+ from sklearn.linear_model import LogisticRegression
421
+ from sklearn.neighbors import KNeighborsClassifier
422
+ from sklearn.tree import DecisionTreeClassifier
423
+ from sklearn.ensemble import AdaBoostClassifier, ExtraTreesClassifier, GradientBoostingClassifier, RandomForestClassifier, StackingClassifier
424
+ from xgboost import XGBClassifier
425
+ from lightgbm import LGBMClassifier
426
+
427
+
428
+ # In[38]:
429
+
430
+
431
+ seed= 42
432
+
433
+
434
+ # In[39]:
435
+
436
+
437
+ models = {
438
+ 'LR':LogisticRegression(random_state=seed),
439
+ 'SVC':SVC(random_state=seed),
440
+ 'AB':AdaBoostClassifier(random_state=seed),
441
+ 'ET':ExtraTreesClassifier(random_state=seed),
442
+ 'GB':GradientBoostingClassifier(random_state=seed),
443
+ 'RF':RandomForestClassifier(random_state=seed),
444
+ 'XGB':XGBClassifier(random_state=seed),
445
+ 'LGBM':LGBMClassifier(random_state=seed)
446
+ }
447
+
448
+
449
+ # In[40]:
450
+
451
+
452
+ # evaluate a given model using stratified k-fold cross-validation
453
+ def evaluate_models(model, xtrain, ytrain):
454
+ cv = StratifiedKFold(shuffle=True, random_state=seed)
455
+ scores = cross_val_score(model, xtrain, ytrain, scoring='accuracy', cv=cv, error_score='raise')
456
+ return scores
457
+
458
+ def plot_scores(xval,yval,show_value=False):
459
+ plt.ylim(ymax = max(yval)+0.5, ymin = min(yval)-0.5)
460
+ plt.xticks(rotation=45)
461
+ s = sns.barplot(xval,yval)
462
+ if show_value:
463
+ for x,y in zip(range(len(yval)),yval):
464
+ s.text(x,y+0.1,round(y,2),ha="center")
465
+
466
+
467
+ # In[41]:
468
+
469
+
470
+ # evaluate the models on the training split with cross-validation and store the results
471
+ results, names = list(), list()
472
+ for name, model in models.items():
473
+ scores = evaluate_models(model, X_train, y_train)
474
+ results.append(scores)
475
+ names.append(name)
476
+ print('*%s %.3f (%.3f)' % (name, mean(scores), std(scores)))
477
+
478
+
479
+ # In[42]:
480
+
481
+
482
+ plt.boxplot(results, labels=names, showmeans=True)
483
+ plt.show()
484
+
485
+
486
+ # In[43]:
487
+
488
+
489
+ param_grids = {
490
+ 'LR':{'C':[0.001,0.01,0.1,1,10]},
491
+ 'SVC':{'gamma':[0.01,0.02,0.05,0.08,0.1], 'C':range(1,8)},
492
+
493
+ 'AB':{'learning_rate': [0.05, 0.1, 0.2], 'n_estimators': [100, 200, 500]},
494
+
495
+ 'ET':{'max_depth':[5,8,10,12], 'min_samples_split': [5,9,12],
496
+ 'n_estimators': [100,200,500,800]},
497
+
498
+ 'GB':{'learning_rate': [0.05, 0.1, 0.2], 'max_depth':[3,5,9],
499
+ 'min_samples_split': [5,7,9], 'n_estimators': [100,200,500],
500
+ 'subsample':[0.5,0.7,0.9]},
501
+
502
+ 'RF':{'max_depth':[3,5,9,15], 'n_estimators': [100, 200, 500, 1000],
503
+ 'min_samples_split': [5,9,12]},
504
+
505
+ 'XGB':{'max_depth':[3,5,7,9], 'n_estimators': [100, 200, 500],
506
+ 'learning_rate': [0.05, 0.1, 0.2], 'subsample':[0.5,0.7,0.9]},
507
+
508
+ 'LGBM':{'n_estimators': [100,200,500],'learning_rate': [0.05, 0.1, 0.2],
509
+ 'subsample':[0.5,0.7,0.9],'num_leaves': [25,31,50]}
510
+ }
511
+
512
+
513
+ # In[44]:
514
+
515
+
516
+ # !pip install sklearn-deap
517
+ # from evolutionary_search import EvolutionaryAlgorithmSearchCV
518
+
519
+
520
+ # In[45]:
521
+
522
+
523
+ # evaluate the models and store results
524
+ # best_params = []
525
+ # names= []
526
+ # for name, param_grid, model in zip(param_grids.keys(), param_grids.values(), models.values()):
527
+ # eascv = EvolutionaryAlgorithmSearchCV(model, param_grid, verbose=3, cv=3)
528
+ # eascv.fit(X_train,y_train)
529
+ # names.append(name)
530
+ # best_params.append(eascv.best_params_)
531
+ # print(name)
532
+ # print("best score:",eascv.best_score_)
533
+ # print("best params:",eascv.best_params_)
534
+
535
+
536
+ # In[46]:
537
+
538
+
539
+ best_params=[
540
+ {'C': 10},
541
+ {'gamma': 0.1, 'C': 2},
542
+ {'learning_rate': 0.1, 'n_estimators': 500},
543
+ {'max_depth': 12, 'min_samples_split': 9, 'n_estimators': 100},
544
+ {'learning_rate': 0.05, 'max_depth': 3, 'min_samples_split': 9, 'n_estimators': 200, 'subsample': 0.9},
545
+ {'max_depth': 9, 'n_estimators': 200, 'min_samples_split': 5},
546
+ {'max_depth': 3, 'n_estimators': 200, 'learning_rate': 0.1, 'subsample': 0.9},
547
+ {'n_estimators': 100, 'learning_rate': 0.05, 'subsample': 0.9, 'num_leaves': 25}
548
+ ]
549
+
550
+
551
+ # In[47]:
552
+
553
+
554
+ models = [
555
+ ('LR',LogisticRegression(random_state=seed)),
556
+ ('SVC',SVC(random_state=seed)),
557
+ ('AB',AdaBoostClassifier(random_state=seed)),
558
+ ('ET',ExtraTreesClassifier(random_state=seed)),
559
+ ('GB',GradientBoostingClassifier(random_state=seed)),
560
+ ('RF',RandomForestClassifier(random_state=seed)),
561
+ ('XGB',XGBClassifier(random_state=seed)),
562
+ ('LGBM',LGBMClassifier(random_state=seed))
563
+ ]
564
+
565
+
566
+ # In[48]:
567
+
568
+
569
+ for model, param in zip(models, best_params):
570
+ model[1].set_params(**param)
571
+
572
+
573
+ # In[49]:
574
+
575
+
576
+ models.append(('MLModel',StackingClassifier(estimators = models[:-1])))
577
+
578
+
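+ # (A hedged note on the stacked "MLModel": with final_estimator left at its
+ # default, scikit-learn's StackingClassifier fits a LogisticRegression
+ # meta-learner on the cross-validated predictions of the base estimators, i.e.
+ # it is equivalent to passing final_estimator=LogisticRegression().)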
579
+ # In[50]:
580
+
581
+
582
+ scores=[]
583
+ preds=[]
584
+ for model in models:
585
+ model[1].fit(X_train,y_train)
586
+ print(model[0],"trained.")
587
+ scores.append(model[1].score(X_test,y_test))
588
+ preds.append(model[1].predict(X_test))
589
+ print("Results are ready.")
590
+
591
+
592
+ # ## Using Classification Based on Association
593
+
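+ # (Background, stated with some hedging: CBA here refers to the classic
+ # "Classification Based on Associations" approach, which mines class association
+ # rules meeting the given support and confidence thresholds and orders them into
+ # a rule-list classifier; pyarc's CBA(algorithm="m1") implements that
+ # rule-selection step.)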
594
+ # In[51]:
595
+
596
+
597
+ get_ipython().system('pip install pyarc==1.0.23')
598
+ get_ipython().system('pip install pyfim')
599
+ from pyarc import CBA, TransactionDB
600
+
601
+
602
+ # In[52]:
603
+
604
+
605
+ txns_train = TransactionDB.from_DataFrame(X_train.join(y_train))
606
+ txns_test = TransactionDB.from_DataFrame(X_test.join(y_test))
607
+
608
+
609
+ cba = CBA(support=0.15, confidence=0.5, algorithm="m1")
610
+ cba.fit(txns_train)
611
+
612
+
613
+ # In[53]:
614
+
615
+
616
+ cba_score = cba.rule_model_accuracy(txns_test)
617
+ scores.append(cba_score)
618
+ models.append(["CBA"])
619
+
620
+
621
+ # In[54]:
622
+
623
+
624
+ model_names= [i[0] for i in models]
625
+ scores = list(map(lambda x: x*100, scores))
626
+
627
+
628
+ # In[55]:
629
+
630
+
631
+ plot_scores(model_names, scores, True)
632
+
AdultNoteBook/Kernels/AdaBoost/6-income-prediction-eda-to-visuals-0-98-auc.ipynb ADDED
The diff for this file is too large to render. See raw diff
 
AdultNoteBook/Kernels/AdaBoost/6-income-prediction-eda-to-visuals-0-98-auc.py ADDED
@@ -0,0 +1,682 @@
1
+ #!/usr/bin/env python
2
+ # coding: utf-8
3
+
4
+ # ## Importing Important libraries
5
+
6
+ # In[1]:
7
+
8
+
9
+ import pandas as pd
10
+ import numpy as np
11
+ import matplotlib.pyplot as plt
12
+ import seaborn as sns
13
+ import plotly.express as px
14
+ import warnings
15
+ warnings.filterwarnings('ignore')
16
+
17
+
18
+ # #### Loading the data onto notebook.
19
+
20
+ # In[2]:
21
+
22
+
23
+ data = pd.read_csv("../input/adult-census-income/adult.csv")
24
+ data
25
+
26
+
27
+ # ![](https://kiarofoods.com/wp-content/uploads/2019/10/line_break.png)
28
+ # # Exploratory Data Analysis:
29
+ # **Problem Type Identification:** We have the target variable available, so this is a supervised machine learning problem. First, we determine which type of supervised learning problem it is by looking at the target variable.
30
+
31
+ # In[3]:
32
+
33
+
34
+ print(f"Target: 'Income'\nUnique Values in Income: {data.income.unique()}\nNumber of unique values: {data.income.nunique()}")
35
+
36
+
37
+ # In this problem, 'Income' is the target variable. We see that there are only two values to be predicted: either the income is greater than 50K, or it is less than or equal to 50K. We will label encode the target variable.
38
+
39
+ # In[4]:
40
+
41
+
42
+ data['income'] = data['income'].str.replace('<=50K', '0')
43
+ data['income'] = data['income'].str.replace('>50K', '1')
44
+ data['income'] = data['income'].astype(np.int64)
45
+
46
+
47
+ # In[5]:
48
+
49
+
50
+ data.income.dtypes
51
+
52
+
53
+ # We have encoded the target variable and converted it to an integer data type, so this is a binary classification problem with 'Income' as the target. We make a copy of the dataset to work with from here on.
54
+
55
+ # In[6]:
56
+
57
+
58
+ ds = data.copy()
59
+ print(f"Unique values in 'education': {ds.education.nunique()}\nUnique values in 'Education_num': {ds['education.num'].nunique()}")
60
+
61
+
62
+ # We see that for the feature 'Education', we already have the encoded values in feature 'Education_num'. 'Education' will be removed from the dataset.
63
+
64
+ # In[7]:
65
+
66
+
67
+ ds.drop(['education'], axis = 1, inplace = True)
68
+
69
+
70
+ # Checking whether any null values are present in the data; handling them will be the first thing we need to do. Then we look at the data types, value counts and unique values of the other features.
71
+
72
+ # In[8]:
73
+
74
+
75
+ plt.title("Null values in the data", fontsize = 12)
76
+ sns.heatmap(ds.isnull(), cmap = 'inferno')
77
+ plt.show()
78
+
79
+
80
+ # From the heatmap, we see that the dataset contains no explicit null values. However, some features use '?' as a value; **'?' will be treated as a null value.** We move ahead with the feature engineering part, first checking the data types of the columns.
81
+
82
+ # In[9]:
83
+
84
+
85
+ print("Datatype of every feature: ")
86
+ ds.dtypes
87
+
88
+
89
+ # In[10]:
90
+
91
+
92
+ print("Number of unique values in every feature: ")
93
+ ds.nunique()
94
+
95
+
96
+ # *'Workclass', 'Marital_status', 'Occupation', 'Relationship', 'Race', 'Sex', 'Native_country' are the categorical variables in the data*. Proper encoding or conversion of these variables is necessary for the feature engineering. We will look at these attributes and convert them one by one.
97
+ #
98
+ # **'Workclass':** Starting off with the workclass, we look at the number of unique values and the value counts for those values.
99
+
100
+ # In[11]:
101
+
102
+
103
+ ds.workclass.value_counts()
104
+
105
+
106
+ # In workclass, *the majority of people are private employees*, while *a small minority are either working Without-pay or have Never-worked*. We can combine these two values into one. (We would also first strip any stray blank spaces from the values, if any were present.)
107
+
108
+ # In[12]:
109
+
110
+
111
+ ds['workclass'] = ds['workclass'].str.replace('Never-worked', 'Without-pay')
112
+
113
+
114
+ # Now we have 8 unique values in this feature. But we see that **some entries in the column contain '?'. These values can be replaced with NaN values.**
115
+
116
+ # In[13]:
117
+
118
+
119
+ ds['workclass'] = ds['workclass'].replace('?', np.NaN)
120
+
121
+
122
+ # In[14]:
123
+
124
+
125
+ plt.figure(figsize = (10,6))
126
+ plt.title("Income of people according to their workclass", fontsize = 16)
127
+ sns.countplot(y = ds['workclass'], hue = ds['income'])
128
+ plt.show()
129
+
130
+
131
+ # We see that the **majority of people who earn more than 50K a year are from the private sector**, and the same goes for people earning less than 50K. But *for the self-employed sector, the number of people earning > 50K is greater than the number earning < 50K.* Now, moving ahead with replacing the null values and encoding the feature: **we will replace the NaN values in the 'Workclass' feature with the mode of the column, grouped by the 'Occupation' feature.** We then have 7 unique values in the Workclass feature, which we can encode using the frequency encoding technique.
132
+
133
+ # In[15]:
134
+
135
+
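+ # (Descriptive note: the pivot_table below builds a one-row lookup whose columns
+ # are the occupation values and whose entries are the most frequent workclass
+ # observed for each occupation; the missing workclass rows are then filled from
+ # that lookup.)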
136
+ from scipy.stats import mode
137
+ workclass_mode = ds.pivot_table(values='workclass', columns='occupation',aggfunc=(lambda x:mode(x).mode[0]))
138
+ workclass_mode
139
+
140
+
141
+ # In[16]:
142
+
143
+
144
+ loc1 = ds['workclass'].isnull()
145
+ ds.loc[loc1, 'workclass'] = ds.loc[loc1,'occupation'].apply(lambda x: workclass_mode[x])
146
+
147
+
148
+ # In[17]:
149
+
150
+
151
+ workclass_enc = (ds.groupby('workclass').size()) / len(ds)
152
+ print(workclass_enc)
153
+
154
+ ds['workclass_enc'] = ds['workclass'].apply(lambda x : workclass_enc[x])
155
+ ds['workclass_enc'].head(3)
156
+
157
+
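+ # A small hedged sketch: the same frequency-encoding pattern is repeated below for
+ # occupation, marital.status, race and relationship, so it could be factored into a
+ # helper; freq_encode is an illustrative name, not defined in the original kernel.
+ def freq_encode(frame, column):
+     # share of rows taking each category, used as that category's numeric code
+     freq = frame.groupby(column).size() / len(frame)
+     return frame[column].map(freq)
+ # e.g. ds['workclass_enc'] = freq_encode(ds, 'workclass') reproduces the cell above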
158
+ # In[18]:
159
+
160
+
161
+ ds.drop(['workclass'], axis = 1, inplace = True)
162
+
163
+
164
+ # **'Occupation':** Similar to 'Workclass', we will look at the unique values and value counts in the 'Occupation' feature.
165
+
166
+ # In[19]:
167
+
168
+
169
+ ds.occupation.value_counts()
170
+
171
+
172
+ # **We will drop the rows where the occupation is NaN.**
173
+
174
+ # In[20]:
175
+
176
+
177
+ ds['occupation'] = ds['occupation'].replace('?', np.NaN)
178
+ ds = ds.loc[ds['occupation'].isnull() == False]
179
+ ds
180
+
181
+
182
+ # As we can see, after removing the null values from 'occupation' we are left with 30,718 observations.
183
+
184
+ # In[21]:
185
+
186
+
187
+ plt.style.use('ggplot')
188
+ plt.figure(figsize = (10,6))
189
+ plt.title("Income of people according to their occupation", fontsize = 16)
190
+ sns.countplot(y = ds['occupation'], hue = ds['income'])
191
+ plt.show()
192
+
193
+
194
+ # The majority of people whose income is greater than 50K are either executive managers or belong to a professional specialty. Now we encode occupation by the frequency of the values in the column.
195
+
196
+ # In[22]:
197
+
198
+
199
+ occupation_enc = (ds.groupby('occupation').size()) / len(ds)
200
+ print(occupation_enc)
201
+
202
+ ds['occupation_enc'] = ds['occupation'].apply(lambda x : occupation_enc[x])
203
+ ds['occupation_enc'].head(3)
204
+
205
+
206
+ # In[23]:
207
+
208
+
209
+ ds.drop(['occupation'], axis = 1, inplace = True)
210
+
211
+
212
+ # **'Native_country':** We are comparing the income of people inside and outside the USA, so **we will convert all values where the country is not the USA to 'non-usa'.** This way we can one-hot encode the values without inflating the dimensionality.
213
+
214
+ # In[24]:
215
+
216
+
217
+ ds.loc[ds['native.country'] == 'United-States', 'native.country'] = 'usa'
218
+ ds.loc[ds['native.country'] != 'usa', 'native.country'] = 'non_usa'
219
+ ds['native.country'].value_counts()
220
+
221
+
222
+ # In[25]:
223
+
224
+
225
+ plt.style.use('default')
226
+
227
+
228
+ # In[26]:
229
+
230
+
231
+ plt.style.use('seaborn-pastel')
232
+
233
+
234
+ # In[27]:
235
+
236
+
237
+ plt.figure(figsize = (8,3))
238
+ plt.title("Income of people according to their native country", fontsize = 16)
239
+ sns.countplot(y = ds['native.country'], hue = ds['income'])
240
+ plt.show()
241
+
242
+
243
+ # **The majority of people with higher income belong to the USA**. The dataset also contains more people from the USA than from all other countries combined. We encode this feature using one-hot encoding.
244
+
245
+ # In[28]:
246
+
247
+
248
+ ds['country_enc'] = ds['native.country'].map({'usa' : 1, 'non_usa' : 0})
249
+ ds.drop(['native.country'], axis = 1, inplace = True)
250
+
251
+
252
+ # **'Sex':** Similarly, encoding the sex using one hot encoding.
253
+
254
+ # In[29]:
255
+
256
+
257
+ plt.title("Income of people by their sex", fontsize = 16)
258
+ sns.countplot(x = ds['sex'], hue = ds['income'])
259
+ plt.show()
260
+
261
+
262
+ # We can see that males earn more than females, and the dataset contains more men than women. We encode this feature with one-hot encoding.
263
+
264
+ # In[30]:
265
+
266
+
267
+ ds['sex_enc'] = ds['sex'].map({'Male' : 1, 'Female' : 0})
268
+ ds.drop(['sex'], axis = 1, inplace = True)
269
+
270
+
271
+ # **'Marital_status':** Looking at the income of people according to their marital status.
272
+
273
+ # In[31]:
274
+
275
+
276
+ plt.style.use('default')
277
+
278
+
279
+ # In[32]:
280
+
281
+
282
+ plt.style.use('seaborn-talk')
283
+
284
+
285
+ # In[33]:
286
+
287
+
288
+ plt.title("Income of people by Marital Status", fontsize = 16)
289
+ sns.countplot(y = ds['marital.status'], hue = ds['income'])
290
+ plt.show()
291
+
292
+
293
+ # **Married people have a higher income as compared to others.** Encoding the feature
294
+
295
+ # In[34]:
296
+
297
+
298
+ marital_status_enc = (ds.groupby('marital.status').size()) / len(ds)
299
+ print(marital_status_enc)
300
+
301
+ ds['marital_status_enc'] = ds['marital.status'].apply(lambda x : marital_status_enc[x])
302
+ ds['marital_status_enc'].head(3)
303
+
304
+
305
+ # In[35]:
306
+
307
+
308
+ ds.drop(['marital.status'], axis = 1, inplace = True)
309
+
310
+
311
+ # Similarly, **for 'Race' and 'Relationship'**
312
+
313
+ # In[36]:
314
+
315
+
316
+ plt.style.use('bmh')
317
+
318
+
319
+ # In[37]:
320
+
321
+
322
+ plt.figure(figsize = (12,4))
323
+
324
+ plt.subplot(1, 2, 1)
325
+ sns.countplot(y = ds['race'], hue = ds['income'])
326
+ plt.title("Income respective to Race", fontsize = 12)
327
+
328
+ plt.subplot(1, 2, 2)
329
+ sns.countplot(y = ds['relationship'], hue = ds['income'])
330
+ plt.title("Income respective to Relationship", fontsize = 12)
331
+
332
+ plt.tight_layout(pad = 4)
333
+ plt.show()
334
+
335
+
336
+ # **White people have a higher salary as compared to other races**. Similarly, **husband in the family have a higher salary as compared to other relationship in the family.** Encoding both these columns
337
+
338
+ # In[38]:
339
+
340
+
341
+ race_enc = (ds.groupby('race').size()) / len(ds)
342
+ print(race_enc,'\n')
343
+ ds['race_enc'] = ds['race'].apply(lambda x : race_enc[x])
344
+
345
+ relationship_enc = (ds.groupby('relationship').size()) / len(ds)
346
+ print(relationship_enc)
347
+ ds['relationship_enc'] = ds['relationship'].apply(lambda x : relationship_enc[x])
348
+
349
+
350
+ # In[39]:
351
+
352
+
353
+ ds.drop(['race', 'relationship'], axis = 1, inplace = True)
354
+ new_ds = ds.drop(['income'], axis = 1)
355
+ new_ds['income'] = ds['income']
356
+ new_ds
357
+
358
+
359
+ # ## Outliers:
360
+ # We check whether any outliers are present in the continuous attributes of the dataset, using both visualisations and the z-scores of the continuous columns.
361
+
362
+ # In[40]:
363
+
364
+
365
+ plt.style.use('default')
366
+
367
+
368
+ # In[41]:
369
+
370
+
371
+ plt.style.use('ggplot')
372
+
373
+
374
+ # In[42]:
375
+
376
+
377
+ clist = ['fnlwgt','age','capital.gain','capital.loss','hours.per.week']
378
+ plt.figure(figsize = (12,6))
379
+ for i in range(0, len(clist)):
380
+ plt.subplot(2,3, i+1)
381
+ sns.boxplot(ds[clist[i]], color = 'skyblue')
382
+ print("BoxPlots of the features:")
383
+ plt.show()
384
+
385
+
386
+ # **Outliers are present in the continuous columns of the data**. We will check the z-scores of the features and remove the outlying rows from the data.
387
+
388
+ # In[43]:
389
+
390
+
391
+ from scipy.stats import zscore
392
+ zabs = np.abs(zscore(new_ds.loc[:,'fnlwgt':'hours.per.week']))
393
+ print(np.shape(np.where(zabs >= 3)))
394
+ new_ds = new_ds[(zabs < 3).all(axis = 1)]
395
+ new_ds
396
+
397
+
398
+ # We have a total of 2566 outlying values in the data. After removing the affected rows, we have 28213 observations left.
399
+ # ## Correlation:
400
+ # Checking the correlation between the features and the target variable to see which columns are most related to the target.
401
+
402
+ # In[44]:
403
+
404
+
405
+ plt.figure(figsize = (14, 8))
406
+ plt.title("Correlation between target and features:")
407
+ sns.heatmap(new_ds.corr(), annot = True)
408
+ plt.show()
409
+
410
+
411
+ # 'Capital_gain', 'Education_num', 'Marital_status_enc', 'Relationship_enc' are most correlated to the Income of the observations.
412
+ # ## Scaling:
413
+ # The attribute values vary over very different ranges, so it is important to scale the data. We use the Min-Max scaler to normalise the data.
414
+
415
+ # In[45]:
416
+
417
+
418
+ from sklearn.preprocessing import MinMaxScaler
419
+ scale = MinMaxScaler()
420
+ new_ds.loc[:,'age':'hours.per.week'] = scale.fit_transform(new_ds.loc[:,'age':'hours.per.week'])
421
+ new_ds
422
+
423
+
424
+ # As we can see from the table above, the data is now normalised and can be used by the models for learning.
425
+ # ![](https://kiarofoods.com/wp-content/uploads/2019/10/line_break.png)
426
+ # # Data Imbalance:
427
+ # If the data is imbalanced, it can cause overfitting and bias in the model's predictions, so it is important to check for and cure any imbalance. We check whether the target variable is balanced.
428
+
429
+ # In[46]:
430
+
431
+
432
+ plt.figure(figsize = (8, 4))
433
+ plt.title("Values distribution in target class: Income")
434
+ sns.countplot(data = new_ds, x = 'income')
435
+ plt.show()
436
+
437
+
438
+ # As we can see, the data is imbalanced. In order **to remove the data imbalance, we use the SMOTETomek class to create synthetic minority samples with a k-nearest-neighbour (SMOTE) step.**
439
+
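+ # (Hedged note: imblearn's SMOTETomek combines two steps - SMOTE, which synthesises
+ # new minority samples by interpolating between a point and its k nearest minority
+ # neighbours, and Tomek-link removal, which deletes cross-class nearest-neighbour
+ # pairs to clean the class boundary. Passing random_state, e.g.
+ # SMOTETomek(random_state=42), would make the resampling reproducible.)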
440
+ # In[47]:
441
+
442
+
443
+ from imblearn.combine import SMOTETomek
444
+ x = new_ds.loc[:,"age":"relationship_enc"]
445
+ y = new_ds.loc[:,"income"]
446
+ smk = SMOTETomek()
447
+ x_new, y_new = smk.fit_resample(x, y)
448
+
449
+
450
+ # In[48]:
451
+
452
+
453
+ plt.figure(figsize = (8, 4))
454
+ plt.title("Values in target class after using SMOTETomek")
455
+ sns.countplot(x = y_new)
456
+ plt.show()
457
+
458
+
459
+ # As we can see, we now have a balanced dataset, so we can move ahead with the model building part.
460
+ # ![](https://kiarofoods.com/wp-content/uploads/2019/10/line_break.png)
461
+ # # Model Building:
462
+ # Starting with the splitting of the data into training and testing sets. For that, we first check which random state works best.
463
+
464
+ # In[49]:
465
+
466
+
467
+ from sklearn.linear_model import LogisticRegression
468
+ from sklearn.metrics import accuracy_score
469
+ from sklearn.model_selection import train_test_split
470
+
471
+ max_accuracy = 0
472
+ best_rs = 0
473
+ for i in range(1, 150):
474
+ x_train, x_test, y_train, y_test = train_test_split(x_new, y_new, test_size = 0.30, random_state = i)
475
+ lg = LogisticRegression()
476
+ lg.fit(x_train, y_train)
477
+ pred = lg.predict(x_test)
478
+ acc = accuracy_score(y_test, pred)
479
+ if acc > max_accuracy: # keep the best accuracy and random state seen so far
480
+ max_accuracy = acc
481
+ best_rs = i
482
+ print(f"Best Random State is {best_rs}, {max_accuracy*100}")
483
+
484
+
485
+ # Best possible random state is 67, so using it to split the data
486
+
487
+ # In[50]:
488
+
489
+
490
+ x_train, x_test, y_train, y_test = train_test_split(x_new, y_new, test_size = 0.30, random_state = 67)
491
+
492
+
493
+ # In[51]:
494
+
495
+
496
+ from sklearn.linear_model import LogisticRegression
497
+ from sklearn.ensemble import RandomForestClassifier
498
+ from sklearn.tree import DecisionTreeClassifier
499
+ from sklearn.ensemble import AdaBoostClassifier
500
+ from sklearn.naive_bayes import MultinomialNB
501
+ from sklearn.neighbors import KNeighborsClassifier
502
+ from sklearn.svm import SVC
503
+
504
+
505
+ # ## Model Fitting:
506
+ # Fitting 7 different models to check which model gives the best accuracy.
507
+
508
+ # In[52]:
509
+
510
+
511
+ # For Logistic Regression
512
+ lg = LogisticRegression()
513
+ lg.fit(x_train, y_train)
514
+ pred_lg = lg.predict(x_test)
515
+ print("Accuracy Score of Logistic Regression model is", accuracy_score(y_test, pred_lg)*100)
516
+
517
+ # For Decision Tree Classifier
518
+ dtc = DecisionTreeClassifier()
519
+ dtc.fit(x_train, y_train)
520
+ pred_dtc = dtc.predict(x_test)
521
+ print("Accuracy Score of Decision Tree Classifier model is", accuracy_score(y_test, pred_dtc)*100)
522
+
523
+ # For K-Nearest Neighbour Classifier
524
+ knc = KNeighborsClassifier(n_neighbors = 5)
525
+ knc.fit(x_train, y_train)
526
+ pred_knc = knc.predict(x_test)
527
+ print("Accuracy Score of K-Nearest Neighbour Classifier model is", accuracy_score(y_test, pred_knc)*100)
528
+
529
+ # For Support Vector Classifier
530
+ svc = SVC(kernel = 'rbf')
531
+ svc.fit(x_train, y_train)
532
+ pred_svc = svc.predict(x_test)
533
+ print("Accuracy Score of Support Vector Classifier model is", accuracy_score(y_test, pred_svc)*100)
534
+
535
+ # For Random Forest Classifier
536
+ rfc = RandomForestClassifier()
537
+ rfc.fit(x_train, y_train)
538
+ pred_rfc = rfc.predict(x_test)
539
+ print("Accuracy Score of Random Forest model is", accuracy_score(y_test, pred_rfc)*100)
540
+
541
+ # For MultinomialNB
542
+ nb = MultinomialNB() # making the Multinomial Naive Bayes class
543
+ nb.fit(x_train, y_train) # fitting the model
544
+ pred_nb = nb.predict(x_test) # predicting the values
545
+ print("Accuracy Score of MultinomialNB model is", accuracy_score(y_test, pred_nb)*100)
546
+
547
+ # For ADA Boost Classifier
548
+ ada= AdaBoostClassifier()
549
+ ada.fit(x_train, y_train) # fitting the model
550
+ pred_ada = ada.predict(x_test) # predicting the values
551
+ print("Accuracy Score of ADA Boost model is", accuracy_score(y_test, pred_ada)*100)
552
+
553
+
554
+ # The best accuracy score is given by the Random Forest Classifier. To guard against bias, overfitting or underfitting, we cross-validate the models and compare their mean accuracy scores.
555
+ # ## Cross Validation:
556
+ # Cross-validating the models to see whether they are underfitting or overfitting and to prevent bias. We will compare the mean accuracy scores of the models.
557
+
558
+ # In[53]:
559
+
560
+
561
+ from sklearn.model_selection import cross_val_score
562
+
563
+ lg_scores = cross_val_score(lg, x_new, y_new, cv = 10) # cross validating the model
564
+ print(lg_scores) # accuracy scores of each cross validation cycle
565
+ print(f"Mean of accuracy scores is for Logistic Regression is {lg_scores.mean()*100}\n")
566
+
567
+ dtc_scores = cross_val_score(dtc, x_new, y_new, cv = 10)
568
+ print(dtc_scores)
569
+ print(f"Mean of accuracy scores is for Decision Tree Classifier is {dtc_scores.mean()*100}\n")
570
+
571
+ knc_scores = cross_val_score(knc, x_new, y_new, cv = 10)
572
+ print(knc_scores)
573
+ print(f"Mean of accuracy scores is for KNN Classifier is {knc_scores.mean()*100}\n")
574
+
575
+ svc_scores = cross_val_score(svc, x_new, y_new, cv = 10)
576
+ print(svc_scores)
577
+ print(f"Mean of accuracy scores is for SVC Classifier is {svc_scores.mean()*100}\n")
578
+
579
+ rfc_scores = cross_val_score(rfc, x_new, y_new, cv = 10)
580
+ print(rfc_scores)
581
+ print(f"Mean of accuracy scores is for Random Forest Classifier is {rfc_scores.mean()*100}\n")
582
+
583
+ nb_scores = cross_val_score(nb, x_new, y_new, cv = 10)
584
+ print(nb_scores)
585
+ print(f"Mean of accuracy scores is for MultinomialNB is {nb_scores.mean()*100}\n")
586
+
587
+ ada_scores = cross_val_score(ada, x_new, y_new, cv = 10)
588
+ print(ada_scores)
589
+ print(f"Mean of accuracy scores is for ADA Boost Classifier is {ada_scores.mean()*100}\n")
590
+
591
+
592
+ # In[54]:
593
+
594
+
595
+ # Checking the difference between the test accuracy and the cross-validation mean accuracy for each model.
596
+ lis3 = ['Logistic Regression','Decision Tree Classifier','KNeighbors Classifier','SVC', 'Random Forest Classifier',
597
+ 'MultinomialNB', 'ADA Boost Classifier']
598
+
599
+ lis1 = [accuracy_score(y_test, pred_lg)*100, accuracy_score(y_test, pred_dtc)*100, accuracy_score(y_test, pred_knc)*100,
600
+ accuracy_score(y_test, pred_svc)*100, accuracy_score(y_test, pred_rfc)*100, accuracy_score(y_test, pred_nb)*100,
601
+ accuracy_score(y_test, pred_ada)*100]
602
+
603
+ lis2 = [lg_scores.mean()*100, dtc_scores.mean()*100, knc_scores.mean()*100, svc_scores.mean()*100, rfc_scores.mean()*100,
604
+ nb_scores.mean()*100, ada_scores.mean()*100]
605
+
606
+ for i in range(0, 7):
607
+ dif = (lis1[i]) - (lis2[i])
608
+ print(lis3[i], dif)
609
+
610
+
611
+ # **Random Forest Classifier is the best model, with the highest cross-validation mean score and accuracy score**. We will use it for the model building.
612
+ # ## Hyperparameter Tuning:
613
+ # Tuning the parameters of the Random Forest in order to obtain the best possible parameters for model building.
614
+
615
+ # In[55]:
616
+
617
+
618
+ from sklearn.model_selection import GridSearchCV
619
+ rfc = RandomForestClassifier()
620
+ param = dict()
621
+ param['criterion'] = ['gini', 'entropy']
622
+ param['n_estimators'] = [1, 2, 4, 8, 10, 16, 32, 64, 100, 200]
623
+ param['min_samples_split'] = [1,2,5,8,10,15,20,25,50,55,60,80,100]
624
+
625
+
626
+ gs = GridSearchCV(estimator = rfc, param_grid = param, scoring='f1', cv = 5, n_jobs = 3)
627
+ gs.fit(x_train, y_train)
628
+ print(gs.best_score_)
629
+ print(gs.best_params_)
630
+
631
+
632
+ # After the hyperparameter tuning, **the best parameters for the Random Forest Classifier are 'criterion' = 'entropy', 'min_samples_split' = 2, 'n_estimators' = 100**. We build the model using these parameters.
633
+
634
+ # In[56]:
635
+
636
+
637
+ rfc = RandomForestClassifier(criterion = 'entropy', min_samples_split = 2, n_estimators = 100)
638
+ rfc.fit(x_train, y_train)
639
+ print(rfc.score(x_train, y_train))
640
+ pred_rfc = rfc.predict(x_test)
641
+
642
+
643
+ # ![](https://kiarofoods.com/wp-content/uploads/2019/10/line_break.png)
644
+ # # Model Evaluation:
645
+ # We have built the model after cross-validation and hyperparameter tuning. It is now time to evaluate it using the classification report, confusion matrix and ROC curve.
646
+
647
+ # In[57]:
648
+
649
+
650
+ from sklearn.metrics import plot_roc_curve
651
+ from sklearn.metrics import confusion_matrix, classification_report
652
+
653
+ print("Accuracy Score of RFC model is", accuracy_score(y_test, pred_rfc)*100)
654
+ print("Confusion matrix for RFC Model is")
655
+ print(confusion_matrix(y_test, pred_rfc))
656
+ print("Classification Report of the RFC Model is")
657
+ print(classification_report(y_test, pred_rfc))
658
+
659
+ plot_roc_curve(rfc, x_test, y_test) # arg. are model name, feature testing data, label testing data.
660
+ plt.title("Recevier's Operating Characteristic")
661
+ plt.xlabel("False Positive Rate")
662
+ plt.ylabel("True Positive Rate")
663
+ plt.show()
664
+
665
+
666
+ # After the model evaluation, we get **precision and recall of 0.92 and 0.91 for both target classes**. The **f1-score of the model is 0.92**. The ROC curve gives us **an AUC score of 0.98**. The model evaluation shows that ***the predictions are very accurate.***
667
+ # ![](https://miro.medium.com/max/2400/1*IH10jlQEJ7GW1_oq8s7WPw.png)
668
+ # # Serialisation:
669
+ # Now we save the Random Forest Classifier Model as an object using joblib.
670
+
671
+ # In[58]:
672
+
673
+
674
+ import joblib
675
+ joblib.dump(rfc, 'Census Income Prediction.obj') # saving the model as an object
676
+
677
+
678
+ # In[ ]:
679
+
680
+
681
+
682
+
AdultNoteBook/Kernels/AdaBoost/7-adult-census-income-eda-and-prediction-87-35.ipynb ADDED
The diff for this file is too large to render. See raw diff
 
AdultNoteBook/Kernels/AdaBoost/7-adult-census-income-eda-and-prediction-87-35.py ADDED
@@ -0,0 +1,451 @@
1
+ #!/usr/bin/env python
2
+ # coding: utf-8
3
+
4
+ # # Adult Census Income EDA and Prediction
5
+ #
6
+ # In this kernel I work with the UCI Adult Census Income dataset. The prediction task is to determine whether a person makes over $50K a year. I start with an exhaustive EDA, and I then train various models to solve the prediction task.
7
+
8
+ # In[1]:
9
+
10
+
11
+ import numpy as np
12
+ import pandas as pd
13
+ import matplotlib.pyplot as plt
14
+ import seaborn as sns
15
+ get_ipython().run_line_magic('matplotlib', 'inline')
16
+
17
+
18
+ import os
19
+ print(os.listdir("../input"))
20
+
21
+ data = pd.read_csv("../input/adult.csv")
22
+
23
+
24
+ # ## Exploratory Data Analysis
25
+
26
+ # In[2]:
27
+
28
+
29
+ print(len(data))
30
+ data.head(10)
31
+
32
+
33
+ # In[3]:
34
+
35
+
36
+ data.isnull().sum()
37
+
38
+
39
+ # Good.
40
+
41
+ # In[4]:
42
+
43
+
44
+ data.dtypes
45
+
46
+
47
+ # In[5]:
48
+
49
+
50
+ sns.countplot(data['income'])
51
+ plt.show()
52
+
53
+
54
+ # ### Distribution of features
55
+
56
+ # In[6]:
57
+
58
+
59
+ # Sex distribution
60
+ sns.countplot(data['sex'])
61
+ plt.show()
62
+
63
+
64
+ # In[7]:
65
+
66
+
67
+ # Age distribution
68
+ ages = data['age'].hist(bins=max(data['age'])-min(data['age']))
69
+ mean_val = np.mean(data['age'])
70
+ plt.axvline(mean_val, linestyle='dashed', linewidth=2, color='yellow', label='mean age')
71
+ plt.xlabel('age')
72
+ plt.ylabel('count')
73
+ plt.legend()
74
+ plt.show()
75
+
76
+
77
+ # In[8]:
78
+
79
+
80
+ data['hours.per.week'].hist()
81
+ plt.xlabel('hours per week')
82
+ plt.ylabel('count')
83
+ plt.show()
84
+
85
+
86
+ # In[9]:
87
+
88
+
89
+ fig, axs = plt.subplots(ncols=2, nrows=4, figsize=(20, 20))
90
+ plt.subplots_adjust(hspace=0.68)
91
+ fig.delaxes(axs[3][1])
92
+
93
+
94
+ # Workclass
95
+ wc_plot = sns.countplot(data['workclass'], ax=axs[0][0])
96
+ wc_plot.set_xticklabels(wc_plot.get_xticklabels(), rotation=40, ha="right")
97
+
98
+ # Native country
99
+ nc_plot = sns.countplot(data['native.country'], ax=axs[0][1])
100
+ nc_plot.set_xticklabels(nc_plot.get_xticklabels(), rotation=72, ha="right")
101
+
102
+ # Education
103
+ order=['Preschool', '1st-4th', '5th-6th', '7th-8th', '9th', '10th', '11th', '12th', 'HS-grad',
104
+ 'Some-college', 'Assoc-acdm', 'Assoc-voc', 'Bachelors', 'Masters', 'Prof-school', 'Doctorate']
105
+ ed_plot = sns.countplot(data['education'], order=order, ax=axs[1][0])
106
+ ed_plot.set_xticklabels(ed_plot.get_xticklabels(), rotation=40, ha="right")
107
+
108
+ # Marital status
109
+ ms_plot = sns.countplot(data['marital.status'], ax=axs[1][1])
110
+ ms_plot.set_xticklabels(ms_plot.get_xticklabels(), rotation=40, ha="right")
111
+
112
+ # Relationship
113
+ rel_plot = sns.countplot(data['relationship'], ax=axs[2][0])
114
+ rel_plot.set_xticklabels(rel_plot.get_xticklabels(), rotation=40, ha="right")
115
+
116
+ # Race
117
+ race_plot = sns.countplot(data['race'], ax=axs[2][1])
118
+ race_plot.set_xticklabels(race_plot.get_xticklabels(), rotation=40, ha="right")
119
+
120
+ # Occupation
121
+ occ_plot = sns.countplot(data['occupation'], ax=axs[3][0])
122
+ occ_plot.set_xticklabels(occ_plot.get_xticklabels(), rotation=40, ha="right")
123
+
124
+ plt.show()
125
+
126
+
127
+ #
128
+ # ### How do features relate to one another?
129
+
130
+ # In[10]:
131
+
132
+
133
+ plt.figure(figsize=(24, 6))
134
+ ro = sns.countplot(data['occupation'], hue=data['sex'])
135
+ ro.set_xticklabels(ro.get_xticklabels(), rotation=30, ha="right")
136
+ plt.show()
137
+
138
+
139
+ # In[11]:
140
+
141
+
142
+ plt.figure(figsize=(20, 6))
143
+ ro = sns.countplot(data['education'], hue=data['sex'], order=order)
144
+ ro.set_xticklabels(ro.get_xticklabels(), rotation=40, ha="right")
145
+ #ro.set_yscale('log')
146
+ plt.show()
147
+
148
+
149
+ # In[12]:
150
+
151
+
152
+ data['income'] = data['income'].map({'<=50K': 0, '>50K': 1})
153
+
154
+
155
+ # ### How do features relate to income?
156
+
157
+ # In[13]:
158
+
159
+
160
+ fig, axs = plt.subplots(ncols=2, nrows=4, figsize=(24, 28))
161
+ #fig.delaxes(axs[3][1])
162
+ plt.subplots_adjust(hspace=0.4)
163
+
164
+ # education and income
165
+ sns.catplot(x="education", y="income", data=data, kind="bar", height = 6, palette = "muted", order=order, ax=axs[0][0])
166
+ axs[0][0].set_xticklabels(axs[0][0].axes.get_xticklabels(), rotation=40, ha="right")
167
+ axs[0][0].set_ylabel(">50K probability")
168
+
169
+ sns.catplot(x="workclass", y="income", data=data, kind="bar", height = 6, palette = "muted", ax=axs[0][1])
170
+ axs[0][1].set_xticklabels(axs[0][1].axes.get_xticklabels(), rotation=40, ha="right")
171
+ axs[0][1].set_ylabel(">50K probability")
172
+
173
+
174
+ sns.catplot(x="relationship", y="income", data=data, kind="bar", height = 6, palette = "muted", ax=axs[1][0])
175
+ axs[1][0].set_xticklabels(axs[1][0].axes.get_xticklabels(), rotation=40, ha="right")
176
+ axs[1][0].set_ylabel(">50K probability")
177
+
178
+ sns.catplot(x="marital.status", y="income", data=data, kind="bar", height = 6, palette = "muted", ax=axs[1][1])
179
+ axs[1][1].set_xticklabels(axs[1][1].axes.get_xticklabels(), rotation=40, ha="right")
180
+ axs[1][1].set_ylabel(">50K probability")
181
+
182
+ sns.catplot(x="race", y="income", data=data, kind="bar", height = 6, palette = "muted", ax=axs[2][0])
183
+ axs[2][0].set_xticklabels(axs[2][0].axes.get_xticklabels(), rotation=40, ha="right")
184
+ axs[2][0].set_ylabel(">50K probability")
185
+
186
+ sns.catplot(x="native.country", y="income", data=data, kind="bar", height = 6, palette = "muted", ax=axs[2][1])
187
+ axs[2][1].set_xticklabels(axs[2][1].axes.get_xticklabels(), rotation=55, ha="right")
188
+ axs[2][1].set_ylabel(">50K probability")
189
+
190
+ sns.catplot(x="sex", y="income", data=data, kind="bar", height = 6, palette = "muted", ax=axs[3][0])
191
+ axs[3][0].set_xticklabels(axs[3][0].axes.get_xticklabels(), rotation=40, ha="right")
192
+ axs[3][0].set_ylabel(">50K probability")
193
+
194
+ sns.catplot(x="occupation", y="income", data=data, kind="bar", height = 6, palette = "muted", ax=axs[3][1])
195
+ axs[3][1].set_xticklabels(axs[3][1].axes.get_xticklabels(), rotation=40, ha="right")
196
+ axs[3][1].set_ylabel(">50K probability")
197
+
198
+ #ed_income.set_ylabels(">50K probability")
199
+
200
+ for i in range(2,10):
201
+ plt.close(i)
202
+
203
+ plt.show()
204
+
205
+
206
+ # #### Another way of visualizing this
207
+
208
+ # In[14]:
209
+
210
+
211
+ plt.figure(figsize=(20, 6))
212
+ sns.countplot(data['marital.status'], hue=data['income'])
213
+ plt.show()
214
+
215
+
216
+ # ## Data Preparation
217
+ #
218
+ # Now the data needs to be prepared for prediction.
219
+
220
+ # In[15]:
221
+
222
+
223
+ data['sex'] = data['sex'].map({'Male': 1, 'Female': 0})
224
+
225
+
226
+ # In[16]:
227
+
228
+
229
+ data['race'] = data['race'].map({'White': 1, 'Asian-Pac-Islander': 1, 'Black':0, 'Amer-Indian-Eskimo':0, 'Other':0})
230
+ data['relationship'] = data['relationship'].map({'Not-in-family':0, 'Unmarried':0, 'Own-child':0, 'Other-relative':0, 'Husband':1, 'Wife':1})
231
+ data['marital.status'] = data['marital.status'].map({'Widowed':0, 'Divorced':0, 'Separated':0, 'Never-married':0, 'Married-civ-spouse':1, 'Married-AF-spouse':1, 'Married-spouse-absent':0})
232
+
233
+
234
+ # In[17]:
235
+
236
+
237
+ g = sns.heatmap(data[['relationship', 'marital.status']].corr(),annot=True, fmt = ".2f", cmap = "coolwarm")
238
+ plt.show()
239
+
240
+
241
+ # relationship and marital.status contain the same information now, so one of them can be removed
242
+
243
+ # In[18]:
244
+
245
+
246
+ data.drop(['marital.status'], axis=1,inplace=True)
247
+
248
+
249
+ # LabelEncoder can be used to transform the rest of the categorical features.
250
+
251
+ # In[19]:
252
+
253
+
254
+ # data.drop(['workclass', 'education', 'occupation', 'native.country'], axis=1,inplace=True)
255
+
256
+ data.drop(['education'], axis=1,inplace=True)
257
+
258
+ labels = ['workclass', 'occupation', 'native.country']
259
+
260
+ from sklearn.preprocessing import LabelEncoder
261
+ le = LabelEncoder()
262
+ for l in labels:
263
+ data[l]=le.fit_transform(data[l])
264
+
265
+
266
+ # In[20]:
267
+
268
+
269
+ data.head(10)
270
+
271
+
272
+ # The dataset is ready.
273
+
274
+ # ## Prediction
275
+
276
+ # #### Importing the relevant libraries
277
+
278
+ # In[21]:
279
+
280
+
281
+ from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, GradientBoostingClassifier, VotingClassifier, BaggingClassifier, ExtraTreesClassifier
282
+ from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
283
+ from sklearn.linear_model import LogisticRegression
284
+ from sklearn.neighbors import KNeighborsClassifier
285
+
286
+ from sklearn.tree import DecisionTreeClassifier
287
+ from sklearn.naive_bayes import GaussianNB
288
+ from sklearn.model_selection import GridSearchCV, cross_val_score, cross_val_predict, StratifiedKFold, learning_curve, train_test_split, KFold
289
+ # from sklearn.metrics import classification_report
290
+ from sklearn.metrics import confusion_matrix, accuracy_score
291
+ from sklearn.svm import SVC
292
+
293
+
294
+ # #### Preparing data for training and testing with k-fold Cross-Validation
295
+
296
+ # In[22]:
297
+
298
+
299
+ seed = 42
300
+
301
+ from sklearn.preprocessing import StandardScaler
302
+
303
+ X = StandardScaler().fit_transform(data.loc[:, data.columns != 'income'])
304
+ Y = data['income']
305
+
306
+ # X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=test_size, random_state=seed)
307
+
308
+ kf = KFold(n_splits=10, shuffle=True, random_state=seed)
309
+
310
+
311
+ # In[23]:
312
+
313
+
314
+ a = len(data.loc[data.income==0])/len(data)
315
+ print(a)
316
+
317
+
318
+ # One would get a 76% accuracy by just always predicting <=50k. Our model has to do better than that or it's not learning anything.
319
+
320
+ # ### Starting with some simple models
321
+
322
+ # In[24]:
323
+
324
+
325
+
326
+ fig, axs = plt.subplots(ncols=2, nrows=2, figsize=(24, 14))
327
+
328
+
329
+ classifiers = [
330
+ LogisticRegression(solver='newton-cg'),
331
+ KNeighborsClassifier(n_neighbors=17), # Some trial and error I don't show went into this hyperpa
332
+ LinearDiscriminantAnalysis(),
333
+ GaussianNB()
334
+ ]
335
+
336
+
337
+ for i, c in enumerate(classifiers):
338
+
339
+ x_axs = i%2
340
+ y_axs = int(i/2)
341
+ # print(c)
342
+ print(type(c).__name__)
343
+ pred = cross_val_predict(c, X, Y, cv=kf)
344
+ print("Accuracy score:", round(accuracy_score(Y, pred), 4), '\n')
345
+
346
+ sns.heatmap(confusion_matrix(Y, pred), annot=True, fmt='g', ax=axs[y_axs][x_axs])
347
+ axs[y_axs][x_axs].set_xlabel('Predicted')
348
+ axs[y_axs][x_axs].set_ylabel('Real')
349
+ axs[y_axs][x_axs].set_title(type(c).__name__)
350
+
351
+ plt.show()
352
+
353
+
354
+ # Logistic regression performs best with 84.25% accuracy.
355
+ #
356
+
357
+ # ### More complex models
358
+
359
+ # In[25]:
360
+
361
+
362
+ import warnings
363
+ warnings.filterwarnings(action='ignore')
364
+ fig, axs = plt.subplots(ncols=2, nrows=3, figsize=(24, 21))
365
+
366
+ classifiers = [
367
+ DecisionTreeClassifier(),
368
+ BaggingClassifier(),
369
+ RandomForestClassifier(),
370
+ ExtraTreesClassifier(),
371
+ GradientBoostingClassifier(),
372
+ AdaBoostClassifier()
373
+ ]
374
+
375
+
376
+ for i, c in enumerate(classifiers):
377
+
378
+ x_axs = i%2
379
+ y_axs = int(i/2)
380
+
381
+ # print(c)
382
+ print(type(c).__name__)
383
+ pred = cross_val_predict(c, X, Y, cv=kf)
384
+ print("Accuracy score:", round(accuracy_score(Y, pred), 4), '\n')
385
+
386
+ sns.heatmap(confusion_matrix(Y, pred), annot=True, fmt='g', ax=axs[y_axs][x_axs])
387
+ axs[y_axs][x_axs].set_xlabel('Predicted')
388
+ axs[y_axs][x_axs].set_ylabel('Real')
389
+ axs[y_axs][x_axs].set_title(type(c).__name__)
390
+
391
+ plt.show()
392
+
393
+
394
+ # Gradient Boosting with no hyperparameter tuning gets to 86.58% accuracy. Not bad. Let's see if we can do better.
395
+
396
+ # ### Model Tuning
397
+
398
+ # GridSearchCV allows to try out a lot of hyperparameters at once.
399
+
400
+ # In[26]:
401
+
402
+
403
+ '''
404
+ # This takes about 2 hours to run
405
+ params = {'max_depth': [5, 6, 7],
406
+ 'n_estimators': [100, 150, 200],
407
+ 'learning_rate': [0.1, 0.07, 0.05],
408
+ 'max_features': ['sqrt', 'log2', 3, 4, 5]
409
+ }
410
+ '''
411
+
412
+
413
+ params = {'max_depth': [6],
414
+ 'n_estimators': [200],
415
+ 'learning_rate': [0.07, 0.06],
416
+ 'max_features': [3,4]
417
+ }
418
+
419
+ classifier = GradientBoostingClassifier()
420
+
421
+ grid = GridSearchCV(classifier, param_grid=params, cv=kf)
422
+ search_result = grid.fit(X, Y)
423
+
424
+
425
+ # In[27]:
426
+
427
+
428
+ # GridSearch results
429
+ means = search_result.cv_results_['mean_test_score']
430
+ params = search_result.cv_results_['params']
431
+ for m, p in zip(means, params):
432
+ print(f"{m} with: {p}")
433
+
434
+
435
+ # In[28]:
436
+
437
+
438
+ p = np.argmax(means)
439
+ best_param = params[p]
440
+
441
+ final_model = GradientBoostingClassifier(**best_param)
442
+
443
+ print(final_model)
444
+ pred = cross_val_predict(final_model, X, Y, cv=kf)
445
+ print("Accuracy score:", round(accuracy_score(Y, pred), 4), '\n')
446
+
447
+ sns.heatmap(confusion_matrix(Y, pred), annot=True, fmt='g')
448
+ plt.show()
449
+
450
+
451
+ # Final prediction accuracy: 87.35%
AdultNoteBook/Kernels/Adult_Fairness.xlsx ADDED
Binary file (9.76 kB). View file
 
AdultNoteBook/Kernels/ExtraTrees/.ipynb_checkpoints/1-income-prediction-84-369-accuracy-checkpoint.ipynb ADDED
The diff for this file is too large to render. See raw diff
 
AdultNoteBook/Kernels/ExtraTrees/.ipynb_checkpoints/10-accurate-predictions-with-20-test-data-checkpoint.ipynb ADDED
The diff for this file is too large to render. See raw diff
 
AdultNoteBook/Kernels/ExtraTrees/.ipynb_checkpoints/11-adult-checkpoint.ipynb ADDED
The diff for this file is too large to render. See raw diff
 
AdultNoteBook/Kernels/ExtraTrees/.ipynb_checkpoints/12-compare-all-the-classification-models-checkpoint.ipynb ADDED
The diff for this file is too large to render. See raw diff
 
AdultNoteBook/Kernels/ExtraTrees/.ipynb_checkpoints/2-multiple-ml-techniques-and-analysis-of-dataset-checkpoint.ipynb ADDED
The diff for this file is too large to render. See raw diff
 
AdultNoteBook/Kernels/ExtraTrees/.ipynb_checkpoints/3-income-classification-using-meta-learning-checkpoint.ipynb ADDED
The diff for this file is too large to render. See raw diff
 
AdultNoteBook/Kernels/ExtraTrees/.ipynb_checkpoints/4-a-simple-knn-application-checkpoint.ipynb ADDED
The diff for this file is too large to render. See raw diff
 
AdultNoteBook/Kernels/ExtraTrees/.ipynb_checkpoints/5-adult-census-income-eda-and-prediction-87-35-checkpoint.ipynb ADDED
The diff for this file is too large to render. See raw diff
 
AdultNoteBook/Kernels/ExtraTrees/1-income-prediction-84-369-accuracy.ipynb ADDED
The diff for this file is too large to render. See raw diff
 
AdultNoteBook/Kernels/ExtraTrees/1-income-prediction-84-369-accuracy.py ADDED
@@ -0,0 +1,340 @@
1
+ #!/usr/bin/env python
2
+ # coding: utf-8
3
+
4
+ # #Income Prediction Problem
5
+ # In this Notebook, I am working through the Income Prediction problem associated with the Adult Income Census dataset. The goal is to accurately predict whether or not someone is making more or less than $50,000 a year. While working through this problem, I am following a framework I use to attack all my machine learning problems. It includes the following steps:
6
+ #
7
+ # 1. Load Libraries
8
+ # 2. Load Data
9
+ # 3. Analyze Data
10
+ # 4. Feature Engineering
11
+ # 5. Modeling
12
+ # 6. Algorithm Tuning
13
+ # 7. Finalizing the Model
14
+ #
15
+ # I hope you enjoy this notebook and find it useful. Please keep in mind this is my first Notebook on here so don't judge it too harshly!
16
+
17
+ # ##1. Load Libraries
18
+
19
+ # First, we need to load all of the libraries we will use for this project.
20
+
21
+ # In[1]:
22
+
23
+
24
+ import pandas as pd
25
+ import numpy as np
26
+ import matplotlib.pyplot as plt
27
+ import seaborn as sns
28
+ get_ipython().run_line_magic('matplotlib', 'inline')
29
+
30
+ from collections import Counter
31
+
32
+ from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, GradientBoostingClassifier, ExtraTreesClassifier, VotingClassifier
33
+ from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
34
+ from sklearn.linear_model import LogisticRegression
35
+ from sklearn.neighbors import KNeighborsClassifier
36
+ from sklearn.tree import DecisionTreeClassifier
37
+ from sklearn.neural_network import MLPClassifier
38
+ from sklearn.naive_bayes import GaussianNB
39
+ from sklearn.ensemble import RandomForestClassifier
40
+ from sklearn.model_selection import GridSearchCV, cross_val_score, StratifiedKFold, learning_curve, train_test_split, KFold
41
+ from sklearn.metrics import classification_report
42
+ from sklearn.metrics import confusion_matrix
43
+ from sklearn.metrics import accuracy_score
44
+
45
+ sns.set(style='white', context='notebook', palette='deep')
46
+
47
+
48
+ # ##2. Load Data
49
+
50
+ # Next, we load our data.
51
+
52
+ # In[2]:
53
+
54
+
55
+ dataset = pd.read_csv("../input/adult.csv")
56
+
57
+ # Check for Null Data
58
+ dataset.isnull().sum()
59
+
60
+
61
+ # In[3]:
62
+
63
+
64
+ # Replace All Null Data in NaN
65
+ dataset = dataset.fillna(np.nan)
66
+
67
+
68
+ # In[4]:
69
+
70
+
71
+ # Get data types
72
+ dataset.dtypes
73
+
74
+
75
+ # In[5]:
76
+
77
+
78
+ # Peek at data
79
+ dataset.head(4)
80
+
81
+
82
+ # In[6]:
83
+
84
+
85
+
86
+ # Reformat Column We Are Predicting
87
+ dataset['income']=dataset['income'].map({'<=50K': 0, '>50K': 1, '<=50K.': 0, '>50K.': 1})
88
+ dataset.head(4)
89
+
90
+
91
+ # ##3. Analyze Data
92
+
93
+ # In[7]:
94
+
95
+
96
+ # Identify Numeric features
97
+ numeric_features = ['age','fnlwgt','education.num','capital.gain','capital.loss','hours.per.week','income']
98
+
99
+ # Identify Categorical features
100
+ cat_features = ['workclass','education','marital.status', 'occupation', 'relationship', 'race', 'sex', 'native.country']
101
+
102
+
103
+ # ###3.1. Numeric Data Analysis
104
+
105
+ # In[8]:
106
+
107
+
108
+ # Count of >50K & <=50K
109
+ sns.countplot(dataset['income'],label="Count")
110
+ plt.show()
111
+
112
+
113
+ # In[9]:
114
+
115
+
116
+ # Correlation matrix between numerical values
117
+ g = sns.heatmap(dataset[numeric_features].corr(),annot=True, fmt = ".2f", cmap = "coolwarm")
118
+ plt.show()
119
+
120
+
121
+ # In[10]:
122
+
123
+
124
+ # Explore Education Num vs Income
125
+ g = sns.factorplot(x="education.num",y="income",data=dataset,kind="bar",size = 6,palette = "muted")
126
+ g.despine(left=True)
127
+ g = g.set_ylabels(">50K probability")
128
+
129
+
130
+ # In[11]:
131
+
132
+
133
+ # Explore Hours Per Week vs Income
134
+ g = sns.factorplot(x="hours.per.week",y="income",data=dataset,kind="bar",size = 6,palette = "muted")
135
+ g.despine(left=True)
136
+ g = g.set_ylabels(">50K probability")
137
+
138
+
139
+ # In[12]:
140
+
141
+
142
+ # Explore Age vs Income
143
+ g = sns.FacetGrid(dataset, col='income')
144
+ g = g.map(sns.distplot, "age")
145
+ plt.show()
146
+
147
+
148
+ # ###3.2. Categorical Data Analysis
149
+
150
+ # In[13]:
151
+
152
+
153
+ # Fill Missing Category Entries
154
+ dataset["workclass"] = dataset["workclass"].fillna("X")
155
+ dataset["occupation"] = dataset["occupation"].fillna("X")
156
+ dataset["native.country"] = dataset["native.country"].fillna("United-States")
157
+
158
+ # Confirm All Missing Data is Handled
159
+ dataset.isnull().sum()
160
+
161
+
162
+ # In[14]:
163
+
164
+
165
+ # Explore Native Nation vs Income
166
+ g = sns.barplot(x="native.country",y="income",data=dataset)
167
+ g = g.set_ylabel("Income >50K Probability")
168
+ plt.show()
169
+
170
+
171
+ # In[15]:
172
+
173
+
174
+ # Explore Sex vs Income
175
+ g = sns.barplot(x="sex",y="income",data=dataset)
176
+ g = g.set_ylabel("Income >50K Probability")
177
+ plt.show()
178
+
179
+
180
+ # In[16]:
181
+
182
+
183
+ # Explore Relationship vs Income
184
+ g = sns.factorplot(x="relationship",y="income",data=dataset,kind="bar", size = 6 ,
185
+ palette = "muted")
186
+ g.despine(left=True)
187
+ g = g.set_ylabels("Income >50K Probability")
188
+ plt.show()
189
+
190
+
191
+ # In[17]:
192
+
193
+
194
+ # Explore Marital Status vs Income
195
+ g = sns.factorplot(x="marital.status",y="income",data=dataset,kind="bar", size = 6 ,
196
+ palette = "muted")
197
+ g.despine(left=True)
198
+ g = g.set_ylabels("Income >50K Probability")
199
+ plt.show()
200
+
201
+
202
+ # In[18]:
203
+
204
+
205
+ # Explore Workclass vs Income
206
+ g = sns.factorplot(x="workclass",y="income",data=dataset,kind="bar", size = 6 ,
207
+ palette = "muted")
208
+ g.despine(left=True)
209
+ g = g.set_ylabels("Income >50K Probability")
210
+ plt.show()
211
+
212
+
213
+ # ##4. Feature Engineering
214
+
215
+ # In[19]:
216
+
217
+
218
+ ####################################################
219
+ ############### FEATURE ENGINEERING ################
220
+ ####################################################
221
+ # Convert Sex value to 0 and 1
222
+ dataset["sex"] = dataset["sex"].map({"Male": 0, "Female":1})
223
+
224
+ # Create Married Column - Binary Yes(1) or No(0)
225
+ dataset["marital.status"] = dataset["marital.status"].replace(['Never-married','Divorced','Separated','Widowed'], 'Single')
226
+ dataset["marital.status"] = dataset["marital.status"].replace(['Married-civ-spouse','Married-spouse-absent','Married-AF-spouse'], 'Married')
227
+ dataset["marital.status"] = dataset["marital.status"].map({"Married":1, "Single":0})
228
+ dataset["marital.status"] = dataset["marital.status"].astype(int)
229
+
230
+ # Drop the data you don't want to use
231
+ dataset.drop(labels=["workclass","education","occupation","relationship","race","native.country"], axis = 1, inplace = True)
232
+ print('Dataset with Dropped Labels')
233
+ print(dataset.head())
234
+
235
+
236
+ # ##5. Modeling
237
+
238
+ # In[20]:
239
+
240
+
241
+ ###################################################
242
+ ##################### MODELING #####################
243
+ ####################################################
244
+ # Split-out Validation Dataset and Create Test Variables
245
+ array = dataset.values
246
+ X = array[:,0:8]
247
+ Y = array[:,8]
248
+ print('Split Data: X')
249
+ print(X)
250
+ print('Split Data: Y')
251
+ print(Y)
252
+ validation_size = 0.20
253
+ seed = 7
254
+ num_folds = 10
255
+ scoring = 'accuracy'
256
+ X_train, X_validation, Y_train, Y_validation = train_test_split(X,Y,
257
+ test_size=validation_size,random_state=seed)
258
+
259
+ # Params for Random Forest
260
+ num_trees = 100
261
+ max_features = 3
262
+
263
+ # Spot-check 6 algorithms (LR, LDA, KNN, CART, NB, RF); SVM is left commented out below
264
+ models = []
265
+ models.append(('LR', LogisticRegression()))
266
+ models.append(('LDA', LinearDiscriminantAnalysis()))
267
+ models.append(('KNN', KNeighborsClassifier()))
268
+ models.append(('CART', DecisionTreeClassifier()))
269
+ models.append(('NB', GaussianNB()))
270
+ models.append(('RF', RandomForestClassifier(n_estimators=num_trees, max_features=max_features)))
271
+ #models.append(('SVM', SVC()))
272
+ # evaluate each model in turn
273
+ results = []
274
+ names = []
275
+ for name, model in models:
276
+ kfold = KFold(n_splits=10, random_state=seed)
277
+ cv_results = cross_val_score(model, X_train, Y_train, cv=kfold, scoring='accuracy')
278
+ results.append(cv_results)
279
+ names.append(name)
280
+ msg = "%s: %f (%f)" % (name, cv_results.mean(), cv_results.std())
281
+ print(msg)
282
+
283
+
284
+ # In[21]:
285
+
286
+
287
+ fig = plt.figure()
288
+ fig.suptitle('Algorithm Comparison')
289
+ ax = fig.add_subplot(111)
290
+ plt.boxplot(results)
291
+ ax.set_xticklabels(names)
292
+ plt.show()
293
+
294
+
295
+ # ##6. Algorithm Tuning
296
+
297
+ # In[22]:
298
+
299
+
300
+ ####################################################
301
+ ################ ALGORITHM TUNING ##################
302
+ ####################################################
303
+ '''
304
+ Commented Out to Reduce Script Time - Took 20 Minutes to run.
305
+ best n_estimator = 250
306
+ best max_feature = 5
307
+ # Tune Random Forest
308
+ n_estimators = np.array([50,100,150,200,250])
309
+ max_features = np.array([1,2,3,4,5])
310
+ param_grid = dict(n_estimators=n_estimators,max_features=max_features)
311
+ model = RandomForestClassifier()
312
+ kfold = KFold(n_splits=num_folds, random_state=seed)
313
+ grid = GridSearchCV(estimator=model, param_grid=param_grid, scoring=scoring, cv=kfold)
314
+ grid_result = grid.fit(X_train, Y_train)
315
+ print("Best: %f using %s" % (grid_result.best_score_, grid_result.best_params_))
316
+ means = grid_result.cv_results_['mean_test_score']
317
+ stds = grid_result.cv_results_['std_test_score']
318
+ params = grid_result.cv_results_['params']
319
+ for mean, stdev, param in zip(means, stds, params):
320
+ print("%f (%f) with: %r" % (mean, stdev, param))
321
+ '''
322
+
323
+
324
+ # ##7. Finalize Model
325
+
326
+ # In[23]:
327
+
328
+
329
+ ####################################################
330
+ ################# FINALIZE MODEL ###################
331
+ ####################################################
332
+ # 5. Finalize Model
333
+ # a) Predictions on validation dataset - Random Forest
334
+ random_forest = RandomForestClassifier(n_estimators=250,max_features=5)
335
+ random_forest.fit(X_train, Y_train)
336
+ predictions = random_forest.predict(X_validation)
337
+ print("Accuracy: %s%%" % (100*accuracy_score(Y_validation, predictions)))
338
+ print(confusion_matrix(Y_validation, predictions))
339
+ print(classification_report(Y_validation, predictions))
340
+
AdultNoteBook/Kernels/ExtraTrees/10-accurate-predictions-with-20-test-data.ipynb ADDED
The diff for this file is too large to render. See raw diff
 
AdultNoteBook/Kernels/ExtraTrees/10-accurate-predictions-with-20-test-data.py ADDED
@@ -0,0 +1,195 @@
1
+ #!/usr/bin/env python
2
+ # coding: utf-8
3
+
4
+ # In[1]:
5
+
6
+
7
+ import pandas as pd
8
+ import seaborn as sns
9
+ import matplotlib.pyplot as plt
10
+ import numpy as np
11
+ get_ipython().run_line_magic('matplotlib', 'inline')
12
+
13
+
14
+ # In[2]:
15
+
16
+
17
+ data = pd.read_csv("../input/adult.csv")
18
+
19
+
20
+ # In[3]:
21
+
22
+
23
+ data.head(5)
24
+
25
+
26
+ # In[4]:
27
+
28
+
29
+ data["income"].value_counts()
30
+
31
+
32
+ # In[5]:
33
+
34
+
35
+ data.isnull().values.any()
36
+
37
+
38
+ # In[6]:
39
+
40
+
41
+ data = data.replace("?", np.nan)
42
+
43
+
44
+ # In[7]:
45
+
46
+
47
+ data.isnull().sum()
48
+
49
+
50
+ # In[8]:
51
+
52
+
53
+ null_data = data[pd.isnull(data).any(axis=1)]
54
+ null_data["income"].value_counts()
55
+
56
+
57
+ # In[9]:
58
+
59
+
60
+ data.dropna(inplace=True)
61
+
62
+
63
+ # In[10]:
64
+
65
+
66
+ bins = (-1, 0, 5, 12, 18, 25, 35, 60, 120)
67
+ group_names = ['Unknown', 'Baby', 'Child', 'Teenager', 'Student', 'Young Adult', 'Adult', 'Senior']
68
+ categories = pd.cut(data["age"], bins, labels=group_names)
69
+ data["age"] = categories
70
+
71
+
72
+ # In[11]:
73
+
74
+
75
+ sns.countplot(y='native.country',data=data)
76
+
77
+
78
+ # In[12]:
79
+
80
+
81
+ sns.countplot(y='education', hue='income', data=data)
82
+
83
+
84
+ # In[13]:
85
+
86
+
87
+ sns.countplot(y='occupation', hue='income', data=data)
88
+
89
+
90
+ # In[14]:
91
+
92
+
93
+ # 'education' is redundant with 'education.num', so drop it along with 'fnlwgt'
94
+ data.drop(["education", "fnlwgt"], axis=1, inplace=True)
95
+
96
+
97
+ # In[15]:
98
+
99
+
100
+ from sklearn import preprocessing
101
+
102
+
103
+ # In[16]:
104
+
105
+
106
+ for f in data:
107
+ if f in ["age", "workclass", "marital.status", "occupation", "relationship", "race", "sex", "native.country", "income"]:
108
+ le = preprocessing.LabelEncoder()
109
+ le = le.fit(data[f])
110
+ data[f] = le.transform(data[f])
111
+ data.head(5)
112
+
113
+
114
+ # In[17]:
115
+
116
+
117
+ y = data["income"]
118
+ X = data.drop(["income"], axis=1)
119
+
120
+
121
+ # In[18]:
122
+
123
+
124
+ from sklearn.ensemble import ExtraTreesClassifier
125
+
126
+
127
+ # In[19]:
128
+
129
+
130
+ forest = ExtraTreesClassifier(n_estimators=100,random_state=0)
131
+
132
+ forest.fit(X, y)
133
+
134
+ importances = forest.feature_importances_
135
+ std = np.std([tree.feature_importances_ for tree in forest.estimators_],axis=0)
136
+ indices = np.argsort(importances)[::-1]
137
+
138
+ plt.figure()
139
+ plt.title("Feature importances")
140
+ plt.bar(range(X.shape[1]), importances[indices], color="r", yerr=std[indices], align="center")
141
+ plt.xticks(range(X.shape[1]), indices)
142
+ plt.xlim([-1, X.shape[1]])
143
+ plt.show()
144
+
145
+
146
+ # In[20]:
147
+
148
+
149
+ X = data.drop(["race", "native.country", "sex", "capital.loss", "workclass", "age"], axis=1)
150
+
151
+
152
+ # In[21]:
153
+
154
+
155
+ from sklearn.model_selection import train_test_split
156
+
157
+
158
+ # In[22]:
159
+
160
+
161
+ X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.20)
162
+
163
+
164
+ # In[23]:
165
+
166
+
167
+ from sklearn.ensemble import RandomForestClassifier
168
+ from sklearn.metrics import accuracy_score
169
+
170
+
171
+ # In[24]:
172
+
173
+
174
+ forest = RandomForestClassifier(n_estimators=10)
175
+ forest.fit(X_train, y_train)
176
+
177
+
178
+ # In[25]:
179
+
180
+
181
+ predictions = forest.predict_proba(X_test)
182
+ predictions = [np.argmax(p) for p in predictions]
183
+
184
+
185
+ # In[26]:
186
+
187
+
188
+ accuracy = accuracy_score(y_test, predictions) * 100
189
+
190
+
191
+ # In[27]:
192
+
193
+
194
+ print("Accuracy: {0}%".format(accuracy))
195
+
AdultNoteBook/Kernels/ExtraTrees/12-compare-all-the-classification-models.ipynb ADDED
The diff for this file is too large to render. See raw diff
 
AdultNoteBook/Kernels/ExtraTrees/12-compare-all-the-classification-models.py ADDED
@@ -0,0 +1,1690 @@
1
+ #!/usr/bin/env python
2
+ # coding: utf-8
3
+
4
+ # # 1. Introduction
5
+ # A census is the procedure of systematically acquiring and recording information about the members of a given population.
6
+ # The census is a special, wide-ranging activity, which takes place once a decade in the entire country. The purpose is to gather information about the general population, in order to present a full and reliable picture of the population in the country - its housing conditions and demographic, social and economic characteristics. The information collected includes data on age, gender, country of origin, marital status, housing conditions, marriage, education, employment, etc.
7
+
8
+ # ## 1.1 Data description
9
+ # This data was extracted from the 1994 Census bureau database by Ronny Kohavi and Barry Becker (Data Mining and Visualization, Silicon Graphics). The prediction task is to determine whether a person makes over $50K a year.
10
+
11
+ # ## 1.2 Features Description
12
+ # **1. Categorical Attributes**
13
+ # * **workclass**: Private, Self-emp-not-inc, Self-emp-inc, Federal-gov, Local-gov, State-gov, Without-pay, Never-worked.
14
+ # - Individual work category
15
+ # * **education**: Bachelors, Some-college, 11th, HS-grad, Prof-school, Assoc-acdm, Assoc-voc, 9th, 7th-8th, 12th, Masters, 1st-4th, 10th, Doctorate, 5th-6th, Preschool.
16
+ # - Individual's highest education degree
17
+ # * **marital-status**: Married-civ-spouse, Divorced, Never-married, Separated, Widowed, Married-spouse-absent, Married-AF-spouse.
18
+ # - Individual marital status
19
+ # * **occupation**: Tech-support, Craft-repair, Other-service, Sales, Exec-managerial, Prof-specialty, Handlers-cleaners, Machine-op-inspct, Adm-clerical, Farming-fishing, Transport-moving, Priv-house-serv, Protective-serv, Armed-Forces.
20
+ # - Individual's occupation
21
+ # * **relationship**: Wife, Own-child, Husband, Not-in-family, Other-relative, Unmarried.
22
+ # - Individual's relation in a family
23
+ # * **race**: White, Asian-Pac-Islander, Amer-Indian-Eskimo, Other, Black.
24
+ # - Race of Individual
25
+ # * **sex**: Female, Male.
26
+ # * **native-country**: United-States, Cambodia, England, Puerto-Rico, Canada, Germany, Outlying-US(Guam-USVI-etc), India, Japan, Greece, South, China, Cuba, Iran, Honduras, Philippines, Italy, Poland, Jamaica, Vietnam, Mexico, Portugal, Ireland, France, Dominican-Republic, Laos, Ecuador, Taiwan, Haiti, Columbia, Hungary, Guatemala, Nicaragua, Scotland, Thailand, Yugoslavia, El-Salvador, Trinadad&Tobago, Peru, Hong, Holand-Netherlands.
27
+ # - Individual's native country
28
+ #
29
+ # **2. Continuous Attributes**
30
+ # * **age**: continuous.
31
+ # - Age of an individual
32
+ # * **fnlwgt**: final weight, continuous.
33
+ # * The weights on the CPS files are controlled to independent estimates of the civilian noninstitutional population of the US. These are prepared monthly for us by Population Division here at the Census Bureau.
34
+ # * **capital-gain**: continuous.
35
+ # * **capital-loss**: continuous.
36
+ # * **hours-per-week**: continuous.
37
+ # - Individual's working hour per week
38
+
39
+ # ## 1.3 Objective of this project
40
+ # The goal of this machine learning project is to predict whether or not a person makes over $50K a year given their demographic attributes. This is a classification problem.
41
+
42
+ # # 2. Import packages
43
+
44
+ # In[ ]:
45
+
46
+
47
+ import numpy as np
48
+ import pandas as pd
49
+ import seaborn as sns
50
+ import warnings
51
+ warnings.filterwarnings("ignore")
52
+ import plotly.offline as py
53
+ import plotly.graph_objs as go
54
+ import plotly.tools as tls
55
+ import matplotlib.pyplot as plt
56
+ get_ipython().run_line_magic('matplotlib', 'inline')
57
+
58
+
59
+ # In[ ]:
60
+
61
+
62
+ from sklearn.model_selection import train_test_split,cross_val_score,GridSearchCV
63
+ from sklearn.linear_model import LogisticRegression
64
+ from sklearn.tree import DecisionTreeClassifier
65
+ from sklearn.svm import SVC
66
+ from sklearn.neighbors import KNeighborsClassifier
67
+ from sklearn.ensemble import RandomForestClassifier,AdaBoostClassifier,BaggingClassifier,ExtraTreesClassifier
68
+ from sklearn.naive_bayes import GaussianNB
69
+ from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
70
+ from sklearn.ensemble import AdaBoostClassifier
71
+ from sklearn.ensemble import GradientBoostingClassifier
72
+ from sklearn.ensemble import RandomForestClassifier
73
+ from sklearn.ensemble import ExtraTreesClassifier
74
+ from sklearn.metrics import accuracy_score,confusion_matrix,roc_auc_score
75
+ from sklearn import metrics
76
+ from datetime import datetime
77
+ from sklearn.feature_selection import RFE
78
+ from sklearn.model_selection import StratifiedKFold
79
+
80
+
81
+ # ## Load Data
82
+
83
+ # In[ ]:
84
+
85
+
86
+ data = pd.read_csv("../input/adult-census-income/adult.csv")
87
+ data.head()
88
+
89
+
90
+ # In[ ]:
91
+
92
+
93
+ data.shape
94
+
95
+
96
+ # # 3. Data Cleaning
97
+
98
+ # Fixing the common nan values
99
+ #
100
+ # NaN values appear as '?' in the data, so we replace them with the most frequent value (mode) of each column. This generalizes well, as we will see from the accuracy of our classifiers.
101
+
102
+ # In[ ]:
103
+
104
+
105
+ attrib, counts = np.unique(data['workclass'], return_counts = True)
106
+ most_freq_attrib = attrib[np.argmax(counts, axis = 0)]
107
+ data['workclass'][data['workclass'] == '?'] = most_freq_attrib
108
+
109
+ attrib, counts = np.unique(data['occupation'], return_counts = True)
110
+ most_freq_attrib = attrib[np.argmax(counts, axis = 0)]
111
+ data['occupation'][data['occupation'] == '?'] = most_freq_attrib
112
+
113
+ attrib, counts = np.unique(data['native.country'], return_counts = True)
114
+ most_freq_attrib = attrib[np.argmax(counts, axis = 0)]
115
+ data['native.country'][data['native.country'] == '?'] = most_freq_attrib
116
+
117
+
118
+ # Let's look at the data again:
119
+
120
+ # In[ ]:
121
+
122
+
123
+ # for later use
124
+ data_num = data.copy()
125
+ data1 = data.copy()
126
+ data.head(10)
127
+
128
+
129
+ # # 4. Feature Engineering
130
+
131
+ # Education
132
+ #
133
+ # 9th, 10th, 11th and 12th come under HS-grad but are listed separately
134
+ # Create an elementary_school category for 1st-4th, 5th-6th and 7th-8th
135
+ #
136
+ # Marital Status
137
+ #
138
+ #
139
+ # Married-civ-spouse, Married-spouse-absent and Married-AF-spouse come under the Married category
140
+ # Divorced and Separated come under the Separated category.
141
+ #
142
+ # Workclass
143
+ #
144
+ # Self-emp-not-inc and Self-emp-inc come under the Self_employed category
145
+ # Local-gov, State-gov and Federal-gov come under the Govt_employees category
146
+ #
147
+
148
+ # In[ ]:
149
+
150
+
151
+ hs_grad = ['HS-grad','11th','10th','9th','12th']
152
+ elementary = ['1st-4th','5th-6th','7th-8th']
153
+
154
+ # replace elements in list.
155
+ data1['education'].replace(to_replace = hs_grad,value = 'HS-grad',inplace = True)
156
+ data1['education'].replace(to_replace = elementary,value = 'elementary_school',inplace = True)
157
+
158
+ data1['education'].value_counts()
159
+
160
+
161
+ # In[ ]:
162
+
163
+
164
+ married= ['Married-spouse-absent','Married-civ-spouse','Married-AF-spouse']
165
+ separated = ['Separated','Divorced']
166
+
167
+ #replace elements in list.
168
+ data1['marital.status'].replace(to_replace = married ,value = 'Married',inplace = True)
169
+ data1['marital.status'].replace(to_replace = separated,value = 'Separated',inplace = True)
170
+
171
+ data1['marital.status'].value_counts()
172
+
173
+
174
+ # In[ ]:
175
+
176
+
177
+ self_employed = ['Self-emp-not-inc','Self-emp-inc']
178
+ govt_employees = ['Local-gov','State-gov','Federal-gov']
179
+
180
+ #replace elements in list.
181
+ data1['workclass'].replace(to_replace = self_employed ,value = 'Self_employed',inplace = True)
182
+ data1['workclass'].replace(to_replace = govt_employees,value = 'Govt_employees',inplace = True)
183
+
184
+ data1['workclass'].value_counts()
185
+
186
+
187
+ # ### Dropping features we will not use
188
+
189
+ # In[ ]:
190
+
191
+
192
+ del_cols = ['education.num']
193
+ data1.drop(labels = del_cols,axis = 1,inplace = True)
194
+
195
+
196
+ # ### Updating the columns
197
+
198
+ # In[ ]:
199
+
200
+
201
+ num_col_new = ['age','capital.gain', 'capital.loss',
202
+ 'hours.per.week','fnlwgt']
203
+ cat_col_new = ['workclass', 'education', 'marital.status', 'occupation','relationship',
204
+ 'race', 'sex', 'income']
205
+
206
+
207
+ # # 5. Pipeline
208
+
209
+ # In[ ]:
210
+
211
+
212
+ from sklearn.pipeline import Pipeline
213
+ from sklearn.base import TransformerMixin
214
+ from sklearn.preprocessing import MinMaxScaler,StandardScaler
215
+
216
+ scaler = MinMaxScaler()
217
+ pd.DataFrame(scaler.fit_transform(data1[num_col_new]),columns = num_col_new).head(5)
218
+
219
+
220
+ # In[ ]:
221
+
222
+
223
+ class DataFrameSelector(TransformerMixin):
224
+ def __init__(self,attribute_names):
225
+ self.attribute_names = attribute_names
226
+
227
+ def fit(self,X,y = None):
228
+ return self
229
+
230
+ def transform(self,X):
231
+ return X[self.attribute_names]
232
+
233
+
234
+ class num_trans(TransformerMixin):
235
+ def __init__(self):
236
+ pass
237
+
238
+ def fit(self,X,y=None):
239
+ return self
240
+
241
+ def transform(self,X):
242
+ df = pd.DataFrame(X)
243
+ df.columns = num_col_new
244
+ return df
245
+
246
+
247
+
248
+ pipeline = Pipeline([('selector',DataFrameSelector(num_col_new)),
249
+ ('scaler',MinMaxScaler()),
250
+ ('transform',num_trans())])
251
+
252
+
253
+ # In[ ]:
254
+
255
+
256
+ num_df = pipeline.fit_transform(data1)
257
+ num_df.shape
258
+
259
+
260
+ # In[ ]:
261
+
262
+
263
+ # columns which I don't need after creating dummy variables dataframe
264
+ cols = ['workclass_Govt_employess','education_Some-college',
265
+ 'marital-status_Never-married','occupation_Other-service',
266
+ 'race_Black','sex_Male','income_>50K']
267
+
268
+
269
+ # In[ ]:
270
+
271
+
272
+ class dummies(TransformerMixin):
273
+ def __init__(self,cols):
274
+ self.cols = cols
275
+
276
+ def fit(self,X,y = None):
277
+ return self
278
+
279
+ def transform(self,X):
280
+ df = pd.get_dummies(X)
281
+ df_new = df[df.columns.difference(cols)]
282
+ #difference returns the original columns, with the columns passed as argument removed.
283
+ return df_new
284
+
285
+ pipeline_cat=Pipeline([('selector',DataFrameSelector(cat_col_new)),
286
+ ('dummies',dummies(cols))])
287
+ cat_df = pipeline_cat.fit_transform(data1)
288
+ cat_df.shape
289
+
290
+
291
+ # In[ ]:
292
+
293
+
294
+ cat_df['id'] = pd.Series(range(cat_df.shape[0]))
295
+ num_df['id'] = pd.Series(range(num_df.shape[0]))
296
+
297
+
298
+ # In[ ]:
299
+
300
+
301
+ final_df = pd.merge(cat_df,num_df,how = 'inner', on = 'id')
302
+ print(f"Shape of final dataset: {final_df.shape}")
303
+
304
+
305
+ # ## 5.2 Split the dataset
306
+
307
+ # In[ ]:
308
+
309
+
310
+ y = final_df['income_<=50K']
311
+ final_df.drop(labels = ['id','income_<=50K','fnlwgt'],axis = 1,inplace = True)
312
+ X = final_df
313
+
314
+
315
+ # ## 5.3 Take a look to Income class distribution
316
+
317
+ # In[ ]:
318
+
319
+
320
+ sns.countplot(x="income", data= data)
321
+ plt.show()
322
+ data["income"].value_counts()
323
+
324
+
325
+ # ### Implementing the model on the imbalanced data
326
+
327
+ # In[ ]:
328
+
329
+
330
+ X_train1,X_test1,y_train1,y_test1 = train_test_split(X,y,test_size =0.15,random_state = 42)
331
+ #fitting the model
332
+ lr=LogisticRegression()
333
+ lr.fit(X_train1,y_train1)
334
+ # predict
335
+ y_pred4=lr.predict(X_test1)
336
+ print("Accuracy:",metrics.accuracy_score(y_test1, y_pred4))
337
+ print("Precision:",metrics.precision_score(y_test1, y_pred4))
338
+ print("Recall:",metrics.recall_score(y_test1, y_pred4))
339
+ print("F1 score:",metrics.f1_score(y_test1, y_pred4))
340
+ print("AUC :",metrics.roc_auc_score(y_test1, y_pred4))
341
+
342
+
343
+ # ## 5.4 Resampling
344
+ #
345
+ # The main idea of resampling is either to increase the number of samples of the minority class or to decrease the number of samples of the majority class, in order to obtain a fair balance in the number of instances for both classes.
346
+ #
347
+ # There can be two main types of sampling:
348
+ #
349
+ # You can add copies of instances from the minority class which is called over-sampling (or more formally sampling with replacement), or
350
+ # You can delete instances from the majority class, which is called under-sampling.
351
+ #
352
+ #
353
+ # A widely adopted technique for dealing with highly unbalanced datasets is called resampling. It consists of removing samples from the majority class (under-sampling) and / or adding more examples from the minority class (over-sampling).
354
+ #
355
+ # Despite the advantage of balancing classes, these techniques also have their weaknesses (there is no free lunch). The simplest implementation of over-sampling is to duplicate random records from the minority class, which can cause overfitting. In under-sampling, the simplest technique involves removing random records from the majority class, which can cause loss of information.
356
+ #
357
+ # **Under-sampling**
358
+ #
359
+ # Advantages of this approach:
360
+ #
361
+ # It can help improve the runtime of the model and solve the memory problems by reducing the number of training data samples when the training data set is enormous.
362
+ # ![resampling.png](attachment:resampling.png)
363
+
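+ # The cells below implement under-sampling. For contrast, a minimal over-sampling sketch with
+ # imblearn's RandomOverSampler (same old-style fit_sample API the notebook uses; X and y as built above):
+ from imblearn.over_sampling import RandomOverSampler
+ ros = RandomOverSampler(random_state=42)
+ X_ros, y_ros = ros.fit_sample(X, y)   # duplicates minority-class rows until both classes are balanced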
364
+ # In[ ]:
365
+
366
+
367
+ from imblearn.under_sampling import RandomUnderSampler
368
+
369
+ rus = RandomUnderSampler()
370
+ X_rus, y_rus = rus.fit_sample(X, y)
371
+
372
+
373
+ # In[ ]:
374
+
375
+
376
+ X_rus = pd.DataFrame(X_rus)
377
+ X_rus.columns = ['education_Assoc-acdm', 'education_Assoc-voc', 'education_Bachelors',
378
+ 'education_Doctorate', 'education_HS-grad', 'education_Masters',
379
+ 'education_Preschool', 'education_Prof-school',
380
+ 'education_elementary_school', 'gender_Female',
381
+ 'marital-status_Married', 'marital-status_Separated',
382
+ 'marital-status_Widowed', 'occupation_Adm-clerical',
383
+ 'occupation_Armed-Forces', 'occupation_Craft-repair',
384
+ 'occupation_Exec-managerial', 'occupation_Farming-fishing',
385
+ 'occupation_Handlers-cleaners', 'occupation_Machine-op-inspct',
386
+ 'occupation_Priv-house-serv', 'occupation_Prof-specialty',
387
+ 'occupation_Protective-serv', 'occupation_Sales',
388
+ 'occupation_Tech-support', 'occupation_Transport-moving',
389
+ 'race_Amer-Indian-Eskimo', 'race_Asian-Pac-Islander', 'race_Other',
390
+ 'race_White', 'relationship_Husband', 'relationship_Not-in-family',
391
+ 'relationship_Other-relative', 'relationship_Own-child',
392
+ 'relationship_Unmarried', 'relationship_Wife',
393
+ 'workclass_Govt_employees', 'workclass_Never-worked',
394
+ 'workclass_Private', 'workclass_Self_employed', 'workclass_Without-pay',
395
+ 'age', 'capital-gain', 'capital-loss', 'hours-per-week']
396
+ y_rus = pd.DataFrame(y_rus)
397
+ y_rus.columns = ["income"]
398
+
399
+
400
+ # In[ ]:
401
+
402
+
403
+ sns.countplot(x=y_rus["income"])
404
+ plt.show()
405
+
406
+
407
+ # In[ ]:
408
+
409
+
410
+ X_train,X_test,y_train,y_test = train_test_split(X_rus,y_rus,test_size =0.15,random_state = 42)
411
+
412
+
413
+ # ## 5.5 Baseline models
414
+
415
+ # In[ ]:
416
+
417
+
418
+ # Spot-Check Algorithms
419
+ def GetBasedModel():
420
+ basedModels = []
421
+ basedModels.append(('LR' , LogisticRegression()))
422
+ basedModels.append(('LDA' , LinearDiscriminantAnalysis()))
423
+ basedModels.append(('KNN' , KNeighborsClassifier()))
424
+ basedModels.append(('CART' , DecisionTreeClassifier()))
425
+ basedModels.append(('NB' , GaussianNB()))
426
+ basedModels.append(('AB' , AdaBoostClassifier()))
427
+ basedModels.append(('GBM' , GradientBoostingClassifier()))
428
+ basedModels.append(('RF' , RandomForestClassifier()))
429
+ basedModels.append(('ET' , ExtraTreesClassifier()))
430
+
431
+
432
+ return basedModels
433
+
434
+
435
+ # In[ ]:
436
+
437
+
438
+ def BasedLine2(X_train, y_train,models):
439
+ # Test options and evaluation metric
440
+ num_folds = 3
441
+ scoring = 'accuracy'
442
+
443
+ results = []
444
+ names = []
445
+ for name, model in models:
446
+ kfold = StratifiedKFold(n_splits=num_folds, random_state=10)
447
+ cv_results = cross_val_score(model, X_train, y_train, cv=kfold, scoring=scoring)
448
+ results.append(cv_results)
449
+ names.append(name)
450
+ msg = "%s: %f (%f)" % (name, cv_results.mean(), cv_results.std())
451
+ print(msg)
452
+
453
+ return names,results
454
+
455
+
456
+ # In[ ]:
457
+
458
+
459
+ models = GetBasedModel()
460
+ names,results = BasedLine2(X_train, y_train,models)
461
+
462
+
463
+ # ## 5.6 Models Scores
464
+
465
+ # In[ ]:
466
+
467
+
468
+ def ScoreDataFrame(names,results):
469
+ def floatingDecimals(f_val, dec=3):
470
+ prc = "{:."+str(dec)+"f}"
471
+
472
+ return float(prc.format(f_val))
473
+
474
+ scores = []
475
+ for r in results:
476
+ scores.append(floatingDecimals(r.mean(),4))
477
+
478
+ scoreDataFrame = pd.DataFrame({'Model':names, 'Score': scores})
479
+ return scoreDataFrame
480
+ basedLineScore = ScoreDataFrame(names,results)
481
+ basedLineScore.sort_values(by='Score', ascending=False)
482
+
483
+
484
+ # # 6. Tuning Machine Learning Models
485
+
486
+ # ## 6.1 Logistic Regression
487
+
488
+ # **Logistic Regression is used when the dependent variable(target) is categorical.**
489
+ #
490
+ # **Model**
491
+ #
492
+ # Output = 0 or 1
493
+ #
494
+ # Hypothesis => Z = WX + B
495
+ #
496
+ # hΘ(x) = sigmoid (Z)
497
+ #
498
+ # **Sigmoid Function**
499
+ # ![logistic.png](attachment:logistic.png)
500
+ #
501
+ # If ‘Z’ goes to infinity, Y(predicted) will become 1 and if ‘Z’ goes to negative infinity, Y(predicted) will become 0.
502
+ #
503
+ # **Cost Function**
504
+ # ![sfd.png](attachment:sfd.png)
505
+
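+ # A tiny numerical sketch of the hypothesis above (the weight, bias and input values here are
+ # made up purely for illustration):
+ def sigmoid_demo(z):
+     return 1.0 / (1.0 + np.exp(-z))
+ 
+ w_demo, b_demo = np.array([0.8, -0.4]), 0.1   # W and B
+ x_demo = np.array([1.5, 2.0])                 # one observation with two features
+ z_demo = np.dot(w_demo, x_demo) + b_demo      # Z = WX + B
+ print(sigmoid_demo(z_demo), int(sigmoid_demo(z_demo) >= 0.5))   # hΘ(x) and the 0/1 prediction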
506
+ # ## Hyperparameter Tuning
507
+ # A hyperparameter is a parameter whose value is set before the learning process begins.
508
+ # Tuning Strategies
509
+ #
510
+ # We will explore two different methods for optimizing hyperparameters:
511
+ #
512
+ # Grid Search
513
+ # Random Search
514
+
515
+ # ### Grid Search vs Random search
516
+ #
517
+ # **Grid search** is a traditional way to perform hyperparameter optimization. It works by searching exhaustively through a specified subset of hyperparameters.
518
+ #
519
+ # **Random search** differs from grid search mainly in that it searches the specified subset of hyperparameters randomly instead of exhaustively. The major benefit being decreased processing time.
520
+ #
521
+ # * There is a tradeoff to decreased processing time, however. We aren’t guaranteed to find the optimal combination of hyperparameters.
522
+ #
523
+ # ![HPO1.png](attachment:HPO1.png)
524
+
525
+ # ### Grid Search
526
+
527
+ # In[ ]:
528
+
529
+
530
+ get_ipython().run_cell_magic('time', '', "from sklearn.model_selection import GridSearchCV\nlr = LogisticRegression(class_weight='balanced',random_state=42)\nparam_grid = { \n 'C': [0.1,0.2,0.3,0.4],\n 'penalty': ['l1', 'l2'],\n 'class_weight':[{0: 1, 1: 1},{ 0:0.67, 1:0.33 },{ 0:0.75, 1:0.25 },{ 0:0.8, 1:0.2 }]}\nCV_rfc = GridSearchCV(estimator=lr, param_grid=param_grid, cv= 5)\nCV_rfc.fit(X_train, y_train)\nprint(CV_rfc.best_params_)")
531
+
532
+
533
+ # **C : Inverse of regularization strength**
534
+ #
535
+ # We use parameter C as our regularization parameter, where C = 1/λ.
536
+ #
537
+ # Lambda (λ) controls the trade-off between allowing the model to increase its complexity as much as it wants and trying to keep it simple. For example, if λ is very low or 0, the model has enough power to increase its complexity (overfit) by assigning big values to the weights of each parameter. If, on the other hand, we increase the value of λ, the model will tend to underfit, as it becomes too simple.
538
+ #
539
+ # * Parameter C works the other way around. For small values of C, we increase the regularization strength, which creates simple models that underfit the data. For big values of C, we lower the strength of regularization, which implies the model is allowed to increase its complexity and therefore overfit the data.
540
+ #
541
+ # **L2 Regularization or Ridge Regularization**
542
+ #
543
+ # * Ridge regression adds “squared magnitude” of coefficient as penalty term to the loss function. Here the highlighted part represents L2 regularization element.
544
+ # ![ewr.png](attachment:ewr.png)
545
+ #
546
+ # **L1 Regularization or Lasso**
547
+ #
548
+ # * Lasso Regression (Least Absolute Shrinkage and Selection Operator) adds “absolute value of magnitude” of coefficient as penalty term to the loss function.
549
+ # ![tre.png](attachment:tre.png)
550
+ #
551
+ # The key difference between these techniques is that Lasso shrinks the less important features' coefficients to zero, thus removing some features altogether. So this works well for feature selection when we have a huge number of features.
552
+ #
553
+ # **Class weight**
554
+ #
555
+ # * If we have highly imbalanced classes and have not addressed it during preprocessing, we have the option of using the class_weight parameter to weight the classes and make certain we have a balanced mix of each class. Balanced class weights are given by n_samples / (n_classes * np.bincount(y))
556
+
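+ # A quick sketch of that balanced-weight formula, using the 0/1 income label y built above:
+ class_counts = np.bincount(y.astype(int))                  # samples per class
+ balanced_w = len(y) / (len(class_counts) * class_counts)   # n_samples / (n_classes * np.bincount(y))
+ print(dict(zip([0, 1], balanced_w)))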
557
+ # In[ ]:
558
+
559
+
560
+ get_ipython().run_cell_magic('time', '', "#fitting the model\nlr1=LogisticRegression(C=0.4, random_state=4 ,penalty='l1', class_weight={0:1,1:1})\nlr1.fit(X_train,y_train)\n# predict \ny_pred1=lr1.predict(X_test)")
561
+
562
+
563
+ # ### Evaluation of logistic regression(Grid Search)
564
+ # **Confusion Matrix**
565
+
566
+ # In[ ]:
567
+
568
+
569
+ cf_matrix = confusion_matrix(y_test, y_pred1)
570
+ sns.heatmap(pd.DataFrame(cf_matrix), annot=True, cmap="YlGnBu" ,fmt='g')
571
+ plt.tight_layout()
572
+ plt.title('Confusion matrix', y=1.1)
573
+ plt.ylabel('Actual label')
574
+ plt.xlabel('Predicted label')
575
+ plt.show()
576
+
577
+
578
+ # In[ ]:
579
+
580
+
581
+ print("Accuracy:",metrics.accuracy_score(y_test, y_pred1))
582
+ print("Precision:",metrics.precision_score(y_test, y_pred1))
583
+ print("Recall:",metrics.recall_score(y_test, y_pred1))
584
+ print("F1 score:",metrics.f1_score(y_test, y_pred1))
585
+ print("AUC :",metrics.roc_auc_score(y_test, y_pred1))
586
+
587
+
588
+ # **Precision:**
589
+ # Precision is about being precise, i.e., how often the model is correct when it makes a positive prediction.
590
+ # Precision can be thought of as a measure of a classifier's exactness.
591
+ # ![ret.png](attachment:ret.png)
592
+ #
593
+ # **Recall:**
594
+ # Out of all the actual positive instances, how many we predicted correctly. It should be as high as possible.
595
+ # Recall can be thought of as a measure of a classifier's completeness.
596
+ # ![ytu.png](attachment:ytu.png)
597
+ #
598
+ # **F1 score**
599
+ # It is difficult to compare two models with low precision and high recall or vice versa. So to make them comparable, we use F-Score.
600
+ #
601
+ # The F-score measures Recall and Precision at the same time. It uses the harmonic mean in place of the arithmetic mean, punishing extreme values more.
602
+ # ![uyt.png](attachment:uyt.png)
603
+
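+ # A small sketch tying these formulas back to the confusion matrix computed above
+ # (rows of cf_matrix are actual labels, columns are predicted labels):
+ tn, fp, fn, tp = cf_matrix.ravel()
+ precision_manual = tp / (tp + fp)
+ recall_manual = tp / (tp + fn)
+ f1_manual = 2 * precision_manual * recall_manual / (precision_manual + recall_manual)
+ print(precision_manual, recall_manual, f1_manual)   # should match the sklearn values printed above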
604
+ # ### ROC Curve
605
+
606
+ # An ROC curve (receiver operating characteristic curve) is a graph showing the performance of a classification model at all classification thresholds. This curve plots two parameters:
607
+ #
608
+ # True Positive Rate
609
+ # False Positive Rate
610
+ # ![gfh.png](attachment:gfh.png)
611
+
612
+ # In[ ]:
613
+
614
+
615
+ fpr, tpr, _ = metrics.roc_curve(y_test, y_pred1)
616
+ auc = metrics.roc_auc_score(y_test, y_pred1)
617
+ plt.plot(fpr,tpr,label="data 1, auc="+str(auc))
618
+ plt.legend(loc=4)
619
+ plt.plot([0, 1], [0, 1],'r--')
620
+ plt.xlabel('False Positive Rate')
621
+ plt.ylabel('True Positive Rate')
622
+ plt.title('Receiver operating characteristic')
623
+ plt.show()
624
+
625
+
626
+ # ### Random search
627
+
628
+ # In[ ]:
629
+
630
+
631
+ get_ipython().run_cell_magic('time', '', "from sklearn.model_selection import RandomizedSearchCV\nlr = LogisticRegression(class_weight='balanced',random_state=42)\nparam_grid = { \n 'C': [0.1,0.2,0.3,0.4],\n 'penalty': ['l1', 'l2'],\n 'class_weight':[{0: 1, 1: 1},{ 0:0.67, 1:0.33 },{ 0:0.75, 1:0.25 },{ 0:0.8, 1:0.2 }]}\nCV_rfc = RandomizedSearchCV(estimator=lr, param_distributions=param_grid, cv= 5,random_state=1)\nCV_rfc.fit(X_train, y_train)\nprint(CV_rfc.best_params_)")
632
+
633
+
634
+ # In[ ]:
635
+
636
+
637
+ get_ipython().run_cell_magic('time', '', "#fitting the model\nlr2=LogisticRegression(C=0.3, random_state=4 ,penalty='l2', class_weight={0:1,1:1})\nlr2.fit(X_train,y_train)\n# predict \ny_pred2=lr2.predict(X_test)")
638
+
639
+
640
+ # ### Evaluation of logistic regression(Random Search)
641
+
642
+ # #### Confusion Matrix
643
+
644
+ # In[ ]:
645
+
646
+
647
+ cf_matrix = confusion_matrix(y_test, y_pred2)
648
+ sns.heatmap(pd.DataFrame(cf_matrix), annot=True, cmap="YlGnBu" ,fmt='g')
649
+ plt.tight_layout()
650
+ plt.title('Confusion matrix', y=1.1)
651
+ plt.ylabel('Actual label')
652
+ plt.xlabel('Predicted label')
653
+ plt.show()
654
+
655
+
656
+ # In[ ]:
657
+
658
+
659
+ print("Accuracy:",metrics.accuracy_score(y_test, y_pred2))
660
+ print("Precision:",metrics.precision_score(y_test, y_pred2))
661
+ print("Recall:",metrics.recall_score(y_test, y_pred2))
662
+ print("F1 score:",metrics.f1_score(y_test, y_pred2))
663
+ print("AUC :",metrics.roc_auc_score(y_test, y_pred2))
664
+
665
+
666
+ # In[ ]:
667
+
668
+
669
+ fpr, tpr, _ = metrics.roc_curve(y_test, y_pred2)
670
+ auc = metrics.roc_auc_score(y_test, y_pred2)
671
+ plt.plot(fpr,tpr,label="data 1, auc="+str(auc))
672
+ plt.legend(loc=4)
673
+ plt.plot([0, 1], [0, 1],'r--')
674
+ plt.xlabel('False Positive Rate')
675
+ plt.ylabel('True Positive Rate')
676
+ plt.title('Receiver operating characteristic')
677
+ plt.show()
678
+
679
+
680
+ # ## Recursive Feature Elimination
681
+
682
+ # * The Recursive Feature Elimination (or RFE) works by recursively removing attributes and building a model on those attributes that remain.
683
+ # * It uses the model accuracy to identify which attributes (and combination of attributes) contribute the most to predicting the target attribute.
684
+ # * As the name suggests, this method eliminates the worst-performing features for a particular model one after the other until the best subset of features is known.
685
+ # ![vnb.png](attachment:vnb.png)
686
+
687
+ # In[ ]:
688
+
689
+
690
+ get_ipython().run_cell_magic('time', '', 'from sklearn.feature_selection import RFE\n\n# feature extraction\nlr = LogisticRegression()\nrfe = RFE(lr, 15)\nlr3 = rfe.fit(X_train, y_train)\n\nprint("Num Features: ", lr3.n_features_)\nprint("Selected Features: ", lr3.support_)\nprint("Feature Ranking: ", lr3.ranking_)')
691
+
692
+
693
+ # **Features sorted by their rank:**
694
+
695
+ # In[ ]:
696
+
697
+
698
+ feature = list(X_train.columns.values)
699
+ print(sorted(zip(map(lambda x: round(x, 4), lr3.ranking_), feature)))
700
+
701
+
702
+ # In[ ]:
703
+
704
+
705
+
706
+ X_train_f = X_train[['age','capital-gain','capital-loss','education_Bachelors','education_Doctorate','education_Masters',
707
+ 'education_Preschool','education_Prof-school','education_elementary_school',
708
+ 'hours-per-week','occupation_Priv-house-serv','relationship_Not-in-family','relationship_Other-relative'
709
+ ,'relationship_Own-child','relationship_Unmarried']]
710
+
711
+ X_test_f = X_test[['age','capital-gain','capital-loss','education_Bachelors','education_Doctorate','education_Masters',
712
+ 'education_Preschool','education_Prof-school','education_elementary_school',
713
+ 'hours-per-week','occupation_Priv-house-serv','relationship_Not-in-family','relationship_Other-relative'
714
+ ,'relationship_Own-child','relationship_Unmarried']]
715
+
716
+ lr4=LogisticRegression(C=0.4, random_state=4 ,penalty='l2', class_weight={0:1,1:1})
717
+ get_ipython().run_line_magic('time', 'lr4.fit(X_train_f,y_train)')
718
+
719
+
720
+ # In[ ]:
721
+
722
+
723
+ # predict
724
+ y_pred4=lr4.predict(X_test_f)
725
+ cf_matrix = confusion_matrix(y_test, y_pred4)
726
+ sns.heatmap(pd.DataFrame(cf_matrix), annot=True, cmap="YlGnBu" ,fmt='g')
727
+ plt.tight_layout()
728
+ plt.title('Confusion matrix', y=1.1)
729
+ plt.ylabel('Actual label')
730
+ plt.xlabel('Predicted label')
731
+ plt.show()
732
+
733
+ print("Accuracy:",metrics.accuracy_score(y_test, y_pred4))
734
+ print("Precision:",metrics.precision_score(y_test, y_pred4))
735
+ print("Recall:",metrics.recall_score(y_test, y_pred4))
736
+ print("F1 score:",metrics.f1_score(y_test, y_pred4))
737
+ print("AUC :",metrics.roc_auc_score(y_test, y_pred4))
738
+
739
+ fpr, tpr, _ = metrics.roc_curve(y_test, y_pred4)
740
+ auc = metrics.roc_auc_score(y_test, y_pred4)
741
+ plt.plot(fpr,tpr,label="data 1, auc="+str(auc))
742
+ plt.legend(loc=4)
743
+ plt.plot([0, 1], [0, 1],'r--')
744
+ plt.xlabel('False Positive Rate')
745
+ plt.ylabel('True Positive Rate')
746
+ plt.title('Receiver operating characteristic')
747
+ plt.show()
748
+
749
+
750
+ # ## PCA analysis
751
+ # Principal Component Analysis (or PCA) uses linear algebra to transform the dataset into a compressed form.
752
+ #
753
+ # Generally this is called a data reduction technique. A property of PCA is that you can choose the number of dimensions, or principal components, in the transformed result.
754
+ #
755
+ # In this case we will use it to analyse the feature importance
756
+
757
+ # In[ ]:
758
+
759
+
760
+ from sklearn.preprocessing import StandardScaler
761
+ X_std = StandardScaler().fit_transform(X_train)
762
+
763
+
764
+ from sklearn.decomposition import PCA as sklearnPCA
765
+
766
+ sklearn_pca = sklearnPCA(n_components=39)
767
+ Y_sklearn = sklearn_pca.fit_transform(X_std)
768
+
769
+ cum_sum = sklearn_pca.explained_variance_ratio_.cumsum()
770
+
771
+ sklearn_pca.explained_variance_ratio_[:10].sum()
772
+
773
+ cum_sum = cum_sum*100
774
+
775
+ fig, ax = plt.subplots(figsize=(8,8))
776
+ plt.bar(range(39), cum_sum, label='Cumulative Sum of Explained Variance', color='b', alpha=0.5)
777
+
778
+
779
+ # In[ ]:
780
+
781
+
782
+ #Cumulative explained variance
783
+ from sklearn.decomposition import PCA
784
+ pca = PCA(39)
785
+ pca_full = pca.fit(X)
786
+
787
+ plt.plot(np.cumsum(pca_full.explained_variance_ratio_))
788
+ plt.xlabel('# of components')
789
+ plt.ylabel('Cumulative explained variance')
790
+
791
+
792
+ # In[ ]:
793
+
794
+
795
+ get_ipython().run_cell_magic('time', '', '# 26 Principal Components seems good \npca = PCA(n_components=26)\nX_transformed = pca.fit_transform(X_train)\n\nX_train_pca, X_test_pca, y_train_pca, y_test_pca = train_test_split( \n X_transformed, y_train, test_size=0.2, random_state=13)\n\nlr5=LogisticRegression(C=0.4, random_state=4 ,penalty=\'l1\', class_weight={0:1,1:1})\nlr5.fit(X_train_pca, y_train_pca)\n\n# predict \ny_pred =lr5.predict(X_test_pca)\n\ncf_matrix = confusion_matrix(y_test_pca, y_pred)\nsns.heatmap(pd.DataFrame(cf_matrix), annot=True, cmap="YlGnBu" ,fmt=\'g\')\nplt.tight_layout()\nplt.title(\'Confusion matrix\', y=1.1)\nplt.ylabel(\'Actual label\')\nplt.xlabel(\'Predicted label\')\nplt.show()\n\nprint("Accuracy:",metrics.accuracy_score(y_test_pca, y_pred))\nprint("Precision:",metrics.precision_score(y_test_pca, y_pred))\nprint("Recall:",metrics.recall_score(y_test_pca, y_pred))\nprint("F1 score:",metrics.f1_score(y_test_pca, y_pred))\nprint("AUC :",metrics.roc_auc_score(y_test_pca, y_pred))\n\nfpr, tpr, _ = metrics.roc_curve(y_test_pca, y_pred)\nauc = metrics.roc_auc_score(y_test_pca, y_pred)\nplt.plot(fpr,tpr,label="data 1, auc="+str(auc))\nplt.legend(loc=4)\nplt.plot([0, 1], [0, 1],\'r--\')\nplt.xlabel(\'False Positive Rate\')\nplt.ylabel(\'True Positive Rate\')\nplt.title(\'Receiver operating characteristic\')\nplt.show()')
796
+
797
+
798
+ # # Feature Importance
799
+
800
+ # In[ ]:
801
+
802
+
803
+ data1 = pd.DataFrame(X,y)
804
+
805
+ clf = ExtraTreesClassifier(n_estimators=250,
806
+ random_state=2)
807
+
808
+ clf.fit(X_train, y_train)
809
+
810
+
811
+ # Plot feature importance
812
+ feature_importance = clf.feature_importances_
813
+ # make importances relative to max importance
814
+ feature_importance = 100.0 * (feature_importance / feature_importance.max())
815
+ sorted_idx = np.argsort(feature_importance)
816
+ pos = np.arange(sorted_idx.shape[0]) + .5
817
+ plt.figure(figsize=(10,10))
818
+ plt.barh(pos, feature_importance[sorted_idx], align='center')
819
+ plt.yticks(pos, data1.columns[sorted_idx])
820
+ plt.xlabel('Relative Importance')
821
+ plt.title('Variable Importance')
822
+ plt.show()
823
+
824
+
825
+ # In[ ]:
826
+
827
+
828
+ X_train_ef = X_train[['race_Asian-Pac-Islander','education_Assoc-voc', 'workclass_Govt_employees','occupation_Craft-repair',
829
+ 'education_Doctorate','workclass_Self_employed', 'occupation_Adm-clerical','occupation_Sales', 'education_Prof-school',
830
+ 'workclass_Private','race_White', 'occupation_Prof-specialty', 'relationship_Unmarried','marital-status_Separated', 'education_elementary_school',
831
+ 'education_Masters', 'relationship_Wife', 'education_Bachelors','capital-loss', 'occupation_Exec-managerial', 'gender_Female',
832
+ 'relationship_Not-in-family', 'education_HS-grad','relationship_Own-child', 'capital-gain', 'relationship_Husband',
833
+ 'marital-status_Married', 'hours-per-week', 'age']]
834
+
835
+ X_test_ef = X_test[['race_Asian-Pac-Islander','education_Assoc-voc', 'workclass_Govt_employees',
836
+ 'occupation_Craft-repair', 'education_Doctorate','workclass_Self_employed', 'occupation_Adm-clerical',
837
+ 'occupation_Sales', 'education_Prof-school', 'workclass_Private','race_White', 'occupation_Prof-specialty', 'relationship_Unmarried',
838
+ 'marital-status_Separated', 'education_elementary_school','education_Masters', 'relationship_Wife', 'education_Bachelors',
839
+ 'capital-loss', 'occupation_Exec-managerial', 'gender_Female','relationship_Not-in-family', 'education_HS-grad',
840
+ 'relationship_Own-child', 'capital-gain', 'relationship_Husband','marital-status_Married', 'hours-per-week', 'age']]
841
+
842
+
843
+ # In[ ]:
844
+
845
+
846
+ get_ipython().run_cell_magic('time', '', 'lr5=LogisticRegression(C=0.4, random_state=4 ,penalty=\'l1\', class_weight={0:1,1:1})\nlr5.fit(X_train_ef, y_train)\n\n# predict \ny_pred =lr5.predict(X_test_ef)\n\ncf_matrix = confusion_matrix(y_test, y_pred)\nsns.heatmap(pd.DataFrame(cf_matrix), annot=True, cmap="YlGnBu" ,fmt=\'g\')\nplt.tight_layout()\nplt.title(\'Confusion matrix\', y=1.1)\nplt.ylabel(\'Actual label\')\nplt.xlabel(\'Predicted label\')\nplt.show()\n\nprint("Accuracy:",metrics.accuracy_score(y_test, y_pred))\nprint("Precision:",metrics.precision_score(y_test, y_pred))\nprint("Recall:",metrics.recall_score(y_test, y_pred))\nprint("F1 score:",metrics.f1_score(y_test, y_pred))\nprint("AUC :",metrics.roc_auc_score(y_test, y_pred))\n\nfpr, tpr, _ = metrics.roc_curve(y_test, y_pred)\nauc = metrics.roc_auc_score(y_test, y_pred)\nplt.plot(fpr,tpr,label="data 1, auc="+str(auc))\nplt.legend(loc=4)\nplt.plot([0, 1], [0, 1],\'r--\')\nplt.xlabel(\'False Positive Rate\')\nplt.ylabel(\'True Positive Rate\')\nplt.title(\'Receiver operating characteristic\')\nplt.show()')
847
+
848
+
849
+ # ### Grid Search
850
+
851
+ # In[ ]:
852
+
853
+
854
+ class GridSearch(object):
855
+
856
+ def __init__(self,X_train,y_train,model,hyperparameters):
857
+
858
+ self.X_train = X_train
859
+ self.y_train = y_train
860
+ self.model = model
861
+ self.hyperparameters = hyperparameters
862
+
863
+ def GridSearch(self):
864
+ # Create grid search with 10-fold cross validation
865
+ cv = 10
866
+ clf = GridSearchCV(self.model,
867
+ self.hyperparameters,
868
+ cv=cv,
869
+ verbose=0,
870
+ n_jobs=-1
871
+ )
872
+ # Fit randomized search
873
+ best_model = clf.fit(self.X_train, self.y_train)
874
+ message = (best_model.best_score_, best_model.best_params_)
875
+ print("Best: %f using %s" % (message))
876
+
877
+ return best_model,best_model.best_params_
878
+
879
+ def Best_Model_Predict(self,X_test):
880
+
881
+ best_model,_ = self.GridSearch()
882
+ pred = best_model.predict(X_test)
883
+ return pred
884
+
885
+
886
+ # ### Random Search
887
+
888
+ # In[ ]:
889
+
890
+
891
+ from scipy.stats import uniform
892
+
893
+ class RandomSearch(object):
894
+
895
+ def __init__(self,X_train,y_train,model,hyperparameters):
896
+
897
+ self.X_train = X_train
898
+ self.y_train = y_train
899
+ self.model = model
900
+ self.hyperparameters = hyperparameters
901
+
902
+ def RandomSearch(self):
903
+ # Create randomized search 10-fold cross validation and 100 iterations
904
+ cv = 10
905
+ clf = RandomizedSearchCV(self.model,
906
+ self.hyperparameters,
907
+ random_state=1,
908
+ n_iter=100,
909
+ cv=cv,
910
+ verbose=0,
911
+ n_jobs=-1
912
+ )
913
+ # Fit randomized search
914
+ best_model = clf.fit(self.X_train, self.y_train)
915
+ message = (best_model.best_score_, best_model.best_params_)
916
+ print("Best: %f using %s" % (message))
917
+
918
+ return best_model,best_model.best_params_
919
+
920
+ def Best_Model_Predict(self,X_test):
921
+
922
+ best_model,_ = self.RandomSearch()
923
+ pred = best_model.predict(X_test)
924
+ return pred
925
+
926
+
927
+ # # 6.2 KNN
928
+
929
+ # **kNN is non-parametric, instance based, lazy algorithm and used in the supervised setting.**
930
+ #
931
+ # * Non-parametric :
932
+ #
933
+ # It means that the algorithm makes no prior assumptions about the functional form of the model, which helps avoid mis-modeling.
934
+ #
935
+ # * Instance based :
936
+ #
937
+ # It means that our algorithm does not explicitly learn a model.
938
+ # Instead, it memorizes the training instances, which are subsequently used as “knowledge” for the prediction.
939
+ #
940
+ # * Lazy algorithm :
941
+ #
942
+ # It means that it does not generalize from the training data ahead of time, i.e. the algorithm has no explicit training phase (or a minimal one). Training is very fast.
943
+ #
944
+ # **kNN Algorithm for Classification**
945
+ #
946
+ # Training element {xi, yi} , Testing point(x)
947
+ #
948
+ # Compute the Distance D(x,xi) to every training element xi.
949
+ # Select the k closest instances xi1, xi2, ..., xik and their labels yi1, yi2, ..., yik.
950
+ # Output the class y* which is most frequent in yi1, yi2, ..., yik.
951
+ #
952
+ #
953
+ #
954
+ # **Significant of “k”**
955
+ #
956
+ # The value of k has a strong effect on kNN performance.
957
+ # k acts as a controller that decides the shape of the decision boundary.
958
+ # A large value of k has the following properties:
959
+ #
960
+ # 1. Smoother decision boundary
961
+ # 2. It provides more voters for the prediction, which implies less effect from outliers.
962
+ # 3. As a result, lower variance and higher bias.
963
+ #
964
+ # ![gh.jpeg](attachment:gh.jpeg)
965
+ #
966
+ # **How to Select k**
967
+ #
968
+ # The simplest solution is Cross Validation.
969
+ # The best method is to try many k values and use cross-validation to see which k gives the best result.
970
+ #
971
+
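+ # A bare-bones sketch of those three steps for a single query point (NumPy only, illustrative;
+ # X_tr and y_tr are assumed to be plain arrays with 0/1 labels):
+ def knn_predict(X_tr, y_tr, x_query, k=5):
+     dists = np.sqrt(((X_tr - x_query) ** 2).sum(axis=1))   # 1. distance D(x, xi) to every training point
+     nearest = np.argsort(dists)[:k]                         # 2. indices of the k closest instances
+     return np.bincount(y_tr[nearest]).argmax()              # 3. most frequent class y* among their labels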
972
+ # In[ ]:
973
+
974
+
975
+ get_ipython().run_cell_magic('time', '', "k_range = list(range(2,15))\nd_metric = ['euclidean','minkowski']\n\nparam_grid = dict(n_neighbors = k_range, metric =d_metric)\n\nknn = KNeighborsClassifier()\n\nKNN_GridSearch = GridSearch(X_train_f, y_train, knn ,param_grid)\ny_pred = KNN_GridSearch.Best_Model_Predict(X_test_f)")
976
+
977
+
978
+ # In[ ]:
979
+
980
+
981
+ cf_matrix = confusion_matrix(y_test, y_pred)
982
+ sns.heatmap(pd.DataFrame(cf_matrix), annot=True, cmap="YlGnBu" ,fmt='g')
983
+ plt.tight_layout()
984
+ plt.title('Confusion matrix', y=1.1)
985
+ plt.ylabel('Actual label')
986
+ plt.xlabel('Predicted label')
987
+ plt.show()
988
+
989
+ print("Accuracy:",metrics.accuracy_score(y_test, y_pred))
990
+ print("Precision:",metrics.precision_score(y_test, y_pred))
991
+ print("Recall:",metrics.recall_score(y_test, y_pred))
992
+ print("F1 score:",metrics.f1_score(y_test, y_pred))
993
+ print("AUC :",metrics.roc_auc_score(y_test, y_pred))
994
+
995
+ fpr, tpr, _ = metrics.roc_curve(y_test, y_pred)
996
+ auc = metrics.roc_auc_score(y_test, y_pred)
997
+ plt.plot(fpr,tpr,label="data 1, auc="+str(auc))
998
+ plt.legend(loc=4)
999
+ plt.plot([0, 1], [0, 1],'r--')
1000
+ plt.xlabel('False Positive Rate')
1001
+ plt.ylabel('True Positive Rate')
1002
+ plt.title('Receiver operating characteristic')
1003
+ plt.show()
1004
+
1005
+
1006
+ # # 6.3 SVM (Support Vector Machine)
1007
+
1008
+ # **What is a Support Vector Machine**
1009
+ # A Support Vector Machine is a supervised machine learning algorithm that can be used for both classification and regression problems. It uses a technique called the kernel trick to transform the data and, based on these transformations, finds an optimal boundary between the possible outputs.
1010
+ #
1011
+ # **How does it work?**
1012
+ # The main idea is to identify the optimal separating hyperplane which maximizes the margin of the training data.
1013
+ #
1014
+ # The goal of SVMs is to find the optimal hyperplane because it not only classifies the existing dataset but also helps predict the class of the unseen data. And the optimal hyperplane is the one which has the biggest margin.
1015
+ #
1016
+ # ![Margin.png](attachment:Margin.png)
1017
+
1018
+ # In[ ]:
1019
+
1020
+
1021
+ get_ipython().run_cell_magic('time', '', "param_grid = [{'gamma': [ 0.1, 1, 10],'C': [ 0.10, 10, 100]}]\n\nsvm = SVC()\n\nsvm_GridSearch = GridSearch(X_train_f, y_train, svm,param_grid )\ny_pred = svm_GridSearch.Best_Model_Predict(X_test_f)")
1022
+
1023
+
1024
+ # **Kernel**
1025
+ #
1026
+ # The kernel parameter selects the type of hyperplane used to separate the data. ‘linear’ uses a linear hyperplane (a line in the case of 2D data); ‘rbf’ and ‘poly’ use a non-linear hyperplane.
1027
+ #
1028
+ # **gamma**
1029
+ #
1030
+ # gamma is a parameter for non-linear kernels. The higher the gamma value, the more closely the model tries to fit the training set; increasing gamma therefore pushes the classifier towards overfitting.
1031
+ #
1032
+ # **C**
1033
+ #
1034
+ # C is the penalty parameter of the error term. It controls the trade-off between a smooth decision boundary and classifying the training points correctly.
1035
+ # A smaller C value leads to a wider margin but more margin violations.
1036
+
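+ # A small self-contained sketch (added for illustration, on synthetic data rather than the
+ # census features) of how gamma controls over-fitting of an RBF-kernel SVC: a very large gamma
+ # memorises the training set, while a moderate gamma generalises better.
+ from sklearn.datasets import make_classification
+ from sklearn.model_selection import train_test_split as tts
+ from sklearn.svm import SVC
+ 
+ Xs, ys = make_classification(n_samples=600, n_features=10, random_state=0)
+ Xs_tr, Xs_te, ys_tr, ys_te = tts(Xs, ys, test_size=0.3, random_state=0)
+ for g in [0.01, 1, 100]:
+     m = SVC(kernel='rbf', C=10, gamma=g).fit(Xs_tr, ys_tr)
+     # a large train/test gap signals over-fitting driven by gamma
+     print("gamma=%g  train=%.2f  test=%.2f" % (g, m.score(Xs_tr, ys_tr), m.score(Xs_te, ys_te)))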
1037
+ # In[ ]:
1038
+
1039
+
1040
+ cf_matrix = confusion_matrix(y_test, y_pred)
1041
+ sns.heatmap(pd.DataFrame(cf_matrix), annot=True, cmap="YlGnBu" ,fmt='g')
1042
+ plt.tight_layout()
1043
+ plt.title('Confusion matrix', y=1.1)
1044
+ plt.ylabel('Actual label')
1045
+ plt.xlabel('Predicted label')
1046
+ plt.show()
1047
+
1048
+ print("Accuracy:",metrics.accuracy_score(y_test, y_pred))
1049
+ print("Precision:",metrics.precision_score(y_test, y_pred))
1050
+ print("Recall:",metrics.recall_score(y_test, y_pred))
1051
+ print("F1 score:",metrics.f1_score(y_test, y_pred))
1052
+ print("AUC :",metrics.roc_auc_score(y_test, y_pred))
1053
+
1054
+ fpr, tpr, _ = metrics.roc_curve(y_test, y_pred)
1055
+ auc = metrics.roc_auc_score(y_test, y_pred)
1056
+ plt.plot(fpr,tpr,label="data 1, auc="+str(auc))
1057
+ plt.legend(loc=4)
1058
+ plt.plot([0, 1], [0, 1],'r--')
1059
+ plt.xlabel('False Positive Rate')
1060
+ plt.ylabel('True Positive Rate')
1061
+ plt.title('Receiver operating characteristic')
1062
+ plt.show()
1063
+
1064
+
1065
+ # # 6.4 LDA
1066
+ #
1067
+ # * Linear Discriminant Analysis (LDA) is most commonly used as a dimensionality-reduction technique in the pre-processing step for pattern-classification and machine learning applications.
1068
+ #
1069
+ # * The goal is to project a dataset onto a lower-dimensional space with good class-separability in order to avoid overfitting (“curse of dimensionality”) and also reduce computational costs.
1070
+ #
1071
+ # In general, dimensionality reduction not only helps reduce computational costs for a given classification task, it can also help avoid overfitting by minimizing the error in parameter estimation (the “curse of dimensionality”).
1072
+ #
1073
+ # ![sdfg.jpg](attachment:sdfg.jpg)
1074
+
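+ # A short sketch (added for illustration, on synthetic data) of LDA used as a supervised
+ # dimensionality-reduction step. Note that LDA can project onto at most n_classes - 1
+ # components, so for a binary target such as income only n_components=1 is meaningful; the
+ # grid of [1,2,3,4] searched below is wider than a two-class problem actually allows
+ # (recent scikit-learn versions reject the larger values).
+ from sklearn.datasets import make_classification
+ from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
+ 
+ Xl, yl = make_classification(n_samples=300, n_features=8, n_informative=4, random_state=0)
+ lda_demo = LinearDiscriminantAnalysis(n_components=1)
+ Xl_1d = lda_demo.fit_transform(Xl, yl)   # shape (300, 1): a single discriminant axis
+ print(Xl_1d.shape)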
1075
+ # In[ ]:
1076
+
1077
+
1078
+ get_ipython().run_cell_magic('time', '', "param_grid = [{'n_components': [1,2,3,4]}]\n\nlda = LinearDiscriminantAnalysis()\n\nlda_GridSearch = GridSearch(X_train, y_train, lda , param_grid )\ny_pred = lda_GridSearch.Best_Model_Predict(X_test)")
1079
+
1080
+
1081
+ # In[ ]:
1082
+
1083
+
1084
+ cf_matrix = confusion_matrix(y_test, y_pred)
1085
+ sns.heatmap(pd.DataFrame(cf_matrix), annot=True, cmap="YlGnBu" ,fmt='g')
1086
+ plt.tight_layout()
1087
+ plt.title('Confusion matrix', y=1.1)
1088
+ plt.ylabel('Actual label')
1089
+ plt.xlabel('Predicted label')
1090
+ plt.show()
1091
+
1092
+ print("Accuracy:",metrics.accuracy_score(y_test, y_pred))
1093
+ print("Precision:",metrics.precision_score(y_test, y_pred))
1094
+ print("Recall:",metrics.recall_score(y_test, y_pred))
1095
+ print("F1 score:",metrics.f1_score(y_test, y_pred))
1096
+ print("AUC :",metrics.roc_auc_score(y_test, y_pred))
1097
+
1098
+ fpr, tpr, _ = metrics.roc_curve(y_test, y_pred)
1099
+ auc = metrics.roc_auc_score(y_test, y_pred)
1100
+ plt.plot(fpr,tpr,label="data 1, auc="+str(auc))
1101
+ plt.legend(loc=4)
1102
+ plt.plot([0, 1], [0, 1],'r--')
1103
+ plt.xlabel('False Positive Rate')
1104
+ plt.ylabel('True Positive Rate')
1105
+ plt.title('Receiver operating characteristic')
1106
+ plt.show()
1107
+
1108
+
1109
+ # # 6.5 Decision Tree
1110
+
1111
+ # **Introduction to Decision Trees**
1112
+ #
1113
+ # * A decision tree is a decision support tool that uses a tree-like graph or model of decisions and their possible consequences, including chance event outcomes, resource costs, and utility. It is one way to display an algorithm that only contains conditional control statements.
1114
+ #
1115
+ # **How does Decision Tree works ?**
1116
+ #
1117
+ # * Decision tree is a type of supervised learning algorithm (having a pre-defined target variable) that is mostly used in classification problems. It works for both categorical and continuous input and output variables. In this technique, we split the population or sample into two or more homogeneous sets (or sub-populations) based on most significant splitter / differentiator in input variables.
1118
+ #
1119
+ # ![asdwq.png](attachment:asdwq.png)
1120
+ #
1121
+ # The core algorithm for building decision trees, called ID3 and developed by J. R. Quinlan, employs a top-down, greedy search through the space of possible branches with no backtracking. ID3 uses entropy and information gain to construct a decision tree.
1122
+ #
1123
+ # **The popular attribute selection measures:**
1124
+ #
1125
+ # Information gain
1126
+ # Gini index
1127
+ #
1128
+ #
1129
+ # **Advantages of CART**
1130
+ #
1131
+ # Simple to understand, interpret, visualize.
1132
+ # Decision trees implicitly perform variable screening or feature selection.
1133
+ # Can handle both numerical and categorical data. Can also handle multi-output problems.
1134
+ # Decision trees require relatively little effort from users for data preparation.
1135
+ # Nonlinear relationships between parameters do not affect tree performance.
1136
+ #
1137
+ # **Disadvantages of CART**
1138
+ #
1139
+ # Decision-tree learners can create over-complex trees that do not generalize the data well. This is called overfitting.
1140
+ # Decision trees can be unstable because small variations in the data might result in a completely different tree being generated. This is called variance, which needs to be lowered by methods like bagging and boosting.
1141
+
1142
+ # ### Implementation of Decision Tree
1143
+
1144
+ # In[ ]:
1145
+
1146
+
1147
+ get_ipython().run_cell_magic('time', '', 'from scipy.stats import randint\nmax_depth_value = [4, 5,6,7,8,9,10,11,12,13]\nmax_features_value = randint(1, 7)\nmin_samples_leaf_value = randint(1, 4)\ncriterion_value = ["gini", "entropy"]\n\nparam_grid = dict(max_depth = max_depth_value,\n max_features = max_features_value,\n min_samples_leaf = min_samples_leaf_value,\n criterion = criterion_value)\n\nCART = DecisionTreeClassifier(random_state=1)\n\nCART_RandSearch = RandomSearch(X_train_f, y_train, CART, param_grid)\nPrediction_CART = CART_RandSearch.Best_Model_Predict(X_test_f)')
1148
+
1149
+
1150
+ # #### How does a tree decide where to split?
1151
+ #
1152
+ # A decision tree splits a node on all available variables and then selects the split which results in the most homogeneous sub-nodes.
1153
+ #
1154
+ # Let’s look at the two most commonly used algorithms in decision tree:
1155
+ #
1156
+ # #### Gini:
1157
+ #
1158
+ # Gini says that if we select two items from a population at random, they should be of the same class; the probability of this is 1 if the population is pure.
1159
+ # Steps to Calculate Gini for a split
1160
+ #
1161
+ # Calculate Gini for sub-nodes, using formula sum of square of probability for success and failure (p^2+q^2).
1162
+ # Calculate Gini for split using weighted Gini score of each node of that split
1163
+ #
1164
+ # Example: we want to segregate 30 students based on the target variable (playing cricket or not).
1165
+ #
1166
+ # ![dt.PNG](attachment:dt.PNG)
1167
+ # Split on Gender:
1168
+ #
1169
+ # Calculate, Gini for sub-node Female = (0.2)*(0.2)+(0.8)*(0.8)=0.68
1170
+ # Gini for sub-node Male = (0.65)*(0.65)+(0.35)*(0.35)=0.55
1171
+ # Calculate weighted Gini for Split Gender = (10/30)*0.68+(20/30)*0.55 = 0.59
1172
+ #
1173
+ # Similar for Split on Class:
1174
+ #
1175
+ # Gini for sub-node Class IX = (0.43)*(0.43)+(0.57)*(0.57)=0.51
1176
+ # Gini for sub-node Class X = (0.56)*(0.56)+(0.44)*(0.44)=0.51
1177
+ # Calculate weighted Gini for Split Class = (14/30)*0.51+(16/30)*0.51 = 0.51
1178
+ #
1179
+ # Above, you can see that the Gini score for the split on Gender is higher than for the split on Class; hence, the node split will take place on Gender.
1180
+ #
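+ # A small self-contained check (added for illustration) that reproduces the Gini numbers
+ # from the worked example above, using the counts given there.
+ def gini_node(p, q):
+     # node purity as defined above: p^2 + q^2 (higher means purer)
+     return p**2 + q**2
+ 
+ gini_female, gini_male = gini_node(0.2, 0.8), gini_node(0.65, 0.35)    # 0.68, ~0.55
+ split_gender = (10/30)*gini_female + (20/30)*gini_male                 # ~0.59
+ gini_ix, gini_x = gini_node(6/14, 8/14), gini_node(9/16, 7/16)         # ~0.51, ~0.51
+ split_class = (14/30)*gini_ix + (16/30)*gini_x                         # ~0.51
+ print(split_gender, split_class)   # the Gender split scores higher, so the node splits on Gender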
1181
+ # #### Information Gain:
1182
+ #
1183
+ # Information gain can be understood as decrease in “uncertainty” of the result.
1184
+ #
1185
+ # Information theory defines a measure of this degree of disorganization in a system, known as entropy. If the sample is completely homogeneous the entropy is zero, and if the sample is equally divided (50%–50%) it has an entropy of one.
1186
+ #
1187
+ # Entropy can be calculated using formula:-
1188
+ # ![et.PNG](attachment:et.PNG)
1189
+ #
1190
+ # Here p and q is probability of success and failure respectively in that node. Entropy is also used with categorical target variable. It chooses the split which has lowest entropy compared to parent node and other splits. The lesser the entropy, the better it is.
1191
+ #
1192
+ # Steps to calculate entropy for a split:
1193
+ #
1194
+ # Calculate entropy of parent node
1195
+ # Calculate entropy of each individual node of split and calculate weighted average of all sub-nodes available in split.
1196
+ #
1197
+ # Example: Let’s use this method to identify best split for student example.
1198
+ #
1199
+ # Entropy for parent node = -(15/30) log2 (15/30) – (15/30) log2 (15/30) = 1. Here 1 shows that it is an impure node.
1200
+ # Entropy for Female node = -(2/10) log2 (2/10) – (8/10) log2 (8/10) = 0.72 and for male node, -(13/20) log2 (13/20) – (7/20) log2 (7/20) = 0.93
1201
+ # Entropy for split Gender = Weighted entropy of sub-nodes = (10/30)*0.72 + (20/30)*0.93 = 0.86
1202
+ # Entropy for Class IX node, -(6/14) log2 (6/14) – (8/14) log2 (8/14) = 0.99 and for Class X node, -(9/16) log2 (9/16) – (7/16) log2 (7/16) = 0.99.
1203
+ # Entropy for split Class = (14/30)*0.99 + (16/30)*0.99 = 0.99
1204
+ #
1205
+ # Above, you can see that the entropy for the split on Gender is the lowest of all, so the tree will split on Gender. We can derive information gain from entropy as 1 − entropy.
1206
+
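+ # A small self-contained check (added for illustration) that reproduces the entropy numbers
+ # from the worked example above.
+ from math import log2
+ 
+ def entropy_node(p, q):
+     # Shannon entropy of a two-class node: -p*log2(p) - q*log2(q)
+     return -p*log2(p) - q*log2(q)
+ 
+ parent = entropy_node(15/30, 15/30)                                     # 1.0 -> impure node
+ e_female, e_male = entropy_node(2/10, 8/10), entropy_node(13/20, 7/20)  # ~0.72, ~0.93
+ split_gender = (10/30)*e_female + (20/30)*e_male                        # ~0.86
+ e_ix, e_x = entropy_node(6/14, 8/14), entropy_node(9/16, 7/16)          # ~0.99, ~0.99
+ split_class = (14/30)*e_ix + (16/30)*e_x                                # ~0.99
+ print(split_gender, split_class)   # the Gender split has the lower entropy, so the tree splits on Gender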
1207
+ # ## Visualize Decision Tree
1208
+
1209
+ # In[ ]:
1210
+
1211
+
1212
+ # Visualize Decision Tree
1213
+ from sklearn import tree
1214
+ from sklearn.tree import export_graphviz
1215
+ from IPython.display import Image
1216
+
1217
+ feature_names = [i for i in X_train_f.columns]
1218
+
1219
+ y_train_str = y_train.astype('str')
1220
+ y_train_str[y_train_str == 1] = "1"
1221
+ y_train_str[y_train_str == 0] ="0"
1222
+ y_train_str = y_train_str.values
1223
+
1224
+ model_1 = tree.DecisionTreeClassifier(criterion='gini', max_depth= 13, max_features= 2, min_samples_leaf= 2)
1225
+ model_1.fit(X_train_f, y_train)
1226
+
1227
+ from io import StringIO  # sklearn.externals.six has been removed from recent scikit-learn releases
1228
+ # In-memory buffer that will hold the exported Graphviz dot data
1229
+ dot_data = StringIO()
1230
+ tree.export_graphviz(model_1, out_file=dot_data,filled=True)
1231
+ import pydotplus
1232
+
1233
+ graph = pydotplus.graphviz.graph_from_dot_data(dot_data.getvalue())
1234
+ # make sure you have graphviz installed and set in path
1235
+ Image(graph.create_png())
1236
+
1237
+
1238
+ # In[ ]:
1239
+
1240
+
1241
+ cf_matrix = confusion_matrix(y_test, Prediction_CART)
1242
+ sns.heatmap(pd.DataFrame(cf_matrix), annot=True, cmap="YlGnBu" ,fmt='g')
1243
+ plt.tight_layout()
1244
+ plt.title('Confusion matrix', y=1.1)
1245
+ plt.ylabel('Actual label')
1246
+ plt.xlabel('Predicted label')
1247
+ plt.show()
1248
+
1249
+ print("Accuracy:",metrics.accuracy_score(y_test, Prediction_CART))
1250
+ print("Precision:",metrics.precision_score(y_test, Prediction_CART))
1251
+ print("Recall:",metrics.recall_score(y_test, Prediction_CART))
1252
+ print("F1 score:",metrics.f1_score(y_test, Prediction_CART))
1253
+ print("AUC :",metrics.roc_auc_score(y_test, Prediction_CART))
1254
+
1255
+ fpr, tpr, _ = metrics.roc_curve(y_test, Prediction_CART)
1256
+ auc = metrics.roc_auc_score(y_test, Prediction_CART)
1257
+ plt.plot(fpr,tpr,label="data 1, auc="+str(auc))
1258
+ plt.legend(loc=4)
1259
+ plt.plot([0, 1], [0, 1],'r--')
1260
+ plt.xlabel('False Positive Rate')
1261
+ plt.ylabel('True Positive Rate')
1262
+ plt.title('Receiver operating characteristic')
1263
+ plt.show()
1264
+
1265
+
1266
+ # # 6.6 Ensemble methods
1267
+ #
1268
+ # **What is an ensemble method?**
1269
+ #
1270
+ # Ensemble is a Machine Learning concept in which the idea is to train multiple models using the same learning algorithm. Ensembles belong to a bigger group of methods, called multi-classifiers, where a set of hundreds or thousands of learners with a common objective are fused together to solve the problem.
1271
+ # When we try to predict the target variable using any machine learning technique, the main causes of difference in actual and predicted values are noise, variance, and bias. Ensemble helps to reduce these factors (except noise, which is irreducible error).
1272
+ #
1273
+ # **Techniques to perform ensemble decision trees:**
1274
+ #
1275
+ # **1. Bagging**
1276
+ #
1277
+ # Bagging is used when the goal is to reduce the variance of a decision tree classifier. The objective is to create several subsets of the data from the training sample, chosen randomly with replacement. Each subset is used to train its own decision tree, so we get an ensemble of different models. The average of the predictions from the different trees is then used, which is more robust than a single decision tree classifier.
1278
+ #
1279
+ # Bagging Steps:
1280
+ #
1281
+ # Suppose there are N observations and M features in training data set. A sample from training data set is taken randomly with replacement.
1282
+ # A subset of M features are selected randomly and whichever feature gives the best split is used to split the node iteratively.
1283
+ # Each tree is grown as large as possible.
1284
+ # Above steps are repeated n times and prediction is given based on the aggregation of predictions from n number of trees.
1285
+ #
1286
+ # Advantages:
1287
+ #
1288
+ # Reduces over-fitting of the model.
1289
+ # Handles higher dimensionality data very well.
1290
+ # Maintains accuracy for missing data.
1291
+ #
1292
+ # Disadvantages:
1293
+ #
1294
+ # Since final prediction is based on the mean predictions from subset trees, it won’t give precise values for the classification and regression model.
1295
+ #
1296
+ # ![dfg.png](attachment:dfg.png)
1297
+ #
1298
+ #
1299
+ # **2. Boosting**
1300
+ #
1301
+ # Boosting is used to create a collection of predictors. In this technique, learners are trained sequentially, with early learners fitting simple models to the data, which is then analysed for errors. Consecutive trees (on random samples) are fit, and at every step the goal is to improve on the accuracy of the prior tree. When an input is misclassified by a hypothesis, its weight is increased so that the next hypothesis is more likely to classify it correctly. This process converts weak learners into a better-performing model.
1302
+ #
1303
+ # Boosting Steps:
1304
+ #
1305
+ # Draw a random subset of training samples d1 without replacement from the training set D to train a weak learner C1
1306
+ # Draw second random training subset d2 without replacement from the training set and add 50 percent of the samples that were previously falsely classified/misclassified to train a weak learner C2
1307
+ # Find the training samples d3 in the training set D on which C1 and C2 disagree to train a third weak learner C3
1308
+ # Combine all the weak learners via majority voting.
1309
+ #
1310
+ # Advantages:
1311
+ #
1312
+ # Supports different loss function (we have used ‘binary:logistic’ for this example).
1313
+ # Works well with interactions.
1314
+ #
1315
+ # Disadvantages:
1316
+ #
1317
+ # Prone to over-fitting.
1318
+ # Requires careful tuning of different hyper-parameters.
1319
+ #
1320
+ # * Bagging to decrease the model’s variance;
1321
+ # * Boosting to decrease the model’s bias, and;
1322
+ # * Stacking to increase the predictive power of the classifier.
1323
+ #
1324
+ # ![boosting.png](attachment:boosting.png)
1325
+
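+ # A compact, self-contained sketch (added for illustration, on synthetic data rather than the
+ # census features) of the two ensemble strategies described above: bagging trains trees on
+ # bootstrap samples and averages them, while boosting trains shallow trees sequentially,
+ # re-weighting the examples the previous learners got wrong.
+ from sklearn.datasets import make_classification
+ from sklearn.ensemble import BaggingClassifier, AdaBoostClassifier
+ from sklearn.tree import DecisionTreeClassifier
+ from sklearn.model_selection import cross_val_score
+ 
+ Xe, ye = make_classification(n_samples=1000, n_features=20, random_state=0)
+ bag = BaggingClassifier(DecisionTreeClassifier(max_depth=None), n_estimators=50, random_state=0)
+ boost = AdaBoostClassifier(DecisionTreeClassifier(max_depth=1), n_estimators=100, random_state=0)
+ print("bagging :", cross_val_score(bag, Xe, ye, cv=5).mean())
+ print("boosting:", cross_val_score(boost, Xe, ye, cv=5).mean())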
1326
+ # ## 6.6.1 Bagging
1327
+
1328
+ # # 6.6.1.1 Random Forest
1329
+ #
1330
+
1331
+ # In[ ]:
1332
+
1333
+
1334
+ get_ipython().run_cell_magic('time', '', "param_grid = [\n{'n_estimators': [10, 25,30], 'max_features': ['auto', 'sqrt', 'log2', None], \n 'max_depth': [10, 20, None], 'bootstrap': [True, False]}\n]\n\nrf = RandomForestClassifier()\n\nrf_GridSearch = GridSearch(X_train, y_train, rf ,param_grid )\ny_pred = rf_GridSearch.Best_Model_Predict(X_test)")
1335
+
1336
+
1337
+ # In[ ]:
1338
+
1339
+
1340
+ cf_matrix = confusion_matrix(y_test, y_pred)
1341
+ sns.heatmap(pd.DataFrame(cf_matrix), annot=True, cmap="YlGnBu" ,fmt='g')
1342
+ plt.tight_layout()
1343
+ plt.title('Confusion matrix', y=1.1)
1344
+ plt.ylabel('Actual label')
1345
+ plt.xlabel('Predicted label')
1346
+ plt.show()
1347
+
1348
+ print("Accuracy:",metrics.accuracy_score(y_test, y_pred))
1349
+ print("Precision:",metrics.precision_score(y_test, y_pred))
1350
+ print("Recall:",metrics.recall_score(y_test, y_pred))
1351
+ print("F1 score:",metrics.f1_score(y_test, y_pred))
1352
+ print("AUC :",metrics.roc_auc_score(y_test, y_pred))
1353
+
1354
+ fpr, tpr, _ = metrics.roc_curve(y_test, y_pred)
1355
+ auc = metrics.roc_auc_score(y_test, y_pred)
1356
+ plt.plot(fpr,tpr,label="data 1, auc="+str(auc))
1357
+ plt.legend(loc=4)
1358
+ plt.plot([0, 1], [0, 1],'r--')
1359
+ plt.xlabel('False Positive Rate')
1360
+ plt.ylabel('True Positive Rate')
1361
+ plt.title('Receiver operating characteristic')
1362
+ plt.show()
1363
+
1364
+
1365
+ # **Set of hyperparameters:**
1366
+ #
1367
+ # * n_estimators = number of trees in the forest
1368
+ # * max_features = max number of features considered for splitting a node
1369
+ # * max_depth = max number of levels in each decision tree
1370
+ # * bootstrap = method for sampling data points (with or without replacement)
1371
+ #
1372
+ #
1373
+ # **max_features:**
1374
+ #
1375
+ # These are the maximum number of features Random Forest is allowed to try in individual tree.
1376
+ #
1377
+ # 1) auto: simply takes all the features that make sense in every tree; no restriction is placed on the individual tree.
1378
+ #
1379
+ # 2) sqrt: takes the square root of the total number of features for each split. For instance, if there are 100 variables, only 10 of them are considered in an individual tree.
1380
+ #
1381
+ # 3) log2: another option, which takes log to the base 2 of the number of input features.
1382
+ #
1383
+ # Increasing max_features generally improves the performance of the model, as each node has a higher number of options to consider. However, it also slows the algorithm down, so you need to strike the right balance and choose an optimal max_features.
1384
+ #
1385
+ # **n_estimators :**
1386
+ #
1387
+ # This is the number of trees you want to build before taking the majority vote or the average of the predictions. A higher number of trees gives better performance but makes the code slower. Choose as high a value as your processor can handle, because it makes the predictions stronger and more stable.
1388
+ #
1389
+ # **min_sample_leaf:**
1390
+ #
1391
+ # A leaf is an end node of a decision tree. A smaller leaf makes the model more prone to capturing noise in the training data, so it is important to try different values to get a good estimate.
1392
+
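+ # A brief sketch (illustration only; the values here are plausible choices, not the tuned ones)
+ # showing how the hyperparameters discussed above map onto the scikit-learn estimator:
+ from sklearn.ensemble import RandomForestClassifier
+ 
+ rf_demo = RandomForestClassifier(
+     n_estimators=200,      # number of trees before averaging / voting
+     max_features='sqrt',   # features considered at each split
+     max_depth=20,          # cap on tree depth
+     min_samples_leaf=2,    # smallest allowed leaf, guards against noisy leaves
+     bootstrap=True,        # sample rows with replacement for each tree
+     n_jobs=-1)
+ # rf_demo.fit(X_train, y_train) would train it in the same way as the grid-searched model above.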
1393
+ # In[ ]:
1394
+
1395
+
1396
+ get_ipython().run_cell_magic('time', '', "param_grid = [\n{'n_estimators': [10, 25,30], 'max_features': ['auto', 'sqrt', 'log2', None], \n 'max_depth': [10, 20, None], 'bootstrap': [True, False]}\n]\n\nrf = RandomForestClassifier()\n\nrf_GridSearch = GridSearch(X_train_ef, y_train, rf ,param_grid )\ny_pred = rf_GridSearch.Best_Model_Predict(X_test_ef)")
1397
+
1398
+
1399
+ # In[ ]:
1400
+
1401
+
1402
+ cf_matrix = confusion_matrix(y_test, y_pred)
1403
+ sns.heatmap(pd.DataFrame(cf_matrix), annot=True, cmap="YlGnBu" ,fmt='g')
1404
+ plt.tight_layout()
1405
+ plt.title('Confusion matrix', y=1.1)
1406
+ plt.ylabel('Actual label')
1407
+ plt.xlabel('Predicted label')
1408
+ plt.show()
1409
+
1410
+ print("Accuracy:",metrics.accuracy_score(y_test, y_pred))
1411
+ print("Precision:",metrics.precision_score(y_test, y_pred))
1412
+ print("Recall:",metrics.recall_score(y_test, y_pred))
1413
+ print("F1 score:",metrics.f1_score(y_test, y_pred))
1414
+ print("AUC :",metrics.roc_auc_score(y_test, y_pred))
1415
+
1416
+ fpr, tpr, _ = metrics.roc_curve(y_test, y_pred)
1417
+ auc = metrics.roc_auc_score(y_test, y_pred)
1418
+ plt.plot(fpr,tpr,label="data 1, auc="+str(auc))
1419
+ plt.legend(loc=4)
1420
+ plt.plot([0, 1], [0, 1],'r--')
1421
+ plt.xlabel('False Positive Rate')
1422
+ plt.ylabel('True Positive Rate')
1423
+ plt.title('Receiver operating characteristic')
1424
+ plt.show()
1425
+
1426
+
1427
+ # ## 6.6.2 Boosting
1428
+
1429
+ # # 6.6.2.1 GradientBoosting
1430
+ #
1431
+ # A special case of boosting in which the errors of the current ensemble are minimized by a gradient-descent procedure: each new tree is fit to the negative gradient (the residuals) of the loss of the model built so far.
1432
+
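+ # A toy sketch (added for illustration; not part of the notebook's pipeline) of the
+ # gradient-boosting idea for squared error: each new tree is fit to the residuals
+ # (the negative gradient) of the current ensemble's predictions.
+ import numpy as np
+ from sklearn.tree import DecisionTreeRegressor
+ 
+ rng = np.random.RandomState(0)
+ X_gb = rng.uniform(0, 6, size=(200, 1))
+ y_gb = np.sin(X_gb).ravel() + rng.normal(0, 0.1, 200)
+ 
+ pred, lr, trees = np.zeros_like(y_gb), 0.1, []
+ for _ in range(100):
+     residual = y_gb - pred                        # negative gradient of squared error
+     t = DecisionTreeRegressor(max_depth=2).fit(X_gb, residual)
+     pred += lr * t.predict(X_gb)                  # take a small step along the fitted gradient
+     trees.append(t)
+ # the final model is the sum of lr * tree(x) over all fitted trees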
1433
+ # In[ ]:
1434
+
1435
+
1436
+ get_ipython().run_cell_magic('time', '', 'learning_rate_ = [.01,.05,.1,.5,1]\nn_estimators_ = [50,100,150,200,250,300]\n\nparam_grid = dict(learning_rate=learning_rate_, n_estimators=n_estimators_)\n\nGB = GradientBoostingClassifier()\n\nGB_GridSearch = RandomSearch(X_train, y_train, GB, param_grid)')
1437
+
1438
+
1439
+ # In[ ]:
1440
+
1441
+
1442
+ Prediction_GB = GB_GridSearch.Best_Model_Predict(X_test)
1443
+
1444
+
1445
+ # In[ ]:
1446
+
1447
+
1448
+ from xgboost import XGBClassifier
1449
1450
+ learning_rate_ = [.01,.05,.1,.5,1]
1451
+ n_estimators_ = [50,100,150,200,250,300]
1452
+
1453
+ param_grid = dict(learning_rate=learning_rate_, n_estimators=n_estimators_)
1454
+
1455
+ GB = XGBClassifier(n_jobs=-1)  # n_jobs belongs on the estimator, not in the search grid
1456
+
1457
+ GB_GridSearch = RandomSearch(X_train, y_train, GB, param_grid)
1458
+ Prediction_GB = GB_GridSearch.Best_Model_Predict(X_test)
1459
+
1460
+
1461
+ # In[ ]:
1462
+
1463
+
1464
+ cf_matrix = confusion_matrix(y_test, Prediction_GB)
1465
+ sns.heatmap(pd.DataFrame(cf_matrix), annot=True, cmap="YlGnBu" ,fmt='g')
1466
+ plt.tight_layout()
1467
+ plt.title('Confusion matrix', y=1.1)
1468
+ plt.ylabel('Actual label')
1469
+ plt.xlabel('Predicted label')
1470
+ plt.show()
1471
+
1472
+ print("Accuracy:",metrics.accuracy_score(y_test, Prediction_GB))
1473
+ print("Precision:",metrics.precision_score(y_test, Prediction_GB))
1474
+ print("Recall:",metrics.recall_score(y_test, Prediction_GB))
1475
+ print("F1 score:",metrics.f1_score(y_test, Prediction_GB))
1476
+ print("AUC :",metrics.roc_auc_score(y_test, Prediction_GB))
1477
+
1478
+
1479
+ fpr, tpr, _ = metrics.roc_curve(y_test, Prediction_GB)
1480
+ auc = metrics.roc_auc_score(y_test, Prediction_GB)
1481
+ plt.plot(fpr,tpr,label="data 1, auc="+str(auc))
1482
+ plt.legend(loc=4)
1483
+ plt.plot([0, 1], [0, 1],'r--')
1484
+ plt.xlabel('False Positive Rate')
1485
+ plt.ylabel('True Positive Rate')
1486
+ plt.title('Receiver operating characteristic')
1487
+ plt.show()
1488
+ #.712
1489
+
1490
+
1491
+ # # 7. Neural Network
1492
+ #
1493
+ # ![nn_diagram_1.png](attachment:nn_diagram_1.png)
1494
+ #
1495
+ # **The Sequential model is a linear stack of layers.**
1496
+ #
1497
+ # **Specifying the input shape**
1498
+ #
1499
+ # The model needs to know what input shape it should expect. For this reason, the first layer in a Sequential model (and only the first, because following layers can do automatic shape inference) needs to receive information about its input shape.
1500
+ #
1501
+ # **Compilation**
1502
+ #
1503
+ # Before training a model, we need to configure the learning process, which is done via the compile method. It receives three arguments:
1504
+ #
1505
+ # An optimizer.
1506
+ # This could be the string identifier of an existing optimizer (such as rmsprop , adam or adagrad).
1507
+ #
1508
+ # A loss function.
1509
+ # This is the objective that the model will try to minimize. It can be the string identifier of an existing loss function (such as categorical_crossentropy or mse), or it can be an objective function.
1510
+ #
1511
+ # A list of metrics.
1512
+ # For any classification problem you will want to set this to metrics=['accuracy']. A metric could be the string identifier of an existing metric or a custom metric function.
1513
+ #
1514
+ # **Training**
1515
+ #
1516
+ # Keras models are trained on NumPy arrays of input data and labels. For training a model, we will use the fit function.
1517
+
1518
+ # In[ ]:
1519
+
1520
+
1521
+ get_ipython().run_cell_magic('time', '', "import keras\nfrom keras.models import Sequential\nfrom keras.layers import Dense\n\nmodel = Sequential()\n\n# Adding the input layer and the first hidden layer\nmodel.add(Dense(units = 16, activation = 'relu', input_dim = 45))\n# Adding the second hidden layer\nmodel.add(Dense(units = 8, activation = 'relu'))\n\n# Adding the output layer\nmodel.add(Dense(units = 1, activation = 'sigmoid'))\n\nmodel.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])\n\nmodel.fit(X_train, y_train, batch_size = 10, epochs = 100)\n\ny_pred1 = model.predict(X_test)\ny_pred1 = (y_pred1 > 0.5)\n")
1522
+
1523
+
1524
+ # In[ ]:
1525
+
1526
+
1527
+ cf_matrix = confusion_matrix(y_test, y_pred1)
1528
+ sns.heatmap(pd.DataFrame(cf_matrix), annot=True, cmap="YlGnBu" ,fmt='g')
1529
+ plt.tight_layout()
1530
+ plt.title('Confusion matrix', y=1.1)
1531
+ plt.ylabel('Actual label')
1532
+ plt.xlabel('Predicted label')
1533
+ plt.show()
1534
+
1535
+ print("Accuracy:",metrics.accuracy_score(y_test, y_pred1))
1536
+ print("Precision:",metrics.precision_score(y_test, y_pred1))
1537
+ print("Recall:",metrics.recall_score(y_test, y_pred1))
1538
+ print("F1 score:",metrics.f1_score(y_test, y_pred1))
1539
+ print("AUC :",metrics.roc_auc_score(y_test, y_pred1))
1540
+
1541
+
1542
+ fpr, tpr, _ = metrics.roc_curve(y_test, y_pred1)
1543
+ auc = metrics.roc_auc_score(y_test, y_pred1)
1544
+ plt.plot(fpr,tpr,label="data 1, auc="+str(auc))
1545
+ plt.legend(loc=4)
1546
+ plt.plot([0, 1], [0, 1],'r--')
1547
+ plt.xlabel('False Positive Rate')
1548
+ plt.ylabel('True Positive Rate')
1549
+ plt.title('Receiver operating characteristic')
1550
+ plt.show()
1551
+
1552
+
1553
+ # In[ ]:
1554
+
1555
+
1556
+
1557
+ m = {
1558
+ "GradientBoosting" : {
1559
+ "Accuracy": 0.8280,
1560
+ "Precision": 0.84150,
1561
+ "Recall": 0.8063,
1562
+ "F1 score": 0.82352,
1563
+ "AUC" : 0.82795,
1564
+ "RunTime(sec)" : 481
1565
+ },
1566
+ "Random Forest" : {
1567
+ "Accuracy": 0.82064,
1568
+ "Precision": 0.86139,
1569
+ "Recall": 0.76217,
1570
+ "F1 score": 0.80875,
1571
+ "AUC" : 0.8203,
1572
+ "RunTime(sec)" : 59.4
1573
+ },
1574
+ "LDA" : {
1575
+ "Accuracy": 0.8001,
1576
+ "Precision": 0.8307,
1577
+ "Recall": 0.7512,
1578
+ "F1 score": 0.7890,
1579
+ "AUC" : 0.7998,
1580
+ "RunTime(sec)" : 1.2
1581
+ },
1582
+ "Decision Tree" : {
1583
+ "Accuracy": 0.7992,
1584
+ "Precision": 0.8607,
1585
+ "Recall": 0.7117,
1586
+ "F1 score": 0.7791,
1587
+ "AUC" : 0.7988,
1588
+ "RunTime(sec)" : 2.1
1589
+ },
1590
+ "SVM" : {
1591
+ "Accuracy": 0.8066,
1592
+ "Precision": 0.8592,
1593
+ "Recall": 0.7312,
1594
+ "F1 score": 0.7900,
1595
+ "AUC" : 0.8063,
1596
+ "RunTime(sec)" : 500.4
1597
+ },
1598
+ "KNN" : {
1599
+ "Accuracy": 0.7924,
1600
+ "Precision": 0.8212,
1601
+ "Recall": 0.74498,
1602
+ "F1 score": 0.7812,
1603
+ "AUC" : 0.7921,
1604
+ "RunTime(sec)" : 144.1
1605
+ },
1606
+ "LR(ET)" : {
1607
+ "Accuracy": 0.8069,
1608
+ "Precision": 0.8156,
1609
+ "Recall": 0.7908,
1610
+ "F1 score": 0.8030,
1611
+ "AUC" : 0.80687,
1612
+ "RunTime(sec)" : .688
1613
+ },
1614
+ "LR(PCA)" : {
1615
+ "Accuracy": 0.7866,
1616
+ "Precision": 0.8162,
1617
+ "Recall": 0.7492,
1618
+ "F1 score": 0.7813,
1619
+ "AUC" : 0.7872,
1620
+ "RunTime(sec)" : .433
1621
+ },
1622
+ "LR(REF)" : {
1623
+ "Accuracy": 0.7984,
1624
+ "Precision": 0.8409,
1625
+ "Recall": 0.7335,
1626
+ "F1 score":0.7835,
1627
+ "AUC" : 0.7980,
1628
+ "RunTime(sec)" : 1.7
1629
+ },
1630
+ "LR(GridSearch)" : {
1631
+ "Accuracy": 0.81636,
1632
+ "Precision": 0.8270,
1633
+ "Recall": 0.7977,
1634
+ "F1 score": 0.8121,
1635
+ "AUC" : 0.8162,
1636
+ "RunTime(sec)" : 23.8
1637
+ },
1638
+ "LR(RandomSearch)":{
1639
+ "Accuracy": 0.8120,
1640
+ "Precision": 0.8302,
1641
+ "Recall": 0.7822,
1642
+ "F1 score": 0.8055,
1643
+ "AUC" : 0.8119,
1644
+ "RunTime(sec)" : 4.9
1645
+ },
1646
+ "Neural Network" : {
1647
+ "Accuracy": 0.8157,
1648
+ "Precision": 0.8466,
1649
+ "Recall": 0.76905,
1650
+ "F1 score": 0.7707,
1651
+ "AUC" : 0.8155,
1652
+ "RunTime(sec)" : 120
1653
+ },
1654
+ }
1655
+ df = pd.DataFrame(m)
1656
+ df.T
1657
+
1658
+
1659
+ # # Summary
1660
+ #
1661
+ # I have chosen two performance criteria for the models:
1662
+ # 1. Model Accuracy
1663
+ # 2. Run time
1664
+ #
1665
+ # #### 1. Model Accuracy
1666
+ #
1667
+ # **GradientBoosting** outperforms all the other models if we take accuracy into consideration: it reaches about 82.8% accuracy (see the table above). But it takes nearly 481 sec (8 min) to pick the hyperparameters, train the model and predict the output.
1668
+ #
1669
+ # There is another way to reduce the runtime by a factor of about 8 without losing much accuracy: use Random Forest for modelling.
1670
+ # **Random Forest** has an accuracy of about 82% with a runtime of roughly 1 minute.
1671
+ #
1672
+ # #### 2. Run time
1673
+ #
1674
+ # If our focus is instead on runtime, then **Logistic Regression** runs in 4.9 sec with an accuracy of about 81.2%.
1675
+ #
1676
+ # But if we want the lowest runtime, we have to give up some accuracy. In that case **LDA** is the best choice, with nearly 80% accuracy and 1.2 sec of runtime.
1677
+
1678
+ # **The results obtained above can be used as a standard point of reference for other comparative studies done in the field of predicting values from census data. This comparative study can further be used as a basis for improving the present classifiers and techniques resulting in making better technologies for accurately predicting income level of an individual.**
1679
+
1680
+ # In[ ]:
1681
+
1682
+
1683
+
1684
+
1685
+
1686
+ # In[ ]:
1687
+
1688
+
1689
+
1690
+
AdultNoteBook/Kernels/ExtraTrees/3-income-classification-using-meta-learning.ipynb ADDED
The diff for this file is too large to render. See raw diff
 
AdultNoteBook/Kernels/ExtraTrees/3-income-classification-using-meta-learning.py ADDED
@@ -0,0 +1,632 @@
 
1
+ #!/usr/bin/env python
2
+ # coding: utf-8
3
+
4
+ # # Adult Census Income Classification using Meta Learning
5
+
6
+ # In[1]:
7
+
8
+
9
+ #importing the required libraries
10
+ import numpy as np
11
+ import pandas as pd
12
+ import matplotlib.pyplot as plt
13
+ import seaborn as sns
14
+ plt.style.use('ggplot')
15
+
16
+ from imblearn.over_sampling import RandomOverSampler
17
+ from sklearn.model_selection import train_test_split
18
+
19
+ from numpy import mean, std
20
+
21
+
22
+ # In[2]:
23
+
24
+
25
+ #reading the dataset and converting it to dataframe
26
+ df = pd.read_csv("../input/adult-census-income/adult.csv")
27
+
28
+
29
+ # In[3]:
30
+
31
+
32
+ #Viewing the top 5 rows of our dataset
33
+ df.head()
34
+
35
+
36
+ # ## Exploratory Data Analysis
37
+
38
+ # **Income - Target column**
39
+
40
+ # In[4]:
41
+
42
+
43
+ sns.countplot(df.income)
44
+
45
+
46
+ # *As we can see, there is a **class imbalance**: the ">50K" class is comparatively much smaller. So, we will do **Random Over-Sampling** during preprocessing.*
47
+ #
48
+
49
+ # **Age**
50
+
51
+ # In[5]:
52
+
53
+
54
+ sns.distplot(df[df.income=='<=50K'].age, color='g')
55
+ sns.distplot(df[df.income=='>50K'].age, color='r')
56
+
57
+
58
+ # *We can observe a rough margin **around 30**. We will divide age into 2 parts, i.e. under 30 and over 30. We need to check whether it is useful for our model during testing.*
59
+
60
+ # **Workclass**
61
+
62
+ # In[6]:
63
+
64
+
65
+ plt.xticks(rotation=90)
66
+ sns.countplot(df.workclass, hue=df.income, palette='tab10')
67
+
68
+
69
+ # *Majority of the data falls under **Private**. So, we will convert this into Private and not-Private.*
70
+
71
+ # **fnlwgt**
72
+
73
+ # In[7]:
74
+
75
+
76
+ sns.distplot(df[df.income=='<=50K'].fnlwgt, color='r')
77
+ sns.distplot(df[df.income=='>50K'].fnlwgt, color='g')
78
+
79
+
80
+ # *This is a very **ambiguous** attribute. Will check during testing.*
81
+
82
+ # **Education**
83
+
84
+ # In[8]:
85
+
86
+
87
+ plt.xticks(rotation=90)
88
+ sns.countplot(df.education, hue=df.income, palette='muted')
89
+
90
+
91
+ # **education.num**
92
+
93
+ # In[9]:
94
+
95
+
96
+ sns.countplot(df["education.num"], hue=df.income)
97
+
98
+
99
+ # **marital.status**
100
+
101
+ # In[10]:
102
+
103
+
104
+ plt.xticks(rotation=90)
105
+ sns.countplot(df['marital.status'], hue=df.income)
106
+
107
+
108
+ # *We observe that the majority of the ">50K" class is **Married-civ-spouse**. So we'll encode it as 1 and the others as 0.*
109
+
110
+ # **occupation**
111
+
112
+ # In[11]:
113
+
114
+
115
+ plt.xticks(rotation=90)
116
+ sns.countplot(df.occupation, hue=df.income, palette='rocket')
117
+
118
+
119
+ # **relationship**
120
+
121
+ # In[12]:
122
+
123
+
124
+ plt.xticks(rotation=90)
125
+ sns.countplot(df.relationship, hue=df.income, palette='muted')
126
+
127
+
128
+ # **race**
129
+
130
+ # In[13]:
131
+
132
+
133
+ plt.xticks(rotation=90)
134
+ sns.countplot(df.race, hue=df.income, palette='Set2')
135
+
136
+
137
+ # **sex**
138
+
139
+ # In[14]:
140
+
141
+
142
+ plt.xticks(rotation=90)
143
+ sns.countplot(df.sex, hue=df.income)
144
+
145
+
146
+ # **capital.gain**
147
+
148
+ # In[15]:
149
+
150
+
151
+ df['capital.gain'].value_counts()
152
+
153
+
154
+ # **capital.loss**
155
+
156
+ # In[16]:
157
+
158
+
159
+ df['capital.loss'].value_counts()
160
+
161
+
162
+ # **hours.per.week**
163
+
164
+ # In[17]:
165
+
166
+
167
+ sns.distplot(df[df.income=='<=50K']['hours.per.week'], color='b')
168
+ sns.distplot(df[df.income=='>50K']['hours.per.week'], color='r')
169
+
170
+
171
+ # **native.country**
172
+
173
+ # In[18]:
174
+
175
+
176
+ df['native.country'].value_counts()
177
+
178
+
179
+ # ## Preprocessing
180
+
181
+ # ### Finding and Handling Missing Data
182
+ #
183
+ # *Observing the dataset, I found that the missing values are marked as "?", so we will now convert them to numpy.nan (null values).*
184
+
185
+ # In[19]:
186
+
187
+
188
+ df[df.select_dtypes("object") =="?"] = np.nan
189
+ nans = df.isnull().sum()
190
+ if len(nans[nans>0]):
191
+ print("Missing values detected.\n")
192
+ print(nans[nans>0])
193
+ else:
194
+ print("No missing values. You are good to go.")
195
+
196
+
197
+ # In[20]:
198
+
199
+
200
+ #majority of the values are "Private". Lets fill the missing values as "Private".
201
+ df.workclass.fillna("Private", inplace=True)
202
+
203
+ df.occupation.fillna(method='bfill', inplace=True)
204
+
205
+ #majority of the values are "United-States". Lets fill the missing values as "United-States".
206
+ df['native.country'].fillna("United-States", inplace=True)
207
+
208
+ print("Handled missing values successfully.")
209
+
210
+
211
+ # In[21]:
212
+
213
+
214
+ from sklearn.preprocessing import LabelEncoder
215
+ from sklearn.utils import column_or_1d
216
+
217
+ class MyLabelEncoder(LabelEncoder):
218
+
219
+ def fit(self, y, arr=[]):
220
+ y = column_or_1d(y, warn=True)
221
+ if arr == []:
222
+ arr=y
223
+ self.classes_ = pd.Series(arr).unique()
224
+ return self
225
+
226
+ le = MyLabelEncoder()
227
+
228
+
229
+ # ### Feature Engineering and Encoding the columns
230
+
231
+ # In[22]:
232
+
233
+
234
+ # age_enc = pd.cut(df.age, bins=(0,25,45,65,100), labels=(0,1,2,3))
235
+ df['age_enc'] = df.age.apply(lambda x: 1 if x > 30 else 0)
236
+
237
+ def prep_workclass(x):
238
+ if x == 'Never-worked' or x == 'Without-pay':
239
+ return 0
240
+ elif x == 'Private':
241
+ return 1
242
+ elif x == 'State-gov' or x == 'Local-gov' or x == 'Federal-gov':
243
+ return 2
244
+ elif x == 'Self-emp-not-inc':
245
+ return 3
246
+ else:
247
+ return 4
248
+
249
+ df['workclass_enc'] = df.workclass.apply(prep_workclass)
250
+
251
+ df['fnlwgt_enc'] = df.fnlwgt.apply(lambda x: 0 if x>200000 else 1)
252
+
253
+ le.fit(df.education, arr=['Preschool', '1st-4th', '5th-6th', '7th-8th', '9th','10th', '11th', '12th',
254
+ 'HS-grad', 'Prof-school', 'Assoc-acdm', 'Assoc-voc', 'Some-college', 'Bachelors', 'Masters', 'Doctorate'])
255
+ df['education_enc'] = le.transform(df.education)
256
+
257
+
258
+ df['education.num_enc'] = df['education.num'].apply(lambda x: 1 if x>=9 else 0)
259
+
260
+ df['marital.status_enc'] = df['marital.status'].apply(lambda x: 1 if x=='Married-civ-spouse' or x == 'Married-AF-spouse' else 0)
261
+
262
+ def prep_occupation(x):
263
+ if x in ['Prof-specialty', 'Exec-managerial', 'Tech-support', 'Protective-serv']:
264
+ return 2
265
+ elif x in ['Sales', 'Craft-repair']:
266
+ return 1
267
+ else:
268
+ return 0
269
+
270
+ df['occupation_enc'] = df.occupation.apply(prep_occupation)
271
+
272
+ df['relationship_enc'] = df.relationship.apply(lambda x: 1 if x in ['Husband', 'Wife'] else 0)
273
+
274
+ df['race_enc'] = df.race.apply(lambda x: 1 if x=='White' else 0)
275
+
276
+ df['sex_enc'] = df.sex.apply(lambda x: 1 if x=='Male' else 0)
277
+
278
+ df['capital.gain_enc'] = pd.cut(df["capital.gain"],
279
+ bins=[-1,0,df[df["capital.gain"]>0]["capital.gain"].median(), df["capital.gain"].max()], labels=(0,1,2)).astype('int64')
280
+
281
+ df['capital.loss_enc'] = pd.cut(df["capital.loss"],
282
+ bins=[-1,0,df[df["capital.loss"]>0]["capital.loss"].median(), df["capital.loss"].max()], labels=(0,1,2)).astype('int64')
283
+
284
+ # hpw_enc = pd.cut(df['hours.per.week'], bins= (0,30,40,53,168), labels=(0,1,2,3))
285
+ df['hours.per.week_enc'] = pd.qcut(df['hours.per.week'], q=5, labels=(0,1,2,3), duplicates='drop').astype('int64')
286
+
287
+ df['native.country_enc'] = df['native.country'].apply(lambda x: 1 if x=='United-States' else 0)
288
+
289
+ df['income_enc'] = df.income.apply(lambda x: 1 if x==">50K" else 0)
290
+
291
+ print("Encoding complete.")
292
+
293
+
294
+ # In[23]:
295
+
296
+
297
+ df.select_dtypes("object").info()
298
+
299
+
300
+ # In[24]:
301
+
302
+
303
+ #dropping encoded columns - education, sex, income
304
+ df.drop(['education', 'sex', 'income'], 1, inplace=True)
305
+
306
+
307
+ # ### Label Encoding without Feature Engineering
308
+
309
+ # In[25]:
310
+
311
+
312
+ for feature in df.select_dtypes("object").columns:
313
+ df[feature]=le.fit_transform(df[feature])
314
+
315
+
316
+ # ### Feature Selection
317
+
318
+ # In[26]:
319
+
320
+
321
+ df.info()
322
+
323
+
324
+ # In[27]:
325
+
326
+
327
+ #Visualizing the pearson correlation with the target class
328
+ pcorr = df.drop('income_enc',1).corrwith(df.income_enc)
329
+ plt.figure(figsize=(10,6))
330
+ plt.title("Pearson Correlation of Features with Income")
331
+ plt.xlabel("Features")
332
+ plt.ylabel("Correlation Coeff")
333
+ plt.xticks(rotation=90)
334
+ plt.bar(pcorr.index, list(map(abs,pcorr.values)))
335
+
336
+
337
+ # From the Pearson correlation plot, we can see that a few columns have a very **low** correlation with the target column, so we'll drop them.
338
+
339
+ # In[28]:
340
+
341
+
342
+ df.drop(['workclass', 'fnlwgt','occupation', 'race', 'native.country', 'fnlwgt_enc', 'race_enc', 'native.country_enc'], 1, inplace=True)
343
+
344
+
345
+ # In[29]:
346
+
347
+
348
+ sns.heatmap(df.corr().apply(abs))
349
+
350
+
351
+ # **Dropping redundant features**
352
+
353
+ # We can see that **education_enc, education.num_enc and education.num** as well as **relationship_enc and marital.status_enc** have **high correlation**. So, we will only keep one of them based on their correlation with income_enc.
354
+ #
355
+ # We also have some redundant features, as we have engineered new features from them (age, capital.gain, etc.).
356
+
357
+ # In[30]:
358
+
359
+
360
+ df.drop(['age', 'education.num_enc', 'education_enc', 'marital.status_enc', 'capital.gain', 'capital.loss', 'hours.per.week'], 1, inplace = True)
361
+
362
+
363
+ # In[31]:
364
+
365
+
366
+ df.info()
367
+
368
+
369
+ # In[32]:
370
+
371
+
372
+ X = df.drop('income_enc', 1)
373
+ y = df.income_enc
374
+
375
+
376
+ # ### Train Test Split (3:1)
377
+
378
+ # In[33]:
379
+
380
+
381
+ X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=42, stratify=y)
382
+
383
+
384
+ # In[34]:
385
+
386
+
387
+ print("No. of rows in training data:",X_train.shape[0])
388
+ print("No. of rows in testing data:",X_test.shape[0])
389
+
390
+
391
+ # ### Random Over Sampling
392
+
393
+ # *We can see the class imbalance in our target. This results in models that have poor predictive performance, specifically for the minority class. So, we need to perform random over-sampling.*
394
+
395
+ # In[35]:
396
+
397
+
398
+ oversample = RandomOverSampler(sampling_strategy=0.5) #50% oversampling
399
+ X_over, y_over = oversample.fit_resample(X_train, y_train)
400
+
401
+
402
+ # In[36]:
403
+
404
+
405
+ y_over.value_counts()
406
+
407
+
408
+ # ## Model Preparation
409
+
410
+ # In[37]:
411
+
412
+
413
+ #Model Imports
414
+ from sklearn.model_selection import StratifiedKFold, cross_val_score
415
+ from sklearn.preprocessing import StandardScaler
416
+ from sklearn.pipeline import make_pipeline
417
+ from sklearn.metrics import classification_report, confusion_matrix
418
+
419
+ from sklearn.svm import SVC
420
+ from sklearn.linear_model import LogisticRegression
421
+ from sklearn.neighbors import KNeighborsClassifier
422
+ from sklearn.tree import DecisionTreeClassifier
423
+ from sklearn.ensemble import AdaBoostClassifier, ExtraTreesClassifier, GradientBoostingClassifier, RandomForestClassifier, StackingClassifier
424
+ from xgboost import XGBClassifier
425
+ from lightgbm import LGBMClassifier
426
+
427
+
428
+ # In[38]:
429
+
430
+
431
+ seed= 42
432
+
433
+
434
+ # In[39]:
435
+
436
+
437
+ models = {
438
+ 'LR':LogisticRegression(random_state=seed),
439
+ 'SVC':SVC(random_state=seed),
440
+ 'AB':AdaBoostClassifier(random_state=seed),
441
+ 'ET':ExtraTreesClassifier(random_state=seed),
442
+ 'GB':GradientBoostingClassifier(random_state=seed),
443
+ 'RF':RandomForestClassifier(random_state=seed),
444
+ 'XGB':XGBClassifier(random_state=seed),
445
+ 'LGBM':LGBMClassifier(random_state=seed)
446
+ }
447
+
448
+
449
+ # In[40]:
450
+
451
+
452
+ # evaluate a given model using cross-validation
453
+ def evaluate_models(model, xtrain, ytrain):
454
+ cv = StratifiedKFold(shuffle=True, random_state=seed)
455
+ scores = cross_val_score(model, xtrain, ytrain, scoring='accuracy', cv=cv, error_score='raise')
456
+ return scores
457
+
458
+ def plot_scores(xval,yval,show_value=False):
459
+ plt.ylim(ymax = max(yval)+0.5, ymin = min(yval)-0.5)
460
+ plt.xticks(rotation=45)
461
+ s = sns.barplot(xval,yval)
462
+ if show_value:
463
+ for x,y in zip(range(len(yval)),yval):
464
+ s.text(x,y+0.1,round(y,2),ha="center")
465
+
466
+
467
+ # In[41]:
468
+
469
+
470
+ # evaluate the baseline models with cross-validation on the training data and store the results
471
+ results, names = list(), list()
472
+ for name, model in models.items():
473
+ scores = evaluate_models(model, X_train, y_train)
474
+ results.append(scores)
475
+ names.append(name)
476
+ print('*%s %.3f (%.3f)' % (name, mean(scores), std(scores)))
477
+
478
+
479
+ # In[42]:
480
+
481
+
482
+ plt.boxplot(results, labels=names, showmeans=True)
483
+ plt.show()
484
+
485
+
486
+ # In[43]:
487
+
488
+
489
+ param_grids = {
490
+ 'LR':{'C':[0.001,0.01,0.1,1,10]},
491
+ 'SVC':{'gamma':[0.01,0.02,0.05,0.08,0.1], 'C':range(1,8)},
492
+
493
+ 'AB':{'learning_rate': [0.05, 0.1, 0.2], 'n_estimators': [100, 200, 500]},
494
+
495
+ 'ET':{'max_depth':[5,8,10,12], 'min_samples_split': [5,9,12],
496
+ 'n_estimators': [100,200,500,800]},
497
+
498
+ 'GB':{'learning_rate': [0.05, 0.1, 0.2], 'max_depth':[3,5,9],
499
+ 'min_samples_split': [5,7,9], 'n_estimators': [100,200,500],
500
+ 'subsample':[0.5,0.7,0.9]},
501
+
502
+ 'RF':{'max_depth':[3,5,9,15], 'n_estimators': [100, 200, 500, 1000],
503
+ 'min_samples_split': [5,9,12]},  # note: RandomForestClassifier has no learning_rate parameter
504
+
505
+ 'XGB':{'max_depth':[3,5,7,9], 'n_estimators': [100, 200, 500],
506
+ 'learning_rate': [0.05, 0.1, 0.2], 'subsample':[0.5,0.7,0.9]},
507
+
508
+ 'LGBM':{'n_estimators': [100,200,500],'learning_rate': [0.05, 0.1, 0.2],
509
+ 'subsample':[0.5,0.7,0.9],'num_leaves': [25,31,50]}
510
+ }
511
+
512
+
513
+ # In[44]:
514
+
515
+
516
+ # !pip install sklearn-deap
517
+ # from evolutionary_search import EvolutionaryAlgorithmSearchCV
518
+
519
+
520
+ # In[45]:
521
+
522
+
523
+ # evaluate the models and store results
524
+ # best_params = []
525
+ # names= []
526
+ # for name, param_grid, model in zip(param_grids.keys(), param_grids.values(), models.values()):
527
+ # eascv = EvolutionaryAlgorithmSearchCV(model, param_grid, verbose=3, cv=3)
528
+ # eascv.fit(X_train,y_train)
529
+ # names.append(name)
530
+ # best_params.append(eascv.best_params_)
531
+ # print(name)
532
+ # print("best score:",eascv.best_score_)
533
+ # print("best params:",eascv.best_params_)
534
+
535
+
536
+ # In[46]:
537
+
538
+
539
+ best_params=[
540
+ {'C': 10},
541
+ {'gamma': 0.1, 'C': 2},
542
+ {'learning_rate': 0.1, 'n_estimators': 500},
543
+ {'max_depth': 12, 'min_samples_split': 9, 'n_estimators': 100},
544
+ {'learning_rate': 0.05, 'max_depth': 3, 'min_samples_split': 9, 'n_estimators': 200, 'subsample': 0.9},
545
+ {'max_depth': 9, 'n_estimators': 200, 'min_samples_split': 5},
546
+ {'max_depth': 3, 'n_estimators': 200, 'learning_rate': 0.1, 'subsample': 0.9},
547
+ {'n_estimators': 100, 'learning_rate': 0.05, 'subsample': 0.9, 'num_leaves': 25}
548
+ ]
549
+
550
+
551
+ # In[47]:
552
+
553
+
554
+ models = [
555
+ ('LR',LogisticRegression(random_state=seed)),
556
+ ('SVC',SVC(random_state=seed)),
557
+ ('AB',AdaBoostClassifier(random_state=seed)),
558
+ ('ET',ExtraTreesClassifier(random_state=seed)),
559
+ ('GB',GradientBoostingClassifier(random_state=seed)),
560
+ ('RF',RandomForestClassifier(random_state=seed)),
561
+ ('XGB',XGBClassifier(random_state=seed)),
562
+ ('LGBM',LGBMClassifier(random_state=seed))
563
+ ]
564
+
565
+
566
+ # In[48]:
567
+
568
+
569
+ for model, param in zip(models, best_params):
570
+ model[1].set_params(**param)
571
+
572
+
573
+ # In[49]:
574
+
575
+
576
+ models.append(('MLModel',StackingClassifier(estimators = models[:-1])))
577
+
578
+
579
+ # In[50]:
580
+
581
+
582
+ scores=[]
583
+ preds=[]
584
+ for model in models:
585
+ model[1].fit(X_train,y_train)
586
+ print(model[0],"trained.")
587
+ scores.append(model[1].score(X_test,y_test))
588
+ preds.append(model[1].predict(X_test))
589
+ print("Results are ready.")
590
+
591
+
592
+ # ## Using Classification Based on Association
593
+
594
+ # In[51]:
595
+
596
+
597
+ get_ipython().system('pip install pyarc==1.0.23')
598
+ get_ipython().system('pip install pyfim')
599
+ from pyarc import CBA, TransactionDB
600
+
601
+
602
+ # In[52]:
603
+
604
+
605
+ txns_train = TransactionDB.from_DataFrame(X_train.join(y_train))
606
+ txns_test = TransactionDB.from_DataFrame(X_test.join(y_test))
607
+
608
+
609
+ cba = CBA(support=0.15, confidence=0.5, algorithm="m1")
610
+ cba.fit(txns_train)
611
+
612
+
613
+ # In[53]:
614
+
615
+
616
+ cba_score = cba.rule_model_accuracy(txns_test)
617
+ scores.append(cba_score)
618
+ models.append(["CBA"])
619
+
620
+
621
+ # In[54]:
622
+
623
+
624
+ model_names= [i[0] for i in models]
625
+ scores = list(map(lambda x: x*100, scores))
626
+
627
+
628
+ # In[55]:
629
+
630
+
631
+ plot_scores(model_names, scores, True)
632
+
AdultNoteBook/Kernels/ExtraTrees/4-a-simple-knn-application.ipynb ADDED
The diff for this file is too large to render. See raw diff
 
AdultNoteBook/Kernels/ExtraTrees/4-a-simple-knn-application.py ADDED
@@ -0,0 +1,203 @@
1
+ #!/usr/bin/env python
2
+ # coding: utf-8
3
+
4
+ # # KNN
5
+
6
+ # # 1. Libraries
7
+
8
+ # In[1]:
9
+
10
+
11
+ import pandas as pd
12
+ import matplotlib.pyplot as plt
13
+ import numpy as np
14
+ import seaborn as sns
15
+ from sklearn import preprocessing
16
+ from sklearn.neighbors import KNeighborsClassifier
17
+ from sklearn.model_selection import train_test_split
18
+ from sklearn import metrics
19
+
20
+ from sklearn.ensemble import ExtraTreesClassifier
21
+
22
+
23
+ # # 2. Dataset
24
+
25
+ # In[2]:
26
+
27
+
28
+ dataset = pd.read_csv('../input/adult.csv')
29
+ display(dataset.head(5))
30
+
31
+
32
+ # In[3]:
33
+
34
+
35
+ dataset.describe()
36
+
37
+
38
+ # In[4]:
39
+
40
+
41
+ display(dataset.info())
42
+
43
+
44
+ # ## 2.1 Analising columns values
45
+ # No line seems to have NaN or empty values. However, further investigation is needed, since '?' values are already visible in several columns.
46
+
47
+ # In[5]:
48
+
49
+
50
+ print("Work class categories \n")
51
+ print(dataset['workclass'].unique())
52
+
53
+
54
+ # In[6]:
55
+
56
+
57
+ print("Education categories")
58
+ education_dataset = dataset[['education','education.num']]
59
+ education_dataset = education_dataset.drop_duplicates()
60
+
61
+ data = {'education': education_dataset['education'], 'education.num': education_dataset['education.num']}
62
+
63
+ education_dataset = pd.DataFrame(data=data)
64
+ education_dataset['education'].astype('category')
65
+ education_dataset.index = education_dataset['education.num']
66
+ print(education_dataset[['education']].sort_values('education.num'))
67
+
68
+
69
+ # The columns 'education' and 'education.num' represent the same information. 'education.num' is the respective label for an education level.
70
+
71
+ # In[7]:
72
+
73
+
74
+ print('marital status')
75
+ print(dataset['marital.status'].unique())
76
+ print(' \n occupation')
77
+ print(dataset['occupation'].unique())
78
+ print(' \n relationship')
79
+ print(dataset['relationship'].unique())
80
+ print(' \n race')
81
+ print(dataset['race'].unique())
82
+ print(' \n native.country')
83
+ print(dataset['native.country'].unique())
84
+
85
+
86
+ # ## 2.2 Dataset cleaning
87
+ #
88
+ # As mentioned before, many columns contain the '?' value for missing information, so every observation that contains a missing value will be removed. We will also drop the columns 'capital.gain' and 'capital.loss', whose meaning is not very clear, and 'fnlwgt', which represents a final weighting of the observation, so the fitting process will give an equivalent result.
89
+
90
+ # In[8]:
91
+
92
+
93
+ #Replacing ? for a nan value to drop every line with it
94
+ dataset = dataset.replace({'?': np.nan})
95
+ dataset = dataset.dropna()
96
+ dataset = dataset.drop(['fnlwgt', 'capital.gain','capital.loss'], axis=1)
97
+
98
+
99
+ # # 3. EDA
100
+
101
+ # In[9]:
102
+
103
+
104
+ ax = dataset['sex'].value_counts().plot(kind="bar")
105
+ ax.set_ylabel("Quantity")
106
+ plt.title("Sex quantities")
107
+ plt.show()
108
+
109
+ ax = dataset['age'].hist()
110
+ ax.set_xlabel("Age")
111
+ ax.set_ylabel("Quantity")
112
+ plt.title("Age quantities")
113
+ plt.show()
114
+
115
+ ax = dataset['education.num'].hist()
116
+ ax.set_xlabel("Education label")
117
+ ax.set_ylabel("Quantity")
118
+ plt.title("Education level quantities")
119
+ plt.show()
120
+
121
+ ax = dataset['race'].value_counts().plot(kind="bar")
122
+ ax.set_ylabel("Quantity")
123
+ plt.title("Race quantities")
124
+ plt.show()
125
+
126
+ dataset['native.country'].value_counts().plot(kind="bar")
127
+ ax.set_ylabel("Quantity")
128
+ plt.title("Countries quantities")
129
+ plt.show()
130
+
131
+ dataset['income'].value_counts().plot(kind="bar")
132
+ ax.set_ylabel("Quantity")
133
+ plt.title("Income quantities")
134
+ plt.show()
135
+
136
+
137
+ # # 4. Fitting a model
138
+
139
+ # In[10]:
140
+
141
+
142
+ #Preparing the features and target
143
+ features = dataset.drop("income", axis=1)
144
+ target = dataset.income
145
+
146
+ #encoding the category features
147
+ features_to_encode = features[['workclass', 'education', 'marital.status',
148
+ 'occupation', 'relationship', 'race', 'sex',
149
+ 'native.country']]
150
+
151
+ features_encoded = features_to_encode.apply(preprocessing.LabelEncoder().fit_transform)
152
+ target = preprocessing.LabelEncoder().fit_transform(target)
153
+ features[['workclass', 'education', 'marital.status',
154
+ 'occupation', 'relationship', 'race', 'sex',
155
+ 'native.country']] = features_encoded
156
+
157
+ print(features.shape, target.shape)
158
+
159
+ display(features.head(5))
160
+
161
+
162
+ # In[11]:
163
+
164
+
165
+ #Dividing train and test data
166
+ X_train, X_test, y_train, y_test = train_test_split(features,target, test_size=0.3)
167
+
168
+
169
+ # In[12]:
170
+
171
+
172
+ #Analising the % importance level in each feature
173
+ forest = ExtraTreesClassifier(n_estimators=250,random_state=0)
174
+ forest.fit(features, target)
175
+ importances = forest.feature_importances_
176
+ feature_importances = pd.DataFrame(importances*100,index = X_train.columns,columns=['importance']).sort_values('importance', ascending=False)
177
+ display(feature_importances)
178
+
179
+
180
+ # In[13]:
181
+
182
+
183
+ # Analysing the accuracy while increasing the value of K
184
+ scores = []
185
+ for i in range(1,30):
186
+ knn = KNeighborsClassifier(n_neighbors=i)
187
+ knn.fit(X_train, y_train)
188
+ y_pred = knn.predict(X_test)
189
+ scores.append(metrics.accuracy_score(y_test, y_pred))
190
+
191
+ sns.lineplot(range(1,30), scores)
192
+ plt.xlabel('Value of K for KNN')
193
+ plt.ylabel('Testing Accuracy')
194
+ plt.title("Respective accuracy when increased the number of K")
195
+ plt.grid(True)
196
+ plt.show()
197
+
198
+
199
+ # In[14]:
200
+
201
+
202
+ print("The best K value in this dataset is {0} - Accuracy = {1}".format(scores.index(max(scores)), max(scores)))
203
+
AdultNoteBook/Kernels/ExtraTrees/5-adult-census-income-eda-and-prediction-87-35.ipynb ADDED
The diff for this file is too large to render. See raw diff
 
AdultNoteBook/Kernels/ExtraTrees/5-adult-census-income-eda-and-prediction-87-35.py ADDED
@@ -0,0 +1,451 @@
1
+ #!/usr/bin/env python
2
+ # coding: utf-8
3
+
4
+ # # Adult Census Income EDA and Prediction
5
+ #
6
+ # In this kernel I work with the UCI Adult Census Income dataset. The prediction task is to determine whether a person makes over $50K a year. I start with an exhaustive EDA, and I then train various models to solve the prediction task.
7
+
8
+ # In[1]:
9
+
10
+
11
+ import numpy as np
12
+ import pandas as pd
13
+ import matplotlib.pyplot as plt
14
+ import seaborn as sns
15
+ get_ipython().run_line_magic('matplotlib', 'inline')
16
+
17
+
18
+ import os
19
+ print(os.listdir("../input"))
20
+
21
+ data = pd.read_csv("../input/adult.csv")
22
+
23
+
24
+ # ## Exploratory Data Analysis
25
+
26
+ # In[2]:
27
+
28
+
29
+ print(len(data))
30
+ data.head(10)
31
+
32
+
33
+ # In[3]:
34
+
35
+
36
+ data.isnull().sum()
37
+
38
+
39
+ # Good.
40
+
41
+ # In[4]:
42
+
43
+
44
+ data.dtypes
45
+
46
+
47
+ # In[5]:
48
+
49
+
50
+ sns.countplot(data['income'])
51
+ plt.show()
52
+
53
+
54
+ # ### Distribution of features
55
+
56
+ # In[6]:
57
+
58
+
59
+ # Sex distribution
60
+ sns.countplot(data['sex'])
61
+ plt.show()
62
+
63
+
64
+ # In[7]:
65
+
66
+
67
+ # Age distribution
68
+ ages = data['age'].hist(bins=max(data['age'])-min(data['age']))
69
+ mean_val = np.mean(data['age'])
70
+ plt.axvline(mean_val, linestyle='dashed', linewidth=2, color='yellow', label='mean age')
71
+ plt.xlabel('age')
72
+ plt.ylabel('count')
73
+ plt.legend()
74
+ plt.show()
75
+
76
+
77
+ # In[8]:
78
+
79
+
80
+ data['hours.per.week'].hist()
81
+ plt.xlabel('hours per week')
82
+ plt.ylabel('count')
83
+ plt.show()
84
+
85
+
86
+ # In[9]:
87
+
88
+
89
+ fig, axs = plt.subplots(ncols=2, nrows=4, figsize=(20, 20))
90
+ plt.subplots_adjust(hspace=0.68)
91
+ fig.delaxes(axs[3][1])
92
+
93
+
94
+ # Workclass
95
+ wc_plot = sns.countplot(data['workclass'], ax=axs[0][0])
96
+ wc_plot.set_xticklabels(wc_plot.get_xticklabels(), rotation=40, ha="right")
97
+
98
+ # Native country
99
+ nc_plot = sns.countplot(data['native.country'], ax=axs[0][1])
100
+ nc_plot.set_xticklabels(nc_plot.get_xticklabels(), rotation=72, ha="right")
101
+
102
+ # Education
103
+ order=['Preschool', '1st-4th', '5th-6th', '7th-8th', '9th', '10th', '11th', '12th', 'HS-grad',
104
+ 'Some-college', 'Assoc-acdm', 'Assoc-voc', 'Bachelors', 'Masters', 'Prof-school', 'Doctorate']
105
+ ed_plot = sns.countplot(data['education'], order=order, ax=axs[1][0])
106
+ ed_plot.set_xticklabels(ed_plot.get_xticklabels(), rotation=40, ha="right")
107
+
108
+ # Marital status
109
+ ms_plot = sns.countplot(data['marital.status'], ax=axs[1][1])
110
+ ms_plot.set_xticklabels(ms_plot.get_xticklabels(), rotation=40, ha="right")
111
+
112
+ # Relationship
113
+ rel_plot = sns.countplot(data['relationship'], ax=axs[2][0])
114
+ rel_plot.set_xticklabels(rel_plot.get_xticklabels(), rotation=40, ha="right")
115
+
116
+ # Race
117
+ race_plot = sns.countplot(data['race'], ax=axs[2][1])
118
+ race_plot.set_xticklabels(race_plot.get_xticklabels(), rotation=40, ha="right")
119
+
120
+ # Occupation
121
+ occ_plot = sns.countplot(data['occupation'], ax=axs[3][0])
122
+ occ_plot.set_xticklabels(occ_plot.get_xticklabels(), rotation=40, ha="right")
123
+
124
+ plt.show()
125
+
126
+
127
+ #
128
+ # ### How do features relate to one another?
129
+
130
+ # In[10]:
131
+
132
+
133
+ plt.figure(figsize=(24, 6))
134
+ ro = sns.countplot(data['occupation'], hue=data['sex'])
135
+ ro.set_xticklabels(ro.get_xticklabels(), rotation=30, ha="right")
136
+ plt.show()
137
+
138
+
139
+ # In[11]:
140
+
141
+
142
+ plt.figure(figsize=(20, 6))
143
+ ro = sns.countplot(data['education'], hue=data['sex'], order=order)
144
+ ro.set_xticklabels(ro.get_xticklabels(), rotation=40, ha="right")
145
+ #ro.set_yscale('log')
146
+ plt.show()
147
+
148
+
149
+ # In[12]:
150
+
151
+
152
+ data['income'] = data['income'].map({'<=50K': 0, '>50K': 1})
153
+
154
+
155
+ # ### How do features relate to income?
156
+
157
+ # In[13]:
158
+
159
+
160
+ fig, axs = plt.subplots(ncols=2, nrows=4, figsize=(24, 28))
161
+ #fig.delaxes(axs[3][1])
162
+ plt.subplots_adjust(hspace=0.4)
163
+
164
+ # education and income
165
+ sns.catplot(x="education", y="income", data=data, kind="bar", height = 6, palette = "muted", order=order, ax=axs[0][0])
166
+ axs[0][0].set_xticklabels(axs[0][0].axes.get_xticklabels(), rotation=40, ha="right")
167
+ axs[0][0].set_ylabel(">50K probability")
168
+
169
+ sns.catplot(x="workclass", y="income", data=data, kind="bar", height = 6, palette = "muted", ax=axs[0][1])
170
+ axs[0][1].set_xticklabels(axs[0][1].axes.get_xticklabels(), rotation=40, ha="right")
171
+ axs[0][1].set_ylabel(">50K probability")
172
+
173
+
174
+ sns.catplot(x="relationship", y="income", data=data, kind="bar", height = 6, palette = "muted", ax=axs[1][0])
175
+ axs[1][0].set_xticklabels(axs[1][0].axes.get_xticklabels(), rotation=40, ha="right")
176
+ axs[1][0].set_ylabel(">50K probability")
177
+
178
+ sns.catplot(x="marital.status", y="income", data=data, kind="bar", height = 6, palette = "muted", ax=axs[1][1])
179
+ axs[1][1].set_xticklabels(axs[1][1].axes.get_xticklabels(), rotation=40, ha="right")
180
+ axs[1][1].set_ylabel(">50K probability")
181
+
182
+ sns.catplot(x="race", y="income", data=data, kind="bar", height = 6, palette = "muted", ax=axs[2][0])
183
+ axs[2][0].set_xticklabels(axs[2][0].axes.get_xticklabels(), rotation=40, ha="right")
184
+ axs[2][0].set_ylabel(">50K probability")
185
+
186
+ sns.catplot(x="native.country", y="income", data=data, kind="bar", height = 6, palette = "muted", ax=axs[2][1])
187
+ axs[2][1].set_xticklabels(axs[2][1].axes.get_xticklabels(), rotation=55, ha="right")
188
+ axs[2][1].set_ylabel(">50K probability")
189
+
190
+ sns.catplot(x="sex", y="income", data=data, kind="bar", height = 6, palette = "muted", ax=axs[3][0])
191
+ axs[3][0].set_xticklabels(axs[3][0].axes.get_xticklabels(), rotation=40, ha="right")
192
+ axs[3][0].set_ylabel(">50K probability")
193
+
194
+ sns.catplot(x="occupation", y="income", data=data, kind="bar", height = 6, palette = "muted", ax=axs[3][1])
195
+ axs[3][1].set_xticklabels(axs[3][1].axes.get_xticklabels(), rotation=40, ha="right")
196
+ axs[3][1].set_ylabel(">50K probability")
197
+
198
+ #ed_income.set_ylabels(">50K probability")
199
+
200
+ for i in range(2,10):
201
+ plt.close(i)
202
+
203
+ plt.show()
204
+
205
+
206
+ # #### Another way of visualizing this
207
+
208
+ # In[14]:
209
+
210
+
211
+ plt.figure(figsize=(20, 6))
212
+ sns.countplot(data['marital.status'], hue=data['income'])
213
+ plt.show()
214
+
215
+
216
+ # ## Data Preparation
217
+ #
218
+ # Now the data needs to be prepared for prediction.
219
+
220
+ # In[15]:
221
+
222
+
223
+ data['sex'] = data['sex'].map({'Male': 1, 'Female': 0})
224
+
225
+
226
+ # In[16]:
227
+
228
+
229
+ data['race'] = data['race'].map({'White': 1, 'Asian-Pac-Islander': 1, 'Black':0, 'Amer-Indian-Eskimo':0, 'Other':0})
230
+ data['relationship'] = data['relationship'].map({'Not-in-family':0, 'Unmarried':0, 'Own-child':0, 'Other-relative':0, 'Husband':1, 'Wife':1})
231
+ data['marital.status'] = data['marital.status'].map({'Widowed':0, 'Divorced':0, 'Separated':0, 'Never-married':0, 'Married-civ-spouse':1, 'Married-AF-spouse':1, 'Married-spouse-absent':0})
232
+
233
+
234
+ # In[17]:
235
+
236
+
237
+ g = sns.heatmap(data[['relationship', 'marital.status']].corr(),annot=True, fmt = ".2f", cmap = "coolwarm")
238
+ plt.show()
239
+
240
+
241
+ # relationship and marital.status contain the same information now, so one of them can be removed
242
+
243
+ # In[18]:
244
+
245
+
246
+ data.drop(['marital.status'], axis=1,inplace=True)
247
+
248
+
249
+ # LabelEncoder can be used to transform the rest of the categorical features.
250
+
251
+ # In[19]:
252
+
253
+
254
+ # data.drop(['workclass', 'education', 'occupation', 'native.country'], axis=1,inplace=True)
255
+
256
+ data.drop(['education'], axis=1,inplace=True)
257
+
258
+ labels = ['workclass', 'occupation', 'native.country']
259
+
260
+ from sklearn.preprocessing import LabelEncoder
261
+ le = LabelEncoder()
262
+ for l in labels:
263
+ data[l]=le.fit_transform(data[l])
264
+
265
+
266
+ # In[20]:
267
+
268
+
269
+ data.head(10)
270
+
271
+
272
+ # The dataset is ready.
273
+
274
+ # ## Prediction
275
+
276
+ # #### Importing the relevant libraries
277
+
278
+ # In[21]:
279
+
280
+
281
+ from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, GradientBoostingClassifier, VotingClassifier, BaggingClassifier, ExtraTreesClassifier
282
+ from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
283
+ from sklearn.linear_model import LogisticRegression
284
+ from sklearn.neighbors import KNeighborsClassifier
285
+
286
+ from sklearn.tree import DecisionTreeClassifier
287
+ from sklearn.naive_bayes import GaussianNB
288
+ from sklearn.model_selection import GridSearchCV, cross_val_score, cross_val_predict, StratifiedKFold, learning_curve, train_test_split, KFold
289
+ # from sklearn.metrics import classification_report
290
+ from sklearn.metrics import confusion_matrix, accuracy_score
291
+ from sklearn.svm import SVC
292
+
293
+
294
+ # #### Preparing data for training and testing with k-fold Cross-Validation
295
+
296
+ # In[22]:
297
+
298
+
299
+ seed = 42
300
+
301
+ from sklearn.preprocessing import StandardScaler
302
+
303
+ X = StandardScaler().fit_transform(data.loc[:, data.columns != 'income'])
304
+ Y = data['income']
305
+
306
+ # X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=test_size, random_state=seed)
307
+
308
+ kf = KFold(n_splits=10, shuffle=True, random_state=seed)
309
+
310
+
311
+ # In[23]:
312
+
313
+
314
+ a = len(data.loc[data.income==0])/len(data)
315
+ print(a)
316
+
317
+
318
+ # One would get a 76% accuracy by just always predicting <=50k. Our model has to do better than that or it's not learning anything.
319
+
320
+ # ### Starting with some simple models
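+ # A minimal sketch of that baseline, assuming scikit-learn's DummyClassifier (not part of the
+ # original kernel; 'baseline' and 'baseline_score' are illustrative names): always predicting the
+ # majority class should land near the 76% figure quoted above.
+ from sklearn.dummy import DummyClassifier
+ baseline = DummyClassifier(strategy='most_frequent')
+ baseline_score = cross_val_score(baseline, X, Y, cv=kf).mean()
+ print("Majority-class baseline accuracy:", round(baseline_score, 4))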
321
+
322
+ # In[24]:
323
+
324
+
325
+
326
+ fig, axs = plt.subplots(ncols=2, nrows=2, figsize=(24, 14))
327
+
328
+
329
+ classifiers = [
330
+ LogisticRegression(solver='newton-cg'),
331
+ KNeighborsClassifier(n_neighbors=17), # Some trial and error I don't show went into this hyperparameter
332
+ LinearDiscriminantAnalysis(),
333
+ GaussianNB()
334
+ ]
335
+
336
+
337
+ for i, c in enumerate(classifiers):
338
+
339
+ x_axs = i%2
340
+ y_axs = int(i/2)
341
+ # print(c)
342
+ print(type(c).__name__)
343
+ pred = cross_val_predict(c, X, Y, cv=kf)
344
+ print("Accuracy score:", round(accuracy_score(Y, pred), 4), '\n')
345
+
346
+ sns.heatmap(confusion_matrix(Y, pred), annot=True, fmt='g', ax=axs[y_axs][x_axs])
347
+ axs[y_axs][x_axs].set_xlabel('Predicted')
348
+ axs[y_axs][x_axs].set_ylabel('Real')
349
+ axs[y_axs][x_axs].set_title(type(c).__name__)
350
+
351
+ plt.show()
352
+
353
+
354
+ # Logistic regression performs best with 84.25% accuracy.
355
+ #
356
+
357
+ # ### More complex models
358
+
359
+ # In[25]:
360
+
361
+
362
+ import warnings
363
+ warnings.filterwarnings(action='ignore')
364
+ fig, axs = plt.subplots(ncols=2, nrows=3, figsize=(24, 21))
365
+
366
+ classifiers = [
367
+ DecisionTreeClassifier(),
368
+ BaggingClassifier(),
369
+ RandomForestClassifier(),
370
+ ExtraTreesClassifier(),
371
+ GradientBoostingClassifier(),
372
+ AdaBoostClassifier()
373
+ ]
374
+
375
+
376
+ for i, c in enumerate(classifiers):
377
+
378
+ x_axs = i%2
379
+ y_axs = int(i/2)
380
+
381
+ # print(c)
382
+ print(type(c).__name__)
383
+ pred = cross_val_predict(c, X, Y, cv=kf)
384
+ print("Accuracy score:", round(accuracy_score(Y, pred), 4), '\n')
385
+
386
+ sns.heatmap(confusion_matrix(Y, pred), annot=True, fmt='g', ax=axs[y_axs][x_axs])
387
+ axs[y_axs][x_axs].set_xlabel('Predicted')
388
+ axs[y_axs][x_axs].set_ylabel('Real')
389
+ axs[y_axs][x_axs].set_title(type(c).__name__)
390
+
391
+ plt.show()
392
+
393
+
394
+ # Gradient Boosting with no hyperparameter tuning gets to 86.58% accuracy. Not bad. Let's see if we can do better.
395
+
396
+ # ### Model Tuning
397
+
398
+ # GridSearchCV makes it possible to try out many hyperparameter combinations at once.
399
+
400
+ # In[26]:
401
+
402
+
403
+ '''
404
+ # This takes about 2 hours to run
405
+ params = {'max_depth': [5, 6, 7],
406
+ 'n_estimators': [100, 150, 200],
407
+ 'learning_rate': [0.1, 0.07, 0.05],
408
+ 'max_features': ['sqrt', 'log2', 3, 4, 5]
409
+ }
410
+ '''
411
+
412
+
413
+ params = {'max_depth': [6],
414
+ 'n_estimators': [200],
415
+ 'learning_rate': [0.07, 0.06],
416
+ 'max_features': [3,4]
417
+ }
418
+
419
+ classifier = GradientBoostingClassifier()
420
+
421
+ grid = GridSearchCV(classifier, param_grid=params, cv=kf)
422
+ search_result = grid.fit(X, Y)
423
+
424
+
425
+ # In[27]:
426
+
427
+
428
+ # GridSearch results
429
+ means = search_result.cv_results_['mean_test_score']
430
+ params = search_result.cv_results_['params']
431
+ for m, p in zip(means, params):
432
+ print(f"{m} with: {p}")
433
+
434
+
435
+ # In[28]:
436
+
437
+
438
+ p = np.argmax(means)
439
+ best_param = params[p]
440
+
441
+ final_model = GradientBoostingClassifier(**best_param)
442
+
443
+ print(final_model)
444
+ pred = cross_val_predict(final_model, X, Y, cv=kf)
445
+ print("Accuracy score:", round(accuracy_score(Y, pred), 4), '\n')
446
+
447
+ sns.heatmap(confusion_matrix(Y, pred), annot=True, fmt='g')
448
+ plt.show()
449
+
450
+
451
+ # Final prediction accuracy: 87.35%
AdultNoteBook/Kernels/GBC/.ipynb_checkpoints/0-income-prediction-84-369-accuracy-checkpoint.ipynb ADDED
@@ -0,0 +1,1164 @@
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "markdown",
5
+ "metadata": {
6
+ "_cell_guid": "26ca1808-47be-4291-bae6-aee31907dfd0",
7
+ "_execution_state": "idle",
8
+ "_uuid": "8b5645b4fc767f691c88c0beeb918337c49b6433"
9
+ },
10
+ "source": [
11
+ "#Income Prediction Problem\n",
12
+ "In this Notebook, I am working through the Income Prediction problem associated with the Adult Income Census dataset. The goal is to accurately predict whether or not someone is making more or less than $50,000 a year. While working through this problem, I am following a framework I use to attack all my machine learning problems. It includes the following steps:\n",
13
+ "\n",
14
+ "1. Load Libraries\n",
15
+ "2. Load Data\n",
16
+ "3. Analyze Data\n",
17
+ "4. Feature Engineering\n",
18
+ "5. Modeling\n",
19
+ "6. Algorithm Tuning\n",
20
+ "7. Finalizing the Model\n",
21
+ "\n",
22
+ "I hope you enjoy this notebook and find it useful. Please keep in mind this is my first Notebook on here so don't judge it too harshly!"
23
+ ]
24
+ },
25
+ {
26
+ "cell_type": "markdown",
27
+ "metadata": {
28
+ "_cell_guid": "2249cd4d-d941-4015-8103-9dcdec8bc587",
29
+ "_execution_state": "idle",
30
+ "_uuid": "b36102e03c9ec96cd20c1f6d40a1f8591d45c0c6"
31
+ },
32
+ "source": [
33
+ "##1. Load Libaraies"
34
+ ]
35
+ },
36
+ {
37
+ "cell_type": "markdown",
38
+ "metadata": {
39
+ "_cell_guid": "bfca2f2a-d078-4f0a-ad8a-3d3571a08f76",
40
+ "_execution_state": "idle",
41
+ "_uuid": "e5e4b5ee88062058ceff62de7993eac563c1582b"
42
+ },
43
+ "source": [
44
+ "First, we need to load all of our libraries we will use for this project."
45
+ ]
46
+ },
47
+ {
48
+ "cell_type": "code",
49
+ "execution_count": 1,
50
+ "metadata": {
51
+ "_cell_guid": "9201e3be-d4ec-4e38-9e7b-eb7bf54d7c25",
52
+ "_execution_state": "idle",
53
+ "_uuid": "de4cbbff6d2b51eb47a5a4719da64cd27804989d"
54
+ },
55
+ "outputs": [],
56
+ "source": [
57
+ "import pandas as pd\n",
58
+ "import numpy as np\n",
59
+ "import matplotlib.pyplot as plt\n",
60
+ "import seaborn as sns\n",
61
+ "%matplotlib inline\n",
62
+ "\n",
63
+ "from collections import Counter\n",
64
+ "\n",
65
+ "from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, GradientBoostingClassifier, ExtraTreesClassifier, VotingClassifier\n",
66
+ "from sklearn.discriminant_analysis import LinearDiscriminantAnalysis\n",
67
+ "from sklearn.linear_model import LogisticRegression\n",
68
+ "from sklearn.neighbors import KNeighborsClassifier\n",
69
+ "from sklearn.tree import DecisionTreeClassifier\n",
70
+ "from sklearn.neural_network import MLPClassifier\n",
71
+ "from sklearn.naive_bayes import GaussianNB\n",
72
+ "from sklearn.ensemble import RandomForestClassifier\n",
73
+ "from sklearn.model_selection import GridSearchCV, cross_val_score, StratifiedKFold, learning_curve, train_test_split, KFold\n",
74
+ "from sklearn.metrics import classification_report\n",
75
+ "from sklearn.metrics import confusion_matrix\n",
76
+ "from sklearn.metrics import accuracy_score\n",
77
+ "\n",
78
+ "sns.set(style='white', context='notebook', palette='deep')"
79
+ ]
80
+ },
81
+ {
82
+ "cell_type": "code",
83
+ "execution_count": 2,
84
+ "metadata": {},
85
+ "outputs": [],
86
+ "source": [
87
+ "from aif360.datasets import StandardDataset\n",
88
+ "from aif360.metrics import BinaryLabelDatasetMetric, ClassificationMetric\n",
89
+ "import matplotlib.patches as patches\n",
90
+ "from aif360.algorithms.preprocessing import Reweighing\n",
91
+ "#from packages import *\n",
92
+ "#from ml_fairness import *\n",
93
+ "import matplotlib.pyplot as plt\n",
94
+ "import seaborn as sns\n",
95
+ "\n",
96
+ "\n",
97
+ "\n",
98
+ "from IPython.display import Markdown, display"
99
+ ]
100
+ },
101
+ {
102
+ "cell_type": "markdown",
103
+ "metadata": {
104
+ "_cell_guid": "bf1d51ca-c701-44dd-a305-dbbfd4bb0cec",
105
+ "_execution_state": "idle",
106
+ "_uuid": "161e57d369bf62b7ad27912cc4ae0116ac5de0b6"
107
+ },
108
+ "source": [
109
+ "##2. Load Data"
110
+ ]
111
+ },
112
+ {
113
+ "cell_type": "markdown",
114
+ "metadata": {
115
+ "_cell_guid": "960b99a1-4408-4c1a-b86b-788c17e3b18b",
116
+ "_execution_state": "idle",
117
+ "_uuid": "0ecc4df26f5b56f9bfc4ed1841a73c8aafa00837"
118
+ },
119
+ "source": [
120
+ "Next, we load our data."
121
+ ]
122
+ },
123
+ {
124
+ "cell_type": "code",
125
+ "execution_count": 3,
126
+ "metadata": {
127
+ "_cell_guid": "946806b5-f230-4216-b42e-a8358e51b605",
128
+ "_execution_state": "idle",
129
+ "_uuid": "c4141c604b71fa95c187aa09fe7bed39106fcb1d"
130
+ },
131
+ "outputs": [
132
+ {
133
+ "data": {
134
+ "text/plain": [
135
+ "age 0\n",
136
+ "workclass 0\n",
137
+ "fnlwgt 0\n",
138
+ "education 0\n",
139
+ "education.num 0\n",
140
+ "marital.status 0\n",
141
+ "occupation 0\n",
142
+ "relationship 0\n",
143
+ "race 0\n",
144
+ "sex 0\n",
145
+ "capital.gain 0\n",
146
+ "capital.loss 0\n",
147
+ "hours.per.week 0\n",
148
+ "native.country 0\n",
149
+ "income 0\n",
150
+ "dtype: int64"
151
+ ]
152
+ },
153
+ "execution_count": 3,
154
+ "metadata": {},
155
+ "output_type": "execute_result"
156
+ }
157
+ ],
158
+ "source": [
159
+ "dataset = pd.read_csv(\"../../Data/adult.csv\")\n",
160
+ "\n",
161
+ "# Check for Null Data\n",
162
+ "dataset.isnull().sum()"
163
+ ]
164
+ },
165
+ {
166
+ "cell_type": "code",
167
+ "execution_count": 4,
168
+ "metadata": {
169
+ "_cell_guid": "25324eeb-2942-45fe-afae-0532421929cb",
170
+ "_execution_state": "idle",
171
+ "_uuid": "771a425b9b2a13c2266d851158fbc9e53211e118"
172
+ },
173
+ "outputs": [],
174
+ "source": [
175
+ "# Replace All Null Data in NaN\n",
176
+ "dataset = dataset.fillna(np.nan)"
177
+ ]
178
+ },
179
+ {
180
+ "cell_type": "code",
181
+ "execution_count": 5,
182
+ "metadata": {
183
+ "_cell_guid": "86a2ec1f-d7e8-4f29-a949-68240fe69d87",
184
+ "_execution_state": "idle",
185
+ "_uuid": "68a3687580bd65fb09a100d49cecfe1774df79dc"
186
+ },
187
+ "outputs": [
188
+ {
189
+ "data": {
190
+ "text/plain": [
191
+ "age int64\n",
192
+ "workclass object\n",
193
+ "fnlwgt int64\n",
194
+ "education object\n",
195
+ "education.num int64\n",
196
+ "marital.status object\n",
197
+ "occupation object\n",
198
+ "relationship object\n",
199
+ "race object\n",
200
+ "sex object\n",
201
+ "capital.gain int64\n",
202
+ "capital.loss int64\n",
203
+ "hours.per.week int64\n",
204
+ "native.country object\n",
205
+ "income object\n",
206
+ "dtype: object"
207
+ ]
208
+ },
209
+ "execution_count": 5,
210
+ "metadata": {},
211
+ "output_type": "execute_result"
212
+ }
213
+ ],
214
+ "source": [
215
+ "# Get data types\n",
216
+ "dataset.dtypes"
217
+ ]
218
+ },
219
+ {
220
+ "cell_type": "code",
221
+ "execution_count": 6,
222
+ "metadata": {
223
+ "_cell_guid": "1cbf148a-b814-40a3-9154-70c4511bdcd2",
224
+ "_execution_state": "idle",
225
+ "_uuid": "5fd35d640fdd346662ac08f7ab16ab996dfeb864"
226
+ },
227
+ "outputs": [
228
+ {
229
+ "data": {
230
+ "text/html": [
231
+ "<div>\n",
232
+ "<style scoped>\n",
233
+ " .dataframe tbody tr th:only-of-type {\n",
234
+ " vertical-align: middle;\n",
235
+ " }\n",
236
+ "\n",
237
+ " .dataframe tbody tr th {\n",
238
+ " vertical-align: top;\n",
239
+ " }\n",
240
+ "\n",
241
+ " .dataframe thead th {\n",
242
+ " text-align: right;\n",
243
+ " }\n",
244
+ "</style>\n",
245
+ "<table border=\"1\" class=\"dataframe\">\n",
246
+ " <thead>\n",
247
+ " <tr style=\"text-align: right;\">\n",
248
+ " <th></th>\n",
249
+ " <th>age</th>\n",
250
+ " <th>workclass</th>\n",
251
+ " <th>fnlwgt</th>\n",
252
+ " <th>education</th>\n",
253
+ " <th>education.num</th>\n",
254
+ " <th>marital.status</th>\n",
255
+ " <th>occupation</th>\n",
256
+ " <th>relationship</th>\n",
257
+ " <th>race</th>\n",
258
+ " <th>sex</th>\n",
259
+ " <th>capital.gain</th>\n",
260
+ " <th>capital.loss</th>\n",
261
+ " <th>hours.per.week</th>\n",
262
+ " <th>native.country</th>\n",
263
+ " <th>income</th>\n",
264
+ " </tr>\n",
265
+ " </thead>\n",
266
+ " <tbody>\n",
267
+ " <tr>\n",
268
+ " <th>0</th>\n",
269
+ " <td>90</td>\n",
270
+ " <td>?</td>\n",
271
+ " <td>77053</td>\n",
272
+ " <td>HS-grad</td>\n",
273
+ " <td>9</td>\n",
274
+ " <td>Widowed</td>\n",
275
+ " <td>?</td>\n",
276
+ " <td>Not-in-family</td>\n",
277
+ " <td>White</td>\n",
278
+ " <td>Female</td>\n",
279
+ " <td>0</td>\n",
280
+ " <td>4356</td>\n",
281
+ " <td>40</td>\n",
282
+ " <td>United-States</td>\n",
283
+ " <td>&lt;=50K</td>\n",
284
+ " </tr>\n",
285
+ " <tr>\n",
286
+ " <th>1</th>\n",
287
+ " <td>82</td>\n",
288
+ " <td>Private</td>\n",
289
+ " <td>132870</td>\n",
290
+ " <td>HS-grad</td>\n",
291
+ " <td>9</td>\n",
292
+ " <td>Widowed</td>\n",
293
+ " <td>Exec-managerial</td>\n",
294
+ " <td>Not-in-family</td>\n",
295
+ " <td>White</td>\n",
296
+ " <td>Female</td>\n",
297
+ " <td>0</td>\n",
298
+ " <td>4356</td>\n",
299
+ " <td>18</td>\n",
300
+ " <td>United-States</td>\n",
301
+ " <td>&lt;=50K</td>\n",
302
+ " </tr>\n",
303
+ " <tr>\n",
304
+ " <th>2</th>\n",
305
+ " <td>66</td>\n",
306
+ " <td>?</td>\n",
307
+ " <td>186061</td>\n",
308
+ " <td>Some-college</td>\n",
309
+ " <td>10</td>\n",
310
+ " <td>Widowed</td>\n",
311
+ " <td>?</td>\n",
312
+ " <td>Unmarried</td>\n",
313
+ " <td>Black</td>\n",
314
+ " <td>Female</td>\n",
315
+ " <td>0</td>\n",
316
+ " <td>4356</td>\n",
317
+ " <td>40</td>\n",
318
+ " <td>United-States</td>\n",
319
+ " <td>&lt;=50K</td>\n",
320
+ " </tr>\n",
321
+ " <tr>\n",
322
+ " <th>3</th>\n",
323
+ " <td>54</td>\n",
324
+ " <td>Private</td>\n",
325
+ " <td>140359</td>\n",
326
+ " <td>7th-8th</td>\n",
327
+ " <td>4</td>\n",
328
+ " <td>Divorced</td>\n",
329
+ " <td>Machine-op-inspct</td>\n",
330
+ " <td>Unmarried</td>\n",
331
+ " <td>White</td>\n",
332
+ " <td>Female</td>\n",
333
+ " <td>0</td>\n",
334
+ " <td>3900</td>\n",
335
+ " <td>40</td>\n",
336
+ " <td>United-States</td>\n",
337
+ " <td>&lt;=50K</td>\n",
338
+ " </tr>\n",
339
+ " </tbody>\n",
340
+ "</table>\n",
341
+ "</div>"
342
+ ],
343
+ "text/plain": [
344
+ " age workclass fnlwgt education education.num marital.status \\\n",
345
+ "0 90 ? 77053 HS-grad 9 Widowed \n",
346
+ "1 82 Private 132870 HS-grad 9 Widowed \n",
347
+ "2 66 ? 186061 Some-college 10 Widowed \n",
348
+ "3 54 Private 140359 7th-8th 4 Divorced \n",
349
+ "\n",
350
+ " occupation relationship race sex capital.gain \\\n",
351
+ "0 ? Not-in-family White Female 0 \n",
352
+ "1 Exec-managerial Not-in-family White Female 0 \n",
353
+ "2 ? Unmarried Black Female 0 \n",
354
+ "3 Machine-op-inspct Unmarried White Female 0 \n",
355
+ "\n",
356
+ " capital.loss hours.per.week native.country income \n",
357
+ "0 4356 40 United-States <=50K \n",
358
+ "1 4356 18 United-States <=50K \n",
359
+ "2 4356 40 United-States <=50K \n",
360
+ "3 3900 40 United-States <=50K "
361
+ ]
362
+ },
363
+ "execution_count": 6,
364
+ "metadata": {},
365
+ "output_type": "execute_result"
366
+ }
367
+ ],
368
+ "source": [
369
+ "# Peek at data\n",
370
+ "dataset.head(4)"
371
+ ]
372
+ },
373
+ {
374
+ "cell_type": "code",
375
+ "execution_count": 7,
376
+ "metadata": {
377
+ "_cell_guid": "c183daab-b3b2-4cab-91a0-05f0b7a98dce",
378
+ "_execution_state": "idle",
379
+ "_uuid": "0b0c1b473300ee13c423318626c371835d032e12"
380
+ },
381
+ "outputs": [
382
+ {
383
+ "data": {
384
+ "text/html": [
385
+ "<div>\n",
386
+ "<style scoped>\n",
387
+ " .dataframe tbody tr th:only-of-type {\n",
388
+ " vertical-align: middle;\n",
389
+ " }\n",
390
+ "\n",
391
+ " .dataframe tbody tr th {\n",
392
+ " vertical-align: top;\n",
393
+ " }\n",
394
+ "\n",
395
+ " .dataframe thead th {\n",
396
+ " text-align: right;\n",
397
+ " }\n",
398
+ "</style>\n",
399
+ "<table border=\"1\" class=\"dataframe\">\n",
400
+ " <thead>\n",
401
+ " <tr style=\"text-align: right;\">\n",
402
+ " <th></th>\n",
403
+ " <th>age</th>\n",
404
+ " <th>workclass</th>\n",
405
+ " <th>fnlwgt</th>\n",
406
+ " <th>education</th>\n",
407
+ " <th>education.num</th>\n",
408
+ " <th>marital.status</th>\n",
409
+ " <th>occupation</th>\n",
410
+ " <th>relationship</th>\n",
411
+ " <th>race</th>\n",
412
+ " <th>sex</th>\n",
413
+ " <th>capital.gain</th>\n",
414
+ " <th>capital.loss</th>\n",
415
+ " <th>hours.per.week</th>\n",
416
+ " <th>native.country</th>\n",
417
+ " <th>income</th>\n",
418
+ " </tr>\n",
419
+ " </thead>\n",
420
+ " <tbody>\n",
421
+ " <tr>\n",
422
+ " <th>0</th>\n",
423
+ " <td>90</td>\n",
424
+ " <td>?</td>\n",
425
+ " <td>77053</td>\n",
426
+ " <td>HS-grad</td>\n",
427
+ " <td>9</td>\n",
428
+ " <td>Widowed</td>\n",
429
+ " <td>?</td>\n",
430
+ " <td>Not-in-family</td>\n",
431
+ " <td>White</td>\n",
432
+ " <td>Female</td>\n",
433
+ " <td>0</td>\n",
434
+ " <td>4356</td>\n",
435
+ " <td>40</td>\n",
436
+ " <td>United-States</td>\n",
437
+ " <td>0</td>\n",
438
+ " </tr>\n",
439
+ " <tr>\n",
440
+ " <th>1</th>\n",
441
+ " <td>82</td>\n",
442
+ " <td>Private</td>\n",
443
+ " <td>132870</td>\n",
444
+ " <td>HS-grad</td>\n",
445
+ " <td>9</td>\n",
446
+ " <td>Widowed</td>\n",
447
+ " <td>Exec-managerial</td>\n",
448
+ " <td>Not-in-family</td>\n",
449
+ " <td>White</td>\n",
450
+ " <td>Female</td>\n",
451
+ " <td>0</td>\n",
452
+ " <td>4356</td>\n",
453
+ " <td>18</td>\n",
454
+ " <td>United-States</td>\n",
455
+ " <td>0</td>\n",
456
+ " </tr>\n",
457
+ " <tr>\n",
458
+ " <th>2</th>\n",
459
+ " <td>66</td>\n",
460
+ " <td>?</td>\n",
461
+ " <td>186061</td>\n",
462
+ " <td>Some-college</td>\n",
463
+ " <td>10</td>\n",
464
+ " <td>Widowed</td>\n",
465
+ " <td>?</td>\n",
466
+ " <td>Unmarried</td>\n",
467
+ " <td>Black</td>\n",
468
+ " <td>Female</td>\n",
469
+ " <td>0</td>\n",
470
+ " <td>4356</td>\n",
471
+ " <td>40</td>\n",
472
+ " <td>United-States</td>\n",
473
+ " <td>0</td>\n",
474
+ " </tr>\n",
475
+ " <tr>\n",
476
+ " <th>3</th>\n",
477
+ " <td>54</td>\n",
478
+ " <td>Private</td>\n",
479
+ " <td>140359</td>\n",
480
+ " <td>7th-8th</td>\n",
481
+ " <td>4</td>\n",
482
+ " <td>Divorced</td>\n",
483
+ " <td>Machine-op-inspct</td>\n",
484
+ " <td>Unmarried</td>\n",
485
+ " <td>White</td>\n",
486
+ " <td>Female</td>\n",
487
+ " <td>0</td>\n",
488
+ " <td>3900</td>\n",
489
+ " <td>40</td>\n",
490
+ " <td>United-States</td>\n",
491
+ " <td>0</td>\n",
492
+ " </tr>\n",
493
+ " </tbody>\n",
494
+ "</table>\n",
495
+ "</div>"
496
+ ],
497
+ "text/plain": [
498
+ " age workclass fnlwgt education education.num marital.status \\\n",
499
+ "0 90 ? 77053 HS-grad 9 Widowed \n",
500
+ "1 82 Private 132870 HS-grad 9 Widowed \n",
501
+ "2 66 ? 186061 Some-college 10 Widowed \n",
502
+ "3 54 Private 140359 7th-8th 4 Divorced \n",
503
+ "\n",
504
+ " occupation relationship race sex capital.gain \\\n",
505
+ "0 ? Not-in-family White Female 0 \n",
506
+ "1 Exec-managerial Not-in-family White Female 0 \n",
507
+ "2 ? Unmarried Black Female 0 \n",
508
+ "3 Machine-op-inspct Unmarried White Female 0 \n",
509
+ "\n",
510
+ " capital.loss hours.per.week native.country income \n",
511
+ "0 4356 40 United-States 0 \n",
512
+ "1 4356 18 United-States 0 \n",
513
+ "2 4356 40 United-States 0 \n",
514
+ "3 3900 40 United-States 0 "
515
+ ]
516
+ },
517
+ "execution_count": 7,
518
+ "metadata": {},
519
+ "output_type": "execute_result"
520
+ }
521
+ ],
522
+ "source": [
523
+ "\n",
524
+ "# Reformat Column We Are Predicting\n",
525
+ "dataset['income']=dataset['income'].map({'<=50K': 0, '>50K': 1, '<=50K.': 0, '>50K.': 1})\n",
526
+ "dataset.head(4)"
527
+ ]
528
+ },
529
+ {
530
+ "cell_type": "markdown",
531
+ "metadata": {
532
+ "_cell_guid": "7ad8dd1d-470b-4e1e-b7b5-44769b775131",
533
+ "_execution_state": "idle",
534
+ "_uuid": "6cb8261a58c38c0492512efdbd1621d910965d52"
535
+ },
536
+ "source": [
537
+ "##3. Analyze Data"
538
+ ]
539
+ },
540
+ {
541
+ "cell_type": "code",
542
+ "execution_count": 8,
543
+ "metadata": {
544
+ "_cell_guid": "1c4744ab-7e9a-4993-ae0e-3c725912c638",
545
+ "_execution_state": "idle",
546
+ "_uuid": "31c7a7019b800cdea2eae4d2904b3ad428219da2"
547
+ },
548
+ "outputs": [],
549
+ "source": [
550
+ "# Identify Numeric features\n",
551
+ "numeric_features = ['age','fnlwgt','education.num','capital.gain','capital.loss','hours.per.week','income']\n",
552
+ "\n",
553
+ "# Identify Categorical features\n",
554
+ "cat_features = ['workclass','education','marital.status', 'occupation', 'relationship', 'race', 'sex', 'native']"
555
+ ]
556
+ },
557
+ {
558
+ "cell_type": "markdown",
559
+ "metadata": {
560
+ "_cell_guid": "0ade92f2-fcb2-487d-a581-34a89f85e3c5",
561
+ "_execution_state": "idle",
562
+ "_uuid": "1d0cdf67c8b35f2389931629a5f105823a42c804"
563
+ },
564
+ "source": [
565
+ "###3.1. Numeric Data Analysis"
566
+ ]
567
+ },
568
+ {
569
+ "cell_type": "code",
570
+ "execution_count": 9,
571
+ "metadata": {
572
+ "_cell_guid": "7004fe8d-fefd-4ade-805a-1dddf9c88928",
573
+ "_execution_state": "idle",
574
+ "_uuid": "bd1dbe810e1435bf36a9e262a115382c6c2761f4"
575
+ },
576
+ "outputs": [
577
+ {
578
+ "name": "stderr",
579
+ "output_type": "stream",
580
+ "text": [
581
+ "Pass the following variable as a keyword arg: x. From version 0.12, the only valid positional argument will be `data`, and passing other arguments without an explicit keyword will result in an error or misinterpretation.\n"
582
+ ]
583
+ },
584
+ {
585
+ "ename": "AttributeError",
586
+ "evalue": "module 'seaborn' has no attribute 'plt'",
587
+ "output_type": "error",
588
+ "traceback": [
589
+ "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
590
+ "\u001b[1;31mAttributeError\u001b[0m Traceback (most recent call last)",
591
+ "\u001b[1;32m<ipython-input-9-b46d4f116953>\u001b[0m in \u001b[0;36m<module>\u001b[1;34m\u001b[0m\n\u001b[0;32m 1\u001b[0m \u001b[1;31m# Count of >50K & <=50K\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 2\u001b[0m \u001b[0msns\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mcountplot\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mdataset\u001b[0m\u001b[1;33m[\u001b[0m\u001b[1;34m'income'\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0mlabel\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;34m\"Count\"\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m----> 3\u001b[1;33m \u001b[0msns\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mplt\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mshow\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m",
592
+ "\u001b[1;31mAttributeError\u001b[0m: module 'seaborn' has no attribute 'plt'"
593
+ ]
594
+ },
595
+ {
596
+ "data": {
597
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAAZMAAAEJCAYAAABR4cpEAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjMuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8vihELAAAACXBIWXMAAAsTAAALEwEAmpwYAAAYqUlEQVR4nO3df2zU9eHH8dfBlWJXsw5yV5pSm4kYwqqw2SjMed3CaIvtDQWM0JsV2cCgY2oMWIGmAYc0XQMOoWzZ12yJgKNj2io7rxCcdVp/YLOBJQQ00BpavWst4Fq543r9fP9w3CxFKLy5noXnIzHp5927D+83wXvmc5/7fM5mWZYlAAAMDIv3BAAAQx8xAQAYIyYAAGPEBABgjJgAAIzZ4z2BwRYMBtXU1CSHw6Hhw4fHezoAMCREIhG1t7crKytLI0eO7Pf7qy4mTU1N8ng88Z4GAAxJW7duVXZ2dr/xqy4mDodD0pd/IWPGjInzbABgaPj000/l8Xiir6Fnu+picuatrTFjxmjs2LFxng0ADC1fd3qAE/AAAGMxjcnGjRtVUFCggoICVVRUSJKefPJJ5ebmaubMmZo5c6Z2794tSWpoaJDb7VZubq7Wr18f3cfBgwc1e/Zs5eXlacWKFerp6ZEktbW1yePxKD8/X4sXL1Z3d3cslwIAOI+YxaShoUFvvvmmXnrpJdXU1OjAgQPavXu3mpqatGXLFtXW1qq2tlbTp09XMBjU8uXLVVVVJa/Xq6amJtXX10uSli5dqtLSUtXV1cmyLFVXV0uSVq1apaKiIvl8PmVlZamqqipWSwEAXEDMYuJwOFRSUqIRI0YoISFB48aNU1tbm9ra2lRaWiq3260NGzaot7dX+/fvV2ZmpjIyMmS32+V2u+Xz+dTa2qpgMKjJkydLkmbNmiWfz6dwOKy9e/cqLy+vzzgAID5idgJ+/Pjx0Z+bm5vl9Xq1bds2vffee1q9erWSkpL04IMPaseOHUpKSurzCQGn0ym/369AINBn3OFwyO/36/jx40pOTpbdbu8zDgCIj5ifgP/www+1YMECPfHEE7r++uu1adMmjR49Wtdcc43uu+8+1dfX61x3wbfZbBc9DgCIj5jGpLGxUfPnz9fjjz+uu+++W4cOHVJdXV3095ZlyW63KzU1VR0dHdHxQCAgp9PZb7y9vV1Op1OjRo1SV1eXIpFIn3EAQHzELCaffPKJHn74YVVWVqqgoEDSl/F4+umndfLkSYXDYW3fvl3Tp0/XpEmTdPToUbW0tCgSiWjnzp1yuVxKT09XYmKiGhsbJUk1NTVyuVxKSEhQdna2vF5vn/HBdDocGdQ/D0MD/y5wtYrZOZPnnntOoVBI5eXl0bG5c+dq0aJFmjdvnnp6epSbm6vCwkJJUnl5uZYsWaJQKKScnBzl5+dLkiorK7Vy5Up1d3dr4sSJKi4uliSVlZWppKREmzdvVlpamtatWxerpZzTiIThKlq2dVD/THzzbavgVj24Otmutq/tPXbsmKZNm6Y9e/YYXwFPTHA2YoIr1YVeO7kCHgBgjJgAAIwREwCAMWICADBGTAAAxogJAMAYMQEAGCMmAABjxAQAYIyYAACMERMAgDFiAgAwRkwAAMaICQDAGDEBABgjJgAAY8QEAGCMmAAAjBETAIAxYgIAMEZMAADGiAkAwBgxAQAYIyYAAGPEBABgjJgAAIwREwCAMWICADBGTAAAxogJAMAYMQEAGCMmAABjxAQAYCymMdm4caMKCgpUUFCgiooKSVJDQ4Pcbrdyc3O1fv366GMPHjyo2bNnKy8vTytWrFBPT48kqa2tTR6PR/n5+Vq8eLG6u7slSZ9//rkWLVqkGTNmyOPxqL29PZZLAQCcR8xi0tDQoDfffFMvvfSSampqdODAAe3cuVPLly9XVVWVvF6vmpqaVF9fL0launSpSktLVVdXJ8uyVF1dLUlatWqVioqK5PP5lJWVpaqqKknSM888o+zsbL366qu65557tGbNmlgtBQBwATGLicPhUElJiUaMGKGEhASNGzdOzc3NyszMVEZGhux2u9xut3w+n1pbWxUMBjV58mRJ0qxZs+Tz+RQOh7V3717l5eX1GZek119/XW63W5JUWFioN954Q+FwOFbLAQCcR8xiMn78+Ggcmpub5fV6ZbPZ5HA4oo9xOp3y+/0KBAJ9xh0Oh/x+v44fP67k5GTZ7fY+45L6PMdutys5OVmdnZ2xWg4A4DxifgL+ww8/1IIFC/TEE0/ouuuu6/d7m80my7IuavzrDBvG5wkAIB5i+urb2Nio+fPn6/HHH9fdd9+t1NRUdXR0RH8fCATkdDr7jbe3t8vpdGrUqFHq6upSJBLpMy59eVRz5jk9PT3q6upSSkpKLJcDAPgaMYvJJ598oocffliVlZUqKCiQJE2aNElHjx5VS0uLIpGIdu7cKZfLpfT0dCUmJqqxsVGSVFNTI5fLpYSEBGVnZ8vr9fYZl6ScnBzV1NRIkrxer7Kzs5WQkBCr5QAAzsMeqx0/99xzCoVCKi8vj47NnTtX5eXlWrJkiUKhkHJycpSfny9Jqqys1MqVK9Xd3a2JEyequLhYklRWVqaSkhJt3rxZaWlpWrdunSTpkUceUUlJiQoKCnTttdeqsrIyVksBAFyAzTrXiYkr2LFjxzRt2jTt2bNHY8eONdpX0bKtl2lWuFJsq/DEewpATFzotZMz1gAAY8QEAGCMmAAAjBETAIAxYgIAMEZMAADGiAkAwBgxAQAYIyYAAGPEBABgjJgAAIwREwCAMWICADBGTAAAxogJAMAYMQEAGCMmAABjxAQAYIyYAACMERMAgDFiAgAwRkwAAMaICQDAGDEBABgjJgAAY8QEAGCMmAAAjBETAIAxYgIAMEZMAADGiAkAwBgxAQAYIyYAAGMxj0lXV5cKCwt17NgxSdKTTz6p3NxczZw5UzNnztTu3bslSQ0NDXK73crNzdX69eujzz948KBmz56tvLw8rVixQj09PZKktrY2eTwe5efna/Hixeru7o71UgAAXyOmMdm3b5/mzZun5ubm6FhTU5O2bNmi2tpa1dbWavr06QoGg1q+fLmqqqrk9XrV1NSk+vp6SdLSpUtVWlqquro6WZal6upqSdKqVatUVFQkn8+nrKwsVVVVxXIpAIDziGlMqqurVVZWJqfTKUn64osv1NbWptLSUrndbm3YsEG9vb3av3+/MjMzlZGRIbvdLrfbLZ/Pp9bWVgWDQU2ePFmSNGvWLPl8PoXDYe3du1d5eXl9xgEA8WGP5c7XrFnTZ/uzzz7TlClTtHr1aiUlJenBBx/Ujh07lJSUJIfDEX2c0+mU3+9XIBDoM+5wOOT3+3X8+HElJyfLbrf3GQcAxMegnoDPyMjQpk2bNHr0aF1zzTW67777VF9fL8uy+j3WZrNd9DgAID4GNSaHDh1SXV1ddNuyLNntdqWmpqqjoyM6HggE5HQ6+423t7fL6XRq1KhR6urqUiQS6TMOAIiPQY2JZVl6+umndfLkSYXDYW3fvl3Tp0/XpEmTdPToUbW
0tCgSiWjnzp1yuVxKT09XYmKiGhsbJUk1NTVyuVxKSEhQdna2vF5vn3EAQHzE9JzJ2SZMmKBFixZp3rx56unpUW5urgoLCyVJ5eXlWrJkiUKhkHJycpSfny9Jqqys1MqVK9Xd3a2JEyequLhYklRWVqaSkhJt3rxZaWlpWrdu3WAuBQDwFTbrXCcgzuL3+5Wamtpn7KOPPtINN9wQs4nFyrFjxzRt2jTt2bNHY8eONdpX0bKtl2lWuFJsq/DEewpATFzotfO8b3OdOHFCJ06c0MKFC3Xy5MnodkdHhx566KGYTRoAMLSc922uxx9/XG+99ZYk6bbbbvvfk+x2/fSnP43tzAAAQ8Z5Y/Lcc89J+vIWKGvXrh2UCQEAhp4BnYBfu3atWltbdfLkyT7XeHzve9+L2cQAAEPHgGJSWVmp559/XqNHj46O2Ww27dmzJ2YTAwAMHQOKidfr1a5du/p9ogsAAGmAFy2mpaUREgDA1xrQkcnUqVNVUVGhadOmaeTIkdFxzpkAAKQBxuTFF1+UpD63eeecCQDgjAHF5LXXXov1PAAAQ9iAYvKnP/3pnOMPPPDAZZ0MAGBoGlBMDh8+HP359OnTamxs7HNFPADg6jbgixa/qrOzU8uWLYvJhAAAQ88lfZ/JqFGj1NraernnAgAYoi76nIllWWpqaupzNTwA4Op20edMpC8vYuRtLgDAGRd1zqS1tVU9PT3KzMyM6aQAAEPLgGLS0tKihx56SIFAQL29vfrOd76jP/zhDxo3blys5wcAGAIGdAJ+9erV+uUvf6m9e/eqsbFRixcv1qpVq2I9NwDAEDGgmHz22We6++67o9uzZ8/W8ePHYzYpAMDQMqCYRCIRnThxIrrd2dkZq/kAAIagAZ0z+fnPf657771XM2bMkCS9+uqruv/++2M6MQDA0DGgI5OcnBxJUjgc1pEjR+T3+zV9+vSYTgwAMHQM6MikpKREHo9HxcXFCoVCeuGFF7R8+XL98Y9/jPX8AABDwICOTI4fP67i4mJJUmJioubPn6/29vaYTgwAMHQM+AS83++Pbnd0dMiyrJhNCgAwtAzoba758+frrrvu0h133CGbzaaGhgZupwIAiBpQTObMmaOsrCy98847Gj58uH7xi1/oxhtvjPXcAABDxIBiIkkTJkzQhAkTYjkXAMAQdUnfZwIAwFcREwCAMWICADBGTAAAxmIak66uLhUWFurYsWOSpIaGBrndbuXm5mr9+vXRxx08eFCzZ89WXl6eVqxYoZ6eHklSW1ubPB6P8vPztXjxYnV3d0uSPv/8cy1atEgzZsyQx+PhAkoAiLOYxWTfvn2aN2+empubJUnBYFDLly9XVVWVvF6vmpqaVF9fL0launSpSktLVVdXJ8uyVF1dLUlatWqVioqK5PP5lJWVpaqqKknSM888o+zsbL366qu65557tGbNmlgtAwAwADGLSXV1tcrKyuR0OiVJ+/fvV2ZmpjIyMmS32+V2u+Xz+dTa2qpgMKjJkydLkmbNmiWfz6dwOKy9e/cqLy+vz7gkvf7663K73ZKkwsJCvfHGGwqHw7FaCgDgAgZ8ncnFOvtoIRAIyOFwRLedTqf8fn+/cYfDIb/fr+PHjys5OVl2u73P+Nn7stvtSk5OVmdnp1JTU2O1HADAeQzaCfhz3cvLZrNd9PjXGTaMzxIAQLwM2itwamqqOjo6otuBQEBOp7PfeHt7u5xOp0aNGqWuri5FIpE+49KXRzVnntPT06Ouri6lpKQM1lIAAGcZtJhMmjRJR48eVUtLiyKRiHbu3CmXy6X09HQlJiaqsbFRklRTUyOXy6WEhARlZ2fL6/X2GZe+/LKumpoaSZLX61V2drYSEhIGaykAgLPE7JzJ2RITE1VeXq4lS5YoFAopJydH+fn5kqTKykqtXLlS3d3dmjhxYvS7U8rKylRSUqLNmzcrLS1N69atkyQ98sgjKikpUUFBga699lpVVlYO1jIAAOcQ85i89tpr0Z+nTp2ql19+ud9jJkyYoB07dvQbT09P1/PPP99vPCUlRb///e8v70QBAJeMs9YAAGPEBABgjJgAAIwREwCAMWICADBGTAAAxogJAMAYMQEAGCMmAABjxAQAYIyYAACMERMAgDFiAgAwRkyAK0xvTzjeU8A3UKz/XQza95kAGBzD7AlqrPhlvKeBb5hblv1fTPfPkQkAwBgxAQAYIyYAAGPEBABgjJgAAIwREwCAMWICADBGTAAAxogJAMAYMQEAGCMmAABjxAQAYIyYAACMERMAgDFiAgAwRkwAAMaICQDAGDEBABiLy9f2FhcX67PPPpPd/uUfv3r1an388cfavHmzwuGw5s+fL4/HI0lqaGjQ2rVrFQqFNGPGDD322GOSpIMHD2rlypXq6upSdna2Vq1aFd0fAGBwDfqRiWVZOnLkiGpra6P/jRkzRuvXr9e2bdtUW1ur7du366OPPlIwGNTy5ctVVVUlr9erpqYm1dfXS5KWLl2q0tJS1dXVybIsVVdXD/ZSAAD/NegxOXLkiGw2mxYuXKif/exn2rJlixoaGjRlyhSlpKQoKSlJeXl58vl82r9/vzIzM5WRkSG73S632y2fz6fW1lYFg0FNnjxZkjRr1iz5fL7BXgoA4L8GPSaff/65pk6dqk2bNunPf/6z/vKXv6itrU0OhyP6GKfTKb/fr0AgMKBxh8Mhv98/qOsAAPzPoMfk+9//vioqKpSUlKRRo0Zpzpw52rBhQ7/H2Ww2WZZ1UeMAgPgY9Ji8//77evvtt6PblmUpPT1dHR0d0bFAICCn06nU1NQBjbe3t8vpdA7OAgAA/Qx6TP7zn/+ooqJCoVBIXV1deumll/Tb3/5Wb7/9tjo7O3Xq1Cnt2rVLLpdLkyZN0tGjR9XS0qJIJKKdO3fK5XIpPT1diYmJamxslCTV1NTI5XIN9lIAAP816J+l/clPfqJ9+/bprrvuUm9vr4qKinTLLbfoscceU3FxscLhsObMmaObb75ZklReXq4lS5YoFAopJydH+fn5kqTKykqtXLlS3d3dmjhxooqLiwd7KQCA/4rLhRmPPvqoHn300T5jbrdbbre732OnTp2ql19+ud/4hAkTtGPHjlhNEQBwEbgCHgBgjJgAAIwREwCAMWICADBGTAAAxogJAMAYMQEAGCMmAABjxAQAYIyYAACMERMAgDFiAgAwRkwAAMaICQDAGDEBABgjJgAAY8QEAGCMmAAAjBETAIAxYgIAMEZMAADGiAkAwBgxAQAYIyYAAGPEBABgjJgAAIwREwCAMWICADBGTAAAxogJAMAYMQEAGCMmAABjxAQAYGxIx+SVV17RnXfeqenTp2vr1q3xng4AXLXs8Z7ApfL7/Vq/fr1efPFFjRgxQnPnztVtt92mG264Id5TA4CrzpCNSUNDg6ZMmaKUlBRJUl5ennw+n371q1+d93mRSESS9OmnnxrPIfTFCeN94Mpy7NixeE9BktT+n2C8p4BvGNN/m2deM8+8hp5tyMYkEAjI4XBEt51Op/bv33/B57W3t0uSPB5PzOaGq9e03RviPQ
Xg3KqnXZbdtLe3KzMzs9/4kI2JZVn9xmw22wWfl5WVpa1bt8rhcGj48OGxmBoAXHEikYja29uVlZV1zt8P2Zikpqbq/fffj24HAgE5nc4LPm/kyJHKzs6O5dQA4Ip0riOSM4bsp7l++MMf6u2331ZnZ6dOnTqlXbt2yeVyxXtaAHBVGtJHJo899piKi4sVDoc1Z84c3XzzzfGeFgBclWzWuU4+AABwEYbs21wAgG8OYgIAMEZMAADGiAkAwBgxwSXjRpv4puvq6lJhYeE35jY3VzJigkty5kab27ZtU21trbZv366PPvoo3tMCovbt26d58+apubk53lO5KhATXJKv3mgzKSkpeqNN4JuiurpaZWVlA7ozBswN2YsWEV+XeqNNYLCsWbMm3lO4qnBkgktyqTfaBHBlIia4JKmpqero6IhuD/RGmwCuTMQEl4QbbQL4Ks6Z4JJwo00AX8WNHgEAxnibCwBgjJgAAIwREwCAMWICADBGTAAAxogJcBl88MEH+vWvfx3vaQBxw0eDAQDGuGgRuAzeffddPfXUU8rKylJycrIOHTqkTz/9VNdff73WrVunb33rW9q3b59+85vf6NSpU0pISNCyZcs0depUvf/++6qoqIiOP/roo3K5XHrxxRe1a9cuBYNBtba2Ki0tTR6PR1u2bFFzc7MeeOABLViwQJL017/+VS+88IJ6e3uVkpKi0tJSjRs3Ls5/K7iqWACMvfPOO1ZBQYH1xBNPWPfee68VCoWs06dPW3fddZe1Y8cO6/Tp09btt99u/eMf/7Asy7I++OADq7Cw0Ors7LSmTp1q/fvf/7Ysy7IOHz5s3XrrrdbHH39s/e1vf7NuueUWq62tzYpEItadd95pLVmyxIpEItbBgwetm266yYpEIta7775rFRUVWV988YVlWZb1z3/+05oxY0a8/ipwleLIBLjM7rjjDo0YMUKSdOONN+rkyZM6fPiwhg0bph//+MeSpKysLL3yyiuqr6/Xddddp0mTJkmSxo8frx/84Ad67733ZLPZdNNNNyktLU2SNHbsWP3oRz/SsGHDlJGRoVAopFOnTun1119XS0uL5s6dG53DyZMndeLECaWkpAzq2nH1IibAZTZy5MjozzabTZZlafjw4f1u0X/48GH19vb2e75lWerp6VFCQkI0SmfY7f3/l+3t7dXMmTO1dOnS6HYgENC3v/3ty7EcYED4NBcwCK6//nrZbDa99dZbkqQDBw7o/vvv180336yjR49Gv1jsww8/1N69e3XrrbcOeN+33367/v73vysQCEiSXnjhBd1///2XfxHAeXBkAgyCESNG6Nlnn9XTTz+tiooKJSQk6Nlnn9Xo0aP1u9/9Tk899ZSCwaBsNpvWrl2r7373u/rXv/41oH3fcccdWrhwoRYsWCCbzabk5GRt3LiRLyvDoOKjwQAAY7zNBQAwRkwAAMaICQDAGDEBABgjJgAAY8QEAGCMmAAAjBETAICx/wd5WOUYlAzerAAAAABJRU5ErkJggg==\n",
598
+ "text/plain": [
599
+ "<Figure size 432x288 with 1 Axes>"
600
+ ]
601
+ },
602
+ "metadata": {},
603
+ "output_type": "display_data"
604
+ }
605
+ ],
606
+ "source": [
607
+ "# Count of >50K & <=50K\n",
608
+ "sns.countplot(dataset['income'],label=\"Count\")\n",
609
+ "sns.plt.show()"
610
+ ]
611
+ },
612
+ {
613
+ "cell_type": "code",
614
+ "execution_count": null,
615
+ "metadata": {
616
+ "_cell_guid": "dc5bcab4-92fc-461c-9d7c-1a9580d23846",
617
+ "_execution_state": "idle",
618
+ "_uuid": "c92ee829ae5ca6724f3778a45f4e2a3df5f88c2a"
619
+ },
620
+ "outputs": [],
621
+ "source": [
622
+ "# Correlation matrix between numerical values\n",
623
+ "g = sns.heatmap(dataset[numeric_features].corr(),annot=True, fmt = \".2f\", cmap = \"coolwarm\")\n",
624
+ "sns.plt.show()"
625
+ ]
626
+ },
627
+ {
628
+ "cell_type": "code",
629
+ "execution_count": null,
630
+ "metadata": {
631
+ "_cell_guid": "57867c79-49b3-4e90-a196-aa9a297040a3",
632
+ "_execution_state": "idle",
633
+ "_uuid": "56537a5cce38758ab1e8514cbb7e12da27339984"
634
+ },
635
+ "outputs": [],
636
+ "source": [
637
+ "# Explore Education Num vs Income\n",
638
+ "g = sns.factorplot(x=\"education.num\",y=\"income\",data=dataset,kind=\"bar\",size = 6,palette = \"muted\")\n",
639
+ "g.despine(left=True)\n",
640
+ "g = g.set_ylabels(\">50K probability\")"
641
+ ]
642
+ },
643
+ {
644
+ "cell_type": "code",
645
+ "execution_count": null,
646
+ "metadata": {
647
+ "_cell_guid": "40c1d5aa-7f1c-4cdd-94c6-fd074d404162",
648
+ "_execution_state": "idle",
649
+ "_uuid": "7d13c85496761accb5791803b816e6152d5b4fc8"
650
+ },
651
+ "outputs": [],
652
+ "source": [
653
+ "# Explore Hours Per Week vs Income\n",
654
+ "g = sns.factorplot(x=\"hours.per.week\",y=\"income\",data=dataset,kind=\"bar\",size = 6,palette = \"muted\")\n",
655
+ "g.despine(left=True)\n",
656
+ "g = g.set_ylabels(\">50K probability\")"
657
+ ]
658
+ },
659
+ {
660
+ "cell_type": "code",
661
+ "execution_count": null,
662
+ "metadata": {
663
+ "_cell_guid": "c719cef4-5d7f-4218-b5d5-f6ec2941a51e",
664
+ "_execution_state": "idle",
665
+ "_uuid": "2fa3fe2ff1f5dc96f2502767ce87263d86eb077b"
666
+ },
667
+ "outputs": [],
668
+ "source": [
669
+ "# Explore Age vs Income\n",
670
+ "g = sns.FacetGrid(dataset, col='income')\n",
671
+ "g = g.map(sns.distplot, \"age\")\n",
672
+ "sns.plt.show()"
673
+ ]
674
+ },
675
+ {
676
+ "cell_type": "markdown",
677
+ "metadata": {
678
+ "_cell_guid": "cb5fb656-d724-4448-97b5-931560fcf9f2",
679
+ "_execution_state": "idle",
680
+ "_uuid": "076ad21bd294eb420d70df41407144cf12903e67"
681
+ },
682
+ "source": [
683
+ "###3.2. Categorical Data Analysis"
684
+ ]
685
+ },
686
+ {
687
+ "cell_type": "code",
688
+ "execution_count": 10,
689
+ "metadata": {
690
+ "_cell_guid": "dbccf607-7eb6-4e8b-8c00-2e91a2477ffe",
691
+ "_execution_state": "idle",
692
+ "_uuid": "0367e18fa3ea49392b03696aa5e75443c98ea471"
693
+ },
694
+ "outputs": [
695
+ {
696
+ "data": {
697
+ "text/plain": [
698
+ "age 0\n",
699
+ "workclass 0\n",
700
+ "fnlwgt 0\n",
701
+ "education 0\n",
702
+ "education.num 0\n",
703
+ "marital.status 0\n",
704
+ "occupation 0\n",
705
+ "relationship 0\n",
706
+ "race 0\n",
707
+ "sex 0\n",
708
+ "capital.gain 0\n",
709
+ "capital.loss 0\n",
710
+ "hours.per.week 0\n",
711
+ "native.country 0\n",
712
+ "income 0\n",
713
+ "dtype: int64"
714
+ ]
715
+ },
716
+ "execution_count": 10,
717
+ "metadata": {},
718
+ "output_type": "execute_result"
719
+ }
720
+ ],
721
+ "source": [
722
+ "# Fill Missing Category Entries\n",
723
+ "dataset[\"workclass\"] = dataset[\"workclass\"].fillna(\"X\")\n",
724
+ "dataset[\"occupation\"] = dataset[\"occupation\"].fillna(\"X\")\n",
725
+ "dataset[\"native.country\"] = dataset[\"native.country\"].fillna(\"United-States\")\n",
726
+ "\n",
727
+ "# Confirm All Missing Data is Handled\n",
728
+ "dataset.isnull().sum()"
729
+ ]
730
+ },
731
+ {
732
+ "cell_type": "code",
733
+ "execution_count": 11,
734
+ "metadata": {
735
+ "_cell_guid": "5c6b1b5c-886d-44b1-ac09-1d410cda49d1",
736
+ "_execution_state": "idle",
737
+ "_uuid": "557e3c876124213d21e797095c23f6e0b9a5c5ab"
738
+ },
739
+ "outputs": [
740
+ {
741
+ "ename": "AttributeError",
742
+ "evalue": "module 'seaborn' has no attribute 'plt'",
743
+ "output_type": "error",
744
+ "traceback": [
745
+ "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
746
+ "\u001b[1;31mAttributeError\u001b[0m Traceback (most recent call last)",
747
+ "\u001b[1;32m<ipython-input-11-0138039b851b>\u001b[0m in \u001b[0;36m<module>\u001b[1;34m\u001b[0m\n\u001b[0;32m 2\u001b[0m \u001b[0mg\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0msns\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mbarplot\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mx\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;34m\"native.country\"\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0my\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;34m\"income\"\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0mdata\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mdataset\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 3\u001b[0m \u001b[0mg\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mg\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mset_ylabel\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;34m\"Income >50K Probability\"\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m----> 4\u001b[1;33m \u001b[0msns\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mplt\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mshow\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m",
748
+ "\u001b[1;31mAttributeError\u001b[0m: module 'seaborn' has no attribute 'plt'"
749
+ ]
750
+ },
751
+ {
752
+ "data": {
753
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAAcgAAAEJCAYAAAAQFbf7AAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjMuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8vihELAAAACXBIWXMAAAsTAAALEwEAmpwYAABJG0lEQVR4nO3dd3gU5drA4d/W9BBCkg0EpHcSwQIRKaJCJIKAgKIIytGIHhRF5cMjCAgioiIiHFAQxQJKEQhBBQQU9YRmI6CCIJ2QRnrdNt8fccesSdgE2dTnvi6usDO7s8/Mzswzb5l3NIqiKAghhBDCiba6AxBCCCFqIkmQQgghRBkkQQohhBBlkAQphBBClEESpBBCCFEGfXUHcDkKCws5fPgwwcHB6HS66g5HCCFqBZvNRmpqKl26dMHT07O6w6nxamWCPHz4MKNHj67uMIQQolZatWoV1113XXWHUePVygQZHBwMFP/IoaGh1RyNEELUDklJSYwePVo9h4pLq5UJ0lGtGhoaStOmTas5GiGEqF2kaapipJOOEEIIUQZJkEIIIUQZJEEKIYQQZZAEKYQQQpRBEqQQQghRBkmQQgghRBkkQQohxGXau3cvTz31FHv37q3uUIQb1Mr7IIUQoiZYuXIlx44dIz8/n8jIyOoOR1xhUoIUQojLlJ+f7/RX1C2SIIUQQogySIIUQgghyiAJUgghhCiDWxNkXFwc0dHR9O/fn1WrVpWaf+LECcaMGcMdd9zBgw8+SFZWljvDEUIIISrMbQkyOTmZBQsWsHr1amJjY1mzZg3Hjx9X5yuKwqOPPkpMTAybN2+mY8eOLFu2zF3hCCGEEJXitgQZHx9PZGQkAQEBeHt7ExUVxdatW9X5v/zyC97e3vTp0weARx55RB6CLIQQosZwW4JMSUlxeihnSEgIycnJ6uszZ84QFBTElClTGDx4MDNmzMDb29td4QghhBCV4rYEqShKqWkajUb9v9VqZf/+/dx3333ExcXRrFkzXn75ZXeFI4QQQlSK2xKkyWQiLS1NfZ2SkkJISIj6Ojg4mObNmxMeHg7AoEGDSEhIcFc4QgghRKW4LUH27NmTPXv2kJ6eTkFBAdu3b1fbGwG6detGeno6R44cAWDXrl107tzZXeEIIYQQleK2sVhNJhOTJk1i7NixWCwWRowYQUREBDExMUycOJHw8HD++9//Mm3aNAoKCggNDeWVV15xVzhCCFFpe/fuZe3atdx1110y1mo95NbBygcPHszgwYOdpi1fvlz9/9VXX8369evdGYIQQlw2GYy8fpORdISo5eSRS+4jg5HXb/K4KyFqOSnlCOEeUoIUopaTUo4Q7iEJUgghhCiDJEghhBCiDJIghRBCiDJIghRCCCHKIAlS1HhyG4MQojrIbR6ixpPbGIQQ1UFKkKLGk9sYhBDVQRKkEEIIUQZJkEIIIUQZJEEKIYQQZZAEKYQQQpRBEqQQQghRBkmQQgghRBkkQQohhBBlkARZA8hIMUIIUfPISDo1gIwUI4QQNY+UIGsAGSlGCCFqHkmQQgghRBlcJsjHH3+c+Pj4qohFiBpJ2oiFqJ9cJsgBAwawZMkSoqKiWLFiBZmZmVUQlhA1x8qVKzl48CArV66s7lCEEFXIZYIcPHgwH330EUuWLOHixYuMHDmSyZMnk5CQUBXxCaQEU92kjViI+qlCbZB2u53Tp09z6tQprFYrjRo1YubMmbz66quX/FxcXBzR0dH079+fVatWlZq/ePFi+vXrx5AhQxgyZEiZ7xFSghFCiOrg8jaPBQsWsGHDBpo1a8a9997LwoULMRgM5Ofn069fPyZPnlzm55KTk9XPGo1GRo0aRY8ePWjTpo36nsOHD/P666/TrVu3K7dGdZCUYIQQouq5TJDp6eksX76cDh06OE339vZm/vz55X4uPj6eyMhIAgICAIiKimLr1q089thj6nsOHz7M8uXLOXv2LNdffz1TpkzBw8PjMldFCCGEuHJcVrHabLZSyfHxxx8HoFevXuV+LiUlheDgYPV1SEgIycnJ6uu8vDw6duzIlClT2LhxI9nZ2SxZsqTSKyBEdZM2YlEe2Tdqt3JLkDNmzCA5OZkffviB9PR0dbrVauXEiRMuF6woSqlpGo1G/b+Pjw/Lly9XX//rX//iueeeY9KkSRUOXoiaQEZCEuWRfaN2KzdBjhgxgmPHjnH06FGioqLU6TqdrkJthiaTie+//159nZKSQkhIiPo6MTGR+Ph4RowYARQnVL1eRr4TtY+0EYvyyL5Ru5WbkcLDwwkPD+fGG2/EZDJVesE9e/Zk0aJFpKen4+Xlxfbt25k9e7Y639PTk1dffZUePXrQtGlTVq1aRf/+/S9vLYQQQogrrNwE+cQTT7Bw4UIeeuihMufHxcVdcsEmk4lJkyYxduxYLBYLI0aMICIigpiYGCZOnEh4eDizZs3i0UcfxWKxcM011zBu3Lh/tjZCCCHEFVJugoyJiQHg+eefv+yFDx48mMGDBztNK9nuGBUV5VR9K4QQQtQU5SZIjUbDL7/8go+PT1XGI4QQQtQI5SZIx60cZdFoNOzcudMtAQkhhBA1QbkJcteuXVUZhxBCCFGjlJsgly9fTkxMDC+++GKZ86dNm+a2oIQQQojqVm6C9PPzA1CHihNCCCHqk3IT5KhRowB47LHH1NFz9Ho9LVq0QKut0ENAhBBCiFrL5dA1CQkJPPHEE0DxY688PT1ZtGgR7dq1c3twQoi6be/evaxdu5a77rpLhmITNY7LBPniiy8ye/ZsdWDyXbt2MWPGDD7++GO3ByeEqNtkrFJRk7msK7VYLE5P7bj55pspKChwa1BCiPpBxioVNZnLBNm5c2e2bt2qvt69ezedOnVya1BCVCV5JJEQoizlVrF269YNjUaD3W5n/fr1NGjQAK1WS0ZGBkFBQVUZoxBuJdV8QoiylJsgt2zZUpVx1GjSkaBuk2o+IURZyk2QYWFh6v9//fVX8vPzURQFm83GmTNnuOuuu6okwJpAShg1m1zACCHcwWUv1mnTprFz504KCwsxmUycOXOGa6+9tl4lSClh1GxyASOEcAeXnXTi4+PZuXMnAwYMYNmyZaxcuRJPT8+qiE2ICpELGCGEO7hMkMHBwXh7e9OqVSt+//13unfvTkZGRlXEJoQQQlQblwnSYDBw4MABWrduzTfffENOTo4kSCGEEHWeywT5zDPP8Mknn9C3b19+++03IiMjueOOO6oiNiGEEKLauOyk07VrV7p27QrAunXryM7Oxt/f391xCSGEENXKZQny4sWLPPXUU/To0YNevXrx8ssvk52dXRWxCSGEENXGZYKcNm0azZo1Y/369axatYoGDRowffr0qohNCCGEqDYuq1jPnz/P0qVL1ddTpkxh8ODBbg1KCCGEqG4uS5AhISGcPXtWfZ2UlERwcLBbgxJVq6YO1m22mSs1XQghrqRyS5CPPPIIAOnp6QwdOpSePXui1WrZt28f7du3r7IAhfvV1JFojDojA2Pvx5yXDMD5vGQGxt7PF0Per+b
[remainder of base64-encoded PNG image data truncated]\n",
754
+ "text/plain": [
755
+ "<Figure size 432x288 with 1 Axes>"
756
+ ]
757
+ },
758
+ "metadata": {},
759
+ "output_type": "display_data"
760
+ }
761
+ ],
762
+ "source": [
763
+ "# Explore Native Nation vs Income\n",
764
+ "g = sns.barplot(x=\"native.country\",y=\"income\",data=dataset)\n",
765
+ "g = g.set_ylabel(\"Income >50K Probability\")\n",
766
+ "sns.plt.show()"
767
+ ]
768
+ },
769
+ {
770
+ "cell_type": "code",
771
+ "execution_count": null,
772
+ "metadata": {
773
+ "_cell_guid": "30b367e2-afec-49fa-b52a-0371f1d6c0bc",
774
+ "_execution_state": "idle",
775
+ "_uuid": "36779048741bd7d8c4be010aab17761657dcc952"
776
+ },
777
+ "outputs": [],
778
+ "source": [
779
+ "# Explore Sex vs Income\n",
780
+ "g = sns.barplot(x=\"sex\",y=\"income\",data=dataset)\n",
781
+ "g = g.set_ylabel(\"Income >50K Probability\")\n",
782
+ "sns.plt.show()"
783
+ ]
784
+ },
785
+ {
786
+ "cell_type": "code",
787
+ "execution_count": null,
788
+ "metadata": {
789
+ "_cell_guid": "70d38dd5-6ea6-4f6b-a5ea-2cd8d7c5e3b4",
790
+ "_execution_state": "idle",
791
+ "_uuid": "2b3ef28f7ccd536387bb5dc29f30a63df5c1f8a8"
792
+ },
793
+ "outputs": [],
794
+ "source": [
795
+ "# Explore Relationship vs Income\n",
796
+ "g = sns.factorplot(x=\"relationship\",y=\"income\",data=dataset,kind=\"bar\", size = 6 ,\n",
797
+ "palette = \"muted\")\n",
798
+ "g.despine(left=True)\n",
799
+ "g = g.set_ylabels(\"Income >50K Probability\")\n",
800
+ "sns.plt.show()"
801
+ ]
802
+ },
803
+ {
804
+ "cell_type": "code",
805
+ "execution_count": null,
806
+ "metadata": {
807
+ "_cell_guid": "c35908c8-7587-4dd6-826a-044a61a1fddd",
808
+ "_execution_state": "idle",
809
+ "_uuid": "8b0379c50391da248a88b6062e290200af00e65b"
810
+ },
811
+ "outputs": [],
812
+ "source": [
813
+ "# Explore Marital Status vs Income\n",
814
+ "g = sns.factorplot(x=\"marital.status\",y=\"income\",data=dataset,kind=\"bar\", size = 6 ,\n",
815
+ "palette = \"muted\")\n",
816
+ "g.despine(left=True)\n",
817
+ "g = g.set_ylabels(\"Income >50K Probability\")\n",
818
+ "sns.plt.show()"
819
+ ]
820
+ },
821
+ {
822
+ "cell_type": "code",
823
+ "execution_count": null,
824
+ "metadata": {
825
+ "_cell_guid": "6dcfbb5a-dff6-4d90-9ed5-9c217810a7ea",
826
+ "_execution_state": "idle",
827
+ "_uuid": "5199bc6918c701af26e84802b068fc858f9d0617"
828
+ },
829
+ "outputs": [],
830
+ "source": [
831
+ "# Explore Workclass vs Income\n",
832
+ "g = sns.factorplot(x=\"workclass\",y=\"income\",data=dataset,kind=\"bar\", size = 6 ,\n",
833
+ "palette = \"muted\")\n",
834
+ "g.despine(left=True)\n",
835
+ "g = g.set_ylabels(\"Income >50K Probability\")\n",
836
+ "sns.plt.show()"
837
+ ]
838
+ },
839
+ {
840
+ "cell_type": "markdown",
841
+ "metadata": {
842
+ "_cell_guid": "f370f948-2810-48a9-b54f-b4c9c8ff851d",
843
+ "_execution_state": "idle",
844
+ "_uuid": "be23c9e7f4907134e69ca32b3e50de52e7f0a111"
845
+ },
846
+ "source": [
847
+ "##4. Feature Engineering"
848
+ ]
849
+ },
850
+ {
851
+ "cell_type": "code",
852
+ "execution_count": 12,
853
+ "metadata": {
854
+ "_cell_guid": "1f2a9e2f-273b-495b-a615-4e9b8dd33c69",
855
+ "_execution_state": "idle",
856
+ "_uuid": "33f5e2439162592c594918029c6ececc4d6fa817"
857
+ },
858
+ "outputs": [
859
+ {
860
+ "name": "stdout",
861
+ "output_type": "stream",
862
+ "text": [
863
+ "Dataset with Dropped Labels\n",
864
+ " age fnlwgt education.num marital.status sex capital.gain \\\n",
865
+ "0 90 77053 9 0 0 0 \n",
866
+ "1 82 132870 9 0 0 0 \n",
867
+ "2 66 186061 10 0 0 0 \n",
868
+ "3 54 140359 4 0 0 0 \n",
869
+ "4 41 264663 10 0 0 0 \n",
870
+ "\n",
871
+ " capital.loss hours.per.week income \n",
872
+ "0 4356 40 0 \n",
873
+ "1 4356 18 0 \n",
874
+ "2 4356 40 0 \n",
875
+ "3 3900 40 0 \n",
876
+ "4 3900 40 0 \n"
877
+ ]
878
+ }
879
+ ],
880
+ "source": [
881
+ "####################################################\n",
882
+ "############### FEATURE ENGINEERING ################\n",
883
+ "####################################################\n",
884
+ "# Convert Sex value to 0 and 1\n",
885
+ "dataset[\"sex\"] = dataset[\"sex\"].map({\"Male\": 1, \"Female\":0})\n",
886
+ "\n",
887
+ "# Create Married Column - Binary Yes(1) or No(0)\n",
888
+ "dataset[\"marital.status\"] = dataset[\"marital.status\"].replace(['Never-married','Divorced','Separated','Widowed'], 'Single')\n",
889
+ "dataset[\"marital.status\"] = dataset[\"marital.status\"].replace(['Married-civ-spouse','Married-spouse-absent','Married-AF-spouse'], 'Married')\n",
890
+ "dataset[\"marital.status\"] = dataset[\"marital.status\"].map({\"Married\":1, \"Single\":0})\n",
891
+ "dataset[\"marital.status\"] = dataset[\"marital.status\"].astype(int)\n",
892
+ "\n",
893
+ "# Drop the data you don't want to use\n",
894
+ "dataset.drop(labels=[\"workclass\",\"education\",\"occupation\",\"relationship\",\"race\",\"native.country\"], axis = 1, inplace = True)\n",
895
+ "print('Dataset with Dropped Labels')\n",
896
+ "print(dataset.head())"
897
+ ]
898
+ },
899
+ {
900
+ "cell_type": "markdown",
901
+ "metadata": {
902
+ "_cell_guid": "46f46b2b-9ecc-4010-88c2-ae7872f80d8f",
903
+ "_execution_state": "idle",
904
+ "_uuid": "ab574da3a5eed4439cd48fe5dd6e681f01778d6e"
905
+ },
906
+ "source": [
907
+ "##5. Modeling"
908
+ ]
909
+ },
910
+ {
911
+ "cell_type": "code",
912
+ "execution_count": null,
913
+ "metadata": {
914
+ "_cell_guid": "2168869f-7cc2-4156-94a2-f1d27faaea17",
915
+ "_execution_state": "idle",
916
+ "_uuid": "a8b064a5427aa5e314094466a394d623bf7f020c"
917
+ },
918
+ "outputs": [
919
+ {
920
+ "name": "stdout",
921
+ "output_type": "stream",
922
+ "text": [
923
+ "Split Data: X\n",
924
+ "[[ 90 77053 9 ... 0 4356 40]\n",
925
+ " [ 82 132870 9 ... 0 4356 18]\n",
926
+ " [ 66 186061 10 ... 0 4356 40]\n",
927
+ " ...\n",
928
+ " [ 40 154374 9 ... 0 0 40]\n",
929
+ " [ 58 151910 9 ... 0 0 40]\n",
930
+ " [ 22 201490 9 ... 0 0 20]]\n",
931
+ "Split Data: Y\n",
932
+ "[0 0 0 ... 1 0 0]\n"
933
+ ]
934
+ },
935
+ {
936
+ "name": "stderr",
937
+ "output_type": "stream",
938
+ "text": [
939
+ "Setting a random_state has no effect since shuffle is False. This will raise an error in 0.24. You should leave random_state to its default (None), or set shuffle=True.\n"
940
+ ]
941
+ },
942
+ {
943
+ "name": "stdout",
944
+ "output_type": "stream",
945
+ "text": [
946
+ "LR: 0.796836 (0.003727)\n"
947
+ ]
948
+ },
949
+ {
950
+ "name": "stderr",
951
+ "output_type": "stream",
952
+ "text": [
953
+ "Setting a random_state has no effect since shuffle is False. This will raise an error in 0.24. You should leave random_state to its default (None), or set shuffle=True.\n"
954
+ ]
955
+ },
956
+ {
957
+ "name": "stdout",
958
+ "output_type": "stream",
959
+ "text": [
960
+ "LDA: 0.829507 (0.004318)\n"
961
+ ]
962
+ },
963
+ {
964
+ "name": "stderr",
965
+ "output_type": "stream",
966
+ "text": [
967
+ "Setting a random_state has no effect since shuffle is False. This will raise an error in 0.24. You should leave random_state to its default (None), or set shuffle=True.\n"
968
+ ]
969
+ },
970
+ {
971
+ "name": "stdout",
972
+ "output_type": "stream",
973
+ "text": [
974
+ "KNN: 0.774455 (0.005765)\n"
975
+ ]
976
+ },
977
+ {
978
+ "name": "stderr",
979
+ "output_type": "stream",
980
+ "text": [
981
+ "Setting a random_state has no effect since shuffle is False. This will raise an error in 0.24. You should leave random_state to its default (None), or set shuffle=True.\n"
982
+ ]
983
+ },
984
+ {
985
+ "name": "stdout",
986
+ "output_type": "stream",
987
+ "text": [
988
+ "CART: 0.808009 (0.006767)\n",
989
+ "NB: 0.794303 (0.003642)\n"
990
+ ]
991
+ },
992
+ {
993
+ "name": "stderr",
994
+ "output_type": "stream",
995
+ "text": [
996
+ "Setting a random_state has no effect since shuffle is False. This will raise an error in 0.24. You should leave random_state to its default (None), or set shuffle=True.\n",
997
+ "Setting a random_state has no effect since shuffle is False. This will raise an error in 0.24. You should leave random_state to its default (None), or set shuffle=True.\n"
998
+ ]
999
+ }
1000
+ ],
1001
+ "source": [
1002
+ "###################################################\n",
1003
+ "##################### MODELING #####################\n",
1004
+ "####################################################\n",
1005
+ "# Split-out Validation Dataset and Create Test Variables\n",
1006
+ "array = dataset.values\n",
1007
+ "X = array[:,0:8]\n",
1008
+ "Y = array[:,8]\n",
1009
+ "print('Split Data: X')\n",
1010
+ "print(X)\n",
1011
+ "print('Split Data: Y')\n",
1012
+ "print(Y)\n",
1013
+ "validation_size = 0.20\n",
1014
+ "seed = 7\n",
1015
+ "num_folds = 10\n",
1016
+ "scoring = 'accuracy'\n",
1017
+ "X_train, X_validation, Y_train, Y_validation = train_test_split(X,Y,\n",
1018
+ " test_size=validation_size,random_state=seed)\n",
1019
+ "\n",
1020
+ "# Params for Random Forest\n",
1021
+ "num_trees = 100\n",
1022
+ "max_features = 3\n",
1023
+ "\n",
1024
+ "#Spot Check 5 Algorithms (LR, LDA, KNN, CART, GNB, SVM)\n",
1025
+ "models = []\n",
1026
+ "models.append(('LR', LogisticRegression()))\n",
1027
+ "models.append(('LDA', LinearDiscriminantAnalysis()))\n",
1028
+ "models.append(('KNN', KNeighborsClassifier()))\n",
1029
+ "models.append(('CART', DecisionTreeClassifier()))\n",
1030
+ "models.append(('NB', GaussianNB()))\n",
1031
+ "models.append(('RF', RandomForestClassifier(n_estimators=num_trees, max_features=max_features)))\n",
1032
+ "#models.append(('SVM', SVC()))\n",
1033
+ "# evalutate each model in turn\n",
1034
+ "results = []\n",
1035
+ "names = []\n",
1036
+ "for name, model in models:\n",
1037
+ " kfold = KFold(n_splits=10, random_state=seed)\n",
1038
+ " cv_results = cross_val_score(model, X_train, Y_train, cv=kfold, scoring='accuracy')\n",
1039
+ " results.append(cv_results)\n",
1040
+ " names.append(name)\n",
1041
+ " msg = \"%s: %f (%f)\" % (name, cv_results.mean(), cv_results.std())\n",
1042
+ " print(msg)"
1043
+ ]
1044
+ },
1045
+ {
1046
+ "cell_type": "code",
1047
+ "execution_count": 22,
1048
+ "metadata": {
1049
+ "_cell_guid": "28db75e2-b091-4199-a358-e83b26e29836",
1050
+ "_execution_state": "idle",
1051
+ "_uuid": "8be8d86aeb3f97e762f7184db80c2fa4f5fcb5ed"
1052
+ },
1053
+ "outputs": [],
1054
+ "source": [
1055
+ "fig = plt.figure()\n",
1056
+ "fig.suptitle('Algorith Comparison')\n",
1057
+ "ax = fig.add_subplot(111)\n",
1058
+ "plt.boxplot(results)\n",
1059
+ "ax.set_xticklabels(names)\n",
1060
+ "plt.show()"
1061
+ ]
1062
+ },
1063
+ {
1064
+ "cell_type": "markdown",
1065
+ "metadata": {
1066
+ "_cell_guid": "bc5073e0-41df-4105-8215-ad8cb89241f8",
1067
+ "_execution_state": "idle",
1068
+ "_uuid": "a016da59293800e79278556bae8a0aa550e46184"
1069
+ },
1070
+ "source": [
1071
+ "##6. Algorithm Tuning"
1072
+ ]
1073
+ },
1074
+ {
1075
+ "cell_type": "code",
1076
+ "execution_count": null,
1077
+ "metadata": {
1078
+ "_cell_guid": "ce7f4775-1714-482c-a6de-ceeae6fac0d2",
1079
+ "_execution_state": "idle",
1080
+ "_uuid": "6d3b6b7ac9a070d9c9513d56d40aa3e2a75ce997"
1081
+ },
1082
+ "outputs": [],
1083
+ "source": [
1084
+ "####################################################\n",
1085
+ "################ ALGORITHM TUNING ##################\n",
1086
+ "####################################################\n",
1087
+ "'''\n",
1088
+ "Commented Out to Reduce Script Time - Took 20 Minutes to run.\n",
1089
+ "best n_estimator = 250\n",
1090
+ "best max_feature = 5\n",
1091
+ "# Tune Random Forest\n",
1092
+ "n_estimators = np.array([50,100,150,200,250])\n",
1093
+ "max_features = np.array([1,2,3,4,5])\n",
1094
+ "param_grid = dict(n_estimators=n_estimators,max_features=max_features)\n",
1095
+ "model = RandomForestClassifier()\n",
1096
+ "kfold = KFold(n_splits=num_folds, random_state=seed)\n",
1097
+ "grid = GridSearchCV(estimator=model, param_grid=param_grid, scoring=scoring, cv=kfold)\n",
1098
+ "grid_result = grid.fit(X_train, Y_train)\n",
1099
+ "print(\"Best: %f using %s\" % (grid_result.best_score_, grid_result.best_params_))\n",
1100
+ "means = grid_result.cv_results_['mean_test_score']\n",
1101
+ "stds = grid_result.cv_results_['std_test_score']\n",
1102
+ "params = grid_result.cv_results_['params']\n",
1103
+ "for mean, stdev, param in zip(means, stds, params):\n",
1104
+ " print(\"%f (%f) with: %r\" % (mean, stdev, param))\n",
1105
+ "'''"
1106
+ ]
1107
+ },
1108
+ {
1109
+ "cell_type": "markdown",
1110
+ "metadata": {
1111
+ "_cell_guid": "52cb2e35-1e66-4e7c-bc74-4e7e149d6a60",
1112
+ "_execution_state": "idle",
1113
+ "_uuid": "7491499e7b6de7b0b50cd671a4db9262062d0679"
1114
+ },
1115
+ "source": [
1116
+ "##7. Finalize Model"
1117
+ ]
1118
+ },
1119
+ {
1120
+ "cell_type": "code",
1121
+ "execution_count": 23,
1122
+ "metadata": {
1123
+ "_cell_guid": "a1c36e3d-3aaa-4952-854a-1d9093387928",
1124
+ "_execution_state": "idle",
1125
+ "_uuid": "6a62aa15b02a8d0de1166725e8122344418785ed"
1126
+ },
1127
+ "outputs": [],
1128
+ "source": [
1129
+ "####################################################\n",
1130
+ "################# FINALIZE MODEL ###################\n",
1131
+ "####################################################\n",
1132
+ "# 5. Finalize Model\n",
1133
+ "# a) Predictions on validation dataset - KNN\n",
1134
+ "random_forest = RandomForestClassifier(n_estimators=250,max_features=5)\n",
1135
+ "random_forest.fit(X_train, Y_train)\n",
1136
+ "predictions = random_forest.predict(X_validation)\n",
1137
+ "print(\"Accuracy: %s%%\" % (100*accuracy_score(Y_validation, predictions)))\n",
1138
+ "print(confusion_matrix(Y_validation, predictions))\n",
1139
+ "print(classification_report(Y_validation, predictions))"
1140
+ ]
1141
+ }
1142
+ ],
1143
+ "metadata": {
1144
+ "kernelspec": {
1145
+ "display_name": "Python 3",
1146
+ "language": "python",
1147
+ "name": "python3"
1148
+ },
1149
+ "language_info": {
1150
+ "codemirror_mode": {
1151
+ "name": "ipython",
1152
+ "version": 3
1153
+ },
1154
+ "file_extension": ".py",
1155
+ "mimetype": "text/x-python",
1156
+ "name": "python",
1157
+ "nbconvert_exporter": "python",
1158
+ "pygments_lexer": "ipython3",
1159
+ "version": "3.8.5"
1160
+ }
1161
+ },
1162
+ "nbformat": 4,
1163
+ "nbformat_minor": 1
1164
+ }
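
For reference, a minimal self-contained sketch of the spot-check step from the notebook above. It assumes the repo's ../../Data/adult.csv with raw string labels and encodes income to 0/1 inline (the notebook does this in an earlier cell not shown here); KFold is given shuffle=True so the fixed random_state actually takes effect, which avoids the warning visible in the recorded output.

import pandas as pd
from sklearn.model_selection import KFold, cross_val_score, train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier

# assumed raw labels; the notebook performs this encoding in an earlier cell
dataset = pd.read_csv('../../Data/adult.csv')
dataset['income'] = dataset['income'].map({'<=50K': 0, '>50K': 1})
dataset['sex'] = dataset['sex'].map({'Male': 1, 'Female': 0})
dataset['marital.status'] = dataset['marital.status'].replace(
    ['Never-married', 'Divorced', 'Separated', 'Widowed'], 'Single').replace(
    ['Married-civ-spouse', 'Married-spouse-absent', 'Married-AF-spouse'], 'Married')
dataset['marital.status'] = dataset['marital.status'].map({'Married': 1, 'Single': 0})
dataset = dataset.drop(columns=['workclass', 'education', 'occupation',
                                'relationship', 'race', 'native.country'])

X = dataset.drop(columns=['income']).values
Y = dataset['income'].values
X_train, X_validation, Y_train, Y_validation = train_test_split(X, Y, test_size=0.20, random_state=7)

models = [('LR', LogisticRegression(max_iter=1000)),
          ('RF', RandomForestClassifier(n_estimators=100, max_features=3))]
for name, model in models:
    kfold = KFold(n_splits=10, shuffle=True, random_state=7)  # shuffle=True so the seed is honoured
    cv_results = cross_val_score(model, X_train, Y_train, cv=kfold, scoring='accuracy')
    print('%s: %f (%f)' % (name, cv_results.mean(), cv_results.std()))
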
AdultNoteBook/Kernels/GBC/.ipynb_checkpoints/1-multiple-ml-techniques-and-analysis-of-dataset-checkpoint.ipynb ADDED
@@ -0,0 +1,1476 @@
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "markdown",
5
+ "metadata": {
6
+ "_cell_guid": "f31622ec-28ad-6203-7af8-ac8fc6ef9d57"
7
+ },
8
+ "source": []
9
+ },
10
+ {
11
+ "cell_type": "code",
12
+ "execution_count": 6,
13
+ "metadata": {
14
+ "_cell_guid": "9e275424-f0c4-c4e0-e8fa-ed4a0bf44a3b"
15
+ },
16
+ "outputs": [],
17
+ "source": [
18
+ "import pandas as pd\n",
19
+ "import numpy as np\n",
20
+ "import matplotlib.pyplot as plt\n",
21
+ "from sklearn import metrics\n",
22
+ "from sklearn import datasets\n",
23
+ "from sklearn.feature_selection import RFE\n",
24
+ "from sklearn.linear_model import LogisticRegression\n",
25
+ "from sklearn.ensemble import ExtraTreesClassifier\n",
26
+ "from sklearn.ensemble import GradientBoostingClassifier\n",
27
+ "from sklearn import tree\n",
28
+ "#from sklearn.cross_validation import KFold, cross_val_score\n",
29
+ "#from sklearn.cross_validation import train_test_split\n",
30
+ "%matplotlib inline\n",
31
+ "\n",
32
+ "path = '../../Data/adult.csv'\n",
33
+ "data = pd.read_csv(path)\n",
34
+ "\n",
35
+ "# remove rows where occupation is unknown\n",
36
+ "data = data[data.occupation != '?']\n",
37
+ "raw_data = data[data.occupation != '?']"
38
+ ]
39
+ },
40
+ {
41
+ "cell_type": "code",
42
+ "execution_count": 7,
43
+ "metadata": {},
44
+ "outputs": [],
45
+ "source": [
46
+ "from aif360.datasets import StandardDataset\n",
47
+ "from aif360.metrics import BinaryLabelDatasetMetric, ClassificationMetric\n",
48
+ "import matplotlib.patches as patches\n",
49
+ "from aif360.algorithms.preprocessing import Reweighing\n",
50
+ "#from packages import *\n",
51
+ "#from ml_fairness import *\n",
52
+ "import matplotlib.pyplot as plt\n",
53
+ "import seaborn as sns\n",
54
+ "\n",
55
+ "\n",
56
+ "\n",
57
+ "from IPython.display import Markdown, display"
58
+ ]
59
+ },
60
+ {
61
+ "cell_type": "code",
62
+ "execution_count": null,
63
+ "metadata": {},
64
+ "outputs": [],
65
+ "source": []
66
+ },
67
+ {
68
+ "cell_type": "code",
69
+ "execution_count": 8,
70
+ "metadata": {
71
+ "_cell_guid": "9b69562b-3aa3-7097-d454-4d956c7a1815"
72
+ },
73
+ "outputs": [
74
+ {
75
+ "data": {
76
+ "text/html": [
77
+ "<div>\n",
78
+ "<style scoped>\n",
79
+ " .dataframe tbody tr th:only-of-type {\n",
80
+ " vertical-align: middle;\n",
81
+ " }\n",
82
+ "\n",
83
+ " .dataframe tbody tr th {\n",
84
+ " vertical-align: top;\n",
85
+ " }\n",
86
+ "\n",
87
+ " .dataframe thead th {\n",
88
+ " text-align: right;\n",
89
+ " }\n",
90
+ "</style>\n",
91
+ "<table border=\"1\" class=\"dataframe\">\n",
92
+ " <thead>\n",
93
+ " <tr style=\"text-align: right;\">\n",
94
+ " <th></th>\n",
95
+ " <th>age</th>\n",
96
+ " <th>workclass</th>\n",
97
+ " <th>fnlwgt</th>\n",
98
+ " <th>education</th>\n",
99
+ " <th>education.num</th>\n",
100
+ " <th>marital.status</th>\n",
101
+ " <th>occupation</th>\n",
102
+ " <th>relationship</th>\n",
103
+ " <th>race</th>\n",
104
+ " <th>sex</th>\n",
105
+ " <th>...</th>\n",
106
+ " <th>capital.loss</th>\n",
107
+ " <th>hours.per.week</th>\n",
108
+ " <th>native.country</th>\n",
109
+ " <th>income</th>\n",
110
+ " <th>workclass_num</th>\n",
111
+ " <th>over50K</th>\n",
112
+ " <th>marital_num</th>\n",
113
+ " <th>race_num</th>\n",
114
+ " <th>sex_num</th>\n",
115
+ " <th>rel_num</th>\n",
116
+ " </tr>\n",
117
+ " </thead>\n",
118
+ " <tbody>\n",
119
+ " <tr>\n",
120
+ " <th>1</th>\n",
121
+ " <td>82</td>\n",
122
+ " <td>Private</td>\n",
123
+ " <td>132870</td>\n",
124
+ " <td>HS-grad</td>\n",
125
+ " <td>9</td>\n",
126
+ " <td>Widowed</td>\n",
127
+ " <td>Exec-managerial</td>\n",
128
+ " <td>Not-in-family</td>\n",
129
+ " <td>White</td>\n",
130
+ " <td>Female</td>\n",
131
+ " <td>...</td>\n",
132
+ " <td>4356</td>\n",
133
+ " <td>18</td>\n",
134
+ " <td>United-States</td>\n",
135
+ " <td>&lt;=50K</td>\n",
136
+ " <td>0</td>\n",
137
+ " <td>0</td>\n",
138
+ " <td>0</td>\n",
139
+ " <td>0</td>\n",
140
+ " <td>0</td>\n",
141
+ " <td>0</td>\n",
142
+ " </tr>\n",
143
+ " <tr>\n",
144
+ " <th>3</th>\n",
145
+ " <td>54</td>\n",
146
+ " <td>Private</td>\n",
147
+ " <td>140359</td>\n",
148
+ " <td>7th-8th</td>\n",
149
+ " <td>4</td>\n",
150
+ " <td>Divorced</td>\n",
151
+ " <td>Machine-op-inspct</td>\n",
152
+ " <td>Unmarried</td>\n",
153
+ " <td>White</td>\n",
154
+ " <td>Female</td>\n",
155
+ " <td>...</td>\n",
156
+ " <td>3900</td>\n",
157
+ " <td>40</td>\n",
158
+ " <td>United-States</td>\n",
159
+ " <td>&lt;=50K</td>\n",
160
+ " <td>0</td>\n",
161
+ " <td>0</td>\n",
162
+ " <td>1</td>\n",
163
+ " <td>0</td>\n",
164
+ " <td>0</td>\n",
165
+ " <td>0</td>\n",
166
+ " </tr>\n",
167
+ " <tr>\n",
168
+ " <th>4</th>\n",
169
+ " <td>41</td>\n",
170
+ " <td>Private</td>\n",
171
+ " <td>264663</td>\n",
172
+ " <td>Some-college</td>\n",
173
+ " <td>10</td>\n",
174
+ " <td>Separated</td>\n",
175
+ " <td>Prof-specialty</td>\n",
176
+ " <td>Own-child</td>\n",
177
+ " <td>White</td>\n",
178
+ " <td>Female</td>\n",
179
+ " <td>...</td>\n",
180
+ " <td>3900</td>\n",
181
+ " <td>40</td>\n",
182
+ " <td>United-States</td>\n",
183
+ " <td>&lt;=50K</td>\n",
184
+ " <td>0</td>\n",
185
+ " <td>0</td>\n",
186
+ " <td>2</td>\n",
187
+ " <td>0</td>\n",
188
+ " <td>0</td>\n",
189
+ " <td>0</td>\n",
190
+ " </tr>\n",
191
+ " <tr>\n",
192
+ " <th>5</th>\n",
193
+ " <td>34</td>\n",
194
+ " <td>Private</td>\n",
195
+ " <td>216864</td>\n",
196
+ " <td>HS-grad</td>\n",
197
+ " <td>9</td>\n",
198
+ " <td>Divorced</td>\n",
199
+ " <td>Other-service</td>\n",
200
+ " <td>Unmarried</td>\n",
201
+ " <td>White</td>\n",
202
+ " <td>Female</td>\n",
203
+ " <td>...</td>\n",
204
+ " <td>3770</td>\n",
205
+ " <td>45</td>\n",
206
+ " <td>United-States</td>\n",
207
+ " <td>&lt;=50K</td>\n",
208
+ " <td>0</td>\n",
209
+ " <td>0</td>\n",
210
+ " <td>1</td>\n",
211
+ " <td>0</td>\n",
212
+ " <td>0</td>\n",
213
+ " <td>0</td>\n",
214
+ " </tr>\n",
215
+ " <tr>\n",
216
+ " <th>6</th>\n",
217
+ " <td>38</td>\n",
218
+ " <td>Private</td>\n",
219
+ " <td>150601</td>\n",
220
+ " <td>10th</td>\n",
221
+ " <td>6</td>\n",
222
+ " <td>Separated</td>\n",
223
+ " <td>Adm-clerical</td>\n",
224
+ " <td>Unmarried</td>\n",
225
+ " <td>White</td>\n",
226
+ " <td>Male</td>\n",
227
+ " <td>...</td>\n",
228
+ " <td>3770</td>\n",
229
+ " <td>40</td>\n",
230
+ " <td>United-States</td>\n",
231
+ " <td>&lt;=50K</td>\n",
232
+ " <td>0</td>\n",
233
+ " <td>0</td>\n",
234
+ " <td>2</td>\n",
235
+ " <td>0</td>\n",
236
+ " <td>1</td>\n",
237
+ " <td>0</td>\n",
238
+ " </tr>\n",
239
+ " </tbody>\n",
240
+ "</table>\n",
241
+ "<p>5 rows × 21 columns</p>\n",
242
+ "</div>"
243
+ ],
244
+ "text/plain": [
245
+ " age workclass fnlwgt education education.num marital.status \\\n",
246
+ "1 82 Private 132870 HS-grad 9 Widowed \n",
247
+ "3 54 Private 140359 7th-8th 4 Divorced \n",
248
+ "4 41 Private 264663 Some-college 10 Separated \n",
249
+ "5 34 Private 216864 HS-grad 9 Divorced \n",
250
+ "6 38 Private 150601 10th 6 Separated \n",
251
+ "\n",
252
+ " occupation relationship race sex ... capital.loss \\\n",
253
+ "1 Exec-managerial Not-in-family White Female ... 4356 \n",
254
+ "3 Machine-op-inspct Unmarried White Female ... 3900 \n",
255
+ "4 Prof-specialty Own-child White Female ... 3900 \n",
256
+ "5 Other-service Unmarried White Female ... 3770 \n",
257
+ "6 Adm-clerical Unmarried White Male ... 3770 \n",
258
+ "\n",
259
+ " hours.per.week native.country income workclass_num over50K marital_num \\\n",
260
+ "1 18 United-States <=50K 0 0 0 \n",
261
+ "3 40 United-States <=50K 0 0 1 \n",
262
+ "4 40 United-States <=50K 0 0 2 \n",
263
+ "5 45 United-States <=50K 0 0 1 \n",
264
+ "6 40 United-States <=50K 0 0 2 \n",
265
+ "\n",
266
+ " race_num sex_num rel_num \n",
267
+ "1 0 0 0 \n",
268
+ "3 0 0 0 \n",
269
+ "4 0 0 0 \n",
270
+ "5 0 0 0 \n",
271
+ "6 0 1 0 \n",
272
+ "\n",
273
+ "[5 rows x 21 columns]"
274
+ ]
275
+ },
276
+ "execution_count": 8,
277
+ "metadata": {},
278
+ "output_type": "execute_result"
279
+ }
280
+ ],
281
+ "source": [
282
+ "# create numerical columns representing the categorical data\n",
283
+ "data['workclass_num'] = data.workclass.map({'Private':0, 'State-gov':1, 'Federal-gov':2, 'Self-emp-not-inc':3, 'Self-emp-inc':4, 'Local-gov':5, 'Without-pay':6})\n",
284
+ "data['over50K'] = np.where(data.income == '<=50K', 0, 1)\n",
285
+ "data['marital_num'] = data['marital.status'].map({'Widowed':0, 'Divorced':1, 'Separated':2, 'Never-married':3, 'Married-civ-spouse':4, 'Married-AF-spouse':4, 'Married-spouse-absent':5})\n",
286
+ "data['race_num'] = data.race.map({'White':0, 'Black':1, 'Asian-Pac-Islander':2, 'Amer-Indian-Eskimo':3, 'Other':4})\n",
287
+ "data['sex_num'] = np.where(data.sex == 'Female', 0, 1)\n",
288
+ "data['rel_num'] = data.relationship.map({'Not-in-family':0, 'Unmarried':0, 'Own-child':0, 'Other-relative':0, 'Husband':1, 'Wife':1})\n",
289
+ "data.head()"
290
+ ]
291
+ },
292
+ {
293
+ "cell_type": "markdown",
294
+ "metadata": {
295
+ "_cell_guid": "2792e5d4-f5b1-1da4-9820-fbffd1b75db6"
296
+ },
297
+ "source": []
298
+ },
299
+ {
300
+ "cell_type": "code",
301
+ "execution_count": 9,
302
+ "metadata": {
303
+ "_cell_guid": "5f790189-c824-2ab8-0db2-7c4fa2b5be83"
304
+ },
305
+ "outputs": [],
306
+ "source": [
307
+ "X = data[['workclass_num', 'education.num', 'marital_num', 'race_num', 'sex_num', 'rel_num', 'capital.gain', 'capital.loss']]\n",
308
+ "y = data.over50K"
309
+ ]
310
+ },
311
+ {
312
+ "cell_type": "code",
313
+ "execution_count": null,
314
+ "metadata": {
315
+ "_cell_guid": "7f56d42f-b270-7f2d-d4ca-8ae76b868f8e"
316
+ },
317
+ "outputs": [],
318
+ "source": [
319
+ "# create a base classifier used to evaluate a subset of attributes\n",
320
+ "logreg = LogisticRegression()\n",
321
+ "\n",
322
+ "# create the RFE model and select 3 attributes\n",
323
+ "rfe = RFE(logreg, 3)\n",
324
+ "rfe = rfe.fit(X, y)\n",
325
+ "\n",
326
+ "# summarize the selection of the attributes\n",
327
+ "print(rfe.support_)\n",
328
+ "print(rfe.ranking_)"
329
+ ]
330
+ },
331
+ {
332
+ "cell_type": "markdown",
333
+ "metadata": {
334
+ "_cell_guid": "1a6c8ea4-9f59-6625-5845-acd310fccd68"
335
+ },
336
+ "source": []
337
+ },
338
+ {
339
+ "cell_type": "code",
340
+ "execution_count": null,
341
+ "metadata": {
342
+ "_cell_guid": "cb700d68-917e-59ac-b8a1-e07452dd1f3b"
343
+ },
344
+ "outputs": [],
345
+ "source": [
346
+ "# fit an Extra Tree model to the data\n",
347
+ "extree = ExtraTreesClassifier()\n",
348
+ "extree.fit(X, y)\n",
349
+ "\n",
350
+ "# display the relative importance of each attribute\n",
351
+ "relval = extree.feature_importances_\n",
352
+ "\n",
353
+ "# horizontal bar plot of feature importance\n",
354
+ "pos = np.arange(8) + 0.5\n",
355
+ "plt.barh(pos, relval, align='center')\n",
356
+ "plt.title(\"Feature Importance\")\n",
357
+ "plt.xlabel(\"Model Accuracy\")\n",
358
+ "plt.ylabel(\"Features\")\n",
359
+ "plt.yticks(pos, ('Working Class', 'Education', 'Marital Status', 'Race', 'Sex', 'Relationship Status', 'Capital Gain', 'Capital Loss'))\n",
360
+ "plt.grid(True)"
361
+ ]
362
+ },
363
+ {
364
+ "cell_type": "markdown",
365
+ "metadata": {
366
+ "_cell_guid": "e8153e4f-c2b2-4edf-70e8-17cdd44ae67d"
367
+ },
368
+ "source": []
369
+ },
370
+ {
371
+ "cell_type": "code",
372
+ "execution_count": null,
373
+ "metadata": {
374
+ "_cell_guid": "a3ac453b-33b0-b1de-e354-aa5e04bb579e"
375
+ },
376
+ "outputs": [],
377
+ "source": [
378
+ "X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=13)"
379
+ ]
380
+ },
381
+ {
382
+ "cell_type": "markdown",
383
+ "metadata": {
384
+ "_cell_guid": "9610a106-d3cf-16c4-62f3-1a9e9825b3d5"
385
+ },
386
+ "source": []
387
+ },
388
+ {
389
+ "cell_type": "code",
390
+ "execution_count": null,
391
+ "metadata": {
392
+ "_cell_guid": "06b10e07-b243-9bcc-5ffb-8cb09f8c5263"
393
+ },
394
+ "outputs": [],
395
+ "source": [
396
+ "# import\n",
397
+ "from sklearn.linear_model import LogisticRegression\n",
398
+ "\n",
399
+ "# instantiate\n",
400
+ "logreg = LogisticRegression()\n",
401
+ "\n",
402
+ "# fit\n",
403
+ "logreg.fit(X_train, y_train)\n",
404
+ "\n",
405
+ "# predict\n",
406
+ "y_pred = logreg.predict(X_test)\n",
407
+ "\n",
408
+ "print('LogReg %s' % metrics.accuracy_score(y_test, y_pred))"
409
+ ]
410
+ },
411
+ {
412
+ "cell_type": "code",
413
+ "execution_count": null,
414
+ "metadata": {
415
+ "_cell_guid": "267b86ef-6b10-828f-39b2-4ca222533360"
416
+ },
417
+ "outputs": [],
418
+ "source": [
419
+ "# KFolds and Cross_val_scores\n",
420
+ "kf = KFold(len(data), n_folds=10, shuffle=False)\n",
421
+ "print('KFold CrossValScore %s' % cross_val_score(logreg, X, y, cv=kf).mean())"
422
+ ]
423
+ },
424
+ {
425
+ "cell_type": "markdown",
426
+ "metadata": {
427
+ "_cell_guid": "6dd648a7-7ba0-e8c0-c0ab-c53a02c64ce6"
428
+ },
429
+ "source": []
430
+ },
431
+ {
432
+ "cell_type": "code",
433
+ "execution_count": 1,
434
+ "metadata": {
435
+ "_cell_guid": "8508203b-5247-9385-6642-e57ca626083b"
436
+ },
437
+ "outputs": [
438
+ {
439
+ "ename": "NameError",
440
+ "evalue": "name 'np' is not defined",
441
+ "output_type": "error",
442
+ "traceback": [
443
+ "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
444
+ "\u001b[1;31mNameError\u001b[0m Traceback (most recent call last)",
445
+ "\u001b[1;32m<ipython-input-1-90807eed3400>\u001b[0m in \u001b[0;36m<module>\u001b[1;34m\u001b[0m\n\u001b[0;32m 1\u001b[0m \u001b[1;32mfrom\u001b[0m \u001b[0msklearn\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mneighbors\u001b[0m \u001b[1;32mimport\u001b[0m \u001b[0mKNeighborsClassifier\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m----> 2\u001b[1;33m \u001b[0mk_range\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mnp\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0marange\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;36m1\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;36m26\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 3\u001b[0m \u001b[0mscores\u001b[0m \u001b[1;33m=\u001b[0m \u001b[1;33m[\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 4\u001b[0m \u001b[1;32mfor\u001b[0m \u001b[0mk\u001b[0m \u001b[1;32min\u001b[0m \u001b[0mk_range\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 5\u001b[0m \u001b[0mknn\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mKNeighborsClassifier\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mn_neighbors\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mk\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
446
+ "\u001b[1;31mNameError\u001b[0m: name 'np' is not defined"
447
+ ]
448
+ }
449
+ ],
450
+ "source": [
451
+ "from sklearn.neighbors import KNeighborsClassifier\n",
452
+ "k_range = np.arange(1, 26)\n",
453
+ "scores = []\n",
454
+ "for k in k_range:\n",
455
+ " knn = KNeighborsClassifier(n_neighbors=k)\n",
456
+ " knn.fit(X_train, y_train)\n",
457
+ " y_pred = knn.predict(X_test)\n",
458
+ " scores.append(metrics.accuracy_score(y_test, y_pred))\n",
459
+ "print(scores.index(max(scores)), max(scores))"
460
+ ]
461
+ },
462
+ {
463
+ "cell_type": "code",
464
+ "execution_count": null,
465
+ "metadata": {
466
+ "_cell_guid": "0fc7b70c-d513-a44b-c8d3-d8834313228f"
467
+ },
468
+ "outputs": [],
469
+ "source": [
470
+ "# plot the relationship between K and testing accuracy\n",
471
+ "plt.plot(k_range, scores)\n",
472
+ "plt.xlabel('Value of K for KNN')\n",
473
+ "plt.ylabel('Testing Accuracy')\n",
474
+ "plt.grid(True)"
475
+ ]
476
+ },
477
+ {
478
+ "cell_type": "markdown",
479
+ "metadata": {
480
+ "_cell_guid": "d452ca66-0a99-fcd3-a489-dcbad4eabae5"
481
+ },
482
+ "source": []
483
+ },
484
+ {
485
+ "cell_type": "code",
486
+ "execution_count": null,
487
+ "metadata": {
488
+ "_cell_guid": "1d170210-b581-ee34-485b-5c1e537d723e"
489
+ },
490
+ "outputs": [],
491
+ "source": [
492
+ "clf = tree.DecisionTreeClassifier()\n",
493
+ "clf = clf.fit(X_train, y_train)\n",
494
+ "y_pred = clf.predict(X_test)\n",
495
+ "metrics.accuracy_score(y_test, y_pred)"
496
+ ]
497
+ },
498
+ {
499
+ "cell_type": "markdown",
500
+ "metadata": {
501
+ "_cell_guid": "cbca0780-6beb-b2d1-996a-707f627582d8"
502
+ },
503
+ "source": []
504
+ },
505
+ {
506
+ "cell_type": "code",
507
+ "execution_count": null,
508
+ "metadata": {
509
+ "_cell_guid": "e0ff4d85-703c-f7a7-04cf-adf464563c06"
510
+ },
511
+ "outputs": [],
512
+ "source": [
513
+ "# create the numerical dummy columns for the features\n",
514
+ "dummies = pd.get_dummies(raw_data)\n",
515
+ "\n",
516
+ "# del either the greater than 50K feature or less than 50K feature\n",
517
+ "del dummies['income_<=50K']\n",
518
+ "dummy_data = dummies.values\n",
519
+ "\n",
520
+ "# shape the features and response \n",
521
+ "X = dummy_data[:, :-1]\n",
522
+ "y = dummy_data[:, -1]\n",
523
+ "print(X.shape, y.shape)\n",
524
+ "\n",
525
+ "# fit\n",
526
+ "gbc = GradientBoostingClassifier().fit(X_train, y_train)\n",
527
+ "\n",
528
+ "print('GBC %s' % gbc.score(X_test, y_test))"
529
+ ]
530
+ },
531
+ {
532
+ "cell_type": "markdown",
533
+ "metadata": {
534
+ "_cell_guid": "39b60434-1361-de4c-39eb-72e18c22055d"
535
+ },
536
+ "source": []
537
+ },
538
+ {
539
+ "cell_type": "code",
540
+ "execution_count": null,
541
+ "metadata": {
542
+ "_cell_guid": "8a668f04-bb31-4d13-a939-6808e4331c60",
543
+ "collapsed": true
544
+ },
545
+ "outputs": [],
546
+ "source": [
547
+ "# create a feature numerically representative of different capital gains catergories \n",
548
+ "def cap_split(row):\n",
549
+ " if row['capital.gain'] > 0 and row['over50K'] == 1:\n",
550
+ " return 1\n",
551
+ " elif row['capital.gain'] > 0 and row['over50K'] == 0:\n",
552
+ " return 2\n",
553
+ " elif row['capital.gain'] <= 0 and row['over50K'] == 1:\n",
554
+ " return 3\n",
555
+ " else:\n",
556
+ " return 4\n",
557
+ " \n",
558
+ "data['cap_split'] = data.apply (lambda row: cap_split (row), axis=1)"
559
+ ]
560
+ },
561
+ {
562
+ "cell_type": "code",
563
+ "execution_count": null,
564
+ "metadata": {
565
+ "_cell_guid": "c9e79439-be22-0f13-f2a8-fc01e4662cb1",
566
+ "collapsed": true
567
+ },
568
+ "outputs": [],
569
+ "source": [
570
+ "# def that will display the percentage and absolute value on the pie chart\n",
571
+ "def make_autopct(values):\n",
572
+ " def my_autopct(pct):\n",
573
+ " total = sum(values)\n",
574
+ " val = int(round(pct*total/100.0))\n",
575
+ " return '{p:.2f}% ({v:d})'.format(p=pct,v=val)\n",
576
+ " return my_autopct"
577
+ ]
578
+ },
579
+ {
580
+ "cell_type": "code",
581
+ "execution_count": null,
582
+ "metadata": {
583
+ "_cell_guid": "afa6d82e-9ba6-dfa8-b4f1-4854dca2f6b1"
584
+ },
585
+ "outputs": [],
586
+ "source": [
587
+ "# pie chart displaying breakdown of all types of cap in relation to making over 50K\n",
588
+ "cap_split = data.cap_split.value_counts().sort_index()\n",
589
+ "colors = ['lightgreen', 'darkseagreen', 'pink', 'lightcoral']\n",
590
+ "explode = [0.2, 0.2, 0, 0]\n",
591
+ "label = ['> 50K, Pos Cap', '<= 50K, Pos Cap', '> 50K, Neg/No Cap', '<= 50K, Neg/No Cap']\n",
592
+ "fig = plt.figure(figsize=(12, 12)) \n",
593
+ "ax = fig.add_subplot(1, 1, 1)\n",
594
+ "plt.pie(cap_split, shadow=True, colors=colors, explode=explode, autopct=make_autopct(cap_split))\n",
595
+ "plt.title('Income Breakdown Based on Capital Gains')\n",
596
+ "plt.axis('equal')\n",
597
+ "plt.legend(label, title='INCOME', bbox_to_anchor=(1.15, .95))"
598
+ ]
599
+ },
600
+ {
601
+ "cell_type": "code",
602
+ "execution_count": null,
603
+ "metadata": {
604
+ "_cell_guid": "8e3dc2bf-25ff-b028-58e1-e1d6323acfb7"
605
+ },
606
+ "outputs": [],
607
+ "source": [
608
+ "# pie charts separating the Pos Capital Gains vs. the Neg/No Capital Gains\n",
609
+ "import matplotlib.patches as mpatches\n",
610
+ "%matplotlib inline\n",
611
+ "\n",
612
+ "caphigh = data[data['capital.gain'] > 0]\n",
613
+ "caplow = data[data['capital.gain'] <= 0]\n",
614
+ "\n",
615
+ "caphigh_val = caphigh.over50K.value_counts()\n",
616
+ "caplow_val = caplow.over50K.value_counts()\n",
617
+ "colors1 = ['lightgreen', 'pink']\n",
618
+ "colors2 = ['pink', 'lightgreen']\n",
619
+ "\n",
620
+ "fig = plt.figure(figsize=(10, 10))\n",
621
+ "ax1 = fig.add_subplot(2, 1, 1)\n",
622
+ "plt.pie(caphigh_val, shadow=True, explode=[0.1, 0], colors=colors1, autopct=make_autopct(caphigh_val))\n",
623
+ "plt.axis('equal')\n",
624
+ "plt.title('Income Breakdown Based on Positive Capital Gains')\n",
625
+ "plt.legend(labels=['> 50K', '<= 50K'], frameon=True, shadow=True, title='INCOME')\n",
626
+ "\n",
627
+ "ax2 = fig.add_subplot(2, 1, 2)\n",
628
+ "plt.pie(caplow_val, shadow=True, explode=[0.1, 0], colors=colors2, autopct=make_autopct(caplow_val))\n",
629
+ "plt.axis('equal')\n",
630
+ "plt.title('Income Breakdown Based on No/Negative Capital Gains')\n",
631
+ "ax2_patch1 = mpatches.Patch(facecolor='lightgreen', edgecolor='black', label='> 50K')\n",
632
+ "ax2_patch2 = mpatches.Patch(facecolor='pink', edgecolor='black', label='<= 50K')\n",
633
+ "plt.legend(handles=[ax2_patch1, ax2_patch2], frameon=True, shadow=True, title='INCOME')\n",
634
+ "\n",
635
+ "plt.show()"
636
+ ]
637
+ },
638
+ {
639
+ "cell_type": "markdown",
640
+ "metadata": {},
641
+ "source": [
642
+ "## Fairness"
643
+ ]
644
+ },
645
+ {
646
+ "cell_type": "code",
647
+ "execution_count": 10,
648
+ "metadata": {},
649
+ "outputs": [],
650
+ "source": [
651
+ "# This DataFrame is created to stock differents models and fair metrics that we produce in this notebook\n",
652
+ "algo_metrics = pd.DataFrame(columns=['model', 'fair_metrics', 'prediction', 'probs'])\n",
653
+ "\n",
654
+ "def add_to_df_algo_metrics(algo_metrics, model, fair_metrics, preds, probs, name):\n",
655
+ " return algo_metrics.append(pd.DataFrame(data=[[model, fair_metrics, preds, probs]], columns=['model', 'fair_metrics', 'prediction', 'probs'], index=[name]))"
656
+ ]
657
+ },
658
+ {
659
+ "cell_type": "code",
660
+ "execution_count": 11,
661
+ "metadata": {},
662
+ "outputs": [],
663
+ "source": [
664
+ "def fair_metrics(dataset, pred, pred_is_dataset=False):\n",
665
+ " if pred_is_dataset:\n",
666
+ " dataset_pred = pred\n",
667
+ " else:\n",
668
+ " dataset_pred = dataset.copy()\n",
669
+ " dataset_pred.labels = pred\n",
670
+ " \n",
671
+ " cols = ['statistical_parity_difference', 'equal_opportunity_difference', 'average_abs_odds_difference', 'disparate_impact', 'theil_index']\n",
672
+ " obj_fairness = [[0,0,0,1,0]]\n",
673
+ " \n",
674
+ " fair_metrics = pd.DataFrame(data=obj_fairness, index=['objective'], columns=cols)\n",
675
+ " \n",
676
+ " for attr in dataset_pred.protected_attribute_names:\n",
677
+ " idx = dataset_pred.protected_attribute_names.index(attr)\n",
678
+ " privileged_groups = [{attr:dataset_pred.privileged_protected_attributes[idx][0]}] \n",
679
+ " unprivileged_groups = [{attr:dataset_pred.unprivileged_protected_attributes[idx][0]}] \n",
680
+ " \n",
681
+ " classified_metric = ClassificationMetric(dataset, \n",
682
+ " dataset_pred,\n",
683
+ " unprivileged_groups=unprivileged_groups,\n",
684
+ " privileged_groups=privileged_groups)\n",
685
+ "\n",
686
+ " metric_pred = BinaryLabelDatasetMetric(dataset_pred,\n",
687
+ " unprivileged_groups=unprivileged_groups,\n",
688
+ " privileged_groups=privileged_groups)\n",
689
+ "\n",
690
+ " acc = classified_metric.accuracy()\n",
691
+ "\n",
692
+ " row = pd.DataFrame([[metric_pred.mean_difference(),\n",
693
+ " classified_metric.equal_opportunity_difference(),\n",
694
+ " classified_metric.average_abs_odds_difference(),\n",
695
+ " metric_pred.disparate_impact(),\n",
696
+ " classified_metric.theil_index()]],\n",
697
+ " columns = cols,\n",
698
+ " index = [attr]\n",
699
+ " )\n",
700
+ " fair_metrics = fair_metrics.append(row) \n",
701
+ " \n",
702
+ " fair_metrics = fair_metrics.replace([-np.inf, np.inf], 2)\n",
703
+ " \n",
704
+ " return fair_metrics\n",
705
+ "\n",
706
+ "def plot_fair_metrics(fair_metrics):\n",
707
+ " fig, ax = plt.subplots(figsize=(20,4), ncols=5, nrows=1)\n",
708
+ "\n",
709
+ " plt.subplots_adjust(\n",
710
+ " left = 0.125, \n",
711
+ " bottom = 0.1, \n",
712
+ " right = 0.9, \n",
713
+ " top = 0.9, \n",
714
+ " wspace = .5, \n",
715
+ " hspace = 1.1\n",
716
+ " )\n",
717
+ "\n",
718
+ " y_title_margin = 1.2\n",
719
+ "\n",
720
+ " plt.suptitle(\"Fairness metrics\", y = 1.09, fontsize=20)\n",
721
+ " sns.set(style=\"dark\")\n",
722
+ "\n",
723
+ " cols = fair_metrics.columns.values\n",
724
+ " obj = fair_metrics.loc['objective']\n",
725
+ " size_rect = [0.2,0.2,0.2,0.4,0.25]\n",
726
+ " rect = [-0.1,-0.1,-0.1,0.8,0]\n",
727
+ " bottom = [-1,-1,-1,0,0]\n",
728
+ " top = [1,1,1,2,1]\n",
729
+ " bound = [[-0.1,0.1],[-0.1,0.1],[-0.1,0.1],[0.8,1.2],[0,0.25]]\n",
730
+ "\n",
731
+ " display(Markdown(\"### Check bias metrics :\"))\n",
732
+ " display(Markdown(\"A model can be considered bias if just one of these five metrics show that this model is biased.\"))\n",
733
+ " for attr in fair_metrics.index[1:len(fair_metrics)].values:\n",
734
+ " display(Markdown(\"#### For the %s attribute :\"%attr))\n",
735
+ " check = [bound[i][0] < fair_metrics.loc[attr][i] < bound[i][1] for i in range(0,5)]\n",
736
+ " display(Markdown(\"With default thresholds, bias against unprivileged group detected in **%d** out of 5 metrics\"%(5 - sum(check))))\n",
737
+ "\n",
738
+ " for i in range(0,5):\n",
739
+ " plt.subplot(1, 5, i+1)\n",
740
+ " ax = sns.barplot(x=fair_metrics.index[1:len(fair_metrics)], y=fair_metrics.iloc[1:len(fair_metrics)][cols[i]])\n",
741
+ " \n",
742
+ " for j in range(0,len(fair_metrics)-1):\n",
743
+ " a, val = ax.patches[j], fair_metrics.iloc[j+1][cols[i]]\n",
744
+ " marg = -0.2 if val < 0 else 0.1\n",
745
+ " ax.text(a.get_x()+a.get_width()/5, a.get_y()+a.get_height()+marg, round(val, 3), fontsize=15,color='black')\n",
746
+ "\n",
747
+ " plt.ylim(bottom[i], top[i])\n",
748
+ " plt.setp(ax.patches, linewidth=0)\n",
749
+ " ax.add_patch(patches.Rectangle((-5,rect[i]), 10, size_rect[i], alpha=0.3, facecolor=\"green\", linewidth=1, linestyle='solid'))\n",
750
+ " plt.axhline(obj[i], color='black', alpha=0.3)\n",
751
+ " plt.title(cols[i])\n",
752
+ " ax.set_ylabel('') \n",
753
+ " ax.set_xlabel('')"
754
+ ]
755
+ },
756
+ {
757
+ "cell_type": "code",
758
+ "execution_count": 12,
759
+ "metadata": {},
760
+ "outputs": [],
761
+ "source": [
762
+ "def get_fair_metrics_and_plot(data, model, plot=False, model_aif=False):\n",
763
+ " pred = model.predict(data).labels if model_aif else model.predict(data.features)\n",
764
+ " # fair_metrics function available in the metrics.py file\n",
765
+ " fair = fair_metrics(data, pred)\n",
766
+ "\n",
767
+ " if plot:\n",
768
+ " # plot_fair_metrics function available in the visualisations.py file\n",
769
+ " # The visualisation of this function is inspired by the dashboard on the demo of IBM aif360 \n",
770
+ " plot_fair_metrics(fair)\n",
771
+ " display(fair)\n",
772
+ " \n",
773
+ " return fair"
774
+ ]
775
+ },
776
+ {
777
+ "cell_type": "code",
778
+ "execution_count": 13,
779
+ "metadata": {},
780
+ "outputs": [
781
+ {
782
+ "name": "stderr",
783
+ "output_type": "stream",
784
+ "text": [
785
+ "\n",
786
+ "A value is trying to be set on a copy of a slice from a DataFrame.\n",
787
+ "Try using .loc[row_indexer,col_indexer] = value instead\n",
788
+ "\n",
789
+ "See the caveats in the documentation: https://pandas.pydata.org/pandas-docs/stable/user_guide/indexing.html#returning-a-view-versus-a-copy\n"
790
+ ]
791
+ }
792
+ ],
793
+ "source": [
794
+ "X['income'] = y"
795
+ ]
796
+ },
797
+ {
798
+ "cell_type": "code",
799
+ "execution_count": 14,
800
+ "metadata": {},
801
+ "outputs": [
802
+ {
803
+ "data": {
804
+ "text/html": [
805
+ "<div>\n",
806
+ "<style scoped>\n",
807
+ " .dataframe tbody tr th:only-of-type {\n",
808
+ " vertical-align: middle;\n",
809
+ " }\n",
810
+ "\n",
811
+ " .dataframe tbody tr th {\n",
812
+ " vertical-align: top;\n",
813
+ " }\n",
814
+ "\n",
815
+ " .dataframe thead th {\n",
816
+ " text-align: right;\n",
817
+ " }\n",
818
+ "</style>\n",
819
+ "<table border=\"1\" class=\"dataframe\">\n",
820
+ " <thead>\n",
821
+ " <tr style=\"text-align: right;\">\n",
822
+ " <th></th>\n",
823
+ " <th>workclass_num</th>\n",
824
+ " <th>education.num</th>\n",
825
+ " <th>marital_num</th>\n",
826
+ " <th>race_num</th>\n",
827
+ " <th>sex_num</th>\n",
828
+ " <th>rel_num</th>\n",
829
+ " <th>capital.gain</th>\n",
830
+ " <th>capital.loss</th>\n",
831
+ " <th>income</th>\n",
832
+ " </tr>\n",
833
+ " </thead>\n",
834
+ " <tbody>\n",
835
+ " <tr>\n",
836
+ " <th>1</th>\n",
837
+ " <td>0</td>\n",
838
+ " <td>9</td>\n",
839
+ " <td>0</td>\n",
840
+ " <td>0</td>\n",
841
+ " <td>0</td>\n",
842
+ " <td>0</td>\n",
843
+ " <td>0</td>\n",
844
+ " <td>4356</td>\n",
845
+ " <td>0</td>\n",
846
+ " </tr>\n",
847
+ " <tr>\n",
848
+ " <th>3</th>\n",
849
+ " <td>0</td>\n",
850
+ " <td>4</td>\n",
851
+ " <td>1</td>\n",
852
+ " <td>0</td>\n",
853
+ " <td>0</td>\n",
854
+ " <td>0</td>\n",
855
+ " <td>0</td>\n",
856
+ " <td>3900</td>\n",
857
+ " <td>0</td>\n",
858
+ " </tr>\n",
859
+ " <tr>\n",
860
+ " <th>4</th>\n",
861
+ " <td>0</td>\n",
862
+ " <td>10</td>\n",
863
+ " <td>2</td>\n",
864
+ " <td>0</td>\n",
865
+ " <td>0</td>\n",
866
+ " <td>0</td>\n",
867
+ " <td>0</td>\n",
868
+ " <td>3900</td>\n",
869
+ " <td>0</td>\n",
870
+ " </tr>\n",
871
+ " <tr>\n",
872
+ " <th>5</th>\n",
873
+ " <td>0</td>\n",
874
+ " <td>9</td>\n",
875
+ " <td>1</td>\n",
876
+ " <td>0</td>\n",
877
+ " <td>0</td>\n",
878
+ " <td>0</td>\n",
879
+ " <td>0</td>\n",
880
+ " <td>3770</td>\n",
881
+ " <td>0</td>\n",
882
+ " </tr>\n",
883
+ " <tr>\n",
884
+ " <th>6</th>\n",
885
+ " <td>0</td>\n",
886
+ " <td>6</td>\n",
887
+ " <td>2</td>\n",
888
+ " <td>0</td>\n",
889
+ " <td>1</td>\n",
890
+ " <td>0</td>\n",
891
+ " <td>0</td>\n",
892
+ " <td>3770</td>\n",
893
+ " <td>0</td>\n",
894
+ " </tr>\n",
895
+ " <tr>\n",
896
+ " <th>...</th>\n",
897
+ " <td>...</td>\n",
898
+ " <td>...</td>\n",
899
+ " <td>...</td>\n",
900
+ " <td>...</td>\n",
901
+ " <td>...</td>\n",
902
+ " <td>...</td>\n",
903
+ " <td>...</td>\n",
904
+ " <td>...</td>\n",
905
+ " <td>...</td>\n",
906
+ " </tr>\n",
907
+ " <tr>\n",
908
+ " <th>32556</th>\n",
909
+ " <td>0</td>\n",
910
+ " <td>10</td>\n",
911
+ " <td>3</td>\n",
912
+ " <td>0</td>\n",
913
+ " <td>1</td>\n",
914
+ " <td>0</td>\n",
915
+ " <td>0</td>\n",
916
+ " <td>0</td>\n",
917
+ " <td>0</td>\n",
918
+ " </tr>\n",
919
+ " <tr>\n",
920
+ " <th>32557</th>\n",
921
+ " <td>0</td>\n",
922
+ " <td>12</td>\n",
923
+ " <td>4</td>\n",
924
+ " <td>0</td>\n",
925
+ " <td>0</td>\n",
926
+ " <td>1</td>\n",
927
+ " <td>0</td>\n",
928
+ " <td>0</td>\n",
929
+ " <td>0</td>\n",
930
+ " </tr>\n",
931
+ " <tr>\n",
932
+ " <th>32558</th>\n",
933
+ " <td>0</td>\n",
934
+ " <td>9</td>\n",
935
+ " <td>4</td>\n",
936
+ " <td>0</td>\n",
937
+ " <td>1</td>\n",
938
+ " <td>1</td>\n",
939
+ " <td>0</td>\n",
940
+ " <td>0</td>\n",
941
+ " <td>1</td>\n",
942
+ " </tr>\n",
943
+ " <tr>\n",
944
+ " <th>32559</th>\n",
945
+ " <td>0</td>\n",
946
+ " <td>9</td>\n",
947
+ " <td>0</td>\n",
948
+ " <td>0</td>\n",
949
+ " <td>0</td>\n",
950
+ " <td>0</td>\n",
951
+ " <td>0</td>\n",
952
+ " <td>0</td>\n",
953
+ " <td>0</td>\n",
954
+ " </tr>\n",
955
+ " <tr>\n",
956
+ " <th>32560</th>\n",
957
+ " <td>0</td>\n",
958
+ " <td>9</td>\n",
959
+ " <td>3</td>\n",
960
+ " <td>0</td>\n",
961
+ " <td>1</td>\n",
962
+ " <td>0</td>\n",
963
+ " <td>0</td>\n",
964
+ " <td>0</td>\n",
965
+ " <td>0</td>\n",
966
+ " </tr>\n",
967
+ " </tbody>\n",
968
+ "</table>\n",
969
+ "<p>30718 rows × 9 columns</p>\n",
970
+ "</div>"
971
+ ],
972
+ "text/plain": [
973
+ " workclass_num education.num marital_num race_num sex_num rel_num \\\n",
974
+ "1 0 9 0 0 0 0 \n",
975
+ "3 0 4 1 0 0 0 \n",
976
+ "4 0 10 2 0 0 0 \n",
977
+ "5 0 9 1 0 0 0 \n",
978
+ "6 0 6 2 0 1 0 \n",
979
+ "... ... ... ... ... ... ... \n",
980
+ "32556 0 10 3 0 1 0 \n",
981
+ "32557 0 12 4 0 0 1 \n",
982
+ "32558 0 9 4 0 1 1 \n",
983
+ "32559 0 9 0 0 0 0 \n",
984
+ "32560 0 9 3 0 1 0 \n",
985
+ "\n",
986
+ " capital.gain capital.loss income \n",
987
+ "1 0 4356 0 \n",
988
+ "3 0 3900 0 \n",
989
+ "4 0 3900 0 \n",
990
+ "5 0 3770 0 \n",
991
+ "6 0 3770 0 \n",
992
+ "... ... ... ... \n",
993
+ "32556 0 0 0 \n",
994
+ "32557 0 0 0 \n",
995
+ "32558 0 0 1 \n",
996
+ "32559 0 0 0 \n",
997
+ "32560 0 0 0 \n",
998
+ "\n",
999
+ "[30718 rows x 9 columns]"
1000
+ ]
1001
+ },
1002
+ "execution_count": 14,
1003
+ "metadata": {},
1004
+ "output_type": "execute_result"
1005
+ }
1006
+ ],
1007
+ "source": [
1008
+ "X"
1009
+ ]
1010
+ },
1011
+ {
1012
+ "cell_type": "code",
1013
+ "execution_count": 15,
1014
+ "metadata": {},
1015
+ "outputs": [],
1016
+ "source": [
1017
+ "#print(X)\n",
1018
+ "\n",
1019
+ "\n",
1020
+ "#combine_final = [train_df, test_df]\n",
1021
+ "#result = pd.concat(combine_final)\n",
1022
+ "#print(result.ifany())\n",
1023
+ "#print(result)\n",
1024
+ "privileged_groups = [{'sex_num': 1}]\n",
1025
+ "unprivileged_groups = [{'sex_num': 0}]\n",
1026
+ "dataset_orig = StandardDataset(X,\n",
1027
+ " label_name='income',\n",
1028
+ " protected_attribute_names=['sex_num'],\n",
1029
+ " favorable_classes=[1],\n",
1030
+ " privileged_classes=[[1]])\n",
1031
+ "\n",
1032
+ "#metric_orig_train = BinaryLabelDatasetMetric(dataset_orig, \n",
1033
+ "# unprivileged_groups=unprivileged_groups,\n",
1034
+ "# privileged_groups=privileged_groups)\n",
1035
+ "#display(Markdown(\"#### Original training dataset\"))\n",
1036
+ "#print(\"Difference in mean outcomes between unprivileged and privileged groups = %f\" % metric_orig_train.mean_difference())\n"
1037
+ ]
1038
+ },
1039
+ {
1040
+ "cell_type": "code",
1041
+ "execution_count": 16,
1042
+ "metadata": {},
1043
+ "outputs": [
1044
+ {
1045
+ "data": {
1046
+ "text/markdown": [
1047
+ "#### Original training dataset"
1048
+ ],
1049
+ "text/plain": [
1050
+ "<IPython.core.display.Markdown object>"
1051
+ ]
1052
+ },
1053
+ "metadata": {},
1054
+ "output_type": "display_data"
1055
+ },
1056
+ {
1057
+ "name": "stdout",
1058
+ "output_type": "stream",
1059
+ "text": [
1060
+ "Difference in mean outcomes between unprivileged and privileged groups = -0.200292\n"
1061
+ ]
1062
+ }
1063
+ ],
1064
+ "source": [
1065
+ "metric_orig_train = BinaryLabelDatasetMetric(dataset_orig, \n",
1066
+ " unprivileged_groups=unprivileged_groups,\n",
1067
+ " privileged_groups=privileged_groups)\n",
1068
+ "display(Markdown(\"#### Original training dataset\"))\n",
1069
+ "print(\"Difference in mean outcomes between unprivileged and privileged groups = %f\" % metric_orig_train.mean_difference())"
1070
+ ]
1071
+ },
1072
+ {
1073
+ "cell_type": "code",
1074
+ "execution_count": 17,
1075
+ "metadata": {},
1076
+ "outputs": [],
1077
+ "source": [
1078
+ "import ipynbname\n",
1079
+ "nb_fname = ipynbname.name()\n",
1080
+ "nb_path = ipynbname.path()\n",
1081
+ "\n",
1082
+ "from xgboost import XGBClassifier\n",
1083
+ "import pickle\n",
1084
+ "\n",
1085
+ "data_orig_train, data_orig_test = dataset_orig.split([0.7], shuffle=True)\n",
1086
+ "X_train = data_orig_train.features\n",
1087
+ "y_train = data_orig_train.labels.ravel()\n",
1088
+ "\n",
1089
+ "X_test = data_orig_test.features\n",
1090
+ "y_test = data_orig_test.labels.ravel()\n",
1091
+ "num_estimators = 100\n",
1092
+ "\n",
1093
+ "model = GradientBoostingClassifier(n_estimators= 1)\n",
1094
+ "\n",
1095
+ "mdl = model.fit(X_train, y_train)\n",
1096
+ "with open('../../Results/GBC/' + nb_fname + '.pkl', 'wb') as f:\n",
1097
+ " pickle.dump(mdl, f)\n",
1098
+ "\n",
1099
+ "with open('../../Results/GBC/' + nb_fname + '_Train' + '.pkl', 'wb') as f:\n",
1100
+ " pickle.dump(data_orig_train, f) \n",
1101
+ " \n",
1102
+ "with open('../../Results/GBC/' + nb_fname + '_Test' + '.pkl', 'wb') as f:\n",
1103
+ " pickle.dump(data_orig_test, f) "
1104
+ ]
1105
+ },
1106
+ {
1107
+ "cell_type": "code",
1108
+ "execution_count": 18,
1109
+ "metadata": {},
1110
+ "outputs": [
1111
+ {
1112
+ "name": "stderr",
1113
+ "output_type": "stream",
1114
+ "text": [
1115
+ "invalid value encountered in double_scalars\n",
1116
+ "invalid value encountered in double_scalars\n",
1117
+ "invalid value encountered in double_scalars\n"
1118
+ ]
1119
+ }
1120
+ ],
1121
+ "source": [
1122
+ "from csv import writer\n",
1123
+ "from sklearn.metrics import accuracy_score, f1_score\n",
1124
+ "\n",
1125
+ "final_metrics = []\n",
1126
+ "accuracy = []\n",
1127
+ "f1= []\n",
1128
+ "\n",
1129
+ "for i in range(1,num_estimators+1):\n",
1130
+ " \n",
1131
+ " model = GradientBoostingClassifier(n_estimators= i)\n",
1132
+ " mdl = model.fit(X_train, y_train)\n",
1133
+ " yy = mdl.predict(X_test)\n",
1134
+ " accuracy.append(accuracy_score(y_test, yy))\n",
1135
+ " f1.append(f1_score(y_test, yy))\n",
1136
+ " fair = get_fair_metrics_and_plot(data_orig_test, mdl) \n",
1137
+ " fair_list = fair.iloc[1].tolist()\n",
1138
+ " #fair_list.insert(0, i)\n",
1139
+ " final_metrics.append(fair_list)\n"
1140
+ ]
1141
+ },
1142
+ {
1143
+ "cell_type": "code",
1144
+ "execution_count": 19,
1145
+ "metadata": {},
1146
+ "outputs": [
1147
+ {
1148
+ "name": "stdout",
1149
+ "output_type": "stream",
1150
+ "text": [
1151
+ " 0 1 2 3 4\n",
1152
+ "0 0.000000 0.000000 0.000000 NaN 0.282056\n",
1153
+ "1 0.000000 0.000000 0.000000 NaN 0.282056\n",
1154
+ "2 0.000000 0.000000 0.000000 NaN 0.282056\n",
1155
+ "3 -0.033524 0.026489 0.014297 0.405867 0.225216\n",
1156
+ "4 -0.033524 0.026489 0.014297 0.405867 0.225216\n",
1157
+ ".. ... ... ... ... ...\n",
1158
+ "95 -0.175809 -0.137035 0.098752 0.285825 0.120346\n",
1159
+ "96 -0.175809 -0.137035 0.098752 0.285825 0.120346\n",
1160
+ "97 -0.175648 -0.137035 0.098635 0.286012 0.120308\n",
1161
+ "98 -0.176141 -0.137035 0.098939 0.284476 0.120308\n",
1162
+ "99 -0.175980 -0.136516 0.098679 0.284663 0.120431\n",
1163
+ "\n",
1164
+ "[100 rows x 5 columns]\n"
1165
+ ]
1166
+ },
1167
+ {
1168
+ "data": {
1169
+ "text/html": [
1170
+ "<div>\n",
1171
+ "<style scoped>\n",
1172
+ " .dataframe tbody tr th:only-of-type {\n",
1173
+ " vertical-align: middle;\n",
1174
+ " }\n",
1175
+ "\n",
1176
+ " .dataframe tbody tr th {\n",
1177
+ " vertical-align: top;\n",
1178
+ " }\n",
1179
+ "\n",
1180
+ " .dataframe thead th {\n",
1181
+ " text-align: right;\n",
1182
+ " }\n",
1183
+ "</style>\n",
1184
+ "<table border=\"1\" class=\"dataframe\">\n",
1185
+ " <thead>\n",
1186
+ " <tr style=\"text-align: right;\">\n",
1187
+ " <th></th>\n",
1188
+ " <th>classifier</th>\n",
1189
+ " <th>T0</th>\n",
1190
+ " <th>T1</th>\n",
1191
+ " <th>T2</th>\n",
1192
+ " <th>T3</th>\n",
1193
+ " <th>T4</th>\n",
1194
+ " <th>T5</th>\n",
1195
+ " <th>T6</th>\n",
1196
+ " <th>T7</th>\n",
1197
+ " <th>T8</th>\n",
1198
+ " <th>...</th>\n",
1199
+ " <th>T90</th>\n",
1200
+ " <th>T91</th>\n",
1201
+ " <th>T92</th>\n",
1202
+ " <th>T93</th>\n",
1203
+ " <th>T94</th>\n",
1204
+ " <th>T95</th>\n",
1205
+ " <th>T96</th>\n",
1206
+ " <th>T97</th>\n",
1207
+ " <th>T98</th>\n",
1208
+ " <th>T99</th>\n",
1209
+ " </tr>\n",
1210
+ " </thead>\n",
1211
+ " <tbody>\n",
1212
+ " <tr>\n",
1213
+ " <th>accuracy</th>\n",
1214
+ " <td>0.859158</td>\n",
1215
+ " <td>0.754232</td>\n",
1216
+ " <td>0.754232</td>\n",
1217
+ " <td>0.754232</td>\n",
1218
+ " <td>0.797743</td>\n",
1219
+ " <td>0.797743</td>\n",
1220
+ " <td>0.797743</td>\n",
1221
+ " <td>0.809896</td>\n",
1222
+ " <td>0.809896</td>\n",
1223
+ " <td>0.843641</td>\n",
1224
+ " <td>...</td>\n",
1225
+ " <td>0.858724</td>\n",
1226
+ " <td>0.858724</td>\n",
1227
+ " <td>0.858724</td>\n",
1228
+ " <td>0.858724</td>\n",
1229
+ " <td>0.859049</td>\n",
1230
+ " <td>0.859158</td>\n",
1231
+ " <td>0.859158</td>\n",
1232
+ " <td>0.859266</td>\n",
1233
+ " <td>0.859266</td>\n",
1234
+ " <td>0.859158</td>\n",
1235
+ " </tr>\n",
1236
+ " <tr>\n",
1237
+ " <th>f1</th>\n",
1238
+ " <td>0.675662</td>\n",
1239
+ " <td>0.000000</td>\n",
1240
+ " <td>0.000000</td>\n",
1241
+ " <td>0.000000</td>\n",
1242
+ " <td>0.305514</td>\n",
1243
+ " <td>0.305514</td>\n",
1244
+ " <td>0.307063</td>\n",
1245
+ " <td>0.374732</td>\n",
1246
+ " <td>0.374732</td>\n",
1247
+ " <td>0.621288</td>\n",
1248
+ " <td>...</td>\n",
1249
+ " <td>0.674988</td>\n",
1250
+ " <td>0.674988</td>\n",
1251
+ " <td>0.674988</td>\n",
1252
+ " <td>0.674988</td>\n",
1253
+ " <td>0.675655</td>\n",
1254
+ " <td>0.675824</td>\n",
1255
+ " <td>0.675824</td>\n",
1256
+ " <td>0.675993</td>\n",
1257
+ " <td>0.675993</td>\n",
1258
+ " <td>0.675662</td>\n",
1259
+ " </tr>\n",
1260
+ " <tr>\n",
1261
+ " <th>statistical_parity_difference</th>\n",
1262
+ " <td>-0.175980</td>\n",
1263
+ " <td>0.000000</td>\n",
1264
+ " <td>0.000000</td>\n",
1265
+ " <td>0.000000</td>\n",
1266
+ " <td>-0.033524</td>\n",
1267
+ " <td>-0.033524</td>\n",
1268
+ " <td>-0.033505</td>\n",
1269
+ " <td>-0.047616</td>\n",
1270
+ " <td>-0.047616</td>\n",
1271
+ " <td>-0.157535</td>\n",
1272
+ " <td>...</td>\n",
1273
+ " <td>-0.176132</td>\n",
1274
+ " <td>-0.176132</td>\n",
1275
+ " <td>-0.176132</td>\n",
1276
+ " <td>-0.176132</td>\n",
1277
+ " <td>-0.175971</td>\n",
1278
+ " <td>-0.175809</td>\n",
1279
+ " <td>-0.175809</td>\n",
1280
+ " <td>-0.175648</td>\n",
1281
+ " <td>-0.176141</td>\n",
1282
+ " <td>-0.175980</td>\n",
1283
+ " </tr>\n",
1284
+ " <tr>\n",
1285
+ " <th>equal_opportunity_difference</th>\n",
1286
+ " <td>-0.136516</td>\n",
1287
+ " <td>0.000000</td>\n",
1288
+ " <td>0.000000</td>\n",
1289
+ " <td>0.000000</td>\n",
1290
+ " <td>0.026489</td>\n",
1291
+ " <td>0.026489</td>\n",
1292
+ " <td>0.024931</td>\n",
1293
+ " <td>-0.005468</td>\n",
1294
+ " <td>-0.005468</td>\n",
1295
+ " <td>-0.138445</td>\n",
1296
+ " <td>...</td>\n",
1297
+ " <td>-0.136516</td>\n",
1298
+ " <td>-0.136516</td>\n",
1299
+ " <td>-0.136516</td>\n",
1300
+ " <td>-0.136516</td>\n",
1301
+ " <td>-0.137035</td>\n",
1302
+ " <td>-0.137035</td>\n",
1303
+ " <td>-0.137035</td>\n",
1304
+ " <td>-0.137035</td>\n",
1305
+ " <td>-0.137035</td>\n",
1306
+ " <td>-0.136516</td>\n",
1307
+ " </tr>\n",
1308
+ " <tr>\n",
1309
+ " <th>average_abs_odds_difference</th>\n",
1310
+ " <td>0.098679</td>\n",
1311
+ " <td>0.000000</td>\n",
1312
+ " <td>0.000000</td>\n",
1313
+ " <td>0.000000</td>\n",
1314
+ " <td>0.014297</td>\n",
1315
+ " <td>0.014297</td>\n",
1316
+ " <td>0.013261</td>\n",
1317
+ " <td>0.003529</td>\n",
1318
+ " <td>0.003529</td>\n",
1319
+ " <td>0.096792</td>\n",
1320
+ " <td>...</td>\n",
1321
+ " <td>0.098843</td>\n",
1322
+ " <td>0.098843</td>\n",
1323
+ " <td>0.098843</td>\n",
1324
+ " <td>0.098843</td>\n",
1325
+ " <td>0.098869</td>\n",
1326
+ " <td>0.098752</td>\n",
1327
+ " <td>0.098752</td>\n",
1328
+ " <td>0.098635</td>\n",
1329
+ " <td>0.098939</td>\n",
1330
+ " <td>0.098679</td>\n",
1331
+ " </tr>\n",
1332
+ " <tr>\n",
1333
+ " <th>disparate_impact</th>\n",
1334
+ " <td>-1.256450</td>\n",
1335
+ " <td>NaN</td>\n",
1336
+ " <td>NaN</td>\n",
1337
+ " <td>NaN</td>\n",
1338
+ " <td>-0.901730</td>\n",
1339
+ " <td>-0.901730</td>\n",
1340
+ " <td>-0.884520</td>\n",
1341
+ " <td>-1.035325</td>\n",
1342
+ " <td>-1.035325</td>\n",
1343
+ " <td>-1.275262</td>\n",
1344
+ " <td>...</td>\n",
1345
+ " <td>-1.253686</td>\n",
1346
+ " <td>-1.253686</td>\n",
1347
+ " <td>-1.253686</td>\n",
1348
+ " <td>-1.253686</td>\n",
1349
+ " <td>-1.253032</td>\n",
1350
+ " <td>-1.252377</td>\n",
1351
+ " <td>-1.252377</td>\n",
1352
+ " <td>-1.251722</td>\n",
1353
+ " <td>-1.257105</td>\n",
1354
+ " <td>-1.256450</td>\n",
1355
+ " </tr>\n",
1356
+ " <tr>\n",
1357
+ " <th>theil_index</th>\n",
1358
+ " <td>0.120431</td>\n",
1359
+ " <td>0.282056</td>\n",
1360
+ " <td>0.282056</td>\n",
1361
+ " <td>0.282056</td>\n",
1362
+ " <td>0.225216</td>\n",
1363
+ " <td>0.225216</td>\n",
1364
+ " <td>0.224965</td>\n",
1365
+ " <td>0.209861</td>\n",
1366
+ " <td>0.209861</td>\n",
1367
+ " <td>0.140384</td>\n",
1368
+ " <td>...</td>\n",
1369
+ " <td>0.120580</td>\n",
1370
+ " <td>0.120580</td>\n",
1371
+ " <td>0.120580</td>\n",
1372
+ " <td>0.120580</td>\n",
1373
+ " <td>0.120383</td>\n",
1374
+ " <td>0.120346</td>\n",
1375
+ " <td>0.120346</td>\n",
1376
+ " <td>0.120308</td>\n",
1377
+ " <td>0.120308</td>\n",
1378
+ " <td>0.120431</td>\n",
1379
+ " </tr>\n",
1380
+ " </tbody>\n",
1381
+ "</table>\n",
1382
+ "<p>7 rows × 101 columns</p>\n",
1383
+ "</div>"
1384
+ ],
1385
+ "text/plain": [
1386
+ " classifier T0 T1 T2 \\\n",
1387
+ "accuracy 0.859158 0.754232 0.754232 0.754232 \n",
1388
+ "f1 0.675662 0.000000 0.000000 0.000000 \n",
1389
+ "statistical_parity_difference -0.175980 0.000000 0.000000 0.000000 \n",
1390
+ "equal_opportunity_difference -0.136516 0.000000 0.000000 0.000000 \n",
1391
+ "average_abs_odds_difference 0.098679 0.000000 0.000000 0.000000 \n",
1392
+ "disparate_impact -1.256450 NaN NaN NaN \n",
1393
+ "theil_index 0.120431 0.282056 0.282056 0.282056 \n",
1394
+ "\n",
1395
+ " T3 T4 T5 T6 \\\n",
1396
+ "accuracy 0.797743 0.797743 0.797743 0.809896 \n",
1397
+ "f1 0.305514 0.305514 0.307063 0.374732 \n",
1398
+ "statistical_parity_difference -0.033524 -0.033524 -0.033505 -0.047616 \n",
1399
+ "equal_opportunity_difference 0.026489 0.026489 0.024931 -0.005468 \n",
1400
+ "average_abs_odds_difference 0.014297 0.014297 0.013261 0.003529 \n",
1401
+ "disparate_impact -0.901730 -0.901730 -0.884520 -1.035325 \n",
1402
+ "theil_index 0.225216 0.225216 0.224965 0.209861 \n",
1403
+ "\n",
1404
+ " T7 T8 ... T90 T91 \\\n",
1405
+ "accuracy 0.809896 0.843641 ... 0.858724 0.858724 \n",
1406
+ "f1 0.374732 0.621288 ... 0.674988 0.674988 \n",
1407
+ "statistical_parity_difference -0.047616 -0.157535 ... -0.176132 -0.176132 \n",
1408
+ "equal_opportunity_difference -0.005468 -0.138445 ... -0.136516 -0.136516 \n",
1409
+ "average_abs_odds_difference 0.003529 0.096792 ... 0.098843 0.098843 \n",
1410
+ "disparate_impact -1.035325 -1.275262 ... -1.253686 -1.253686 \n",
1411
+ "theil_index 0.209861 0.140384 ... 0.120580 0.120580 \n",
1412
+ "\n",
1413
+ " T92 T93 T94 T95 \\\n",
1414
+ "accuracy 0.858724 0.858724 0.859049 0.859158 \n",
1415
+ "f1 0.674988 0.674988 0.675655 0.675824 \n",
1416
+ "statistical_parity_difference -0.176132 -0.176132 -0.175971 -0.175809 \n",
1417
+ "equal_opportunity_difference -0.136516 -0.136516 -0.137035 -0.137035 \n",
1418
+ "average_abs_odds_difference 0.098843 0.098843 0.098869 0.098752 \n",
1419
+ "disparate_impact -1.253686 -1.253686 -1.253032 -1.252377 \n",
1420
+ "theil_index 0.120580 0.120580 0.120383 0.120346 \n",
1421
+ "\n",
1422
+ " T96 T97 T98 T99 \n",
1423
+ "accuracy 0.859158 0.859266 0.859266 0.859158 \n",
1424
+ "f1 0.675824 0.675993 0.675993 0.675662 \n",
1425
+ "statistical_parity_difference -0.175809 -0.175648 -0.176141 -0.175980 \n",
1426
+ "equal_opportunity_difference -0.137035 -0.137035 -0.137035 -0.136516 \n",
1427
+ "average_abs_odds_difference 0.098752 0.098635 0.098939 0.098679 \n",
1428
+ "disparate_impact -1.252377 -1.251722 -1.257105 -1.256450 \n",
1429
+ "theil_index 0.120346 0.120308 0.120308 0.120431 \n",
1430
+ "\n",
1431
+ "[7 rows x 101 columns]"
1432
+ ]
1433
+ },
1434
+ "execution_count": 19,
1435
+ "metadata": {},
1436
+ "output_type": "execute_result"
1437
+ }
1438
+ ],
1439
+ "source": [
1440
+ "import numpy as np\n",
1441
+ "final_result = pd.DataFrame(final_metrics)\n",
1442
+ "#print(final_result)\n",
1443
+ "final_result[3] = np.log(final_result[3])\n",
1444
+ "final_result = final_result.transpose()\n",
1445
+ "acc_f1 = pd.DataFrame(accuracy)\n",
1446
+ "acc_f1['f1'] = f1\n",
1447
+ "acc_f1 = pd.DataFrame(acc_f1).transpose()\n",
1448
+ "acc = acc_f1.rename(index={0: 'accuracy', 1: 'f1'})\n",
1449
+ "final_result = final_result.rename(index={0: 'statistical_parity_difference', 1: 'equal_opportunity_difference', 2: 'average_abs_odds_difference', 3: 'disparate_impact', 4: 'theil_index'})\n",
1450
+ "final_result = pd.concat([acc,final_result])\n",
1451
+ "final_result.columns = ['T' + str(col) for col in final_result.columns]\n",
1452
+ "final_result.insert(0, \"classifier\", final_result['T' + str(num_estimators - 1)]) ##Add final metrics add the beginning of the df\n",
1453
+ "final_result.to_csv('../../Results/GBC/' + nb_fname + '.csv')\n",
1454
+ "final_result"
1455
+ ]
1456
+ },
1457
+ {
1458
+ "cell_type": "code",
1459
+ "execution_count": null,
1460
+ "metadata": {},
1461
+ "outputs": [],
1462
+ "source": []
1463
+ }
1464
+ ],
1465
+ "metadata": {
1466
+ "_change_revision": 0,
1467
+ "_is_fork": false,
1468
+ "kernelspec": {
1469
+ "display_name": "Python 3",
1470
+ "language": "python",
1471
+ "name": "python3"
1472
+ }
1473
+ },
1474
+ "nbformat": 4,
1475
+ "nbformat_minor": 1
1476
+ }
AdultNoteBook/Kernels/GBC/.ipynb_checkpoints/10-adultincomeprediction-checkpoint.ipynb ADDED
The diff for this file is too large to render. See raw diff
 
AdultNoteBook/Kernels/GBC/.ipynb_checkpoints/11-ml-adult-income-checkpoint.ipynb ADDED
@@ -0,0 +1,1050 @@
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "markdown",
5
+ "metadata": {
6
+ "_cell_guid": "a2e5f6dc-3011-e73c-bd39-23f6909f2b37"
7
+ },
8
+ "source": [
9
+ "This is my first attempt on Kaggle so I decided to try using the sklearn framework that I am most comfortable with right now. So far, I am able to get the accuracy to about 86.73%."
10
+ ]
11
+ },
12
+ {
13
+ "cell_type": "code",
14
+ "execution_count": 7,
15
+ "metadata": {
16
+ "_cell_guid": "df66acdf-db88-4292-922e-ad5a1ce53a67"
17
+ },
18
+ "outputs": [],
19
+ "source": [
20
+ "\n",
21
+ "# This Python 3 environment comes with many helpful analytics libraries installed\n",
22
+ "# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python\n",
23
+ "# For example, here's several helpful packages to load in\n",
24
+ "import numpy as np # linear algebra\n",
25
+ "import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)\n",
26
+ "import csv\n",
27
+ "from sklearn.model_selection import train_test_split\n",
28
+ "from sklearn.preprocessing import LabelEncoder\n",
29
+ "import matplotlib.pyplot as plt\n",
30
+ "\n",
31
+ "# Input data files are available in the \"../input\" directory.\n",
32
+ "# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory\n",
33
+ "from subprocess import check_output\n",
34
+ "#print(check_output([\"ls\", \"../input\"]).decode(\"utf8\"))\n",
35
+ "# Any results you write to the current directory are saved as output.\"\n",
36
+ "\n",
37
+ "# set pandas chained_assignment flag = None here\n",
38
+ "pd.options.mode.chained_assignment = None"
39
+ ]
40
+ },
41
+ {
42
+ "cell_type": "code",
43
+ "execution_count": 8,
44
+ "metadata": {},
45
+ "outputs": [],
46
+ "source": [
47
+ "from aif360.datasets import StandardDataset\n",
48
+ "from aif360.metrics import BinaryLabelDatasetMetric, ClassificationMetric\n",
49
+ "import matplotlib.patches as patches\n",
50
+ "from aif360.algorithms.preprocessing import Reweighing\n",
51
+ "#from packages import *\n",
52
+ "#from ml_fairness import *\n",
53
+ "import matplotlib.pyplot as plt\n",
54
+ "import seaborn as sns\n",
55
+ "\n",
56
+ "\n",
57
+ "\n",
58
+ "from IPython.display import Markdown, display"
59
+ ]
60
+ },
61
+ {
62
+ "cell_type": "code",
63
+ "execution_count": 9,
64
+ "metadata": {
65
+ "_cell_guid": "278f91bf-0b55-dd81-e65f-85df55ec6ec0"
66
+ },
67
+ "outputs": [],
68
+ "source": [
69
+ "def preprocess_target(dframe, df_column_name):\n",
70
+ " col = dframe[[df_column_name]]\n",
71
+ " le_col = LabelEncoder()\n",
72
+ " le_col.fit(np.ravel(col))\n",
73
+ " return le_col.transform(np.ravel(col))\n",
74
+ "\n",
75
+ "def preprocess_features(dframe):\n",
76
+ " for column in dframe:\n",
77
+ " enc = LabelEncoder()\n",
78
+ " if(column not in ['age','education.num','fnlwgt','capital.gain','capital.loss','hours.per.week']):\n",
79
+ " dframe[column] = enc.fit_transform(dframe[column])\n",
80
+ " return dframe"
81
+ ]
82
+ },
83
+ {
84
+ "cell_type": "code",
85
+ "execution_count": 10,
86
+ "metadata": {
87
+ "_cell_guid": "b6bd1726-9220-487e-9e1a-ef06ef6c55ec"
88
+ },
89
+ "outputs": [
90
+ {
91
+ "data": {
92
+ "text/html": [
93
+ "<div>\n",
94
+ "<style scoped>\n",
95
+ " .dataframe tbody tr th:only-of-type {\n",
96
+ " vertical-align: middle;\n",
97
+ " }\n",
98
+ "\n",
99
+ " .dataframe tbody tr th {\n",
100
+ " vertical-align: top;\n",
101
+ " }\n",
102
+ "\n",
103
+ " .dataframe thead th {\n",
104
+ " text-align: right;\n",
105
+ " }\n",
106
+ "</style>\n",
107
+ "<table border=\"1\" class=\"dataframe\">\n",
108
+ " <thead>\n",
109
+ " <tr style=\"text-align: right;\">\n",
110
+ " <th></th>\n",
111
+ " <th>age</th>\n",
112
+ " <th>workclass</th>\n",
113
+ " <th>education</th>\n",
114
+ " <th>marital.status</th>\n",
115
+ " <th>occupation</th>\n",
116
+ " <th>education.num</th>\n",
117
+ " <th>race</th>\n",
118
+ " <th>sex</th>\n",
119
+ " <th>relationship</th>\n",
120
+ " <th>capital.gain</th>\n",
121
+ " <th>capital.loss</th>\n",
122
+ " <th>native.country</th>\n",
123
+ " </tr>\n",
124
+ " </thead>\n",
125
+ " <tbody>\n",
126
+ " <tr>\n",
127
+ " <th>0</th>\n",
128
+ " <td>90</td>\n",
129
+ " <td>0</td>\n",
130
+ " <td>11</td>\n",
131
+ " <td>6</td>\n",
132
+ " <td>0</td>\n",
133
+ " <td>9</td>\n",
134
+ " <td>4</td>\n",
135
+ " <td>0</td>\n",
136
+ " <td>1</td>\n",
137
+ " <td>0</td>\n",
138
+ " <td>4356</td>\n",
139
+ " <td>39</td>\n",
140
+ " </tr>\n",
141
+ " <tr>\n",
142
+ " <th>1</th>\n",
143
+ " <td>82</td>\n",
144
+ " <td>4</td>\n",
145
+ " <td>11</td>\n",
146
+ " <td>6</td>\n",
147
+ " <td>4</td>\n",
148
+ " <td>9</td>\n",
149
+ " <td>4</td>\n",
150
+ " <td>0</td>\n",
151
+ " <td>1</td>\n",
152
+ " <td>0</td>\n",
153
+ " <td>4356</td>\n",
154
+ " <td>39</td>\n",
155
+ " </tr>\n",
156
+ " <tr>\n",
157
+ " <th>2</th>\n",
158
+ " <td>66</td>\n",
159
+ " <td>0</td>\n",
160
+ " <td>15</td>\n",
161
+ " <td>6</td>\n",
162
+ " <td>0</td>\n",
163
+ " <td>10</td>\n",
164
+ " <td>2</td>\n",
165
+ " <td>0</td>\n",
166
+ " <td>4</td>\n",
167
+ " <td>0</td>\n",
168
+ " <td>4356</td>\n",
169
+ " <td>39</td>\n",
170
+ " </tr>\n",
171
+ " <tr>\n",
172
+ " <th>3</th>\n",
173
+ " <td>54</td>\n",
174
+ " <td>4</td>\n",
175
+ " <td>5</td>\n",
176
+ " <td>0</td>\n",
177
+ " <td>7</td>\n",
178
+ " <td>4</td>\n",
179
+ " <td>4</td>\n",
180
+ " <td>0</td>\n",
181
+ " <td>4</td>\n",
182
+ " <td>0</td>\n",
183
+ " <td>3900</td>\n",
184
+ " <td>39</td>\n",
185
+ " </tr>\n",
186
+ " <tr>\n",
187
+ " <th>4</th>\n",
188
+ " <td>41</td>\n",
189
+ " <td>4</td>\n",
190
+ " <td>15</td>\n",
191
+ " <td>5</td>\n",
192
+ " <td>10</td>\n",
193
+ " <td>10</td>\n",
194
+ " <td>4</td>\n",
195
+ " <td>0</td>\n",
196
+ " <td>3</td>\n",
197
+ " <td>0</td>\n",
198
+ " <td>3900</td>\n",
199
+ " <td>39</td>\n",
200
+ " </tr>\n",
201
+ " <tr>\n",
202
+ " <th>...</th>\n",
203
+ " <td>...</td>\n",
204
+ " <td>...</td>\n",
205
+ " <td>...</td>\n",
206
+ " <td>...</td>\n",
207
+ " <td>...</td>\n",
208
+ " <td>...</td>\n",
209
+ " <td>...</td>\n",
210
+ " <td>...</td>\n",
211
+ " <td>...</td>\n",
212
+ " <td>...</td>\n",
213
+ " <td>...</td>\n",
214
+ " <td>...</td>\n",
215
+ " </tr>\n",
216
+ " <tr>\n",
217
+ " <th>32556</th>\n",
218
+ " <td>22</td>\n",
219
+ " <td>4</td>\n",
220
+ " <td>15</td>\n",
221
+ " <td>4</td>\n",
222
+ " <td>11</td>\n",
223
+ " <td>10</td>\n",
224
+ " <td>4</td>\n",
225
+ " <td>1</td>\n",
226
+ " <td>1</td>\n",
227
+ " <td>0</td>\n",
228
+ " <td>0</td>\n",
229
+ " <td>39</td>\n",
230
+ " </tr>\n",
231
+ " <tr>\n",
232
+ " <th>32557</th>\n",
233
+ " <td>27</td>\n",
234
+ " <td>4</td>\n",
235
+ " <td>7</td>\n",
236
+ " <td>2</td>\n",
237
+ " <td>13</td>\n",
238
+ " <td>12</td>\n",
239
+ " <td>4</td>\n",
240
+ " <td>0</td>\n",
241
+ " <td>5</td>\n",
242
+ " <td>0</td>\n",
243
+ " <td>0</td>\n",
244
+ " <td>39</td>\n",
245
+ " </tr>\n",
246
+ " <tr>\n",
247
+ " <th>32558</th>\n",
248
+ " <td>40</td>\n",
249
+ " <td>4</td>\n",
250
+ " <td>11</td>\n",
251
+ " <td>2</td>\n",
252
+ " <td>7</td>\n",
253
+ " <td>9</td>\n",
254
+ " <td>4</td>\n",
255
+ " <td>1</td>\n",
256
+ " <td>0</td>\n",
257
+ " <td>0</td>\n",
258
+ " <td>0</td>\n",
259
+ " <td>39</td>\n",
260
+ " </tr>\n",
261
+ " <tr>\n",
262
+ " <th>32559</th>\n",
263
+ " <td>58</td>\n",
264
+ " <td>4</td>\n",
265
+ " <td>11</td>\n",
266
+ " <td>6</td>\n",
267
+ " <td>1</td>\n",
268
+ " <td>9</td>\n",
269
+ " <td>4</td>\n",
270
+ " <td>0</td>\n",
271
+ " <td>4</td>\n",
272
+ " <td>0</td>\n",
273
+ " <td>0</td>\n",
274
+ " <td>39</td>\n",
275
+ " </tr>\n",
276
+ " <tr>\n",
277
+ " <th>32560</th>\n",
278
+ " <td>22</td>\n",
279
+ " <td>4</td>\n",
280
+ " <td>11</td>\n",
281
+ " <td>4</td>\n",
282
+ " <td>1</td>\n",
283
+ " <td>9</td>\n",
284
+ " <td>4</td>\n",
285
+ " <td>1</td>\n",
286
+ " <td>3</td>\n",
287
+ " <td>0</td>\n",
288
+ " <td>0</td>\n",
289
+ " <td>39</td>\n",
290
+ " </tr>\n",
291
+ " </tbody>\n",
292
+ "</table>\n",
293
+ "<p>32561 rows × 12 columns</p>\n",
294
+ "</div>"
295
+ ],
296
+ "text/plain": [
297
+ " age workclass education marital.status occupation education.num \\\n",
298
+ "0 90 0 11 6 0 9 \n",
299
+ "1 82 4 11 6 4 9 \n",
300
+ "2 66 0 15 6 0 10 \n",
301
+ "3 54 4 5 0 7 4 \n",
302
+ "4 41 4 15 5 10 10 \n",
303
+ "... ... ... ... ... ... ... \n",
304
+ "32556 22 4 15 4 11 10 \n",
305
+ "32557 27 4 7 2 13 12 \n",
306
+ "32558 40 4 11 2 7 9 \n",
307
+ "32559 58 4 11 6 1 9 \n",
308
+ "32560 22 4 11 4 1 9 \n",
309
+ "\n",
310
+ " race sex relationship capital.gain capital.loss native.country \n",
311
+ "0 4 0 1 0 4356 39 \n",
312
+ "1 4 0 1 0 4356 39 \n",
313
+ "2 2 0 4 0 4356 39 \n",
314
+ "3 4 0 4 0 3900 39 \n",
315
+ "4 4 0 3 0 3900 39 \n",
316
+ "... ... ... ... ... ... ... \n",
317
+ "32556 4 1 1 0 0 39 \n",
318
+ "32557 4 0 5 0 0 39 \n",
319
+ "32558 4 1 0 0 0 39 \n",
320
+ "32559 4 0 4 0 0 39 \n",
321
+ "32560 4 1 3 0 0 39 \n",
322
+ "\n",
323
+ "[32561 rows x 12 columns]"
324
+ ]
325
+ },
326
+ "execution_count": 10,
327
+ "metadata": {},
328
+ "output_type": "execute_result"
329
+ }
330
+ ],
331
+ "source": [
332
+ "# import data and preprocess\n",
333
+ "df = pd.read_csv('../../Data/adult.csv')\n",
334
+ "\n",
335
+ "# select and preprocess features\n",
336
+ "le_data = LabelEncoder()\n",
337
+ "features = ['age','workclass','education','marital.status','occupation','education.num','race','sex','relationship','capital.gain','capital.loss','native.country','income']\n",
338
+ "data = df[features]\n",
339
+ "data = preprocess_features(data)\n",
340
+ "\n",
341
+ "# select target\n",
342
+ "data_new = data\n",
343
+ "target = data['income']\n",
344
+ "data = data.drop('income', axis=1)"
345
+ ]
346
+ },
347
+ {
348
+ "cell_type": "code",
349
+ "execution_count": null,
350
+ "metadata": {
351
+ "_cell_guid": "bb1e318c-874c-9ffd-8161-f52da187f462"
352
+ },
353
+ "outputs": [],
354
+ "source": [
355
+ "# split train and test data\n",
356
+ "X_train, X_test, y_train, y_test = train_test_split(\n",
357
+ " data, target, test_size=0.4, random_state=0)"
358
+ ]
359
+ },
360
+ {
361
+ "cell_type": "code",
362
+ "execution_count": null,
363
+ "metadata": {
364
+ "_cell_guid": "bab2adca-9f0a-00db-e5fe-f8b4584d8ea3"
365
+ },
366
+ "outputs": [],
367
+ "source": [
368
+ "# select algorithm\n",
369
+ "#from sklearn.tree import DecisionTreeClassifier\n",
370
+ "#from sklearn.ensemble import AdaBoostClassifier\n",
371
+ "#clf = AdaBoostClassifier(DecisionTreeClassifier(max_depth=1),algorithm=\"SAMME\",n_estimators=200)\n",
372
+ "from sklearn.ensemble import GradientBoostingClassifier\n",
373
+ "clf = GradientBoostingClassifier(loss='deviance', n_estimators=100, learning_rate=1.0,max_depth=2, random_state=0)"
374
+ ]
375
+ },
376
+ {
377
+ "cell_type": "code",
378
+ "execution_count": null,
379
+ "metadata": {
380
+ "_cell_guid": "b6c87238-e66f-f13f-1d2c-61078074c8ae"
381
+ },
382
+ "outputs": [],
383
+ "source": [
384
+ "# fit and predict\n",
385
+ "clf.fit(X_train, y_train)\n",
386
+ "predictions = clf.predict(X_test)"
387
+ ]
388
+ },
389
+ {
390
+ "cell_type": "code",
391
+ "execution_count": null,
392
+ "metadata": {
393
+ "_cell_guid": "1120a4c6-e3cb-e52c-da9e-a43979acf665"
394
+ },
395
+ "outputs": [],
396
+ "source": [
397
+ "# display the relative importance of each attribute\n",
398
+ "relval = clf.feature_importances_\n",
399
+ "\n",
400
+ "# horizontal bar plot of feature importance\n",
401
+ "pos = np.arange(12) + 0.5\n",
402
+ "plt.barh(pos, relval, align='center')\n",
403
+ "plt.title(\"Feature Importance\")\n",
404
+ "plt.xlabel(\"\")\n",
405
+ "plt.ylabel(\"Features\")\n",
406
+ "plt.yticks(pos, ('Age','Working Class','Education','Marital Status','Occupation','Education Grade','Race','Sex','Relationship Status','Capital Gain','Capital Loss','Native Country'))\n",
407
+ "plt.grid(True)"
408
+ ]
409
+ },
410
+ {
411
+ "cell_type": "code",
412
+ "execution_count": null,
413
+ "metadata": {
414
+ "_cell_guid": "22c1bd91-5c08-6239-1f5b-8b78a812cd70"
415
+ },
416
+ "outputs": [],
417
+ "source": [
418
+ "# calc metrics\n",
419
+ "true_negatives = 0\n",
420
+ "false_negatives = 0\n",
421
+ "true_positives = 0\n",
422
+ "false_positives = 0\n",
423
+ "for prediction, truth in zip(predictions, y_test):\n",
424
+ " if prediction == 0 and truth == 0:\n",
425
+ " true_negatives += 1\n",
426
+ " elif prediction == 0 and truth == 1:\n",
427
+ " false_negatives += 1\n",
428
+ " elif prediction == 1 and truth == 0:\n",
429
+ " false_positives += 1\n",
430
+ " elif prediction == 1 and truth == 1:\n",
431
+ " true_positives += 1\n",
432
+ " else:\n",
433
+ " print (\"Warning: Found a predicted label not == 0 or 1.\")\n",
434
+ " print (\"All predictions should take value 0 or 1.\")\n",
435
+ " print (\"Evaluating performance for processed predictions:\")\n",
436
+ " break"
437
+ ]
438
+ },
439
+ {
440
+ "cell_type": "code",
441
+ "execution_count": null,
442
+ "metadata": {
443
+ "_cell_guid": "e2f94bee-13b2-8b8b-3d0f-98cd67bf04d3"
444
+ },
445
+ "outputs": [],
446
+ "source": [
447
+ "try:\n",
448
+ " print(\"Test Dataset (40%):\")\n",
449
+ " print(\"true_positives\",true_positives)\n",
450
+ " print(\"true_negatives\",true_negatives)\n",
451
+ " print(\"false_positives\",false_positives)\n",
452
+ " print(\"false_negatives\",false_negatives)\n",
453
+ " total_predictions = true_negatives + false_negatives + false_positives + true_positives\n",
454
+ " print(\"total predictions:\",total_predictions)\n",
455
+ " accuracy = 1.0*(true_positives + true_negatives)/total_predictions\n",
456
+ " print(\"accuracy:\",accuracy)\n",
457
+ " precision = 1.0*true_positives/(true_positives+false_positives)\n",
458
+ " print(\"precision:\",precision)\n",
459
+ " recall = 1.0*true_positives/(true_positives+false_negatives)\n",
460
+ " print(\"recall\",recall)\n",
461
+ " f1 = 2.0 * true_positives/(2*true_positives + false_positives+false_negatives)\n",
462
+ " print(\"f1\",f1)\n",
463
+ " f2 = (1+2.0*2.0) * precision*recall/(4*precision + recall)\n",
464
+ " print(\"f2\",f2)\n",
465
+ " print (clf)\n",
466
+ " #print (PERF_FORMAT_STRING.format(accuracy, precision, recall, f1, f2, display_precision = 5))\n",
467
+ " #print (RESULTS_FORMAT_STRING.format(total_predictions, true_positives, false_positives, false_negatives, true_negatives))\n",
468
+ " print (\"\")\n",
469
+ "except:\n",
470
+ " print (\"Got a divide by zero when trying out:\", clf)\n",
471
+ " print (\"Precision or recall may be undefined due to a lack of true positive predicitons.\")"
472
+ ]
473
+ },
474
+ {
475
+ "cell_type": "markdown",
476
+ "metadata": {},
477
+ "source": [
478
+ "## Fairness"
479
+ ]
480
+ },
481
+ {
482
+ "cell_type": "code",
483
+ "execution_count": 11,
484
+ "metadata": {},
485
+ "outputs": [],
486
+ "source": [
487
+ "# This DataFrame is created to stock differents models and fair metrics that we produce in this notebook\n",
488
+ "algo_metrics = pd.DataFrame(columns=['model', 'fair_metrics', 'prediction', 'probs'])\n",
489
+ "\n",
490
+ "def add_to_df_algo_metrics(algo_metrics, model, fair_metrics, preds, probs, name):\n",
491
+ " return algo_metrics.append(pd.DataFrame(data=[[model, fair_metrics, preds, probs]], columns=['model', 'fair_metrics', 'prediction', 'probs'], index=[name]))"
492
+ ]
493
+ },
494
+ {
495
+ "cell_type": "code",
496
+ "execution_count": 12,
497
+ "metadata": {},
498
+ "outputs": [],
499
+ "source": [
500
+ "def fair_metrics(dataset, pred, pred_is_dataset=False):\n",
501
+ " if pred_is_dataset:\n",
502
+ " dataset_pred = pred\n",
503
+ " else:\n",
504
+ " dataset_pred = dataset.copy()\n",
505
+ " dataset_pred.labels = pred\n",
506
+ " \n",
507
+ " cols = ['statistical_parity_difference', 'equal_opportunity_difference', 'average_abs_odds_difference', 'disparate_impact', 'theil_index']\n",
508
+ " obj_fairness = [[0,0,0,1,0]]\n",
509
+ " \n",
510
+ " fair_metrics = pd.DataFrame(data=obj_fairness, index=['objective'], columns=cols)\n",
511
+ " \n",
512
+ " for attr in dataset_pred.protected_attribute_names:\n",
513
+ " idx = dataset_pred.protected_attribute_names.index(attr)\n",
514
+ " privileged_groups = [{attr:dataset_pred.privileged_protected_attributes[idx][0]}] \n",
515
+ " unprivileged_groups = [{attr:dataset_pred.unprivileged_protected_attributes[idx][0]}] \n",
516
+ " \n",
517
+ " classified_metric = ClassificationMetric(dataset, \n",
518
+ " dataset_pred,\n",
519
+ " unprivileged_groups=unprivileged_groups,\n",
520
+ " privileged_groups=privileged_groups)\n",
521
+ "\n",
522
+ " metric_pred = BinaryLabelDatasetMetric(dataset_pred,\n",
523
+ " unprivileged_groups=unprivileged_groups,\n",
524
+ " privileged_groups=privileged_groups)\n",
525
+ "\n",
526
+ " acc = classified_metric.accuracy()\n",
527
+ "\n",
528
+ " row = pd.DataFrame([[metric_pred.mean_difference(),\n",
529
+ " classified_metric.equal_opportunity_difference(),\n",
530
+ " classified_metric.average_abs_odds_difference(),\n",
531
+ " metric_pred.disparate_impact(),\n",
532
+ " classified_metric.theil_index()]],\n",
533
+ " columns = cols,\n",
534
+ " index = [attr]\n",
535
+ " )\n",
536
+ " fair_metrics = fair_metrics.append(row) \n",
537
+ " \n",
538
+ " fair_metrics = fair_metrics.replace([-np.inf, np.inf], 2)\n",
539
+ " \n",
540
+ " return fair_metrics\n",
541
+ "\n",
542
+ "def plot_fair_metrics(fair_metrics):\n",
543
+ " fig, ax = plt.subplots(figsize=(20,4), ncols=5, nrows=1)\n",
544
+ "\n",
545
+ " plt.subplots_adjust(\n",
546
+ " left = 0.125, \n",
547
+ " bottom = 0.1, \n",
548
+ " right = 0.9, \n",
549
+ " top = 0.9, \n",
550
+ " wspace = .5, \n",
551
+ " hspace = 1.1\n",
552
+ " )\n",
553
+ "\n",
554
+ " y_title_margin = 1.2\n",
555
+ "\n",
556
+ " plt.suptitle(\"Fairness metrics\", y = 1.09, fontsize=20)\n",
557
+ " sns.set(style=\"dark\")\n",
558
+ "\n",
559
+ " cols = fair_metrics.columns.values\n",
560
+ " obj = fair_metrics.loc['objective']\n",
561
+ " size_rect = [0.2,0.2,0.2,0.4,0.25]\n",
562
+ " rect = [-0.1,-0.1,-0.1,0.8,0]\n",
563
+ " bottom = [-1,-1,-1,0,0]\n",
564
+ " top = [1,1,1,2,1]\n",
565
+ " bound = [[-0.1,0.1],[-0.1,0.1],[-0.1,0.1],[0.8,1.2],[0,0.25]]\n",
566
+ "\n",
567
+ " display(Markdown(\"### Check bias metrics :\"))\n",
568
+ " display(Markdown(\"A model can be considered bias if just one of these five metrics show that this model is biased.\"))\n",
569
+ " for attr in fair_metrics.index[1:len(fair_metrics)].values:\n",
570
+ " display(Markdown(\"#### For the %s attribute :\"%attr))\n",
571
+ " check = [bound[i][0] < fair_metrics.loc[attr][i] < bound[i][1] for i in range(0,5)]\n",
572
+ " display(Markdown(\"With default thresholds, bias against unprivileged group detected in **%d** out of 5 metrics\"%(5 - sum(check))))\n",
573
+ "\n",
574
+ " for i in range(0,5):\n",
575
+ " plt.subplot(1, 5, i+1)\n",
576
+ " ax = sns.barplot(x=fair_metrics.index[1:len(fair_metrics)], y=fair_metrics.iloc[1:len(fair_metrics)][cols[i]])\n",
577
+ " \n",
578
+ " for j in range(0,len(fair_metrics)-1):\n",
579
+ " a, val = ax.patches[j], fair_metrics.iloc[j+1][cols[i]]\n",
580
+ " marg = -0.2 if val < 0 else 0.1\n",
581
+ " ax.text(a.get_x()+a.get_width()/5, a.get_y()+a.get_height()+marg, round(val, 3), fontsize=15,color='black')\n",
582
+ "\n",
583
+ " plt.ylim(bottom[i], top[i])\n",
584
+ " plt.setp(ax.patches, linewidth=0)\n",
585
+ " ax.add_patch(patches.Rectangle((-5,rect[i]), 10, size_rect[i], alpha=0.3, facecolor=\"green\", linewidth=1, linestyle='solid'))\n",
586
+ " plt.axhline(obj[i], color='black', alpha=0.3)\n",
587
+ " plt.title(cols[i])\n",
588
+ " ax.set_ylabel('') \n",
589
+ " ax.set_xlabel('')"
590
+ ]
591
+ },
592
+ {
593
+ "cell_type": "code",
594
+ "execution_count": 13,
595
+ "metadata": {},
596
+ "outputs": [],
597
+ "source": [
598
+ "def get_fair_metrics_and_plot(data, model, plot=False, model_aif=False):\n",
599
+ " pred = model.predict(data).labels if model_aif else model.predict(data.features)\n",
600
+ " # fair_metrics function available in the metrics.py file\n",
601
+ " fair = fair_metrics(data, pred)\n",
602
+ "\n",
603
+ " if plot:\n",
604
+ " # plot_fair_metrics function available in the visualisations.py file\n",
605
+ " # The visualisation of this function is inspired by the dashboard on the demo of IBM aif360 \n",
606
+ " plot_fair_metrics(fair)\n",
607
+ " display(fair)\n",
608
+ " \n",
609
+ " return fair"
610
+ ]
611
+ },
612
+ {
613
+ "cell_type": "code",
614
+ "execution_count": 14,
615
+ "metadata": {},
616
+ "outputs": [],
617
+ "source": [
618
+ "#print(X)\n",
619
+ "\n",
620
+ "\n",
621
+ "#combine_final = [train_df, test_df]\n",
622
+ "#result = pd.concat(combine_final)\n",
623
+ "#print(result.ifany())\n",
624
+ "#print(result)\n",
625
+ "privileged_groups = [{'sex': 1}]\n",
626
+ "unprivileged_groups = [{'sex': 0}]\n",
627
+ "dataset_orig = StandardDataset(data_new,\n",
628
+ " label_name='income',\n",
629
+ " protected_attribute_names=['sex'],\n",
630
+ " favorable_classes=[1],\n",
631
+ " privileged_classes=[[1]])\n",
632
+ "\n",
633
+ "#metric_orig_train = BinaryLabelDatasetMetric(dataset_orig, \n",
634
+ "# unprivileged_groups=unprivileged_groups,\n",
635
+ "# privileged_groups=privileged_groups)\n",
636
+ "#display(Markdown(\"#### Original training dataset\"))\n",
637
+ "#print(\"Difference in mean outcomes between unprivileged and privileged groups = %f\" % metric_orig_train.mean_difference())\n"
638
+ ]
639
+ },
640
+ {
641
+ "cell_type": "code",
642
+ "execution_count": 15,
643
+ "metadata": {},
644
+ "outputs": [
645
+ {
646
+ "data": {
647
+ "text/markdown": [
648
+ "#### Original training dataset"
649
+ ],
650
+ "text/plain": [
651
+ "<IPython.core.display.Markdown object>"
652
+ ]
653
+ },
654
+ "metadata": {},
655
+ "output_type": "display_data"
656
+ },
657
+ {
658
+ "name": "stdout",
659
+ "output_type": "stream",
660
+ "text": [
661
+ "Difference in mean outcomes between unprivileged and privileged groups = -0.196276\n"
662
+ ]
663
+ }
664
+ ],
665
+ "source": [
666
+ "metric_orig_train = BinaryLabelDatasetMetric(dataset_orig, \n",
667
+ " unprivileged_groups=unprivileged_groups,\n",
668
+ " privileged_groups=privileged_groups)\n",
669
+ "display(Markdown(\"#### Original training dataset\"))\n",
670
+ "print(\"Difference in mean outcomes between unprivileged and privileged groups = %f\" % metric_orig_train.mean_difference())"
671
+ ]
672
+ },
673
+ {
674
+ "cell_type": "code",
675
+ "execution_count": 16,
676
+ "metadata": {},
677
+ "outputs": [],
678
+ "source": [
679
+ "import ipynbname\n",
680
+ "nb_fname = ipynbname.name()\n",
681
+ "nb_path = ipynbname.path()\n",
682
+ "\n",
683
+ "from sklearn.ensemble import GradientBoostingClassifier\n",
684
+ "import pickle\n",
685
+ "\n",
686
+ "data_orig_train, data_orig_test = dataset_orig.split([0.7], shuffle=True)\n",
687
+ "X_train = data_orig_train.features\n",
688
+ "y_train = data_orig_train.labels.ravel()\n",
689
+ "\n",
690
+ "X_test = data_orig_test.features\n",
691
+ "y_test = data_orig_test.labels.ravel()\n",
692
+ "num_estimators = 100\n",
693
+ "\n",
694
+ "model = GradientBoostingClassifier(loss='deviance', n_estimators=1, learning_rate=1.0,max_depth=2, random_state=0)\n",
695
+ "\n",
696
+ "mdl = model.fit(X_train, y_train)\n",
697
+ "with open('../../Results/GBC/' + nb_fname + '.pkl', 'wb') as f:\n",
698
+ " pickle.dump(mdl, f)\n",
699
+ "\n",
700
+ "with open('../../Results/GBC/' + nb_fname + '_Train' + '.pkl', 'wb') as f:\n",
701
+ " pickle.dump(data_orig_train, f) \n",
702
+ " \n",
703
+ "with open('../../Results/GBC/' + nb_fname + '_Test' + '.pkl', 'wb') as f:\n",
704
+ " pickle.dump(data_orig_test, f) "
705
+ ]
706
+ },
707
+ {
708
+ "cell_type": "code",
709
+ "execution_count": 17,
710
+ "metadata": {},
711
+ "outputs": [],
712
+ "source": [
713
+ "from csv import writer\n",
714
+ "from sklearn.metrics import accuracy_score, f1_score\n",
715
+ "\n",
716
+ "final_metrics = []\n",
717
+ "accuracy = []\n",
718
+ "f1= []\n",
719
+ "\n",
720
+ "for i in range(1,num_estimators+1):\n",
721
+ " \n",
722
+ " model = GradientBoostingClassifier(n_estimators= i, learning_rate=1.0,max_depth=2, random_state=0, loss='deviance')\n",
723
+ " mdl = model.fit(X_train, y_train)\n",
724
+ " yy = mdl.predict(X_test)\n",
725
+ " accuracy.append(accuracy_score(y_test, yy))\n",
726
+ " f1.append(f1_score(y_test, yy))\n",
727
+ " fair = get_fair_metrics_and_plot(data_orig_test, mdl) \n",
728
+ " fair_list = fair.iloc[1].tolist()\n",
729
+ " fair_list.insert(0, i)\n",
730
+ " final_metrics.append(fair_list)\n"
731
+ ]
732
+ },
733
+ {
734
+ "cell_type": "code",
735
+ "execution_count": 18,
736
+ "metadata": {},
737
+ "outputs": [
738
+ {
739
+ "data": {
740
+ "text/html": [
741
+ "<div>\n",
742
+ "<style scoped>\n",
743
+ " .dataframe tbody tr th:only-of-type {\n",
744
+ " vertical-align: middle;\n",
745
+ " }\n",
746
+ "\n",
747
+ " .dataframe tbody tr th {\n",
748
+ " vertical-align: top;\n",
749
+ " }\n",
750
+ "\n",
751
+ " .dataframe thead th {\n",
752
+ " text-align: right;\n",
753
+ " }\n",
754
+ "</style>\n",
755
+ "<table border=\"1\" class=\"dataframe\">\n",
756
+ " <thead>\n",
757
+ " <tr style=\"text-align: right;\">\n",
758
+ " <th></th>\n",
759
+ " <th>classifier</th>\n",
760
+ " <th>T0</th>\n",
761
+ " <th>T1</th>\n",
762
+ " <th>T2</th>\n",
763
+ " <th>T3</th>\n",
764
+ " <th>T4</th>\n",
765
+ " <th>T5</th>\n",
766
+ " <th>T6</th>\n",
767
+ " <th>T7</th>\n",
768
+ " <th>T8</th>\n",
769
+ " <th>...</th>\n",
770
+ " <th>T90</th>\n",
771
+ " <th>T91</th>\n",
772
+ " <th>T92</th>\n",
773
+ " <th>T93</th>\n",
774
+ " <th>T94</th>\n",
775
+ " <th>T95</th>\n",
776
+ " <th>T96</th>\n",
777
+ " <th>T97</th>\n",
778
+ " <th>T98</th>\n",
779
+ " <th>T99</th>\n",
780
+ " </tr>\n",
781
+ " </thead>\n",
782
+ " <tbody>\n",
783
+ " <tr>\n",
784
+ " <th>accuracy</th>\n",
785
+ " <td>0.869997</td>\n",
786
+ " <td>0.821067</td>\n",
787
+ " <td>0.839595</td>\n",
788
+ " <td>0.846453</td>\n",
789
+ " <td>0.843587</td>\n",
790
+ " <td>0.843075</td>\n",
791
+ " <td>0.844099</td>\n",
792
+ " <td>0.845225</td>\n",
793
+ " <td>0.847067</td>\n",
794
+ " <td>0.848910</td>\n",
795
+ " <td>...</td>\n",
796
+ " <td>0.868871</td>\n",
797
+ " <td>0.868871</td>\n",
798
+ " <td>0.868666</td>\n",
799
+ " <td>0.868871</td>\n",
800
+ " <td>0.868769</td>\n",
801
+ " <td>0.868769</td>\n",
802
+ " <td>0.868564</td>\n",
803
+ " <td>0.869178</td>\n",
804
+ " <td>0.869280</td>\n",
805
+ " <td>0.869997</td>\n",
806
+ " </tr>\n",
807
+ " <tr>\n",
808
+ " <th>f1</th>\n",
809
+ " <td>0.706968</td>\n",
810
+ " <td>0.523186</td>\n",
811
+ " <td>0.604792</td>\n",
812
+ " <td>0.629080</td>\n",
813
+ " <td>0.611986</td>\n",
814
+ " <td>0.613367</td>\n",
815
+ " <td>0.611975</td>\n",
816
+ " <td>0.617796</td>\n",
817
+ " <td>0.625564</td>\n",
818
+ " <td>0.632470</td>\n",
819
+ " <td>...</td>\n",
820
+ " <td>0.705043</td>\n",
821
+ " <td>0.704907</td>\n",
822
+ " <td>0.703900</td>\n",
823
+ " <td>0.704907</td>\n",
824
+ " <td>0.704744</td>\n",
825
+ " <td>0.704472</td>\n",
826
+ " <td>0.703875</td>\n",
827
+ " <td>0.705801</td>\n",
828
+ " <td>0.705557</td>\n",
829
+ " <td>0.706968</td>\n",
830
+ " </tr>\n",
831
+ " <tr>\n",
832
+ " <th>statistical_parity_difference</th>\n",
833
+ " <td>-0.183827</td>\n",
834
+ " <td>-0.166884</td>\n",
835
+ " <td>-0.143830</td>\n",
836
+ " <td>-0.150066</td>\n",
837
+ " <td>-0.182743</td>\n",
838
+ " <td>-0.185535</td>\n",
839
+ " <td>-0.179820</td>\n",
840
+ " <td>-0.161690</td>\n",
841
+ " <td>-0.152730</td>\n",
842
+ " <td>-0.153993</td>\n",
843
+ " <td>...</td>\n",
844
+ " <td>-0.187507</td>\n",
845
+ " <td>-0.187657</td>\n",
846
+ " <td>-0.187339</td>\n",
847
+ " <td>-0.187657</td>\n",
848
+ " <td>-0.187811</td>\n",
849
+ " <td>-0.183986</td>\n",
850
+ " <td>-0.185510</td>\n",
851
+ " <td>-0.184453</td>\n",
852
+ " <td>-0.184290</td>\n",
853
+ " <td>-0.183827</td>\n",
854
+ " </tr>\n",
855
+ " <tr>\n",
856
+ " <th>equal_opportunity_difference</th>\n",
857
+ " <td>-0.072391</td>\n",
858
+ " <td>-0.246539</td>\n",
859
+ " <td>-0.056392</td>\n",
860
+ " <td>-0.060173</td>\n",
861
+ " <td>-0.238734</td>\n",
862
+ " <td>-0.227913</td>\n",
863
+ " <td>-0.223696</td>\n",
864
+ " <td>-0.133076</td>\n",
865
+ " <td>-0.077557</td>\n",
866
+ " <td>-0.082003</td>\n",
867
+ " <td>...</td>\n",
868
+ " <td>-0.084936</td>\n",
869
+ " <td>-0.087698</td>\n",
870
+ " <td>-0.091728</td>\n",
871
+ " <td>-0.087698</td>\n",
872
+ " <td>-0.087698</td>\n",
873
+ " <td>-0.076918</td>\n",
874
+ " <td>-0.085704</td>\n",
875
+ " <td>-0.072889</td>\n",
876
+ " <td>-0.071394</td>\n",
877
+ " <td>-0.072391</td>\n",
878
+ " </tr>\n",
879
+ " <tr>\n",
880
+ " <th>average_abs_odds_difference</th>\n",
881
+ " <td>0.070864</td>\n",
882
+ " <td>0.160599</td>\n",
883
+ " <td>0.056297</td>\n",
884
+ " <td>0.058011</td>\n",
885
+ " <td>0.154799</td>\n",
886
+ " <td>0.151742</td>\n",
887
+ " <td>0.146839</td>\n",
888
+ " <td>0.097278</td>\n",
889
+ " <td>0.067588</td>\n",
890
+ " <td>0.069127</td>\n",
891
+ " <td>...</td>\n",
892
+ " <td>0.078537</td>\n",
893
+ " <td>0.079806</td>\n",
894
+ " <td>0.081486</td>\n",
895
+ " <td>0.079806</td>\n",
896
+ " <td>0.079918</td>\n",
897
+ " <td>0.073174</td>\n",
898
+ " <td>0.077850</td>\n",
899
+ " <td>0.071554</td>\n",
900
+ " <td>0.070924</td>\n",
901
+ " <td>0.070864</td>\n",
902
+ " </tr>\n",
903
+ " <tr>\n",
904
+ " <th>disparate_impact</th>\n",
905
+ " <td>-1.199121</td>\n",
906
+ " <td>-2.142558</td>\n",
907
+ " <td>-1.135429</td>\n",
908
+ " <td>-1.125965</td>\n",
909
+ " <td>-1.727185</td>\n",
910
+ " <td>-1.720900</td>\n",
911
+ " <td>-1.701447</td>\n",
912
+ " <td>-1.366819</td>\n",
913
+ " <td>-1.211665</td>\n",
914
+ " <td>-1.197980</td>\n",
915
+ " <td>...</td>\n",
916
+ " <td>-1.226642</td>\n",
917
+ " <td>-1.229974</td>\n",
918
+ " <td>-1.234349</td>\n",
919
+ " <td>-1.229974</td>\n",
920
+ " <td>-1.230556</td>\n",
921
+ " <td>-1.197055</td>\n",
922
+ " <td>-1.213610</td>\n",
923
+ " <td>-1.196169</td>\n",
924
+ " <td>-1.200879</td>\n",
925
+ " <td>-1.199121</td>\n",
926
+ " </tr>\n",
927
+ " <tr>\n",
928
+ " <th>theil_index</th>\n",
929
+ " <td>0.106252</td>\n",
930
+ " <td>0.170030</td>\n",
931
+ " <td>0.143530</td>\n",
932
+ " <td>0.135172</td>\n",
933
+ " <td>0.141621</td>\n",
934
+ " <td>0.140911</td>\n",
935
+ " <td>0.141760</td>\n",
936
+ " <td>0.139657</td>\n",
937
+ " <td>0.136899</td>\n",
938
+ " <td>0.134469</td>\n",
939
+ " <td>...</td>\n",
940
+ " <td>0.106709</td>\n",
941
+ " <td>0.106788</td>\n",
942
+ " <td>0.107254</td>\n",
943
+ " <td>0.106788</td>\n",
944
+ " <td>0.106822</td>\n",
945
+ " <td>0.106981</td>\n",
946
+ " <td>0.107209</td>\n",
947
+ " <td>0.106447</td>\n",
948
+ " <td>0.106651</td>\n",
949
+ " <td>0.106252</td>\n",
950
+ " </tr>\n",
951
+ " </tbody>\n",
952
+ "</table>\n",
953
+ "<p>7 rows × 101 columns</p>\n",
954
+ "</div>"
955
+ ],
956
+ "text/plain": [
957
+ " classifier T0 T1 T2 \\\n",
958
+ "accuracy 0.869997 0.821067 0.839595 0.846453 \n",
959
+ "f1 0.706968 0.523186 0.604792 0.629080 \n",
960
+ "statistical_parity_difference -0.183827 -0.166884 -0.143830 -0.150066 \n",
961
+ "equal_opportunity_difference -0.072391 -0.246539 -0.056392 -0.060173 \n",
962
+ "average_abs_odds_difference 0.070864 0.160599 0.056297 0.058011 \n",
963
+ "disparate_impact -1.199121 -2.142558 -1.135429 -1.125965 \n",
964
+ "theil_index 0.106252 0.170030 0.143530 0.135172 \n",
965
+ "\n",
966
+ " T3 T4 T5 T6 \\\n",
967
+ "accuracy 0.843587 0.843075 0.844099 0.845225 \n",
968
+ "f1 0.611986 0.613367 0.611975 0.617796 \n",
969
+ "statistical_parity_difference -0.182743 -0.185535 -0.179820 -0.161690 \n",
970
+ "equal_opportunity_difference -0.238734 -0.227913 -0.223696 -0.133076 \n",
971
+ "average_abs_odds_difference 0.154799 0.151742 0.146839 0.097278 \n",
972
+ "disparate_impact -1.727185 -1.720900 -1.701447 -1.366819 \n",
973
+ "theil_index 0.141621 0.140911 0.141760 0.139657 \n",
974
+ "\n",
975
+ " T7 T8 ... T90 T91 \\\n",
976
+ "accuracy 0.847067 0.848910 ... 0.868871 0.868871 \n",
977
+ "f1 0.625564 0.632470 ... 0.705043 0.704907 \n",
978
+ "statistical_parity_difference -0.152730 -0.153993 ... -0.187507 -0.187657 \n",
979
+ "equal_opportunity_difference -0.077557 -0.082003 ... -0.084936 -0.087698 \n",
980
+ "average_abs_odds_difference 0.067588 0.069127 ... 0.078537 0.079806 \n",
981
+ "disparate_impact -1.211665 -1.197980 ... -1.226642 -1.229974 \n",
982
+ "theil_index 0.136899 0.134469 ... 0.106709 0.106788 \n",
983
+ "\n",
984
+ " T92 T93 T94 T95 \\\n",
985
+ "accuracy 0.868666 0.868871 0.868769 0.868769 \n",
986
+ "f1 0.703900 0.704907 0.704744 0.704472 \n",
987
+ "statistical_parity_difference -0.187339 -0.187657 -0.187811 -0.183986 \n",
988
+ "equal_opportunity_difference -0.091728 -0.087698 -0.087698 -0.076918 \n",
989
+ "average_abs_odds_difference 0.081486 0.079806 0.079918 0.073174 \n",
990
+ "disparate_impact -1.234349 -1.229974 -1.230556 -1.197055 \n",
991
+ "theil_index 0.107254 0.106788 0.106822 0.106981 \n",
992
+ "\n",
993
+ " T96 T97 T98 T99 \n",
994
+ "accuracy 0.868564 0.869178 0.869280 0.869997 \n",
995
+ "f1 0.703875 0.705801 0.705557 0.706968 \n",
996
+ "statistical_parity_difference -0.185510 -0.184453 -0.184290 -0.183827 \n",
997
+ "equal_opportunity_difference -0.085704 -0.072889 -0.071394 -0.072391 \n",
998
+ "average_abs_odds_difference 0.077850 0.071554 0.070924 0.070864 \n",
999
+ "disparate_impact -1.213610 -1.196169 -1.200879 -1.199121 \n",
1000
+ "theil_index 0.107209 0.106447 0.106651 0.106252 \n",
1001
+ "\n",
1002
+ "[7 rows x 101 columns]"
1003
+ ]
1004
+ },
1005
+ "execution_count": 18,
1006
+ "metadata": {},
1007
+ "output_type": "execute_result"
1008
+ }
1009
+ ],
1010
+ "source": [
1011
+ "import numpy as np\n",
1012
+ "final_result = pd.DataFrame(final_metrics)\n",
1013
+ "final_result[4] = np.log(final_result[4])\n",
1014
+ "final_result = final_result.transpose()\n",
1015
+ "final_result.loc[0] = f1 # add f1 and acc to df\n",
1016
+ "acc = pd.DataFrame(accuracy).transpose()\n",
1017
+ "acc = acc.rename(index={0: 'accuracy'})\n",
1018
+ "final_result = pd.concat([acc,final_result])\n",
1019
+ "final_result = final_result.rename(index={0: 'f1', 1: 'statistical_parity_difference', 2: 'equal_opportunity_difference', 3: 'average_abs_odds_difference', 4: 'disparate_impact', 5: 'theil_index'})\n",
1020
+ "final_result.columns = ['T' + str(col) for col in final_result.columns]\n",
1021
+ "final_result.insert(0, \"classifier\", final_result['T' + str(num_estimators - 1)]) ##Add final metrics add the beginning of the df\n",
1022
+ "final_result.to_csv('../../Results/GBC/' + nb_fname + '.csv')\n",
1023
+ "final_result"
1024
+ ]
1025
+ }
1026
+ ],
1027
+ "metadata": {
1028
+ "_change_revision": 0,
1029
+ "_is_fork": false,
1030
+ "kernelspec": {
1031
+ "display_name": "Python 3",
1032
+ "language": "python",
1033
+ "name": "python3"
1034
+ },
1035
+ "language_info": {
1036
+ "codemirror_mode": {
1037
+ "name": "ipython",
1038
+ "version": 3
1039
+ },
1040
+ "file_extension": ".py",
1041
+ "mimetype": "text/x-python",
1042
+ "name": "python",
1043
+ "nbconvert_exporter": "python",
1044
+ "pygments_lexer": "ipython3",
1045
+ "version": "3.8.5"
1046
+ }
1047
+ },
1048
+ "nbformat": 4,
1049
+ "nbformat_minor": 1
1050
+ }
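
The cell above collects per-boosting-stage accuracy, F1 and fairness metrics into a single frame, log-transforms disparate impact (so the negative disparate_impact values in the table correspond to raw ratios below 1, e.g. exp(-1.199) ≈ 0.30), labels the stages T0–T99 and writes the result to ../../Results/GBC/. The sketch below is a minimal, self-contained approximation of that loop, not the notebook's exact pipeline: it assumes an already-encoded DataFrame df with a binary income target and a binary sex column (1 = privileged; both column names are assumptions), and it computes statistical parity difference and disparate impact directly from hard predictions rather than through get_fair_metrics_and_plot.

import numpy as np
import pandas as pd
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.metrics import accuracy_score, f1_score
from sklearn.model_selection import train_test_split

def fairness_row(y_pred, protected):
    # Group selection rates from hard predictions (protected: 1 = privileged, 0 = unprivileged).
    p_priv = y_pred[protected == 1].mean()
    p_unpriv = y_pred[protected == 0].mean()
    return {"statistical_parity_difference": p_unpriv - p_priv,
            "disparate_impact": p_unpriv / p_priv if p_priv > 0 else np.nan}

def per_stage_metrics(df, n_estimators=100):
    # df: numerically encoded features plus a binary 'income' target and binary 'sex' column (assumed names).
    X, y = df.drop(columns=["income"]), df["income"].values
    X_tr, X_te, y_tr, y_te = train_test_split(X, y, test_size=0.3, random_state=0)
    model = GradientBoostingClassifier(n_estimators=n_estimators).fit(X_tr, y_tr)
    protected = X_te["sex"].values
    rows = []
    # staged_predict yields the ensemble's predictions after each boosting stage (T0..T99).
    for stage, y_hat in enumerate(model.staged_predict(X_te)):
        y_hat = np.asarray(y_hat)
        row = {"stage": stage,
               "accuracy": accuracy_score(y_te, y_hat),
               "f1": f1_score(y_te, y_hat)}
        row.update(fairness_row(y_hat, protected))
        rows.append(row)
    out = pd.DataFrame(rows).set_index("stage").T        # metrics as rows, stages as columns
    out.columns = ["T" + str(c) for c in out.columns]    # mirror the T0..T99 layout above
    return out

Equal opportunity difference, average absolute odds difference and the Theil index shown in the output additionally require the true labels per group; in the notebook they appear to come from get_fair_metrics_and_plot over the AIF360 test dataset rather than from a hand-rolled helper like the one above.
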
AdultNoteBook/Kernels/GBC/.ipynb_checkpoints/2-income-census-prediction-gradient-boosting-algos-checkpoint.ipynb ADDED
The diff for this file is too large to render. See raw diff
 
AdultNoteBook/Kernels/GBC/.ipynb_checkpoints/3-income-prediction-xgbclassifier-auc-0-926-checkpoint.ipynb ADDED
The diff for this file is too large to render. See raw diff
 
AdultNoteBook/Kernels/GBC/.ipynb_checkpoints/4-deep-analysis-and-90-accuracy-checkpoint.ipynb ADDED
The diff for this file is too large to render. See raw diff