import seaborn as sns
import pandas as pd
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, GradientBoostingClassifier
from sklearn.svm import SVC
from sklearn.model_selection import cross_val_score
# from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.linear_model import LogisticRegression
from sklearn import metrics as m
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn import preprocessing
import matplotlib.pyplot as plt
import numpy as np

### Suppress warnings
import warnings
warnings.filterwarnings(action="ignore")

### Limit how many rows/columns pandas displays
pd.set_option("display.max_rows", 30)
pd.set_option("display.max_columns", 30)

### Load the data
# BUG FIX: the original passed both sep='\t' and delimiter=';' — these are
# aliases of the same read_csv option and delimiter silently wins, so the
# tab separator was never used. Keep only the effective ';' separator.
bank_train = pd.read_csv("datasets/bank-additional-full.csv", sep=';')


## 1 Data description

### 1.1 Size of the data set
len(bank_train)

### 1.2 Column names
bank_train.keys()

### 1.3 Summary statistics — only numeric columns show up here,
### which is exploited below to find the nominal ones
bank_train.describe()

### 1.4 Value range of every nominal (non-numeric) attribute
for item in set(bank_train.keys()).difference(set(bank_train.describe())):
    print(item+" : ")
    print(bank_train[item].unique())

# Nominal columns = columns absent from describe()'s numeric summary
notation_keys = [item for item in set(bank_train.keys()).difference(set(bank_train.describe()))]

## 2 Data preview

### 2.1 Missing values per column
bank_train.isnull().any()

### 2.2 Distributions

#### 2.2.1 Nominal attribute distributions
# For each nominal column: group, count, turn the counts into a fraction of
# the rows where that column is present, and draw a bar chart. The first few
# columns have many categories and get larger figures.
for nominal_col, fig_size in [('job', (20, 12)), ('education', (15, 9)),
                              ('month', (10, 6)), ('day_of_week', (6, 6)),
                              ('poutcome', (6, 6)), ('default', (6, 6)),
                              ('marital', (6, 6)), ('contact', (6, 6)),
                              ('housing', (6, 6)), ('loan', (6, 6))]:
    plt.subplots(figsize=fig_size)
    des_groups = bank_train.groupby(nominal_col).count().reset_index()
    # counts -> share of non-missing rows
    des_groups['y'] = des_groups['y'] / len(bank_train.dropna(subset=[nominal_col]))
    sns.barplot(x=nominal_col, y='y', data=des_groups)
    plt.show()

#### 2.2.2 Numeric attribute distributions
# FIX: sns.distplot was deprecated in seaborn 0.11 and removed in 0.14;
# histplot with a density scale plus a KDE overlay is the documented
# replacement and draws the equivalent figure.
for numeric_col in ['age', 'duration', 'campaign', 'pdays', 'previous',
                    'emp.var.rate', 'cons.price.idx', 'cons.conf.idx',
                    'euribor3m', 'nr.employed']:
    plt.subplots(figsize=(12, 5))
    sns.histplot(bank_train[numeric_col], kde=True, stat="density")
    plt.show()

### 2.3 Attributes vs. the term deposit outcome (y)

#### 2.3.1 Nominal attributes vs. term deposit
# Row-normalised stacked bars: for every category, the share of y == no/yes.
for nominal_col, fig_size in [('job', (10, 6)), ('education', (10, 6)),
                              ('month', (10, 6)), ('day_of_week', (10, 6)),
                              ('poutcome', (6, 6)), ('default', (6, 6)),
                              ('marital', (6, 6)), ('contact', (6, 6)),
                              ('housing', (6, 6)), ('loan', (6, 6))]:
    deposit_data = pd.crosstab(bank_train[nominal_col], bank_train.y)
    deposit_data.div(deposit_data.sum(1).astype(float), axis=0).plot(
        kind='bar', color=['green', 'lime'], stacked=True, grid=False,
        figsize=fig_size)
    plt.show()


#### 2.3.2 Numeric attributes vs. term deposit
# Violin plots: distribution of each numeric attribute, split by outcome y.
for numeric_col in ['age', 'duration', 'campaign', 'pdays', 'previous',
                    'emp.var.rate', 'cons.price.idx', 'cons.conf.idx',
                    'euribor3m', 'nr.employed']:
    plt.subplots(figsize=(6, 6))
    sns.violinplot(x=bank_train.y, y=bank_train[numeric_col], palette="pastel")
    plt.show()


## 3 Preprocessing

convert_bank_train = bank_train.copy()

### 3.1 Encode nominal columns as integers
# LabelEncoder sorts the unique labels before assigning codes, so fitting on
# the unique values and transforming the whole column is order-independent.
le = preprocessing.LabelEncoder()

for nominal_col in ['job', 'education', 'month', 'day_of_week', 'poutcome',
                    'default', 'marital', 'contact', 'housing', 'loan', 'y']:
    le.fit(list(bank_train[nominal_col].unique()))
    convert_bank_train[nominal_col] = le.transform(list(convert_bank_train[nominal_col]))


### 3.2 Missing-value handling

#### 3.2.1 Keep only rows where none of the encoded nominal columns carries a
#### missing-value code; this fully-observed subset trains the imputers below.
# NOTE(review): the magic codes (marital==3, loan==1, default==1, housing==1,
# job==11) presumably are the LabelEncoder indices of the 'unknown' label in
# each column — confirm against each encoder's sorted class list.
full_data = convert_bank_train[(convert_bank_train['marital']!=3) & (convert_bank_train['loan']!=1)
                               & (convert_bank_train['default'] !=1)& (convert_bank_train['housing']!=1)
                               & (convert_bank_train['job']!=11)]



#### 3.2.2 Impute the "unknown" codes with random forests

rf_full_data = convert_bank_train.copy()

# For each (column, unknown-code) pair: train a random forest on the fully
# observed rows (full_data), using every other column as features, then
# overwrite that column's unknown rows with the forest's predictions.
for impute_col, miss_code in [('marital', 3), ('loan', 1), ('default', 1),
                              ('housing', 1), ('job', 11)]:
    miss_value_cls = RandomForestClassifier()
    miss_value_train = full_data.drop(impute_col, axis=1)
    miss_value_label = full_data[impute_col]
    miss_value_cls.fit(miss_value_train, miss_value_label)
    miss_mask = rf_full_data[impute_col] == miss_code
    rf_full_data.loc[miss_mask, impute_col] = miss_value_cls.predict(
        rf_full_data[miss_mask].drop(impute_col, axis=1))



### 3.3 Binning (discretisation)

# Work on a fresh copy of the label-encoded frame; the binning below replaces columns.
boxing_full_data = convert_bank_train.copy()
#### 3.3.1 Binning helpers (defined here, applied in 3.3.2)

# pdays: collapse "days since last contact" into a binary notation column.
def pdays_boxing(data):
    """Set pdays_notation in place: 1 when pdays < 27, 0 when pdays >= 27."""
    days = data['pdays']
    data.loc[days >= 27, 'pdays_notation'] = 0
    data.loc[days < 27, 'pdays_notation'] = 1
    return data

# duration: convert the call length to minutes, then bucket into 3 bins.
def duration_boxing(data):
    """Add a minutes column (duration / 60) and set duration_notation.

    The three masks overlap on purpose and later assignments win, so the
    effective buckets are: <= 2 min -> 0, (2, 4] min -> 1, > 4 min -> 2.
    """
    data['minutes'] = data['duration']/60
    mins = data['minutes']
    data.loc[mins < 3, 'duration_notation'] = 0
    data.loc[(mins > 2) & (mins < 5), 'duration_notation'] = 1
    data.loc[mins > 4, 'duration_notation'] = 2
    return data

# age: three bands — up to 30, between 31 and 49, 50 and over.
def age_boxing(data):
    """Set age_notation in place: 0 for age <= 30, 1 for 30 < age < 50, 2 for age >= 50."""
    ages = data['age']
    data.loc[ages <= 30, 'age_notation'] = 0
    data.loc[(ages > 30) & (ages < 50), 'age_notation'] = 1
    data.loc[ages >= 50, 'age_notation'] = 2
    return data

# emp.var.rate: three bands split at -1.8 and 1.1.
def empVarRate_boxing(data):
    """Set emp.var.rate_notation in place: 0 for <= -1.8, 1 for (-1.8, 1.1), 2 for >= 1.1."""
    rate = data['emp.var.rate']
    data.loc[rate <= -1.8, 'emp.var.rate_notation'] = 0
    data.loc[(rate > -1.8) & (rate < 1.1), 'emp.var.rate_notation'] = 1
    data.loc[rate >= 1.1, 'emp.var.rate_notation'] = 2
    return data

#### 3.3.2 Apply the binning to the data

# Each step pre-creates the notation column(s) so the helpers fill an
# existing integer column, runs the helper, then drops the raw column(s).
for init_cols, transform, drop_cols in [
        (['pdays_notation'], pdays_boxing, ['pdays']),
        (['duration_notation', 'minutes'], duration_boxing, ['duration', 'minutes']),
        (['age_notation'], age_boxing, ['age']),
        (['emp.var.rate_notation'], empVarRate_boxing, ['emp.var.rate'])]:
    for new_col in init_cols:
        boxing_full_data[new_col] = 0
    boxing_full_data = transform(boxing_full_data)
    boxing_full_data = boxing_full_data.drop(drop_cols, axis=1)

# Category sizes per emp.var.rate bucket (kept for inspection)
test = boxing_full_data.groupby("emp.var.rate_notation").count().reset_index()

#### 3.3.3 Preview the binned attributes against y
# Same row-normalised stacked-bar preview as section 2.3.1.
for binned_col in ['pdays_notation', 'duration_notation',
                   'age_notation', 'emp.var.rate_notation']:
    deposit_data = pd.crosstab(boxing_full_data[binned_col], boxing_full_data.y)
    deposit_data.div(deposit_data.sum(1).astype(float), axis=0).plot(
        kind='bar', color=['green', 'lime'], stacked=True, grid=False,
        figsize=(6, 6))
    plt.show()

### 3.4 One-hot encoding

dummie_full_data = rf_full_data.copy()

# Nominal attributes = columns of the raw frame absent from describe()'s
# numeric summary (rf_full_data is already label-encoded, so the names must
# come from the original bank_train).
notation_keys = [item for item in set(bank_train.keys()).difference(set(bank_train.describe()))]

# Never one-hot the target variable.
notation_keys.remove('y')

for cname in notation_keys:
    print(cname)
    # IDIOM FIX: pd.get_dummies(prefix=...) already names the dummy columns
    # "<cname>_<value>" (prefix_sep defaults to "_"), which is exactly what
    # the previous manual renaming loop produced.
    dummies_single_data = pd.get_dummies(dummie_full_data[cname], prefix=cname)
    dummie_full_data = dummie_full_data.join(dummies_single_data)
    dummie_full_data = dummie_full_data.drop(cname, axis=1)


### 3.5 Normalisation

#### Split the experiment variables from the target variable
bank_data = dummie_full_data.copy()
cols = bank_data.columns.values.tolist()
train_attr = bank_data.drop(['y'], axis=1)
train_label = bank_data[['y']]

#### Standardise every experiment variable (zero mean, unit variance)
for column in train_attr.columns:
    train_attr[column] = preprocessing.scale(train_attr[column])

#### Inspect the discretised / normalised result
print(train_attr)


## 4 Modelling

### 4.1 Candidate models to compare
classifiers = {
    'Gradient Boosting Classifier': GradientBoostingClassifier(),
    'Adaptive Boosting Classifier': AdaBoostClassifier(),
    'Logistic Regression': LogisticRegression(),
    'SVC': SVC(),
    'Random Forest Classifier': RandomForestClassifier(),
    'K Nearest Neighbour': KNeighborsClassifier(),
}

### 4.2 Result-log layout and the evaluation metrics recorded per model
log_cols = ["Classifier", "Accuracy", "Precision Score", "Recall Score", "F1-Score", "roc-auc_Score"]
log = pd.DataFrame(columns=log_cols)

### 4.3 Train/test splitter: 2 stratified shuffles, 10% held out each time
shuffleSplit = StratifiedShuffleSplit(n_splits=2, test_size=0.1, random_state=0)

### 4.4 Evaluate every classifier on the stratified splits
# NOTE(review): the original heading said "ten-fold", but shuffleSplit above
# only yields n_splits=2 partitions — confirm which was intended.
for name,classifier in classifiers.items():
    for itrain, itest in shuffleSplit.split(train_attr,train_label):
        attr_train,attr_test = train_attr.iloc[itrain],train_attr.iloc[itest]
        label_train,label_test = train_label.iloc[itrain],train_label.iloc[itest]
        # Fit the classifier on this split
        print(name)
        cls = classifier
        cls = cls.fit(attr_train,label_train)
        # Predict the held-out split
        test_predict = cls.predict(attr_test)
        # Compare predictions with ground truth on several metrics,
        # micro-averaged across classes
        avg = 'micro'
        accuracy = m.accuracy_score(label_test,test_predict)
        precision = m.precision_score(label_test,test_predict,average=avg)
        recall = m.recall_score(label_test,test_predict,average=avg)
        roc_auc = m.roc_auc_score(label_test,test_predict,average=avg)
        f1_score = m.f1_score(label_test,test_predict,average=avg)
        log_entry = pd.DataFrame([[name,accuracy,precision,recall,f1_score,roc_auc]],columns=log_cols)
        # FIX: DataFrame.append was deprecated in pandas 1.4 and removed in
        # 2.0; pd.concat is the supported replacement.
        log = pd.concat([log, log_entry], ignore_index=True)

# Average the metrics over the splits
log_mean = log.groupby('Classifier').mean().reset_index()

# Show the training results
print(log_mean)


### 4.5 Visualise the modelling results
# Absolute metric values (raw per-split log)
log_visual = log.copy()


#### Min-max normalise the averaged metrics to get relative values
min_max_scaler = preprocessing.MinMaxScaler()  # NOTE(review): created but never used below
# Relative metric values
log_visual_relative = log_mean.copy()
metric_names = ["Accuracy", "Precision Score", "Recall Score", "F1-Score", "roc-auc_Score"]
log_visual_relative[metric_names] = log_visual_relative[metric_names].apply(
    lambda x: (x - np.min(x)) / (np.max(x) - np.min(x)))

#### 4.5 Bar charts: every metric, absolute then relative
# Each spec: (x column, y column, data frame, figure size, muted-green style).
# The sequence reproduces the original hand-written sections exactly,
# including the two vertical accuracy charts drawn first.
plot_specs = [
    ('Classifier', 'Accuracy', log_visual, (15, 9), False),
    ('Classifier', 'Accuracy', log_visual_relative, (15, 9), False),
    ('Accuracy', 'Classifier', log_visual, (6, 3), True),
    ('Accuracy', 'Classifier', log_visual_relative, (6, 3), False),
    ('Precision Score', 'Classifier', log_visual, (6, 3), True),
    ('Precision Score', 'Classifier', log_visual_relative, (6, 3), False),
    ('Recall Score', 'Classifier', log_visual, (6, 3), True),
    ('Recall Score', 'Classifier', log_visual_relative, (6, 3), False),
    ('F1-Score', 'Classifier', log_visual, (6, 9), True),
    ('F1-Score', 'Classifier', log_visual_relative, (15, 9), False),
    ('roc-auc_Score', 'Classifier', log_visual, (6, 9), True),
    ('roc-auc_Score', 'Classifier', log_visual_relative, (15, 9), False),
]
for x_col, y_col, frame, fig_size, muted in plot_specs:
    plt.subplots(figsize=fig_size)
    if muted:
        sns.set_color_codes('muted')
        sns.barplot(x=x_col, y=y_col, data=frame, color='g')
    else:
        sns.barplot(x=x_col, y=y_col, data=frame)
    plt.show()



########################## Process encapsulation (reusable pipeline functions)

# Encode every nominal column of a frame to integers with a LabelEncoder.
def convert_data(data):
    """Return a copy of *data* with the nominal columns label-encoded.

    The encoder is refitted per column on that column's own unique values;
    LabelEncoder sorts the labels, so codes are alphabetical ranks.

    BUG FIX: the original fitted the encoder on the module-level bank_train
    for every column except 'job', so calling it on any other frame (e.g. a
    filtered or resampled one) silently used the wrong label set. Every fit
    now uses the frame that was passed in.
    """
    convert_bank_train = data.copy()
    le = preprocessing.LabelEncoder()
    nominal_cols = ['job', 'education', 'month', 'day_of_week', 'poutcome',
                    'default', 'marital', 'contact', 'housing', 'loan', 'y']
    for col in nominal_cols:
        le.fit(list(convert_bank_train[col].unique()))
        convert_bank_train[col] = le.transform(list(convert_bank_train[col]))
    return convert_bank_train


# Impute encoded "unknown" values with per-column random forests.
def rdm_forest(data):
    """Return a copy of *data* with each column's "unknown" code replaced by
    random-forest predictions.

    For each (column, unknown-code) pair a forest is trained with every other
    column as features, then the unknown rows are overwritten with its
    predictions. Columns are processed in order, so later imputations see the
    earlier ones.

    BUG FIX: the original trained each forest on *all* rows — including the
    rows whose label was the unknown code itself — so the forest could simply
    re-predict "unknown". Training now excludes those rows, matching the
    script version that trains on the fully-observed full_data subset.
    """
    rf_full_data = data.copy()
    for col, miss_code in [('marital', 3), ('loan', 1), ('default', 1),
                           ('housing', 1), ('job', 11)]:
        miss_value_cls = RandomForestClassifier()
        # Train only on rows where this column is actually observed.
        observed = rf_full_data[rf_full_data[col] != miss_code]
        miss_value_cls.fit(observed.drop(col, axis=1), observed[col])
        # Overwrite the unknown rows with the forest's predictions.
        miss_mask = rf_full_data[col] == miss_code
        if miss_mask.any():
            rf_full_data.loc[miss_mask, col] = miss_value_cls.predict(
                rf_full_data[miss_mask].drop(col, axis=1))
    # Return the imputed frame
    return rf_full_data


# Apply all of the binning helpers and drop the raw columns they replace.
def boxing(data):
    """Return a copy of *data* with pdays, duration, age and emp.var.rate
    replaced by their binned *_notation columns."""
    boxing_full_data = data.copy()
    # Pre-create the notation column(s), run the helper, drop the raw column(s).
    for init_cols, transform, drop_cols in [
            (['pdays_notation'], pdays_boxing, ['pdays']),
            (['duration_notation', 'minutes'], duration_boxing, ['duration', 'minutes']),
            (['age_notation'], age_boxing, ['age']),
            (['emp.var.rate_notation'], empVarRate_boxing, ['emp.var.rate'])]:
        for new_col in init_cols:
            boxing_full_data[new_col] = 0
        boxing_full_data = transform(boxing_full_data)
        boxing_full_data = boxing_full_data.drop(drop_cols, axis=1)
    # Return the binned frame
    return boxing_full_data

# One-hot encode the nominal columns of a frame.
def hot_encoding(data, columns=None):
    """Return a copy of *data* with the selected columns one-hot encoded.

    Dummy columns are named "<column>_<value>" — pd.get_dummies' prefix /
    prefix_sep defaults produce exactly the names the original manual
    renaming loop built.

    Parameters:
        data: frame to encode; not modified, a copy is returned.
        columns: iterable of column names to encode. Defaults to the nominal
            (non-numeric) columns of the module-level bank_train minus the
            target 'y', preserving the original behaviour (the callers pass
            label-encoded frames, whose nominal columns cannot be detected
            from the frame itself).
    """
    dummie_full_data = data.copy()
    if columns is None:
        # Original behaviour: nominal columns of the global raw frame.
        notation_keys = list(set(bank_train.keys()).difference(set(bank_train.describe())))
        notation_keys.remove('y')
    else:
        notation_keys = list(columns)
    for cname in notation_keys:
        dummies_single_data = pd.get_dummies(dummie_full_data[cname], prefix=cname)
        dummie_full_data = dummie_full_data.join(dummies_single_data)
        dummie_full_data = dummie_full_data.drop(cname, axis=1)
    # Return the one-hot encoded frame
    return dummie_full_data

# Split a frame into experiment variables (features) and the target variable.
def dismantle_data(data):
    """Return (attributes, label): all columns except 'y', and the
    single-column 'y' frame."""
    bank_data = data.copy()
    attributes = bank_data.drop(['y'], axis=1)
    target = bank_data[['y']]
    return attributes, target

# Standardise every column to zero mean / unit variance.
def data_normalization(data):
    """Return a standardised copy of *data*.

    BUG FIX: the original scaled the caller's frame in place (a surprising
    side effect for a function that also returns the result); the work is now
    done on a copy, leaving the input intact while the return value is
    unchanged for existing callers.
    """
    normalized = data.copy()
    for key in normalized.keys():
        normalized[key] = preprocessing.scale(normalized[key])
    # Return the standardised frame
    return normalized

# Train and evaluate every candidate classifier.
def model_train(train_attr, train_label):
    """Fit each candidate model on StratifiedShuffleSplit partitions of the
    data and return the per-classifier mean metrics.

    Parameters:
        train_attr: feature frame.
        train_label: single-column target frame.

    Returns a DataFrame with columns Classifier, Accuracy, Precision Score,
    Recall Score, F1-Score, roc-auc_Score, averaged over the splits.
    """
    classifiers = {
        'Gradient Boosting Classifier': GradientBoostingClassifier(),
        'Adaptive Boosting Classifier': AdaBoostClassifier(),
        'Logistic Regression': LogisticRegression(),
        'SVC': SVC(),
        'Random Forest Classifier': RandomForestClassifier(),
        'K Nearest Neighbour': KNeighborsClassifier()
    }

    ### Result-log layout and evaluation metrics
    log_cols = ["Classifier", "Accuracy", "Precision Score", "Recall Score", "F1-Score", "roc-auc_Score"]
    log = pd.DataFrame(columns=log_cols)

    ### Two stratified shuffles, 10% held out each time (not ten-fold CV)
    shuffleSplit = StratifiedShuffleSplit(n_splits=2, test_size=0.1, random_state=0)

    ### Evaluate every classifier on every split
    for name, classifier in classifiers.items():
        for itrain, itest in shuffleSplit.split(train_attr, train_label):
            attr_train, attr_test = train_attr.iloc[itrain], train_attr.iloc[itest]
            label_train, label_test = train_label.iloc[itrain], train_label.iloc[itest]
            cls = classifier
            cls = cls.fit(attr_train, label_train)
            test_predict = cls.predict(attr_test)
            # micro-averaged metrics
            avg = 'micro'
            accuracy = m.accuracy_score(label_test, test_predict)
            precision = m.precision_score(label_test, test_predict, average=avg)
            recall = m.recall_score(label_test, test_predict, average=avg)
            roc_auc = m.roc_auc_score(label_test, test_predict, average=avg)
            f1_score = m.f1_score(label_test, test_predict, average=avg)
            log_entry = pd.DataFrame([[name, accuracy, precision, recall, f1_score, roc_auc]], columns=log_cols)
            # FIX: DataFrame.append was removed in pandas 2.0 — use pd.concat.
            log = pd.concat([log, log_entry], ignore_index=True)
    # Average over the splits
    log_mean = log.groupby('Classifier').mean().reset_index()
    # Show the training results
    print(log_mean)
    # Return the averaged results
    return log_mean


#########################
# Binning helpers (duplicate definitions of the section 3.3.1 helpers)

# pdays binning (NOTE(review): re-defines the identical helper from 3.3.1).
def pdays_boxing(data):
    """Set pdays_notation in place: 1 when pdays < 27, else 0."""
    recent = data['pdays'] < 27
    data.loc[data['pdays'] >= 27, 'pdays_notation'] = 0
    data.loc[recent, 'pdays_notation'] = 1
    return data

# duration binning (NOTE(review): re-defines the identical helper from 3.3.1).
def duration_boxing(data):
    """Add minutes (= duration seconds / 60) and set duration_notation.

    Overlapping masks, later assignments win: effective buckets are
    <= 2 min -> 0, (2, 4] min -> 1, > 4 min -> 2.
    """
    data['minutes'] = data['duration']/60
    minute_series = data['minutes']
    data.loc[minute_series < 3, 'duration_notation'] = 0
    data.loc[(minute_series > 2) & (minute_series < 5), 'duration_notation'] = 1
    data.loc[minute_series > 4, 'duration_notation'] = 2
    return data

# age binning (NOTE(review): re-defines the identical helper from 3.3.1).
def age_boxing(data):
    """Set age_notation in place: 0 (<=30), 1 (31..49), 2 (>=50)."""
    age_series = data['age']
    data.loc[age_series <= 30, 'age_notation'] = 0
    data.loc[(age_series > 30) & (age_series < 50), 'age_notation'] = 1
    data.loc[age_series >= 50, 'age_notation'] = 2
    return data

# emp.var.rate binning (NOTE(review): re-defines the identical helper from 3.3.1).
def empVarRate_boxing(data):
    """Set emp.var.rate_notation in place: 0 (<=-1.8), 1 (-1.8..1.1 exclusive), 2 (>=1.1)."""
    rate_series = data['emp.var.rate']
    data.loc[rate_series <= -1.8, 'emp.var.rate_notation'] = 0
    data.loc[(rate_series > -1.8) & (rate_series < 1.1), 'emp.var.rate_notation'] = 1
    data.loc[rate_series >= 1.1, 'emp.var.rate_notation'] = 2
    return data

## 5 Model evaluation

# Short display names for the classifiers
algorithms = {'Gradient Boosting Classifier': 'GraBoost',
              'Adaptive Boosting Classifier': 'AdaBoost',
              'Logistic Regression': 'LR',
              'SVC': 'SVC',
              'Random Forest Classifier': 'RDF',
              'K Nearest Neighbour': 'K-NN'}

# Result frames collected from every evaluation scenario
logs = []

### 5.1 Baseline scenario: label-encoded data only
def default():
    """Run the model comparison on the plain label-encoded data and record it."""
    default_data = convert_data(bank_train)
    train, label = dismantle_data(default_data)
    log = model_train(train, label)
    # Shorten the classifier names for plotting
    log['Classifier'] = log['Classifier'].map(lambda x: algorithms[x])
    logs.append(log)

# Line chart comparing all metrics per classifier.
# NOTE(review): at module level `log` is still the raw per-split log from
# section 4.4 — default() above is defined but never called — confirm intent.
plt.title('models compare')
for metric_col, line_color, line_label in [('Accuracy', 'green', 'accuracy'),
                                           ('Precision Score', 'red', 'precision'),
                                           ('Recall Score', 'blue', 'recall'),
                                           ('F1-Score', 'skyblue', 'F1'),
                                           ('roc-auc_Score', 'orange', 'ROC-AUC')]:
    plt.plot(log.Classifier, log[metric_col], color=line_color, label=line_label)
plt.legend()  # show the legend
plt.show()


### 5.2 Scenario: normalised data
normalize_data = convert_data(bank_train)
train,label = dismantle_data(normalize_data)
log = model_train(data_normalization(train),label)
# BUG FIX: a stray logs.append(logs) appended the list to itself, corrupting
# the collected results; the result frame is now appended exactly once,
# after the classifier names are shortened.
log['Classifier'] = log['Classifier'].map(lambda x:algorithms[x])
logs.append(log)

plt.title('models compare')
plt.plot(log.Classifier, log.Accuracy, color='green', label='accuracy')
plt.plot(log.Classifier, log['Precision Score'], color='red', label='precision')
plt.plot(log.Classifier, log['Recall Score'], color='blue', label='recall')
plt.plot(log.Classifier, log['F1-Score'], color='skyblue', label='F1')
plt.plot(log.Classifier, log['roc-auc_Score'], color='orange', label='ROC-AUC')
plt.legend() # show the legend
plt.show()

### 5.3 归一化+热编码
def he():
    hot_encoding_data = hot_encoding(bank_train)
    train,label = dismantle_data(hot_encoding_data)
    le.fit(list(label['y'].unique()))
    label['y'] = le.transform(list(label['y']))
    log = model_train(data_normalization(train),label)
    logs.append(logs)
    log['Classifier'] = log['Classifier'].map(lambda x:algorithms[x])
    logs.append(log)

plt.title('models compare')
plt.plot(log.Classifier, log.Accuracy, color='green', label='accuracy')
plt.plot(log.Classifier, log['Precision Score'], color='red', label='precision')
plt.plot(log.Classifier, log['Recall Score'], color='blue', label='recall')
plt.plot(log.Classifier, log['F1-Score'], color='skyblue', label='F1')
plt.plot(log.Classifier, log['roc-auc_Score'], color='orange', label='ROC-AUC')
plt.legend() # 显示图例
plt.show()


### 5.4 Scenario: normalisation + random-forest imputation
def rdf():
    """Run the model comparison on RF-imputed, normalised data."""
    random_forest_data = convert_data(bank_train)
    random_forest_data = rdm_forest(random_forest_data)
    train,label = dismantle_data(random_forest_data)
    log = model_train(data_normalization(train),label)
    # BUG FIX: removed the stray logs.append(logs) self-append; the result
    # frame is appended once, after the names are shortened.
    log['Classifier'] = log['Classifier'].map(lambda x:algorithms[x])
    logs.append(log)

plt.title('models compare')
plt.plot(log.Classifier, log.Accuracy, color='green', label='accuracy')
plt.plot(log.Classifier, log['Precision Score'], color='red', label='precision')
plt.plot(log.Classifier, log['Recall Score'], color='blue', label='recall')
plt.plot(log.Classifier, log['F1-Score'], color='skyblue', label='F1')
plt.plot(log.Classifier, log['roc-auc_Score'], color='orange', label='ROC-AUC')
plt.legend() # show the legend
plt.show()


### 5.5 Scenario: normalisation + binning
def boxing():
    """Run the model comparison on binned, normalised data.

    BUG FIX: this definition shadows the boxing(data) transform defined
    earlier, so the original call boxing(boxing_data) recursed into itself
    forever. The binning is now performed by calling the individual binning
    helpers directly. The stray logs.append(logs) self-append is also gone.
    """
    boxing_data = convert_data(bank_train)
    # Apply the same transform the (shadowed) boxing(data) helper performed.
    for init_cols, transform, drop_cols in [
            (['pdays_notation'], pdays_boxing, ['pdays']),
            (['duration_notation', 'minutes'], duration_boxing, ['duration', 'minutes']),
            (['age_notation'], age_boxing, ['age']),
            (['emp.var.rate_notation'], empVarRate_boxing, ['emp.var.rate'])]:
        for new_col in init_cols:
            boxing_data[new_col] = 0
        boxing_data = transform(boxing_data)
        boxing_data = boxing_data.drop(drop_cols, axis=1)
    train,label = dismantle_data(boxing_data)
    log = model_train(data_normalization(train),label)
    log['Classifier'] = log['Classifier'].map(lambda x:algorithms[x])
    logs.append(log)

plt.title('models compare')
plt.plot(log.Classifier, log.Accuracy, color='green', label='accuracy')
plt.plot(log.Classifier, log['Precision Score'], color='red', label='precision')
plt.plot(log.Classifier, log['Recall Score'], color='blue', label='recall')
plt.plot(log.Classifier, log['F1-Score'], color='skyblue', label='F1')
plt.plot(log.Classifier, log['roc-auc_Score'], color='orange', label='ROC-AUC')
plt.legend() # show the legend
plt.show()


### 5.6 Scenario: normalisation + RF imputation + one-hot encoding
rf_he_data = convert_data(bank_train)
# BUG FIX: the imputed frame was assigned to random_forest_data and then
# ignored — hot_encoding ran on the *un-imputed* frame. Feed the imputed
# frame into the one-hot step as the section title promises.
random_forest_data = rdm_forest(rf_he_data)
rf_he_data = hot_encoding(random_forest_data)
train,label = dismantle_data(rf_he_data)
log = model_train(data_normalization(train),label)
# BUG FIX: removed the stray logs.append(logs) self-append; the result frame
# is appended once, after the classifier names are shortened.
log['Classifier'] = log['Classifier'].map(lambda x:algorithms[x])
logs.append(log)

plt.title('models compare')
plt.plot(log.Classifier, log.Accuracy, color='green', label='accuracy')
plt.plot(log.Classifier, log['Precision Score'], color='red', label='precision')
plt.plot(log.Classifier, log['Recall Score'], color='blue', label='recall')
plt.plot(log.Classifier, log['F1-Score'], color='skyblue', label='F1')
plt.plot(log.Classifier, log['roc-auc_Score'], color='orange', label='ROC-AUC')
plt.legend() # show the legend
plt.show()



##### 工作


##### 受教育情况


##### 前次联系月份


##### 前次联系星期


##### 前次营销结果


##### 是否有信用额度


##### 婚姻状况


##### 联系类型


##### 住房贷款情况


##### 个人贷款情况