#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Sep 12 14:35:01 2021

@author: yaoyifan
"""

import pandas as pd
import seaborn as sns
import numpy as np
import matplotlib.pyplot as plt 
import warnings
# Silence library deprecation/future warnings for cleaner output.
warnings.filterwarnings("ignore")

# Load the competition data and keep untouched copies for later reference.
train, test = pd.read_csv("train.csv"), pd.read_csv("test.csv")
train_original, test_original = train.copy(), test.copy()

# Visualize the variables, starting with the target: share of approved (Y)
# vs rejected (N) loans.
# FIX: value_counts(normalize=...) expects a bool; the string "true" only
# produced normalized output by accident of truthiness.
train["Loan_Status"].value_counts(normalize=True).plot.bar()
plt.show()

# Independent variables (categorical): normalized frequency bar charts in
# a 2x2 grid.
plt.figure(1)
cat_cols = ['Gender', 'Married', 'Self_Employed', 'Credit_History']
for idx, col in enumerate(cat_cols):
    plt.subplot(2, 2, idx + 1)
    # As in the original, figsize is passed only on the first plot call.
    extra = {'figsize': (20, 10)} if idx == 0 else {}
    train[col].value_counts(normalize=True).plot.bar(title=col, **extra)
plt.show()

# Independent variables (ordinal): normalized frequency bar charts in a
# 1x3 row.
plt.figure(1)
ord_cols = ['Dependents', 'Education', 'Property_Area']
for idx, col in enumerate(ord_cols):
    plt.subplot(1, 3, idx + 1)
    # As in the original, figsize is passed only on the first plot call.
    extra = {'figsize': (24, 6)} if idx == 0 else {}
    train[col].value_counts(normalize=True).plot.bar(title=col, **extra)
plt.show()

# Numerical variable ApplicantIncome: distribution (left) and box plot for
# outliers (right).
income = train['ApplicantIncome']
plt.figure(1)
plt.subplot(121)
sns.distplot(income)
plt.subplot(122)
income.plot.box(figsize=(16, 5))
plt.show()

# ApplicantIncome broken down by Education level.
# FIX: removed a stray trailing comma that wrapped the boxplot call in a
# throwaway tuple, dropped the pointless `p=` capture of suptitle's return,
# and added plt.show() so this figure is rendered like every other section.
train.boxplot(column='ApplicantIncome', by='Education')
plt.suptitle("")
plt.show()

# Numerical variable CoapplicantIncome: distribution and box plot, then a
# quartile summary.
coinc = train["CoapplicantIncome"]
plt.figure(1)
plt.subplot(121)
sns.distplot(coinc)
plt.subplot(122)
coinc.plot.box(figsize=(14, 6))
plt.show()

coinc.quantile([0, 0.25, 0.50, 0.75, 1])

# Numerical variable LoanAmount: distribution (NaN rows dropped only for
# the KDE) and box plot.
plt.figure(1)
plt.subplot(121)
complete_rows = train.dropna()
sns.distplot(complete_rows['LoanAmount'])
plt.subplot(122)
train['LoanAmount'].plot.box(figsize=(16, 5))
plt.show()

# Gender vs Loan_Status: row-normalized stacked approval shares.
# FIX: the table was normalized twice — once via apply(x/x.sum(), axis=1)
# and again via div() over row sums (which were already 1.0). A single
# normalization gives the same plot and matches the sibling sections.
Gender = pd.crosstab(train['Gender'], train['Loan_Status'])
Gender.div(Gender.sum(1).astype(float), axis=0).plot(kind="bar", stacked=True, figsize=(10, 4))
plt.show()

# Married vs Loan_Status: row-normalized stacked approval shares.
Married = pd.crosstab(train['Married'], train['Loan_Status'])
married_share = Married.div(Married.sum(1).astype(float), axis=0)
married_share.plot(kind="bar", stacked=True, figsize=(10, 4))
plt.show()

# Dependents vs Loan_Status: row-normalized stacked approval shares.
Dependents = pd.crosstab(train['Dependents'], train['Loan_Status'])
(Dependents.div(Dependents.sum(1).astype(float), axis=0)
 .plot(kind="bar", stacked=True, figsize=(10, 4)))
plt.show()

# Education vs Loan_Status: row-normalized stacked approval shares.
Education = pd.crosstab(train['Education'], train['Loan_Status'])
education_share = Education.div(Education.sum(1).astype(float), axis=0)
education_share.plot(kind="bar", stacked=True, figsize=(10, 4))
plt.show()

# Self_Employed vs Loan_Status: row-normalized stacked approval shares.
Self_Employed = pd.crosstab(train['Self_Employed'], train['Loan_Status'])
(Self_Employed.div(Self_Employed.sum(1).astype(float), axis=0)
 .plot(kind="bar", stacked=True, figsize=(10, 4)))
plt.show()

# Credit_History vs Loan_Status: row-normalized stacked approval shares.
Credit_History = pd.crosstab(train['Credit_History'], train['Loan_Status'])
credit_share = Credit_History.div(Credit_History.sum(1).astype(float), axis=0)
credit_share.plot(kind="bar", stacked=True, figsize=(10, 4))
plt.show()

# Property_Area vs Loan_Status: row-normalized stacked approval shares.
Property_Area = pd.crosstab(train['Property_Area'], train['Loan_Status'])
(Property_Area.div(Property_Area.sum(1).astype(float), axis=0)
 .plot(kind="bar", stacked=True, figsize=(15, 4)))
plt.show()

# ApplicantIncome vs Loan_Status.
# Mean income per outcome class.
# FIX: the original computed this mean twice, discarding the first result.
train.groupby('Loan_Status')['ApplicantIncome'].mean().plot.bar()
plt.show()

# Bucket applicant income and compare approval shares per bucket.
bins = [0, 2500, 4000, 6000, 81000]
group = ['Low', 'Average', 'High', 'Very high']
train['Income_bin'] = pd.cut(train['ApplicantIncome'], bins, labels=group)
Income_bin = pd.crosstab(train['Income_bin'], train['Loan_Status'])
Income_bin.div(Income_bin.sum(1).astype(float), axis=0).plot(kind="bar", stacked=True, figsize=(15, 5))
plt.xlabel('ApplicantIncome')
plt.ylabel('Percentage')
# FIX: plt.show() was commented out, so this figure was never rendered on
# its own; also removed the no-op bare expression and `p=` capture.
plt.show()

# CoapplicantIncome vs Loan_Status: bucket the income and compare approval
# shares per bucket.
co_bins = [0, 1000, 3000, 42000]
co_labels = ['Low', 'Average', 'High']
train['CoapplicantIncome_bin'] = pd.cut(train['CoapplicantIncome'], co_bins, labels=co_labels)
Coapplicant_bin = pd.crosstab(train["CoapplicantIncome_bin"], train['Loan_Status'])
coapp_share = Coapplicant_bin.div(Coapplicant_bin.sum(1).astype(float), axis=0)
coapp_share.plot(kind='bar', stacked=True, figsize=(10, 4))
plt.xlabel('CoapplicantIncome')
plt.ylabel('Percentage')
plt.show()

# Total (applicant + coapplicant) income vs Loan_Status, bucketed.
train['Total_Income'] = train['ApplicantIncome'] + train['CoapplicantIncome']
total_bins = [0, 2500, 4000, 6000, 81000]
total_labels = ['Low', 'Average', 'High', 'Very High']
train['TotalIncome_bin'] = pd.cut(train['Total_Income'], total_bins, labels=total_labels)
TotalIncome_bin = pd.crosstab(train['TotalIncome_bin'], train['Loan_Status'])
(TotalIncome_bin.div(TotalIncome_bin.sum(1).astype(float), axis=0)
 .plot(kind='bar', stacked=True, figsize=(10, 4)))
plt.xlabel('Total_Income')
plt.ylabel('Percentage')
plt.show()

# LoanAmount vs Loan_Status, bucketed.
amount_bins = [0, 100, 200, 700]
amount_labels = ['Low', 'Average', 'High']
train['LoanAmount_bin'] = pd.cut(train['LoanAmount'], amount_bins, labels=amount_labels)
LoanAmount_bin = pd.crosstab(train['LoanAmount_bin'], train['Loan_Status'])
amount_share = LoanAmount_bin.div(LoanAmount_bin.sum(1).astype(float), axis=0)
amount_share.plot(kind='bar', stacked=True)
plt.xlabel('Loan_Amount')
plt.ylabel('Percentage')
plt.show()

# Drop the helper bin/total columns that were created purely for the
# plots above.
helper_cols = ['Income_bin', 'CoapplicantIncome_bin', 'LoanAmount_bin',
               'TotalIncome_bin', 'Total_Income']
train = train.drop(helper_cols, axis=1)

# Encode categorical levels as numbers so they appear in the correlation
# matrix.
# NOTE(review): '3+' is replaced with the int 3 while the other Dependents
# levels remain strings ('0','1','2'), leaving a mixed-type column —
# confirm downstream get_dummies/corr handle this as intended.
train['Dependents'].replace('3+',3, inplace=True)
test['Dependents'].replace('3+',3, inplace=True)
train['Loan_Status'].replace('N',0, inplace=True)
train['Loan_Status'].replace('Y',1, inplace=True)
# Pairwise correlation heatmap of the numeric features.
# NOTE(review): newer pandas requires numeric_only=True for corr() on
# frames with object columns — verify against the pinned pandas version.
matrix = train.corr() 
plt.subplots(figsize=(9, 6)) 
sns.heatmap(matrix, vmax=0.8, square=True, cmap="BuPu");

# Inspect missing values in the training set.
train.isnull().sum()

# Fill categorical/discrete gaps in the training set with the mode.
for col in ['Gender', 'Married', 'Dependents', 'Self_Employed', 'Credit_History']:
    train[col].fillna(train[col].mode()[0], inplace=True)
train.isnull().sum()

# Loan_Amount_Term is dominated by a single value, so the mode is a
# sensible fill; check the distribution before and after.
train['Loan_Amount_Term'].value_counts()
train['Loan_Amount_Term'].fillna(train['Loan_Amount_Term'].mode()[0], inplace=True)
train['Loan_Amount_Term'].value_counts()

# LoanAmount is skewed with outliers, so use the median instead of the mean.
train['LoanAmount'].fillna(train['LoanAmount'].median(), inplace=True)
train.isnull().sum()

# Apply the same imputation strategy to the test set: mode for the
# categorical/term columns, median for LoanAmount.
for col in ['Gender', 'Married', 'Dependents', 'Self_Employed',
            'Credit_History', 'Loan_Amount_Term']:
    test[col].fillna(test[col].mode()[0], inplace=True)
test['LoanAmount'].fillna(test['LoanAmount'].median(), inplace=True)

test.isnull().sum()

# Log-transform LoanAmount to tame its right skew before modelling.
for frame in (train, test):
    frame['LoanAmount_log'] = np.log(frame['LoanAmount'])
train['LoanAmount_log'].hist(bins=20)
plt.show()

# Loan_ID is a pure identifier with no predictive value — drop it.
train = train.drop('Loan_ID', axis=1)
test = test.drop('Loan_ID', axis=1)

# Separate the feature matrix from the target.
x = train.drop('Loan_Status', axis=1)
y = train.Loan_Status

# One-hot encode the nominal variables (dummy variables) for the models.
x = pd.get_dummies(x)
train = pd.get_dummies(train)
test = pd.get_dummies(test)

# Hold out 30% of the training data for validation.
from sklearn.model_selection import train_test_split
# FIX: pin random_state so the split is reproducible and consistent with
# the random_state=1 split used for the grid search later in the script.
x_train, x_cv, y_train, y_cv = train_test_split(x, y, test_size=0.3, random_state=1)

# Baseline model: logistic regression on the hold-out split.
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
model = LogisticRegression()
model.fit(x_train, y_train)
pred_cv = model.predict(x_cv)
# FIX: the rounded accuracy was computed but discarded (a bare expression
# does nothing when run as a script) — print it so the result is visible.
print('Logistic regression accuracy:', round(accuracy_score(y_cv, pred_cv), 2))
pred_test = model.predict(test)


# Decision tree evaluated with 5-fold stratified cross-validation.
from sklearn.model_selection import StratifiedKFold
from sklearn import tree
skf = StratifiedKFold(n_splits=5, random_state=1, shuffle=True)
for fold, (train_index, test_index) in enumerate(skf.split(x, y), start=1):
    print('\n{} of K fold {}'.format(fold, skf.n_splits))
    xtr, xvl = x.loc[train_index], x.loc[test_index]
    ytr, yvl = y.loc[train_index], y.loc[test_index]
    model = tree.DecisionTreeClassifier(random_state=1)
    model.fit(xtr, ytr)
    # FIX: use a distinct name for validation predictions instead of
    # clobbering pred_test with two different meanings each iteration.
    pred_vl = model.predict(xvl)
    print('Accuracy Score', accuracy_score(yvl, pred_vl))
# FIX: predict on the test set once after CV — the original predicted
# inside the loop every fold and kept only the last result anyway.
pred_test = model.predict(test)

# Random forest evaluated with 5-fold stratified cross-validation.
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import accuracy_score
skf = StratifiedKFold(n_splits=5, random_state=1, shuffle=True)
for fold, (train_index, test_index) in enumerate(skf.split(x, y), start=1):
    print('\n{} of K fold {}'.format(fold, skf.n_splits))
    xtr, xvl = x.loc[train_index], x.loc[test_index]
    ytr, yvl = y.loc[train_index], y.loc[test_index]
    model = RandomForestClassifier(random_state=1, max_depth=10)
    model.fit(xtr, ytr)
    # FIX: separate validation predictions from test-set predictions.
    pred_vl = model.predict(xvl)
    print('Accuracy Score', accuracy_score(yvl, pred_vl))
# FIX: test-set prediction hoisted out of the loop — the per-fold result
# was overwritten every iteration.
pred_test = model.predict(test)
    
# Hyper-parameter tuning: grid search with cross-validation.
from sklearn.model_selection import GridSearchCV

# max_depth from 1 to 19 step 2; n_estimators from 1 to 181 step 20.
paramgrid = {'max_depth': list(range(1, 20, 2)), 'n_estimators': list(range(1, 200, 20))}
grid_search = GridSearchCV(RandomForestClassifier(random_state=1), paramgrid)

from sklearn.model_selection import train_test_split
x_train, x_cv, y_train, y_cv = train_test_split(x, y, test_size=0.3, random_state=1)

# Fit the grid search on the training portion.
grid_search.fit(x_train, y_train)
# FIX: best_estimator_ as a bare expression is silently discarded when
# run as a script — print it so the chosen hyper-parameters are visible.
print(grid_search.best_estimator_)

# K-fold cross-validation: split the data into K folds, use each fold once
# as the validation set with the remaining K-1 folds as training data, and
# average the per-fold scores. Here the forest uses the tuned parameters
# (max_depth=7, n_estimators=41).
skf = StratifiedKFold(n_splits=5, random_state=1, shuffle=True)
for fold, (train_index, test_index) in enumerate(skf.split(x, y), start=1):
    print('\n{} of kfold {}'.format(fold, skf.n_splits))
    xtr, xvl = x.loc[train_index], x.loc[test_index]
    ytr, yvl = y[train_index], y[test_index]
    model = RandomForestClassifier(random_state=1, max_depth=7, n_estimators=41)
    model.fit(xtr, ytr)
    pred_vl = model.predict(xvl)
    print('accuracy_score', accuracy_score(yvl, pred_vl))
# FIX: test-set class/probability predictions hoisted out of the loop —
# the per-fold results were overwritten every iteration; only the last
# fitted model's output was ever kept.
pred_test = model.predict(test)
pred2 = model.predict_proba(test)[:, 1]

# Which features drive the tuned forest's decisions.
importances = pd.Series(model.feature_importances_, index=x.columns)
importances.plot(kind='barh', figsize=(12, 8))
plt.show()

# AdaBoost (decision-tree base learner) with 5-fold stratified CV.
from sklearn.ensemble import AdaBoostClassifier
from sklearn.tree import DecisionTreeClassifier
skf = StratifiedKFold(n_splits=5, random_state=1, shuffle=True)
for fold, (train_index, test_index) in enumerate(skf.split(x, y), start=1):
    print('\n{} of k fold {}'.format(fold, skf.n_splits))
    xtr, xvl = x.loc[train_index], x.loc[test_index]
    ytr, yvl = y.loc[train_index], y.loc[test_index]
    dt = DecisionTreeClassifier()
    clf = AdaBoostClassifier(n_estimators=100, base_estimator=dt, learning_rate=1)
    clf.fit(xtr, ytr)
    # BUG FIX: the original scored `model` — the random forest left over
    # from the previous section — instead of the freshly trained AdaBoost
    # `clf`, so the reported accuracies never measured AdaBoost at all.
    pred_vl = clf.predict(xvl)
    print('Accuracy Score', accuracy_score(yvl, pred_vl))
# Same fix for the final test-set prediction (also hoisted out of the loop).
pred_test = clf.predict(test)
    
