# Compare ROC curves of the LightGBM model before and after hyperparameter tuning
import numpy as np
import pandas as pd
import os
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.preprocessing import scale, StandardScaler
from sklearn.model_selection import train_test_split, GridSearchCV, cross_val_score
from sklearn.metrics import confusion_matrix, accuracy_score, mean_squared_error, r2_score, roc_auc_score, roc_curve, classification_report
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.neural_network import MLPClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import GradientBoostingClassifier
from lightgbm import LGBMClassifier
from sklearn.model_selection import KFold

from sklearn.metrics import f1_score,precision_score,recall_score,roc_auc_score,accuracy_score,roc_curve
import matplotlib.pyplot as plt
from xgboost.sklearn import XGBClassifier
import lightgbm as lgb

# Load the engineered feature table and separate the binary target column.
data = pd.read_csv('../featureEngineering/featuredData.csv')
y = data['Outcome']
X = data.drop(columns=['Outcome'])

# Hold out 20% of the rows for evaluation; the fixed seed keeps the split reproducible.
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=1234
)

# --- Baseline LightGBM classifier (library-default hyper-parameters) ---
gbm = LGBMClassifier(random_state=12345)
gbm.fit(X_train, y_train)

# Hard class labels and per-class probabilities for the held-out test rows.
gbm_y_pre = gbm.predict(X_test)
gbm_y_proba = gbm.predict_proba(X_test)

# Standard binary-classification metrics on the label predictions;
# AUC is scored on the positive-class probability column.
gbm_accuracy_score, gbm_preci_score, gbm_recall_score, gbm_f1_score = (
    metric(y_test, gbm_y_pre)
    for metric in (accuracy_score, precision_score, recall_score, f1_score)
)
gbm_auc = roc_auc_score(y_test, gbm_y_proba[:, 1])

print('gbm_accuracy_score: %f,gbm_preci_score: %f,gbm_recall_score: %f,gbm_f1_score: %f,gbm_auc: %f'
      % (gbm_accuracy_score, gbm_preci_score, gbm_recall_score, gbm_f1_score, gbm_auc))



# --- Tuned LightGBM classifier ---
# BUG FIX: `gamma` is an XGBoost hyper-parameter that LightGBM does not
# recognize (it was silently ignored); the LightGBM equivalent is
# `min_split_gain` (minimum loss reduction required to make a split).
# NOTE(review): num_leaves=50 exceeds 2**max_depth = 32, so max_depth=5 is
# the binding constraint here — confirm these values against the tuning run.
plgbm_tuned = LGBMClassifier(
    learning_rate=0.01,
    max_depth=5,
    n_estimators=500,
    num_leaves=50,
    min_child_weight=12,   # LightGBM alias for min_sum_hessian_in_leaf
    min_split_gain=0.35,   # was gamma=0.35 (XGBoost parameter name)
)
plgbm_tuned.fit(X_train, y_train)

# Predictions and probabilities on the same held-out test set as the baseline.
plgbm_tuned_y_pre = plgbm_tuned.predict(X_test)
plgbm_tuned_y_proba = plgbm_tuned.predict_proba(X_test)

# Same metric suite as the baseline model for a side-by-side comparison.
plgbm_tuned_accuracy_score = accuracy_score(y_test, plgbm_tuned_y_pre)
plgbm_tuned_preci_score = precision_score(y_test, plgbm_tuned_y_pre)
plgbm_tuned_recall_score = recall_score(y_test, plgbm_tuned_y_pre)
plgbm_tuned_f1_score = f1_score(y_test, plgbm_tuned_y_pre)
plgbm_tuned_auc = roc_auc_score(y_test, plgbm_tuned_y_proba[:, 1])
print('plgbm_tuned_accuracy_score: %f,plgbm_tuned_preci_score: %f,plgbm_tuned_recall_score: %f,plgbm_tuned_f1_score: %f,plgbm_tuned_auc: %f'
      % (plgbm_tuned_accuracy_score, plgbm_tuned_preci_score, plgbm_tuned_recall_score, plgbm_tuned_f1_score, plgbm_tuned_auc))

# ROC curves for both models on the held-out test set (positive-class probabilities).
fpr11, tpr11, thres11 = roc_curve(y_test, gbm_y_proba[:, 1])
fpr22, tpr22, thres22 = roc_curve(y_test, plgbm_tuned_y_proba[:, 1])

plt.figure(figsize=(6, 6))
# BUG FIX: the legend previously reported accuracy — a ROC plot should be
# annotated with AUC. Also removed the 'b' format string, which conflicted
# with the explicit color= keyword on both calls.
plt.plot(fpr11, tpr11, color='r', label='LGBM AUC = %0.3f' % gbm_auc)
plt.plot(fpr22, tpr22, color='blue', label='PLGBM AUC = %0.3f' % plgbm_tuned_auc)
plt.plot([0, 1], [0, 1], linestyle='--', color='grey')  # chance (random-classifier) line
plt.xlim([0, 1])
plt.ylim([0, 1])
plt.ylabel('True Positive Rate')
plt.xlabel('False Positive Rate')
plt.tick_params(labelsize=15)
plt.legend()
plt.show()