#coding=utf-8
import lightgbm as lgb
import pandas as pd
import numpy as np
from sklearn.metrics import roc_curve, auc
from sklearn.metrics import accuracy_score
import matplotlib.pyplot as plt
from sklearn.metrics import auc
import pickle
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix, recall_score, precision_score
# Load the dataset, integer-encode the categorical columns, split into
# train/test, and build the LightGBM Dataset objects.
data = pd.read_csv('./data_labels-O-3.csv')

# pd.factorize maps each distinct category to a small integer code.
# NOTE(review): factorize encodes missing values as -1, which wraps to 65535
# under uint16 — confirm these columns contain no NaNs before relying on this.
for col in ('Sex', 'Department', 'Dx', 'Ane_type', 'Pre_Htn', 'Pre_Dm'):
    data[col] = pd.factorize(data[col])[0].astype(np.uint16)

X = data.iloc[:, 1:26]  # feature columns; column 0 is skipped (presumably an ID — verify)
print(X)
Y = data['labels']      # binary target column
print(Y)

seed = 4          # fixed random_state for a reproducible split
test_size = 0.34  # fraction of rows held out for evaluation
# train_test_split is already imported at the top of the file; the redundant
# duplicate import that used to sit here has been removed.
X_train, X_test, y_train, y_test = train_test_split(
    X, Y, test_size=test_size, random_state=seed)

categorical_feature = ['Sex', 'Opname', 'Department', 'Dx', 'Position',
                       'Ane_type', 'Pre_Htn', 'Emop', 'Pre_Dm']
# Build the LightGBM datasets; the eval set references the training set so
# both share the same feature bin boundaries.
lgb_train = lgb.Dataset(X_train, label=y_train, categorical_feature=categorical_feature)
lgb_eval = lgb.Dataset(X_test, y_test, reference=lgb_train)

# Baseline LightGBM configuration. Note: the training call below actually
# uses params1; this dict is kept for reference/experiments.
params = {
    'boosting_type': 'gbdt',
    'objective': 'binary',                # binary classification
    'metric': {'binary_logloss', 'auc'},  # binary log-loss + AUC on the eval set
    'num_leaves': 30,
    'max_depth': 7,
    'min_data_in_leaf': 100,
    'learning_rate': 0.1,
    'feature_fraction': 0.8,   # column subsampling ratio per tree
    'bagging_fraction': 0.8,   # row subsampling ratio
    'bagging_freq': 5,         # perform bagging every 5 iterations
    'lambda_l1': 1,
    'lambda_l2': 0.001,        # larger value -> stronger L2 regularization
    # BUG FIX: this key was misspelled 'n_gain_to_split', which LightGBM
    # silently ignores; the real parameter is 'min_gain_to_split'
    # (cf. params1/params2 below, which spell it correctly).
    'min_gain_to_split': 0.2,
    'verbose': 5,
    'is_unbalance': False,
}
# Tuned LightGBM configuration — this is the dict actually passed to
# lgb.train below.
params1 = dict(
    boosting_type='gbdt',
    objective='binary',
    metric={'binary_logloss', 'auc'},  # track log-loss and AUC on the eval set
    num_leaves=20,
    max_depth=11,
    min_data_in_leaf=101,
    max_bin=145,
    learning_rate=0.1,
    feature_fraction=0.8,
    bagging_fraction=0.8,
    bagging_freq=5,
    lambda_l1=0.001,
    lambda_l2=0.001,
    min_gain_to_split=0.0,
    verbose=5,
    is_unbalance=False,
)
# Alternative tuned configuration (not used by the training call below;
# kept for comparison experiments).
params2 = dict(
    boosting_type='gbdt',
    objective='binary',
    metric={'binary_logloss', 'auc'},
    num_leaves=20,
    max_depth=9,
    min_data_in_leaf=101,
    max_bin=145,
    learning_rate=0.1,
    feature_fraction=0.8,
    bagging_fraction=1,
    bagging_freq=2,
    lambda_l1=0.001,
    lambda_l2=0.001,
    min_gain_to_split=0.2,
    min_child_samples=15,
    min_child_weight=0.001,
    verbose=5,
    is_unbalance=False,
)
# Train the booster with early stopping on the held-out eval set.
print('Start training...')
# BUG FIX: the early_stopping_rounds keyword of lgb.train was removed in
# LightGBM 4.x; the callback form below is the supported replacement and is
# also available in LightGBM 3.x.
gbm = lgb.train(
    params1,
    lgb_train,
    num_boost_round=2000,
    valid_sets=[lgb_eval],
    callbacks=[lgb.early_stopping(stopping_rounds=50)],
)

print('Start predicting...')
# predict returns the positive-class probability for each test row; use the
# best iteration found by early stopping.
preds = gbm.predict(X_test, num_iteration=gbm.best_iteration)

# Sweep decision thresholds over the predicted probabilities and record
# confusion-matrix-derived metrics for each one.
thresholds = [0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5,
              0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9]

conf_List = []
precision_test_List = []
recall_test_List = []
f1_test_List = []
Specificity_list = []
for threshold in thresholds:
    # Binarize the predicted probabilities at the current threshold.
    y_pred_test = [1 if pred >= threshold else 0 for pred in preds]
    conf = confusion_matrix(y_test, y_pred_test)
    precision_test = precision_score(y_test, y_pred_test)
    recall_test = recall_score(y_test, y_pred_test)
    # Specificity = TN / (TN + FP), taken from the negative-class row.
    specificity = float(conf[0, 0]) / (conf[0, 0] + conf[0, 1])
    # BUG FIX: guard the 0/0 case — at extreme thresholds precision and
    # recall can both be 0, and the original raised ZeroDivisionError.
    denom = precision_test + recall_test
    f1_test = 2 * (precision_test * recall_test) / denom if denom else 0.0
    conf_List.append(conf)
    precision_test_List.append(precision_test)
    recall_test_List.append(recall_test)
    f1_test_List.append(f1_test)
    Specificity_list.append(specificity)
    # (removed an unused counter variable the original incremented here)

# Dump every metric list to a text report. NOTE(review): the file name is
# historical — it contains threshold metrics, not feature importances.
with open('./feature_importance1.csv', 'w+') as file:
    string = (str(conf_List) + '\n'
              + 'precision:' + str(precision_test_List) + '\n'
              + 'recall:' + str(recall_test_List) + '\n'
              + 'f1_test:' + str(f1_test_List) + '\n'
              + 'specificity:' + str(Specificity_list) + '\n')
    file.write(string)
# Compute and display the test-set ROC curve annotated with its AUC.
y_pred_keras_test = gbm.predict(X_test).ravel()
fpr_keras_test, tpr_keras_test, thresholds_keras_test = roc_curve(
    y_test, y_pred_keras_test)
auc_keras_test = auc(fpr_keras_test, tpr_keras_test)

fig_roc_test = plt.figure(figsize=(10, 5))
ax = plt.gca()
ax.plot([0, 1], [0, 1], 'g--')  # chance-level diagonal for reference
ax.plot(fpr_keras_test, tpr_keras_test,
        label='ROC(auc = {:.3f})'.format(auc_keras_test))
ax.set_xlabel('False positive rate')
ax.set_ylabel('True positive rate')
ax.set_title('ROC curve_test')
ax.legend(loc='best')
plt.show()

import shap
shap.initjs()
# SHAP explanations of the trained booster over the training features.
explainer = shap.TreeExplainer(gbm)
shap_values = explainer.shap_values(X_train)
# visualize one prediction's explanation (use matplotlib=True to avoid Javascript)
# NOTE(review): indexing expected_value[1] / shap_values[1] assumes the
# explainer returns a per-class list (binary classifier) — confirm for the
# installed shap version, which may return a single array instead.
shap.force_plot(explainer.expected_value[1], shap_values[1][11], X_train.iloc[11,:])
fig3 = plt.figure()
# Summary (beeswarm) plot of positive-class SHAP values, up to 40 features.
shap.summary_plot(shap_values[1],X_train,max_display=40)
plt.tight_layout()
# NOTE(review): assumes './VitalDB' already exists — savefig does not create
# parent directories.
fig3.savefig('./VitalDB/summary_plot1',dpi=200, bbox_inches='tight')

# Per-feature SHAP dependence plots, one image file per feature column.
import shap  # package used to calculate Shap values
import os

ax_save_path = './VitalDB/shap_dependence/'
# BUG FIX: os.mkdir fails when the parent './VitalDB' does not exist, and the
# exists()/mkdir() pair is race-prone; makedirs with exist_ok covers both.
os.makedirs(ax_save_path, exist_ok=True)

shap_values = shap.TreeExplainer(gbm).shap_values(X_train)
# Some shap versions return [class0, class1] for binary models; keep the
# positive-class contributions in that case.
if len(shap_values) == 2:
    shap_values = shap_values[1]

for column in list(X.columns):
    fig1 = plt.figure(figsize=(8, 5))
    ax = plt.gca()
    shap.dependence_plot(column, shap_values, X_train, ax=ax, show=False)
    fig1.savefig(ax_save_path + column + '.jpg', dpi=200, bbox_inches='tight')
    plt.clf()  # reset pyplot state before the next feature's figure