# -*- coding: utf-8 -*-
"""
Created on Thu Jul  1 18:31:04 2021

@author: helai.bai
"""

from sko.GA import GA

import numpy as np
import pandas as pd
import lightgbm as lgb
import scorecardpy as sc
from sklearn.model_selection import train_test_split

import os
os.chdir(r'C:\Users\helai.bai\Desktop\shuju\code\ever30_all_y')

from selector_2 import variable_filter

def reg(rex, col_list):
    """Return the names in *col_list* whose text matches the regex *rex*.

    Parameters
    ----------
    rex : str
        Regular expression searched (not anchored) in each column name.
    col_list : iterable of str
        Column names to filter (e.g. ``df.columns``).

    Returns
    -------
    list of str
        The matching names, in their original order.
    """
    import re
    # `re.search` returns None on no match; rely on `is not None` rather
    # than the original `!= None` comparison (PEP 8).
    return [name for name in col_list if re.search(rex, name) is not None]

# ---- Data loading & preparation -------------------------------------------
# NOTE(review): hard-coded absolute Windows paths — this only runs on the
# author's machine; consider externalising the paths.
train = pd.read_csv(r'C:\Users\helai.bai\Desktop\众安联合建模\交付\数据\features_model_30.csv')
valid = pd.read_csv(r'C:\Users\helai.bai\Desktop\众安联合建模\交付\数据\features_valid1_30.csv')

# Label file: contains the target column 'all_y' keyed by 'loan_inner_no'.
flag_df = pd.read_excel(r'C:\Users\helai.bai\Desktop\众安联合建模\交付\数据\众安样本标签.xlsx', engine='openpyxl')

# Attach the label by loan id, then drop the join key.
train = train.merge(flag_df[['all_y', 'loan_inner_no']], how='left', on='loan_inner_no').drop(columns=['loan_inner_no'])
valid = valid.merge(flag_df[['all_y', 'loan_inner_no']], how='left', on='loan_inner_no').drop(columns=['loan_inner_no'])

# Standardise the label column name to 'flagy'.
train = train.rename(columns={'all_y':'flagy'})
valid = valid.rename(columns={'all_y':'flagy'})

# Drop indeterminate samples (label == -1).
train = train[train['flagy'] != -1]
valid = valid[valid['flagy'] != -1]

# Manually excluded columns (identifier-/category-like fields).
drops = ["pd_cell_city","pd_id_city","pd_id_where","pd_cell_province","tl_id_eletail_lasttype",
         "tl_id_eletail_lasttime","tl_cell_eletail_lasttime","tl_cell_eletail_lasttype",
          "mma_var1","mma_var2","pd_cell_type","mma_var3","mma_var100","mma_var101",
          "mma_var201","mma_var202","mma_var302","mma_var303","frg_group_num",
          "pef_paypower_prov","pef_bus_type","pef_ind_cate1","pef_ind_cate2"]
data_thin = train[train.columns.drop(drops)]
# Drop whole feature families matched by name pattern (see reg() above).
col = reg('inteday|amount|passnum|neworgnum|newallnum|week|night|time|allorgnum|oth|else|tot_mons|orgType',data_thin.columns)
data_thin1 = data_thin[data_thin.columns.drop(col)]
# Ordinal-encode the letter grades A..J -> 1..10; unmapped values become NaN.
# NOTE(review): attribute-style assignment on a sliced DataFrame can trigger
# SettingWithCopyWarning; bracket/.loc assignment would be safer.
data_thin1.cmec_conservative = data_thin1.cmec_conservative.map({'A':1,'B':2,'C':3,'D':4,'E':5,'F':6,'G':7,'H':8,'I':9,'J':10})
data_thin1.cmec_radical = data_thin1.cmec_radical.map({'A':1,'B':2,'C':3,'D':4,'E':5,'F':6,'G':7,'H':8,'I':9,'J':10})
# Variable selection via the project-local helper; both thresholds are 0 —
# presumably this disables those filters, verify against selector_2.
data_thin2 = variable_filter(data_thin1, y = "flagy", correlation_threshold = 0, timing_sequencial_psi_threshold = 0)

Y = data_thin2['flagy']
X = data_thin2.drop('flagy',axis=1)

# 70/30 train/test split with a fixed seed for reproducibility.
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size = 0.3, random_state=123)

####################################  Hyper-parameter optimization (GA) ##########################

from sklearn.metrics import roc_auc_score, roc_curve

# Hyper-parameters that must be cast to int before being passed to LightGBM.
int_params = ['num_leaves', 'min_data_in_leaf', 'max_bin', 'max_depth', 'n_estimators', 'bagging_freq']

def objective(p):
    """GA fitness function.

    Trains a LightGBM classifier with the candidate parameter vector *p*
    (ordered as in ``params_df['arg']``) and returns a loss that rewards
    test AUC while penalising the train/test AUC gap (overfitting).
    """
    # Pair each candidate value with its parameter name, casting integers.
    params = {}
    for name, raw in zip(params_df['arg'], p):
        params[name] = int(raw) if name in int_params else raw

    clf = lgb.LGBMClassifier(n_jobs = -1, objective = 'binary', random_state = 123, importance_type = 'gain', **params)
    clf.fit(X_train, Y_train)

    auc1 = roc_auc_score(Y_train, clf.predict_proba(X_train)[:, 1])
    auc2 = roc_auc_score(Y_test, clf.predict_proba(X_test)[:, 1])

    # Overfit penalty: grows exponentially once the AUC gap exceeds ~0.02.
    gap = abs(auc1 - auc2)
    penalty = 0.02 * (10 ** ((gap - 0.02) * 10))
    loss = penalty - auc2

    print('\f')
    print(auc1, auc2)
    print(loss)
    return loss

# GA search space: one row per hyper-parameter, giving its lower bound,
# upper bound and search precision (step size).
_search_space = [
    ('num_leaves', 4, 20, 1),
    ('learning_rate', 0.01, 0.3, 0.0001),
    ('bagging_freq', 0, 5, 1),
    ('feature_fraction', 0.5, 1, 0.001),
    ('bagging_fraction', 0.5, 1, 0.001),
    ('min_data_in_leaf', 50, 500, 1),
    ('max_bin', 2, 10, 1),
    ('reg_alpha', 0, 1, 0.001),
    ('reg_lambda', 0, 1, 0.001),
    ('max_depth', 3, 8, 1),
    ('n_estimators', 30, 100, 1),
]
params_df = pd.DataFrame(_search_space, columns=['arg', 'lb', 'ub', 'precision'])

# Run the genetic algorithm over the search space defined in params_df.
ga = GA(func=objective, n_dim=len(params_df), size_pop=50, max_iter=80, prob_mut=0.001, lb=params_df['lb'].values.tolist(), ub=params_df['ub'].values.tolist(), precision=params_df['precision'].values.tolist())
best_params = ga.run()
# Best individual of each generation — inspection only, value is discarded.
ga.generation_best_X
print(best_params)

# NOTE(review): the GA result above is immediately overwritten by this
# hard-coded vector (presumably from an earlier run) — confirm intentional.
best_params = [4.00000000e+00, 3.48571429e-02, 4.00000000e+00, 6.44814090e-01,
        6.06653620e-01, 5.00000000e+02, 3.00000000e+00, 6.82306940e-01,
        9.96089932e-01, 8.00000000e+00, 1.00000000e+02]
# Map the values back onto parameter names, casting integer-valued params.
best_params = {key:int(value) if key in int_params else value for key, value in zip(params_df['arg'], best_params)}
best_params  # no-op expression; only useful in an interactive session

# Train the model with the chosen hyper-parameters.
best_bayes_model = lgb.LGBMClassifier(n_jobs = -1, objective = 'binary', random_state = 123, importance_type = 'gain', **best_params)
# NOTE(review): the `verbose` keyword of fit() was removed in lightgbm 4.x;
# newer versions need log callbacks instead — confirm the pinned version.
best_bayes_model.fit(X_train, Y_train, eval_metric='auc', verbose=2)

# Model evaluation on the train/test split.
lgb_train_preds = best_bayes_model.predict_proba(X_train)[:, 1]
lgb_test_preds = best_bayes_model.predict_proba(X_test)[:, 1]
# get the ks
from sklearn.metrics import roc_curve
fpr_train, tpr_train, _ = roc_curve(np.array(Y_train), lgb_train_preds)
fpr_test,  tpr_test,  _ = roc_curve(np.array(Y_test),  lgb_test_preds)

# KS statistic = maximum separation between the TPR and FPR curves.
ks_train = max(tpr_train - fpr_train)
ks_test = max(tpr_test - fpr_test)
print("KS(Train): %f" % ks_train)
print("KS(Test): %f" % ks_test)
# get the auc
print("AUC Score(Train): %f" % roc_auc_score(Y_train, lgb_train_preds))
print("AUC Score(Test): %f" % roc_auc_score(Y_test, lgb_test_preds))

################################## Feature Importance #####################################
# Rank features by gain importance and keep only those with positive gain.
feature_imp = pd.Series(best_bayes_model.feature_importances_)
feature_name = pd.Series(X_train.columns)
feature_df = pd.DataFrame({'feature_name': feature_name, 'element': feature_imp})
feature_df = feature_df.sort_values(by='element', ascending=False)
feature_df = feature_df[feature_df['element'] > 0]

######################################### Iterative feature pruning ##################################
# Fixed hyper-parameters used during the pruning loop.
# NOTE(review): reg_alpha/reg_lambda (~49.5 / ~41.3) lie far outside the GA
# search range [0, 1] defined above — confirm these values are intentional.
best_bayes_params = {'bagging_fraction': 0.558,
  'bagging_freq': 2,
  'boosting_type': 'gbdt',
  'feature_fraction': 0.5515,
  'importance_type': 'gain',
  'learning_rate': 0.0332,
  'max_bin': 8,
  'max_depth': 3,
  'min_data_in_leaf': 174,
  'n_estimators': 91,
  'num_leaves': 13,
  'reg_alpha': 49.5049,
  'reg_lambda': 41.3374}

# best_bayes_params = best_params

# Start from the features with positive importance in the tuned model.
var = feature_df['feature_name'].values
X_train_final = X_train[var]
### Retrain repeatedly, dropping zero-importance features, until every
### remaining feature has positive gain importance.
while True:
    X_test_final = X_test[X_train_final.columns]

    gbm = lgb.LGBMClassifier(n_jobs = -1, objective = 'binary', **best_bayes_params)
    gbm.fit(X_train_final, Y_train)
    print('train over')
    feature_imp = pd.Series(gbm.feature_importances_)
    feature_name = pd.Series(gbm.booster_.feature_name())
    feature_df = pd.DataFrame({'feature_name': feature_name, 'element': feature_imp})
    feature_df = feature_df.sort_values(by='element', ascending=False)
    feature_df = feature_df[feature_df['element'] > 0]
    if len(feature_imp) == len(feature_df):
        # Every remaining feature has positive importance -> converged.
        break
    else:
        # Drop the zero-importance features and retrain.
        X_train_final = X_train[feature_df.feature_name.tolist()]
####################################### Model evaluation ##########################
bst = gbm  # final pruned model from the loop above
train_class_pred = bst.predict_proba(X_train_final)[:,1]
test_class_pred = bst.predict_proba(X_test_final)[:,1]

# get the ks
from sklearn.metrics import roc_curve
fpr_train, tpr_train, thresholds_test = roc_curve(np.array(Y_train), train_class_pred)
fpr_test, tpr_test, thresholds_test = roc_curve(np.array(Y_test), test_class_pred)

# KS statistic = maximum separation between the TPR and FPR curves.
ks_train = max(tpr_train - fpr_train)
ks_test = max(tpr_test - fpr_test)
print("KS(Train): %f" % ks_train)
print("KS(Test): %f" % ks_test)

# get the auc
from sklearn.metrics import roc_auc_score
print("AUC Score(Train): %f" % roc_auc_score(Y_train, train_class_pred))
print("AUC Score(Test): %f" % roc_auc_score(Y_test, test_class_pred))

# KS / ROC performance plots for the train and test splits.
sc.perf_eva(Y_train, train_class_pred, title = "train")
sc.perf_eva(Y_test, test_class_pred, title = "test")

####################################### Out-of-time validation ##########################
# Apply the same ordinal encoding (A..J -> 1..10) used on the training data.
valid.cmec_conservative = valid.cmec_conservative.map({'A':1,'B':2,'C':3,'D':4,'E':5,'F':6,'G':7,'H':8,'I':9,'J':10})
valid.cmec_radical = valid.cmec_radical.map({'A':1,'B':2,'C':3,'D':4,'E':5,'F':6,'G':7,'H':8,'I':9,'J':10})
# feature_name holds the final model's feature list (set in the pruning loop).
X_valid = valid[feature_name]
Y_valid = valid['flagy']
valid_preds = bst.predict_proba(X_valid)[:, 1]
#### Validation metrics: KS and AUC.
from sklearn.metrics import roc_curve
from sklearn.metrics import roc_auc_score
fpr_valid, tpr_valid, thresholds_valid = roc_curve(np.array(Y_valid), valid_preds)
ks_valid = max(tpr_valid - fpr_valid)
# Fixed mislabeled output: these are validation-set metrics, but the
# original printed "KS(yz1)" and "AUC Score(Train)".
print("KS(Valid): %f" % ks_valid)
print("AUC Score(Valid): %f" % roc_auc_score(Y_valid, valid_preds))
#### KS / ROC performance plot for the out-of-time sample.
sc.perf_eva(Y_valid, valid_preds, title = "oot")
