# -*- coding: utf-8 -*-
"""
Created on Tue Jun  6 17:16:21 2023

@author: ying.tu
"""


#!/usr/bin/env python
# coding: utf-8


#%%

'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''

''''''''''''''''''''''''     一、加载python包       ''''''''''''''''''''''''''''''

'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''

# Third-party and project-local imports.
# NOTE: duplicate import lines (numpy/pandas and the lightgbm_tool/data_handle
# group were each imported twice) have been removed; first-occurrence order is
# preserved so star-import name shadowing is unchanged.
import joblib
import pandas as pd
import numpy as np
import time
import datetime
import copy
import sys
sys.path.append(r'C:\Users\ying.tu\Desktop\模型\lightGBM_V5')  # adjust to the local package path
import perform as pf
import lightgbm as lgb
from sklearn.feature_selection import RFE
from hyperopt import fmin, tpe, hp, Trials
from lightgbm.sklearn import LGBMClassifier
from lightgbm_tool import *
from lightgbm_tool import kfold_objective, tune_n_estimators_learning_rate, plot_n_estimators_learning_rate, tune_feature_num, LGBModelTuner
from data_handle import train_test_split, del_nan, del_mode, slim, del_cat, get_dummied, get_hit_indices,read_from, get_flags, get_sorted_feature
from data_handle import *
import matplotlib.pyplot as plt
import seaborn as sns
import toad
#%%
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''

''''''''''''''''''''''''     二、导入数据       ''''''''''''''''''''''''''''''
      
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
work_path=r'E:\模型开发\浦发银行多头分半监督'  # project working directory
import os
os.chdir(work_path)
# Load the modelling sample; features start at column index 11
# (columns 0-10 are assumed to be meta/label columns -- TODO confirm csv layout).
Data = pd.read_csv(r'E:\模型开发\浦发银行多头分半监督\modeldata.csv',encoding='utf-8')

X=  Data.iloc[:,11:]
Y = Data['y'].astype('int')
Y.mean()  # quick look at the overall bad rate



# If the sample bad rate is below 0.05, set scale_pos_weight so the weighted
# positives correspond to a 5% bad rate (used later in LightGBM training to
# handle class imbalance); otherwise leave the weight at 1.
if Y.mean()<0.05:
    scale_pos_weight = 0.05*len(Y)/len(np.where(Y==1)[0])
else:
    scale_pos_weight = 1

# Drop columns with too many missing values / a dominant single value / too many
# categories, downcast dtypes to save memory, then one-hot encode what remains
# (all helpers come from data_handle).
 
X = del_nan(X, nan_ratio_threshold=0.90) 
X = del_mode(X, mode_ratio_threshold=0.95)
X = slim(X)
Y = slim(Y)        
X = del_cat(X, cat_threshold=10)
X = get_dummied(X)

# Placeholder "hit" index set: currently keeps every row; presumably meant to be
# replaced by get_hit_indices(...) when flag-based filtering is enabled -- TODO confirm.
hit_indices = range(0,1000000)
# Train/test split (30% held out)
X, X_test, Y, Y_test = train_test_split(X, Y, test_size=0.3, random_state=17)


#%%
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''

''''''''''''''''''''''''     三、初步确定chosen_feature，通过cv确定n_estimators  ''''''''''''''''''''''''''''''
      
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
# Train only on samples whose flags are not all zero (here: every row)
X = X.loc[X.index.isin(hit_indices)]
Y = Y.loc[Y.index.isin(hit_indices)]

# NOTE(review): `silent=` was deprecated and later removed from lgb.Dataset --
# confirm the pinned lightgbm version still accepts it.
lgbm_train = lgb.Dataset(X, Y, silent=True)
# Baseline hyper-parameters; n_estimators is filled in from CV below.
fixed_params = {
    'boosting_type': 'gbdt',
    'class_weight': None,
    'colsample_bytree': 0.8,
    'importance_type': 'gain',
    'learning_rate': 0.05,
    'max_depth': 3,
    'min_child_weight': 0.02,
    'min_split_gain': 0,
    #'n_estimators': 500,
    'n_jobs': -1,
    'num_leaves': 8, 
    'objective': 'binary',
    'random_state': 7,
    'reg_alpha': 0.0,
    'reg_lambda': 0.0,
    'subsample': 0.8,
    'subsample_for_bin': 200000,
    'subsample_freq': 0,
    'scale_pos_weight':scale_pos_weight}


# 5-fold CV with early stopping; the best round count is the length of the
# mean-AUC curve returned by lgb.cv.
cv_result = lgb.cv(fixed_params, lgbm_train, num_boost_round=500, early_stopping_rounds=100, nfold=5, metrics='auc',seed=7)
fixed_params['n_estimators'] = len(cv_result['auc-mean'])

# Fit once with the CV-chosen round count to obtain gain importances
lgbm_model = LGBMClassifier(**fixed_params)
lgbm_model.fit(X, Y, eval_metric='auc')
origin_features = X.columns
lgbm_importance = lgbm_model.feature_importances_

# Persist the importances of features that contributed at least once (gain > 0)
chosen_feature_df = pd.DataFrame(origin_features)
chosen_feature_df['lgbm_importance'] = lgbm_importance
chosen_feature_df = chosen_feature_df.loc[chosen_feature_df.lgbm_importance>0]
chosen_feature_df.to_excel('chosen_feature_importance0606.xlsx')
#%%

# `threshold` must be chosen by hand -- keep roughly 300-400 features,
# otherwise the later RFE step becomes too slow to run.
chosen_feature = get_sorted_feature(origin_features, lgbm_importance, threshold=0)

X = X[chosen_feature]
X_test = X_test[chosen_feature]

# Refresh hit_indices from the flags of the pre-selected features
# (flag-based filtering is currently disabled -- all rows are kept).
#flags = get_flags(chosen_feature)
hit_indices = range(0,1000000)



#%%
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''

''''''''''''''                 四、使用rfe对chosen_features重新排序      ''''''''''''''
      
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
# Re-rank the pre-screened features with RFE and keep the top ~100-150 (adjustable)

# Train only on samples whose flags are not all zero (here: every row)
X = X.loc[X.index.isin(hit_indices)]
Y = Y.loc[Y.index.isin(hit_indices)]

lgbm_model = LGBMClassifier(**fixed_params)
rfe = RFE(lgbm_model, n_features_to_select=50, step=1, verbose=1) # stops at 50 features; the 50 survivors all share rank 1
rfe.fit(X, Y)
rfe_rank = rfe.ranking_
rfe_lgb_df=pd.DataFrame(rfe_rank,index=chosen_feature)
rfe_lgb_df = rfe_lgb_df.rename(columns={0:'rank_lgb_rfe'})
lgb_rfe_features = rfe_lgb_df.loc[rfe_lgb_df.rank_lgb_rfe<=101].index.tolist() # rank<=101 keeps ~150 features (50 tied at rank 1 plus ranks 2..101)

X = X[lgb_rfe_features]

# Refresh hit_indices from the flags of the RFE-selected features
# (flag-based filtering is currently disabled -- all rows are kept).
#flags = get_flags(lgb_rfe_features)
#hit_indices = get_hit_indices(Data, flags)
#hit_indices = [i for i in range(len(Data))]
hit_indices = range(0,1000000)


#%%
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''

''''''''''''''''''''''''             五、贝叶斯调参  ''''''''''''''''''''''''''''''
      
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
# Objective: k-fold CV KS on the training set, starting from the post-CV params.
# Bayesian search can be slow on large samples: consider a smaller n_estimators
# (say 50-100) with a larger learning_rate here and refine by hand afterwards.

# Train only on samples whose flags are not all zero (here: every row)
X = X.loc[X.index.isin(hit_indices)]
Y = Y.loc[Y.index.isin(hit_indices)]

fixed_params_bay = fixed_params.copy()
ks_score = kfold_objective(X, Y, model='lgb', params=fixed_params_bay)
 
# Search space. Bounds should be adapted to the sample and kept on the wide side.
# Domain types:
# choice: categorical
# quniform: discrete uniform (integer spacing)
# uniform: continuous uniform
# loguniform: continuous log-uniform
para_space_mlp = {
    'feature_num': hp.quniform('feature_num', 50, len(lgb_rfe_features), 1), # step adjustable; lower bound must stay below len(lgb_rfe_features)
    'learning_rate':hp.loguniform('learning_rate',np.log(0.01),np.log(0.2)),
    'max_depth': hp.quniform('max_depth', 3, 5, 1),
    'num_leaves': hp.quniform('num_leaves', 2, 32, 1),
    'min_child_weight':hp.quniform('min_child_weight',0,500,5), # keep this range generous
    'min_child_samples': hp.quniform('min_child_samples', 1000, 3000, 10), # sample-size dependent; 1000-3000 is reasonable here
    'min_split_gain': hp.quniform('min_split_gain', 1, 10, 0.1),
    # 'subsample':hp.quniform('subsample',0.6,1,0.1),
    # 'colsample_bytree':hp.quniform('colsample_bytree',0.6,1,0.1),
    'reg_alpha': hp.uniform('reg_alpha', 1, 10),
    'reg_lambda': hp.uniform('reg_lambda', 1, 10)}

# Run the Bayesian optimisation
trials = Trials()
max_evals = 100 # larger = slower; pick a sensible budget
best = fmin(ks_score, para_space_mlp, algo=tpe.suggest, max_evals=max_evals, 
           rstate=np.random.default_rng(7), trials=trials)
# hyperopt >= 0.2.7 expects rstate=np.random.default_rng(seed);
# older versions used rstate=np.random.RandomState(7)
 #%%
# For every parameter set tried during Bayesian optimisation we later refit and
# compare train/test KS & AUC; here we first locate the iteration whose
# parameters equal the `best` dict returned by fmin, then persist the history.
trials_result = trials.trials

# Print the iteration index matching `best` (hyperopt stores each value as a
# one-element list, hence the v[0] unwrapping).
for i in range(0, max_evals):
    param = trials_result[i]['misc']['vals']
    param_1 = {k: v[0] for k, v in param.items()}
    if param_1 == best:
        print(i)  # was tab-indented in the original, which risks a TabError

# Save the Bayesian search history.
# `with` guarantees the handle is closed (the original leaked `fr` below).
with open("trials_result.txt", 'w+') as fw:
    fw.write(str(trials_result))

# Reload the search history.
# NOTE(security): eval() executes arbitrary code. This is only acceptable
# because the file is produced locally by the write above -- never eval
# untrusted input.
with open("trials_result.txt", 'r+') as fr:
    trials_result = eval(fr.read())



#%%
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''

''''''''''''''''''''''''     六、根据贝叶斯迭代过程作图  ''''''''''''''''''''''''''''''
      
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
# For each Bayesian iteration's parameters, refit and record train/test KS & AUC

ks_trains = []
ks_tests = []
auc_trains = []
auc_tests = []
space = []
for i in range(0,max_evals):
    param = trials_result[i]['misc']['vals']
    param_1 = {k:v[0] for k,v in param.items()}
    # hyperopt returns floats -- cast the integer-valued params back to int
    param_1['max_depth'] = int(param_1['max_depth'])
    param_1['feature_num'] = int(param_1['feature_num'])
    param_1['min_child_samples'] = int(param_1['min_child_samples'])
    param_1['min_child_weight'] = int(param_1['min_child_weight'])
    param_1['num_leaves'] = int(param_1['num_leaves'])
    fixed_params.update(param_1)
    # feature_num is a meta-parameter (top-k features), not a LightGBM param
    feature_num = int(fixed_params.pop('feature_num'))
    chosen_feature_1 = lgb_rfe_features[:feature_num]
#    flags_1 = get_flags(chosen_feature_1)
#    flags_1.remove('flag_graylist')
    # hit_indices_1 = get_hit_indices(Data, flags)
#    hit_indices_1 =[j for j in range(len(Data))]
    # NOTE(review): 100000 here vs 1000000 everywhere else -- looks like a
    # dropped zero; confirm the intended sample size.
    hit_indices_1 = range(0,100000)

    X_tr, X_te = X[chosen_feature_1], X_test[chosen_feature_1]
    X_tr = X_tr.loc[X_tr.index.isin(hit_indices_1)]
    Y_tr = Y.loc[Y.index.isin(hit_indices_1)]
    X_te = X_te.loc[X_te.index.isin(hit_indices_1)]
    Y_te = Y_test.loc[Y_test.index.isin(hit_indices_1)]
    lgbm_tuner = LGBModelTuner(LGBMClassifier(**fixed_params), X_tr, Y_tr, X_te, Y_te, hit_indices_1)
    result_ks = lgbm_tuner.get_model_result(fixed_params)
    train_ks = result_ks['ks'][0]
    test_ks = result_ks['ks'][1]
    train_auc = result_ks['auc'][0]
    test_auc = result_ks['auc'][1]
    ks_trains.append(train_ks)
    ks_tests.append(test_ks)
    auc_trains.append(train_auc)
    auc_tests.append(test_auc)
    space.append(i)
#%%

# KS / AUC curves over all Bayesian iterations
models_ks('max_evals', space, ks_trains, ks_tests)
models_auc('max_evals', space, auc_trains, auc_tests)



# Plotting every iteration is crowded -- optionally plot a chosen window only
i = 5
j = 10
models_ks('max_evals', space[i:j], ks_trains[i:j], ks_tests[i:j])
models_auc('max_evals', space[i:j], auc_trains[i:j], auc_tests[i:j])


#%%
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''

''''''''''''''''''''''''     七、选出最优的参数  ''''''''''''''''''''''''''''''
      
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
# Any iteration's parameters may be chosen here.
# fmin's `best` came from iteration 20; based on the joint KS/AUC plots above,
# iteration 50 is used as the starting point for manual fine-tuning.

param = trials_result[50]['misc']['vals']
param_best = {k:v[0] for k,v in param.items()}

# Merge into fixed_params and cast the integer-valued params back to int
fixed_params.update(param_best)
fixed_params['max_depth'] = int(fixed_params['max_depth'])
fixed_params['feature_num'] = int(fixed_params['feature_num'])
fixed_params['min_child_samples'] = int(fixed_params['min_child_samples'])
fixed_params['min_child_weight'] = int(fixed_params['min_child_weight'])
fixed_params['num_leaves'] = int(fixed_params['num_leaves'])


# Train with the chosen parameters to fix the final feature list.
# Regularised parameters shrink the effective feature count (e.g. 314 -> 85),
# since overfitting-control params limit how many features get used.
feature_num = int(fixed_params.get('feature_num'))
chosen_final_feature = lgb_rfe_features[:feature_num]

X = X[chosen_final_feature]
X_test = X_test[chosen_final_feature]

# Flag filtering still disabled -- keep all rows
hit_indices = range(0,1000000)




#%%
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''

''''''''''''''''''''''''     八、手动调参    ''''''''''''''''''''''''''''''
      
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
# Restrict to flagged rows (currently all rows) and build the manual tuner
X = X.loc[X.index.isin(hit_indices)]
Y = Y.loc[Y.index.isin(hit_indices)]
X_test = X_test.loc[X_test.index.isin(hit_indices)]
Y_test = Y_test.loc[Y_test.index.isin(hit_indices)]

lgbm_tuner = LGBModelTuner(LGBMClassifier(**fixed_params), X, Y, X_test, Y_test, hit_indices)


params = lgbm_tuner.params
lgbm_tuner.get_model_result(params)
lgbm_tuner.estimator # inspect the current parameter combination

# Reference copy of the estimator's parameters at this point.
# NOTE: this bare dict literal is evaluated and discarded -- it has no effect
# on the run and is kept only as pasted console output for documentation.
{'boosting_type': 'gbdt',
 'class_weight': None,
 'colsample_bytree': 0.8,
 'importance_type': 'gain',
 'learning_rate': 0.11155463487315995,
 'max_depth': 5,
 'min_child_samples': 1120,
 'min_child_weight': 55,
 'min_split_gain': 1.0,
 'n_estimators': 181,
 'n_jobs': -1,
 'num_leaves': 14,
 'objective': 'binary',
 'random_state': 7,
 'reg_alpha': 1.5275404233282286,
 'reg_lambda': 2.728606507333034,
 'silent': 'warn',
 'subsample': 0.8,
 'subsample_for_bin': 200000,
 'subsample_freq': 0,
 'scale_pos_weight': 1,
 'feature_num': 111}


#%%
# Tune max_depth
# max_depth is the most important precision parameter; typical values are 3-5
lgbm_tuner.try_tune('max_depth', [1,2,3,4,5,6,7])
lgbm_tuner.tune('max_depth', 3)
lgbm_tuner.get_model_result(params)

# Tune num_leaves
# Rough relation: num_leaves = 2^(max_depth), but it should be set BELOW
# 2^(max_depth), otherwise the model may overfit.
lgbm_tuner.try_tune('num_leaves', [2,4,6,8,10,12,14,16,18,20,22,24,26,28,30,32])
lgbm_tuner.try_tune('num_leaves', np.arange(2,32,1))

lgbm_tuner.tune('num_leaves', 8)
lgbm_tuner.get_model_result(params)

# below 2^4 = 16; AUC stopped changing beyond 8

# Tune min_child_weight / min_sum_hessian_in_leaf
# With the precision params fixed, move on to the overfitting-control params.
# min_sum_hessian_in_leaf (a.k.a. min_child_weight): minimum sum of hessians in
# one leaf to allow a split; higher values potentially decrease overfitting.
# min_child_weight and min_sum_hessian_in_leaf are the same thing -- tune one.
# Roughly ~10% of the sample size.
# Sweep 10-500 in steps of 10 and watch where KS drops sharply: range(10,500,10)
lgbm_tuner.try_tune('min_child_weight', [10,20,30,40,60,80,100,120,140,160,180,200]) # coarse first, then fine; don't fixate on one range
lgbm_tuner.try_tune('min_child_weight', np.arange(110,130,1))
lgbm_tuner.try_tune('min_child_weight', np.arange(40,80,1))

lgbm_tuner.tune('min_child_weight', 61)
lgbm_tuner.get_model_result(params)


# Tune min_data_in_leaf / min_child_samples -- also aliases, tune only one.
# min_data_in_leaf sets the minimum samples required to split a leaf; its value
# depends on the training sample size and num_leaves. Larger values avoid overly
# deep trees but may underfit. Search around (sample size / num_leaves).
# Roughly ~10% of the sample; for large data sets usually in the thousands.
lgbm_tuner.try_tune('min_child_samples', [100, 500, 1000,1500, 2000,2500, 3000,3500])
lgbm_tuner.try_tune('min_child_samples', [1300,1500,1700,1900,2000,2200,2400,2600,2800,3000])
lgbm_tuner.try_tune('min_child_samples', np.arange(500,1500,10))
lgbm_tuner.try_tune('min_child_samples', np.arange(1800,2200,5))

lgbm_tuner.tune('min_child_samples', 2500)
lgbm_tuner.get_model_result(params)


lgbm_tuner.estimator # inspect the current parameter combination

# Tune L1 regularisation lambda_l1 / reg_alpha
# Unambiguously an overfitting-reduction parameter.
lgbm_tuner.try_tune('reg_alpha', [1,2,3,4,5,6,7,8,9,10])
lgbm_tuner.try_tune('reg_alpha', np.arange(1,10,0.05))
lgbm_tuner.try_tune('reg_alpha', [8.1,8.2,8.3,8.4,8.5,8.6,8.7,8.8,8.9,9])
lgbm_tuner.tune('reg_alpha', 5)
lgbm_tuner.get_model_result(params)


# Tune L2 regularisation lambda_l2 / reg_lambda
lgbm_tuner.try_tune('reg_lambda', [1,2,3,4,5,6,7,8,9,10])
lgbm_tuner.try_tune('reg_lambda', np.arange(1,10,0.05))
lgbm_tuner.try_tune('reg_lambda', [1.8,1.9,2,2.1,2.2,2.3,2.4,2.5,2.6,2.7])
lgbm_tuner.tune('reg_lambda',3)
lgbm_tuner.get_model_result(params)


# Tune min_split_gain
# Minimum gain to perform a split; also controls overfitting. Larger values
# control overfitting more strongly but risk underfitting (roughly 1-4?).
# This parameter also reveals data quality: if the computed gain is too low the
# tree cannot split further, and LightGBM warns that no split was found --
# meaning the data has hit its limit.
lgbm_tuner.try_tune('min_split_gain', [1,2,4,6,8,10,12,14,15])
lgbm_tuner.try_tune('min_split_gain', np.arange(1,4,0.1))
lgbm_tuner.try_tune('min_split_gain', [3.9,4,4.1,4.2,4.3,4.4,4.5,4.6,4.7,4.8,4.9])
lgbm_tuner.tune('min_split_gain',2.2)
lgbm_tuner.get_model_result(params)


# Tune learning_rate
# Tuned last, after all other parameters; typical range 0.01 - 0.2
lgbm_tuner.try_tune('learning_rate', [0.02, 0.04, 0.06, 0.08, 0.1,0.12,0.14,0.16])
lgbm_tuner.try_tune('learning_rate', np.arange(0.17,0.19,0.005))
lgbm_tuner.try_tune('learning_rate', np.arange(0.001,0.015,0.001))
lgbm_tuner.tune('learning_rate', 0.008)
lgbm_tuner.get_model_result(params)


# n_estimators: keep within ~350
lgbm_tuner.try_tune('n_estimators', [40,60,80,100,120,140,160, 180, 200, 220,240,260,280,300])
lgbm_tuner.try_tune('n_estimators', [10,20,30,40,50,60,70,80])
lgbm_tuner.try_tune('n_estimators', np.arange(80,100,1))
lgbm_tuner.get_model_result(params)


# Jointly tune learning_rate and n_estimators: n_estimators controls the number
# of trees while learning_rate is the gradient step size (typically 0.01-0.3).
# Common practice: start with many trees (e.g. 1000) and a low learning_rate,
# then find the optimal iteration count via early stopping.
df_train_auc, df_test_auc = tune_n_estimators_learning_rate(lgbm_tuner,
                                                    learning_rate=[0.01,0.02,0.03,0.04,0.05,0.06,0.07,0.08,0.09,0.1],
                                                    n_estimators=[80,90,100,110,120,140,160])
plot_n_estimators_learning_rate(df_train_auc, df_test_auc)

lgbm_tuner.tune('n_estimators', 260)
lgbm_tuner.tune('learning_rate',0.04)

# Final model after manual tuning:
# fetch the current parameters and check performance / parameter values
params = lgbm_tuner.params
lgbm_tuner.get_model_result(params)
lgbm_tuner.estimator

#%%
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''

''''''''''''''''''''''''     九、使用调好的参数再一次rfe对特征排序     ''''''''''''''''''''''''''''''
      
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
# Re-rank the features once more, now with the tuned parameters
lgbm_model = LGBMClassifier(**params)
rfe = RFE(lgbm_model, n_features_to_select=1, step=1, verbose=1) # run all the way down to 1 feature so every feature receives a distinct rank
rfe.fit(X, Y)
rfe_rank_2 = rfe.ranking_
rfe_lgb_df_2=pd.DataFrame(rfe_rank_2,index=chosen_final_feature)

# BUGFIX: removed the duplicate assignment
#   rfe_lgb_df_2 = pd.DataFrame(rfe_rank_2, index=chosen_final_feature_df.iloc[:,0])
# which referenced the undefined name `chosen_final_feature_df` and raised
# NameError, clobbering the correctly-built frame above.

rfe_lgb_df_2 = rfe_lgb_df_2.rename(columns={0:'rank_lgb_rfe'})

# Sweep the feature count and record its effect on train/test KS & AUC
space,ks_trains, ks_tests, auc_trains, auc_tests = tune_feature_num(params, X, Y, X_test, Y_test, rfe_lgb_df_2,
                                                                    hit_indices,min_feature_num=5, step=1)



# Plot a hand-picked window of the feature-count sweep
i, j = space.index(50), space.index(60)
models_ks('feature_num', space[i:j], ks_trains[i:j], ks_tests[i:j])
models_auc('feature_num', space[i:j], auc_trains[i:j], auc_tests[i:j])

# Choose the final feature count -- edit the rank threshold below by hand
chosen_final_feature_rfe = rfe_lgb_df_2.loc[rfe_lgb_df_2.rank_lgb_rfe<=67].index.tolist()
lgbm_tuner = LGBModelTuner(LGBMClassifier(**params), X[chosen_final_feature_rfe], Y, X_test[chosen_final_feature_rfe], Y_test, hit_indices)
lgbm_tuner.get_model_result(params)
lgbm_tuner.estimator

# A further round of tuning could be done with the final feature set


#hit_indices = get_hit_indices(Data, flags)
# hit_indices=  [i for i in range(len(Data))]
hit_indices = range(0,1000000)


#%%
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''

''''''''''''''''''''''''     十、使用调好的参数进行模型训练     ''''''''''''''''''''''''''''''
      
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
import perform as pf
from sklearn.metrics import roc_curve,auc
from lightgbm.sklearn import LGBMRegressor

# NOTE(review): LGBMRegressor is used with objective='binary' -- confirm this is
# intentional (LGBMClassifier is used everywhere else; predict() here returns
# probabilities only because of the binary objective). Also `feature_num` is not
# a LightGBM parameter and will be passed through as an unknown kwarg -- verify.
lgbm_model = LGBMRegressor(boosting_type='gbdt', class_weight=None, colsample_bytree=0.8, feature_num=111, importance_type='gain',
               learning_rate=0.11155463487315995, max_depth=5,
               min_child_samples=2500, min_child_weight=61, min_split_gain=1.0,
               n_estimators=181, num_leaves=14, objective='binary',
               random_state=7, reg_alpha=1.5275404233282286,
               reg_lambda=2.728606507333034, scale_pos_weight=1, subsample=0.8)



# Train on the full training sample with the final feature set
train = X.copy()
train_y = Y.copy()

X_use=train[chosen_final_feature_rfe]
X_test_use=X_test[chosen_final_feature_rfe]
lgbm_model.fit(X_use,train_y,eval_metric='auc')
# In-sample performance: KS (perform.cal_ks expects descending scores, hence
# the negated predictions) and AUC via ROC
preds_train = lgbm_model.predict(X_use) 
ks_value,bad_percent,good_percent=pf.cal_ks(-preds_train,train_y,section_num=20)  
max_ks0=np.max(ks_value)
false_positive_rate, recall, thresholds = roc_curve(train_y, preds_train)
roc_auc0=auc(false_positive_rate,recall) 

print('当前模型在样本内训练集的KS值和AUC值分别为{0}'.format([max_ks0,roc_auc0]))

# Predicted-probability distribution on the training sample
plt.figure()
plt.hist(preds_train)
plt.ylabel('Number of samples')
plt.xlabel('probability of y=1')
plt.title('Probability Distribution on train samples')

# Cumulative good/bad percentage and KS curve across 20 population buckets
plt.figure()
plt.plot(list(range(0,21)),np.append([0],bad_percent),'-r',label='Bad Percent')
plt.plot(list(range(0,21)),np.append([0],good_percent),'-g',label='Good Percent')
plt.plot(list(range(0,21)),np.append([0],ks_value),'-b',label='KS value')
plt.legend(loc='lower right')
plt.ylabel('% of total Good/Bad')
plt.xlabel('% of population')

# ROC curve on the training sample
plt.figure()
plt.title('Receiver Operating Characteristic')
plt.plot(false_positive_rate, recall, 'b', label='AUC = %0.2f' % roc_auc0)
plt.legend(loc='lower right')
plt.plot([0,1],[0,1],'r--')
plt.xlim([0.0,1.0])
plt.ylim([0.0,1.0])
plt.ylabel('Recall')
plt.xlabel('Fall-out')
plt.show()


#%%
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''

''''''''''''''''''''''''     十一、使用调好的参数进行模型测试    ''''''''''''''''''''''''''''''
      
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
# Evaluate the already-trained model on the held-out test set
valid = X_test.copy()
valid_y = Y_test.copy()

X_test_use=valid[chosen_final_feature_rfe]
# FIX: removed a redundant `lgbm_model.fit(X_use, train_y, eval_metric='auc')`
# here -- the model was already trained on exactly this data in the previous
# section (deterministic with random_state=7), so refitting only wasted time.
preds_test = lgbm_model.predict(X_test_use)
# Test-set performance: KS (cal_ks expects descending scores, hence -preds) and AUC
ks_value,bad_percent,good_percent=pf.cal_ks(-preds_test,valid_y,section_num=20)
max_ks0=np.max(ks_value)

false_positive_rate, recall, thresholds = roc_curve(valid_y, preds_test)
roc_auc0=auc(false_positive_rate,recall) 

print('当前模型在样本内测试集的KS值和AUC值分别为{0}'.format([max_ks0,roc_auc0]))

# Predicted-probability distribution on the test sample
plt.figure()
plt.hist(preds_test)
plt.ylabel('Number of samples')
plt.xlabel('probability of y=1')
plt.title('Probability Distribution on test samples')

# Cumulative good/bad percentage and KS curve across 20 population buckets
plt.figure()
plt.plot(list(range(0,21)),np.append([0],bad_percent),'-r',label='Bad Percent')
plt.plot(list(range(0,21)),np.append([0],good_percent),'-g',label='Good Percent')
plt.plot(list(range(0,21)),np.append([0],ks_value),'-b',label='KS value')
plt.legend(loc='lower right')
plt.ylabel('% of total Good/Bad')
plt.xlabel('% of population')

# ROC curve on the test sample
plt.figure()
plt.title('Receiver Operating Characteristic')
plt.plot(false_positive_rate, recall, 'b', label='AUC = %0.2f' % roc_auc0)
plt.legend(loc='lower right')
plt.plot([0,1],[0,1],'r--')
plt.xlim([0.0,1.0])
plt.ylim([0.0,1.0])
plt.ylabel('Recall')
plt.xlabel('Fall-out')
plt.show()


#%%
# Scorecard transformation: score = A - B*ln(odds), anchored so the base odds
# map to 600 points with PDO = 25 (25 points to double the odds).
basic_badrate = sum(Y) / len(Y)
point0 = 600
odds0 = basic_badrate / (1 - basic_badrate)
PDO = 25
B = PDO / np.log(2)
A = point0 + B * np.log(odds0)
point_train = A - B * np.log(preds_train / (1 - preds_train))
point_test = A - B * np.log(preds_test / (1 - preds_test))

#point_train[np.where(np.array(hit_indices) != 1)[0]] = np.NaN
#point_test[np.where(np.array(flag_test_all) != 1)[0]] = np.NaN
# Clip the scores to the [300, 1000] range
point_train[np.where(point_train > 1000)[0]] = 1000
point_train[np.where(point_train < 300)[0]] = 300
point_test[np.where(point_test > 1000)[0]] = 1000
point_test[np.where(point_test < 300)[0]] = 300

# Score distributions on train and test samples
plt.figure()
plt.hist(point_train)
plt.ylabel('Number of samples')
plt.xlabel('Score')
plt.title('Distribution on train samples')

plt.figure()
plt.hist(point_test)
plt.ylabel('Number of samples')
plt.xlabel('Score')
plt.title('Distribution on test samples')
plt.show()

# %%
# Population stability index between train and test prediction distributions
PSI_value = pf.PSI(preds_train, preds_test)
# %%
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''

''''''''''''  十二、生成部署代码  保存if_else部署代码  ''''''''

'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
output_file_path=r'E:\模型开发\浦发银行多头分半监督'  # where the generated scoring code is written
import os
os.chdir(output_file_path)

from model_parser import LGBModelParser

# Translate the trained booster into standalone if/else scoring code
p = LGBModelParser(lgbm_model, chosen_final_feature_rfe)
# p = LGBModelParser(lgbm_model, chosen_feature)

# p = LGBModelParser.load_from('lgbm.model', 'lgbm.feature')
p.parse("if_else1.py", lang="python")


#%%
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''

''''''''''''''''''''''''     十三、保存最终模型结果    ''''''''''''''''''''''''''''''
      
'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''

'''output_file_path是输出lgbm模型文件和所选变量的文件夹路径，按照实际情况调整'''
output_file_path=r'E:\模型开发\浦发银行多头分半监督'
import os
os.chdir(output_file_path)

# Save the raw booster and its feature list; the feature ORDER matters for
# deployment, so do not sort the csv.
lgbm_model.booster_.save_model('raw_lgbm1.model') 
chosen_feature_pd=pd.DataFrame(chosen_final_feature_rfe)
chosen_feature_pd.to_csv('lgbm_features1.csv',encoding="utf_8_sig",index=False)



#%%

'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''

''''''''''''       十四、读取保存好的 model 和feature names   '''''''''''

''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''

'''input_model_path是导入之前保存好的lgbm model的路径，input_xgb_features_path是导入lgbm model对应的features的csv文件路径 ，按照实际情况调整 '''

# BUGFIX: load the saved booster and its feature list FIRST. The original code
# sliced X with `lgbm_features`, a name that is never defined anywhere in this
# file, and only loaded the feature csv afterwards.
import os
input_model_path = r'E:\模型开发\nbcb借呗1期贷前lgbm\model 2 0206'
os.chdir(input_model_path)
lgbm_model = lgb.Booster(model_file='raw_lgbm.model')
chosen_feature_pd=pd.read_csv('lgbm_features.csv',encoding='UTF-8')
chosen_feature=list(np.array(chosen_feature_pd.iloc[:,0],dtype='str'))

Data = slim(Data)
# Out-of-time sample: rows after 2021-12-31
# (assumes `user_date` is comparable to a Timestamp -- TODO confirm dtype)
Dataoot = Data[(Data['user_date'] > pd.to_datetime('20211231'))]

X = Data[(Data['user_date'] > pd.to_datetime('20211231'))]
# BUGFIX: take the label BEFORE restricting X to the model features -- the
# original read X['y'] only after the 'y' column had been dropped.
Y = X['y']
X = X[chosen_feature]

X =get_dummied(X)

from toad.metrics import KS, AUC


# Score the OOT sample with the reloaded booster
preds = lgbm_model.predict(X.loc[:,chosen_feature]) 
print('allks',KS(preds, Y))



# Scorecard transformation for the OOT sample: fixed anchor odds, 650 points,
# PDO = 200 (NOTE(review): anchors differ from the 600/25 used earlier -- confirm)
point0 = 650
# odds0 = basic_badrate / (1 - basic_badrate)
odds0=0.0538100937562337
PDO = 200

B = PDO / np.log(2)
A = point0 + B * np.log(odds0)

# Clip scores to [300, 1000] and round to integers
point_oot = A - B * np.log(preds / (1 - preds))
point_oot[np.where(point_oot > 1000)[0]] = 1000
point_oot[np.where(point_oot < 300)[0]] = 300

point_oot=np.around(point_oot).astype(int)


# KS / AUC on the OOT sample (cal_ks expects descending scores, hence -preds)
ks_value,bad_percent,good_percent=pf.cal_ks(-preds,Y,section_num=20)                    
max_ks0=np.max(ks_value)

false_positive_rate, recall, thresholds = roc_curve(Y, preds)
roc_auc0=auc(false_positive_rate,recall) 

# NOTE(review): the message below says "training set" but this is the OOT sample
print('当前模型在样本内训练集的KS值和AUC值分别为{0}'.format([max_ks0,roc_auc0]))

# Predicted-probability distribution on the OOT sample
plt.figure()
plt.hist(preds)
plt.ylabel('Number of samples')
plt.xlabel('probability of y=1')
plt.title('Probability Distribution on test samples')

# Cumulative good/bad percentage and KS curve across 20 population buckets
plt.figure()
plt.plot(list(range(0,21)),np.append([0],bad_percent),'-r',label='Bad Percent')
plt.plot(list(range(0,21)),np.append([0],good_percent),'-g',label='Good Percent')
plt.plot(list(range(0,21)),np.append([0],ks_value),'-b',label='KS value')
plt.legend(loc='lower right')
plt.ylabel('% of total Good/Bad')
plt.xlabel('% of population')

# ROC curve on the OOT sample
plt.figure()
plt.title('Receiver Operating Characteristic')
plt.plot(false_positive_rate, recall, 'b', label='AUC = %0.2f' % roc_auc0)
plt.legend(loc='lower right')
plt.plot([0,1],[0,1],'r--')
plt.xlim([0.0,1.0])
plt.ylim([0.0,1.0])
plt.ylabel('Recall')
plt.xlabel('Fall-out')
plt.show()