#coding=utf-8
# bayes_opt
from bayes_opt import BayesianOptimization
from sklearn.model_selection import cross_val_score
import lightgbm as lgb
from Data_Mining_Competition.分类任务.糖尿病遗传风险预测挑战赛.demo import data


# Keep only the labelled rows (label == -1 marks unlabelled / test rows).
train = data[data['患有糖尿病标识'] != -1]
train_label = train['患有糖尿病标识']
# BUG FIX: the label column used to stay inside the feature matrix that is
# later passed to cross_val_score, leaking the target into training — which
# is why the recorded best score below is a perfect AUC of 1.0. Drop it so
# `train` holds features only.
train = train.drop(columns=['患有糖尿病标识'])

# lgb_cv declares which hyper-parameters are tunable and scores one candidate
# configuration via cross-validation (ROC-AUC in this example).
# In practice you tune parameters in batches rather than all at once, and when
# a round's optimum lands near a search boundary, widen that range next round.
def lgb_cv(n_estimators, min_gain_to_split, subsample, max_depth, colsample_bytree,
           min_child_samples, reg_alpha, reg_lambda, num_leaves, learning_rate):
    """Objective for Bayesian optimisation.

    Builds an LGBMClassifier from the (continuous) candidate values and returns
    its mean 5-fold cross-validated ROC-AUC. Integer-valued hyper-parameters
    arrive as floats from the optimiser, so they are truncated with int()
    before being handed to LightGBM.
    """
    candidate = {
        'boosting_type': 'gbdt',
        'objective': 'binary',
        'n_jobs': -1,
        'colsample_bytree': float(colsample_bytree),
        'min_child_samples': int(min_child_samples),
        'n_estimators': int(n_estimators),
        'num_leaves': int(num_leaves),
        'reg_alpha': float(reg_alpha),
        'reg_lambda': float(reg_lambda),
        'max_depth': int(max_depth),
        'subsample': float(subsample),
        # LightGBM resolves this alias of min_split_gain internally.
        'min_gain_to_split': float(min_gain_to_split),
        'learning_rate': float(learning_rate),
    }
    estimator = lgb.LGBMClassifier(**candidate)
    scores = cross_val_score(estimator, train, train_label, scoring="roc_auc", cv=5)
    return scores.mean()

# Instantiate BayesianOptimization; the per-parameter search bounds are chosen
# by hand (widen any bound the optimum keeps bumping into).
lgb_bo = BayesianOptimization(
        lgb_cv,
        {
        'colsample_bytree': (0.5, 1),
        'min_child_samples': (2, 200),
        'num_leaves': (5, 1000),
        'subsample': (0.6, 1),
        'max_depth': (2, 12),
        'n_estimators': (10, 1000),
        'reg_alpha': (0, 10),
        'reg_lambda': (0, 10),
        'min_gain_to_split': (0, 1),
        # BUG FIX: the lower bound was 0 — learning_rate must be strictly
        # positive; a sample at (or near) 0 makes every boosting step a no-op
        # and wastes optimisation iterations on a degenerate model.
        'learning_rate': (0.005, 0.1)
         },
    )

# Run the optimisation (uses bayes_opt's default init_points / n_iter).
lgb_bo.maximize()

# Print the best score found and the parameter values that achieved it.
print(lgb_bo.max)
#
# params={
#                'boosting_type': 'gbdt',
#                'objective': 'binary',
#                'learning_rate': 0.04,
#                'metric': metric,
#                'seed': 2020,
#                'nthread': -1,
#                'max_depth': 10,
#                'colsample_bytree': 0.625,
#                'min_child_samples': 20,
#                'min_gain_to_split': 0.218,
#
#                'num_leaves': 442,
#                'reg_alpha': 0.51,
#                'reg_lambda': 6.41,
#                'subsample': 0.89
#
#        }
# NOTE(review): target == 1.0 (a perfect cross-validated AUC) is a classic
# symptom of label leakage — the label column was included in the feature
# matrix passed to cross_val_score; this result should not be trusted.
#{'target': 1.0, 'params':
# {'colsample_bytree': 0.5059892230942038,
# 'learning_rate': 0.08623650286678286,
# 'max_depth': 2.177214092361655,
# 'min_child_samples': 50.464304572600064,
# 'min_gain_to_split': 0.7137900956300536,
# 'n_estimators': 321.50379491149425,
# 'num_leaves': 514.3977617209787,
# 'reg_alpha': 1.7615499854716554,
# 'reg_lambda': 7.3595588423975915,
# 'subsample': 0.6213009057092999
# }