
import numpy as np
import pandas as pd
import lightgbm as lgb
import matplotlib.pyplot as plt 
from hyperopt import fmin,hp,Trials,space_eval,rand,tpe,anneal
import lightgbm

# print("step2: searching parameters...")

# 1，定义目标函数 x_train, x_test, y_train, y_test, args,
# def loss( config):
#     evals_result = {}  # 记录训练结果所用
#     gbm_model = lightgbm.train(
#         args.ml_param1,
#         lgb_train,
#         valid_sets=[lgb_train, lgb_eval],
#         num_boost_round=args.ml_param2['num_boost_round'],
#         early_stopping_rounds=args.ml_param2['early_stopping_rounds'],
#         evals_result=evals_result,
#         verbose_eval=args.ml_param2['verbose_eval'],
        
#         # fobj=IC_lgb,
#         # feval=IC_loss
#         )
#     y_pred_test = gbm_model(lgb_eval)
#     # val_score = f1_score(dftest['label'],y_pred_test>0.5)
#     array_a = y_test
#     array_b = y_pred_test
#     cor = (np.nanmean((array_a*array_b))-np.nanmean(array_a)*np.nanmean(array_b))/(np.nanstd(array_a)*np.nanstd(array_b))
#     return -cor

  
# 2. Define the hyperparameter search space

# Less important hyperparameters (towards the bottom) can be commented out as needed
# Hyperopt search space for LightGBM parameters.
spaces = {
    # Boosting algorithm variant.
    "boosting_type": hp.choice("boosting_type", ['gbdt', 'goss', 'dart']),
    # range() already yields ints, so no per-element int() conversion is needed.
    'num_leaves': hp.choice('num_leaves', list(range(15, 500))),
    # Log-uniform sampling spreads trials evenly across orders of magnitude.
    'learning_rate': hp.loguniform('learning_rate', np.log(0.005), np.log(0.2)),
    "max_depth": hp.choice("max_depth", list(range(5, 30))),
    "metric": hp.choice("metric", ['rmse', 'l1', 'l2']),
    "feature_fraction": hp.uniform('feature_fraction', 0.5, 1.0),
}


# 3. Run the hyperparameter search
# hyperopt supports the following search algorithms:
#   random search (hyperopt.rand.suggest)
#   simulated annealing (hyperopt.anneal.suggest)
#   TPE (hyperopt.tpe.suggest, Tree-structured Parzen Estimator Approach)
def param_tune(x_train, x_test, y_train, y_test, args):
    """Search LightGBM hyperparameters with hyperopt's TPE algorithm.

    Args:
        x_train, y_train: training features / labels.
        x_test, y_test: validation features / labels, also used to score
            each trial via the Pearson correlation of predictions vs labels.
        args: object carrying ``ml_param1`` (base LightGBM params dict) and
            ``ml_param2`` (dict with ``num_boost_round``,
            ``early_stopping_rounds`` and ``verbose_eval``).

    Returns:
        tuple: ``(trials, best_params)`` — the hyperopt ``Trials`` record
        and the best parameter dict resolved through ``space_eval``.
    """
    lgb_train = lightgbm.Dataset(x_train, label=y_train)
    lgb_eval = lightgbm.Dataset(x_test, label=y_test, reference=lgb_train)

    def myloss(config):
        # Merge into a fresh dict each trial so args.ml_param1 is never
        # mutated in place (the original called ml_param1.update(config),
        # leaking the last trial's sampled values back to the caller).
        trial_params = {**args.ml_param1, **config}
        evals_result = {}  # training history, filled in by lightgbm.train
        gbm_model = lightgbm.train(
            trial_params,
            lgb_train,
            valid_sets=[lgb_train, lgb_eval],
            num_boost_round=args.ml_param2['num_boost_round'],
            early_stopping_rounds=args.ml_param2['early_stopping_rounds'],
            evals_result=evals_result,
            verbose_eval=args.ml_param2['verbose_eval'],
            )
        y_pred_test = gbm_model.predict(x_test)
        # Pearson correlation of predictions vs labels, computed with
        # nan-aware reductions so missing labels don't poison the score.
        array_a = y_test
        array_b = y_pred_test
        cor = ((np.nanmean(array_a * array_b)
                - np.nanmean(array_a) * np.nanmean(array_b))
               / (np.nanstd(array_a) * np.nanstd(array_b)))
        print(f'cor:{cor}, param: {trial_params}')
        # hyperopt minimizes the objective, so negate the correlation.
        return -cor

    trials = Trials()
    best = fmin(fn=myloss, space=spaces, algo=tpe.suggest,
                max_evals=500, trials=trials)

    # 4. Resolve hp.choice indices back into the actual parameter values.
    best_params = space_eval(spaces, best)
    print("\nbest_params = ", best_params)
    return trials, best_params


# 5. Plot the search progress
def tune_plot(trials, root):
    """Plot per-step loss and the running best loss of a hyperopt search.

    Saves the figure to ``{root}/lgb_tune.png``.

    Args:
        trials: hyperopt ``Trials`` object populated by ``fmin``.
        root: directory in which the PNG is written.
    """
    losses = [x["result"]["loss"] for x in trials.trials]
    # Running minimum in one O(n) pass (the original recomputed np.min over
    # a growing prefix, which is O(n^2) in the number of trials).
    minlosses = np.minimum.accumulate(losses)
    steps = range(len(losses))
    fig, ax = plt.subplots(figsize=(6, 3.7), dpi=144)
    ax.scatter(x=steps, y=losses, alpha=0.3)
    # NOTE: the original passed axes=ax to ax.plot; `axes` is a Line2D
    # property removed in matplotlib 3.5+, and redundant when calling ax.plot.
    ax.plot(steps, minlosses, color="red")
    ax.set_xlabel("step")
    ax.set_ylabel("loss")
    fig.savefig(f'{root}/lgb_tune.png')
    plt.close(fig)  # free the figure so repeated calls don't accumulate memory


