import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestRegressor
import random
from sklearn.metrics import mean_squared_error, r2_score
from sklearn.model_selection import train_test_split, RandomizedSearchCV, KFold, cross_validate
import hyperopt
from hyperopt import hp, fmin, tpe, Trials, partial
from hyperopt.early_stop import no_progress_loss

# sklearn.model_selection.

# Seed both the stdlib and NumPy global RNGs so every run is reproducible.
_SEED = 0
random.seed(_SEED)
np.random.seed(_SEED)


# from sklearn.datasets import load_iris
# from sklearn.model_selection import RandomizedSearchCV

def hyperopt_objective(params):
    """Hyperopt objective: 5-fold CV of a RandomForestRegressor.

    Parameters
    ----------
    params : dict
        A sample from the hyperopt search space. ``n_estimators``,
        ``max_depth``, ``max_features``, ``min_samples_split`` and
        ``min_samples_leaf`` arrive as floats (quniform) and are cast to int.

    Returns
    -------
    float
        Negative mean cross-validated R^2 (fmin minimizes, so lower is better).
    """
    Y_col = 'temperature'
    # NOTE(review): the CSV is re-read on every trial; consider caching the
    # frame at module level if tuning becomes slow.
    dataset = pd.read_csv(f"../data/ml_dataset_{Y_col}.csv")

    # Convention in this dataset: all columns except the last are features,
    # the last column is the target.
    features = list(dataset.columns[:-1])
    X = dataset[features]
    Y = dataset[dataset.columns[-1]]

    model = RandomForestRegressor(
        n_estimators=int(params["n_estimators"]),
        max_depth=int(params["max_depth"]),
        # assumes the dataset has at least this many feature columns
        # (search space goes up to 2000) — TODO confirm against the CSV
        max_features=int(params["max_features"]),
        # These two were sampled by the search space but previously never
        # passed to the model. quniform can emit 1.0, and sklearn requires
        # an integer min_samples_split >= 2, so clamp defensively.
        min_samples_split=max(2, int(params.get("min_samples_split", 2))),
        min_samples_leaf=int(params.get("min_samples_leaf", 1)),
        min_impurity_decrease=params["min_impurity_decrease"],
        random_state=0,
        verbose=False,
        n_jobs=-1,
    )

    # 5-fold CV scored with R^2; error_score='raise' surfaces fit failures
    # instead of silently recording NaN.
    cv = KFold(n_splits=5, shuffle=True, random_state=0)
    validation_loss = cross_validate(model, X, Y
                                     , scoring="r2"
                                     , cv=cv
                                     , verbose=False
                                     , n_jobs=-1
                                     , error_score='raise'
                                     )
    # fmin minimizes, so negate the mean R^2. Do NOT take abs(): R^2 can be
    # negative (worse than predicting the mean), and abs() would make such a
    # model look as good as one with positive R^2 of the same magnitude.
    return -np.mean(validation_loss["test_score"])


def opt_finger_temperature():
    """Placeholder for a temperature-specific optimization entry point.

    Not implemented yet; currently does nothing.
    """
    # TODO: implement (e.g. wire up param_hyperopt for this target).
    pass


def param_hyperopt(max_evals=100):
    """Search random-forest hyperparameters with hyperopt's TPE sampler.

    Parameters
    ----------
    max_evals : int, optional
        Maximum number of objective evaluations (default 100).

    Returns
    -------
    tuple
        ``(params_best, trials)`` — the best parameter dict found by ``fmin``
        and the ``Trials`` object recording the full search history.
    """
    # Record every trial so the search history can be inspected afterwards.
    trials = Trials()

    # Stop early if the best loss has not improved for 100 consecutive trials.
    early_stop_fn = no_progress_loss(100)

    param_grid_simple = {
        'n_estimators': hp.quniform("n_estimators", 100, 200, 20),
        'max_depth': hp.quniform("max_depth", 10, 50, 1),
        # Lower bound is 2: sklearn rejects an integer min_samples_split < 2,
        # so sampling 1 here would crash any objective that forwards it.
        "min_samples_split": hp.quniform("min_samples_split", 2, 10, 1),
        "min_samples_leaf": hp.quniform("min_samples_leaf", 1, 10, 1),
        # assumes the dataset has >= 2000 feature columns — TODO confirm
        "max_features": hp.quniform("max_features", 500, 2000, 10),
        "min_impurity_decrease": hp.quniform("min_impurity_decrease", 0, 5, 1),
    }

    # Surrogate model: default TPE. A tuned variant is kept for reference:
    # algo = partial(tpe.suggest, n_startup_jobs=20, n_EI_candidates=50)
    params_best = fmin(hyperopt_objective  # objective function
                       , space=param_grid_simple  # search space
                       , algo=tpe.suggest  # surrogate / suggestion algorithm
                       # , algo = algo
                       , max_evals=max_evals  # evaluation budget
                       , verbose=True
                       , trials=trials
                       , early_stop_fn=early_stop_fn
                       )

    # fmin already prints the best score; print the best params as well.
    print("\n", "\n", "best params: ", params_best,
          "\n")
    return params_best, trials


if __name__ == '__main__':
    # Run the full hyperparameter search (budget: 100 evaluations).
    params_best, trials = param_hyperopt(100)