from datetime import datetime

from catboost import CatBoostClassifier, Pool
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
from sklearn.model_selection import GridSearchCV
from xbase_util.common_util import date2s
# from xgboost import XGBClassifier

from src.bean.catboost_classifier_loss_function import CatBoostClassifierLossFunction
from src.bean.catboost_eval_metric import CatBoostEvalMetric
from src.model.common.sample import predict_sample
from src.model.model_common_util import is_depth_int, test_evaluate, save_model
from src.util.common_util import while_input, is_int, is_float, is_int_between, printx, input_is_yes
from src.util.config_manager import ConfigManager


def input_loss_fun():
    """Interactively pick a CatBoost classification loss function.

    Lists every member of ``CatBoostClassifierLossFunction`` with a 1-based
    index, reads a validated index from the user, echoes the choice, and
    returns the chosen member's ``loss_name`` string.
    """
    options = list(CatBoostClassifierLossFunction)
    for idx, option in enumerate(options, start=1):
        printx(f"{idx}.{option}")
    choice = int(while_input("[首次训练]请输入损失函数序号:", is_int_between, (1, len(options))))
    loss_name = options[choice - 1].loss_name
    printx(f"[首次训练]损失函数:{loss_name}")
    return loss_name


def input_eval_metric():
    """Interactively pick a CatBoost evaluation metric.

    Lists every member of ``CatBoostEvalMetric`` with a 1-based index, reads
    a validated index from the user, and returns the chosen member's
    ``metric_name`` string.
    """
    metrics = list(CatBoostEvalMetric)
    for idx, metric in enumerate(metrics, start=1):
        printx(f"{idx}.{metric}")
    choice = int(while_input("[首次训练]请输入评估指标函数序号:", is_int_between, (1, len(metrics))))
    return metrics[choice - 1].metric_name


def common_train(config: ConfigManager, files, model_id, df, cluster):
    """Interactively train a CatBoost classifier and persist it.

    Samples/splits the data via ``predict_sample``, collects hyperparameters
    from the console, optionally runs a ``GridSearchCV`` over depth /
    learning-rate / iterations, evaluates on the held-out test set, and saves
    the model plus its metrics through ``save_model``.

    Fixes vs. the previous revision:
    - the grid-search branch now passes the user-selected ``loss_function``
      (it was silently dropped there, while the plain branch used it);
    - ``depth=18`` removed from the search grid — CatBoost rejects depths
      above 16 (the depth prompt itself says "最大16"), so those grid cells
      would have raised mid-search.

    :param config: project configuration manager handed through to save_model.
    :param files: training-data file descriptors, forwarded to save_model.
    :param model_id: identifier under which the model is saved.
    :param df: dataframe handed to ``predict_sample`` for split/resampling.
    :param cluster: cluster tag, forwarded to save_model.
    """
    x_resampled, y_resampled, x_test, y_test, test_size, sample_type = predict_sample(df)
    selected_loss = input_loss_fun()
    selected_metric = input_eval_metric()
    iterations = int(while_input("[首次训练]请输入迭代次数(建议1000):", is_int))
    depth = int(while_input("[首次训练]请输入每棵决策树最大深度(建议10，最大16):", is_depth_int))
    learning_rate = float(while_input("[首次训练]请输入学习率(大于0.1收敛快；小于0.1易得最优解，慢):", is_float))
    use_best_model = input_is_yes("[首次训练]use_best_model:")
    is_grid_search = input_is_yes("是否使用网格搜索:")
    if is_grid_search:
        # loss_function was previously omitted here, so grid search optimized
        # CatBoost's default loss instead of the one the user chose.
        base_model = CatBoostClassifier(learning_rate=learning_rate, iterations=iterations, depth=depth,
                                        loss_function=selected_loss, use_best_model=use_best_model,
                                        eval_metric=selected_metric)
        grid_search = GridSearchCV(estimator=base_model, param_grid={
            # CatBoost caps tree depth at 16; 18 (previously listed) would raise.
            'depth': [4, 6, 8, 10, 12, 14, 16],
            'learning_rate': [0.05, 0.08, 0.1, 0.15, 0.2, 0.25],
            'iterations': [500, 1000, 1500, 2000, 2500]
        }, cv=3, verbose=2, n_jobs=-1)
        train_time = date2s(datetime.now())
        # fit kwargs are forwarded by GridSearchCV to each CatBoost fit call.
        grid_search.fit(x_resampled, y_resampled,
                        eval_set=(x_test, y_test),
                        early_stopping_rounds=10,
                        verbose=0)
        print("最佳参数：", grid_search.best_params_)
        # Record the winning hyperparameters so save_model logs what was used.
        depth = grid_search.best_params_['depth']
        iterations = grid_search.best_params_['iterations']
        learning_rate = grid_search.best_params_['learning_rate']
        print("最佳得分：", grid_search.best_score_)
        model = grid_search.best_estimator_
        print("最佳模型测试集得分：", model.score(x_test, y_test))
    else:
        model = CatBoostClassifier(iterations=iterations, depth=depth, learning_rate=learning_rate,
                                   loss_function=selected_loss, use_best_model=use_best_model,
                                   eval_metric=selected_metric)
        printx("[首次训练]使用全部数据训练模型")
        train_time = date2s(datetime.now())
        # use_best_model requires an eval_set; only supply one in that case.
        model.fit(Pool(x_resampled, label=y_resampled), eval_set=[(x_test, y_test)] if use_best_model else None)
    accuracy, precision, recall, f1 = test_evaluate(model, x_test, y_test)
    save_model(config, model_id, model, train_time, files, sample_type, iterations, depth, learning_rate, test_size,
               f1,
               recall,
               accuracy, precision, selected_metric, use_best_model, is_grid_search, cluster)
