import random

from sklearn.ensemble import RandomForestRegressor
import xgboost as xgb
# 贝叶斯调参优化随机森林
from sklearn.model_selection import cross_val_score
from bayes_opt import BayesianOptimization
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt

# Load the edema training dataset from Excel.
df = pd.read_excel('数据/水肿-训练数据.xlsx')
# Build the target label: drop the dimensionality-reduced follow-up edema
# column, then pop the follow-up edema volume as the regression target.
df = df.drop(columns='随访水肿体积降维')
label = df.pop('随访ED_volume')
# print('label\n', label)  # Length: 493
# NOTE(review): the comment above reports 493 samples while the print below
# reports 349 rows — confirm which dataset version is actually in use.
# Input features: every column left after removing the label columns.
variable = df
print('variable\n', variable)  # [349 rows x 43 columns]

# Label name used for plot axis text and output file names.
label_column = '水肿扩散'


# ==========归一化============================
def df_mm(df):
    """Min-max scale *df* to [0, 1].

    Returns a tuple ``(scaled, scaler)`` where ``scaled`` is a 2-D numpy
    array and ``scaler`` is the fitted ``MinMaxScaler`` (kept so values
    can later be mapped back via ``inverse_transform``).
    """
    from sklearn.preprocessing import MinMaxScaler

    print('>>>>>>> 数据归一化')
    data = np.asarray(df)
    # A 1-D input (e.g. the label series) must become a single-column
    # matrix before sklearn will accept it.
    data = data.reshape(-1, 1) if data.ndim == 1 else data

    mm = MinMaxScaler()
    scaled = mm.fit_transform(data)
    return scaled, mm


# Normalize features and label to [0, 1]; keep the label scaler so that
# predictions can be mapped back to the original scale later.
variable, _ = df_mm(variable)
print('variable-归一化', variable)
label, label_sacler = df_mm(label)
print('label-归一化', label)


# Train/test split by fixed row positions.
# NOTE(review): with the 349 rows reported above, variable[:350] is the
# entire dataset and variable[411:] is EMPTY, so the test set would contain
# no samples (and rows 350-410 would be silently skipped on a larger
# dataset). Confirm these boundaries against the actual dataset size.
train_x = variable[:350, :]
train_y = label[:350]
test_x = variable[411:, :]
test_y = label[411:]
print('train_y', train_y)
# Objective for Bayesian optimization over the random-forest search space.
def rf_cv(n_estimators, min_samples_split, max_features,
          max_depth, min_samples_leaf, max_leaf_nodes):
    """Return the mean 4-fold CV score (negative MSE) of a random forest
    built from the sampled hyper-parameters.

    Bayesian optimization samples continuous values only, so every
    integer-valued hyper-parameter is cast with ``int()`` before use.
    """
    forest = RandomForestRegressor(
        n_estimators=int(n_estimators),
        min_samples_split=int(min_samples_split),
        min_samples_leaf=int(min_samples_leaf),
        max_leaf_nodes=int(max_leaf_nodes),
        max_features=int(max_features),
        max_depth=int(max_depth),
        random_state=42,
        oob_score=True,
    )
    scores = cross_val_score(forest, train_x, train_y,
                             scoring='neg_mean_squared_error', cv=4)
    return scores.mean()
# Bayesian optimizer for the random forest; bounds define the search space
# for each hyper-parameter.
_rf_bounds = {
    'n_estimators': (1, 500),
    'min_samples_split': (2, 20),
    'max_features': (1, 30),
    'max_depth': (3, 20),
    'min_samples_leaf': (2, 20),
    'max_leaf_nodes': (10, 40),
}
rf_bo = BayesianOptimization(rf_cv, _rf_bounds)
rf_bo.maximize()

def XGb_cv(eta, max_depth, gamma, min_child_weight):
    """Return the mean 4-fold CV score (negative MSE) of an XGBoost
    regressor built from the sampled hyper-parameters.

    Only genuinely integer-valued hyper-parameters are cast with ``int()``.
    ``eta`` (learning rate), ``gamma`` and ``min_child_weight`` are
    continuous in XGBoost — the original ``int(eta)`` collapsed the whole
    (0, 1) learning-rate range to 0, making the search meaningless.
    """
    model = xgb.XGBRegressor(
        eta=float(eta),
        max_depth=int(max_depth),          # tree depth must be an integer
        gamma=float(gamma),
        min_child_weight=float(min_child_weight),
        random_state=42,
        # oob_score removed: it is a RandomForest parameter, not an
        # XGBRegressor one (copy-paste from rf_cv; xgboost ignores/warns).
    )
    val = cross_val_score(model, train_x, train_y,
                          scoring='neg_mean_squared_error',
                          cv=4).mean()
    return val
# Bayesian optimizer for XGBoost (search disabled by default; uncomment
# maximize() to run it).
# NOTE(review): the eta bound (0, 3) exceeds the usual learning-rate
# range of (0, 1] — confirm it is intentional.
_xgb_bounds = {
    'eta': (0, 3),
    'max_depth': (3, 10),
    'gamma': (0, 10),
    'min_child_weight': (0, 20),
}
XGb_bo = BayesianOptimization(XGb_cv, _xgb_bounds)
# XGb_bo.maximize()

# Select which optimizer's result feeds the final model.
searcher = rf_bo
# searcher = XGb_bo

# Round the continuously-sampled values back to the integers the model
# expects, printing each one.
# NOTE(review): if the XGBoost searcher is selected, 'eta' should remain a
# float — int() here would truncate it. Only correct for the RF path.
best_params = searcher.max['params']
for key in list(best_params):
    best_params[key] = int(best_params[key])
    print(best_params[key])

# Final model rebuilt from the best hyper-parameters found above.
# random_state=42 added: the forests scored inside rf_cv were seeded with
# 42, so the final model must use the same seed for the chosen
# hyper-parameters to be evaluated and deployed consistently (and for the
# script to be reproducible).
model = RandomForestRegressor(
    n_estimators=best_params['n_estimators'],
    min_samples_split=best_params['min_samples_split'],
    max_features=best_params['max_features'],
    max_depth=best_params['max_depth'],
    min_samples_leaf=best_params['min_samples_leaf'],
    max_leaf_nodes=best_params['max_leaf_nodes'],
    random_state=42,
)
# Alternative XGBoost final model (enable together with searcher = XGb_bo):
# model = xgb.XGBRegressor(
#     eta=best_params['eta'],
#     max_depth=best_params['max_depth'],
#     gamma=best_params['gamma'],
#     min_child_weight=best_params['min_child_weight']
# )

# scores = cross_val_score(model, train_x, train_y, cv=4)

# Fit on the training split and predict the held-out test rows.
model.fit(train_x,train_y)

predict = model.predict(test_x)

# De-normalization (currently disabled): reshape both arrays to column
# vectors so the label scaler's inverse_transform can be applied.
test_y = np.array(test_y).reshape((-1, 1))
predict = np.array(predict).reshape((-1, 1))
# test_y = label_sacler.inverse_transform(test_y)
# predict = label_sacler.inverse_transform(predict)
# NOTE(review): the original commented-out line assigned
# inverse_transform(test_y) to `predict` (copy-paste slip); corrected to
# transform `predict` above. Also note the scaler name typo ('sacler').


def predict_plt():
    """Plot predicted vs. actual test-set values and save the figure.

    Reads the module-level ``test_y``, ``predict`` and ``label_column``.
    """
    cn_font = {'family': 'simsun'}  # SimSun renders the Chinese labels
    plt.figure(figsize=(12, 6))
    plt.plot(test_y, color='blue', linestyle='dashdot', label='真实值')
    plt.plot(predict, color='red', linestyle='dashdot', label='预测值')
    plt.legend(prop={"family": "simsun"})
    plt.xlabel('样本', fontdict=cn_font, fontweight='bold')
    plt.ylabel(label_column, fontdict=cn_font, fontweight='bold')
    plt.title('测试集拟合情况', fontproperties=cn_font, fontweight='bold')
    # Numeric tick labels use Times New Roman.
    plt.xticks(fontproperties='Times New Roman')
    plt.yticks(fontproperties='Times New Roman')
    plt.grid(linestyle='-.')  # dash-dot grid
    plt.savefig(f'图像/预测模型-{label_column}-RF-全体亚组', dpi=480)
    plt.show()


# predict_plt()


# Compute and print error metrics.
# https://blog.csdn.net/z15818264727/article/details/122638802
def cal_error():
    """Print MAE, MSE, RMSE and R^2 for the test-set predictions.

    Reads the module-level ``test_y``, ``predict`` and ``best_params``.
    """
    print('>>>>>>> 计算误差')
    real = test_y  # ground-truth values

    from math import sqrt
    from sklearn.metrics import mean_absolute_error
    from sklearn.metrics import mean_squared_error
    from sklearn.metrics import r2_score

    mae = mean_absolute_error(real, predict)
    mse = mean_squared_error(real, predict)
    rmse = sqrt(mse)  # derive from mse instead of recomputing it
    # Distinct name so the imported r2_score function is not shadowed.
    r2 = r2_score(real, predict)

    print(f'mae: {mae:0.4f}')
    print(f'mse: {mse:0.4f}')
    print(f'rmse: {rmse:0.4f}')
    print(f'r2: {r2:0.4f}')
    # print('4折交叉验证准确率: %s' % np.array(scores).mean())
    print(best_params)

cal_error()

# NOTE(review): imported but unused in this script — presumably for a
# planned ROC analysis; remove or use it.
from sklearn.metrics import roc_curve, roc_auc_score
