import pandas as pd
from sklearn.metrics import mean_squared_error, mean_absolute_error, root_mean_squared_error
import matplotlib.pyplot as plt
from xgboost import plot_importance
from sklearn.metrics import r2_score
import joblib


def test_model_predict(x_test, y_test, model_path, logger):
    """Evaluate a persisted regression model on a held-out test set.

    Loads the model from ``model_path`` with joblib, predicts on ``x_test``,
    and logs MSE, MAE, RMSE and R^2 against ``y_test``.

    Args:
        x_test: Feature matrix for the test split.
        y_test: Ground-truth targets for the test split.
        model_path: Path to a joblib-serialized estimator exposing ``predict``.
        logger: Logger used for progress and metric output.

    Returns:
        dict: Computed metrics keyed by ``'mse'``, ``'mae'``, ``'rmse'``, ``'r2'``.
    """
    logger.info('--------开始模型预测---------')
    model = joblib.load(model_path)
    y_val_pred = model.predict(x_test)

    # NOTE(review): root_mean_squared_error requires scikit-learn >= 1.4.
    mse = mean_squared_error(y_test, y_val_pred)
    mae = mean_absolute_error(y_test, y_val_pred)
    rmse = root_mean_squared_error(y_test, y_val_pred)
    r2 = r2_score(y_test, y_val_pred)

    logger.info(f'验证集均方误差（MSE）：{mse}')
    logger.info(f'验证集平均绝对误差（MAE）：{mae}')
    logger.info(f'验证集均方根误差（RMSE）：{rmse}')
    logger.info(f'验证集r2_score：{r2}')

    # Return the metrics so callers can use them programmatically
    # (original returned None implicitly, so this is backward compatible).
    return {'mse': mse, 'mae': mae, 'rmse': rmse, 'r2': r2}


def xgb_importance_plot(model_addr, logger):
    """Render the feature-importance chart of a saved XGBoost model.

    Loads the estimator from ``model_addr`` via joblib and draws its top 15
    features ranked by weight on a new figure, then displays it.

    Args:
        model_addr: Path to the joblib-serialized XGBoost model.
        logger: Logger used for progress output.
    """
    logger.info('--------开始绘制xgboost特征贡献图---------')
    booster = joblib.load(model_addr)
    _, ax = plt.subplots(figsize=(12, 8))
    plot_importance(
        booster,
        ax=ax,
        max_num_features=15,
        importance_type='weight',
        height=0.8,
        color='#3498db',
        grid=False,
        title='Feature Importance (XGBoost)',
    )
    plt.tight_layout()
    plt.show()


def plot_single_target_importance(target_name, n_features, importance_df):
    """Plot the top ``n_features`` feature importances for one target.

    Args:
        target_name: Row label in ``importance_df`` identifying the target.
        n_features: Number of highest-ranked features to display.
        importance_df: DataFrame indexed by target, one column per feature.
    """
    plt.figure(figsize=(10, 6))
    # Rank features for this target from most to least important.
    ranked = importance_df.loc[target_name].sort_values(ascending=False)
    ranked.head(n_features).plot(kind='barh')
    plt.title(f'Feature Importance for {target_name}')
    plt.xlabel('Importance Score')
    plt.ylabel('Features')
    plt.tight_layout()
    plt.show()


def plot_combined_importance(importance_df, top_n=5):
    """Visualize aggregate feature importance across all targets.

    Draws horizontal bars for the mean importance (averaged over targets) of
    the ``top_n`` strongest features, and overlays one scatter point per
    target so the spread around that mean is visible.

    Args:
        importance_df: DataFrame indexed by target, one column per feature.
        top_n: Number of top features (by mean importance) to display.
    """
    plt.figure(figsize=(12, 8))

    # Mean importance per feature over all targets, strongest first.
    avg_importance = importance_df.mean(axis=0).sort_values(ascending=False)
    top_features = avg_importance.index[:top_n]

    # Bars carry the cross-target average.
    avg_importance[:top_n].plot(kind='barh', color='lightblue', alpha=0.6)

    # One point per target, aligned with the bar rows (y = 0 .. top_n-1).
    for target in importance_df.index:
        per_target = importance_df.loc[target][top_features]
        plt.scatter(per_target.values,
                    range(len(per_target)),
                    label=target)

    plt.yticks(range(top_n), top_features)
    plt.title(f'Top {top_n} Features Importance Across All Targets')
    plt.xlabel('Importance Score')
    plt.ylabel('Features')
    plt.legend()
    plt.tight_layout()
    plt.show()


def Multi_importance_plot(model_addr, feature_names, label_names, logger):
    """Plot per-target and combined feature importances of a multi-output model.

    Loads a fitted multi-output wrapper (anything exposing ``estimators_``,
    e.g. sklearn's MultiOutputRegressor) from ``model_addr``, collects each
    sub-estimator's ``feature_importances_``, then renders one chart per
    target plus a combined top-5 chart.

    Args:
        model_addr: Path to the joblib-serialized multi-output model.
        feature_names: Column labels for the importance table (input features).
        label_names: Row labels, one per target / sub-estimator, in the same
            order as ``estimators_``.
        logger: Logger used for progress output.

    Returns:
        pd.DataFrame: Importance table indexed by target, one column per feature.
    """
    logger.info('--------开始绘制多输出特征贡献图---------')
    model = joblib.load(model_addr)

    # One importance vector per target-specific sub-estimator.
    # Fixes the original's shadowing of the loaded model by the loop variable
    # and removes the stray debug print of the raw values.
    feature_importances = [
        sub_estimator.feature_importances_ for sub_estimator in model.estimators_
    ]

    # DataFrame form: rows are targets, columns are features.
    importance_df = pd.DataFrame(feature_importances,
                                 columns=feature_names,
                                 index=label_names)

    for target in label_names:
        plot_single_target_importance(target, 10, importance_df)

    plot_combined_importance(importance_df, top_n=5)

    # Expose the table for programmatic use (original returned None).
    return importance_df

