import pandas as pd
import numpy as np
from sklearn.preprocessing import LabelEncoder, StandardScaler, label_binarize
from sklearn.neural_network import MLPClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import (accuracy_score, classification_report, confusion_matrix,
                             mean_absolute_error, mean_squared_error, roc_curve, auc)
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
import joblib
from datetime import datetime
import os

# ========================
# Matplotlib CJK font setup and warning suppression
# ========================
plt.rcParams['font.sans-serif'] = ['SimHei']  # CJK-capable font so Chinese plot labels render
plt.rcParams['axes.unicode_minus'] = False    # keep minus signs rendering correctly with SimHei
warnings.filterwarnings("ignore")

# ========================
# 1. Data preparation
# ========================
# NOTE(review): hard-coded relative path — the script must be run from the project root.
df = pd.read_excel("./AHP-MIV-BP_0.93/origin_data (1).xlsx", sheet_name="Sheet4")
X = df.iloc[:, :-1]  # every column except the last is a feature
y = df.iloc[:, -1]   # last column is the class label

# Encode the class labels as integers
le = LabelEncoder()
y = le.fit_transform(y)

# Standardize features to zero mean / unit variance, preserving column names
scaler = StandardScaler()
X_scaled = scaler.fit_transform(X)
X = pd.DataFrame(X_scaled, columns=X.columns)

# ========================
# 2. Indicator taxonomy: top-level categories and equipment sub-categories
# ========================
# Maps each top-level category to its indicator (DataFrame column) names.
# The 设备 (equipment) category is nested one level deeper into sub-systems;
# the selection step below keeps the single best indicator per sub-system.
category_dict = {
    '人员': ['管理人员的管理能力', '技术人员的技术素质', '工人的安全意识与操作能力', '特殊岗位人员持证率'],
    '设备': {
        '开采设备': ['支护设备完好率', '采掘设备完好率', '采掘机械化水平'],
        '通风设备': ['矿井通风设备完好率', '通风构筑物完好率'],
        '瓦斯治理': ['瓦斯抽放设备完好率', '瓦斯监测设备完好率', '隔爆防爆设备完好率'],
        '火灾防护': ['防火及消防设施完好率', '火灾监测装备完好率'],
        '水害防护': ['井下排水设备完好率','探防水设备完好率'],
        '提升与运输设备': ['运输设备完好率', '运输机械化水平','矿井提升设备完好率'],
        '供电系统': ['供电系统保护装置完好率','供电系统设备完好率']
    },
    '环境': ['风量供需比', '顶板管理难易程度', '矿井正常涌水量', '煤层自燃发火倾向性','平均瓦斯涌出量'],
    '管理': ['安全管理制度及落实率', '安全检查整改落实率', '安全检查技术人员配备率', '安全投入指数', '应急响应机制完善率','作业人员培训及考核']
}

# How many indicators to keep for each flat category (equipment instead keeps
# one indicator per sub-category — see the selection loops below).
select_num = {'人员': 2, '环境': 3, '管理': 3}

# ========================
# 3. AHP weight computation function
# ========================
# NOTE(review): dead code — this 2-tuple variant of ahp_weights is shadowed by
# later redefinitions of the same name (which return 3 values) before any call
# is made; consider deleting.
def ahp_weights(matrix):
    """Compute AHP priority weights and consistency ratio from a pairwise comparison matrix."""
    eigvals, eigvecs = np.linalg.eig(matrix)
    max_index = np.argmax(eigvals.real)
    max_eigval = eigvals.real[max_index]
    w = eigvecs[:, max_index].real
    w = w / w.sum()  # normalize so the weights sum to 1
    n = matrix.shape[0]
    CI = (max_eigval - n) / (n - 1)
    # RI: empirical random consistency index (tabulated for n <= 10)
    RI_dict = {1: 0.0, 2: 0.0, 3: 0.58, 4: 0.90, 5: 1.12, 6: 1.24, 7: 1.32, 8: 1.41, 9: 1.45, 10: 1.49}
    RI = RI_dict.get(n, 1.49)
    CR = CI / RI if RI != 0 else 0
    return w, CR

# ========================
# 4. Read an AHP matrix from Excel and compute its weights
# ========================
# NOTE(review): dead code — shadowed by later redefinitions of read_ahp_matrix
# (with a different return arity and file path) before any call; consider deleting.
def read_ahp_matrix(sheet_name, file_path="ahp_matrices.xlsx"):
    """Load one pairwise comparison matrix from an Excel sheet and return its weights."""
    df = pd.read_excel(file_path, sheet_name=sheet_name, index_col=0)
    if df.empty:
        raise ValueError(f"{sheet_name} 的矩阵为空，请检查 Excel 内容")
    matrix = df.values.astype(float)
    w, CR = ahp_weights(matrix)
    print(f"{sheet_name} 权重：{np.round(w, 4)}, CR={round(CR, 4)}")
    return w

# ========================
# 5. MIV (Mean Impact Value) feature importance
# ========================
def miv_feature_selection(X, model, perturbation=0.1):
    """Rank features by Mean Impact Value (MIV).

    Each feature column is perturbed by ±`perturbation` (relative) and the mean
    change in the model's predicted probability is that feature's MIV.

    Parameters
    ----------
    X : pd.DataFrame
        Feature matrix, already standardized — it is fed to `model` as-is.
    model : fitted classifier exposing `predict_proba`.
    perturbation : float
        Relative perturbation applied to one column at a time.

    Returns
    -------
    pd.DataFrame with columns 指标 / MIV / MIV绝对值, sorted by |MIV| descending.
    """
    miv_vals = []
    for col in X.columns:
        X_plus = X.copy()
        X_minus = X.copy()
        X_plus[col] *= (1 + perturbation)
        X_minus[col] *= (1 - perturbation)
        # BUGFIX: the original re-fit a StandardScaler on the perturbed data.
        # Z-scoring is invariant to a per-column multiplicative rescale, so
        # that exactly cancelled the perturbation and every MIV came out 0.
        # Feed the perturbed (already standardized) data to the model directly.
        # NOTE(review): [:, 1] only tracks the probability of class 1; for a
        # multi-class problem consider aggregating over all class columns.
        y_plus = model.predict_proba(X_plus)[:, 1]
        y_minus = model.predict_proba(X_minus)[:, 1]
        miv_vals.append(np.mean(y_plus - y_minus))
    miv_df = pd.DataFrame({"指标": X.columns, "MIV": miv_vals})
    miv_df["MIV绝对值"] = miv_df["MIV"].abs()
    miv_df = miv_df.sort_values(by="MIV绝对值", ascending=False).reset_index(drop=True)
    return miv_df

# ========================
# 6. Provisional model training + MIV computation
# ========================
# This small network exists only to drive the MIV feature screening; the final
# model is trained later on the selected features.
mlp_temp = MLPClassifier(hidden_layer_sizes=(10,5), max_iter=1000, random_state=42)
mlp_temp.fit(X, y)
miv_df = miv_feature_selection(X, mlp_temp)

# ========================
# 7. Per-category indicator screening by |MIV|
# ========================
def _top_by_miv(indicator_names, count):
    """Return the `count` indicators with the largest |MIV| among `indicator_names`."""
    ranked = miv_df[miv_df['指标'].isin(indicator_names)].sort_values(by='MIV绝对值', ascending=False)
    return ranked.head(count)['指标'].tolist()

selected_features = []
# Personnel: keep the configured number of top indicators.
selected_features += _top_by_miv(category_dict['人员'], select_num['人员'])
# Equipment: keep the single highest-|MIV| indicator of each sub-system.
for _indicators in category_dict['设备'].values():
    _subset = miv_df[miv_df['指标'].isin(_indicators)]
    if not _subset.empty:
        selected_features.append(_subset.loc[_subset['MIV绝对值'].idxmax(), '指标'])
# Environment and management: keep the configured number of top indicators.
for _cat in ('环境', '管理'):
    selected_features += _top_by_miv(category_dict[_cat], select_num[_cat])
X_selected = X[selected_features]

# ========================
# 8. AHP weight calculation
# ========================
# BUGFIX: this region previously contained five copy-pasted, mutually
# shadowing redefinitions of ahp_weights / read_ahp_matrix (one of which even
# returned a different (matrix, eigvals) shape) plus repeated imports of
# pandas / numpy / joblib / datetime that are already imported at the top of
# the file. None of those definitions could ever be called — each was rebound
# before first use. The duplicates were removed; the single effective
# definitions of ahp_weights and read_ahp_matrix follow below.


# ========================
# AHP weight computation, including the principal eigenvalue
# ========================
def ahp_weights(matrix):
    """Compute AHP weights from a pairwise comparison matrix.

    Parameters
    ----------
    matrix : np.ndarray
        Square positive reciprocal comparison matrix.

    Returns
    -------
    (w, max_eigval, CR) : normalized weight vector (sums to 1), the principal
    eigenvalue, and the consistency ratio CR = CI / RI (0 when RI is 0).
    """
    eigvals, eigvecs = np.linalg.eig(matrix)
    max_index = np.argmax(eigvals.real)
    max_eigval = eigvals.real[max_index]  # principal (largest real part) eigenvalue
    w = eigvecs[:, max_index].real
    w = w / w.sum()  # normalize so the weights sum to 1
    n = matrix.shape[0]
    # BUGFIX: guard the n == 1 case, which previously raised ZeroDivisionError.
    CI = (max_eigval - n) / (n - 1) if n > 1 else 0.0
    # RI: empirical random consistency index (tabulated for n <= 10; the n = 10
    # value is reused as an approximation for larger matrices).
    RI_dict = {1: 0.0, 2: 0.0, 3: 0.58, 4: 0.90, 5: 1.12, 6: 1.24, 7: 1.32, 8: 1.41, 9: 1.45, 10: 1.49}
    RI = RI_dict.get(n, 1.49)
    CR = CI / RI if RI != 0 else 0
    return w, max_eigval, CR


# ========================
# Read an AHP matrix from Excel and compute its weights (effective definition)
# ========================
def read_ahp_matrix(sheet_name, file_path="./AHP-MIV-BP_0.93/ahp_matrices.xlsx"):
    """Load one pairwise comparison matrix from an Excel sheet.

    Returns (weights, principal eigenvalue, consistency ratio) and prints them.
    Raises ValueError when the sheet is empty.
    """
    df = pd.read_excel(file_path, sheet_name=sheet_name, index_col=0)
    if df.empty:
        raise ValueError(f"{sheet_name} 的矩阵为空，请检查 Excel 内容")
    matrix = df.values.astype(float)
    w, max_eigval, CR = ahp_weights(matrix)  # weights, principal eigenvalue, CR
    print(f"{sheet_name} 权重：{np.round(w, 4)}, 最大特征根: {round(max_eigval, 4)}, CR={round(CR, 4)}")
    return w, max_eigval, CR


# ========================
# Compute global (composite) weights and collect each matrix's principal eigenvalue
# ========================
def compute_global_weights():
    """Combine first-, second- and third-level AHP weights into global weights.

    Reads every comparison matrix via read_ahp_matrix and multiplies weights
    down the hierarchy. Returns (global_weights, max_eigvals), where
    max_eigvals lists the principal eigenvalue of every matrix read.

    NOTE(review): assumes the "一级指标" sheet orders its criteria as
    人员, 管理, 环境, 设备 (w1[0]..w1[3]) and that the "设备因素" sheet lists its
    sub-systems in the same order as the reads below — confirm against Excel.
    """
    # First-level criteria
    w1, max_eigval_w1, CR_w1 = read_ahp_matrix("一级指标")

    # Second-level criteria
    w_person, max_eigval_person, CR_person = read_ahp_matrix("人员因素")
    w_manage, max_eigval_manage, CR_manage = read_ahp_matrix("管理因素")
    w_env, max_eigval_env, CR_env = read_ahp_matrix("环境因素")
    w_device, max_eigval_device, CR_device = read_ahp_matrix("设备因素")

    # Third-level (equipment) criteria
    w_support, max_eigval_support, CR_support = read_ahp_matrix("开采设备")
    w_excav, max_eigval_excav, CR_excav = read_ahp_matrix("通风设备")
    w_mech, max_eigval_mech, CR_mech = read_ahp_matrix("瓦斯设备")
    w_fire, max_eigval_fire, CR_fire = read_ahp_matrix("防火设备")
    w_water, max_eigval_water, CR_water = read_ahp_matrix("防水设备")
    w_transport, max_eigval_transport, CR_transport = read_ahp_matrix("提升与运输设备")
    w_power, max_eigval_power, CR_power = read_ahp_matrix("供电设备")

    # Global weights: first level x second level
    global_person = w1[0] * w_person
    global_manage = w1[1] * w_manage
    global_env = w1[2] * w_env

    # Global weights for equipment: first x second x third level
    global_support = w1[3] * w_device[0] * w_support
    global_excav = w1[3] * w_device[1] * w_excav
    global_mech = w1[3] * w_device[2] * w_mech
    global_fire = w1[3] * w_device[3] * w_fire
    global_water = w1[3] * w_device[4] * w_water
    global_transport = w1[3] * w_device[5] * w_transport
    global_power = w1[3] * w_device[6] * w_power

    # Final global weight vector.
    # NOTE(review): this order (person, manage, env, equipment...) differs from
    # the order of selected_features built earlier (person, equipment, env,
    # manage) — confirm the intended pairing before using weights per feature.
    global_weights = np.concatenate([global_person, global_manage, global_env,
                                     global_support, global_excav, global_mech,
                                     global_fire, global_water, global_transport, global_power])

    # Principal eigenvalue of every matrix that was read
    max_eigvals = [
        max_eigval_w1, max_eigval_person, max_eigval_manage, max_eigval_env, max_eigval_device,
        max_eigval_support, max_eigval_excav, max_eigval_mech, max_eigval_fire, max_eigval_water,
        max_eigval_transport, max_eigval_power
    ]

    return global_weights, max_eigvals


# Compute the global weights and principal eigenvalues (reads the AHP Excel file)
global_weights, max_eigvals = compute_global_weights()

# Print the results
print("全局权重:", global_weights)
print("最大特征根:", max_eigvals)

# ========================
# 11. 模型训练
# ========================
# mlp_final = MLPClassifier(hidden_layer_sizes=(10,), max_iter=500, random_state=42)
# mlp_final.fit(X_selected, y)

# ========================
# 12. Model persistence setup (artifact directory + run timestamp)
# ========================
# Directory that receives all artifacts written by this run.
model_dir = "saved_models_test"
os.makedirs(model_dir, exist_ok=True)  # idiomatic: no existence pre-check needed

# Timestamp used to tag every saved file from this run.
timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")

# model_filename = f"{model_dir}/mlp_model_{timestamp}.joblib"
# encoder_filename = f"{model_dir}/label_encoder_{timestamp}.joblib"
# scaler_filename = f"{model_dir}/feature_scaler_{timestamp}.joblib"
# features_filename = f"{model_dir}/selected_features_{timestamp}.joblib"

# joblib.dump(mlp_final, model_filename)
# joblib.dump(le, encoder_filename)
# joblib.dump(scaler, scaler_filename)
# joblib.dump(selected_features, features_filename)

# print(f"\n模型已保存:")
# print(f"- 模型文件: {model_filename}")
# print(f"- 标签编码器: {encoder_filename}")
# print(f"- 特征缩放器: {scaler_filename}")
# print(f"- 选择的特征: {features_filename}")

# ========================
# 9. Train/test split + data augmentation
# ========================
# Hold out a fixed-size test set of 25 samples, stratified by class
X_train_raw, X_test, y_train_raw, y_test = train_test_split(X_selected, y, test_size=25, random_state=42, stratify=y)

# Data augmentation: append Gaussian-noise copies of the training set
def augment_data(X, y, factor=3, noise_level=0.05, random_state=None):
    """Expand (X, y) by stacking `factor - 1` noisy copies of X.

    Parameters
    ----------
    X : pd.DataFrame
        Training features.
    y : array-like
        Training labels (repeated unchanged for each copy).
    factor : int
        Total size multiple of the original data (factor=1 returns plain copies).
    noise_level : float
        Standard deviation of the additive N(0, noise_level) noise.
    random_state : int | None
        Seed for reproducible noise. None keeps the original behavior of
        drawing from NumPy's global RNG (backward compatible).

    Returns
    -------
    (X_aug, y_aug) with len == factor * len(X).
    """
    rng = np.random if random_state is None else np.random.default_rng(random_state)
    X_list = [X.copy()]
    y_list = [y.copy()]
    for _ in range(factor - 1):
        X_noisy = X + rng.normal(0, noise_level, X.shape)
        X_list.append(X_noisy)
        y_list.append(y)
    X_aug = pd.concat(X_list, ignore_index=True)
    y_aug = np.hstack(y_list)
    return X_aug, y_aug

# Triple the training data by adding two noisy copies (labels repeat unchanged)
X_train, y_train = augment_data(X_train_raw, y_train_raw, factor=3, noise_level=0.05)

# ========================
# 10. Final model training
# ========================
mlp_final = MLPClassifier(hidden_layer_sizes=(10,), max_iter=500, random_state=42)
mlp_final.fit(X_train, y_train)

# ========================
# 10.1 保存模型参数文件
# ========================
# 创建模型保存文件夹
# model_dir = "saved_models"
# if not os.path.exists(model_dir):
#     os.makedirs(model_dir)

# 生成时间戳作为文件名
# timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")


# 保存模型组件
# model_filename = f"{model_dir}/mlp_model_{timestamp}.joblib"
# encoder_filename = f"{model_dir}/label_encoder_{timestamp}.joblib"
# scaler_filename = f"{model_dir}/feature_scaler_{timestamp}.joblib"
# features_filename = f"{model_dir}/selected_features_{timestamp}.joblib"

# # 保存模型和相关组件
# joblib.dump(mlp_final, model_filename)
# joblib.dump(le, encoder_filename)
# joblib.dump(scaler, scaler_filename)
# joblib.dump(selected_features, features_filename)


# ========================
# 10.1 保存模型参数文件
# ========================
# 创建模型保存文件夹
# model_dir = "saved_models"
# if not os.path.exists(model_dir):
#     os.makedirs(model_dir)

# 生成时间戳作为文件名
# timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")

# Build timestamped artifact paths for the model and its companion objects
model_filename = f"{model_dir}/mlp_model_{timestamp}.joblib"
encoder_filename = f"{model_dir}/label_encoder_{timestamp}.joblib"
scaler_filename = f"{model_dir}/feature_scaler_{timestamp}.joblib"
features_filename = f"{model_dir}/selected_features_{timestamp}.joblib"

# Persist the model, label encoder, feature scaler and selected feature list
joblib.dump(mlp_final, model_filename)
joblib.dump(le, encoder_filename)
joblib.dump(scaler, scaler_filename)
joblib.dump(selected_features, features_filename)



# ========================
# 10.2 Load the model components back from disk
# ========================
print(f"\n从文件加载模型...")

# Round-trip load of the model and its companion objects (sanity check that
# the just-saved artifacts are usable)
mlp_loaded = joblib.load(model_filename)
le_loaded = joblib.load(encoder_filename)
scaler_loaded = joblib.load(scaler_filename)
selected_features_loaded = joblib.load(features_filename)

print(f"模型加载成功！")
print(f"加载的特征数量: {len(selected_features_loaded)}")
print(f"加载的特征: {selected_features_loaded}")

# ========================
# 11. Test-set prediction and evaluation (using the loaded model)
# ========================
print("\n使用加载的模型进行预测和评估...")

# Predict with the model loaded from disk
y_pred = mlp_loaded.predict(X_test)
accuracy = accuracy_score(y_test, y_pred)
report = classification_report(y_test, y_pred, output_dict=True)
cm = confusion_matrix(y_test, y_pred)
# NOTE(review): MAE/MSE/RMSE over encoded class labels are only meaningful if
# the label encoding is ordinal — confirm the classes have a natural order.
mae = mean_absolute_error(y_test, y_pred)
mse = mean_squared_error(y_test, y_pred)
rmse = np.sqrt(mse)

print(f"\n模型准确率： {accuracy:.2f}")
print(f"MAE: {mae:.4f}, MSE: {mse:.4f}, RMSE: {rmse:.4f}")
print("宏平均精确率:", report['macro avg']['precision'])
print("宏平均召回率:", report['macro avg']['recall'])
print("宏平均F1值:", report['macro avg']['f1-score'])
print("加权平均精确率:", report['weighted avg']['precision'])
print("加权平均召回率:", report['weighted avg']['recall'])
print("加权平均F1值:", report['weighted avg']['f1-score'])

# Per-class precision/recall/F1 (classification_report keys the per-class
# entries by the encoded label index as a string)
for label in le_loaded.classes_:
    idx = le_loaded.transform([label])[0]
    print(f"类别 {label}: 精确率={report[str(idx)]['precision']}, 召回率={report[str(idx)]['recall']}, F1值={report[str(idx)]['f1-score']}")

# ========================
# 12. Confusion-matrix heatmap (results from the loaded model)
# ========================
plt.figure(figsize=(8,6))
sns.heatmap(cm, annot=True, fmt='d', cmap='Blues', xticklabels=le_loaded.classes_, yticklabels=le_loaded.classes_)
plt.xlabel("预测类别")
plt.ylabel("真实类别")
plt.title(f"混淆矩阵（25个测试样本）- 使用加载的模型 {timestamp}")
plt.show()

# ========================
# 13. Multi-class ROC / AUC (using the loaded model)
# ========================
# One-vs-rest binarization of the test labels
y_bin = label_binarize(y_test, classes=np.unique(y))
y_score = mlp_loaded.predict_proba(X_test)  # class probabilities from the loaded model
# NOTE(review): with exactly two classes, label_binarize yields a single column
# while predict_proba has two — y_score[:, 0] would then be the probability of
# class 0, not the binarized positive class. This code assumes >= 3 classes;
# confirm against the data.

fpr, tpr, roc_auc = {}, {}, {}
for i in range(y_bin.shape[1]):
    fpr[i], tpr[i], _ = roc_curve(y_bin[:,i], y_score[:,i])
    roc_auc[i] = auc(fpr[i], tpr[i])

plt.figure(figsize=(8,6))
for i in range(y_bin.shape[1]):
    plt.plot(fpr[i], tpr[i], label=f"类别 {le_loaded.inverse_transform([i])[0]} (AUC={roc_auc[i]:.2f})")
plt.plot([0,1],[0,1],'--', color='gray')
plt.xlabel("假阳性率")
plt.ylabel("真正率")
plt.title(f"多分类ROC曲线 - 使用加载的模型 {timestamp}")
plt.legend()
plt.show()

# ========================
# 14. Persist the evaluation results
# ========================
# Collect all summary metrics into one flat dictionary
evaluation_results = {
    'timestamp': timestamp,
    'model_filename': model_filename,
    'accuracy': accuracy,
    'mae': mae,
    'mse': mse,
    'rmse': rmse,
    'macro_avg_precision': report['macro avg']['precision'],
    'macro_avg_recall': report['macro avg']['recall'],
    'macro_avg_f1': report['macro avg']['f1-score'],
    'weighted_avg_precision': report['weighted avg']['precision'],
    'weighted_avg_recall': report['weighted avg']['recall'],
    'weighted_avg_f1': report['weighted avg']['f1-score'],
    'selected_features': selected_features_loaded,
    'test_samples_count': len(y_test)
}

# Add per-class precision/recall/F1 entries
for label in le_loaded.classes_:
    idx = le_loaded.transform([label])[0]
    evaluation_results[f'class_{label}_precision'] = report[str(idx)]['precision']
    evaluation_results[f'class_{label}_recall'] = report[str(idx)]['recall']
    evaluation_results[f'class_{label}_f1'] = report[str(idx)]['f1-score']

# Save the metrics dictionary as a joblib file
results_filename = f"{model_dir}/evaluation_results_{timestamp}.joblib"
joblib.dump(evaluation_results, results_filename)

# Also export to Excel for easy inspection
results_df = pd.DataFrame([evaluation_results])
excel_filename = f"{model_dir}/evaluation_results_{timestamp}.xlsx"
results_df.to_excel(excel_filename, index=False)

print(f"\n评估结果已保存:")
print(f"- 评估结果（joblib格式）: {results_filename}")
print(f"- 评估结果（Excel格式）: {excel_filename}")

print(f"\n=== 模型保存和评估完成 ===")
print(f"时间戳: {timestamp}")
print(f"所有文件已保存到 '{model_dir}' 文件夹中")
