import pandas as pd
import numpy as np
import xgboost as xgb
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.metrics import mean_squared_error, r2_score
from sklearn.preprocessing import OneHotEncoder, LabelEncoder, MinMaxScaler, StandardScaler
from sklearn.ensemble import RandomForestRegressor, AdaBoostRegressor
from sklearn.svm import SVR
from sklearn.neural_network import MLPRegressor
from keras.models import Model
from keras.layers import Input, Dense, Attention, BatchNormalization, concatenate, MultiHeadAttention
from keras.callbacks import EarlyStopping
from tensorflow.keras.optimizers import Adam
import seaborn as sns
import matplotlib.pyplot as plt
from joblib import dump, load
import pickle
import logging
from sklearn.linear_model import Lasso
import itertools
import numpy as np
from sklearn.metrics import mean_squared_error, r2_score
from sklearn.model_selection import cross_val_score
import xgboost as xgb
import pandas as pd
import logging
import pickle

import os
# === Logging configuration ===

# Set up basic logging configuration for the whole script.
# NOTE: basicConfig only configures the root logger on its first effective
# call; later calls are no-ops unless force=True is passed.
logging.basicConfig(
    level=logging.INFO,  # emit INFO and above (DEBUG messages are suppressed)
    format='%(asctime)s - %(levelname)s - %(message)s'  # timestamp - level - message
)



# Smoke-test the logging configuration.
logging.info("This is an info message.")
logging.error("This is an error message.")
# Function definitions

def preprocess_data_labelencoding(df, input_features):
    """
    Preprocess the dataset: detect feature columns that cannot be converted
    to numeric values and label-encode them in place.

    Parameters
    ----------
    df : pandas.DataFrame
        The dataset to preprocess (mutated in place).
    input_features : list of str
        Names of the feature columns to check/encode.

    Returns
    -------
    (pandas.DataFrame, dict)
        The (mutated) DataFrame and a mapping
        ``{column: {original_value: integer_code}}`` for every column that
        was label-encoded.

    Raises
    ------
    ValueError
        If any name in ``input_features`` is missing from ``df``.
    """
    logging.info("Checking columns that cannot be converted to numeric values...")
    non_numeric_columns = []
    label_mappings = {}

    # Identify feature columns whose values cannot all be parsed as numbers.
    for col in input_features:
        if col not in df.columns:
            raise ValueError(f"Column '{col}' not found in the dataset.")
        try:
            pd.to_numeric(df[col], errors='raise')
        except (ValueError, TypeError):
            # TypeError can be raised for mixed-type object columns on
            # newer pandas versions; treat it the same as non-numeric.
            non_numeric_columns.append(col)

    # Label-encode each non-numeric column: sorted unique values are mapped
    # to 0..n-1, matching sklearn.preprocessing.LabelEncoder's behaviour
    # (LabelEncoder also assigns codes in sorted-unique order).
    if non_numeric_columns:
        logging.info(f"Performing label encoding on columns: {non_numeric_columns}")
        for col in non_numeric_columns:
            classes = np.unique(df[col])
            mapping = {cls: code for code, cls in enumerate(classes)}
            df[col] = df[col].map(mapping)
            # Keep the category -> integer mapping so callers can decode.
            label_mappings[col] = mapping

    return df, label_mappings

def add_autoencoder_layer(X_train):
    """
    Train a small dense autoencoder on standardized features and return the
    activations of its last hidden layer as an enhanced feature matrix.

    Returns the enhanced features together with the fitted StandardScaler so
    the identical normalization can be re-applied at prediction time.
    """
    logging.info("Adding an attention mechanism to enhance features...")

    # Standardize the inputs; the fitted scaler is handed back to the caller.
    feature_scaler = StandardScaler()
    normalized = feature_scaler.fit_transform(X_train)

    n_features = X_train.shape[1]

    # Dense stack 128 -> 64 -> 32 -> 32, then reconstruct all features.
    inputs = Input(shape=(n_features,))
    hidden = inputs
    for units in (128, 64, 32, 32):
        hidden = Dense(units, activation='relu')(hidden)
    outputs = Dense(n_features, activation='linear')(hidden)  # linear reconstruction head

    autoencoder = Model(inputs=inputs, outputs=outputs)

    # Adam with learning rate 0.001, mean-squared-error reconstruction loss.
    autoencoder.compile(optimizer=Adam(learning_rate=0.001), loss='mse')

    # Stop when the training loss plateaus, restoring the best weights.
    stopper = EarlyStopping(monitor='loss', patience=50, restore_best_weights=True)

    logging.info("Training the attention-enhanced autoencoder...")
    autoencoder.fit(normalized, normalized,
                    epochs=5000, batch_size=4, verbose=1, callbacks=[stopper])

    # Expose the last hidden layer (second-to-last layer of the model).
    feature_extractor = Model(inputs=autoencoder.input,
                              outputs=autoencoder.get_layer(index=-2).output)
    enhanced = feature_extractor.predict(normalized)

    return enhanced, feature_scaler


def XGB_reg_label(
    excel_file_path,
    model_save_path,
    input_features,
    target_feature,
    param_grid,
    save_model=True
):
    """
    Train an XGBoost regressor with an exhaustive search over ``param_grid``,
    choosing the best combination by 5-fold cross-validated MSE on the
    training split, then retrain and evaluate the final model.

    Parameters
    ----------
    excel_file_path : str
        Path of the Excel file holding the dataset.
    model_save_path : str
        Where to pickle the final model (only used if ``save_model``).
    input_features : list of str
        Feature column names; non-numeric ones are label-encoded.
    target_feature : list of str
        Target column name(s).
    param_grid : dict
        Mapping of XGBoost parameter name -> list of candidate values; the
        full Cartesian product is tried.
    save_model : bool
        Whether to pickle the retrained best model to ``model_save_path``.

    Returns
    -------
    tuple
        (best_params, test MSE, test R2, train MSE, train R2,
         label-encoding mapping, train predictions, y_train,
         test predictions, y_test)
    """
    try:
        # Load the dataset.
        logging.info("Reading the dataset...")
        df = pd.read_excel(excel_file_path)

        # Preprocess: label-encode any non-numeric feature columns.
        logging.info("Preprocessing the dataset...")
        df, encoded_mapping = preprocess_data_labelencoding(df, input_features)

        # Select feature matrix and target.
        X = df[input_features]
        y = df[target_feature]
        print(X.head())

        # Train/test split (fixed seed for reproducibility).
        logging.info("Splitting the dataset into training and testing sets...")
        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

        # Optional feature enhancement (disabled; raw features are used):
        # X_train_enhanced = add_attention_layer(X_train)
        # X_test_enhanced = add_attention_layer(X_test)
        X_train_enhanced = X_train
        X_test_enhanced = X_test

        # Track the best parameters and per-combination metrics.
        best_score = float('inf')
        best_params = None
        best_train_metrics = {}
        best_test_metrics = {}

        # Exhaustive search over every parameter combination.
        logging.info("Iterating through all hyperparameter combinations...")
        for params in itertools.product(*param_grid.values()):
            params_dict = dict(zip(param_grid.keys(), params))
            logging.info(f"Trying parameters: {params_dict}")

            # Train a model for this combination.
            model = xgb.XGBRegressor(**params_dict, enable_categorical=False, tree_method='hist')
            model.fit(X_train_enhanced, y_train)

            # Training-set metrics.
            y_pred_train = model.predict(X_train_enhanced)
            mse_train = mean_squared_error(y_train, y_pred_train)
            r2_train = r2_score(y_train, y_pred_train)
            best_train_metrics[f"{params_dict}"] = (mse_train, r2_train)

            # Test-set metrics.
            y_pred_test = model.predict(X_test_enhanced)
            mse_test = mean_squared_error(y_test, y_pred_test)
            r2_test = r2_score(y_test, y_pred_test)
            best_test_metrics[f"{params_dict}"] = (mse_test, r2_test)

            # Log metrics for this combination (consumed by the front-end).
            logging.info(f"Training metrics: MSE={mse_train:.4f}, R²={r2_train:.4f}")           # for front-end plotting
            logging.info(f"Testing metrics:  MSE={mse_test:.4f}, R²={r2_test:.4f}")             # for front-end plotting

            # 5-fold cross-validation drives the parameter selection
            # (cross_val_score clones and refits the estimator per fold).
            cv_scores = cross_val_score(model, X_train_enhanced, y_train,
                                         scoring='neg_mean_squared_error', cv=5, n_jobs=-1)
            avg_cv_mse = -np.mean(cv_scores)
            logging.info(f"Cross-validation average MSE: {avg_cv_mse:.4f}")                     # for front-end plotting

            # Keep the combination with the lowest cross-validated MSE.
            if avg_cv_mse < best_score:
                best_score = avg_cv_mse
                best_params = params_dict

        # Retrain the final model with the best parameters.
        logging.info(f"Best parameters selected: {best_params}")
        final_model = xgb.XGBRegressor(**best_params, enable_categorical=False, tree_method='hist')
        final_model.fit(X_train_enhanced, y_train)

        # Optionally pickle the final model.
        if save_model:
            # Resolve to an absolute path for the log message.
            absolute_path = os.path.abspath(model_save_path)

            logging.info(f"Saving the final model to {absolute_path}...")
            with open(absolute_path, 'wb') as file:
                pickle.dump(final_model, file)

        # Final evaluation on both splits.
        y_pred_final_train = final_model.predict(X_train_enhanced)
        y_pred_final_test = final_model.predict(X_test_enhanced)
        final_mse_train = mean_squared_error(y_train, y_pred_final_train)
        final_r2_train = r2_score(y_train, y_pred_final_train)
        final_mse_test = mean_squared_error(y_test, y_pred_final_test)
        final_r2_test = r2_score(y_test, y_pred_final_test)

        return (best_params, final_mse_test, final_r2_test, final_mse_train, final_r2_train,
                encoded_mapping, y_pred_final_train, y_train, y_pred_final_test, y_test)

    except Exception as e:
        logging.error(f"An error occurred: {e}", exc_info=True)
        raise

def ensemble_model(excel_file_path,
    model_save_path,
    input_features,
    target_feature,
    save_model=True):

    """
    Stacked ensemble model: RandomForest, SVR, MLP and a grid-searched
    XGBoost are trained as base learners; their predictions are appended to
    the raw features and an AdaBoost meta-learner is trained on the result.

    Returns
    -------
    tuple
        (booster, test MSE, test R2, train MSE, train R2, encoding mapping,
         stacked train features, stacked test features, train predictions,
         y_train, test predictions, y_test)
    """

    try:
        # Load the dataset.
        logging.info("Reading the dataset...")
        df = pd.read_excel(excel_file_path)
        if df.isnull().any().any():
            print("DataFrame contains missing values.")
        else:
            print("DataFrame does not contain missing values.")
        # Preprocess: label-encode any non-numeric feature columns.
        logging.info("Preprocessing the dataset...")
        df, encoded_mapping = preprocess_data_labelencoding(df, input_features)

        if df.isnull().any().any():
            print("DataFrame contains missing values.")
        else:
            print("DataFrame does not contain missing values.")

        # Select feature matrix and target.
        X = df[input_features]
        y = df[target_feature]


        # Train/test split (fixed seed for reproducibility).
        logging.info("Splitting the dataset into training and testing sets...")
        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
        enhanced_features = X_train
        test_enhanced_features = X_test

        if X_train.isnull().any().any():
            print("DataFrame contains missing values.")
        else:
            print("DataFrame does not contain missing values.")

        # Define the base learners.
        logging.info("Training base learners...")
        rf = RandomForestRegressor(n_estimators=100, random_state=42)
        svm = SVR(kernel='rbf', C=10, gamma=0.1)
        mlp = MLPRegressor(hidden_layer_sizes=(64, 32), activation='relu', max_iter=500, random_state=42)

        # Grid for the XGBoost base learner.
        param_grid = {
            'n_estimators': [500, 700],
            'learning_rate': [0.01, 0.05],
            'max_depth': [6, 9],
            'min_child_weight': [4, 6],
            'gamma': [0.1, 0.3]}
        xg_reg = xgb.XGBRegressor(objective='reg:squarederror', enable_categorical=False, tree_method='hist')


        # Fit each base learner on the training features.
        rf.fit(enhanced_features, y_train)
        svm.fit(enhanced_features, y_train)
        mlp.fit(enhanced_features, y_train)

        # Grid-search the XGBoost base learner and refit with the best params.
        grid_search = GridSearchCV(estimator=xg_reg, param_grid=param_grid, cv=5, n_jobs=-1, verbose=2)
        grid_search.fit(enhanced_features, y_train)
        best_parameters = grid_search.best_params_
        logging.info("Training the model with the best parameters...")
        best_xg_reg = xgb.XGBRegressor(**best_parameters, enable_categorical=False, tree_method='hist')
        best_xg_reg.fit(enhanced_features, y_train)

        # Collect the base learners' training-set predictions.
        rf_preds = rf.predict(enhanced_features).reshape(-1, 1)  # reshape to column vector
        svm_preds = svm.predict(enhanced_features).reshape(-1, 1)
        mlp_preds = mlp.predict(enhanced_features).reshape(-1, 1)
        xgb_preds = best_xg_reg.predict(enhanced_features).reshape(-1, 1)
        rf_preds_df = pd.DataFrame(rf_preds, columns=['rf_preds'])
        svm_preds_df = pd.DataFrame(svm_preds, columns=['svm_preds'])
        mlp_preds_df = pd.DataFrame(mlp_preds, columns=['mlp_preds'])
        xgb_preds_df = pd.DataFrame(xgb_preds, columns=['xgb_preds'])

        # Align prediction indices with X_train so pd.concat matches rows.
        rf_preds_df.index = X_train.index
        svm_preds_df.index = X_train.index
        mlp_preds_df.index = X_train.index
        xgb_preds_df.index = X_train.index

        # Stack base-learner predictions next to the raw features.
        base_preds = pd.concat([pd.DataFrame(X_train), rf_preds_df, svm_preds_df, mlp_preds_df, xgb_preds_df], axis=1)
        if base_preds.isnull().any().any():
            print("DataFrame contains missing values.")
            sns.heatmap(base_preds.isnull(), cbar=False, cmap="viridis")
            plt.title("Heatmap of Missing Values")
            plt.show()
        else:
            print("DataFrame does not contain missing values.")

        # Boost over the stacked features with AdaBoost as the meta-learner.
        # NOTE(review): `base_estimator` was renamed to `estimator` in
        # scikit-learn 1.2 and removed in 1.4 — confirm the pinned sklearn
        # version still accepts this keyword.
        logging.info("Boosting base learners with AdaBoost...")
        booster = AdaBoostRegressor(base_estimator=RandomForestRegressor(), n_estimators=50, random_state=42)
        booster.fit(base_preds, y_train)

        # Build the same stacked features for the test split.
        rf_preds_te = rf.predict(test_enhanced_features).reshape(-1, 1)  # reshape to column vector
        svm_preds_te = svm.predict(test_enhanced_features).reshape(-1, 1)
        mlp_preds_te = mlp.predict(test_enhanced_features).reshape(-1, 1)
        xgb_preds_te = best_xg_reg.predict(test_enhanced_features).reshape(-1, 1)
        rf_preds_df_te = pd.DataFrame(rf_preds_te, columns=['rf_preds'])
        svm_preds_df_te = pd.DataFrame(svm_preds_te, columns=['svm_preds'])
        mlp_preds_df_te = pd.DataFrame(mlp_preds_te, columns=['mlp_preds'])
        xgb_preds_df_te = pd.DataFrame(xgb_preds_te, columns=['xgb_preds'])
        rf_preds_df_te.index = X_test.index
        svm_preds_df_te.index = X_test.index
        mlp_preds_df_te.index = X_test.index
        xgb_preds_df_te.index = X_test.index
        base_preds_te = pd.concat([pd.DataFrame(X_test), rf_preds_df_te, svm_preds_df_te, mlp_preds_df_te, xgb_preds_df_te], axis=1)


        # Evaluate the meta-model on both splits.
        y_pred_tr = booster.predict(base_preds)
        y_pred = booster.predict(base_preds_te)
        mse_tr = mean_squared_error(y_train, y_pred_tr)
        r2_tr = r2_score(y_train, y_pred_tr)
        mse = mean_squared_error(y_test, y_pred)
        r2 = r2_score(y_test, y_pred)

        # Optionally pickle the trained meta-model.
        if save_model:
            # Resolve to an absolute path for the log message.
            absolute_path = os.path.abspath(model_save_path)

            logging.info(f"Saving the final model to {absolute_path}...")
            with open(absolute_path, 'wb') as file:
                pickle.dump(booster, file)

        return booster, mse, r2, mse_tr, r2_tr, encoded_mapping, base_preds, base_preds_te, y_pred_tr, y_train, y_pred, y_test

    except Exception as e:
        logging.error(f"An error occurred: {e}")
        raise

def XGB_reg_label_autoencoder(
    excel_file_path,
    model_save_path,
    input_features,
    target_feature,
    param_grid,
    save_model=True
):
    """
    Train an XGBoost regressor on autoencoder-augmented, Lasso-selected
    features, grid-searching ``param_grid`` by 5-fold cross-validated MSE.

    Pipeline: label-encode non-numeric features -> append autoencoder-derived
    features -> train/test split -> Lasso (L1) feature selection -> exhaustive
    grid search -> retrain the best model.

    Returns
    -------
    tuple
        (best_params, test MSE, test R2, train MSE, train R2,
         label-encoding mapping, train predictions, y_train,
         test predictions, y_test)
    """
    try:
        # Load the dataset.
        logging.info("Reading the dataset...")
        df = pd.read_excel(excel_file_path)

        # Preprocess: label-encode any non-numeric feature columns.
        logging.info("Preprocessing the dataset...")
        df, encoded_mapping = preprocess_data_labelencoding(df, input_features)

        # Select feature matrix and target.
        X = df[input_features]
        y = df[target_feature]
        print(X.head())

        # Augment the raw features with autoencoder-derived features.
        # NOTE(review): the autoencoder is fitted on the FULL dataset before
        # the train/test split below, so test rows influence the learned
        # features (possible data leakage) — confirm this is intended.
        logging.info("Splitting the dataset into training and testing sets...")
        X_enhanced, scaler = add_autoencoder_layer(X)
        if not isinstance(X_enhanced, pd.DataFrame):
            X_enhanced = pd.DataFrame(X_enhanced, columns=[f"enhanced_{i}" for i in range(X_enhanced.shape[1])])

        # Concatenate raw and autoencoder features column-wise.
        X = pd.concat([X, X_enhanced], axis=1)

        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

        # Feature selection with an L1-regularized linear model: features
        # whose Lasso coefficient is exactly zero are dropped.
        lasso = Lasso(alpha=0.01)  # alpha controls the regularization strength
        lasso.fit(X_train, y_train)

        # Keep only the features Lasso assigned a non-zero coefficient.
        selected_features = X_train.columns[lasso.coef_ != 0]
        print("Selected features:", selected_features)

        X_train_enhanced= X_train[selected_features]
        X_test_enhanced = X_test[selected_features]


        # Track the best parameters and per-combination metrics.
        best_score = float('inf')
        best_params = None
        best_train_metrics = {}
        best_test_metrics = {}

        # Exhaustive search over every parameter combination.
        logging.info("Iterating through all hyperparameter combinations...")
        for params in itertools.product(*param_grid.values()):
            params_dict = dict(zip(param_grid.keys(), params))
            logging.info(f"Trying parameters: {params_dict}")

            # Train a model for this combination.
            model = xgb.XGBRegressor(**params_dict, enable_categorical=False, tree_method='hist')
            model.fit(X_train_enhanced, y_train)

            # Training-set metrics.
            y_pred_train = model.predict(X_train_enhanced)
            mse_train = mean_squared_error(y_train, y_pred_train)
            r2_train = r2_score(y_train, y_pred_train)
            best_train_metrics[f"{params_dict}"] = (mse_train, r2_train)

            # Test-set metrics.
            y_pred_test = model.predict(X_test_enhanced)
            mse_test = mean_squared_error(y_test, y_pred_test)
            r2_test = r2_score(y_test, y_pred_test)
            best_test_metrics[f"{params_dict}"] = (mse_test, r2_test)

            # Log metrics for this combination (consumed by the front-end).
            logging.info(f"Training metrics: MSE={mse_train:.4f}, R²={r2_train:.4f}")           # for front-end plotting
            logging.info(f"Testing metrics:  MSE={mse_test:.4f}, R²={r2_test:.4f}")             # for front-end plotting

            # 5-fold cross-validation drives the parameter selection.
            cv_scores = cross_val_score(model, X_train_enhanced, y_train,
                                         scoring='neg_mean_squared_error', cv=5, n_jobs=-1)
            avg_cv_mse = -np.mean(cv_scores)
            logging.info(f"Cross-validation average MSE: {avg_cv_mse:.4f}")                     # for front-end plotting

            # Keep the combination with the lowest cross-validated MSE.
            if avg_cv_mse < best_score:
                best_score = avg_cv_mse
                best_params = params_dict

        # Retrain the final model with the best parameters.
        logging.info(f"Best parameters selected: {best_params}")
        final_model = xgb.XGBRegressor(**best_params, enable_categorical=False, tree_method='hist')
        final_model.fit(X_train_enhanced, y_train)

        # Persist the scaler and the final model.
        if save_model:
            absolute_path = os.path.abspath(model_save_path)
            # NOTE(review): the log message mentions ``absolute_path`` but the
            # scaler is written to the fixed file 'scaler.joblib' — confirm
            # this mismatch is intended.
            logging.info(f"Saving the scaler to {absolute_path}...")
            dump(scaler, 'scaler.joblib')
            logging.info(f"Saving the final model to {absolute_path}...")
            with open(absolute_path, 'wb') as file:
                pickle.dump(final_model, file)


        # Final evaluation on both splits.
        logging.info("Evaluating the model on the test set...")

        y_pred_final_train = final_model.predict(X_train_enhanced)
        y_pred_final_test = final_model.predict(X_test_enhanced)
        final_mse_train = mean_squared_error(y_train, y_pred_final_train)
        final_r2_train = r2_score(y_train, y_pred_final_train)
        final_mse_test = mean_squared_error(y_test, y_pred_final_test)
        final_r2_test = r2_score(y_test, y_pred_final_test)

        return (best_params, final_mse_test, final_r2_test, final_mse_train, final_r2_train,
                encoded_mapping, y_pred_final_train, y_train, y_pred_final_test, y_test)


    except Exception as e:
        logging.error(f"An error occurred: {e}")
        raise







##########  Front-end section  ############
# 1. Choose the database used for training (currently this must map to the
#    database's Excel file).
excel_file_path = '抗压强度.xlsx'

# 2. Choose the input and output features.
# 2.1 Input feature column names (must match the Excel sheet headers).
Input_feature = ['地区',
 '水用量（kg/m3）',
 '水泥ID',
 '水泥用量（kg/m3）',
 '粉煤灰用量（kg/m3）',
 '砂ID',
 '砂用量（kg/m3）',
 '石ID',
 '石用量（kg/m3）',
 '减水剂ID',
 '减水剂掺量（%）',
 '增效剂ID',
 '增效剂掺量（%）']
# 2.2 Output (target) feature column name.
Target_feature = ['7d抗压（MPa）']

# 3. Choose the model.
Selected_model = '极限梯度增强树'  # options: '极限梯度增强树', '集成增强模型', '自编码特征筛选模型'

# 4. Choose the hyperparameters.
# 4.1 The front-end chooses whether to run the default hyperparameter search.
IsDefined = False  # True -> use the built-in default grid; False -> use the manual grid from 4.2 below.

# 4.2 Hyperparameters chosen manually by the user (passed in from the front-end).
if not IsDefined:
    param_grid = {
        'n_estimators': [700,600],
        'learning_rate': [0.01,0.1],
        'max_depth': [9],
        'min_child_weight': [4],
        'gamma':[0.3]
    }

# 5. Choose the model save file name.

model_save_path = 'xgb_model.pkl'




#########  Back-end section  ###########
# 6. Main driver: the printed output below is consumed by the front-end
#    for display/plotting.
if Selected_model == '极限梯度增强树':
    if IsDefined:
        # Built-in "default hyperparameter optimization" search grid.
        param_grid = {
            'n_estimators': [500, 600, 700,800,900],
            'learning_rate': [0.01, 0.1, 0.2],
            'max_depth': [7,8,9],
            'min_child_weight': [3, 4, 5],
            'gamma':[0.3, 0.4, 0.5]
        }
        best_params, mse, r2, mse_tr, r2_tr, mapping, y_pred_tr, y_train, y_pred, y_test = XGB_reg_label(
            excel_file_path= excel_file_path,
            model_save_path=model_save_path,
            input_features=Input_feature,
            target_feature=Target_feature,
            param_grid=param_grid,
            save_model=True
        )
        print("Best Parameters:", best_params)

        print("MSE of training set:", mse_tr)    # training-set MSE and R2
        print("R² of training set:", r2_tr)
        print("MSE of test set:", mse)    # test-set MSE and R2
        print("R² of test set:", r2)
        print("Encoded Mapping:", mapping)

        print('Plot for Prediction Value of Training Set:', y_pred_tr)     # scatter plot 1: y_pred_tr on y-axis ("Prediction Value") vs y_train on x-axis ("True Value")
        print('Plot for True Value of Training Set:', y_train)
        print('Plot for Prediction Value of Test Set:', y_pred)            # scatter plot 2: y_pred on y-axis ("Prediction Value") vs y_test on x-axis ("True Value")
        print('Plot for True Value of Test Set:', y_test)

    if not IsDefined:
        # Use the manual grid provided by the front-end (section 4.2).
        best_params, mse, r2, mse_tr, r2_tr, mapping,y_pred_tr, y_train, y_pred, y_test = XGB_reg_label(
            excel_file_path= excel_file_path,
            model_save_path=model_save_path,
            input_features=Input_feature,
            target_feature=Target_feature,
            param_grid=param_grid,
            save_model=True
        )

        print("Best Parameters:", best_params)

        print("MSE of training set:", mse_tr)    # training-set MSE and R2
        print("R² of training set:", r2_tr)
        print("MSE of test set:", mse)    # test-set MSE and R2
        print("R² of test set:", r2)
        print("Encoded Mapping:", mapping)

        print('Plot for Prediction Value of Training Set:', y_pred_tr)     # scatter plot 1: y_pred_tr on y-axis ("Prediction Value") vs y_train on x-axis ("True Value")
        print('Plot for True Value of Training Set:', y_train)
        print('Plot for Prediction Value of Test Set:', y_pred)            # scatter plot 2: y_pred on y-axis ("Prediction Value") vs y_test on x-axis ("True Value")
        print('Plot for True Value of Test Set:', y_test)

elif Selected_model == '集成增强模型':
        booster, mse, r2,mse_tr, r2_tr, mapping, base_preds, base_preds_te, y_pred_tr, y_train, y_pred, y_test = ensemble_model(
        excel_file_path= excel_file_path,
        model_save_path=model_save_path,
        input_features=Input_feature,
        target_feature=Target_feature,
        save_model=True
        )



        print("MSE of training set:", mse_tr)    # training-set MSE and R2
        print("R² of training set:", r2_tr)
        print("MSE of test set:", mse)    # test-set MSE and R2
        print("R² of test set:", r2)

        print("Encoded Mapping:", mapping)

        print('Plot for Prediction Value of Training Set:', y_pred_tr)     # scatter plot 1: y_pred_tr on y-axis ("Prediction Value") vs y_train on x-axis ("True Value")
        print('Plot for True Value of Training Set:', y_train)
        print('Plot for Prediction Value of Test Set:', y_pred)            # scatter plot 2: y_pred on y-axis ("Prediction Value") vs y_test on x-axis ("True Value")
        print('Plot for True Value of Test Set:', y_test)

elif Selected_model == '自编码特征筛选模型':
    if IsDefined:
        # Built-in "default hyperparameter optimization" search grid.
        param_grid = {
            'n_estimators': [500, 600, 700,800,900],
            'learning_rate': [0.01, 0.1, 0.2],
            'max_depth': [7,8,9],
            'min_child_weight': [3, 4, 5],
            'gamma':[0.3, 0.4, 0.5]
        }
        best_params, mse, r2, mse_tr, r2_tr,mapping, y_pred_tr, y_train, y_pred, y_test = XGB_reg_label_autoencoder(
            excel_file_path= excel_file_path,
            model_save_path='xgb_model.pkl',
            input_features=Input_feature,
            target_feature=Target_feature,
            param_grid=param_grid,
            save_model=True
        )

        print("Best Parameters:", best_params)
        print("MSE of training set:", mse_tr)    # training-set MSE and R2
        print("R² of training set:", r2_tr)
        print("MSE of test set:", mse)    # test-set MSE and R2
        print("R² of test set:", r2)
        print("Encoded Mapping:", mapping)

        print('Plot for Prediction Value of Training Set:', y_pred_tr)     # scatter plot 1: y_pred_tr on y-axis ("Prediction Value") vs y_train on x-axis ("True Value")
        print('Plot for True Value of Training Set:', y_train)
        print('Plot for Prediction Value of Test Set:', y_pred)            # scatter plot 2: y_pred on y-axis ("Prediction Value") vs y_test on x-axis ("True Value")
        print('Plot for True Value of Test Set:', y_test)

    if not IsDefined:
        # Use the manual grid provided by the front-end (section 4.2).
        best_params, mse, r2, mse_tr, r2_tr, mapping, y_pred_tr, y_train, y_pred, y_test = XGB_reg_label_autoencoder(
            excel_file_path= excel_file_path,
            model_save_path='xgb_model.pkl',
            input_features=Input_feature,
            target_feature=Target_feature,
            param_grid=param_grid,
            save_model=True
        )

        print("Best Parameters:", best_params)
        print("MSE of training set:", mse_tr)    # training-set MSE and R2
        print("R² of training set:", r2_tr)
        print("MSE of test set:", mse)    # test-set MSE and R2
        print("R² of test set:", r2)
        print("Encoded Mapping:", mapping)

        print('Plot for Prediction Value of Training Set:', y_pred_tr)     # scatter plot 1: y_pred_tr on y-axis ("Prediction Value") vs y_train on x-axis ("True Value")
        print('Plot for True Value of Training Set:', y_train)
        print('Plot for Prediction Value of Test Set:', y_pred)            # scatter plot 2: y_pred on y-axis ("Prediction Value") vs y_test on x-axis ("True Value")
        print('Plot for True Value of Test Set:', y_test)

# Hyperparameter optimization should report the best parameters and the
# optimization process: print the train/test MSE and R2 for every run, and
# also print the directory where the model was saved.





############  Model comparison module  ###########


# Baseline 1 calls the XGBoost function directly with default parameters; the baseline comparison has no hyperparameter optimization; the ensemble model is likewise compared against XGBoost.
def XGB_reg_label_base(
    excel_file_path,
    model_save_path,
    input_features,
    target_feature,
    save_model=True
):
    """
    Baseline comparison model: run the same data pipeline as the tuned
    variants but train an XGBoost regressor with default hyperparameters.

    Returns (test MSE, test R2, train MSE, train R2,
             train predictions, y_train, test predictions, y_test).
    """
    logging.info("模型对比环节........................")

    # Load and preprocess the dataset (label-encode non-numeric columns).
    df = pd.read_excel(excel_file_path)
    df, encoded_mapping = preprocess_data_labelencoding(df, input_features)

    # Split features/target using the same 80/20 split as elsewhere.
    features = df[input_features]
    target = df[target_feature]
    X_train, X_test, y_train, y_test = train_test_split(
        features, target, test_size=0.2, random_state=42)

    # Optional feature enhancement hook (disabled; raw features are used):
    # train_features = add_attention_layer(X_train)
    # test_features = add_attention_layer(X_test)
    train_features, test_features = X_train, X_test

    # Default-parameter XGBoost regressor serves as the comparison baseline.
    final_model = xgb.XGBRegressor(enable_categorical=False, tree_method='hist')
    final_model.fit(train_features, y_train)

    # Optionally pickle the baseline model.
    if save_model:
        absolute_path = os.path.abspath(model_save_path)

        logging.info(f"Saving the final model to {absolute_path}...")
        with open(absolute_path, 'wb') as file:
            pickle.dump(final_model, file)

    # Evaluate on both splits.
    train_preds = final_model.predict(train_features)
    test_preds = final_model.predict(test_features)
    return (
        mean_squared_error(y_test, test_preds),
        r2_score(y_test, test_preds),
        mean_squared_error(y_train, train_preds),
        r2_score(y_train, train_preds),
        train_preds, y_train, test_preds, y_test,
    )

def XGB_reg_label_autoencoder_base(
    excel_file_path,
    model_save_path,
    input_features,
    target_feature,
    save_model=True
):
    """
    Baseline for the autoencoder pipeline: autoencoder feature enhancement
    plus Lasso feature selection, then an XGBoost regressor with default
    hyperparameters (no grid search).

    Returns (test MSE, test R2, train MSE, train R2,
             train predictions, y_train, test predictions, y_test).
    """
    logging.info("模型对比环节........................")
    df = pd.read_excel(excel_file_path)

    # Label-encode any non-numeric feature columns.
    df, encoded_mapping = preprocess_data_labelencoding(df, input_features)

    X = df[input_features]
    y = df[target_feature]
    print(X.head())

    # Augment raw features with autoencoder-derived ones (fitted on all rows).
    X_enhanced, scaler = add_autoencoder_layer(X)
    if not isinstance(X_enhanced, pd.DataFrame):
        X_enhanced = pd.DataFrame(
            X_enhanced,
            columns=[f"enhanced_{i}" for i in range(X_enhanced.shape[1])])
    X = pd.concat([X, X_enhanced], axis=1)

    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.2, random_state=42)

    # L1-regularized linear model: drop features with zero Lasso coefficient.
    selector = Lasso(alpha=0.01)  # alpha controls the regularization strength
    selector.fit(X_train, y_train)
    selected_features = X_train.columns[selector.coef_ != 0]
    print("Selected features:", selected_features)

    train_features = X_train[selected_features]
    test_features = X_test[selected_features]

    # Default-parameter XGBoost regressor as the comparison baseline.
    final_model = xgb.XGBRegressor(enable_categorical=False, tree_method='hist')
    final_model.fit(train_features, y_train)

    # Optionally pickle the baseline model.
    if save_model:
        absolute_path = os.path.abspath(model_save_path)

        logging.info(f"Saving the final model to {absolute_path}...")
        with open(absolute_path, 'wb') as file:
            pickle.dump(final_model, file)

    # Evaluate on both splits.
    train_preds = final_model.predict(train_features)
    test_preds = final_model.predict(test_features)
    return (
        mean_squared_error(y_test, test_preds),
        r2_score(y_test, test_preds),
        mean_squared_error(y_train, train_preds),
        r2_score(y_train, train_preds),
        train_preds, y_train, test_preds, y_test,
    )


# 1. Train the matching untuned baseline model for the selected model.
# BUG FIX: the original condition was
#   `Selected_model == '极限梯度增强树' or '集成增强模型'`
# which is always true because a non-empty string literal is truthy, making
# the elif branch unreachable; a membership test expresses the intent.
if Selected_model in ('极限梯度增强树', '集成增强模型'):
    mse, r2, mse_tr, r2_tr, y_pred_tr, y_train, y_pred, y_test = XGB_reg_label_base(
    excel_file_path= excel_file_path,
    model_save_path='base_model.pkl',
    input_features=Input_feature,
    target_feature=Target_feature,
    save_model=True)

    print("MSE of training set:", mse_tr)    # training-set MSE and R2
    print("R² of training set:", r2_tr)
    print("MSE of test set:", mse)    # test-set MSE and R2
    print("R² of test set:", r2)


    print('Plot for Prediction Value of Training Set:', y_pred_tr)     # scatter plot 1: y_pred_tr on y-axis ("Prediction Value") vs y_train on x-axis ("True Value")
    print('Plot for True Value of Training Set:', y_train)
    print('Plot for Prediction Value of Test Set:', y_pred)            # scatter plot 2: y_pred on y-axis ("Prediction Value") vs y_test on x-axis ("True Value")
    print('Plot for True Value of Test Set:', y_test)

elif Selected_model == '自编码特征筛选模型':
    mse, r2, mse_tr, r2_tr, y_pred_tr, y_train, y_pred, y_test = XGB_reg_label_autoencoder_base(
    excel_file_path= excel_file_path,
    model_save_path='base_model.pkl',
    input_features=Input_feature,
    target_feature=Target_feature,
    save_model=True)

    print("MSE of training set:", mse_tr)    # training-set MSE and R2
    print("R² of training set:", r2_tr)
    print("MSE of test set:", mse)    # test-set MSE and R2
    print("R² of test set:", r2)


    print('Plot for Prediction Value of Training Set:', y_pred_tr)     # scatter plot 1: y_pred_tr on y-axis ("Prediction Value") vs y_train on x-axis ("True Value")
    print('Plot for True Value of Training Set:', y_train)
    print('Plot for Prediction Value of Test Set:', y_pred)            # scatter plot 2: y_pred on y-axis ("Prediction Value") vs y_test on x-axis ("True Value")
    print('Plot for True Value of Test Set:', y_test)

# 2. Evaluate a locally uploaded model (most basic interface; it will raise
#    an error if the data contains text columns).

models_directory_1 = "./"

# NOTE(review): pickle.load executes arbitrary code embedded in the file —
# only load model files from trusted sources.
with open(os.path.join(models_directory_1,'7d抗压强度.pkl'), 'rb') as file:
    load_model = pickle.load(file)

# Load the dataset.
logging.info("Reading the dataset...")
df = pd.read_excel(excel_file_path)

# Select feature/target columns.
# NOTE(review): unlike the training paths, no label-encoding is applied here,
# so prediction will fail on non-numeric feature columns — confirm the
# uploaded model expects raw (already numeric) columns.
X = df[Input_feature]
y = df[Target_feature]

# Same 80/20 split (same seed) as used for training elsewhere.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

# Evaluate the uploaded model on both splits.
y_pred_tr = load_model.predict(X_train)
y_pred = load_model.predict(X_test)
mse_tr = mean_squared_error(y_train, y_pred_tr)
r2_tr = r2_score(y_train, y_pred_tr)
mse = mean_squared_error(y_test, y_pred)
r2 = r2_score(y_test, y_pred)


print("MSE of training set:", mse_tr)    # training-set MSE and R2
print("R² of training set:", r2_tr)
print("MSE of test set:", mse)    # test-set MSE and R2
print("R² of test set:", r2)


print('Plot for Prediction Value of Training Set:', y_pred_tr)     # scatter plot 1: y_pred_tr on y-axis ("Prediction Value") vs y_train on x-axis ("True Value")
print('Plot for True Value of Training Set:', y_train)
print('Plot for Prediction Value of Test Set:', y_pred)            # scatter plot 2: y_pred on y-axis ("Prediction Value") vs y_test on x-axis ("True Value")
print('Plot for True Value of Test Set:', y_test)
