# Ensemble-enhanced regression model
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.preprocessing import OneHotEncoder, LabelEncoder, MinMaxScaler, StandardScaler
from sklearn.ensemble import AdaBoostRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.svm import SVR
from sklearn.neural_network import MLPRegressor
import xgboost as xgb
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import mean_squared_error, r2_score
import logging
import pandas as pd
import pickle
import os
import time
import random
import os
from sklearn.impute import SimpleImputer
import pandas as pd
from  config import OUTPUT_PKL_PATH
# Function definitions

def ensemble_model(excel_file_path, model_save_path, input_features, target_feature, save_model=True):
    """
    Train a stacked ensemble regressor on data read from an Excel file.

    Base learners (RandomForest, SVR, MLP, and a grid-searched XGBoost)
    are fitted on the training split; their predictions are appended to
    the original features and an AdaBoostRegressor is trained on the
    augmented matrix as the final model.

    Parameters
    ----------
    excel_file_path : str
        Path of the Excel file containing the dataset.
    model_save_path : str
        Caller-supplied path; only validated for non-emptiness — the
        pickle is actually written under OUTPUT_PKL_PATH (from config).
    input_features : list[str]
        Column names used as model inputs.
    target_feature : str
        Column name of the regression target.
    save_model : bool, default True
        If True, pickle the final AdaBoost booster to disk.

    Returns
    -------
    tuple
        (booster, mse, r2, mse_tr, r2_tr, encoded_mapping, base_preds,
         base_preds_te, y_pred_tr, y_train, y_pred, y_test,
         model_file_path)

    Raises
    ------
    ValueError
        If save_model is True and model_save_path is empty/None.
    """
    try:
        # Load the dataset.
        logging.info("Reading the dataset...")
        df = pd.read_excel(excel_file_path)

        # Preprocess: label-encode any non-numeric input columns.
        logging.info("Preprocessing the dataset...")
        df, encoded_mapping = preprocess_data_labelencoding(df, input_features)

        # Separate features and target.
        X = df[input_features]
        y = df[target_feature]

        # Mean-impute missing feature values.
        imputer = SimpleImputer(strategy='mean')
        X_imputed = imputer.fit_transform(X)

        # Restore DataFrame structure (columns and original index) after
        # the imputer returned a bare ndarray.
        X_imputed_df = pd.DataFrame(X_imputed, columns=X.columns, index=X.index)

        # Hold out 20% of the rows for testing.
        logging.info("Splitting the dataset into training and testing sets...")
        X_train, X_test, y_train, y_test = train_test_split(
            X_imputed_df, y, test_size=0.2, random_state=42)

        # Define and fit the base learners.
        logging.info("Training base learners...")
        rf = RandomForestRegressor(n_estimators=100, random_state=42)
        svm = SVR(kernel='rbf', C=10, gamma=0.1)
        mlp = MLPRegressor(hidden_layer_sizes=(64, 32), activation='relu',
                           max_iter=500, random_state=42)

        param_grid = {
            'n_estimators': [500, 700],
            'learning_rate': [0.01, 0.05],
            'max_depth': [6, 9],
            'min_child_weight': [4, 6],
            'gamma': [0.1, 0.3]}

        xg_reg = xgb.XGBRegressor(objective='reg:squarederror',
                                  enable_categorical=False, tree_method='hist')

        rf.fit(X_train, y_train)
        svm.fit(X_train, y_train)
        mlp.fit(X_train, y_train)

        # Tune XGBoost via grid search, then refit with the best parameters.
        grid_search = GridSearchCV(estimator=xg_reg, param_grid=param_grid,
                                   cv=5, n_jobs=-1, verbose=2)
        grid_search.fit(X_train, y_train)
        best_parameters = grid_search.best_params_
        logging.info("Training the model with the best parameters...")
        best_xg_reg = xgb.XGBRegressor(**best_parameters,
                                       enable_categorical=False,
                                       tree_method='hist')
        best_xg_reg.fit(X_train, y_train)

        base_models = (rf, svm, mlp, best_xg_reg)

        # Augment train and test features with the base learners'
        # predictions (same construction for both splits).
        base_preds = _stack_base_predictions(base_models, X_train)
        base_preds_te = _stack_base_predictions(base_models, X_test)

        # Boost the stacked features with AdaBoost to form the final model.
        logging.info("Boosting base learners with AdaBoost...")
        booster = AdaBoostRegressor(n_estimators=50, random_state=42)
        booster.fit(base_preds, y_train)

        # Evaluate on both splits.
        y_pred_tr = booster.predict(base_preds)
        y_pred = booster.predict(base_preds_te)

        mse_tr = mean_squared_error(y_train, y_pred_tr)
        r2_tr = r2_score(y_train, y_pred_tr)
        mse = mean_squared_error(y_test, y_pred)
        r2 = r2_score(y_test, y_pred)

        # Optionally persist the final model.
        # NOTE(review): only the AdaBoost booster is pickled — the base
        # learners needed to rebuild the stacked feature matrix at
        # inference time are NOT saved; confirm downstream usage.
        model_file_path = None
        if save_model:
            if model_save_path is None or model_save_path.strip() == "":
                raise ValueError("模型保存路径无效")

            # Unique filename: millisecond timestamp plus a random suffix.
            model_filename = f"{int(time.time() * 1000)}_{random.randint(1000, 9999)}_final_model.pkl"
            model_file_path = os.path.join(OUTPUT_PKL_PATH, model_filename)

            logging.info(f"Saving the final model to {model_file_path}...")
            with open(model_file_path, 'wb') as file:
                pickle.dump(booster, file)

        return booster, mse, r2, mse_tr, r2_tr, encoded_mapping, base_preds, base_preds_te, y_pred_tr, y_train, y_pred, y_test, model_file_path

    except Exception as e:
        logging.error(f"An error occurred: {e}")
        raise


def _stack_base_predictions(models, X):
    """Return a copy of X with one prediction column appended per base model.

    Columns are named rf_preds, svm_preds, mlp_preds, xgb_preds (in the
    order the models are passed) and are index-aligned with X so that
    pd.concat keeps rows matched.
    """
    names = ('rf_preds', 'svm_preds', 'mlp_preds', 'xgb_preds')
    pred_frames = [
        pd.DataFrame(model.predict(X).reshape(-1, 1), columns=[name], index=X.index)
        for model, name in zip(models, names)
    ]
    return pd.concat([pd.DataFrame(X)] + pred_frames, axis=1)
def preprocess_data_labelencoding(df, input_features):
    """
    Label-encode the non-numeric input columns of df in place.

    Each input column is first checked for numeric convertibility;
    columns that cannot be converted are transformed with sklearn's
    LabelEncoder (integer codes replace the original categories).

    Parameters
    ----------
    df : pandas.DataFrame
        Dataset; non-numeric input columns are overwritten with codes.
    input_features : list[str]
        Columns to inspect/encode. Every name must exist in df.

    Returns
    -------
    tuple[pandas.DataFrame, dict]
        The (mutated) DataFrame and a mapping
        {column: {original_category: integer_code}} for encoded columns.

    Raises
    ------
    ValueError
        If any name in input_features is missing from df.
    """
    logging.info("Checking columns that cannot be converted to numeric values...")
    non_numeric_columns = []
    label_mappings = {}

    # Identify the columns that fail numeric conversion.
    for col in input_features:
        if col not in df.columns:
            raise ValueError(f"Column '{col}' not found in the dataset.")
        try:
            pd.to_numeric(df[col], errors='raise')
        except ValueError:
            non_numeric_columns.append(col)

    # Label-encode the non-numeric columns and record the mapping.
    if non_numeric_columns:
        logging.info(f"Performing label encoding on columns: {non_numeric_columns}")
        for col in non_numeric_columns:
            encoder = LabelEncoder()
            df[col] = encoder.fit_transform(df[col])
            # Persist category -> integer code so encoded values can be
            # interpreted (and decoded) by callers.
            label_mappings[col] = dict(zip(encoder.classes_, range(len(encoder.classes_))))

    return df, label_mappings
