# Python 3.10.14

# joblib==1.4.2
# numpy==2.2.4
# pandas==2.2.3
# scikit-learn==1.6.1
# xgboost==3.0.5

from sklearn.ensemble import RandomForestRegressor, ExtraTreesRegressor
from sklearn.linear_model import Ridge, LinearRegression
from sklearn.metrics import r2_score, mean_squared_error
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.preprocessing import OneHotEncoder, MinMaxScaler
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
from xgboost import XGBRegressor
from sklearn.multioutput import MultiOutputRegressor
from pathlib import Path
import numpy as np
import pandas as pd
import ast
import json
import re
import joblib


class MfsUdbPredictor:
    """End-to-end trainer/predictor for tunnel-support surrogate models.

    Reads an Excel dataset whose ``mfs-*`` / ``udb-*`` columns hold
    stringified Python lists, expands them into per-component scalar
    target columns, trains one model per target family — MFS structural
    forces, UDB displacement/break depth, CMS cable max stress — picks
    the best candidate per family (by cross-validated R²), persists the
    models plus metadata under ``model_dir``, and serves JSON
    predictions from the saved artifacts.

    Typical flow: ``load_and_preprocess_data()`` -> ``compare_train()``
    for training, or ``predict_from_input_dict()`` for inference.
    """

    def __init__(
            self, test_size=0.2, random_state=0, data_dir="data", model_dir='model',
            file_name="res0613.xlsx",
            mfs_list=[""] * 4, udb_list=[""] * 6, cms_list=[""] * 1,
            start_index="steel_frame-type", end_index="B-18",
            mfs_model='xgb', udb_model='xgb', cms_model='xgb'):
        """Configure paths, target dimensions and candidate models.

        ``mfs_list``/``udb_list``/``cms_list`` carry human-readable
        component names used in reports; their *lengths* define how many
        scalars each list-valued Excel column is expanded into.
        ``start_index``/``end_index`` name the first/last feature column
        of the sheet (inclusive).

        NOTE(review): the three list defaults are mutable default
        arguments shared across calls; they are only read here, but the
        usual ``None``-sentinel pattern would be safer.
        """
        # Candidate estimator classes, keyed by short model name.
        self.MODEL_MAP = {
            'xgb': XGBRegressor,
            'ridge': Ridge,
            'lr': LinearRegression,
        }
        # Constructor kwargs used when instantiating each candidate.
        self.DEFAULT_PARAMS = {
            'xgb':   {'n_estimators': 350, 'learning_rate': 0.1, 'max_depth': 6, 'random_state': random_state},
            'ridge': {'alpha': 1.0},
            'lr': {},
        }
        # Resolve the data/model directories relative to this source file.
        self.base_dir = Path(__file__).parent
        self.data_dir = self.base_dir / data_dir
        self.file_path = self.data_dir / file_name
        self.model_dir = self.base_dir / model_dir
        self.model_dir.mkdir(parents=True, exist_ok=True)
        self.start_index = start_index
        self.end_index = end_index
        self.mfs_list = mfs_list
        self.udb_list = udb_list
        self.cms_list = cms_list
        # Number of scalar components packed into each mfs/udb/cms column.
        self.mfs_dim = len(mfs_list)
        self.udb_dim = len(udb_list)
        self.cms_dim = len(cms_list)
        # NOTE(review): these selections are stored but never consulted —
        # compare_train() always evaluates every key in MODEL_MAP.
        self.mfs_model_name = mfs_model
        self.udb_model_name = udb_model
        self.cms_model_name = cms_model
        # State populated later by load_and_preprocess_data()/training.
        self.df = None
        self.mfs_columns = None
        self.udb_columns = None
        self.cms_columns = None
        self.X = None
        self.y_mfs = None
        self.y_udb = self.y_cms = None
        self.numeric_features = None
        self.categorical_features = None
        self.preprocessor = None
        self.model_mfs = self.model_udb = self.model_cms = None
        self.test_size = test_size
        self.random_state = random_state
        self.X_train = self.X_test = None
        self.y_train_mfs = self.y_test_mfs = None
        self.y_train_udb = self.y_test_udb = None
        self.y_train_cms = self.y_test_cms = None
        self.predictions_mfs = self.predictions_udb = None
        self.cable_max_stress = None
        self.feature_columns = None
        self.y_cols_mfs = None
        self.y_cols_udb = None

    def _build_pipeline(self, model_key: str, is_one_dim: bool = False):
        """Build a preprocessor+regressor Pipeline for *model_key*.

        Multi-output targets are wrapped in MultiOutputRegressor unless
        ``is_one_dim`` is True (single scalar target, e.g. cable stress).
        Note that sklearn Pipelines fit their step objects in place, so
        every pipeline built here shares (and refits) the same
        ``self.preprocessor`` instance.
        """
        mk = model_key.lower()
        if mk == 'xgb':
            xgb_reg = XGBRegressor(**self.DEFAULT_PARAMS[mk])
            regressor = xgb_reg if is_one_dim else MultiOutputRegressor(xgb_reg)
            return Pipeline([('preprocessor', self.preprocessor), ('regressor', regressor)])
        model_cls = self.MODEL_MAP[mk]
        params = self.DEFAULT_PARAMS.get(mk, {})
        # Forest models need a seed; this branch is currently unreachable
        # because MODEL_MAP holds only xgb/ridge/lr — kept for extension.
        if model_cls in (RandomForestRegressor, ExtraTreesRegressor):
            params = {**params, 'random_state': self.random_state}
        base_model = model_cls(**params)
        regressor = base_model if is_one_dim else MultiOutputRegressor(base_model)
        return Pipeline([('preprocessor', self.preprocessor), ('regressor', regressor)])

    def get_col_id_list(self, col: str, n: int) -> list:
        """Return the expanded scalar column names ``col_0`` .. ``col_{n-1}``."""
        return [f"{col}_{i}" for i in range(n)]

    def load_and_preprocess_data(self):
        """Read the Excel file, expand list-valued targets and build X/y.

        Side effects: populates ``df``, ``X``, ``y_mfs``/``y_udb``/``y_cms``,
        the feature/target column lists, and an (unfitted)
        ColumnTransformer in ``self.preprocessor``.
        """
        self.df = pd.read_excel(self.file_path)
        # mfs*/udb* cells are stored as stringified Python lists; parse
        # them safely with literal_eval (never eval).
        for col in self.df.columns:
            if (col.startswith('mfs') or col.startswith('udb')) and self.df[col].dtype == object:
                self.df[col] = self.df[col].apply(ast.literal_eval)
        self.mfs_columns = [c for c in self.df.columns if c.startswith('mfs-')]
        self.udb_columns = [c for c in self.df.columns if c.startswith('udb-')]
        self.cms_columns = ['cable_max_stress']
        # Explode each list column into <col>_0 .. <col>_{dim-1} scalars.
        for col in self.mfs_columns:
            cols = self.get_col_id_list(col, self.mfs_dim)
            tmp = pd.DataFrame(self.df[col].tolist(), index=self.df.index, columns=cols)
            self.df = pd.concat([self.df, tmp], axis=1)
        for col in self.udb_columns:
            cols = self.get_col_id_list(col, self.udb_dim)
            tmp = pd.DataFrame(self.df[col].tolist(), index=self.df.index, columns=cols)
            self.df = pd.concat([self.df, tmp], axis=1)
        # Features are the contiguous column span [start_index, end_index].
        feat_cols = self.df.columns.tolist()
        start_idx = feat_cols.index(self.start_index)
        end_idx = feat_cols.index(self.end_index)
        self.X = self.df.iloc[:, start_idx:end_idx + 1]
        self.feature_columns = self.X.columns.tolist()
        y_cols_mfs = []
        for c in self.mfs_columns:
            y_cols_mfs.extend(self.get_col_id_list(c, self.mfs_dim))
        self.y_mfs = self.df[y_cols_mfs]
        y_cols_udb = []
        for c in self.udb_columns:
            y_cols_udb.extend(self.get_col_id_list(c, self.udb_dim))
        self.y_udb = self.df[y_cols_udb]
        self.y_cms = self.df[self.cms_columns].values.ravel()
        # Scale numeric features to [0, 1]; one-hot the object columns
        # (unknown categories at predict time become all-zero rows).
        self.numeric_features = self.X.select_dtypes(include=['int64', 'float64']).columns.tolist()
        self.categorical_features = self.X.select_dtypes(include=['object']).columns.tolist()
        self.preprocessor = ColumnTransformer(
            transformers=[
                ('num', MinMaxScaler(), self.numeric_features),
                ('cat', OneHotEncoder(handle_unknown='ignore'), self.categorical_features)
            ])

        self.y_cols_mfs = y_cols_mfs
        self.y_cols_udb = y_cols_udb

    def generate_mfs_json(self):
        """Reshape ``self.predictions_mfs`` into per-zone dicts.

        Column names containing "(x, y)" carry the zone coordinates;
        columns without parentheses get None coordinates. Assumes
        mfs_dim >= 4 with component order moment, axial force, shear
        force, K — TODO confirm against the dataset layout.
        """
        num_mfs_cols = len(self.mfs_columns)
        # -> (n_samples, n_zones, mfs_dim)
        reshaped_predictions = self.predictions_mfs.reshape(-1, num_mfs_cols, self.mfs_dim)
        mfs_list = []
        for i, pred in enumerate(reshaped_predictions):
            for j, mfs_col in enumerate(self.mfs_columns):
                # Pull "(x, y)" out of the column name when present.
                if '(' in mfs_col and ')' in mfs_col:
                    match = re.search(r'\(([^,]+),\s*([^)]+)\)', mfs_col)
                    x, y = (float(match.group(1)), float(match.group(2))) if match else (None, None)
                else:
                    x = y = None
                mfs_dict = {
                    "zone_moment": pred[j][0],
                    "zone_axial_force": pred[j][1],
                    "zone_shear_force": pred[j][2],
                    "zone_K": pred[j][3],
                    "zone_x": x,
                    "zone_y": y
                }
                mfs_list.append(mfs_dict)
        return mfs_list

    def generate_udb_json(self):
        """Reshape ``self.predictions_udb`` into four per-gridpoint lists.

        Assumes udb_dim >= 6 with component order: unlined displacement,
        unlined shear/tension break depth, lined displacement, lined
        shear/tension break depth — TODO confirm against the dataset.
        Gridpoint (x, y) coordinates are parsed from the column names.
        """
        num_udb_cols = len(self.udb_columns)
        # -> (n_samples, n_gridpoints, udb_dim)
        reshaped_predictions = self.predictions_udb.reshape(-1, num_udb_cols, self.udb_dim)
        unlined_displacement_list = []
        unlined_break_depth_list = []
        displacement_list = []
        break_depth_list = []
        for i, pred in enumerate(reshaped_predictions):
            for j, udb_col in enumerate(self.udb_columns):
                # Pull "(x, y)" out of the column name when present.
                if '(' in udb_col and ')' in udb_col:
                    match = re.search(r'\(([^,]+),\s*([^)]+)\)', udb_col)
                    x, y = (float(match.group(1)), float(match.group(2))) if match else (None, None)
                else:
                    x = y = None
                unlined_displacement_list.append({
                    "displacement": pred[j][0],
                    "gridpoint_position_x": x,
                    "gridpoint_position_y": y
                })
                unlined_break_depth_list.append({
                    "gridpoint_position_x": x,
                    "gridpoint_position_y": y,
                    "max_shear_distance": pred[j][1],
                    "max_tension_distance": pred[j][2]
                })
                displacement_list.append({
                    "displacement": pred[j][3],
                    "gridpoint_position_x": x,
                    "gridpoint_position_y": y
                })
                break_depth_list.append({
                    "gridpoint_position_x": x,
                    "gridpoint_position_y": y,
                    "max_shear_distance": pred[j][4],
                    "max_tension_distance": pred[j][5]
                })
        return {
            "unlined_displacement_around_tunnel_inside": unlined_displacement_list,
            "unlined_break_depth_list": unlined_break_depth_list,
            "displacement_around_tunnel_inside": displacement_list,
            "break_depth_list": break_depth_list
        }

    def get_results_json(self):
        """Assemble all current predictions into a pretty-printed JSON string."""
        mfs_list = self.generate_mfs_json()
        udb_data = self.generate_udb_json()
        result = {
            "mfs_list_first_conc": mfs_list,
            "unlined_displacement_around_tunnel_inside": udb_data["unlined_displacement_around_tunnel_inside"],
            "unlined_break_depth_list": udb_data["unlined_break_depth_list"],
            "displacement_around_tunnel_inside": udb_data["displacement_around_tunnel_inside"],
            "break_depth_list": udb_data["break_depth_list"],
            "cable_max_stress": self.cable_max_stress
        }

        def _sanitize(obj):
            # Recursively convert numpy arrays/scalars to JSON-safe natives.
            if isinstance(obj, np.ndarray):
                return obj.astype(float).tolist()
            if isinstance(obj, (np.float32, np.float64, np.int32, np.int64)):
                return float(obj)
            if isinstance(obj, dict):
                return {k: _sanitize(v) for k, v in obj.items()}
            if isinstance(obj, list):
                return [_sanitize(item) for item in obj]
            return obj

        result = _sanitize(result)
        return json.dumps(result, indent=4, ensure_ascii=False)

    def input_dict_to_features(self, input_dict):
        """Flatten the nested request dict into a single-row DataFrame
        ordered like ``self.feature_columns``.

        Only the first entry of ``input_dict['cable']`` is used, and
        exactly 18 items are expected in ``input_dict['param_list']``
        (mapped to columns B-1 .. B-18).
        """
        features = {}
        features['depth'] = input_dict['depth']
        features['relax_ratio'] = input_dict['relax_ratio']
        steel_frame = input_dict['steel_frame']
        features['steel_frame-type'] = steel_frame['type']
        features['steel_frame-spacing'] = steel_frame['spacing']
        first_conc = input_dict['first_conc']
        features['first_conc-type'] = first_conc['type']
        features['first_conc-thickness'] = first_conc['thickness']
        cable = input_dict['cable'][0]
        features['cable-type'] = cable['type']
        features['cable-diameter'] = cable['diameter']
        features['cable-arc_start'] = cable['arc_start']
        features['cable-arc_end'] = cable['arc_end']
        features['cable-bolt_length'] = cable['bolt_length']
        features['cable-plane_space'] = cable['plane_space']
        features['cable-spacing'] = cable['spacing']
        features['cable-prestress'] = cable['prestress']
        param_list = input_dict['param_list']
        for i in range(18):
            features[f'B-{i + 1}'] = param_list[i]
        features_df = pd.DataFrame([features])
        # Reorder to exactly match the training-time feature columns.
        features_df = features_df[self.feature_columns]
        return features_df

    def predict_from_input_dict(self, input_dict, prefix, data_count, need_save=False):
        """Load artifacts saved under ``<prefix>_<data_count>`` and predict.

        Returns a JSON string of ``{"status": "success", "data": ...}``,
        optionally also written to ``output.json`` beside the models, or
        None when the directory is missing or loading/predicting fails.
        """
        model_sub_dir = self.model_dir / f"{prefix}_{data_count}"
        if not model_sub_dir.exists():
            print(f"未能找到训练数量为{data_count}的模型文件目录")
            return None
        try:
            # The saved model_* pipelines already embed their fitted
            # preprocessor; self.preprocessor is loaded but not used by
            # the predict calls below.
            self.preprocessor = joblib.load(model_sub_dir / "preprocessor.joblib")
            self.model_mfs = joblib.load(model_sub_dir / "model_mfs.joblib")
            self.model_udb = joblib.load(model_sub_dir / "model_udb.joblib")
            self.model_cms = joblib.load(model_sub_dir / "model_cms.joblib")
            with open(model_sub_dir / "feature_columns.json", 'r') as f:
                self.feature_columns = json.load(f)
            with open(model_sub_dir / "mfs_columns.json", 'r') as f:
                self.mfs_columns = json.load(f)
            with open(model_sub_dir / "udb_columns.json", 'r') as f:
                self.udb_columns = json.load(f)
            with open(model_sub_dir / "cms_columns.json", 'r') as f:
                self.cms_columns = json.load(f)
            features_df = self.input_dict_to_features(input_dict)
            predictions_mfs = self.model_mfs.predict(features_df)
            predictions_udb = self.model_udb.predict(features_df)
            predictions_cms = self.model_cms.predict(features_df)
            self.predictions_mfs = predictions_mfs
            self.predictions_udb = predictions_udb
            # Single-row request -> a single scalar cable stress.
            self.cable_max_stress = float(predictions_cms.ravel()[0])
            res = self.get_results_json()
            nested_result = json.dumps({
                "status": "success",
                "data": json.loads(res)
            }, ensure_ascii=False, indent=4)
            if need_save:
                Path(model_sub_dir / "output.json").write_text(nested_result, encoding="utf-8")
            return nested_result
        except Exception as e:
            # NOTE(review): broad catch — a malformed input dict or shape
            # mismatch is reported the same way as a missing model file.
            print(f"""
                请检查{model_sub_dir}目录下是否有模型缺失: 
                    ======================
                    feature_columns.json
                    mfs_columns.json
                    model_mfs.joblib
                    model_udb.joblib
                    preprocessor.joblib
                    udb_columns.json
                    ======================
                错误信息:{e}
                  """)
            return None

    def compare_train(self, prefix):
        """Train every candidate model family, keep the best per target
        (by mean 5-fold CV R²), persist models + metadata under
        ``<prefix>_<n_rows>``, and export metrics, feature importances
        and residual summaries to Excel.
        """
        model_sub_dir = self.model_dir / f"{prefix}_{len(self.df)}"
        model_sub_dir.mkdir(parents=True, exist_ok=True)
        # Same test_size/random_state -> identical row split for all
        # three target families, so X_train/X_test can be shared.
        X_train, X_test, y_train_mfs, y_test_mfs = train_test_split(
            self.X, self.y_mfs, test_size=self.test_size, random_state=self.random_state)
        _, _, y_train_udb, y_test_udb = train_test_split(
            self.X, self.y_udb, test_size=self.test_size, random_state=self.random_state)
        _, _, y_train_cms, y_test_cms = train_test_split(
            self.X, self.y_cms, test_size=self.test_size, random_state=self.random_state)
        model_keys = list(self.MODEL_MAP.keys())
        indicators = []
        best_mfs_model = best_udb_model = best_cms_model = None
        best_mfs_r2 = best_udb_r2 = best_cms_r2 = -float('inf')

        for model_key in model_keys:
            print(f"🔍 正在评估模型: {model_key}")
            # MFS: multi-output pipeline, hold-out SMAPE/RMSE per component.
            model_mfs = self._build_pipeline(model_key)
            model_mfs.fit(X_train, y_train_mfs)
            pred_mfs = model_mfs.predict(X_test)
            smape_mfs = self.cal_smape_every_column(y_test_mfs, pred_mfs, self.mfs_dim)
            rmse_mfs = self.cal_rmse_every_column(y_test_mfs, pred_mfs, self.mfs_dim)

            # UDB: same treatment.
            model_udb = self._build_pipeline(model_key)
            model_udb.fit(X_train, y_train_udb)
            pred_udb = model_udb.predict(X_test)
            smape_udb = self.cal_smape_every_column(y_test_udb, pred_udb, self.udb_dim)
            rmse_udb = self.cal_rmse_every_column(y_test_udb, pred_udb, self.udb_dim)

            # CMS: single scalar target, no MultiOutput wrapper.
            model_cms = self._build_pipeline(model_key, is_one_dim=True)
            model_cms.fit(X_train, y_train_cms)
            pred_cms = model_cms.predict(X_test)
            smape_cms = self.cal_smape_every_column(y_test_cms, pred_cms, self.cms_dim)
            rmse_cms = self.cal_rmse_every_column(y_test_cms, pred_cms, self.cms_dim)

            # Model selection criterion: mean CV R² on the training split
            # (cross_val_score clones the pipelines before fitting).
            r2_mfs = cross_val_score(model_mfs, X_train, y_train_mfs, cv=5, scoring='r2').mean()
            r2_udb = cross_val_score(model_udb, X_train, y_train_udb, cv=5, scoring='r2').mean()
            r2_cms = cross_val_score(model_cms, X_train, y_train_cms, cv=5, scoring='r2').mean()

            # One metric column per named component (from *_list labels).
            mfs_smape_dict = {f"smape-mfs-{self.mfs_list[i]}/%": smape_mfs[i] for i in range(len(smape_mfs))}
            udb_smape_dict = {f"smape-udb-{self.udb_list[i]}/%": smape_udb[i] for i in range(len(smape_udb))}
            cms_smape_dict = {f"smape-cms-{self.cms_list[i]}/%": smape_cms[i] for i in range(self.cms_dim)}
            mfs_rmse_dict = {f"rmse-mfs-{self.mfs_list[i]}": rmse_mfs[i] for i in range(len(rmse_mfs))}
            udb_rmse_dict = {f"rmse-udb-{self.udb_list[i]}": rmse_udb[i] for i in range(len(rmse_udb))}
            cms_rmse_dict = {f"rmse-cms-{self.cms_list[i]}": rmse_cms[i] for i in range(self.cms_dim)}

            indicators.append({
                'model': model_key,
                'mfs_r2': r2_mfs,
                'udb_r2': r2_udb,
                'cms_r2': r2_cms,
                **mfs_smape_dict,
                **mfs_rmse_dict,
                **udb_smape_dict,
                **udb_rmse_dict,
                **cms_smape_dict,
                **cms_rmse_dict
            })

            # Track the best model per family (and its hold-out metrics).
            if r2_mfs > best_mfs_r2:
                best_mfs_r2 = r2_mfs
                best_mfs_model = model_mfs
                best_mfs_mape = smape_mfs
                best_mfs_rmse = rmse_mfs
            if r2_udb > best_udb_r2:
                best_udb_r2 = r2_udb
                best_udb_model = model_udb
                best_udb_mape = smape_udb
                best_udb_rmse = rmse_udb
            if r2_cms > best_cms_r2:
                best_cms_r2 = r2_cms
                best_cms_model = model_cms
                best_cms_mape = smape_cms
                best_cms_rmse = rmse_cms

        # Winner flags ('yes'/'no') appended to the indicator table.
        indicator_df = pd.DataFrame(indicators)
        indicator_df['mfs模型'] = 'no'
        indicator_df['udb模型'] = 'no'
        indicator_df['cms模型'] = 'no'
        indicator_df.loc[indicator_df['mfs_r2'].idxmax(), 'mfs模型'] = 'yes'
        indicator_df.loc[indicator_df['udb_r2'].idxmax(), 'udb模型'] = 'yes'
        indicator_df.loc[indicator_df['cms_r2'].idxmax(), 'cms模型'] = 'yes'
        indicator_path = model_sub_dir / "indicator.xlsx"
        self.save_excel(indicator_df, indicator_path)

        # Persist the winning pipelines plus the metadata needed at
        # inference time (predict_from_input_dict reloads all of these).
        # Pipelines fit their steps in place, so self.preprocessor is
        # fitted here (last fit wins; all fits used the same X_train).
        joblib.dump(best_mfs_model, model_sub_dir / "model_mfs.joblib")
        joblib.dump(best_udb_model, model_sub_dir / "model_udb.joblib")
        joblib.dump(best_cms_model, model_sub_dir / "model_cms.joblib")
        joblib.dump(self.preprocessor, model_sub_dir / "preprocessor.joblib")
        with open(model_sub_dir / "feature_columns.json", 'w') as f:
            json.dump(self.feature_columns, f)
        with open(model_sub_dir / "mfs_columns.json", 'w') as f:
            json.dump(self.mfs_columns, f)
        with open(model_sub_dir / "udb_columns.json", 'w') as f:
            json.dump(self.udb_columns, f)
        with open(model_sub_dir / "cms_columns.json", 'w') as f:
            json.dump(self.cms_columns, f)

        # ========== XGBoost feature importance: report original column names ==========
        # 1. Collect every column name produced by the preprocessor.
        ohe = self.preprocessor.named_transformers_['cat']
        feature_names_after = (
            self.numeric_features +
            list(ohe.get_feature_names_out(self.categorical_features))
        )

        # 2. Map booster keys f0, f1, ... back to real column names.
        # (Assumes the boosters report features as 'f<i>' — the case when
        # they are fed a plain array; unknown keys fall through unchanged.)
        f_map = {f'f{i}': name for i, name in enumerate(feature_names_after)}

        for task, model in zip(['mfs', 'udb', 'cms'], [best_mfs_model, best_udb_model, best_cms_model]):
            avg_gain = {}
            if not isinstance(model.named_steps['regressor'], MultiOutputRegressor):
                # Single-output winner: one booster, if it is an XGBoost model.
                booster = model.named_steps['regressor']
                if hasattr(booster, 'get_booster'):
                    imp = booster.get_booster().get_score(importance_type='gain')
                    for k, v in imp.items():
                        fname = f_map.get(k, k)
                        avg_gain[fname] = v
            else:
                # Multi-output winner: average gain over per-target estimators.
                multi_model = model.named_steps['regressor']
                for est in multi_model.estimators_:
                    if hasattr(est, 'get_booster'):
                        imp = est.get_booster().get_score(importance_type='gain')
                        for k, v in imp.items():
                            fname = f_map.get(k, k)
                            avg_gain[fname] = avg_gain.get(fname, 0) + v / len(multi_model.estimators_)

            if avg_gain:
                importance_df = pd.DataFrame({'feature': list(avg_gain.keys()), 'gain': list(avg_gain.values())})
                importance_df = importance_df.sort_values('gain', ascending=False)

                # Merge OHE-expanded columns back to variable level and save.
                var_df = self.merge_ohe_importance(importance_df, ohe, self.categorical_features)
                var_df.to_excel(model_sub_dir / f"feature_importance_{task}.xlsx", index=False)

        # Console summary of the winning models' hold-out metrics.
        need_print_performance = True
        if need_print_performance:
            formatted_best_mfs_mape = [f"{mape:.4f}" for mape in best_mfs_mape]
            formatted_best_udb_mape = [f"{mape:.4f}" for mape in best_udb_mape]
            formatted_best_cms_mape = [f"{mape:.4f}" for mape in best_cms_mape]
            formatted_best_mfs_rmse = [f"{rmse:.4f}" for rmse in best_mfs_rmse]
            formatted_best_udb_rmse = [f"{rmse:.4f}" for rmse in best_udb_rmse]
            formatted_best_cms_rmse = [f"{rmse:.4f}" for rmse in best_cms_rmse]

            print(f"最优 MFS 模型: {indicator_df.loc[indicator_df['mfs_r2'].idxmax(), 'model']}")
            print(f"最优 MFS R²: {best_mfs_r2:.4f}")
            print(f"smape-mfs | {' '.join([f'{i + 1}. {v}' for i, v in enumerate(self.mfs_list)])}")
            print(f"最优 MFS SMAPE%: {formatted_best_mfs_mape}")
            print(f"rmse-mfs | {' '.join([f'{i + 1}. {v}' for i, v in enumerate(self.mfs_list)])}")
            print(f"最优 MFS RMSE: {formatted_best_mfs_rmse}")

            print(f"最优 UDB 模型: {indicator_df.loc[indicator_df['udb_r2'].idxmax(), 'model']}")
            print(f"最优 UDB R²: {best_udb_r2:.4f}")
            print(f"smape-udb | {' '.join([f'{i + 1}. {v}' for i, v in enumerate(self.udb_list)])}")
            print(f"最优 UDB SMAPE%: {formatted_best_udb_mape}")
            print(f"rmse-udb | {' '.join([f'{i + 1}. {v}' for i, v in enumerate(self.udb_list)])}")
            print(f"最优 UDB RMSE: {formatted_best_udb_rmse}")

            print(f"最优 CMS 模型: {indicator_df.loc[indicator_df['cms_r2'].idxmax(), 'model']}")
            print(f"最优 CMS R²: {best_cms_r2:.4f}")
            print(f"smape-cms | {' '.join([f'{i + 1}. {v}' for i, v in enumerate(self.cms_list)])}")
            print(f"最优 CMS SMAPE%: {formatted_best_cms_mape}")
            print(f"rmse-cms | {' '.join([f'{i + 1}. {v}' for i, v in enumerate(self.cms_list)])}")
            print(f"最优 CMS RMSE: {formatted_best_cms_rmse}")

        # (Original note said "insert at the end of compare_train(), before
        # printing the metrics" — NOTE(review): stale editing note; the
        # block actually runs *after* the printout above.)
        # ==========================================================
        # NOTE(review): pred_mfs/pred_udb/pred_cms still hold predictions
        # of the LAST model evaluated in the loop, not necessarily the
        # best one — confirm whether that is intended for the residuals.
        # 1. Compute residuals
        residual_mfs = y_test_mfs - pred_mfs          # DataFrame - ndarray -> ndarray
        residual_udb = y_test_udb - pred_udb
        residual_cms = y_test_cms - pred_cms

        # 1. Compute residuals (again — NOTE(review): this immediately
        # overwrites the assignments above, making them dead code).
        residual_mfs = y_test_mfs.values - pred_mfs          # force plain ndarrays
        residual_udb = y_test_udb.values - pred_udb
        residual_cms = y_test_cms - pred_cms

        # 2. Build the summary DataFrame straight from the columns.
        # === 1. Squared residuals first ===
        res_mfs_sq = (residual_mfs ** 2).mean(axis=1)      # per-sample mean squared error
        res_udb_sq = (residual_udb ** 2).mean(axis=1)
        res_cms_sq = residual_cms ** 2                       # already 1-D

        # === 2. Assemble into 3 columns ===
        res_summary = pd.DataFrame({
            'mfs_residual': res_mfs_sq,
            'udb_residual': res_udb_sq,
            'cms_residual': res_cms_sq
        })

        # === 3. Write to Excel ===
        res_summary.to_excel(model_sub_dir / "residual_summary.xlsx",
                            index=False, float_format="%.6f")
        # ==========================================================

    def cal_smape_every_column(self, y_test, pred, dim):
        """Per-component SMAPE (%), averaged over samples and columns.

        Both inputs are flattened and regrouped into rows of length
        ``dim``, so each returned value is the mean SMAPE of one
        component position across every sample and every target column.
        Returns a list of ``dim`` floats; raises ValueError when the
        element count is not divisible by ``dim``.
        """
        y_test = np.array(y_test)
        pred = np.array(pred)
        y_test_flat = y_test.flatten()
        pred_flat = pred.flatten()
        total_elements = y_test_flat.size
        if total_elements % dim != 0:
            raise ValueError(f"总元素数 {total_elements} 不能被 dim {dim} 整除")
        n_samples = total_elements // dim
        y_test_reshaped = y_test_flat.reshape(n_samples, dim)
        pred_reshaped = pred_flat.reshape(n_samples, dim)
        # Symmetric MAPE; epsilon guards the all-zero denominator case.
        epsilon = 1e-8
        numerator = np.abs(pred_reshaped - y_test_reshaped)
        denominator = (np.abs(y_test_reshaped) + np.abs(pred_reshaped)) + epsilon
        smape_per_sample_per_output = (2.0 * numerator / denominator) * 100
        column_means = np.mean(smape_per_sample_per_output, axis=0).tolist()
        return column_means

    def cal_rmse_every_column(self, y_test, pred, dim):
        """Per-component RMSE: the per-output RMSEs are regrouped into
        rows of length ``dim`` and averaged, yielding one mean RMSE per
        component position (list of ``dim`` floats)."""
        rmse_data = np.sqrt(mean_squared_error(y_test, pred, multioutput='raw_values'))
        row_num = rmse_data.size // dim
        rmse_reshaped = rmse_data.reshape(row_num, dim)
        column_means = np.mean(rmse_reshaped, axis=0).tolist()
        return column_means

    def save_excel(self, indicator_df: pd.DataFrame, indicator_path: str):
        """Write the indicator table transposed (metric names as rows),
        with the original header repeated as the first data row."""
        indicator_df_with_headers = pd.DataFrame([indicator_df.columns.tolist()], columns=indicator_df.columns)
        indicator_df_with_headers = pd.concat([indicator_df_with_headers, indicator_df], ignore_index=True)
        indicator_df_transposed = indicator_df_with_headers.T
        indicator_df_transposed.to_excel(
            indicator_path,
            index=False,
            header=False,
            float_format="%.4f" # tune Excel output precision; some values are very small
        )

    def merge_ohe_importance(self, importance_df, ohe, cat_features):
        """
        Merge the gain of OHE-expanded columns back onto their original
        variable names; columns such as B-1_II or B-18_V2 (dummified
        upstream of the encoder) are merged by their B-<n> prefix.

        NOTE(review): the zip below pairs positionally, so when a
        categorical feature yields more than one OHE column, ohe_names
        outnumber cat_features and most OHE columns fall through to the
        regex branch — confirm this matches the intended grouping.
        """

        # 1. OHE column -> original categorical variable mapping.
        ohe_names = ohe.get_feature_names_out(cat_features)
        rev_map = {ohe_name: orig for orig, ohe_name in zip(cat_features, ohe_names)}

        # 2. Unified mapping function.
        def map_feature(col: str) -> str:
            # 2.1 Column produced by the OHE: return the original variable.
            if col in rev_map:
                return rev_map[col]
            # 2.2 B-1_II / B-18_V2 style: extract the B-1 / B-18 prefix.
            m = re.match(r'^(B-\d+)_', col)
            if m:
                return m.group(1)
            # 2.3 Anything else (numeric variables etc.) stays as-is.
            return col

        # 3. Apply the mapping and aggregate the gain per variable.
        merged = (importance_df
                .assign(original_feature=importance_df['feature'].map(map_feature))
                .groupby('original_feature', as_index=False)['gain']
                .sum()
                .sort_values('gain', ascending=False))
        return merged


# ==================== Usage example ====================
if __name__ == "__main__":
    # Training stage: fit and persist the best model per target family.
    training_config = dict(
        test_size=0.2,
        random_state=0,
        start_index="steel_frame-type",
        end_index="B-18",
        mfs_list=["moment", "axial", "shear", "safety"],
        udb_list=["unlined_disp", "unlined_shear", "unlined_tension", "lined_disp", "lined_shear", "lined_tension"],
        cms_list=["cable_max_stress"],
        file_name="res.xlsx",
    )
    trainer = MfsUdbPredictor(**training_config)
    trainer.load_and_preprocess_data()
    trainer.compare_train(prefix="200km_double")

    # Inference stage: a fresh instance reloads the persisted artifacts.
    inference_predictor = MfsUdbPredictor()
    sample_request = {
        "steel_frame": {"type": "HW150x150", "spacing": 1.2},
        "first_conc": {"type": "C20", "thickness": 100},
        "cable": [
            {"type": "PSB1080", "diameter": 100, "arc_start": 10, "arc_end": 170,
             "bolt_length": 6, "plane_space": 1, "spacing": 1.5, "prestress": 70000}
        ],
        "depth": 560,
        "param_list": ["VI1"] * 18,
        "relax_ratio": 0.2,
    }
    result_json = inference_predictor.predict_from_input_dict(
        sample_request, prefix="200km_double", data_count=1895, need_save=True)
    print(result_json)