import os
import json
import pickle
import numpy as np
import pandas as pd
import traceback
import lightgbm as lgb
import xgboost as xgb
import torch
import joblib
from typing import Dict, Any, Tuple, Optional
from sklearn.metrics import roc_auc_score, f1_score, precision_score, recall_score
from config.ssq_config import logger, SSQ_CONFIG
import trainer.model_train.model_utils as model_utils
from trainer.model_train.lstm_train import LSTMModel  # 保留LSTM兼容
# 新增: 贝叶斯优化依赖
from bayes_opt import BayesianOptimization


# Module-level globals: shared configuration for the 5 fused model types plus the
# core training parameters (used by the sequential 33-label run; these replace
# multiprocessing shared state from an earlier design).
global_feature_cols = None  # feature column list shared across labels (set in StackingBayesianOptimization.__init__)
global_train_version = None  # training version string (set in StackingBayesianOptimization.__init__)
global_model_root = None  # root directory of the single-model artifacts (set in StackingBayesianOptimization.__init__)
global_base_model_types = SSQ_CONFIG['model_types']  # the 5 base model types to fuse, e.g. ["xgb", "lgb", "rf", "lr", "lstm"]


class StackingBayesianOptimization:
    """Stacking-ensemble core class.

    Fuses up to 5 base model types per label (xgb/lgb/rf/lr/lstm), processes
    the 33 labels (r1-r33) sequentially, and reads each base model's training
    parameters from its per-label process_info.json.
    """
    def __init__(self, mc_sims: int = 100, run_type: str = 'train'):
        """
        Initialize configuration (multiprocessing removed; labels run sequentially).

        :param mc_sims: number of Monte Carlo simulations (assesses model stability)
        :param run_type: 'train' or 'predict' -- selects the model save directory
        :raises ValueError: if run_type is not 'train' or 'predict'
        :raises RuntimeError: if the validation dataset cannot be initialized
        """
        # Validate the run type
        if run_type not in ["train", "predict"]:
            raise ValueError(f"数据类型错误！仅支持 'train' 或 'predict', 当前输入: {run_type}")
        
        self.run_type = run_type  # records how the data is used (train/predict)
        # Resolve the save directory from the config file (avoids hard-coding paths)
        self.model_root = SSQ_CONFIG["train_file"]["model_save_dir"]
        if self.run_type == "predict":
            self.model_root = SSQ_CONFIG["predict_file"]["model_save_dir"]

        # Core configuration (multiprocessing-related parameters removed)
        self.label_cols = SSQ_CONFIG["label_cols"]  # r1-r33, 33 labels, processed in order
        self.train_version = SSQ_CONFIG['train_version']
        self.mc_sims = mc_sims
        
        # Dedicated save directory for ensemble models (kept separate from single models)
        self.ensemble_root = os.path.join(self.model_root, f'stacking_ensemble_{self.train_version}')
        os.makedirs(self.ensemble_root, exist_ok=True)

        # Data containers (same format as the single-model pipeline)
        self.X_val = None  # validation feature matrix shared by all labels
        self.y_val_dict = {}  # label -> validation target mapping {label: y_val}
        self.feature_cols = None  # feature columns, identical to the single-model setup
        self.current_feature_count = 0  # current feature count (used for validation)

        # Training summary (extended with 5-model fusion statistics)
        self.train_summary = {
            "version": self.train_version,
            "total_labels": 33,  # fixed at 33 labels (r1-r33)
            "valid_labels": 0,  # labels whose validation set loaded successfully
            "success": 0,  # labels fused successfully
            "failed": [],  # failure details per label
            "metrics": {},  # fusion metrics per label
            "device": "cpu",
            "mc_sims": self.mc_sims,
            "ensemble_root": self.ensemble_root,
            "base_models": global_base_model_types,  # base model types participating in fusion
            "avg_base_models_used": 0.0  # average number of base models fused per label
        }

        # Initialize the validation set (fusion training cannot start without it)
        if not self._init_val_dataset():
            raise RuntimeError("验证集数据初始化失败, 终止融合训练")

        # Publish module-level globals (replaces the old multiprocessing shared state)
        global global_feature_cols, global_train_version, global_model_root
        global_feature_cols = self.feature_cols
        global_train_version = self.train_version
        global_model_root = self.model_root

    def _init_val_dataset(self) -> bool:
        """Initialize validation data for the 33 labels.

        Ensures feature/label formats stay consistent with the single-model
        pipeline. Returns True when at least one label's validation set loads.
        """
        logger.info(f"===== 初始化验证集(融合版本: {self.train_version}) =====")
        try:
            # Use the first label to validate the data-loading logic
            # (aligned with the single-model init_data path)
            sample_label = self.label_cols[0]
            success, sample_data = model_utils.init_dataset(
                label_col=sample_label,
                config_label_cols=SSQ_CONFIG["label_cols"],
                load_test=False,
                print_sample=False
            )
            if not success:
                logger.error(f"样本标签[{sample_label}]数据加载失败, 终止融合训练")
                return False

            # Share the feature matrix, feature columns and feature count across labels
            self.X_val = sample_data["X_val"]
            self.feature_cols = sample_data["feature_cols"]
            self.current_feature_count = len(self.feature_cols)  # record the current feature count
            logger.info(f"验证集规格: {len(self.X_val)}样本 × {self.current_feature_count}特征(与单模型一致)")

            # Load validation targets for all 33 labels (in label_cols order)
            valid_labels = 0
            for label in self.label_cols:
                success, label_data = model_utils.init_dataset(
                    label_col=label,
                    config_label_cols=SSQ_CONFIG["label_cols"],
                    load_test=False,
                    print_sample=False
                )
                if success:
                    # Binarize the target (same logic as single-model training)
                    self.y_val_dict[label] = np.where(
                        label_data["y_val"] >= label_data["threshold"], 1, 0
                    )
                    valid_labels += 1
                    logger.debug(f"加载标签[{label}]验证集: {len(self.y_val_dict[label])}样本")
                else:
                    logger.warning(f"跳过标签[{label}]: 验证集加载失败")

            # Drop labels that failed to load, preserving the original order
            self.label_cols = [label for label in self.label_cols if label in self.y_val_dict]
            self.train_summary["valid_labels"] = valid_labels
            logger.info(f"有效融合标签数: {self.train_summary['valid_labels']}/33(过滤无效标签后)")
            logger.info(f"标签执行顺序: {self.label_cols}")
            return self.train_summary["valid_labels"] > 0

        except Exception as e:
            logger.error(f"验证集初始化异常: {str(e)}\n{traceback.format_exc()}")
            return False

    def _get_model_process_info(self, model_type: str, label: str) -> Optional[Dict[str, Any]]:
        """Read a base model's training parameters from its process_info.json.

        Returns None if the file is missing, unreadable, or lacks required fields.
        """
        # Build the model directory (matches naming like vXGB_train_0.2, vLR_train_0.2)
        model_dir = os.path.join(global_model_root, f"v{model_type.upper()}_{global_train_version}")
        # Build the process_info path (layout: model_dir/label/label_process_info.json)
        process_info_path = os.path.join(model_dir, label, f"{label}_process_info.json")
        logger.info(f'process_info_path: {process_info_path}')
        
        try:
            if not os.path.exists(process_info_path):
                logger.warning(f"标签[{label}]模型[{model_type}]的process_info不存在: {process_info_path}")
                return None
            
            with open(process_info_path, "r", encoding="utf-8") as f:
                process_info = json.load(f)
            
            # Validate required fields
            required_fields = ["feature_info", "training_info"]
            for field in required_fields:
                if field not in process_info:
                    logger.warning(f"标签[{label}]模型[{model_type}]的process_info缺失{field}字段")
                    return None
            
            return process_info
        except Exception as e:
            logger.error(f"读取标签[{label}]模型[{model_type}]的process_info失败: {str(e)}")
            return None

    def _train_single_label(self, label: str) -> Dict[str, Any]:
        """Fuse all available base models for one label.

        Loads up to 5 base models, generates their validation predictions
        (meta-features), trains a Bayesian-tuned LightGBM meta-model, runs a
        Monte Carlo stability check and saves the ensemble artifacts.
        """
        try:
            if global_feature_cols is None:
                return {"label": label, "status": "fail", "error": "特征列未初始化"}

            # 1. Prepare data (must match the single-model input format)
            y_val = self.y_val_dict[label]
            X_val = self.X_val
            if len(X_val) != len(y_val):
                return {"label": label, "status": "fail", "error": "特征与标签样本数不匹配"}

            # 2. Base-model configurations (organized by type + label)
            base_model_configs = [
                {
                    "type": model_type,
                    "name": f"{model_type}_{label}",
                    "model_dir": os.path.join(global_model_root, f"v{model_type.upper()}_{global_train_version}"),
                    "load_type": "pytorch" if model_type == "lstm" else model_type  # LSTM loads as a pytorch model
                }
                for model_type in global_base_model_types
            ]

            # 3. Load every available base model and generate predictions (meta-features)
            base_preds = {}  # base-model predicted probabilities (meta-model training input)
            base_proba_list = []  # probability arrays of all valid base models (fusion input)
            base_model_names = []  # valid base-model type names (logging & validation)
            # Base-model probabilities keyed by model type (for later inspection)
            base_model_probs = {}

            for cfg in base_model_configs:
                model_type = cfg["type"]
                load_type = cfg["load_type"]
                model_name = cfg["name"]
                model_dir = cfg["model_dir"]
                model = None

                try:
                    # 3.1 Read process_info
                    process_info = self._get_model_process_info(model_type, label)
                    if not process_info:
                        continue
                    
                    # 3.2 Extract the feature count used at training time (final_feature_count)
                    train_feature_count = process_info["training_info"].get("final_feature_count")
                    if train_feature_count is None:
                        train_feature_count = process_info["feature_info"].get("feature_count")
                    if train_feature_count is None:
                        logger.warning(f"标签[{label}]模型[{model_type}]未记录特征数量, 跳过")
                        continue

                    # 3.3 Load the model
                    model, model_config = model_utils.load_model(
                        model_name=model_name,
                        model_type=load_type,
                        device="cpu",
                        current_feature_cols=global_feature_cols,
                        model_dir=model_dir,
                    )
                    if not model:
                        continue

                    # 3.4 LR only: load the constant-feature mask and filter features
                    X_val_model = X_val  # model input features (may be filtered below)
                    if model_type == "lr":
                        # Load the non-constant feature mask recorded at training time
                        mask_path = process_info.get("non_constant_mask_path")  # mask path from process_info
                        if not mask_path or not os.path.exists(mask_path):
                            logger.warning(f"标签[{label}]LR模型未找到常量特征掩码, 跳过")
                            continue
                        non_constant_cols = joblib.load(mask_path)
                        
                        # Keep only the non-constant features (e.g. 77 -> 68)
                        if len(non_constant_cols) != self.current_feature_count:
                            logger.warning(f"标签[{label}]LR掩码长度不匹配, 跳过")
                            continue
                        X_val_model = X_val[:, non_constant_cols]  # filtered feature matrix
                        
                        # Verify the filtered feature count matches training time
                        if X_val_model.shape[1] != train_feature_count:
                            logger.warning(
                                f"标签[{label}]LR特征数量不匹配: 训练时{train_feature_count}个, 筛选后{X_val_model.shape[1]}个, 跳过"
                            )
                            continue
                    else:
                        # Other model types: plain feature-count validation
                        if self.current_feature_count != train_feature_count:
                            logger.warning(f"标签[{label}]模型[{model_type}]特征数量不匹配, 跳过")
                            continue

                    # 3.5 Generate predictions (using the possibly filtered X_val_model)
                    if model_type in ["xgb", "lgb"]:
                        # XGBoost needs a DMatrix; LightGBM predicts on raw arrays
                        if model_type == "xgb":
                            dmatrix = xgb.DMatrix(X_val_model)
                            best_iter = model_config.get("best_iteration", 0)
                            # iteration_range must be a tuple.
                            # NOTE(review): the (0, 1) fallback predicts with only the FIRST
                            # boosting round when best_iteration is unrecorded -- confirm this
                            # is intended ((0, 0) would use all rounds in XGBoost).
                            iteration_range = (0, best_iter + 1) if best_iter > 0 else (0, 1)
                            pred_proba = model.predict(
                                dmatrix,
                                iteration_range=iteration_range
                            )
                        else:  # model_type == "lgb"
                            # LightGBM predicts directly on the NumPy array (no Dataset)
                            best_iter = model_config.get("best_iteration", 0)
                            pred_proba = model.predict(
                                X_val_model,
                                num_iteration=best_iter if best_iter > 0 else None  # None = use all rounds
                            )
                    elif model_type == "lstm":
                        # LSTM prediction (convert to tensor; adapt to the training seq_len)
                        with torch.no_grad():
                            # 1. Read seq_len from the saved model config (avoids hard-coding)
                            seq_len = model_config["model_params"].get("seq_len", 15)  # default 15, matching training
                            input_size = model_config["model_params"]["input_size"]  # feature count must match
                            
                            # 2. Expand 2D (batch, input_size) -> 3D (batch, seq_len, input_size).
                            # The features are repeated seq_len times because this data has no
                            # time dimension -- repetition is used to satisfy the sequence shape.
                            X_val_3d = np.repeat(X_val_model[:, np.newaxis, :], seq_len, axis=1)
                            
                            # 3. Convert to tensor and predict
                            X_tensor = torch.tensor(X_val_3d, dtype=torch.float32)
                            pred_proba = model(X_tensor).numpy().flatten()
                    else:  # sklearn models (LR/RF/...)
                        # LR features were already filtered in step 3.4
                        pred_proba = model.predict_proba(X_val_model)[:, 1]  # positive-class probability
                    
                    # Validate prediction shape and store the result
                    pred_proba = pred_proba.reshape(-1)  # ensure 1-D array of shape (n_samples,)
                    if len(pred_proba) != len(y_val):
                        raise ValueError(f"预测长度不匹配(模型输出{len(pred_proba)}, 标签{len(y_val)})")
                    
                    # Record into the various containers
                    base_preds[model_name] = pred_proba
                    base_proba_list.append(pred_proba)  # fusion input
                    base_model_names.append(model_type)  # model type for logs/weights
                    base_model_probs[f"{model_type}_proba"] = pred_proba.tolist()  # keyed by type
                    logger.debug(f"标签[{label}]模型[{model_type}]预测完成, 概率数组长度: {len(pred_proba)}")

                except Exception as e:
                    logger.error(f"标签[{label}]模型[{model_type}]预测异常: {str(e)}")
                    continue

            # 4. At least one base model must have produced predictions
            if not base_preds:
                return {"label": label, "status": "fail", "error": "无有效基础模型参与融合"}

            # 5. Compute the ensemble's validation probabilities (AUC-weighted fusion)
            # 5.1 Weights: each base model's AUC (higher AUC -> larger weight)
            base_auc_weights = []
            for proba in base_proba_list:
                try:
                    auc = roc_auc_score(y_val, proba)
                    base_auc_weights.append(auc)
                except Exception as e:
                    logger.warning(f"标签[{label}]基础模型AUC计算失败, 使用默认权重1.0: {str(e)}")
                    base_auc_weights.append(1.0)
            
            # 5.2 Normalize the weights so they sum to 1
            base_auc_weights = np.array(base_auc_weights)
            normalized_weights = base_auc_weights / base_auc_weights.sum()
            logger.debug(f"标签[{label}]基础模型权重: {dict(zip(base_model_names, normalized_weights.round(4)))}")
            # JSON-safe copy of the weights (np.float64 is not JSON serializable)
            weights_by_type = dict(zip(base_model_names, normalized_weights.round(4).tolist()))
            
            # 5.3 Weighted fusion: ensemble validation probabilities, shape (n_samples,)
            base_proba_matrix = np.array(base_proba_list).T  # -> (n_samples, n_models)
            ensemble_proba = np.dot(base_proba_matrix, normalized_weights)  # weighted sum
            ensemble_proba = ensemble_proba.tolist()  # list form for serialization

            # 6. Train the meta-model (LightGBM, class-imbalance aware)
            trainer = StackingMetaTrainer(label, base_preds)
            if not trainer.init_meta_data(y_val):
                return {"label": label, "status": "fail", "error": "元特征初始化失败"}

            train_result = trainer.train()
            if train_result["status"] != "success":
                return {"label": label, "status": "fail", "error": train_result["error"]}

            # 7. Monte Carlo simulation to assess stability
            mc_result = self._monte_carlo_simulation(
                meta_model=train_result["meta_model"],
                X_meta=trainer.X_meta,
                y_true=y_val,
                threshold=train_result["threshold"]
            )

            # 8. Save the ensemble model (layout: version/stacking_ensemble/label)
            label_save_dir = os.path.join(self.ensemble_root, label)
            os.makedirs(label_save_dir, exist_ok=True)
            
            # Ensemble model payload: base-model info + Bayesian optimization results.
            # Best hyper-parameters and the uncertainty estimate are stored so that
            # prediction time can reuse the uncertainty machinery.
            model_data = {
                "meta_model": train_result["meta_model"],
                "weights": train_result["weights"],
                "base_auc_weights": normalized_weights.tolist(),
                "threshold": train_result["threshold"],
                "base_models": list(base_preds.keys()),
                # Bayesian optimization outputs
                "best_params": train_result.get("best_params", {}),  # tuned hyper-parameters
                "avg_uncertainty": train_result.get("uncertainty", 0.0),  # mean prediction uncertainty
                "metrics": {**train_result["metrics"], **mc_result["metrics"]},
                "feature_cols": global_feature_cols,
                "train_version": global_train_version,
                "mc_sims": self.mc_sims,
                "train_time": pd.Timestamp.now().strftime("%Y-%m-%d %H:%M:%S"),
                "base_model_probs": base_model_probs,
                "ensemble_proba": ensemble_proba
            }
            # Persist the ensemble model
            save_path = os.path.join(label_save_dir, f"stacking_{label}.pkl")
            with open(save_path, "wb") as f:
                pickle.dump(model_data, f, protocol=4)

            # Persist the fusion summary (metrics + both probability sets)
            summary_path = os.path.join(label_save_dir, f"stacking_{label}_summary.json")
            summary_data = {
                **model_data["metrics"],
                "base_model_probs": base_model_probs,
                "ensemble_proba": ensemble_proba,
                "base_model_count": len(base_preds),
                "base_auc_weights": weights_by_type  # JSON-safe plain-float weights
            }
            with open(summary_path, "w", encoding="utf-8") as f:
                # default=float coerces any remaining numpy scalars (np.float64 metrics)
                # that json cannot serialize natively
                json.dump(summary_data, f, indent=2, ensure_ascii=False, default=float)

            logger.info(f"标签[{label}]融合完成: 模型保存至 {save_path} (参与模型数: {len(base_preds)})")
            return {
                "label": label, "status": "success",
                "metrics": model_data["metrics"], "save_path": save_path,
                "base_model_count": len(base_preds),
                "base_auc_weights": weights_by_type,  # surfaced for the run summary
                "base_model_probs": base_model_probs,
                "ensemble_proba": ensemble_proba
            }

        except Exception as e:
            error_msg = f"标签[{label}]融合训练失败: {str(e)}"
            logger.error(f"{error_msg}\n{traceback.format_exc()}")
            return {"label": label, "status": "fail", "error": error_msg}

    def _monte_carlo_simulation(self, meta_model, X_meta, y_true, threshold) -> Dict[str, Any]:
        """Monte Carlo simulation: assess the ensemble's prediction stability.

        Adds small Gaussian noise to the meta-model probabilities mc_sims times
        and reports mean/std of AUC, F1, precision and recall.
        """
        try:
            base_proba = meta_model.predict(X_meta)
            metrics_list = []

            for _ in range(self.mc_sims):
                # Add tiny noise to simulate prediction jitter, clipped to [0, 1]
                noise = np.random.normal(0, 0.01, size=len(base_proba))
                sim_proba = np.clip(base_proba + noise, 0, 1)
                sim_label = (sim_proba >= threshold).astype(int)

                # Compute metrics (handle the degenerate single-class case)
                metrics = {}
                try:
                    metrics["auc"] = roc_auc_score(y_true, sim_proba)
                except Exception:  # fix: was a bare except; AUC is undefined for one class
                    metrics["auc"] = 0.5  # fall back to chance level
                metrics["f1"] = f1_score(y_true, sim_label, zero_division=0)
                metrics["precision"] = precision_score(y_true, sim_label, zero_division=0)
                metrics["recall"] = recall_score(y_true, sim_label, zero_division=0)
                metrics_list.append(metrics)

            # Aggregate the simulations (mean and std per metric).
            # float(...) keeps the values JSON serializable (pandas returns np.float64).
            metrics_df = pd.DataFrame(metrics_list)
            mc_metrics = {
                f"{k}_mean": float(round(metrics_df[k].mean(), 4)) for k in metrics_df.columns
            }
            mc_metrics.update({
                f"{k}_std": float(round(metrics_df[k].std(), 4)) for k in metrics_df.columns
            })
            return {"status": "success", "metrics": mc_metrics}

        except Exception as e:
            logger.warning(f"蒙特卡罗模拟失败: {str(e)}, 返回基础指标")
            return {"status": "fail", "metrics": {}}

    def train(self) -> Dict[str, Any]:
        """Sequentially train the ensemble for every valid label (label_cols order)."""
        logger.info(f"===== 启动Stacking融合训练(版本: {self.train_version}) =====")
        logger.info(f"基础模型类型: {global_base_model_types}")
        logger.info(f"融合模型保存目录: {self.ensemble_root}")
        logger.info(f"蒙特卡罗模拟次数: {self.mc_sims}")
        logger.info(f"待融合标签数: {self.train_summary['valid_labels']}")
        logger.info(f"按以下顺序执行融合: {self.label_cols}")

        # Process each label sequentially (replaces the old multiprocessing fan-out)
        results = []
        for label in self.label_cols:
            logger.info(f"===== 开始处理标签[{label}] =====")
            result = self._train_single_label(label)
            results.append(result)
            logger.info(f"===== 标签[{label}]处理完成 =====")

        # Aggregate results (including the average number of models used per label)
        total_base_models = 0
        for res in results:
            if res["status"] == "success":
                self.train_summary["success"] += 1
                self.train_summary["metrics"][res["label"]] = {
                    **res["metrics"],
                    "base_model_count": res["base_model_count"],
                    "save_path": res["save_path"],
                    # fix: the weights live on the result itself; res["metrics"] never
                    # contained a "base_auc_weights" key, so the old lookup was always {}
                    "base_auc_weights": res.get("base_auc_weights", {})
                }
                total_base_models += res["base_model_count"]
            else:
                self.train_summary["failed"].append({
                    "label": res["label"], 
                    "error": res["error"][:100]
                })

        # Derived statistics
        self.train_summary["success_rate"] = (
            self.train_summary["success"] / self.train_summary["valid_labels"] * 100
            if self.train_summary["valid_labels"] > 0 else 0.0
        )
        self.train_summary["avg_base_models_used"] = (
            total_base_models / self.train_summary["success"]
            if self.train_summary["success"] > 0 else 0.0
        )

        # Log the summary and persist it
        self._log_summary()
        self._save_summary()
        return self.train_summary

    def _log_summary(self):
        """Log the training summary (5-model fusion statistics)."""
        logger.info("="*120)
        logger.info(f"                      Stacking融合训练结果汇总(版本: {self.train_version})")
        logger.info("="*120)
        logger.info(f"总标签数: 33 | 有效标签数: {self.train_summary['valid_labels']}")
        logger.info(f"成功融合数: {self.train_summary['success']}(成功率: {self.train_summary['success_rate']:.2f}%)")
        # fix: denominator follows the configured model list instead of a hard-coded 5
        logger.info(f"平均参与模型数/标签: {self.train_summary['avg_base_models_used']:.1f}/{len(global_base_model_types)}")
        logger.info(f"失败数: {len(self.train_summary['failed'])}")

        # Show a few failed labels as examples
        if self.train_summary["failed"]:
            logger.warning("失败标签示例: ")
            for item in self.train_summary["failed"][:3]:
                logger.warning(f"  标签[{item['label']}]: {item['error']}")

        # Average performance metrics
        if self.train_summary["metrics"]:
            metrics_df = pd.DataFrame.from_dict(self.train_summary["metrics"], orient="index")
            avg_auc = metrics_df["auc_mean"].mean() if "auc_mean" in metrics_df else 0.0
            avg_f1 = metrics_df["f1_mean"].mean() if "f1_mean" in metrics_df else 0.0
            logger.info(f"平均性能(含蒙特卡罗模拟): ")
            logger.info(f"  平均AUC: {avg_auc:.4f} | 平均F1: {avg_f1:.4f}")

        logger.info("="*120)

    def _save_summary(self):
        """Persist the training summary as JSON."""
        summary_path = os.path.join(self.ensemble_root, "stacking_training_summary.json")
        try:
            with open(summary_path, "w", encoding="utf-8") as f:
                # default=float coerces numpy scalars that may remain in the metrics
                json.dump(self.train_summary, f, indent=2, ensure_ascii=False, default=float)
            logger.info(f"融合训练汇总已保存至: {summary_path}")
        except Exception as e:
            logger.error(f"保存汇总结果失败: {str(e)}\n{traceback.format_exc()}")

class StackingMetaTrainer:
    """Meta-model trainer: fusion logic for a single label.

    Stacks the base-model probability vectors into a meta-feature matrix,
    tunes a LightGBM meta-model with Bayesian optimization (AUC objective),
    and reports metrics plus an uncertainty estimate.
    """
    def __init__(self, label: str, base_preds: Dict[str, np.ndarray]):
        self.label = label
        self.base_preds = base_preds  # base-model predictions {model_name: probability array}
        self.base_names = list(base_preds.keys())
        self.X_meta = None  # meta-feature matrix (stacked base-model predictions)
        self.y_true = None  # ground-truth labels
        self.meta_model = None  # meta-model (Bayesian-tuned LightGBM)
        self.weights = None  # base-model weights
        self.threshold = None  # best-F1 decision threshold
        self.best_params = None  # best hyper-parameters found by Bayesian optimization

        # Hyper-parameter search space for Bayesian optimization
        self.param_bounds = {
            "learning_rate": (0.01, 0.2),    # learning rate
            "num_leaves": (10, 50),          # leaf count (integer; converted later)
            "max_depth": (3, 10),            # max depth (integer)
            "min_child_samples": (5, 30),    # min samples per leaf (integer)
            "subsample": (0.7, 1.0),         # row sampling ratio
            "colsample_bytree": (0.7, 1.0),  # column sampling ratio
            "reg_alpha": (0.01, 1.0),        # L1 regularization
            "reg_lambda": (0.01, 1.0)        # L2 regularization
        }
        # Fixed LightGBM parameters (class weight is updated in init_meta_data)
        self.params = {
            "objective": "binary",
            "metric": "auc",
            "boosting_type": "gbdt",
            "verbosity": -1,
            "seed": 42,
            "scale_pos_weight": 1.0  # default; recalculated in init_meta_data
        }

    def init_meta_data(self, y_val: np.ndarray) -> bool:
        """Build the meta-feature matrix and store the labels.

        :param y_val: binarized validation labels
        :return: True on success, False if stacking the predictions fails
        """
        try:
            self.y_true = y_val
            # Stack base-model predictions into shape (n_samples, n_base_models)
            self.X_meta = np.column_stack([self.base_preds[name] for name in self.base_names])
            # Adjust the class weight (same logic as the single-model pipeline)
            pos_ratio = np.mean(self.y_true)
            self.params["scale_pos_weight"] = (1 - pos_ratio) / pos_ratio if pos_ratio > 0 else 1.0
            logger.debug(f"标签[{self.label}]元特征初始化完成: {self.X_meta.shape} | 正样本比例: {pos_ratio:.4f}")
            return True
        except Exception as e:
            logger.error(f"标签[{self.label}]元特征初始化失败: {str(e)}\n{traceback.format_exc()}")
            return False

    def _calculate_base_weights(self) -> Dict[str, float]:
        """Compute AUC-based base-model weights (normalized to sum to 1)."""
        auc_scores = {}
        for name, pred in self.base_preds.items():
            try:
                auc = roc_auc_score(self.y_true, pred)
                auc_scores[name] = max(0.5, auc)  # floor at 0.5 (no worse than random)
            except Exception:  # fix: was a bare except; AUC fails on single-class labels
                auc_scores[name] = 0.5  # treat invalid predictions as random

        # Normalize so the weights sum to 1
        total = sum(auc_scores.values())
        return {name: score / total for name, score in auc_scores.items()}

    def _find_best_threshold(self, y_proba: np.ndarray) -> Tuple[float, float]:
        """Scan thresholds in [0.1, 0.9] and return (best_threshold, best_f1).

        Matches the single-model logic; returns plain floats so the values are
        JSON serializable downstream.
        """
        thresholds = np.arange(0.1, 0.91, 0.01)
        f1_scores = [
            f1_score(self.y_true, (y_proba >= t).astype(int), zero_division=0)
            for t in thresholds
        ]
        best_idx = int(np.argmax(f1_scores))
        # fix: np.float64 -> float (these values end up in JSON summaries)
        return float(thresholds[best_idx]), float(f1_scores[best_idx])
    
    def _lgb_auc_objective(self, **params):
        """Bayesian-optimization objective: cross-validated AUC for one hyper-parameter set.

        Adapted for LightGBM 4.x, where early stopping is supplied via callbacks.
        """
        # 1. Convert float-valued hyper-parameters to the ints LightGBM requires
        params["num_leaves"] = int(round(params["num_leaves"]))
        params["max_depth"] = int(round(params["max_depth"]))
        params["min_child_samples"] = int(round(params["min_child_samples"]))
        
        # 2. Merge with the fixed parameters (objective, class weight, ...)
        lgb_params = {
            "objective": "binary",
            "metric": "auc",
            "boosting_type": "gbdt",
            "verbosity": -1,        # silent mode: no CV logging
            "seed": 42,
            "scale_pos_weight": self.params["scale_pos_weight"],  # dynamically computed class weight
            **params  # sampled hyper-parameters (learning_rate, subsample, ...)
        }
        
        # 3. Early-stopping callback (recommended style for LightGBM 4.x):
        # stop after 10 rounds without validation-AUC improvement; verbose off
        cv_callbacks = [lgb.early_stopping(stopping_rounds=10, verbose=False)]
        
        # 4. 5-fold cross-validation (callbacks replace the old early_stopping_rounds arg)
        cv_results = lgb.cv(
            params=lgb_params,
            train_set=lgb.Dataset(self.X_meta, label=self.y_true),  # meta-features + true labels
            num_boost_round=100,  # max rounds (early stopping may end sooner)
            nfold=5,  # 5 folds for stable estimates
            callbacks=cv_callbacks,
        )
        
        # 5. Read the validation AUC series ("valid auc-mean" in LightGBM 4.x),
        # with a fallback in case the key name changes across versions
        try:
            auc_scores = cv_results["valid auc-mean"]
            logger.debug(f"交叉验证AUC序列: {auc_scores[:5]}...(共{len(auc_scores)}轮)")
        except KeyError as e:
            # Degenerate fallback: chance-level AUC if the key is missing
            logger.warning(f"未找到'valid auc-mean'键, 使用默认AUC=0.5, 错误: {e}")
            auc_scores = [0.5]
        
        # Return the best AUC observed during cross-validation
        best_auc = max(auc_scores) if len(auc_scores) > 0 else 0.5
        logger.debug(f"当前超参组合的最优交叉验证AUC: {best_auc:.4f}")
        return best_auc

    def _predict_with_uncertainty(self, X_meta: np.ndarray, n_ensemble: int = 10) -> Tuple[np.ndarray, np.ndarray]:
        """Uncertainty estimate for the LightGBM meta-model.

        Predicts with n_ensemble truncated versions of the trained booster
        (progressively fewer iterations) and returns (mean probability, std).
        The std acts as a per-sample uncertainty: larger means less reliable.

        :raises ValueError: if the meta-model has not been trained yet
        """
        if self.meta_model is None:
            raise ValueError("元模型未训练, 无法计算不确定性")
        
        preds = []
        # Predict with the top-n_ensemble iteration counts to simulate uncertainty
        for round_offset in range(1, n_ensemble + 1):
            iter_num = max(1, self.meta_model.best_iteration - round_offset * 5)
            pred = self.meta_model.predict(X_meta, num_iteration=iter_num)
            preds.append(pred)
        
        preds = np.array(preds)
        mean_prob = np.mean(preds, axis=0)  # final prediction (mean)
        std_prob = np.std(preds, axis=0)   # uncertainty (std; larger = less reliable)
        return mean_prob, std_prob

    def train(self) -> Dict[str, Any]:
        """Tune and train the LightGBM meta-model; return model, weights and metrics."""
        try:
            # 1. Compute the AUC-based base-model weights
            self.weights = self._calculate_base_weights()

            # 2. Bayesian hyper-parameter optimization (replaces fixed parameters)
            logger.debug(f"标签[{self.label}]开始贝叶斯超参优化...")
            optimizer = BayesianOptimization(
                f=self._lgb_auc_objective,  # objective (maximize AUC)
                pbounds=self.param_bounds,   # search space
                random_state=SSQ_CONFIG['random_seed'],             # fixed seed for reproducibility
                verbose=0                    # silent mode
            )
            # init_points=5 random probes, then 20 guided iterations
            optimizer.maximize(init_points=5, n_iter=20)
            # Extract and int-convert the best hyper-parameters
            self.best_params = optimizer.max["params"]
            self.best_params["num_leaves"] = int(round(self.best_params["num_leaves"]))
            self.best_params["max_depth"] = int(round(self.best_params["max_depth"]))
            self.best_params["min_child_samples"] = int(round(self.best_params["min_child_samples"]))
            logger.debug(f"标签[{self.label}]贝叶斯优化完成, 最优超参: {self.best_params}")

            # 3. Train the final meta-model with the best hyper-parameters
            lgb_params = {
                "objective": "binary",
                "metric": "auc",  # evaluation metric (also drives early stopping)
                # fix: removed "eval_metric" -- not a LightGBM parameter; "metric" covers it
                "boosting_type": "gbdt",
                "verbosity": -1,
                "seed": SSQ_CONFIG['random_seed'],
                "scale_pos_weight": self.params["scale_pos_weight"],        # class weight
                **self.best_params      # tuned hyper-parameters
            }
            train_data = lgb.Dataset(self.X_meta, label=self.y_true)
            # Validation set reuses X_meta directly (the meta training data is the
            # base-model predictions; it is small, so no extra split is made)
            valid_data = lgb.Dataset(self.X_meta, label=self.y_true, reference=train_data)

            # 4. Train with a validation set and explicit callbacks for early stopping
            self.meta_model = lgb.train(
                params=lgb_params,
                train_set=train_data,
                num_boost_round=100,
                valid_sets=[valid_data],  # validation set for early stopping
                valid_names=["valid"],    # validation-set name (for logs)
                callbacks=[
                    lgb.early_stopping(stopping_rounds=10, verbose=False),  # early stopping
                    lgb.log_evaluation(period=0)  # suppress training logs
                ]
            )

            # 5. Evaluate using the uncertainty-ensemble mean probability as the prediction
            y_proba, y_std = self._predict_with_uncertainty(self.X_meta)
            self.threshold, best_f1 = self._find_best_threshold(y_proba)
            y_label = (y_proba >= self.threshold).astype(int)

            # 6. Metrics (coerced to plain float so JSON serialization downstream works)
            metrics = {
                "auc": round(float(roc_auc_score(self.y_true, y_proba)), 4),
                "f1": round(float(best_f1), 4),
                "precision": round(float(precision_score(self.y_true, y_label, zero_division=0)), 4),
                "recall": round(float(recall_score(self.y_true, y_label, zero_division=0)), 4),
                # Uncertainty metric (mean std across the ensemble; smaller = more stable)
                "avg_uncertainty": round(float(np.mean(y_std)), 4),
                "best_params": self.best_params  # kept for later analysis
            }

            return {
                "status": "success",
                "meta_model": self.meta_model,
                "weights": self.weights,
                "threshold": self.threshold,
                "metrics": metrics,
                "best_params": self.best_params,  # tuned hyper-parameters
                "uncertainty": round(float(np.mean(y_std)), 4)  # mean uncertainty
            }

        except Exception as e:
            err = {"status": "fail", "error": f"贝叶斯元模型训练失败: {str(e)}\n{traceback.format_exc()}"}
            logger.error(f"标签[{self.label}]贝叶斯元模型训练失败: {str(e)}\n{traceback.format_exc()}")
            return err


# Usage example
def main():
    """Script entry point: build the stacking trainer and run the full fusion pass."""
    try:
        # Construct the ensemble trainer (loads data exactly like the single-model version)
        ensemble_trainer = StackingBayesianOptimization(mc_sims=100)
        # Run the sequential fusion over all labels and collect the summary
        result = ensemble_trainer.train()
        print(f"融合训练完成(版本: {result['version']}): 成功率 {result['success_rate']:.2f}%")
    except Exception as e:
        # Log at critical level and propagate so the caller sees the failure
        logger.critical(f"融合训练启动失败: {str(e)}\n{traceback.format_exc()}")
        raise


if __name__ == "__main__":
    main()