import os
import json
import numpy as np
import pandas as pd
import traceback
from datetime import datetime
from typing import Optional, Dict, List, Tuple
import joblib
import lightgbm as lgb
from config.ssq_config import SSQ_CONFIG, init_global_logger, logger

'''
预测流程说明（适配单列独立模型）:
1. 读取预测特征文件（final_merge_features.csv）
2. 遍历r1_next-r33_next:
   - 加载对应列的融合模型（stacking_rX_next.pkl）和配置
   - 按配置的训练特征列（或自动匹配特征数量）过滤预测特征
   - 独立预测该红球的出现概率
3. 汇总33个红球的预测概率
4. 计算置信度（支持3种模式，可通过配置切换）
5. 表格化打印结果并保存（含全部概率+置信度+推荐红球）
'''

class SSQSingleColFusionPredictor:
    def __init__(self):
        """Build paths/column mappings from SSQ_CONFIG, validate the confidence
        mode, set up the result directory + logger, then sanity-check the config."""
        # Core file/directory configuration
        self.predict_feat_path = os.path.join(SSQ_CONFIG["predict_file"]["DATA_FOLDER"], SSQ_CONFIG["predict_file"]["final_merge_features"])
        self.model_save_dir = SSQ_CONFIG["train_file"]["model_save_dir"]
        self.result_save_dir = SSQ_CONFIG["predict_file"]["predict_result"]
        # Column used to join/sort prediction periods
        self.merge_key = "idx"
        
        # Red-ball columns (r1-r33) and their derived label/probability column names
        self.red_cols_01 = [col for col in SSQ_CONFIG['csv01_header'] if col.startswith('r') and col[1:].isdigit()]  # r1-r33
        self.label_cols = SSQ_CONFIG['label_cols']
        self.red_prob_cols = [f"{col}_prob" for col in self.red_cols_01]
        
        # Naming patterns for each label's model file and its JSON summary
        self.model_file_pattern = "stacking_{label}.pkl"
        self.model_config_pattern = "stacking_{label}_summary.json"
        
        # Confidence mode (must be one of the three supported modes)
        self.confidence_mode = SSQ_CONFIG.get("predict_config", {}).get("confidence_mode", "top6_mean")
        if self.confidence_mode not in ["top6_mean", "max_min_diff", "top6_sum"]:
            raise ValueError(f"不支持的置信度模式: {self.confidence_mode}, 仅支持'top6_mean'/'max_min_diff'/'top6_sum'")
        
        # Training-feature configuration: an explicit column list wins; otherwise
        # the first `train_feature_num` columns are used (see _load_and_preprocess_feats)
        self.train_feature_cols = SSQ_CONFIG.get("train_config", {}).get("train_feature_cols", [])
        self.train_feature_num = SSQ_CONFIG.get("train_config", {}).get("train_feature_num", 5)
        
        # Create the result directory first, then point the global logger at it
        os.makedirs(self.result_save_dir, exist_ok=True)
        log_filename = f"ssq_single_col_predict_{datetime.now().strftime('%Y%m%d_%H%M%S')}.log"
        init_global_logger(log_file=os.path.join(self.result_save_dir, log_filename), pid=os.getpid())
        
        # Mutable state populated during run_predict()
        self.predict_feat_df = None      # feature DataFrame (one row per period)
        self.all_pred_probs = None       # (n_periods, 33) probability matrix
        self.model_configs = {}          # label -> merged model config/metrics
        self.predict_results = None      # final result DataFrame
        self.final_train_features = []   # feature columns actually fed to the models
        
        # Validate configuration and required files (raises on failure)
        self._check_config()

    def _check_config(self) -> bool:
        """Validate core configuration and required files.

        Checks that the feature file and model directory exist, spot-loads the
        first label's model to verify its format (dict with a lgb.Booster under
        'meta_model'), and checks red-ball/probability column consistency.

        :return: True on success
        :raises FileNotFoundError/ValueError: on any failed check (logged, then re-raised)
        """
        try:
            # Prediction feature file must exist
            if not os.path.exists(self.predict_feat_path):
                raise FileNotFoundError(f"预测特征文件不存在: {self.predict_feat_path}")
            
            # Model directory must exist
            if not os.path.exists(self.model_save_dir):
                raise FileNotFoundError(f"模型保存目录不存在: {self.model_save_dir}")
            
            # Spot-check the first label's model file before the batch run
            test_label = self.label_cols[0]
            model_path = os.path.join(self.model_save_dir, f'stacking_ensemble_{SSQ_CONFIG["train_version"]}', test_label, self.model_file_pattern.format(label=test_label))
            logger.info(f'model_path: {model_path}')
            if not os.path.exists(model_path):
                raise FileNotFoundError(f"测试模型文件不存在: {model_path}")
            
            # Verify the stored model format.
            # NOTE(review): the ValueErrors raised inside this try are caught by
            # its own `except` and re-wrapped, so their text ends up prefixed by
            # the "parse failed" message — presumably intentional; confirm.
            try:
                model_dict = joblib.load(model_path)
                if not isinstance(model_dict, dict) or "meta_model" not in model_dict:
                    raise ValueError(f"模型文件格式错误: 期望dict且含'meta_model'键, 实际: {type(model_dict)}, 键: {list(model_dict.keys()) if isinstance(model_dict, dict) else '无'}")
                if not isinstance(model_dict["meta_model"], lgb.Booster):
                    raise ValueError(f"meta_model类型错误: 期望lgb.Booster, 实际: {type(model_dict['meta_model'])}")
            except Exception as e:
                raise ValueError(f"模型文件解析失败: {str(e)}")
            
            # Red-ball columns and probability columns must line up 1:1
            if len(self.red_cols_01) != len(self.red_prob_cols):
                raise ValueError(f"红球列和概率列长度不匹配: 红球列{len(self.red_cols_01)}个, 概率列{len(self.red_prob_cols)}个")
            if len(self.red_cols_01) != 33:
                logger.warning(f"红球列数量不是33个（当前{len(self.red_cols_01)}个），可能影响抽样结果")
            
            # Log the effective feature configuration
            if self.train_feature_cols:
                logger.info(f"从配置文件读取训练特征列: {self.train_feature_cols}, 数量: {len(self.train_feature_cols)}")
            else:
                logger.info(f"未配置训练特征列, 将自动按数量过滤（训练特征数量: {self.train_feature_num}）")
            
            logger.info("配置校验通过，预测环境准备完成")
            logger.info(f"预测特征文件: {self.predict_feat_path}")
            logger.info(f"模型保存目录: {self.model_save_dir}")
            logger.info(f"需加载模型数: {len(self.label_cols)}（对应r1_next-r33_next）")
            logger.info(f"置信度计算模式: {self.confidence_mode}")
            return True
        except Exception as e:
            logger.error(f"配置校验失败: {str(e)}")
            raise

    def _load_and_preprocess_feats(self) -> Optional[pd.DataFrame]:
        """Load the prediction feature CSV and align it with the training features.

        Drops the merge key and label columns, selects either the configured
        training feature columns or (fallback) the first `train_feature_num`
        remaining columns, imputes missing numeric values per the configured
        strategy, and sorts rows by the merge key.

        :return: preprocessed DataFrame, or None on any failure (logged).
        """
        try:
            logger.info("开始加载预测特征并预处理...")
            # Load the feature file
            df = pd.read_csv(self.predict_feat_path, encoding="utf-8")
            logger.info(f"原始预测特征列数量: {len(df.columns)}, 所有列: {df.columns.tolist()}")
            
            # Keep candidate feature columns: drop the period key and label columns
            valid_cols = [col for col in df.columns if col not in [self.merge_key] + self.label_cols]
            df_valid = df[[self.merge_key] + valid_cols].copy()
            logger.info(f"剔除期号和标签列后，剩余特征列数量: {len(valid_cols)}")
            
            # Decide the final feature columns used for prediction
            if self.train_feature_cols:
                # Explicit config: every configured column must be present
                missing_cols = [col for col in self.train_feature_cols if col not in valid_cols]
                if missing_cols:
                    raise ValueError(f"预测特征文件缺少配置的训练特征列: {missing_cols}, 请检查配置或特征文件")
                self.final_train_features = self.train_feature_cols
            else:
                # Fallback: take the first N columns in file order
                # (assumes file column order matches training — TODO confirm)
                if len(valid_cols) < self.train_feature_num:
                    raise ValueError(f"预测特征列数量不足: 现有{len(valid_cols)}个, 训练时需要{self.train_feature_num}个")
                self.final_train_features = valid_cols[:self.train_feature_num]
            
            # Extract final feature columns and impute missing numeric values
            df_feats = df_valid[[self.merge_key] + self.final_train_features].copy()
            missing_strategy = SSQ_CONFIG.get("missing_strategy", "median")
            numeric_cols = df_feats[self.final_train_features].select_dtypes(include=["int64", "float64"]).columns
            if missing_strategy == "mean":
                df_feats[numeric_cols] = df_feats[numeric_cols].fillna(df_feats[numeric_cols].mean())
            elif missing_strategy == "median":
                df_feats[numeric_cols] = df_feats[numeric_cols].fillna(df_feats[numeric_cols].median())
            elif missing_strategy == "zero":
                df_feats[numeric_cols] = df_feats[numeric_cols].fillna(0)
            # NOTE(review): any other strategy value silently leaves NaNs in place
            
            # Sort by period key and reset the index
            df_feats = df_feats.sort_values(self.merge_key).reset_index(drop=True)
            
            logger.info(f"特征预处理完成: 期数={len(df_feats)}, 最终使用特征列数量={len(self.final_train_features)}")
            logger.info(f"最终使用的特征列: {self.final_train_features}")
            logger.info(f"预测期号范围: {df_feats[self.merge_key].tolist()}")
            return df_feats
        except Exception as e:
            logger.error(f"特征加载预处理失败: {str(e)}\n{traceback.format_exc()}")
            return None

    def _load_single_model(self, label: str) -> Tuple[Optional[lgb.Booster], Optional[Dict]]:
        """Load the fusion model and its config for one label.

        The model file is a dict saved by training; the lgb.Booster lives under
        'meta_model'. The JSON summary (if present) is merged over the
        weights/threshold/metrics stored inside the model dict.

        :param label: label column name, e.g. "r1_next"
        :return: (booster, config) on success, (None, None) on any failure (logged).
        """
        try:
            model_folder = os.path.join(self.model_save_dir, f'stacking_ensemble_{SSQ_CONFIG["train_version"]}', label)
            model_path = os.path.join(model_folder, self.model_file_pattern.format(label=label))
            config_path = os.path.join(model_folder, self.model_config_pattern.format(label=label))
            
            # Load and validate the stored model dict
            model_dict = joblib.load(model_path)
            if not isinstance(model_dict, dict) or "meta_model" not in model_dict:
                logger.error(f"标签[{label}]模型文件格式错误: 期望dict且含'meta_model'键")
                return None, None
            booster_model = model_dict["meta_model"]
            if not isinstance(booster_model, lgb.Booster):
                logger.error(f"标签[{label}]meta_model类型错误: 期望lgb.Booster")
                return None, None
            
            # Load the optional JSON summary (missing/corrupt file is non-fatal)
            config = {}
            if os.path.exists(config_path):
                try:
                    with open(config_path, "r", encoding="utf-8") as f:
                        config = json.load(f)
                except Exception as config_err:
                    logger.warning(f"标签[{label}]配置文件解析失败: {str(config_err)}")
            
            # Merge: values from the JSON summary take precedence over the model dict
            model_config_from_dict = {
                "weights": model_dict.get("weights", []),
                "threshold": model_dict.get("threshold", 0.5),
                "metrics": model_dict.get("metrics", {})
            }
            config = {**model_config_from_dict, **config}
            
            logger.debug(f"成功加载标签[{label}]的模型和配置")
            return booster_model, config
        except Exception as e:
            logger.error(f"加载标签[{label}]的模型失败: {str(e)}\n{traceback.format_exc()}")
            return None, None

    ###########################################################################
    # Bayesian meta-model uncertainty estimation (model passed in explicitly
    # so the helper carries no hidden context dependencies).
    ###########################################################################
    def _predict_with_uncertainty(self, model: lgb.Booster, X: np.ndarray, n_ensemble: int = 10) -> Tuple[np.ndarray, np.ndarray]:
        """Estimate predictive uncertainty for a LightGBM meta-model.

        Predicts with `n_ensemble` nearby iteration counts (stepping back 5
        rounds per member from the best iteration) and returns the per-sample
        mean probability together with the standard deviation across members.

        :param model: trained Bayesian meta-model (lgb.Booster)
        :param X: feature matrix, shape [n_samples, n_features]
        :param n_ensemble: ensemble size (larger = more stable, default 10)
        :return: (mean_prob, std_prob) — std is the uncertainty estimate
        """
        def _plain_predict() -> Tuple[np.ndarray, np.ndarray]:
            # Fallback path: one prediction at the best iteration, zero uncertainty.
            best = model.best_iteration if hasattr(model, "best_iteration") else None
            single = model.predict(X, num_iteration=best)
            return single, np.zeros_like(single)

        if not isinstance(model, lgb.Booster):
            logger.warning("输入不是LightGBM Booster模型，跳过不确定性估计")
            return _plain_predict()

        try:
            # Best iteration recorded by training; default to 100 when unset.
            base_iter = model.best_iteration if model.best_iteration > 0 else 100
            # Ensemble of predictions at slightly different iteration counts
            # (never below 1 iteration) to simulate model uncertainty.
            stacked = np.array([
                model.predict(X, num_iteration=max(1, base_iter - 5 * member))
                for member in range(n_ensemble)
            ])
            # Mean = final probability; std = disagreement between members.
            return np.mean(stacked, axis=0), np.std(stacked, axis=0)
        except Exception as e:
            logger.warning(f"不确定性估计失败，降级为普通预测: {str(e)}")
            return _plain_predict()

    # Single-column prediction built on the uncertainty-aware predictor above.
    def _predict_single_col(self, label: str, model: lgb.Booster) -> Optional[np.ndarray]:
        """Predict the appearance probability for one red-ball column,
        damping samples whose uncertainty estimate is too high.

        :param label: label column name, e.g. "r1_next"
        :param model: loaded lgb.Booster for this label
        :return: float array of filtered probabilities, or None on failure (logged).
        """
        try:
            # Feature matrix shared by all 33 single-column models
            feat_matrix = self.predict_feat_df[self.final_train_features].values
            logger.debug(f"标签[{label}]预测特征维度: {feat_matrix.shape}（特征数量: {feat_matrix.shape[1]}）")

            # Ensemble prediction: mean probability plus its standard deviation
            mean_prob, std_prob = self._predict_with_uncertainty(model, feat_matrix)

            # Samples whose uncertainty exceeds the threshold are considered
            # unreliable and forced down to a floor probability of 0.05.
            # (Threshold is tunable, e.g. 0.1 → 0.08 filters more aggressively.)
            uncertainty_threshold = 0.1
            damped = np.where(std_prob > uncertainty_threshold, 0.05, mean_prob)
            # Clamp into [0.01, 0.99] to guard against abnormal values
            damped = np.clip(damped, 0.01, 0.99)

            # Sanity check: one probability per prediction row
            expected_len = len(self.predict_feat_df)
            if len(damped) != expected_len:
                raise ValueError(f"标签[{label}]预测结果长度不匹配: 期望{expected_len}, 实际{len(damped)}")

            logger.debug(
                f"标签[{label}]预测完成: "
                f"原始概率范围[{round(mean_prob.min(),4)}, {round(mean_prob.max(),4)}] | "
                f"不确定性范围[{round(std_prob.min(),4)}, {round(std_prob.max(),4)}] | "
                f"过滤后概率范围[{round(damped.min(),4)}, {round(damped.max(),4)}]"
            )
            return damped

        except Exception as e:
            logger.error(f"预测标签[{label}]失败: {str(e)}\n{traceback.format_exc()}")
            return None

    def _batch_predict_all_cols(self) -> bool:
        """Predict probabilities for all 33 red-ball columns (with random jitter).

        Fills a (n_periods, 33) float64 matrix column by column; labels whose
        model fails to load or predict stay NaN and are later imputed with the
        per-row mean of the successful columns.

        :return: True when the probability matrix was produced, False on fatal error.
        """
        try:
            logger.info("开始批量预测所有33个红球列...")
            n_samples = len(self.predict_feat_df)
            all_probs = np.full((n_samples, len(self.label_cols)), np.nan, dtype=np.float64)  # force float64
            
            # Time-based seed: jitter intentionally differs between runs
            np.random.seed(int(datetime.now().timestamp() % 1000000))
            
            for idx, label in enumerate(self.label_cols):
                logger.info(f"正在预测第{idx+1}/33个红球: 标签[{label}]")
                model, config = self._load_single_model(label)
                if model is None:
                    logger.error(f"标签[{label}]模型加载失败, 跳过")
                    continue
                
                prob = self._predict_single_col(label, model)
                if prob is not None:
                    # Small uniform jitter (±0.05) that keeps the ranking trend intact
                    noise = np.random.uniform(low=-0.05, high=0.05, size=prob.shape)
                    prob = prob + noise
                    # Clamp back into a valid probability range
                    prob = np.clip(prob, 0.01, 0.99)
                    all_probs[:, idx] = prob
                    self.model_configs[label] = config
                else:
                    logger.error(f"标签[{label}]预测失败, 跳过")
            
            # Impute failed labels' columns with the per-row mean of the rest.
            # NOTE(review): if EVERY label failed, nanmean is NaN and the matrix
            # stays NaN — downstream steps would then misbehave; confirm acceptable.
            if np.isnan(all_probs).any():
                nan_cols_idx = np.where(np.isnan(all_probs).any(axis=0))[0]
                nan_labels = [self.label_cols[idx] for idx in nan_cols_idx]
                logger.warning(f"以下标签预测失败（已用均值填充）: {nan_labels}")
                mean_prob = np.nanmean(all_probs, axis=1, keepdims=True)
                all_probs = np.where(np.isnan(all_probs), mean_prob, all_probs)
            
            # Force float64 to avoid dtype surprises downstream
            self.all_pred_probs = all_probs.astype(np.float64)
            
            logger.info(f"所有红球列预测完成: 概率矩阵形状{all_probs.shape}, 数据类型{all_probs.dtype}, 无NaN值: {not np.isnan(all_probs).any()}")
            return True
        except Exception as e:
            logger.error(f"批量预测失败: {str(e)}\n{traceback.format_exc()}")
            return False

    def _calculate_confidence(self) -> np.ndarray:
        """Compute a per-period confidence score from the probability matrix.

        Modes (validated in __init__, re-checked here defensively):
        - top6_mean:    mean of the 6 highest probabilities
        - max_min_diff: spread between highest and lowest probability
        - top6_sum:     sum of the 6 highest probabilities

        :return: float64 array, one confidence value (rounded to 4 dp) per row.
        :raises ValueError: if confidence_mode is not one of the three modes.
        """
        logger.info(f"按模式[{self.confidence_mode}]计算置信度...")
        confidence_list = []
        
        for prob_row in self.all_pred_probs:
            if self.confidence_mode == "top6_mean":
                confidence = np.mean(np.sort(prob_row)[-6:])
            elif self.confidence_mode == "max_min_diff":
                confidence = np.max(prob_row) - np.min(prob_row)
            elif self.confidence_mode == "top6_sum":
                confidence = np.sum(np.sort(prob_row)[-6:])
            else:
                # __init__ already validates the mode; fail loudly here instead
                # of hitting an UnboundLocalError if that invariant is broken.
                raise ValueError(f"不支持的置信度模式: {self.confidence_mode}")
            confidence_list.append(round(confidence, 4))
        
        return np.array(confidence_list, dtype=np.float64)

    def _get_top6_red(self, row) -> str:
        """Weighted random sampling of 6 red balls from one result row.

        Sampling weight is each ball's predicted probability (normalized); the
        sampled balls are returned comma-joined, ordered by descending
        probability. On any error, falls back to a deterministic top-6 sort.

        :param row: result-row Series containing all `rX_prob` columns
        :return: e.g. "r5,r12,r18,r22,r27,r31"
        """
        try:
            # 1. Extract probabilities as float64
            prob_vals = row[self.red_prob_cols].values.astype(np.float64)
            # Derive ball names from the probability columns.
            # NOTE(review): both filters below are no-ops by construction —
            # red_prob_cols is built from red_cols_01 in __init__.
            red_cols = [col for col in self.red_prob_cols if col.replace("_prob", "") in self.red_cols_01]
            red_cols = [col.replace("_prob", "") for col in red_cols]
            
            # Safety check: names and weights must have equal length
            if len(red_cols) != len(prob_vals):
                logger.warning(f"红球列和概率列长度不匹配（{len(red_cols)} vs {len(prob_vals)}），使用前33个红球列")
                red_cols = red_cols[:33]
                prob_vals = prob_vals[:33]
            
            # 2. Scrub NaN/inf values
            prob_vals = np.nan_to_num(prob_vals, nan=0.01, posinf=0.99, neginf=0.01)
            # Tiny epsilon so no sampling weight is exactly zero
            prob_vals = prob_vals + 1e-8
            # Normalize so weights sum to 1 (required by np.random.choice)
            prob_sum = prob_vals.sum()
            if prob_sum == 0:
                normalized_probs = np.ones_like(prob_vals) / len(prob_vals)  # uniform fallback
                logger.warning("概率总和为0，使用均匀分布抽样")
            else:
                normalized_probs = prob_vals / prob_sum
            
            # 3. Draw 6 distinct balls, weighted by probability.
            # NOTE(review): re-seeding the GLOBAL numpy RNG per row (microsecond
            # timestamp) makes runs irreproducible and clobbers any seed set
            # elsewhere — consider a local np.random.Generator instead.
            seed = int(datetime.now().timestamp() * 1000000) % 100000000
            np.random.seed(seed)
            sampled_reds = np.random.choice(red_cols, size=6, replace=False, p=normalized_probs)
            
            # 4. Order the sampled balls by descending probability
            sampled_probs = [row[f"{red}_prob"] for red in sampled_reds]
            sorted_indices = np.argsort(sampled_probs)[::-1]
            top6_reds = [sampled_reds[idx] for idx in sorted_indices]
            
            return ",".join(top6_reds)
        except Exception as e:
            logger.error(f"红球抽样失败，使用默认Top6概率排序: {str(e)}")
            # Fallback: take the 6 highest-probability balls deterministically
            prob_vals = row[self.red_prob_cols].values.astype(np.float64)
            prob_vals = np.nan_to_num(prob_vals, nan=0.01)
            top6_idx = np.argsort(prob_vals)[-6:][::-1]
            top6_reds = [self.red_prob_cols[idx].replace("_prob", "") for idx in top6_idx]
            return ",".join(top6_reds)

    def _generate_final_results(self) -> Optional[pd.DataFrame]:
        """Assemble the final result DataFrame.

        Columns: merge key, 33 probability columns, confidence, confidence_mode,
        and the comma-joined top-6 recommendation from weighted sampling.

        :return: result DataFrame, or None on failure (logged).
        """
        try:
            logger.info("生成最终预测结果...")
            # Probability matrix as a DataFrame (forced to float)
            prob_df = pd.DataFrame(
                self.all_pred_probs,
                columns=self.red_prob_cols,
                dtype=np.float64
            )
            
            # Attach period key, confidence and its mode
            prob_df[self.merge_key] = self.predict_feat_df[self.merge_key].values
            confidence_array = self._calculate_confidence()
            prob_df["confidence"] = confidence_array.astype(np.float64)
            prob_df["confidence_mode"] = self.confidence_mode
            
            # result_type="reduce" keeps apply() returning one scalar per row
            prob_df["top6_recommend"] = prob_df.apply(self._get_top6_red, axis=1, result_type="reduce")
            
            # Fix the output column order
            final_cols = [self.merge_key] + self.red_prob_cols + ["confidence", "confidence_mode", "top6_recommend"]
            final_df = prob_df[final_cols].copy()
            
            # Verify the probability columns stayed numeric
            prob_cols_dtype = final_df[self.red_prob_cols].dtypes.unique()
            logger.info(f"概率列数据类型: {prob_cols_dtype}")
            if not all(dtype in [np.float64, np.float32] for dtype in prob_cols_dtype):
                logger.warning("部分概率列仍非数值类型，可能影响排序结果")
            
            logger.info("最终结果生成完成")
            return final_df
        except Exception as e:
            logger.error(f"生成最终结果失败: {str(e)}\n{traceback.format_exc()}")
            return None

    def _get_prob_level(self, prob: float) -> str:
        """Map a probability to a coarse level: 高 (>=0.7), 中 (>=0.5), 低 (otherwise)."""
        for bound, level in ((0.7, "高"), (0.5, "中")):
            if prob >= bound:
                return level
        return "低"

    def _print_results_table(self, result_df: pd.DataFrame):
        """Log the prediction for the first period as a formatted table.

        Recommended balls are printed first (probability descending), then the
        remaining balls, each annotated with its probability level (高/中/低).

        :param result_df: output of _generate_final_results (row 0 is used)
        """
        logger.info("="*80)
        logger.info(f"预测期号: {result_df[self.merge_key].iloc[0]} | 置信度计算模式: {self.confidence_mode}")
        logger.info("="*80)
        
        # Header row (left-aligned, fixed widths, incl. probability-level column)
        logger.info(f"{'排序':<6} {'红球编号':<10} {'选中概率':<12} {'概率等级':<8} {'是否推荐':<10}")
        logger.info("-"*50)
        
        # Data for the current (first) period
        current_row = result_df.iloc[0]
        confidence = current_row["confidence"]
        top6_recommend = current_row["top6_recommend"].split(",")
        
        # 1. The 6 recommended balls (sorted by probability, descending)
        top6_data = []
        for red_col in top6_recommend:
            prob_col = f"{red_col}_prob"
            prob = round(current_row[prob_col], 4)
            level = self._get_prob_level(prob)  # coarse probability level
            top6_data.append({
                "red_col": red_col,
                "prob": prob,
                "level": level,
                "is_recommend": "是"
            })
        # Sort recommendations by probability, descending
        top6_data_sorted = sorted(top6_data, key=lambda x: x["prob"], reverse=True)
        
        # 2. The remaining 27 balls (sorted by probability, descending)
        other_data = []
        for red_col in self.red_cols_01:
            if red_col not in top6_recommend:
                prob_col = f"{red_col}_prob"
                prob = round(current_row[prob_col], 4)
                level = self._get_prob_level(prob)  # coarse probability level
                other_data.append({
                    "red_col": red_col,
                    "prob": prob,
                    "level": level,
                    "is_recommend": "否"
                })
        # Sort the rest by probability, descending
        other_data_sorted = sorted(other_data, key=lambda x: x["prob"], reverse=True)
        
        # 3. Recommended balls first, then the rest
        all_data_sorted = top6_data_sorted + other_data_sorted
        
        # Print every row with its rank
        for i, item in enumerate(all_data_sorted, 1):
            logger.info(f"{i:<6} {item['red_col']:<10} {item['prob']:<12.4f} {item['level']:<8} {item['is_recommend']:<10}")
        
        # Summary block (incl. the level legend)
        logger.info("-"*50)
        logger.info(f"汇总信息:")
        logger.info(f"整体置信度: {confidence:.4f}")
        logger.info(f"推荐红球（按概率降序）: {','.join([item['red_col'] for item in top6_data_sorted])}")
        logger.info(f"推荐红球平均概率: {np.mean([item['prob'] for item in top6_data_sorted]):.4f}")
        logger.info(f"概率等级说明: 高(≥0.7) | 中(0.5-0.7) | 低(<0.5)")  # level legend
        logger.info("="*80 + "\n")

    def _save_results(self, result_df: pd.DataFrame) -> bool:
        """Persist the prediction results.

        Writes the result CSV (4-decimal floats) and a companion JSON metadata
        file capturing the run config, result statistics and model metrics.

        :param result_df: final result DataFrame from _generate_final_results
        :return: True on success, False on any failure (logged).
        """
        try:
            logger.info("开始保存预测结果...")
            timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
            
            # 1. Result CSV
            result_csv_path = os.path.join(self.result_save_dir, f"ssq_single_col_predict_result_{timestamp}.csv")
            result_df.to_csv(result_csv_path, index=False, encoding="utf-8", float_format="%.4f")
            
            # 2. Metadata JSON describing this run
            metadata = {
                "predict_time": datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
                "feature_path": self.predict_feat_path,
                "model_save_dir": self.model_save_dir,
                "final_train_features": self.final_train_features,
                "train_feature_num": len(self.final_train_features),
                "predict_periods": result_df[self.merge_key].tolist(),
                "period_count": len(result_df),
                "result_path": result_csv_path,
                # Run configuration plus human-readable descriptions
                "config": {
                    "missing_strategy": SSQ_CONFIG.get("missing_strategy", "median"),
                    "confidence_mode": self.confidence_mode,
                    "confidence_desc": {
                        "top6_mean": "前6个高概率红球的平均概率",
                        "max_min_diff": "最高概率与最低概率的差值（区分度）",
                        "top6_sum": "前6个高概率红球的概率和"
                    },
                    "prob_level_desc": "高(≥0.7) | 中(0.5-0.7) | 低(<0.5)（基于预测概率的统计分级）",
                    "red_cols": self.red_cols_01,
                    "model_file_pattern": self.model_file_pattern,
                    "top6_recommend_desc": "基于预测概率加权随机抽样的6个红球（概率越高抽样概率越大）"
                },
                # Confidence stats plus the mean probability of the first 5 balls
                "result_stats": {
                    "avg_confidence": round(result_df["confidence"].mean(), 4),
                    "max_confidence": round(result_df["confidence"].max(), 4),
                    "min_confidence": round(result_df["confidence"].min(), 4),
                    "avg_prob_per_red": {col: round(result_df[col].mean(), 4) for col in self.red_prob_cols[:5]}
                },
                "model_configs_summary": {label: config.get("metrics", {}) for label, config in self.model_configs.items() if config}
            }
            
            metadata_path = os.path.join(self.result_save_dir, f"predict_metadata_{timestamp}.json")
            with open(metadata_path, "w", encoding="utf-8") as f:
                json.dump(metadata, f, indent=2, ensure_ascii=False)
            
            logger.info(f"预测结果已保存至: {result_csv_path}")
            logger.info(f"预测元数据已保存至: {metadata_path}")
            logger.info(f"置信度统计: 平均={metadata['result_stats']['avg_confidence']} | 最高={metadata['result_stats']['max_confidence']} | 最低={metadata['result_stats']['min_confidence']}")
            return True
        except Exception as e:
            logger.error(f"保存结果失败: {str(e)}\n{traceback.format_exc()}")
            return False

    def monte_carlo_simulation(self, n_simulations: int = 10000, top_k: int = 7) -> Dict:
        """
        Monte Carlo simulation: repeat the weighted 6-ball draw n times and
        report the top_k most frequently selected numbers.

        :param n_simulations: number of simulated draws (10k-50k suggested; more = steadier but slower)
        :param top_k: how many high-frequency numbers to report
        :return: dict with simulation params, top numbers (count + ratio) and all counts
        """
        try:
            logger.info(f"开始蒙特卡罗模拟：{n_simulations}次预测，统计前{top_k}个高频号码")
            # 1. Per-number selection counter (r1-r33)
            count_dict = {red_col: 0 for red_col in self.red_cols_01}
            
            # 2. Probability distribution of the current period
            # (uses self.all_pred_probs[0] — assumes a single predicted period)
            current_prob = self.all_pred_probs[0].copy()
            # Scrub NaNs and guarantee strictly positive weights
            current_prob = np.nan_to_num(current_prob, nan=0.01)
            current_prob = current_prob + 1e-8  # avoid zero weights
            normalized_prob = current_prob / current_prob.sum()  # normalize to sum 1
            
            # 3. Repeated sampling.
            # NOTE(review): the seed is the CURRENT timestamp, so runs are NOT
            # reproducible — the original comment claiming a fixed seed was wrong.
            np.random.seed(int(datetime.now().timestamp()))
            for _ in range(n_simulations):
                # Each draw picks 6 distinct numbers (same scheme as a single prediction)
                sampled_reds = np.random.choice(
                    self.red_cols_01, 
                    size=6, 
                    replace=False, 
                    p=normalized_prob
                )
                # Count every selected number
                for red in sampled_reds:
                    count_dict[red] += 1
            
            # 4. Rank numbers by selection count and keep the top_k
            sorted_reds = sorted(count_dict.items(), key=lambda x: x[1], reverse=True)
            top_reds = sorted_reds[:top_k]
            total_samples = n_simulations * 6  # total selections (6 per draw)
            
            # 5. Assemble the result payload (counts and shares)
            result = {
                "simulation_params": {
                    "n_simulations": n_simulations,
                    "top_k": top_k,
                    "total_samples": total_samples
                },
                "top_reds": [
                    {
                        "red_col": red,
                        "count": count,
                        "ratio": round(count / total_samples, 4)  # share of all selections
                    }
                    for red, count in top_reds
                ],
                "all_counts": count_dict  # full counts for downstream analysis
            }
            
            # Log the ranking table
            logger.info("="*60)
            logger.info(f"蒙特卡罗模拟结果（{n_simulations}次预测）")
            logger.info("="*60)
            logger.info(f"{'排名':<6} {'红球编号':<10} {'选中次数':<12} {'选中占比':<10}")
            logger.info("-"*50)
            for i, item in enumerate(result["top_reds"], 1):
                logger.info(f"{i:<6} {item['red_col']:<10} {item['count']:<12} {item['ratio']:<10.4f}")
            logger.info("-"*50)
            logger.info(f"说明：选中占比 = 该号码被选中次数 / 总选中次数（{total_samples}次）")
            logger.info("="*60 + "\n")
            
            return result
        except Exception as e:
            logger.error(f"蒙特卡罗模拟失败: {str(e)}\n{traceback.format_exc()}")
            raise

    def run_predict(self) -> bool:
        """Run the full prediction pipeline.

        Steps: load/preprocess features → batch-predict 33 columns → build the
        result table → print it → save CSV/metadata → optional Monte Carlo
        simulation → log the final summary.

        :return: True when every step succeeded, False otherwise.
        """
        try:
            logger.info("="*80)
            logger.info("开始执行SSQ单列独立融合模型预测全流程")
            logger.info("="*80)
            
            # 1. Load and preprocess features
            self.predict_feat_df = self._load_and_preprocess_feats()
            if self.predict_feat_df is None or self.predict_feat_df.empty:
                logger.critical("特征数据无效，预测流程终止")
                return False
            
            # 2. Batch-predict all 33 red-ball columns
            if not self._batch_predict_all_cols():
                logger.critical("批量预测失败，预测流程终止")
                return False
            
            # 3. Build the final result DataFrame
            self.predict_results = self._generate_final_results()
            if self.predict_results is None or self.predict_results.empty:
                logger.critical("最终结果生成失败，预测流程终止")
                return False
            
            # 4. Print the result table
            self._print_results_table(self.predict_results)
            
            # 5. Save results and metadata
            if not self._save_results(self.predict_results):
                logger.critical("结果保存失败，预测流程终止")
                return False
            
            # 6. Optional Monte Carlo simulation (after saving, before the summary)
            if SSQ_CONFIG.get("predict_config", {}).get("enable_monte_carlo", True):
                n_sim = SSQ_CONFIG.get("predict_config", {}).get("monte_carlo_n", 10000)
                top_k = SSQ_CONFIG.get("predict_config", {}).get("monte_carlo_top_k", 7)
                self.monte_carlo_result = self.monte_carlo_simulation(n_simulations=n_sim, top_k=top_k)
            
            # Final summary
            logger.info("="*80)
            logger.info("预测全流程完成! 最终摘要:")
            logger.info(f"预测期数: {len(self.predict_results)} 期")
            logger.info(f"推荐红球（按概率降序）: {self.predict_results['top6_recommend'].iloc[0]}")
            logger.info(f"整体置信度: {self.predict_results['confidence'].iloc[0]:.4f}")
            logger.info(f"结果文件保存路径: {self.result_save_dir}")
            logger.info("="*80)
            return True
        except Exception as e:
            logger.critical(f"预测全流程异常终止: {str(e)}\n{traceback.format_exc()}")
            return False


if __name__ == "__main__":
    # Run the full prediction pipeline; exit code 0 on success, 1 on failure.
    # Use SystemExit rather than the interactive-only `exit()` builtin, which
    # is injected by the `site` module and not guaranteed in all environments.
    predictor = SSQSingleColFusionPredictor()
    if predictor.run_predict():
        logger.info("SSQ单列独立融合模型预测任务成功完成!")
        raise SystemExit(0)
    else:
        logger.error("SSQ单列独立融合模型预测任务执行失败!")
        raise SystemExit(1)