"""量化分析器."""

from __future__ import annotations

import json
import logging
from dataclasses import asdict, dataclass
from typing import Any, Dict, List, Optional

import pandas as pd

from ..backtest import (
    BacktestConfig,
    RobustnessEvaluator,
    SensitivityAnalyzer,
    WalkForwardBacktester,
    calculate_backtest_metrics,
    calculate_model_metrics,
)
from ..data.augmentation import apply_data_augmentation
from ..data.cleaning import prepare_training_dataset
from ..utils import now_shanghai
from ..models import ModelTrainer
from ..models.storage import ModelMetadata, ModelStorage
from ..utils.state import GLOBAL_DATA_STORE
from ..utils import (
    align_predictions,
    analyze_learning_curve,
    diagnose_overfitting,
    format_quant_analysis_text,
    run_time_series_cv,
    test_prediction_stability,
    test_sequence_length_sensitivity,
    test_temporal_decay,
)

logger = logging.getLogger(__name__)


@dataclass
class RecommendationDetail:
    """Holds the scoring details behind a model recommendation."""

    # Name of the model being scored (e.g. "lstm", "catboost").
    model_name: str
    # Final composite recommendation score for this model.
    score: float
    # Human-readable reasons supporting the recommendation.
    reasons: List[str]
    # Raw metric values used in scoring (e.g. AUC, Sharpe ratio).
    metrics: Dict[str, float]
    # Per-component contribution of each metric to the final score.
    breakdown: Dict[str, float]


class QuantAnalyzer:
    """量化分析器，集成模型训练、回测、评估全流程."""

    def __init__(
        self,
        *,
        model_storage_dir: str = "models",
        random_seed: int = 42,
    ) -> None:
        """Initialize the quant analyzer.

        Parameters
        ----------
        model_storage_dir:
            Directory where trained models are persisted (default "models").
        random_seed:
            Seed forwarded to the model trainer for reproducible training
            (default 42, matching the previous hard-coded value).
        """
        # Storage backend for persisting/loading the best model per symbol.
        self.storage = ModelStorage(model_storage_dir)
        # Trainer used for splitting data and fitting all candidate models.
        self.trainer = ModelTrainer(random_seed=random_seed)

    def analyze(
        self,
        symbol: str,
        risk_summary: Optional[str] = None,
        include_lstm: bool = True,
        include_gru: bool = False,
        include_temporal_cnn: bool = False,
        include_catboost: bool = False,
        include_mlp: bool = False,
        enable_optimization: bool = False,
    ) -> str:
        """执行量化分析并返回 JSON 格式的结果.

        Parameters
        ----------
        symbol:
            ETF 标准化代码
        risk_summary:
            风险控制报告摘要（可选）
        include_lstm:
            是否包含 LSTM 模型（默认 True）
        include_gru:
            是否包含 GRU 模型（默认 False）
        include_temporal_cnn:
            是否包含 TemporalCNN（原 CNN1D）模型
        include_catboost:
            是否包含 CatBoost 模型
        include_mlp:
            是否包含 MLP 模型
        enable_optimization:
            是否启用模型优化

        Returns
        -------
        JSON 格式的分析结果摘要
        """
        logger.info(f"开始量化分析: symbol={symbol}")

        # 1. 获取数据和特征
        dataset = GLOBAL_DATA_STORE.get_dataset(symbol)
        if dataset is None:
            raise RuntimeError(f"未找到缓存的 ETF 数据: {symbol}，请先调用 etf_data_tool")

        features_df = GLOBAL_DATA_STORE.get_features(symbol)
        if features_df is None:
            raise RuntimeError(f"未找到缓存的特征数据: {symbol}，请先调用 etf_feature_engineering_tool")

        # 2. 准备数据：统一委托给 data.cleaning 模块，确保逻辑集中维护
        X, y = prepare_training_dataset(features_df, dataset)

        # 3. 数据切分
        X_train, y_train, X_val, y_val, X_test, y_test = self.trainer.split_data(
            X, y, train_ratio=0.7, val_ratio=0.15, test_ratio=0.15
        )

        # 检查切分后的数据量
        if len(X_train) == 0:
            raise ValueError(
                f"训练集为空：总样本数 {len(X)} 条，切分后训练集为 0 条。"
                f"请确保有足够的历史数据（建议至少 200 条）。"
            )

        # 3.5 特征选择（减少过拟合风险）
        # 计算合理的特征数量上限：样本数/10，但不少于30个，不多于50个
        from ..features.feature_selector import FeatureSelector
        
        original_feature_count = len(X_train.columns)
        max_features = max(30, min(50, len(X_train) // 10))
        min_features = max(10, min(30, len(X_train) // 20))  # 最少特征数：样本数/20，但不少于10个，不多于30个
        
        # 如果特征数量超过上限，执行特征选择
        if original_feature_count > max_features:
            logger.info(
                f"特征数量 {original_feature_count} 超过合理上限 {max_features}，"
                f"开始执行特征选择（样本数={len(X_train)}，目标范围：{min_features}-{max_features}）"
            )
            
            # 创建特征选择器，设置最大和最小特征数量
            # 调整阈值使其更宽松，避免过度筛选
            selector = FeatureSelector(
                max_feature_count=max_features,
                min_feature_count=min_features,
                target_correlation_threshold=0.01,  # 放宽目标相关性阈值（从0.02降到0.01）
                importance_threshold=0.001,  # 放宽重要性阈值（从0.005降到0.001）
                mutual_info_threshold=0.005,  # 放宽互信息阈值（从0.01降到0.005）
            )
            
            # 执行特征选择（使用训练集数据）
            feature_selection_result = selector.select_features(X_train, y_train)
            
            # 检查特征选择结果，如果特征数过少，放宽阈值重新选择
            selected_count = feature_selection_result.selected_count
            if selected_count < min_features:
                logger.warning(
                    f"特征选择后特征数过少（{selected_count} 个 < {min_features}），"
                    f"放宽阈值重新选择..."
                )
                
                # 进一步放宽阈值
                selector_relaxed = FeatureSelector(
                    max_feature_count=max_features,
                    min_feature_count=min_features,
                    target_correlation_threshold=0.005,  # 进一步放宽
                    importance_threshold=0.0005,  # 进一步放宽
                    mutual_info_threshold=0.001,  # 进一步放宽
                )
                feature_selection_result = selector_relaxed.select_features(X_train, y_train)
                selected_count = feature_selection_result.selected_count
                
                if selected_count < min_features:
                    logger.warning(
                        f"放宽阈值后特征数仍不足（{selected_count} 个 < {min_features}），"
                        f"将使用所有剩余特征"
                    )
                    # 如果还是不够，至少保留前 min_features 个最重要的特征
                    # 这里我们使用原始选择结果，但记录警告
            
            # 应用特征选择结果到所有数据集
            X_train = selector.apply_selection(X_train, feature_selection_result)
            X_val = selector.apply_selection(X_val, feature_selection_result)
            X_test = selector.apply_selection(X_test, feature_selection_result)
            
            final_feature_count = len(X_train.columns)
            logger.info(
                f"特征选择完成: 原始特征数={feature_selection_result.original_count}, "
                f"选中特征数={final_feature_count}, "
                f"剔除特征数={len(feature_selection_result.removed_features)}"
            )
            
            # 验证特征选择结果
            if final_feature_count < 10:
                logger.error(
                    f"特征选择后特征数过少（{final_feature_count} 个），"
                    f"可能严重影响模型性能。建议检查特征工程或放宽特征选择阈值。"
                )
            elif final_feature_count < min_features:
                logger.warning(
                    f"特征选择后特征数偏少（{final_feature_count} 个 < {min_features}），"
                    f"可能影响模型性能。"
                )
            
            # 记录特征选择信息，用于后续报告
            feature_selection_info = {
                "applied": True,
                "original_count": feature_selection_result.original_count,
                "selected_count": final_feature_count,
                "removed_count": len(feature_selection_result.removed_features),
                "removed_features": feature_selection_result.removed_features[:10],  # 只记录前10个
                "removal_reasons": {
                    feat: reason
                    for feat, reason in feature_selection_result.removal_reasons.items()
                    if feat in feature_selection_result.removed_features[:10]
                },
            }
        else:
            logger.info(
                f"特征数量 {original_feature_count} 在合理范围内（上限={max_features}），"
                f"跳过特征选择"
            )
            feature_selection_info = {"applied": False, "original_count": original_feature_count}

        # 3.6 数据增强（可选）
        augmentation_info = None
        if include_lstm or include_gru or include_temporal_cnn:
            # 判断需要训练的深度学习模型类型
            dl_models = []
            if include_lstm:
                dl_models.append("lstm")
            if include_gru:
                dl_models.append("gru")
            if include_temporal_cnn:
                dl_models.append("temporal_cnn")
            
            # 应用数据增强
            X_train_aug, y_train_aug, augmentation_info = apply_data_augmentation(
                X_train=X_train,
                y_train=y_train,
                model_types=dl_models,
                random_seed=42,
            )
            
            if augmentation_info and augmentation_info.get("applied"):
                logger.info(
                    f"数据增强完成: 原始样本 {len(X_train)} 条，"
                    f"增强后 {len(X_train_aug)} 条，"
                    f"增强方法: {', '.join(augmentation_info.get('methods', []))}"
                )
                # 使用增强后的训练集
                X_train = X_train_aug
                y_train = y_train_aug
            else:
                logger.info("数据增强未应用（不满足条件或不需要）")
        else:
            logger.debug("传统机器学习模型，跳过数据增强")

        # 4. 训练模型
        logger.debug("开始训练模型...")
        models = self.trainer.train_all_default_models(
            X_train,
            y_train,
            X_val,
            y_val,
            include_lstm=include_lstm,
            include_gru=include_gru,
            include_temporal_cnn=include_temporal_cnn,
            include_catboost=include_catboost,
            include_mlp=include_mlp,
        )

        # 5. 计算模型评估指标
        model_metrics = {}
        overfitting_warnings = {}  # 存储过拟合警告信息
        feature_count = X_train.shape[1]
        train_sample_count = len(X_train)
        
        for model_name, model in models.items():
            # 5.1 在验证集上评估模型
            y_val_pred_proba_raw = model.predict_proba(X_val)
            feature_importance = model.get_feature_importance()
            val_label_len = len(y_val.values)
            val_pred_len = len(y_val_pred_proba_raw)
            y_val_aligned, y_val_pred_proba = align_predictions(
                y_val.values,
                y_val_pred_proba_raw,
                model,
            )
            if val_label_len != val_pred_len:
                logger.debug(
                    "模型 %s 验证集标签/预测长度对齐：原标签=%s, 原预测=%s, 对齐后=%s",
                    model_name,
                    val_label_len,
                    val_pred_len,
                    len(y_val_aligned),
                )

            val_metrics = calculate_model_metrics(y_val_aligned, y_val_pred_proba, feature_importance)

            # 5.2 在训练集上评估模型（用于过拟合检测）
            y_train_pred_proba_raw = model.predict_proba(X_train)
            train_label_len = len(y_train.values)
            train_pred_len = len(y_train_pred_proba_raw)
            y_train_aligned, y_train_pred_proba = align_predictions(
                y_train.values,
                y_train_pred_proba_raw,
                model,
            )
            if train_label_len != train_pred_len:
                logger.debug(
                    "模型 %s 训练集标签/预测长度对齐：原标签=%s, 原预测=%s, 对齐后=%s",
                    model_name,
                    train_label_len,
                    train_pred_len,
                    len(y_train_aligned),
                )

            train_metrics = calculate_model_metrics(y_train_aligned, y_train_pred_proba, None)
            learning_curve_stats, learning_curve_alerts = analyze_learning_curve(
                y_train_aligned,
                y_train_pred_proba,
                y_val_aligned,
                y_val_pred_proba,
            )
            cv_report = run_time_series_cv(model_name, X_train, y_train)
            
            # 5.3 过拟合检测：对比训练集和验证集的AUC
            train_auc = train_metrics.auc
            val_auc = val_metrics.auc
            auc_diff = train_auc - val_auc
            accuracy_diff = (
                (train_metrics.accuracy - val_metrics.accuracy)
                if train_metrics.accuracy is not None and val_metrics.accuracy is not None
                else 0.0
            )
            f1_diff = (
                (train_metrics.f1 - val_metrics.f1)
                if train_metrics.f1 is not None and val_metrics.f1 is not None
                else 0.0
            )
            # 获取早停信息（从模型训练摘要中获取）
            early_stop_info = None
            if hasattr(model, "_training_summary") and model._training_summary is not None:
                early_stop_info = model._training_summary.get("early_stop_info")
            
            diagnosis = diagnose_overfitting(
                model_name,
                model,
                train_metrics,
                val_metrics,
                auc_diff,
                accuracy_diff,
                f1_diff,
                feature_count,
                train_sample_count,
                early_stop_info=early_stop_info,
            )
            
            # 如果训练集AUC明显高于验证集（差异>0.1），可能存在过拟合
            is_overfitting = diagnosis["severity"] != "无"
            if auc_diff < -0.05:
                # 如果验证集AUC明显高于训练集，可能是异常情况
                logger.warning(
                    f"模型 {model_name}: 验证集AUC ({val_auc:.4f}) 明显高于训练集AUC ({train_auc:.4f})，"
                    f"差异为 {auc_diff:.4f}。这可能是异常情况，请检查数据或模型。"
                )
            else:
                logger.info(
                    f"模型 {model_name}: 训练集AUC ({train_auc:.4f}) 和验证集AUC ({val_auc:.4f}) 接近，"
                    f"差异为 {auc_diff:.4f}，模型表现稳定。"
                )

            warning_payload = {
                "severity": diagnosis["severity"],
                "train_auc": train_auc,
                "val_auc": val_auc,
                "auc_diff": auc_diff,
                "accuracy_diff": accuracy_diff,
                "f1_diff": f1_diff,
                "triggers": diagnosis["triggers"],
                "recommendations": diagnosis["recommendations"],
                "learning_curve_alerts": learning_curve_alerts,
                "learning_curve_summary": learning_curve_stats,
                "time_series_cv": cv_report,
            }
            if (
                diagnosis["severity_score"] > 0
                or learning_curve_alerts
                or (not cv_report.get("skipped") and cv_report.get("alerts"))
            ):
                logger.warning(
                    "模型 %s 触发过拟合/稳定性警告：%s",
                    model_name,
                    warning_payload["triggers"],
                )
                overfitting_warnings[model_name] = warning_payload
            
            # 时间序列特有检测（仅针对深度学习模型）
            time_series_tests = {}
            if model_name in {"lstm", "gru", "temporal_cnn", "cnn_1d"}:
                logger.debug(f"[{model_name}] 执行时间序列特有检测...")
                
                # 1. 序列长度敏感性测试
                try:
                    seq_sensitivity = test_sequence_length_sensitivity(
                        model_name, X_train, y_train, X_val, y_val
                    )
                    time_series_tests["sequence_sensitivity"] = seq_sensitivity
                except Exception as exc:
                    logger.warning(f"[{model_name}] 序列长度敏感性测试失败: {exc}")
                    time_series_tests["sequence_sensitivity"] = {
                        "skipped": True,
                        "reason": str(exc),
                    }
                
                # 2. 时间衰减测试
                try:
                    temporal_decay = test_temporal_decay(model, X_val, y_val)
                    time_series_tests["temporal_decay"] = temporal_decay
                except Exception as exc:
                    logger.warning(f"[{model_name}] 时间衰减测试失败: {exc}")
                    time_series_tests["temporal_decay"] = {
                        "skipped": True,
                        "reason": str(exc),
                    }
                
                # 3. 预测稳定性测试
                try:
                    pred_stability = test_prediction_stability(model, X_val)
                    time_series_tests["prediction_stability"] = pred_stability
                except Exception as exc:
                    logger.warning(f"[{model_name}] 预测稳定性测试失败: {exc}")
                    time_series_tests["prediction_stability"] = {
                        "skipped": True,
                        "reason": str(exc),
                    }
                
                # 收集时间序列检测的警告
                ts_alerts = []
                if not time_series_tests.get("sequence_sensitivity", {}).get("skipped", True):
                    if time_series_tests["sequence_sensitivity"].get("sensitivity_detected", False):
                        ts_alerts.extend(time_series_tests["sequence_sensitivity"].get("alerts", []))
                
                if not time_series_tests.get("temporal_decay", {}).get("skipped", True):
                    if time_series_tests["temporal_decay"].get("decay_detected", False):
                        ts_alerts.extend(time_series_tests["temporal_decay"].get("alerts", []))
                
                if not time_series_tests.get("prediction_stability", {}).get("skipped", True):
                    if time_series_tests["prediction_stability"].get("instability_detected", False):
                        ts_alerts.extend(time_series_tests["prediction_stability"].get("alerts", []))
                
                if ts_alerts:
                    logger.warning(f"[{model_name}] 时间序列检测警告: {ts_alerts}")
                    # 将时间序列警告添加到过拟合警告中
                    if model_name not in overfitting_warnings:
                        overfitting_warnings[model_name] = warning_payload
                    overfitting_warnings[model_name]["time_series_alerts"] = ts_alerts
            
            model_metrics[model_name] = {
                "auc": val_metrics.auc,
                "accuracy": val_metrics.accuracy,
                "f1": val_metrics.f1,
                "feature_importance": val_metrics.feature_importance,
                "train_auc": train_auc,
                "train_accuracy": train_metrics.accuracy,
                "train_f1": train_metrics.f1,
                "is_overfitting": is_overfitting,
                "accuracy_diff": accuracy_diff,
                "f1_diff": f1_diff,
                "learning_curve": {
                    "stats": learning_curve_stats,
                    "alerts": learning_curve_alerts,
                },
                "time_series_cv": cv_report,
                "overfitting_diagnosis": diagnosis,
                "recommended_actions": diagnosis["recommendations"],
                "time_series_tests": time_series_tests if time_series_tests else None,  # 添加时间序列检测结果
            }

        # 6. Walk-Forward 回测
        logger.info("开始 Walk-Forward 回测...")
        config = BacktestConfig()
        backtester = WalkForwardBacktester(config)

        # 准备回测数据：只使用训练集，不包含验证集和测试集（避免数据泄露）
        # 根据验证报告，回测应该只使用训练集数据，避免使用验证集导致过拟合的回测结果
        # 注意：验证集应该仅用于模型选择和超参数调优，不应用于回测
        
        # 确保价格数据索引与特征数据对齐
        # 特征工程可能删除了最后一行（当include_target=True时），需要确保价格数据索引一致
        # 使用中文列名（AkShare 数据格式）
        prices_frame = dataset.frame[["开盘", "最高", "最低", "收盘"]]
        # 使用 align 方法确保价格数据和特征数据索引完全对齐
        # 只保留两者都有的索引（inner join）
        prices_aligned, X_train_aligned = prices_frame.align(X_train, join='inner', axis=0)
        
        # 重命名列名为英文（回测框架期望英文列名）
        prices_aligned = prices_aligned.rename(columns={
            "开盘": "open",
            "最高": "high",
            "最低": "low",
            "收盘": "close"
        })
        
        # 检查对齐后的数据量
        if len(prices_aligned) != len(X_train):
            logger.warning(
                f"价格数据和特征数据索引不完全匹配："
                f"特征数据 {len(X_train)} 行，对齐后 {len(prices_aligned)} 行。"
                f"可能是特征工程删除了某些行。"
            )
        
        prices_train = prices_aligned
        X_train_for_backtest = X_train_aligned

        # 执行回测（只使用训练集的数据，避免数据泄露）
        backtest_results = backtester.walk_forward_backtest(models, X_train_for_backtest, prices_train, train_start_idx=0)

        # 提取基准（买入并持有）回测结果，用于计算信息比率和对比指标
        baseline_results = backtest_results.pop("buy_and_hold", [])
        baseline_returns_series: pd.Series | None = None
        if baseline_results:
            # 合并所有窗口的基准日收益率，按时间排序并去重
            baseline_returns = [
                result.daily_returns
                for result in baseline_results
                if not result.daily_returns.empty
            ]
            if baseline_returns:
                baseline_returns_series = pd.concat(baseline_returns).sort_index()
                baseline_returns_series = baseline_returns_series[
                    ~baseline_returns_series.index.duplicated(keep="last")
                ]

        # 7. 计算回测指标
        backtest_metrics_dict = {}
        for model_name, result_list in backtest_results.items():
            if result_list:
                metrics = calculate_backtest_metrics(
                    result_list,
                    config,
                    benchmark_returns=baseline_returns_series,
                )
                # 获取模型的过拟合状态
                model_metric_info = model_metrics.get(model_name, {})
                is_overfitting = model_metric_info.get("is_overfitting", False)
                
                backtest_metrics_dict[model_name] = {
                    "cumulative_return": metrics.cumulative_return,
                    "total_return": metrics.total_return,
                    "annualized_return": metrics.annualized_return,
                    "annualized_volatility": metrics.annualized_volatility,
                    "downside_volatility": metrics.downside_volatility,
                    "sharpe_ratio": metrics.sharpe_ratio,
                    "sortino_ratio": metrics.sortino_ratio,
                    "calmar_ratio": metrics.calmar_ratio,
                    "max_drawdown": metrics.max_drawdown,
                    "win_rate": metrics.win_rate,
                    "information_ratio": metrics.information_ratio,
                    "tracking_error": metrics.tracking_error,
                    "excess_return": metrics.excess_return,
                    "avg_gain": metrics.avg_gain,
                    "avg_loss": metrics.avg_loss,
                    "gain_loss_ratio": metrics.gain_loss_ratio,
                    "total_trades": metrics.total_trades,
                    "avg_holding_period": metrics.avg_holding_period,
                    "stop_loss_count": metrics.stop_loss_count,
                    "take_profit_count": metrics.take_profit_count,
                    "is_overfitting": is_overfitting,  # 添加过拟合标记
                }

        # 8. 计算基准策略指标，纳入回测指标字典
        if baseline_results:
            baseline_metrics_obj = calculate_backtest_metrics(baseline_results, config)
            backtest_metrics_dict["buy_and_hold"] = {
                "cumulative_return": baseline_metrics_obj.cumulative_return,
                "total_return": baseline_metrics_obj.total_return,
                "annualized_return": baseline_metrics_obj.annualized_return,
                "annualized_volatility": baseline_metrics_obj.annualized_volatility,
                "downside_volatility": baseline_metrics_obj.downside_volatility,
                "sharpe_ratio": baseline_metrics_obj.sharpe_ratio,
                "sortino_ratio": baseline_metrics_obj.sortino_ratio,
                "calmar_ratio": baseline_metrics_obj.calmar_ratio,
                "max_drawdown": baseline_metrics_obj.max_drawdown,
                "win_rate": baseline_metrics_obj.win_rate,
                "information_ratio": baseline_metrics_obj.information_ratio,
                "tracking_error": baseline_metrics_obj.tracking_error,
                "excess_return": baseline_metrics_obj.excess_return,
                "avg_gain": baseline_metrics_obj.avg_gain,
                "avg_loss": baseline_metrics_obj.avg_loss,
                "gain_loss_ratio": baseline_metrics_obj.gain_loss_ratio,
                "total_trades": baseline_metrics_obj.total_trades,
                "avg_holding_period": baseline_metrics_obj.avg_holding_period,
                "stop_loss_count": baseline_metrics_obj.stop_loss_count,
                "take_profit_count": baseline_metrics_obj.take_profit_count,
            }

        baseline_summary_metrics = backtest_metrics_dict.get("buy_and_hold", {})

        # 9. 稳健性分析 - 场景准备
        logger.info("开始敏感性场景计算...")
        sensitivity_analyzer = SensitivityAnalyzer(config)
        threshold_sensitivity = sensitivity_analyzer.analyze_threshold_sensitivity(
            models, X_train_for_backtest, prices_train
        )
        train_length = len(X_train_for_backtest)
        stop_take_sensitivity: List = []
        window_sensitivity: List = []
        if train_length >= 300:
            stop_take_sensitivity = sensitivity_analyzer.analyze_stop_take_sensitivity(
                models, X_train_for_backtest, prices_train
            )
        if train_length >= 200:
            window_sensitivity = sensitivity_analyzer.analyze_window_sensitivity(
                models, X_train_for_backtest, prices_train
            )
        sensitivity_buckets = {"signal_threshold": threshold_sensitivity}
        if stop_take_sensitivity:
            sensitivity_buckets["risk_threshold"] = stop_take_sensitivity
        if window_sensitivity:
            sensitivity_buckets["window"] = window_sensitivity

        # 10. 选择最佳模型（排除基准策略和过拟合模型）
        # 注意：模型选择基于验证集的回测结果，避免过拟合
        # 重要：过拟合的模型不能被选为最佳模型，因为它们在验证集上表现差，泛化能力差
        # 测试集保留用于未来验证（如果需要）
        
        # 获取所有候选模型（排除基准策略）
        all_candidate_metrics = {
            name: metric
            for name, metric in backtest_metrics_dict.items()
            if name != "buy_and_hold"
        }
        
        if not all_candidate_metrics:
            raise ValueError("未获得任何有效模型回测结果用于选择最佳模型")
        
        # 排除过拟合的模型
        # 从 model_metrics 中获取过拟合信息
        non_overfitting_candidates = {}
        overfitting_models = []
        
        for model_name, backtest_metric in all_candidate_metrics.items():
            # 检查模型是否过拟合
            model_metric_info = model_metrics.get(model_name, {})
            is_overfitting = model_metric_info.get("is_overfitting", False)
            
            if is_overfitting:
                overfitting_models.append(model_name)
                logger.warning(f"模型 {model_name} 存在过拟合，排除在最佳模型候选之外")
            else:
                non_overfitting_candidates[model_name] = backtest_metric
        
        # 如果没有非过拟合的模型，使用所有模型（但记录警告）
        if not non_overfitting_candidates:
            logger.warning("所有模型都存在过拟合，将从未过拟合模型中选择最佳模型")
            # 如果所有模型都过拟合，选择过拟合程度最低的
            # 根据训练集和验证集AUC差异来判断过拟合程度
            candidate_scores = {}
            valid_candidates = {}  # 存储通过验证集AUC阈值检查的模型
            
            # 设置验证集AUC最低阈值
            min_val_auc_threshold = 0.55
            
            for model_name, backtest_metric in all_candidate_metrics.items():
                model_metric_info = model_metrics.get(model_name, {})
                train_auc = model_metric_info.get("train_auc", 0.5)
                val_auc = model_metric_info.get("auc", 0.5)
                auc_diff = train_auc - val_auc  # 过拟合程度（差异越小越好）
                
                # 验证集AUC阈值检查：低于阈值的模型直接排除
                if val_auc < min_val_auc_threshold:
                    logger.warning(
                        f"模型 {model_name} 验证集AUC {val_auc:.4f} < {min_val_auc_threshold}，"
                        f"排除在候选模型之外"
                    )
                    continue
                
                # 改进的评分公式：优先考虑验证集AUC，其次考虑回测指标
                # 验证集AUC权重提高，过拟合惩罚权重提高
                score = (
                    val_auc * 2.0 +  # 验证集AUC权重提高（优先考虑）
                    backtest_metric["sharpe_ratio"] * 0.3 +
                    backtest_metric["information_ratio"] * 0.2 -
                    abs(backtest_metric["max_drawdown"]) * 0.3 -
                    auc_diff * 50  # 过拟合惩罚权重提高（从10调整为50）
                )
                candidate_scores[model_name] = score
                valid_candidates[model_name] = {
                    "score": score,
                    "val_auc": val_auc,
                    "auc_diff": auc_diff,
                }
            
            # 如果所有模型都被排除（验证集AUC都低于阈值），抛出警告
            if not valid_candidates:
                error_msg = (
                    f"所有模型都存在过拟合且验证集AUC < {min_val_auc_threshold}，"
                    f"无法选择有效模型。建议：减少特征数量、增加训练数据、调整模型参数。"
                )
                logger.error(error_msg)
                raise ValueError(error_msg)
            
            # 选择得分最高的模型
            best_model_name = max(candidate_scores.items(), key=lambda x: x[1])[0]
            best_candidate_info = valid_candidates[best_model_name]
            logger.warning(
                f"所有模型都存在过拟合，选择过拟合程度最低的模型: {best_model_name} "
                f"(验证集AUC={best_candidate_info['val_auc']:.4f}, "
                f"AUC差异={best_candidate_info['auc_diff']:.4f}, "
                f"综合得分={best_candidate_info['score']:.4f})"
            )
        else:
            # 从非过拟合模型中选择最佳模型
            # 选择最佳模型：综合考虑验证集AUC、Sharpe比率、信息比率和最大回撤
            # 改进评分公式：优先考虑验证集AUC
            candidate_scores = {}
            for model_name, backtest_metric in non_overfitting_candidates.items():
                model_metric_info = model_metrics.get(model_name, {})
                val_auc = model_metric_info.get("auc", 0.5)
                
                # 评分公式：验证集AUC权重提高，综合考虑回测指标
                score = (
                    val_auc * 2.0 +  # 验证集AUC权重提高（优先考虑）
                    backtest_metric["sharpe_ratio"] * 0.3 +
                    backtest_metric["information_ratio"] * 0.2 -
                    abs(backtest_metric["max_drawdown"]) * 0.3
                )
                candidate_scores[model_name] = score
            
            best_model_name = max(candidate_scores.items(), key=lambda x: x[1])[0]
            best_val_auc = model_metrics.get(best_model_name, {}).get("auc", 0.0)
            logger.info(
                f"最佳模型选择完成: {best_model_name} "
                f"(验证集AUC={best_val_auc:.4f}, 基于验证集回测结果，已排除过拟合模型)"
            )
        
        best_model = models[best_model_name]

        # 11. 保存最佳模型
        logger.info(f"保存最佳模型: {best_model_name}")
        
        # 11.1 删除该 symbol 下的所有旧模型（因为只有一个最佳模型）
        existing_models = self.storage.list_models(symbol)
        if existing_models:
            logger.info(f"发现 {len(existing_models)} 个已保存的旧模型，准备删除...")
            for model_info in existing_models:
                old_model_name = model_info["model_name"]
                try:
                    self.storage.delete(symbol, old_model_name)
                    logger.info(f"已删除旧模型: {old_model_name}")
                except Exception as e:
                    logger.warning(f"删除旧模型 {old_model_name} 失败: {e}")
        
        # 11.2 保存新的最佳模型
        best_metrics = backtest_metrics_dict[best_model_name]
        # 保存模型配置（转换为字典格式）
        best_model_config = best_model.config
        config_dict = {
            "name": best_model_config.name,
            "random_seed": best_model_config.random_seed,
            "params": best_model_config.params,
        }
        
        metadata = ModelMetadata(
            model_name=best_model_name,
            symbol=symbol,
            training_date=now_shanghai().isoformat(),
            feature_names=best_model.feature_names,
            performance_metrics=best_metrics,
            config=config_dict,
        )
        self.storage.save(best_model, symbol, metadata)

        # 11.5 生成稳健性报告
        best_model_results = backtest_results.get(best_model_name, [])
        feature_importances = {
            name: metrics.get("feature_importance", {})
            for name, metrics in model_metrics.items()
            if metrics.get("feature_importance")
        }
        robustness_evaluator = RobustnessEvaluator()
        robustness_report = robustness_evaluator.generate_report(
            sensitivity_buckets,
            best_model_results,
            config,
            prices_train,
            feature_importances,
        )
        robustness_dict = asdict(robustness_report)

        recommendation = self._build_model_recommendation(
            backtest_metrics_dict,
            model_metrics,
            robustness_dict,
            best_model_name,  # 传入最佳模型名称
        )

        # 12. 构建结果摘要
        summary = {
            "symbol": symbol,
            "data_info": {
                # 数据信息：反映回测使用的数据范围（仅训练集）
                "start_date": X_train_for_backtest.index[0].isoformat(),
                "end_date": X_train_for_backtest.index[-1].isoformat(),
                "total_samples": len(X_train_for_backtest),  # 回测使用的总样本数（仅训练集）
                "train_samples": len(X_train),
                "val_samples": len(X_val),
                "test_samples": len(X_test),  # 测试集保留用于未来验证
            },
            "model_metrics": model_metrics,
            "backtest_metrics": backtest_metrics_dict,
            "baseline_metrics": {
                "cumulative_return": baseline_summary_metrics.get("cumulative_return", 0.0),
                "total_return": baseline_summary_metrics.get("total_return", 0.0),
                "annualized_return": baseline_summary_metrics.get("annualized_return", 0.0),
                "annualized_volatility": baseline_summary_metrics.get("annualized_volatility", 0.0),
                "downside_volatility": baseline_summary_metrics.get("downside_volatility", 0.0),
                "sharpe_ratio": baseline_summary_metrics.get("sharpe_ratio", 0.0),
                "sortino_ratio": baseline_summary_metrics.get("sortino_ratio", 0.0),
                "calmar_ratio": baseline_summary_metrics.get("calmar_ratio", 0.0),
                "max_drawdown": baseline_summary_metrics.get("max_drawdown", 0.0),
                "win_rate": baseline_summary_metrics.get("win_rate", 0.0),
                "avg_gain": baseline_summary_metrics.get("avg_gain", 0.0),
                "avg_loss": baseline_summary_metrics.get("avg_loss", 0.0),
                "gain_loss_ratio": baseline_summary_metrics.get("gain_loss_ratio", 0.0),
                "information_ratio": baseline_summary_metrics.get("information_ratio", 0.0),
                "tracking_error": baseline_summary_metrics.get("tracking_error", 0.0),
                "excess_return": baseline_summary_metrics.get("excess_return", 0.0),
            },
            "best_model": best_model_name,
            "robustness": robustness_dict,
            "recommendation": recommendation,
            "overfitting_warnings": overfitting_warnings,  # 添加过拟合警告信息
            "augmentation_info": augmentation_info,  # 数据增强信息
            "feature_selection_info": feature_selection_info,  # 特征选择信息
        }

        logger.info(f"量化分析完成: symbol={symbol}, best_model={best_model_name}")
        return json.dumps(summary, ensure_ascii=False, indent=2)

    @staticmethod
    def _build_model_recommendation(
        backtest_metrics: Dict[str, Dict[str, Any]],
        model_metrics: Dict[str, Any],
        robustness_dict: Dict[str, Any],
        best_model: str | None = None,
    ) -> Dict[str, Any]:
        """Build the model recommendation (primary/backup/details) from weighted scores.

        Scoring follows the documented category weights: core performance 40%,
        risk 30%, trading quality 15%, robustness 15%.

        Parameters
        ----------
        backtest_metrics:
            Backtest metrics keyed by model name (the "buy_and_hold" entry is
            treated as a baseline and excluded from candidates).
        model_metrics:
            Model evaluation metrics (e.g. validation AUC) keyed by model name.
        robustness_dict:
            Robustness analysis results (stability per model, parameter
            robustness, time-segment variability, market regimes).
        best_model:
            Name of the externally selected best model.  When provided it is
            forced to be the primary recommendation, even if the scoring
            filters would otherwise have excluded it.
        """

        def normalize(values: Dict[str, float], higher_is_better: bool = True) -> Dict[str, float]:
            """Min-max normalize metric values into [0, 1] for cross-model comparison."""
            # Drop None entries before computing the range.
            valid = {k: v for k, v in values.items() if v is not None}
            if not valid:
                return {}
            max_val = max(valid.values())
            min_val = min(valid.values())
            # Degenerate spread: every model ties at the midpoint.
            if abs(max_val - min_val) < 1e-9:
                return {k: 0.5 for k in valid}
            if higher_is_better:
                return {k: float((v - min_val) / (max_val - min_val)) for k, v in valid.items()}
            # Lower-is-better metrics are inverted so 1.0 is always "best".
            return {k: float((max_val - v) / (max_val - min_val)) for k, v in valid.items()}

        def combine_category(score_maps: List[Dict[str, float]], target_models: List[str]) -> Dict[str, float]:
            # Average a model's scores over the maps where it appears; 0.0 if absent everywhere.
            combined: Dict[str, float] = {}
            for model in target_models:
                values = [score_map[model] for score_map in score_maps if model in score_map]
                combined[model] = float(sum(values) / len(values)) if values else 0.0
            return combined

        # Category weights per the scoring documentation (sum to 1.0).
        weights = {"core": 0.4, "risk": 0.3, "trading": 0.15, "robustness": 0.15}
        raw_models = [m for m in backtest_metrics.keys() if m != "buy_and_hold"]

        if not raw_models:
            return {"status": "no_models", "notes": ["无可用模型"], "details": []}

        # 1. Preliminary screening: drop models with AUC below 0.5, negative
        #    annualized return, or drawdown deeper than 50%.
        excluded: Dict[str, str] = {}
        filtered_models: List[str] = []
        for model in raw_models:
            metrics = backtest_metrics.get(model, {})
            # A missing AUC defaults to 1.0, i.e. it never triggers exclusion.
            auc = model_metrics.get(model, {}).get("auc", 1.0)
            ann_return = metrics.get("annualized_return", 0.0)
            max_drawdown = abs(metrics.get("max_drawdown", 0.0))
            if auc < 0.5:
                excluded[model] = "AUC < 0.5"
            elif ann_return < 0.0:
                excluded[model] = "年化收益为负"
            elif max_drawdown > 0.5:
                excluded[model] = "最大回撤超过50%"
            else:
                filtered_models.append(model)

        # If the screen removed everything, fall back to the full model list.
        candidate_pool = filtered_models or raw_models

        # 2. Rank by Sharpe ratio and keep only the top three for weighting.
        candidate_pool.sort(key=lambda m: backtest_metrics.get(m, {}).get("sharpe_ratio", 0.0), reverse=True)
        candidate_models = candidate_pool[:3]

        # 3. Collect per-metric values for the candidates; `transform` lets
        #    callers preprocess raw values (e.g. abs() on drawdowns).
        def collect(metric: str, *, transform=None) -> Dict[str, float]:
            values: Dict[str, float] = {}
            for name in candidate_models:
                base = backtest_metrics.get(name, {}).get(metric)
                if base is None:
                    continue
                value = transform(base) if transform else base
                values[name] = float(value)
            return values

        # Core performance: risk-adjusted and absolute returns.
        core_scores = combine_category(
            [
                normalize(collect("sharpe_ratio")),
                normalize(collect("information_ratio")),
                normalize(collect("annualized_return")),
            ],
            candidate_models,
        )

        # Risk: all three components are lower-is-better.
        risk_scores = combine_category(
            [
                normalize(collect("max_drawdown", transform=lambda x: abs(x)), higher_is_better=False),
                normalize(collect("annualized_volatility"), higher_is_better=False),
                normalize(collect("downside_volatility"), higher_is_better=False),
            ],
            candidate_models,
        )

        # Trading quality: win rate, payoff ratio and trade count.
        trading_scores = combine_category(
            [
                normalize(collect("win_rate")),
                normalize(collect("gain_loss_ratio")),
                normalize(collect("total_trades")),
            ],
            candidate_models,
        )

        model_stability = robustness_dict.get("model_stability", {})
        parameter_robustness = robustness_dict.get("parameter_robustness", {})
        time_variability = float(robustness_dict.get("time_segment_variability", 0.0))
        # Score decays toward 0 as time-segment variability grows; a negative
        # variability (unexpected) falls back to the neutral 0.5.
        time_score = 1.0 / (1.0 + max(time_variability, 0.0)) if time_variability >= 0 else 0.5

        # Fraction of market regimes with non-negative annualized return;
        # neutral 0.5 when no regime data is available.
        market_regimes = robustness_dict.get("market_regimes", [])
        if market_regimes:
            positive = sum(
                1
                for state in market_regimes
                if state.get("metrics", {}).get("annualized_return", 0.0) >= 0
            )
            regime_score = positive / len(market_regimes)
        else:
            regime_score = 0.5

        parameter_score = parameter_robustness.get("overall", 0.5)
        # Global robustness is shared by all candidates; per-model stability
        # is blended in below.
        global_robustness = float(
            (time_score + regime_score + parameter_score) / 3 if candidate_models else 0.0
        )

        robustness_scores = {
            model: float((model_stability.get(model, 0.5) + global_robustness) / 2)
            for model in candidate_models
        }

        # 4. Aggregate the weighted composite score and collect warning reasons.
        recommendations: List[RecommendationDetail] = []
        for model in candidate_models:
            breakdown = {
                "core": core_scores.get(model, 0.0),
                "risk": risk_scores.get(model, 0.0),
                "trading": trading_scores.get(model, 0.0),
                "robustness": robustness_scores.get(model, 0.0),
            }
            composite = sum(breakdown[key] * weights[key] for key in weights)
            metrics = backtest_metrics.get(model, {})
            reasons: List[str] = []
            if metrics.get("annualized_return", 0.0) < 0.0:
                reasons.append("年化收益低于0")
            if abs(metrics.get("max_drawdown", 0.0)) > 0.5:
                reasons.append("最大回撤超过50%")
            if metrics.get("sharpe_ratio", 0.0) < 0.5:
                reasons.append("夏普比率低于0.5")
            if model_stability.get(model, 0.5) < 0.5:
                reasons.append("稳健性得分偏低")
            auc_value = model_metrics.get(model, {}).get("auc")
            if auc_value is not None and auc_value < 0.5:
                reasons.append("验证集AUC低于0.5")

            recommendations.append(
                RecommendationDetail(
                    model_name=model,
                    score=float(composite),
                    reasons=reasons,
                    # Keep only numeric metrics so the detail is JSON-safe.
                    metrics={k: float(v) for k, v in metrics.items() if isinstance(v, (int, float))},
                    breakdown=breakdown,
                )
            )

        if not recommendations:
            return {
                "status": "no_models",
                "notes": ["推荐阶段没有可用模型"],
                "details": [],
            }

        recommendations.sort(key=lambda item: item.score, reverse=True)

        # If an externally selected best model was provided, promote it to
        # the primary recommendation.
        if best_model:
            if best_model in [r.model_name for r in recommendations]:
                # Best model is already a candidate: move it to the front.
                best_model_recommendation = next(r for r in recommendations if r.model_name == best_model)
                recommendations = [best_model_recommendation] + [r for r in recommendations if r.model_name != best_model]
                primary = best_model_recommendation
                logger.info(f"推荐模型与最佳模型一致: {best_model}")
            elif best_model in raw_models:
                # Best model exists but was filtered out of the candidate set;
                # force it in as the primary with a synthetic score/breakdown.
                # NOTE(review): best_model_auc is computed but unused here.
                best_model_metrics = backtest_metrics.get(best_model, {})
                best_model_auc = model_metrics.get(best_model, {}).get("auc", 0.0)
                exclusion_reason = excluded.get(best_model, "未通过推荐评分筛选")
                logger.warning(
                    f"最佳模型 {best_model} 不在推荐候选模型中（原因：{exclusion_reason}），"
                    f"但基于模型选择逻辑，强制将其设为第一推荐"
                )
                best_model_recommendation = RecommendationDetail(
                    model_name=best_model,
                    # High-but-not-perfect synthetic score (weights sum to 1.0,
                    # so this evaluates to 0.8).
                    score=float(sum(weights.values()) * 0.8),
                    reasons=[exclusion_reason] if exclusion_reason else [],
                    metrics={k: float(v) for k, v in best_model_metrics.items() if isinstance(v, (int, float))},
                    breakdown={
                        "core": 0.7,
                        "risk": 0.7,
                        "trading": 0.7,
                        "robustness": 0.7,
                    },
                )
                recommendations = [best_model_recommendation] + recommendations
                primary = best_model_recommendation
            else:
                # Best model unknown to the backtest (should not happen):
                # fall back to the top-scoring candidate.
                logger.error(f"最佳模型 {best_model} 不在原始模型列表中，使用评分最高的模型作为推荐")
                primary = recommendations[0]
        else:
            # No external best model: highest composite score wins.
            primary = recommendations[0]

        # A backup is only reported when the runner-up is within 0.1 points.
        backup = (
            recommendations[1]
            if len(recommendations) > 1 and abs(primary.score - recommendations[1].score) < 0.1
            else None
        )

        notes: List[str] = []

        # Explain any divergence between the primary recommendation and the
        # externally selected best model.
        if best_model and primary.model_name != best_model:
            best_model_metrics = backtest_metrics.get(best_model, {})
            best_model_auc = model_metrics.get(best_model, {}).get("auc", 0.0)
            exclusion_reason = excluded.get(best_model, "未通过推荐评分筛选")
            notes.append(
                f"注意：最佳模型 {best_model}（验证集AUC={best_model_auc:.4f}）"
                f"未作为推荐模型，原因：{exclusion_reason}。"
                f"当前推荐模型 {primary.model_name} 基于综合评分选择。"
            )

        # "weak" status when every candidate misses the return or Sharpe bar.
        all_poor = all(
            backtest_metrics.get(model.model_name, {}).get("annualized_return", 0.0) < 0.05
            or backtest_metrics.get(model.model_name, {}).get("sharpe_ratio", 0.0) < 0.5
            for model in recommendations
        )
        if all_poor:
            notes.append("所有候选模型表现欠佳（收益或夏普未达标），建议保守执行。")
        if backup:
            notes.append("前两名得分差距较小，可根据场景选择或考虑模型集成。")
        primary_stability = model_stability.get(primary.model_name, 0.5)
        if primary_stability < 0.5:
            notes.append("推荐模型稳健性偏弱，需关注参数敏感性与时间衰减风险。")
        if excluded:
            excluded_str = "；".join(f"{name}（{reason}）" for name, reason in excluded.items())
            notes.append(f"以下模型因初筛未通过被剔除：{excluded_str}。")

        status = "ok" if not all_poor else "weak"

        return {
            "status": status,
            "primary": {
                "model": primary.model_name,
                "score": primary.score,
                "metrics": primary.metrics,
                "score_breakdown": primary.breakdown,
                "reasons": primary.reasons,
            },
            "backup": {
                "model": backup.model_name,
                "score": backup.score,
                "metrics": backup.metrics,
                "score_breakdown": backup.breakdown,
                "reasons": backup.reasons,
            }
            if backup
            else None,
            "notes": notes,
            "details": [
                {
                    "model": detail.model_name,
                    "score": detail.score,
                    "score_breakdown": detail.breakdown,
                    "reasons": detail.reasons,
                }
                for detail in recommendations
            ],
        }

    def format_result(self, json_str: str) -> str:
        """Render a JSON analysis result as a human-readable Chinese report.

        Parameters
        ----------
        json_str:
            JSON-encoded summary string, as produced by :meth:`analyze`.

        Returns
        -------
        Formatted Chinese report text.
        """
        parsed: Dict[str, Any] = json.loads(json_str)
        return format_quant_analysis_text(parsed)


def main(symbol: str | None = None) -> None:
    """Run an end-to-end QuantAnalyzer test against real ETF data.

    Fetches real ETF history from AkShare, builds features, stores both in
    the global data store, executes the full quantitative analysis pipeline,
    and prints a human-readable summary.

    Parameters
    ----------
    symbol:
        ETF code, e.g. "515790", "515790.SS" or "159919".  When None, the
        first command-line argument is used if present; otherwise the
        default code "515790" (photovoltaic ETF) is analyzed.

    Notes
    -----
    Run as a module so the relative imports resolve, e.g.::

        python -m src.quant.quant_analyzer
        python -m src.quant.quant_analyzer 159919

    or use the standalone test script ``test_quant_analyzer.py``.
    """
    import sys

    from ..data import fetch_etf_history
    from ..features import build_feature_dataframe

    # 1. Resolve the ETF code: explicit argument > CLI argument > default.
    if symbol is None:
        symbol = sys.argv[1] if len(sys.argv) > 1 else "515790"

    logger.info(f"开始获取真实 ETF 数据: symbol={symbol}")

    # 2. Fetch real ETF history from AkShare.  fetch_etf_history normalizes
    #    the code automatically (e.g. 515790 -> 515790.SS).
    try:
        dataset = fetch_etf_history(symbol)
        logger.info(
            f"成功获取 ETF 数据: {dataset.symbol}, "
            f"数据量: {dataset.count} 条, "
            f"时间范围: {dataset.start.date()} 至 {dataset.end.date()}"
        )
    except Exception as e:
        logger.error(f"获取 ETF 数据失败: symbol={symbol}, error={e}")
        print(f"\n❌ 获取 ETF 数据失败: {e}")
        print("请检查 ETF 代码是否正确，或网络连接是否正常。")
        raise

    # 3. Warn when the sample is too small for meaningful training/backtesting
    #    (at least 200 rows are recommended).
    if dataset.count < 200:
        logger.warning(
            f"数据量不足: {dataset.symbol} 只有 {dataset.count} 条数据，"
            f"建议至少 200 条数据。可能会影响分析结果。"
        )
        print(f"\n⚠️  警告: 数据量不足（{dataset.count} 条），建议至少 200 条数据。")

    # 4. Cache the dataset in the global store under the normalized symbol
    #    (e.g. 515790.SS).
    test_symbol = dataset.symbol
    GLOBAL_DATA_STORE.set_dataset(test_symbol, dataset)
    logger.info(f"已缓存 ETF 数据集: {test_symbol}, 数据量: {dataset.count} 条")

    # 5. Build technical-indicator features.  build_feature_dataframe
    #    supports both Chinese and English OHLCV column names.
    logger.info("开始生成特征数据...")
    try:
        features = build_feature_dataframe(dataset.frame)
        logger.info(f"特征生成完成: 原始数据 {len(dataset.frame)} 条，特征数据 {len(features)} 条")
    except Exception as e:
        logger.error(f"特征生成失败: {e}")
        print(f"\n❌ 特征生成失败: {e}")
        raise

    # 6. Create the binary classification target: target_up = 1 when the
    #    next day's close is above today's close, else 0.  The close column
    #    may be named in Chinese ("收盘") or English ("close").
    if "收盘" in dataset.frame.columns:
        close_col = "收盘"
    elif "close" in dataset.frame.columns:
        close_col = "close"
    else:
        raise ValueError("无法找到收盘价列（收盘 或 close）")

    close = dataset.frame[close_col].loc[features.index]
    features["target_up"] = (close.shift(-1) > close).astype(int)

    # 7. Cache the feature frame so the analyzer can consume it.
    GLOBAL_DATA_STORE.set_features(test_symbol, features)
    logger.info(f"已生成并缓存特征数据: {test_symbol}, 特征数量: {len(features.columns)} 个")

    # 8. Create the analyzer instance.
    analyzer = QuantAnalyzer()

    # 9. Run the full analysis with every model family enabled.  The deep
    #    learning models (LSTM/GRU/TemporalCNN) take noticeably longer to train.
    logger.info("开始执行量化分析（包含所有模型）...")
    print(f"\n{'=' * 80}")
    print(f"开始量化分析: {test_symbol}")
    if dataset.metadata and dataset.metadata.name:
        print(f"ETF 名称: {dataset.metadata.name}")
    print(f"数据时间范围: {dataset.start.date()} 至 {dataset.end.date()}")
    print(f"数据量: {dataset.count} 条")
    print(f"{'=' * 80}\n")
    try:
        result_json = analyzer.analyze(
            symbol=test_symbol,
            include_lstm=True,
            include_gru=True,
            include_temporal_cnn=True,
            include_catboost=True,
            include_mlp=True,
            enable_optimization=True,  # hyperparameter optimization IS enabled here
        )

        # 10. Parse the JSON result and print a summary.
        result_dict = json.loads(result_json)

        print("\n" + "=" * 80)
        print("量化分析测试结果")
        print("=" * 80)
        print(f"\n标的代码: {result_dict['symbol']}")
        print("\n数据信息:")
        data_info = result_dict.get("data_info", {})
        print(f"  - 起始日期: {data_info.get('start_date', 'N/A')}")
        print(f"  - 结束日期: {data_info.get('end_date', 'N/A')}")
        print(f"  - 训练集样本数: {data_info.get('train_samples', 'N/A')}")
        print(f"  - 验证集样本数: {data_info.get('val_samples', 'N/A')}")
        print(f"  - 测试集样本数: {data_info.get('test_samples', 'N/A')}")

        print(f"\n最佳模型: {result_dict.get('best_model', 'N/A')}")

        # Per-model evaluation metrics.
        print("\n模型评估指标:")
        model_metrics = result_dict.get("model_metrics", {})
        for model_name, metrics in model_metrics.items():
            print(f"  - {model_name}:")
            print(f"    AUC: {metrics.get('auc', 0.0):.4f}")
            print(f"    准确率: {metrics.get('accuracy', 0.0):.4f}")
            print(f"    F1分数: {metrics.get('f1', 0.0):.4f}")
            if metrics.get('is_overfitting', False):
                print("    ⚠️  存在过拟合风险")

        # Backtest metrics for the best model only.
        print("\n回测指标:")
        backtest_metrics = result_dict.get("backtest_metrics", {})
        best_model_name = result_dict.get("best_model", "")
        if best_model_name and best_model_name in backtest_metrics:
            best_metrics = backtest_metrics[best_model_name]
            print(f"  - 最佳模型 ({best_model_name}):")
            print(f"    累计收益率: {best_metrics.get('cumulative_return', 0.0):.4f}")
            print(f"    年化收益率: {best_metrics.get('annualized_return', 0.0):.4f}")
            print(f"    夏普比率: {best_metrics.get('sharpe_ratio', 0.0):.4f}")
            print(f"    最大回撤: {best_metrics.get('max_drawdown', 0.0):.4f}")
            print(f"    胜率: {best_metrics.get('win_rate', 0.0):.4f}")

        # Recommendation summary.
        recommendation = result_dict.get("recommendation", {})
        if recommendation:
            print("\n模型推荐:")
            print(f"  状态: {recommendation.get('status', 'N/A')}")
            primary = recommendation.get("primary", {})
            if primary:
                print(f"  主推荐模型: {primary.get('model', 'N/A')}")
                print(f"  推荐得分: {primary.get('score', 0.0):.4f}")

            notes = recommendation.get("notes", [])
            if notes:
                print("  备注:")
                for note in notes:
                    print(f"    - {note}")

        # Full formatted report.
        print("\n" + "=" * 80)
        print("完整分析报告（格式化文本）")
        print("=" * 80)
        formatted_text = analyzer.format_result(result_json)
        print(formatted_text)

        print("\n" + "=" * 80)
        print("测试完成！")
        print("=" * 80)

    except Exception as e:
        logger.error(f"量化分析执行失败: {e}", exc_info=True)
        print(f"\n❌ 测试失败: {e}")
        raise
    # Cached datasets/features are intentionally kept in GLOBAL_DATA_STORE so
    # the fetched real data can be reused for further analysis.


if __name__ == "__main__":
    # 配置日志输出，方便查看测试过程
    logging.basicConfig(
        level=logging.INFO,
        format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
        datefmt="%Y-%m-%d %H:%M:%S",
    )
    
    main()
