"""传统机器学习模型实现."""

from __future__ import annotations

import logging
import warnings
from pathlib import Path
from typing import Any, Dict

import joblib
import numpy as np
import pandas as pd
from lightgbm import LGBMClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression, LinearRegression
from sklearn.metrics import accuracy_score, auc, f1_score, roc_auc_score
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from xgboost import XGBClassifier

try:
    from catboost import CatBoostClassifier
except ImportError:  # pragma: no cover - 依赖缺失时在运行期提示
    CatBoostClassifier = None

from .base import BaseQuantModel, ModelConfig

logger = logging.getLogger(__name__)


class LogisticRegressionModel(BaseQuantModel):
    """Logistic-regression classifier wrapper."""

    def __init__(self, config: ModelConfig | None = None) -> None:
        """Use the supplied config, or regularized defaults when None."""
        if config is None:
            config = ModelConfig(
                name="logistic_regression",
                random_seed=42,
                params={
                    # Per docs/quant_flow.md: stronger regularization, fixed solver.
                    "penalty": "l2",
                    "C": 0.1,
                    "solver": "lbfgs",
                    "max_iter": 1000,
                    "class_weight": None,
                    "random_state": 42,
                },
            )
        super().__init__(config)
        # Created by train()/load(); None until then.
        self.model: LogisticRegression | None = None

    def train(
        self,
        X_train: pd.DataFrame | np.ndarray,
        y_train: pd.Series | np.ndarray,
        X_val: pd.DataFrame | np.ndarray | None = None,
        y_val: pd.Series | np.ndarray | None = None,
    ) -> Dict[str, Any]:
        """Fit the classifier; score the validation split when one is given.

        Returns:
            Summary dict with "val_metrics" (auc/accuracy/f1) when both
            X_val and y_val are provided, otherwise empty.
        """
        self._set_feature_names(X_train)

        def strip_pandas(data):
            # sklearn is always fed plain ndarrays.
            return data.values if isinstance(data, (pd.DataFrame, pd.Series)) else data

        self.model = LogisticRegression(**self.config.params)
        self.model.fit(strip_pandas(X_train), strip_pandas(y_train))
        self._is_trained = True

        summary: Dict[str, Any] = {}
        if X_val is not None and y_val is not None:
            val_y = strip_pandas(y_val)
            proba = self.model.predict_proba(strip_pandas(X_val))[:, 1]
            preds = (proba >= 0.5).astype(int)

            summary["val_metrics"] = {
                "auc": float(roc_auc_score(val_y, proba)),
                "accuracy": float(accuracy_score(val_y, preds)),
                "f1": float(f1_score(val_y, preds)),
            }

        logger.debug(f"LogisticRegression 训练完成: {summary}")
        return summary

    def predict_proba(self, X: pd.DataFrame | np.ndarray) -> np.ndarray:
        """Return P(y=1) for every row of X."""
        self._validate_trained()
        if self.model is None:
            raise RuntimeError("模型未初始化")
        features = X.values if isinstance(X, pd.DataFrame) else X
        return self.model.predict_proba(features)[:, 1]

    def save(self, path: str | Path) -> None:
        """Serialize the fitted estimator with joblib."""
        self._validate_trained()
        if self.model is None:
            raise RuntimeError("模型未初始化")
        joblib.dump(self.model, path)
        logger.info(f"LogisticRegression 模型已保存到: {path}")

    def load(self, path: str | Path) -> None:
        """Restore an estimator previously saved with save()."""
        self.model = joblib.load(path)
        self._is_trained = True
        logger.info(f"LogisticRegression 模型已从 {path} 加载")


class LinearRegressionModel(BaseQuantModel):
    """Linear regression model (regress pseudo-returns, then map to probabilities)."""

    def __init__(self, config: ModelConfig | None = None) -> None:
        """Use the supplied config, or an empty-parameter default."""
        if config is None:
            config = ModelConfig(
                name="linear_regression",
                random_seed=42,
                params={},
            )
        super().__init__(config)
        self.model: LinearRegression | None = None
        # In-sample prediction statistics used to z-score before the sigmoid.
        self._train_mean: float | None = None
        self._train_std: float | None = None

    def train(
        self,
        X_train: pd.DataFrame | np.ndarray,
        y_train: pd.Series | np.ndarray,
        X_val: pd.DataFrame | np.ndarray | None = None,
        y_val: pd.Series | np.ndarray | None = None,
    ) -> Dict[str, Any]:
        """Fit on pseudo-returns derived from the binary labels.

        Returns:
            Summary dict with "val_metrics" (auc/accuracy/f1) when both
            X_val and y_val are provided, otherwise empty.
        """
        self._set_feature_names(X_train)

        def strip_pandas(data):
            # The estimator is always fed plain ndarrays.
            return data.values if isinstance(data, (pd.DataFrame, pd.Series)) else data

        features = strip_pandas(X_train)
        labels = strip_pandas(y_train)

        # Map the binary labels to pseudo returns (1 -> +1%, 0 -> -1%).
        # NOTE(review): real next-day returns would be the proper target.
        pseudo_returns = np.where(labels == 1, 0.01, -0.01)

        self.model = LinearRegression()
        self.model.fit(features, pseudo_returns)

        # In-sample prediction statistics drive the later normalization;
        # guard against a degenerate zero standard deviation.
        in_sample = self.model.predict(features)
        self._train_mean = float(np.mean(in_sample))
        self._train_std = float(np.std(in_sample))
        if self._train_std == 0:
            self._train_std = 1.0

        self._is_trained = True

        summary: Dict[str, Any] = {}
        if X_val is not None and y_val is not None:
            val_y = strip_pandas(y_val)
            proba = self._sigmoid_map(self.model.predict(strip_pandas(X_val)))
            preds = (proba >= 0.5).astype(int)

            summary["val_metrics"] = {
                "auc": float(roc_auc_score(val_y, proba)),
                "accuracy": float(accuracy_score(val_y, preds)),
                "f1": float(f1_score(val_y, preds)),
            }

        logger.debug(f"LinearRegression 训练完成: {summary}")
        return summary

    def _sigmoid_map(self, y_pred: np.ndarray) -> np.ndarray:
        """Squash z-scored regression output into (0, 1) via the logistic function."""
        if self._train_mean is None or self._train_std is None:
            raise RuntimeError("模型未训练")
        standardized = (y_pred - self._train_mean) / self._train_std
        return 1.0 / (1.0 + np.exp(-standardized))

    def predict_proba(self, X: pd.DataFrame | np.ndarray) -> np.ndarray:
        """Return P(y=1) for every row of X."""
        self._validate_trained()
        if self.model is None:
            raise RuntimeError("模型未初始化")
        features = X.values if isinstance(X, pd.DataFrame) else X
        return self._sigmoid_map(self.model.predict(features))

    def save(self, path: str | Path) -> None:
        """Persist the estimator together with its normalization statistics."""
        self._validate_trained()
        if self.model is None:
            raise RuntimeError("模型未初始化")
        joblib.dump(
            {
                "model": self.model,
                "train_mean": self._train_mean,
                "train_std": self._train_std,
            },
            path,
        )
        logger.info(f"LinearRegression 模型已保存到: {path}")

    def load(self, path: str | Path) -> None:
        """Restore the estimator and normalization statistics saved by save()."""
        payload = joblib.load(path)
        self.model = payload["model"]
        self._train_mean = payload["train_mean"]
        self._train_std = payload["train_std"]
        self._is_trained = True
        logger.info(f"LinearRegression 模型已从 {path} 加载")


class XGBoostModel(BaseQuantModel):
    """XGBoost binary classifier wrapper.

    Overfitting is controlled via the regularized default hyper-parameters
    (see __init__) rather than early stopping.
    """

    def __init__(self, config: ModelConfig | None = None) -> None:
        """Create the model; falls back to regularized defaults when config is None."""
        if config is None:
            config = ModelConfig(
                name="xgboost",
                random_seed=42,
                params={
                    "objective": "binary:logistic",
                    "max_depth": 4,  # lowered from 5 to reduce overfitting risk
                    "n_estimators": 100,
                    "learning_rate": 0.05,
                    "subsample": 0.8,
                    "colsample_bytree": 0.8,
                    "reg_alpha": 0.5,  # L1 regularization, raised from 0.1 to 0.5
                    "reg_lambda": 2.0,  # L2 regularization, raised from 1.0 to 2.0
                    "min_child_weight": 3,  # minimum child weight, raised from 1 to 3
                    "random_state": 42,
                    "n_jobs": -1,
                },
            )
        super().__init__(config)
        # Created by train()/load(); None until then.
        self.model: XGBClassifier | None = None

    def train(
        self,
        X_train: pd.DataFrame | np.ndarray,
        y_train: pd.Series | np.ndarray,
        X_val: pd.DataFrame | np.ndarray | None = None,
        y_val: pd.Series | np.ndarray | None = None,
    ) -> Dict[str, Any]:
        """Fit the classifier; score the validation split when one is given.

        Returns:
            Summary dict with "val_metrics" (auc/accuracy/f1) when both
            X_val and y_val are provided, otherwise empty.
        """
        self._set_feature_names(X_train)
        X_train_array = X_train.values if isinstance(X_train, pd.DataFrame) else X_train
        y_train_array = y_train.values if isinstance(y_train, pd.Series) else y_train

        self.model = XGBClassifier(**self.config.params)
        eval_set = None

        # A validation split, when provided, is passed to fit() for evaluation.
        # NOTE(review): the original comment claimed XGBoost 3.1.1 no longer
        # supports early_stopping_rounds/callbacks at all; upstream actually
        # moved them from fit() to the estimator constructor in 2.x — confirm
        # against the pinned version. Here overfitting is mitigated through
        # regularization (reg_lambda/reg_alpha) and reduced max_depth instead.
        if X_val is not None and y_val is not None:
            X_val_array = X_val.values if isinstance(X_val, pd.DataFrame) else X_val
            y_val_array = y_val.values if isinstance(y_val, pd.Series) else y_val
            eval_set = [(X_val_array, y_val_array)]

        # Train without early stopping; the regularization parameters above
        # bound model complexity.
        self.model.fit(
            X_train_array,
            y_train_array,
            eval_set=eval_set,
            verbose=False
        )
        if eval_set is not None:
            logger.info(
                "XGBoost 3.x 不支持早停参数，已通过 max_depth/reg_lambda/reg_alpha 等正则设置抑制过拟合"
            )
        self._is_trained = True

        summary: Dict[str, Any] = {}
        if X_val is not None and y_val is not None:
            X_val_array = X_val.values if isinstance(X_val, pd.DataFrame) else X_val
            y_val_array = y_val.values if isinstance(y_val, pd.Series) else y_val
            y_pred_proba = self.model.predict_proba(X_val_array)[:, 1]
            y_pred = (y_pred_proba >= 0.5).astype(int)

            summary["val_metrics"] = {
                "auc": float(roc_auc_score(y_val_array, y_pred_proba)),
                "accuracy": float(accuracy_score(y_val_array, y_pred)),
                "f1": float(f1_score(y_val_array, y_pred)),
            }

        logger.debug(f"XGBoost 训练完成: {summary}")
        return summary

    def predict_proba(self, X: pd.DataFrame | np.ndarray) -> np.ndarray:
        """Return P(y=1) for every row of X."""
        self._validate_trained()
        if self.model is None:
            raise RuntimeError("模型未初始化")
        X_array = X.values if isinstance(X, pd.DataFrame) else X
        return self.model.predict_proba(X_array)[:, 1]

    def get_feature_importance(self) -> Dict[str, float] | None:
        """Map feature names to importances; None when untrained or names unknown."""
        if not self._is_trained or self.model is None:
            return None
        if self._feature_names is None:
            return None
        importance = self.model.feature_importances_
        return dict(zip(self._feature_names, importance.tolist()))

    def save(self, path: str | Path) -> None:
        """Persist the fitted model in XGBoost's native format."""
        self._validate_trained()
        if self.model is None:
            raise RuntimeError("模型未初始化")
        self.model.save_model(str(path))
        logger.info(f"XGBoost 模型已保存到: {path}")

    def load(self, path: str | Path) -> None:
        """Load a model saved via save() and restore sklearn-compatible attributes."""
        self.model = XGBClassifier(**self.config.params)
        self.model.load_model(str(path))
        # Restore attributes the sklearn wrapper expects after fitting.
        if hasattr(self.model, 'get_booster'):
            # After loading, try to recover the feature count from the booster.
            try:
                booster = self.model.get_booster()
                # Probe the available accessor (differs across XGBoost versions).
                if hasattr(booster, 'num_feature'):
                    num_features = booster.num_feature()
                elif hasattr(booster, 'get_num_feature'):
                    num_features = booster.get_num_feature()
                else:
                    # Fall back to the estimator attribute, if present.
                    num_features = getattr(self.model, 'n_features_in_', None)
                    if num_features is None:
                        # Give up; XGBoost handles prediction without it.
                        num_features = None

                if num_features is not None:
                    self.model.n_features_in_ = num_features  # attribute sklearn expects
            except Exception as e:
                logger.debug(f"设置 XGBoost 特征数量时出错（可忽略）: {e}")
        # Mark the sklearn wrapper as fitted.
        self.model.fitted_ = True
        self._is_trained = True
        logger.info(f"XGBoost 模型已从 {path} 加载")


class LightGBMModel(BaseQuantModel):
    """LightGBM classifier wrapper.

    Trains an ``LGBMClassifier`` with AUC-based early stopping when a
    validation split is supplied, and saves/loads via LightGBM's native
    booster format.
    """

    def __init__(self, config: ModelConfig | None = None) -> None:
        """Create the model; falls back to regularized defaults when config is None."""
        if config is None:
            config = ModelConfig(
                name="lightgbm",
                random_seed=42,
                params={
                    "objective": "binary",
                    "num_leaves": 15,  # fewer leaves (31 -> 15) to curb overfitting
                    "max_depth": 5,  # bounded depth (unlimited -1 -> 5)
                    "n_estimators": 100,
                    "learning_rate": 0.05,
                    "subsample": 0.8,
                    "colsample_bytree": 0.8,
                    "reg_alpha": 0.5,  # L1 regularization, raised from 0.1 to 0.5
                    "reg_lambda": 2.0,  # L2 regularization, raised from 1.0 to 2.0
                    "min_child_samples": 30,  # minimum leaf samples, raised from 20 to 30
                    "random_state": 42,
                    "n_jobs": -1,
                },
            )
        super().__init__(config)
        # Created by train()/load(); None until then.
        self.model: LGBMClassifier | None = None

    def train(
        self,
        X_train: pd.DataFrame | np.ndarray,
        y_train: pd.Series | np.ndarray,
        X_val: pd.DataFrame | np.ndarray | None = None,
        y_val: pd.Series | np.ndarray | None = None,
    ) -> Dict[str, Any]:
        """Fit with optional early stopping; report early-stop info and metrics.

        Returns:
            Summary dict; may contain "early_stop_info" (triggered flag,
            best/total iterations) and "val_metrics" (auc/accuracy/f1) when a
            validation split is provided.
        """
        self._set_feature_names(X_train)
        X_train_array = X_train.values if isinstance(X_train, pd.DataFrame) else X_train
        y_train_array = y_train.values if isinstance(y_train, pd.Series) else y_train

        # One summary dict for the whole method. BUGFIX: a second
        # `summary = {}` after training used to discard the early-stop
        # info collected below before the metrics were added.
        summary: Dict[str, Any] = {}

        # verbosity=-1 silences all LightGBM output, including warnings that
        # are expected on small datasets when a node cannot be split further.
        model_params = {**self.config.params, "verbosity": -1}
        self.model = LGBMClassifier(**model_params)

        eval_set = None
        X_val_array = None
        y_val_array = None
        if X_val is not None and y_val is not None:
            X_val_array = X_val.values if isinstance(X_val, pd.DataFrame) else X_val
            y_val_array = y_val.values if isinstance(y_val, pd.Series) else y_val
            eval_set = [(X_val_array, y_val_array)]

        # Suppress sklearn's "does not have valid feature names" warning:
        # we train on DataFrames but deliberately feed numpy arrays.
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore", category=UserWarning, message=".*does not have valid feature names.*")

            # Early stopping monitors the validation AUC (eval_metric below):
            # stop when AUC has not improved by >= 0.001 for 20 rounds.
            callbacks = None
            if eval_set is not None:
                from lightgbm import early_stopping
                callbacks = [
                    early_stopping(stopping_rounds=20, min_delta=0.001, verbose=False)
                ]

            self.model.fit(
                X_train_array,
                y_train_array,
                eval_set=eval_set,
                eval_metric='auc',  # the metric early_stopping tracks
                callbacks=callbacks,
            )

            # Record early-stop details for downstream underfitting checks.
            if hasattr(self.model, 'booster_'):
                best_iteration = getattr(self.model.booster_, 'best_iteration', None)
                total_iterations = getattr(self.model.booster_, 'num_trees', lambda: None)()
                if best_iteration is not None and total_iterations is not None:
                    summary["early_stop_info"] = {
                        "triggered": best_iteration < total_iterations - 1,
                        "best_iteration": int(best_iteration),
                        "total_iterations": int(total_iterations),
                    }
                elif total_iterations is not None:
                    summary["early_stop_info"] = {
                        "triggered": False,
                        "best_iteration": None,
                        "total_iterations": int(total_iterations),
                    }
        self._is_trained = True

        if X_val_array is not None and y_val_array is not None:
            # Suppress the same feature-name warning during prediction.
            with warnings.catch_warnings():
                warnings.filterwarnings("ignore", category=UserWarning, message=".*does not have valid feature names.*")
                y_pred_proba = self.model.predict_proba(X_val_array)[:, 1]
            y_pred = (y_pred_proba >= 0.5).astype(int)

            summary["val_metrics"] = {
                "auc": float(roc_auc_score(y_val_array, y_pred_proba)),
                "accuracy": float(accuracy_score(y_val_array, y_pred)),
                "f1": float(f1_score(y_val_array, y_pred)),
            }

        logger.debug(f"LightGBM 训练完成: {summary}")
        return summary

    def predict_proba(self, X: pd.DataFrame | np.ndarray) -> np.ndarray:
        """Return P(y=1) for every row of X."""
        self._validate_trained()
        if self.model is None:
            raise RuntimeError("模型未初始化")
        X_array = X.values if isinstance(X, pd.DataFrame) else X
        # Suppress the sklearn feature-name warning (numpy input is intentional).
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore", category=UserWarning, message=".*does not have valid feature names.*")
            return self.model.predict_proba(X_array)[:, 1]

    def get_feature_importance(self) -> Dict[str, float] | None:
        """Map feature names to importances; None when untrained or names unknown."""
        if not self._is_trained or self.model is None:
            return None
        if self._feature_names is None:
            return None
        importance = self.model.feature_importances_
        return dict(zip(self._feature_names, importance.tolist()))

    def save(self, path: str | Path) -> None:
        """Persist the underlying booster in LightGBM's native format."""
        self._validate_trained()
        if self.model is None:
            raise RuntimeError("模型未初始化")
        self.model.booster_.save_model(str(path))
        logger.info(f"LightGBM 模型已保存到: {path}")

    def load(self, path: str | Path) -> None:
        """Load a booster saved via save() and rebuild the sklearn wrapper."""
        from lightgbm import Booster

        booster = Booster(model_file=str(path))
        num_features = booster.num_feature()
        # Re-create the wrapper and attach the loaded booster plus the
        # private/sklearn attributes it expects after fitting.
        self.model = LGBMClassifier(**self.config.params)
        self.model._Booster = booster
        self.model._n_features = num_features
        self.model.n_features_in_ = num_features  # attribute sklearn expects
        self.model._classes = np.array([0, 1])
        self.model._n_classes = 2
        self.model.fitted_ = True  # marks the sklearn wrapper as trained
        self._is_trained = True
        logger.info(f"LightGBM 模型已从 {path} 加载，特征数: {num_features}")


class CatBoostModel(BaseQuantModel):
    """CatBoost classifier wrapper (optional dependency)."""

    def __init__(self, config: ModelConfig | None = None) -> None:
        """Use the supplied config, or regularized defaults when None."""
        if config is None:
            config = ModelConfig(
                name="catboost",
                random_seed=42,
                params={
                    "iterations": 100,
                    "depth": 5,
                    "learning_rate": 0.05,
                    "l2_leaf_reg": 3.0,
                    "loss_function": "Logloss",
                    "eval_metric": "AUC",
                    "random_seed": 42,
                    "verbose": False,
                },
            )
        super().__init__(config)
        self.model: CatBoostClassifier | None = None

    def _ensure_dependency(self) -> None:
        """Raise ImportError when the optional catboost package is missing."""
        if CatBoostClassifier is None:
            raise ImportError(
                "未安装 catboost 库，无法训练 CatBoostModel。请运行 `uv pip install catboost` 后重试。"
            )

    def train(
        self,
        X_train: pd.DataFrame | np.ndarray,
        y_train: pd.Series | np.ndarray,
        X_val: pd.DataFrame | np.ndarray | None = None,
        y_val: pd.Series | np.ndarray | None = None,
    ) -> Dict[str, Any]:
        """Fit CatBoost; with a validation split, keep the best model and score it.

        Returns:
            Summary dict with "val_metrics" (auc/accuracy/f1) when both
            X_val and y_val are provided, otherwise empty.
        """
        self._ensure_dependency()
        self._set_feature_names(X_train)

        def strip_pandas(data):
            # CatBoost is always fed plain ndarrays.
            return data.values if isinstance(data, (pd.DataFrame, pd.Series)) else data

        train_X = strip_pandas(X_train)
        train_y = strip_pandas(y_train)

        has_validation = X_val is not None and y_val is not None
        val_X = strip_pandas(X_val) if has_validation else None
        val_y = strip_pandas(y_val) if has_validation else None

        self.model = CatBoostClassifier(**self.config.params)
        if has_validation:
            # use_best_model rolls back to the iteration with the best eval metric.
            self.model.fit(train_X, train_y, eval_set=(val_X, val_y), use_best_model=True)
        else:
            self.model.fit(train_X, train_y)

        self._is_trained = True

        summary: Dict[str, Any] = {}
        if has_validation:
            proba = self.model.predict_proba(val_X)[:, 1]
            preds = (proba >= 0.5).astype(int)
            summary["val_metrics"] = {
                "auc": float(roc_auc_score(val_y, proba)),
                "accuracy": float(accuracy_score(val_y, preds)),
                "f1": float(f1_score(val_y, preds)),
            }

        logger.debug(f"CatBoost 训练完成: {summary}")
        return summary

    def predict_proba(self, X: pd.DataFrame | np.ndarray) -> np.ndarray:
        """Return P(y=1) for every row of X."""
        self._validate_trained()
        if self.model is None:
            raise RuntimeError("模型未初始化")
        features = X.values if isinstance(X, pd.DataFrame) else X
        return self.model.predict_proba(features)[:, 1]

    def get_feature_importance(self) -> Dict[str, float] | None:
        """Map feature names to CatBoost importances; None when unavailable."""
        if not self._is_trained or self.model is None or self._feature_names is None:
            return None
        return dict(zip(self._feature_names, self.model.get_feature_importance().tolist()))

    def save(self, path: str | Path) -> None:
        """Persist the model in CatBoost's native format."""
        self._validate_trained()
        if self.model is None:
            raise RuntimeError("模型未初始化")
        self.model.save_model(str(path))
        logger.info(f"CatBoost 模型已保存到: {path}")

    def load(self, path: str | Path) -> None:
        """Restore a model previously saved with save()."""
        self._ensure_dependency()
        self.model = CatBoostClassifier(**self.config.params)
        self.model.load_model(str(path))
        self._is_trained = True
        logger.info(f"CatBoost 模型已从 {path} 加载")


class RandomForestModel(BaseQuantModel):
    """Random-forest classifier wrapper."""

    def __init__(self, config: ModelConfig | None = None) -> None:
        """Use the supplied config, or conservative anti-overfitting defaults."""
        if config is None:
            config = ModelConfig(
                name="random_forest",
                random_seed=42,
                params={
                    "n_estimators": 100,
                    "max_depth": 6,  # shallower trees (10 -> 6) to curb overfitting
                    "min_samples_split": 30,  # raised from 20 to 30
                    "min_samples_leaf": 15,  # raised from 10 to 15
                    "max_features": "sqrt",
                    "bootstrap": True,
                    "random_state": 42,
                    "n_jobs": -1,
                },
            )
        super().__init__(config)
        self.model: RandomForestClassifier | None = None

    def train(
        self,
        X_train: pd.DataFrame | np.ndarray,
        y_train: pd.Series | np.ndarray,
        X_val: pd.DataFrame | np.ndarray | None = None,
        y_val: pd.Series | np.ndarray | None = None,
    ) -> Dict[str, Any]:
        """Fit the forest and score the validation split when one is given.

        Returns:
            Summary dict with "val_metrics" (auc/accuracy/f1) when both
            X_val and y_val are provided, otherwise empty.
        """
        self._set_feature_names(X_train)

        def strip_pandas(data):
            # sklearn is always fed plain ndarrays.
            return data.values if isinstance(data, (pd.DataFrame, pd.Series)) else data

        self.model = RandomForestClassifier(**self.config.params)
        self.model.fit(strip_pandas(X_train), strip_pandas(y_train))
        self._is_trained = True

        summary: Dict[str, Any] = {}
        if X_val is not None and y_val is not None:
            val_y = strip_pandas(y_val)
            proba = self.model.predict_proba(strip_pandas(X_val))[:, 1]
            preds = (proba >= 0.5).astype(int)

            summary["val_metrics"] = {
                "auc": float(roc_auc_score(val_y, proba)),
                "accuracy": float(accuracy_score(val_y, preds)),
                "f1": float(f1_score(val_y, preds)),
            }

        logger.debug(f"RandomForest 训练完成: {summary}")
        return summary

    def predict_proba(self, X: pd.DataFrame | np.ndarray) -> np.ndarray:
        """Return P(y=1) for every row of X."""
        self._validate_trained()
        if self.model is None:
            raise RuntimeError("模型未初始化")
        features = X.values if isinstance(X, pd.DataFrame) else X
        return self.model.predict_proba(features)[:, 1]

    def get_feature_importance(self) -> Dict[str, float] | None:
        """Map feature names to impurity importances; None when unavailable."""
        if not self._is_trained or self.model is None or self._feature_names is None:
            return None
        return dict(zip(self._feature_names, self.model.feature_importances_.tolist()))

    def save(self, path: str | Path) -> None:
        """Serialize the fitted estimator with joblib."""
        self._validate_trained()
        if self.model is None:
            raise RuntimeError("模型未初始化")
        joblib.dump(self.model, path)
        logger.info(f"RandomForest 模型已保存到: {path}")

    def load(self, path: str | Path) -> None:
        """Restore an estimator previously saved with save()."""
        self.model = joblib.load(path)
        self._is_trained = True
        logger.info(f"RandomForest 模型已从 {path} 加载")


class SVMModel(BaseQuantModel):
    """Support-vector-machine classifier wrapper."""

    def __init__(self, config: ModelConfig | None = None) -> None:
        """Use the supplied config, or RBF-kernel defaults with probabilities on."""
        if config is None:
            config = ModelConfig(
                name="svm",
                random_seed=42,
                params={
                    "kernel": "rbf",
                    "C": 1.0,
                    "gamma": "scale",
                    "probability": True,
                    "random_state": 42,
                },
            )
        super().__init__(config)
        self.model: SVC | None = None

    def train(
        self,
        X_train: pd.DataFrame | np.ndarray,
        y_train: pd.Series | np.ndarray,
        X_val: pd.DataFrame | np.ndarray | None = None,
        y_val: pd.Series | np.ndarray | None = None,
    ) -> Dict[str, Any]:
        """Fit the SVM and score the validation split when one is given.

        Returns:
            Summary dict with "val_metrics" (auc/accuracy/f1) when both
            X_val and y_val are provided, otherwise empty.
        """
        self._set_feature_names(X_train)

        def strip_pandas(data):
            # sklearn is always fed plain ndarrays.
            return data.values if isinstance(data, (pd.DataFrame, pd.Series)) else data

        self.model = SVC(**self.config.params)
        self.model.fit(strip_pandas(X_train), strip_pandas(y_train))
        self._is_trained = True

        summary: Dict[str, Any] = {}
        if X_val is not None and y_val is not None:
            val_y = strip_pandas(y_val)
            proba = self.model.predict_proba(strip_pandas(X_val))[:, 1]
            preds = (proba >= 0.5).astype(int)

            summary["val_metrics"] = {
                "auc": float(roc_auc_score(val_y, proba)),
                "accuracy": float(accuracy_score(val_y, preds)),
                "f1": float(f1_score(val_y, preds)),
            }

        logger.debug(f"SVM 训练完成: {summary}")
        return summary

    def predict_proba(self, X: pd.DataFrame | np.ndarray) -> np.ndarray:
        """Return P(y=1) for every row of X."""
        self._validate_trained()
        if self.model is None:
            raise RuntimeError("模型未初始化")
        features = X.values if isinstance(X, pd.DataFrame) else X
        return self.model.predict_proba(features)[:, 1]

    def save(self, path: str | Path) -> None:
        """Serialize the fitted estimator with joblib."""
        self._validate_trained()
        if self.model is None:
            raise RuntimeError("模型未初始化")
        joblib.dump(self.model, path)
        logger.info(f"SVM 模型已保存到: {path}")

    def load(self, path: str | Path) -> None:
        """Restore an estimator previously saved with save()."""
        self.model = joblib.load(path)
        self._is_trained = True
        logger.info(f"SVM 模型已从 {path} 加载")


class DecisionTreeModel(BaseQuantModel):
    """Decision-tree classifier wrapper."""

    def __init__(self, config: ModelConfig | None = None) -> None:
        """Use the supplied config, or conservative anti-overfitting defaults."""
        if config is None:
            config = ModelConfig(
                name="decision_tree",
                random_seed=42,
                params={
                    "max_depth": 6,  # shallower tree (10 -> 6) to curb overfitting
                    "min_samples_split": 20,  # raised from 5 to 20
                    "min_samples_leaf": 10,  # raised from 2 to 10
                    "max_features": "sqrt",
                    "random_state": 42,
                },
            )
        super().__init__(config)
        self.model: DecisionTreeClassifier | None = None

    def train(
        self,
        X_train: pd.DataFrame | np.ndarray,
        y_train: pd.Series | np.ndarray,
        X_val: pd.DataFrame | np.ndarray | None = None,
        y_val: pd.Series | np.ndarray | None = None,
    ) -> Dict[str, Any]:
        """Fit the tree and score the validation split when one is given.

        Returns:
            Summary dict with "val_metrics" (auc/accuracy/f1) when both
            X_val and y_val are provided, otherwise empty.
        """
        self._set_feature_names(X_train)

        def strip_pandas(data):
            # sklearn is always fed plain ndarrays.
            return data.values if isinstance(data, (pd.DataFrame, pd.Series)) else data

        self.model = DecisionTreeClassifier(**self.config.params)
        self.model.fit(strip_pandas(X_train), strip_pandas(y_train))
        self._is_trained = True

        summary: Dict[str, Any] = {}
        if X_val is not None and y_val is not None:
            val_y = strip_pandas(y_val)
            proba = self.model.predict_proba(strip_pandas(X_val))[:, 1]
            preds = (proba >= 0.5).astype(int)

            summary["val_metrics"] = {
                "auc": float(roc_auc_score(val_y, proba)),
                "accuracy": float(accuracy_score(val_y, preds)),
                "f1": float(f1_score(val_y, preds)),
            }

        logger.debug(f"DecisionTree 训练完成: {summary}")
        return summary

    def predict_proba(self, X: pd.DataFrame | np.ndarray) -> np.ndarray:
        """Return P(y=1) for every row of X."""
        self._validate_trained()
        if self.model is None:
            raise RuntimeError("模型未初始化")
        features = X.values if isinstance(X, pd.DataFrame) else X
        return self.model.predict_proba(features)[:, 1]

    def get_feature_importance(self) -> Dict[str, float] | None:
        """Map feature names to impurity importances; None when unavailable."""
        if not self._is_trained or self.model is None or self._feature_names is None:
            return None
        return dict(zip(self._feature_names, self.model.feature_importances_.tolist()))

    def save(self, path: str | Path) -> None:
        """Serialize the fitted estimator with joblib."""
        self._validate_trained()
        if self.model is None:
            raise RuntimeError("模型未初始化")
        joblib.dump(self.model, path)
        logger.info(f"DecisionTree 模型已保存到: {path}")

    def load(self, path: str | Path) -> None:
        """Restore an estimator previously saved with save()."""
        self.model = joblib.load(path)
        self._is_trained = True
        logger.info(f"DecisionTree 模型已从 {path} 加载")

