"""模型训练器."""

from __future__ import annotations

import logging
from typing import Dict, List

import numpy as np
import pandas as pd

from .base import BaseQuantModel
from .factory import ModelFactory

logger = logging.getLogger(__name__)


class ModelTrainer:
    """Unified interface for splitting data and batch-training models."""

    def __init__(self, random_seed: int = 42) -> None:
        """Initialize the trainer; data augmentation is no longer used here.

        Parameters
        ----------
        random_seed:
            Seed applied to NumPy's RNG for reproducibility.
        """
        self.random_seed = random_seed
        # NOTE: this seeds NumPy's *global* RNG, so it affects every other
        # NumPy consumer in the process, not just this trainer.
        np.random.seed(random_seed)

    def split_data(
        self,
        X: pd.DataFrame,
        y: pd.Series,
        train_ratio: float = 0.7,
        val_ratio: float = 0.15,
        test_ratio: float = 0.15,
    ) -> tuple[pd.DataFrame, pd.Series, pd.DataFrame, pd.Series, pd.DataFrame, pd.Series]:
        """Split data chronologically (no shuffling).

        Parameters
        ----------
        X:
            Feature data.
        y:
            Label data (must be aligned with ``X``).
        train_ratio:
            Fraction of samples assigned to the training set.
        val_ratio:
            Fraction of samples assigned to the validation set.
        test_ratio:
            Fraction of samples assigned to the test set.

        Returns
        -------
        (X_train, y_train, X_val, y_val, X_test, y_test)

        Raises
        ------
        ValueError
            If the ratios do not sum to 1.0, fewer than 100 samples are
            available, or the training split would contain fewer than 50
            samples.
        """
        if abs(train_ratio + val_ratio + test_ratio - 1.0) > 1e-6:
            raise ValueError("训练集、验证集、测试集比例之和必须为 1.0")

        n = len(X)

        # Per docs/quant_flow.md: with fewer than 100 samples the validation
        # strategy has to be adjusted manually (e.g. leave-one-out).
        if n < 100:
            raise ValueError(
                "总样本数不足 100 条，无法按 70/15/15 切分。"
                "请补充数据或参考 docs/quant_flow.md 调整验证方式（例如留一法）。"
            )

        train_end = int(n * train_ratio)
        # Enforce the documented minimum of >= 50 training samples.
        if train_end < 50:
            raise ValueError(
                f"训练集样本仅 {train_end} 条，低于文档要求的 50 条最低门槛。"
                "请收集更多数据或降低验证/测试占比后重试。"
            )

        val_end = train_end + int(n * val_ratio)

        X_train = X.iloc[:train_end]
        y_train = y.iloc[:train_end]
        X_val = X.iloc[train_end:val_end]
        y_val = y.iloc[train_end:val_end]
        # The test split absorbs any rounding remainder, so all rows are used.
        X_test = X.iloc[val_end:]
        y_test = y.iloc[val_end:]

        # Lazy %-style args: consistent with the other log calls in this
        # class and avoids formatting when the level is disabled.
        logger.info(
            "数据切分完成: 训练集=%s, 验证集=%s, 测试集=%s",
            len(X_train),
            len(X_val),
            len(X_test),
        )
        return X_train, y_train, X_val, y_val, X_test, y_test

    def train_models(
        self,
        model_names: List[str],
        X_train: pd.DataFrame,
        y_train: pd.Series,
        X_val: pd.DataFrame | None = None,
        y_val: pd.Series | None = None,
    ) -> Dict[str, BaseQuantModel]:
        """Train several models in one pass.

        Parameters
        ----------
        model_names:
            Model names understood by ``ModelFactory.create``.
        X_train:
            Training features.
        y_train:
            Training labels.
        X_val:
            Optional validation features, forwarded to each model's ``train``.
        y_val:
            Optional validation labels.

        Returns
        -------
        Mapping from model name to its trained model instance.

        Raises
        ------
        Exception
            Re-raises whatever the factory or a model's ``train`` raises,
            after logging it; a single failure aborts the whole batch.
        """
        trained_models: Dict[str, BaseQuantModel] = {}

        for model_name in model_names:
            logger.debug("开始训练模型: %s", model_name)
            try:
                model = ModelFactory.create(model_name)
                summary = model.train(X_train, y_train, X_val, y_val)
                # Stash the training summary on the model object for later
                # overfitting diagnostics.
                model._training_summary = summary
                trained_models[model_name] = model
                logger.info("模型 %s 训练完成", model_name)
            except Exception as e:
                logger.error("模型 %s 训练失败: %s", model_name, e, exc_info=True)
                raise

        return trained_models

    def train_all_default_models(
        self,
        X_train: pd.DataFrame,
        y_train: pd.Series,
        X_val: pd.DataFrame | None = None,
        y_val: pd.Series | None = None,
        *,
        include_lstm: bool = True,
        include_gru: bool = False,
        include_temporal_cnn: bool = False,
        include_catboost: bool = False,
        include_mlp: bool = False,
        include_baseline: bool = True,
        include_classic_extras: bool = True,
    ) -> Dict[str, BaseQuantModel]:
        """Train the full default model line-up.

        Deep models (LSTM/GRU/TemporalCNN/MLP) are skipped automatically
        when the training set holds fewer than 500 samples, regardless of
        the ``include_*`` flags.

        Parameters
        ----------
        include_lstm:
            Include the recommended LSTM model.
        include_gru:
            Include the recommended GRU model.
        include_temporal_cnn:
            Include the optional TemporalCNN model (formerly CNN1D).
        include_catboost:
            Include the optional CatBoost model.
        include_mlp:
            Include the optional MLP model.
        include_baseline:
            Train the baseline models (Logistic/Linear).
        include_classic_extras:
            Include the extra classic models (SVM, DecisionTree).
        """
        mandatory_models = ["xgboost", "lightgbm", "random_forest"]
        baseline_models = ["logistic_regression", "linear_regression"]
        classic_extra_models = ["svm", "decision_tree"]

        model_names: list[str] = []
        if include_baseline:
            model_names.extend(baseline_models)
        model_names.extend(mandatory_models)
        if include_classic_extras:
            model_names.extend(classic_extra_models)

        train_sample_count = len(X_train)
        deep_models_requested = {
            "lstm": include_lstm,
            "gru": include_gru,
            "temporal_cnn": include_temporal_cnn,
            "mlp": include_mlp,
        }
        # Deep nets are data-hungry; below the documented 500-sample
        # threshold they are force-disabled (with a log of what was skipped).
        if train_sample_count < 500:
            skipped = [name for name, enabled in deep_models_requested.items() if enabled]
            if skipped:
                logger.info(
                    "训练集样本 %s 条，小于文档要求的 500 条，自动跳过深度模型：%s",
                    train_sample_count,
                    ", ".join(skipped),
                )
            include_lstm = False
            include_gru = False
            include_temporal_cnn = False
            include_mlp = False

        if include_lstm:
            model_names.append("lstm")
        if include_gru:
            model_names.append("gru")

        if include_temporal_cnn:
            model_names.append("temporal_cnn")
        if include_catboost:
            model_names.append("catboost")
        if include_mlp:
            model_names.append("mlp")

        # De-duplicate while preserving order (dict keys keep insertion
        # order; the extra .keys() call was redundant).
        ordered_unique_models = list(dict.fromkeys(model_names))

        logger.info("模型训练计划：%s", ", ".join(ordered_unique_models))
        return self.train_models(ordered_unique_models, X_train, y_train, X_val, y_val)


def main() -> None:
    """Smoke-test the model trainer end to end (split, train, predict)."""
    # Hoisted once here instead of re-importing inside each except block.
    import traceback

    # Configure logging.
    logging.basicConfig(
        level=logging.INFO,
        format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
    )

    print("=" * 80)
    print("模型训练器测试")
    print("=" * 80)

    # 1. Generate synthetic test data.
    print("\n1. 生成测试数据...")
    np.random.seed(42)
    n_samples = 200
    n_features = 20

    # Feature matrix of Gaussian noise standing in for real features.
    X = pd.DataFrame(
        np.random.randn(n_samples, n_features),
        columns=[f"feature_{i}" for i in range(n_features)],
        index=pd.date_range("2023-01-01", periods=n_samples, freq="D"),
    )

    # Binary labels (0 or 1).
    y = pd.Series(
        np.random.randint(0, 2, n_samples),
        index=X.index,
        name="target_up",
    )

    print("   ✓ 测试数据生成完成")
    print(f"   样本数: {len(X)}")
    print(f"   特征数: {len(X.columns)}")
    print(f"   标签分布: {y.value_counts().to_dict()}")

    # 2. Exercise the chronological split.
    print("\n2. 测试数据切分功能...")
    trainer = ModelTrainer(random_seed=42)

    try:
        X_train, y_train, X_val, y_val, X_test, y_test = trainer.split_data(
            X, y, train_ratio=0.7, val_ratio=0.15, test_ratio=0.15
        )

        print("   ✓ 数据切分成功")
        print(f"   训练集: {len(X_train)} 条 ({len(X_train)/len(X):.1%})")
        print(f"   验证集: {len(X_val)} 条 ({len(X_val)/len(X):.1%})")
        print(f"   测试集: {len(X_test)} 条 ({len(X_test)/len(X):.1%})")
        print(f"   总计: {len(X_train) + len(X_val) + len(X_test)} 条")

        # Sanity-check that no rows were lost in the split.
        assert len(X_train) + len(X_val) + len(X_test) == len(X), "数据切分总数不匹配"
        assert len(y_train) + len(y_val) + len(y_test) == len(y), "标签切分总数不匹配"
        print("   ✓ 数据切分验证通过")

    except Exception as e:
        print(f"   ✗ 数据切分失败: {e}")
        traceback.print_exc()
        return

    # 3. Batch-train a small set of models (no LSTM, to keep the run fast).
    print("\n3. 测试批量训练模型（不包含 LSTM）...")
    try:
        model_names = ["logistic_regression", "xgboost", "lightgbm"]
        models = trainer.train_models(model_names, X_train, y_train, X_val, y_val)

        print("   ✓ 批量训练成功")
        print(f"   训练的模型数量: {len(models)}")
        print(f"   模型列表: {list(models.keys())}")

        # Every returned model must report itself as trained.
        for model_name, model in models.items():
            assert model.is_trained, f"模型 {model_name} 未正确训练"
            print(f"   ✓ {model_name}: 训练完成")

        # Spot-check prediction and feature-importance plumbing.
        print("\n   测试模型预测...")
        for model_name, model in models.items():
            try:
                y_pred_proba = model.predict_proba(X_val.iloc[:5])
                feature_importance = model.get_feature_importance()
                print(f"   ✓ {model_name}: 预测完成，概率范围 [{y_pred_proba.min():.3f}, {y_pred_proba.max():.3f}]")
                if feature_importance is not None:
                    print(f"     特征重要性: {len(feature_importance)} 个特征")
            except Exception as e:
                print(f"   ⚠ {model_name}: 预测测试失败: {e}")

    except Exception as e:
        print(f"   ✗ 批量训练失败: {e}")
        traceback.print_exc()
        return

    # 4. Train all default models (still without LSTM).
    print("\n4. 测试训练所有默认模型（不包含 LSTM）...")
    try:
        models = trainer.train_all_default_models(
            X_train, y_train, X_val, y_val, include_lstm=False
        )

        print("   ✓ 默认模型训练成功")
        print(f"   训练的模型数量: {len(models)}")
        print(f"   模型列表: {list(models.keys())}")

        # Verify every expected classic model is present and trained.
        expected_models = [
            "logistic_regression",
            "linear_regression",
            "xgboost",
            "lightgbm",
            "random_forest",
            "svm",
            "decision_tree",
        ]
        for model_name in expected_models:
            assert model_name in models, f"缺少模型: {model_name}"
            assert models[model_name].is_trained, f"模型 {model_name} 未正确训练"
            print(f"   ✓ {model_name}: 训练完成")

    except Exception as e:
        print(f"   ✗ 默认模型训练失败: {e}")
        traceback.print_exc()
        return

    # 5. Optionally include deep models when there is enough data.
    print("\n5. 测试训练所有默认模型（包含深度学习模型，可选）...")
    try:
        # Deep models need more data; gate on the training-set size.
        if len(X_train) >= 100:
            print("   数据量足够，开始包含深度学习模型的训练...")
            models = trainer.train_all_default_models(
                X_train,
                y_train,
                X_val,
                y_val,
                include_lstm=True,
                include_gru=True,
                include_temporal_cnn=True,
            )

            print("   ✓ 包含深度学习模型的默认模型训练成功")
            print(f"   训练的模型数量: {len(models)}")
            print(f"   模型列表: {list(models.keys())}")

            # Deep models may legitimately be skipped by the trainer.
            dl_models = ["lstm", "gru", "temporal_cnn"]
            for dl_model in dl_models:
                if dl_model in models:
                    assert models[dl_model].is_trained, f"{dl_model.upper()} 模型未正确训练"
                    print(f"   ✓ {dl_model.upper()}: 训练完成")
                else:
                    print(f"   ⚠ {dl_model.upper()} 未出现在结果中")
        else:
            print("   ⚠ 数据量不足（需要 >= 100 条），跳过深度学习模型测试")

    except Exception as e:
        print(f"   ⚠ 深度学习模型测试跳过: {e}")

    # 6. Error-handling paths.
    print("\n6. 测试错误处理...")
    try:
        # Ratios that do not sum to 1.0 must raise ValueError.
        try:
            trainer.split_data(X, y, train_ratio=0.5, val_ratio=0.3, test_ratio=0.3)
            print("   ✗ 应该抛出异常但没有")
        except ValueError as e:
            print(f"   ✓ 正确抛出异常: {e}")

        # An unknown model name must raise from the factory.
        try:
            trainer.train_models(["invalid_model"], X_train, y_train, X_val, y_val)
            print("   ✗ 应该抛出异常但没有")
        except Exception as e:
            print(f"   ✓ 正确抛出异常: {type(e).__name__}")

    except Exception as e:
        print(f"   ⚠ 错误处理测试失败: {e}")

    print("\n" + "=" * 80)
    print("测试完成！")
    print("=" * 80)

if __name__ == "__main__":
    import sys

    # The relative imports at the top of this file only resolve when the
    # module is executed with package context (python -m ...), so refuse to
    # run as a bare script.
    if __package__:
        main()
    else:
        for _msg in (
            "错误: 请使用以下命令运行测试:",
            "  python -m src.models.trainer",
            "或者:",
            "  uv run python -m src.models.trainer",
        ):
            print(_msg)
        sys.exit(1)
