"""模型诊断相关工具函数."""

from __future__ import annotations

import logging
from typing import Any, Dict, List, Tuple

import numpy as np
import pandas as pd
from sklearn.model_selection import TimeSeriesSplit

from ...models.factory import ModelFactory
from ...backtest import calculate_model_metrics
from .evaluation import safe_binary_metrics

logger = logging.getLogger(__name__)


def analyze_learning_curve(
    train_true: np.ndarray,
    train_pred: np.ndarray,
    val_true: np.ndarray,
    val_pred: np.ndarray,
) -> Tuple[Dict[str, Dict[str, Dict[str, float | None]]], List[str]]:
    """Split train/val metrics into early/late halves to detect overfitting.

    Each of the train and validation series is split at its midpoint; binary
    metrics (auc/accuracy/f1) are computed on both halves.  A "train improves
    while validation degrades" pattern is reported as an overfitting alert.

    Args:
        train_true: Ground-truth labels of the training set (time-ordered).
        train_pred: Predicted scores for the training set.
        val_true: Ground-truth labels of the validation set (time-ordered).
        val_pred: Predicted scores for the validation set.

    Returns:
        Tuple of (stats, alerts) where ``stats`` maps ``train``/``val`` to
        ``early``/``late``/``delta`` metric dicts (empty when a split has
        fewer than 20 samples), and ``alerts`` lists human-readable warnings.
    """
    alerts: List[str] = []

    def _slice_metrics(
        values: np.ndarray, preds: np.ndarray
    ) -> Tuple[Dict[str, float | None], Dict[str, float | None], Dict[str, float | None]]:
        # Too few samples to split meaningfully — return empty metric dicts.
        if len(values) < 20:
            return {}, {}, {}
        mid = len(values) // 2
        early = safe_binary_metrics(values[:mid], preds[:mid])
        late = safe_binary_metrics(values[mid:], preds[mid:])
        delta: Dict[str, float | None] = {}
        for key in ("auc", "accuracy", "f1"):
            early_val = early.get(key)
            late_val = late.get(key)
            # Delta is only defined when the metric exists on both halves.
            delta[key] = (
                float(late_val - early_val)
                if (late_val is not None and early_val is not None)
                else None
            )
        return early, late, delta

    train_early, train_late, train_delta = _slice_metrics(train_true, train_pred)
    val_early, val_late, val_delta = _slice_metrics(val_true, val_pred)
    # Build the stats payload once instead of pre-seeding placeholders that
    # would immediately be overwritten.
    stats: Dict[str, Dict[str, Dict[str, float | None]]] = {
        "train": {"early": train_early, "late": train_late, "delta": train_delta},
        "val": {"early": val_early, "late": val_late, "delta": val_delta},
    }

    for metric_name in ("auc", "accuracy", "f1"):
        train_gap = train_delta.get(metric_name)
        val_gap = val_delta.get(metric_name)
        # Train trending up while validation trends down is the classic
        # learning-curve signature of overfitting.
        if (
            train_gap is not None
            and val_gap is not None
            and train_gap > 0.02
            and val_gap < -0.02
        ):
            alerts.append(
                f"{metric_name.upper()} 学习曲线出现训练上升但验证下降（训练Δ={train_gap:.3f}, 验证Δ={val_gap:.3f}）"
            )
    return stats, alerts


def run_time_series_cv(
    model_name: str,
    X: pd.DataFrame,
    y: pd.Series,
    n_splits: int = 5,
) -> Dict[str, Any]:
    """Assess model stability with walk-forward TimeSeriesSplit CV.

    Trains a fresh model per fold and aggregates per-fold AUC into mean,
    standard deviation, and coefficient of variation.  Deep-learning models
    and undersized datasets are skipped with an explanatory reason.

    Args:
        model_name: Key understood by ``ModelFactory.create``.
        X: Time-ordered feature frame.
        y: Time-ordered binary target aligned with ``X``.
        n_splits: Number of TimeSeriesSplit folds (default 5).

    Returns:
        Dict with ``skipped``/``reason`` on early exit, otherwise fold-level
        metrics plus ``mean_auc``/``std_auc``/``cv_ratio`` and ``alerts``.
    """
    # Not enough samples to form n_splits expanding train/val windows.
    if len(X) < n_splits * 2:
        return {
            "skipped": True,
            "reason": f"样本数 {len(X)} 小于 2*n_splits，无法执行 TimeSeriesSplit({n_splits})",
        }
    # Deep models are too expensive to retrain once per fold.
    if model_name in {"lstm", "gru", "temporal_cnn", "cnn_1d"}:
        return {
            "skipped": True,
            "reason": "深度学习模型交叉验证成本过高，跳过 TimeSeriesSplit。",
        }

    folds: List[Dict[str, float]] = []
    cv = TimeSeriesSplit(n_splits=n_splits)

    for fold_no, (tr_idx, va_idx) in enumerate(cv.split(X), start=1):
        # Fresh model each fold so folds never share fitted state.
        estimator = ModelFactory.create(model_name)
        X_tr, y_tr = X.iloc[tr_idx], y.iloc[tr_idx]
        X_va, y_va = X.iloc[va_idx], y.iloc[va_idx]
        try:
            estimator.train(X_tr, y_tr, X_va, y_va)
            proba = estimator.predict_proba(X_va)
            metric = calculate_model_metrics(y_va.values, proba, None)
            folds.append(
                {
                    "fold": float(fold_no),
                    "auc": float(metric.auc),
                    "accuracy": float(metric.accuracy),
                    "f1": float(metric.f1),
                }
            )
        except Exception as exc:  # noqa: BLE001
            # Any single failed fold invalidates the whole CV run.
            logger.warning("模型 %s 在时间序列交叉验证第 %s 折失败: %s", model_name, fold_no, exc)
            return {
                "skipped": True,
                "reason": f"第 {fold_no} 折训练失败: {exc}",
            }

    auc_series = [fold["auc"] for fold in folds if fold.get("auc") is not None]
    if auc_series:
        mean_auc = float(np.mean(auc_series))
        std_auc = float(np.std(auc_series))
        # Coefficient of variation; tiny epsilon floor avoids div-by-zero.
        cv_ratio = float(std_auc / max(abs(mean_auc), 1e-6))
    else:
        mean_auc = None
        std_auc = None
        cv_ratio = None

    alerts: List[str] = []
    if mean_auc is not None and mean_auc < 0.55:
        alerts.append(f"AUC 均值 {mean_auc:.3f} < 0.55，预测能力不足")
    if cv_ratio is not None and cv_ratio > 0.15:
        alerts.append(f"AUC 变异系数 {cv_ratio:.2%} > 15%，模型稳定性不足")

    return {
        "skipped": False,
        "folds": folds,
        "mean_auc": mean_auc,
        "std_auc": std_auc,
        "cv_ratio": cv_ratio,
        "alerts": alerts,
    }


def diagnose_overfitting(
    model_name: str,
    model,
    train_metrics,
    val_metrics,
    auc_diff: float,
    accuracy_diff: float,
    f1_diff: float,
    feature_count: int,
    sample_count: int,
) -> Dict[str, Any]:
    """Combine multiple signals into an overfitting severity and advice.

    Severity is the max over triggered rules (0=none, 1=mild, 2=moderate,
    3=severe); recommendations accumulate with severity.

    Args:
        model_name: Name of the evaluated model (echoed in the result).
        model: Fitted model; only ``model.config.params`` is inspected, if present.
        train_metrics: Object exposing ``accuracy`` and ``auc`` for the train set.
        val_metrics: Object exposing ``accuracy`` and ``auc`` for the validation set.
        auc_diff: train AUC minus validation AUC.
        accuracy_diff: train accuracy minus validation accuracy.
        f1_diff: train F1 minus validation F1.
        feature_count: Number of input features.
        sample_count: Number of training samples.

    Returns:
        Dict with ``severity`` (label), ``severity_score``, ``triggers``,
        ``recommendations``, and the echoed counts/model name.
    """
    triggers: List[str] = []
    recommendations: List[str] = []
    severity_score = 0

    train_acc = train_metrics.accuracy
    val_acc = val_metrics.accuracy
    train_auc = train_metrics.auc
    val_auc = val_metrics.auc

    if accuracy_diff > 0.10:
        triggers.append(f"训练-验证准确率差 {accuracy_diff:.2%} > 10%")
        severity_score = max(severity_score, 1)
    elif accuracy_diff > 0.08:
        triggers.append(f"训练-验证准确率差 {accuracy_diff:.2%} > 8%")
        severity_score = max(severity_score, 1)

    if f1_diff > 0.08:
        triggers.append(f"训练-验证 F1 差 {f1_diff:.2%} > 8%")
        severity_score = max(severity_score, 1)

    if auc_diff > 0.15:
        triggers.append(f"训练-验证 AUC 差 {auc_diff:.3f} > 0.15")
        severity_score = max(severity_score, 2)
    elif auc_diff > 0.10:
        triggers.append(f"训练-验证 AUC 差 {auc_diff:.3f} > 0.10")
        severity_score = max(severity_score, 1)

    if train_acc is not None and val_acc is not None:
        if train_acc > 0.85 and val_acc < 0.60:
            triggers.append("训练准确率>85% 且验证准确率<60%（严重过拟合）")
            severity_score = max(severity_score, 3)
    # Guard against missing AUC the same way accuracy is guarded above;
    # comparing None with a float raises TypeError in Python 3.
    if train_auc is not None and val_auc is not None:
        if train_auc > 0.90 and val_auc < 0.65:
            triggers.append("训练 AUC>0.90 且验证 AUC<0.65（严重过拟合）")
            severity_score = max(severity_score, 3)

    # Rough rule of thumb: more than one feature per ten samples risks overfit.
    if feature_count > max(sample_count / 10, 1):
        triggers.append(
            f"特征数量 {feature_count} > 样本数量/10（{sample_count/10:.1f}），存在特征过多风险"
        )
        severity_score = max(severity_score, 1)

    # Inspect hyper-parameters only when the model exposes config.params.
    model_params = getattr(model, "config", None)
    if model_params and hasattr(model_params, "params"):
        params = model_params.params
        max_depth = params.get("max_depth")
        n_estimators = params.get("n_estimators")
        if max_depth is not None and max_depth > 6:
            triggers.append(f"max_depth={max_depth} > 6，模型可能过复杂")
            severity_score = max(severity_score, 1)
        if n_estimators is not None and n_estimators > 200:
            triggers.append(f"n_estimators={n_estimators} > 200，存在过拟合风险")
            severity_score = max(severity_score, 1)

    severity_map = {0: "无", 1: "轻微", 2: "中等", 3: "严重"}
    severity = severity_map.get(severity_score, "无")

    if severity_score >= 1:
        recommendations.append("增加正则化力度（reg_lambda/weight_decay）")
        recommendations.append("降低模型复杂度（max_depth、n_estimators 或隐藏层规模）")
    if severity_score >= 2:
        recommendations.append("重新运行特征选择，剔除冗余特征")
        recommendations.append("扩充训练数据量或采用时间窗口滚动训练")
    if severity_score >= 3:
        recommendations.append("停止使用该模型，优先选择稳定模型并重新评估参数")

    return {
        "severity": severity,
        "triggers": triggers,
        "recommendations": recommendations,
        "feature_count": feature_count,
        "sample_count": sample_count,
        "severity_score": severity_score,
        "model_name": model_name,
    }

