"""模型诊断相关工具函数."""

from __future__ import annotations

import logging
from typing import Any, Dict, List, Tuple

import numpy as np
import pandas as pd
from sklearn.model_selection import TimeSeriesSplit

from ..models.factory import ModelFactory
from ..backtest import calculate_model_metrics
from .evaluation import safe_binary_metrics

logger = logging.getLogger(__name__)


def analyze_learning_curve(
    train_true: np.ndarray,
    train_pred: np.ndarray,
    val_true: np.ndarray,
    val_pred: np.ndarray,
) -> Tuple[Dict[str, Dict[str, Dict[str, float | None]]], List[str]]:
    """Split train/val series into first and second halves and compare metrics.

    For each of AUC/accuracy/F1, computes the metric on the early half and the
    late half of both the training and validation series, plus the late-early
    delta. An alert is raised when a metric rises on training (delta > 0.02)
    while falling on validation (delta < -0.02) — a classic overfitting signal.

    Returns
    -------
    Tuple[dict, list]
        ``(stats, alerts)`` where ``stats`` has ``train``/``val`` keys, each
        with ``early``/``late``/``delta`` metric dicts, and ``alerts`` is a
        list of human-readable warning strings.
    """
    tracked = ("auc", "accuracy", "f1")

    def _halves(
        y_true: np.ndarray, y_score: np.ndarray
    ) -> Tuple[Dict[str, float | None], Dict[str, float | None], Dict[str, float | None]]:
        # Too few samples to split meaningfully: return empty metric dicts.
        if len(y_true) < 20:
            return {}, {}, {}
        half = len(y_true) // 2
        first = safe_binary_metrics(y_true[:half], y_score[:half])
        second = safe_binary_metrics(y_true[half:], y_score[half:])
        diff: Dict[str, float | None] = {}
        for name in tracked:
            a = first.get(name)
            b = second.get(name)
            diff[name] = float(b - a) if (a is not None and b is not None) else None
        return first, second, diff

    tr_early, tr_late, tr_delta = _halves(train_true, train_pred)
    va_early, va_late, va_delta = _halves(val_true, val_pred)

    stats: Dict[str, Dict[str, Dict[str, float | None]]] = {
        "train": {"early": tr_early, "late": tr_late, "delta": tr_delta},
        "val": {"early": va_early, "late": va_late, "delta": va_delta},
    }

    alerts: List[str] = []
    for metric_name in tracked:
        train_gap = tr_delta.get(metric_name)
        val_gap = va_delta.get(metric_name)
        if train_gap is None or val_gap is None:
            continue
        if train_gap > 0.02 and val_gap < -0.02:
            alerts.append(
                f"{metric_name.upper()} 学习曲线出现训练上升但验证下降（训练Δ={train_gap:.3f}, 验证Δ={val_gap:.3f}）"
            )
    return stats, alerts


def run_time_series_cv(
    model_name: str,
    X: pd.DataFrame,
    y: pd.Series,
    n_splits: int = 5,
) -> Dict[str, Any]:
    """Assess model stability with TimeSeriesSplit cross-validation.

    Parameters
    ----------
    model_name:
        Registered model name passed to ``ModelFactory.create``.
    X:
        Time-ordered feature frame.
    y:
        Labels aligned with ``X``.
    n_splits:
        Requested fold count. Deep-learning models are capped at 3 folds to
        limit training cost. (Fix: the caller-supplied value used to be
        silently overwritten — always 3 for DL models and 5 for everything
        else — making this parameter a no-op; it is now honored.)

    Returns
    -------
    Dict[str, Any]
        Per-fold metrics plus AUC mean/std, coefficient of variation and
        alert strings, or ``{"skipped": True, "reason": ...}`` when CV
        cannot run (too few samples or a fold failed).
    """
    if len(X) < n_splits * 2:
        return {
            "skipped": True,
            "reason": f"样本数 {len(X)} 小于 2*n_splits，无法执行 TimeSeriesSplit({n_splits})",
        }

    # Deep-learning models are expensive per fold: cap at 3 folds (but still
    # honor a smaller caller-supplied n_splits) to reduce compute cost while
    # keeping a stability estimate.
    if model_name in {"lstm", "gru", "temporal_cnn", "cnn_1d"}:
        n_splits = min(n_splits, 3)
        if len(X) < n_splits * 2:
            return {
                "skipped": True,
                "reason": f"样本数 {len(X)} 不足，无法执行深度学习模型的 TimeSeriesSplit({n_splits})",
            }
        logger.debug("深度学习模型 %s 使用 %s 折交叉验证（减少计算成本）", model_name, n_splits)

    splitter = TimeSeriesSplit(n_splits=n_splits)
    fold_results: List[Dict[str, float]] = []

    for fold_idx, (train_idx, val_idx) in enumerate(splitter.split(X), start=1):
        # A fresh model per fold so folds do not share fitted state.
        model = ModelFactory.create(model_name)
        X_train_fold = X.iloc[train_idx]
        y_train_fold = y.iloc[train_idx]
        X_val_fold = X.iloc[val_idx]
        y_val_fold = y.iloc[val_idx]
        try:
            model.train(X_train_fold, y_train_fold, X_val_fold, y_val_fold)
            y_val_pred = model.predict_proba(X_val_fold)
            fold_metric = calculate_model_metrics(y_val_fold.values, y_val_pred, None)
            fold_results.append(
                {
                    "fold": float(fold_idx),
                    "auc": float(fold_metric.auc),
                    "accuracy": float(fold_metric.accuracy),
                    "f1": float(fold_metric.f1),
                }
            )
        except Exception as exc:  # noqa: BLE001
            # Abort the whole CV on the first failing fold; partial results
            # would bias the stability statistics.
            logger.warning("模型 %s 在时间序列交叉验证第 %s 折失败: %s", model_name, fold_idx, exc)
            return {
                "skipped": True,
                "reason": f"第 {fold_idx} 折训练失败: {exc}",
            }

    auc_values = [res["auc"] for res in fold_results if res.get("auc") is not None]
    mean_auc = float(np.mean(auc_values)) if auc_values else None
    std_auc = float(np.std(auc_values)) if auc_values else None
    # Coefficient of variation; denominator floored at 1e-6 to avoid
    # division by zero when mean AUC is ~0.
    cv_ratio = (
        float(std_auc / max(abs(mean_auc), 1e-6))
        if (mean_auc is not None and std_auc is not None)
        else None
    )

    alerts: List[str] = []
    if mean_auc is not None and mean_auc < 0.55:
        alerts.append(f"AUC 均值 {mean_auc:.3f} < 0.55，预测能力不足")
    if cv_ratio is not None and cv_ratio > 0.15:
        alerts.append(f"AUC 变异系数 {cv_ratio:.2%} > 15%，模型稳定性不足")

    # Log the CV outcome (lazy %-style args so formatting is skipped when
    # the log level filters the record out).
    if not fold_results:
        logger.warning("模型 %s 时间序列交叉验证未获得有效结果", model_name)
    else:
        mean_str = f"{mean_auc:.4f}" if mean_auc is not None else "N/A"
        std_str = f"{std_auc:.4f}" if std_auc is not None else "N/A"
        cv_str = f"{cv_ratio:.2%}" if cv_ratio is not None else "N/A"
        logger.info(
            "模型 %s 时间序列交叉验证完成: %s折，AUC均值=%s±%s，变异系数=%s",
            model_name,
            n_splits,
            mean_str,
            std_str,
            cv_str,
        )
        if alerts:
            logger.warning("模型 %s 交叉验证警告: %s", model_name, ", ".join(alerts))

    return {
        "skipped": False,
        "n_splits": n_splits,  # fold count actually used
        "folds": fold_results,
        "mean_auc": mean_auc,
        "std_auc": std_auc,
        "cv_ratio": cv_ratio,
        "alerts": alerts,
    }


def diagnose_overfitting(
    model_name: str,
    model,
    train_metrics,
    val_metrics,
    auc_diff: float,
    accuracy_diff: float,
    f1_diff: float,
    feature_count: int,
    sample_count: int,
    early_stop_info: Dict[str, Any] | None = None,
) -> Dict[str, Any]:
    """结合多指标给出过拟合严重程度与建议.
    
    Parameters
    ----------
    model_name:
        模型名称
    model:
        模型对象
    train_metrics:
        训练集指标
    val_metrics:
        验证集指标
    auc_diff:
        训练集和验证集 AUC 差异
    accuracy_diff:
        训练集和验证集准确率差异
    f1_diff:
        训练集和验证集 F1 差异
    feature_count:
        特征数量
    sample_count:
        样本数量
    early_stop_info:
        早停信息字典，包含：
        - triggered: 是否触发早停
        - best_iteration/best_epoch: 最佳迭代/epoch
        - total_iterations/total_epochs: 总迭代/epoch数
    """
    triggers: List[str] = []
    recommendations: List[str] = []
    severity_score = 0
    underfitting_warnings: List[str] = []  # 欠拟合警告

    train_acc = train_metrics.accuracy
    val_acc = val_metrics.accuracy
    train_auc = train_metrics.auc
    val_auc = val_metrics.auc

    if accuracy_diff > 0.10:
        triggers.append(f"训练-验证准确率差 {accuracy_diff:.2%} > 10%")
        severity_score = max(severity_score, 1)
    elif accuracy_diff > 0.08:
        triggers.append(f"训练-验证准确率差 {accuracy_diff:.2%} > 8%")
        severity_score = max(severity_score, 1)

    if f1_diff > 0.08:
        triggers.append(f"训练-验证 F1 差 {f1_diff:.2%} > 8%")
        severity_score = max(severity_score, 1)

    if auc_diff > 0.15:
        triggers.append(f"训练-验证 AUC 差 {auc_diff:.3f} > 0.15")
        severity_score = max(severity_score, 2)
    elif auc_diff > 0.10:
        triggers.append(f"训练-验证 AUC 差 {auc_diff:.3f} > 0.10")
        severity_score = max(severity_score, 1)

    if train_acc is not None and val_acc is not None:
        if train_acc > 0.85 and val_acc < 0.60:
            triggers.append("训练准确率>85% 且验证准确率<60%（严重过拟合）")
            severity_score = max(severity_score, 3)
    if train_auc > 0.90 and val_auc < 0.65:
        triggers.append("训练 AUC>0.90 且验证 AUC<0.65（严重过拟合）")
        severity_score = max(severity_score, 3)

    if feature_count > max(sample_count / 10, 1):
        triggers.append(
            f"特征数量 {feature_count} > 样本数量/10（{sample_count/10:.1f}），存在特征过多风险"
        )
        severity_score = max(severity_score, 1)

    model_params = getattr(model, "config", None)
    if model_params and hasattr(model_params, "params"):
        params = model_params.params
        max_depth = params.get("max_depth")
        n_estimators = params.get("n_estimators")
        if max_depth is not None and max_depth > 6:
            triggers.append(f"max_depth={max_depth} > 6，模型可能过复杂")
            severity_score = max(severity_score, 1)
        if n_estimators is not None and n_estimators > 200:
            triggers.append(f"n_estimators={n_estimators} > 200，存在过拟合风险")
            severity_score = max(severity_score, 1)

    # 早停触发过早检测（<10轮时标记欠拟合）
    if early_stop_info is not None and early_stop_info.get("triggered", False):
        best_iteration = early_stop_info.get("best_iteration") or early_stop_info.get("best_epoch")
        total_iterations = early_stop_info.get("total_iterations") or early_stop_info.get("total_epochs")
        
        if best_iteration is not None and total_iterations is not None:
            # 计算早停触发的轮数（从最佳迭代到总迭代）
            early_stop_rounds = total_iterations - best_iteration
            if early_stop_rounds < 10:
                underfitting_warnings.append(
                    f"早停触发过早（{early_stop_rounds} 轮 < 10 轮），模型可能欠拟合，"
                    f"建议降低正则化或增加模型容量"
                )
                # 欠拟合时，建议降低正则化
                if "降低正则化" not in recommendations:
                    recommendations.append("降低正则化强度（降低 reg_lambda/weight_decay）")
                if "增加模型容量" not in recommendations:
                    recommendations.append("增加模型容量（提高 max_depth/n_estimators 或隐藏层规模）")

    severity_map = {0: "无", 1: "轻微", 2: "中等", 3: "严重"}
    severity = severity_map.get(severity_score, "无")

    if severity_score >= 1:
        recommendations.append("增加正则化力度（reg_lambda/weight_decay）")
        recommendations.append("降低模型复杂度（max_depth、n_estimators 或隐藏层规模）")
    if severity_score >= 2:
        recommendations.append("重新运行特征选择，剔除冗余特征")
        recommendations.append("扩充训练数据量或采用时间窗口滚动训练")
    if severity_score >= 3:
        recommendations.append("停止使用该模型，优先选择稳定模型并重新评估参数")

    return {
        "severity": severity,
        "triggers": triggers,
        "recommendations": recommendations,
        "feature_count": feature_count,
        "sample_count": sample_count,
        "severity_score": severity_score,
        "model_name": model_name,
        "underfitting_warnings": underfitting_warnings,  # 添加欠拟合警告
        "early_stop_info": early_stop_info,  # 保留早停信息
    }


def test_sequence_length_sensitivity(
    model_name: str,
    X_train: pd.DataFrame,
    y_train: pd.Series,
    X_val: pd.DataFrame,
    y_val: pd.Series,
    sequence_lengths: List[int] | None = None,
) -> Dict[str, Any]:
    """Test sequence-length sensitivity (for time-series models only).

    Re-trains the model under each candidate ``sequence_len`` and compares
    validation performance; a relative AUC spread greater than 10% flags the
    model as sequence-length sensitive.

    Parameters
    ----------
    model_name:
        Model name (must be lstm, gru, temporal_cnn, or the compatible cnn_1d).
    X_train:
        Training feature data.
    y_train:
        Training labels.
    X_val:
        Validation feature data.
    y_val:
        Validation labels.
    sequence_lengths:
        Sequence lengths to test (defaults to [10, 20, 30, 60]).

    Returns
    -------
    Dict[str, Any]
        Result dictionary containing:
        - skipped: whether the test was skipped
        - results: performance per sequence length
        - sensitivity_detected: whether sensitivity was detected
        - alerts: warning messages
    """
    # Only time-series models are tested; everything else is skipped outright.
    if model_name not in {"lstm", "gru", "temporal_cnn", "cnn_1d"}:
        return {
            "skipped": True,
            "reason": f"模型 {model_name} 不是时间序列模型，跳过序列长度敏感性测试",
        }
    
    if sequence_lengths is None:
        sequence_lengths = [10, 20, 30, 60]
    
    results: List[Dict[str, Any]] = []
    auc_values: List[float] = []
    
    for seq_len in sequence_lengths:
        try:
            # Skip this length if the validation set is shorter than one sequence.
            if len(X_val) < seq_len:
                logger.warning(
                    f"验证集数据不足，跳过序列长度 {seq_len} 的测试 "
                    f"（验证集 {len(X_val)} 条 < 序列长度 {seq_len}）"
                )
                continue
            
            # Same check for the training set.
            if len(X_train) < seq_len:
                logger.warning(
                    f"训练集数据不足，跳过序列长度 {seq_len} 的测试 "
                    f"（训练集 {len(X_train)} 条 < 序列长度 {seq_len}）"
                )
                continue
            
            # Build a fresh model and inject the candidate sequence length
            # (only when the model exposes config.params).
            model = ModelFactory.create(model_name)
            if hasattr(model, "config") and hasattr(model.config, "params"):
                model.config.params["sequence_len"] = seq_len
                # Force batch size to at least 2 to avoid single-sample
                # dimension issues during training.
                if "batch_size" in model.config.params:
                    original_batch_size = model.config.params["batch_size"]
                    if original_batch_size == 1:
                        # NOTE(review): min(2, len(X_train) // 2) can be < 2 for
                        # tiny training sets — presumably acceptable; confirm.
                        model.config.params["batch_size"] = min(2, len(X_train) // 2)
                        logger.debug(
                            f"调整批次大小从 {original_batch_size} 到 {model.config.params['batch_size']} "
                            f"以避免维度不匹配问题"
                        )
            
            # Train; dimension-mismatch errors are detected by message text and
            # downgraded to a skip, everything else is re-raised.
            try:
                model.train(X_train, y_train, X_val, y_val)
            except (RuntimeError, ValueError) as e:
                error_msg = str(e)
                if "target size" in error_msg and "input size" in error_msg:
                    logger.warning(
                        f"序列长度 {seq_len} 测试失败（维度不匹配）: {e}。"
                        f"这可能是由于批次大小为1导致的，跳过此序列长度测试。"
                    )
                    continue
                else:
                    # Any other training error aborts via the outer handler.
                    raise
            
            # Evaluate on the validation set.
            # NOTE: deep-learning models may return fewer predictions than
            # inputs because sequence construction consumes the first
            # seq_len rows as context.
            y_val_pred = model.predict_proba(X_val)
            
            # Align labels with predictions (sequence creation shrinks the
            # sample count for deep-learning models).
            from ..utils import align_predictions
            y_val_aligned, y_val_pred_aligned = align_predictions(
                y_val.values,
                y_val_pred,
                model,
            )
            
            val_metrics = calculate_model_metrics(y_val_aligned, y_val_pred_aligned, None)
            
            results.append({
                "sequence_length": seq_len,
                "auc": float(val_metrics.auc),
                "accuracy": float(val_metrics.accuracy) if val_metrics.accuracy is not None else None,
                "f1": float(val_metrics.f1) if val_metrics.f1 is not None else None,
            })
            auc_values.append(float(val_metrics.auc))
            
        except Exception as exc:
            # Any unexpected failure aborts the whole sensitivity test,
            # discarding lengths already measured.
            logger.warning(
                "序列长度敏感性测试失败（sequence_length=%s）: %s", seq_len, exc
            )
            return {
                "skipped": True,
                "reason": f"序列长度 {seq_len} 测试失败: {exc}",
            }
    
    if not auc_values:
        return {
            "skipped": True,
            "reason": "所有序列长度测试均失败",
        }
    
    # Relative AUC spread across sequence lengths (denominator floored at
    # 1e-6 to avoid division by zero).
    max_auc = max(auc_values)
    min_auc = min(auc_values)
    auc_range = max_auc - min_auc
    auc_mean = np.mean(auc_values)
    sensitivity_ratio = auc_range / max(auc_mean, 1e-6) if auc_mean > 0 else 0.0
    
    alerts: List[str] = []
    sensitivity_detected = False
    
    if sensitivity_ratio > 0.10:  # spread > 10% flags sensitivity
        sensitivity_detected = True
        alerts.append(
            f"序列长度敏感性检测：AUC 差异 {sensitivity_ratio:.2%} > 10%，"
            f"模型对序列长度敏感（范围: {min_auc:.3f} - {max_auc:.3f}）"
        )
    
    return {
        "skipped": False,
        "results": results,
        "sensitivity_detected": sensitivity_detected,
        "sensitivity_ratio": float(sensitivity_ratio),
        "auc_range": float(auc_range),
        "alerts": alerts,
    }


def test_temporal_decay(
    model,
    X_val: pd.DataFrame,
    y_val: pd.Series,
) -> Dict[str, Any]:
    """Test temporal decay (performance across validation-set time segments).

    Splits the validation set into early / middle / recent thirds and scores
    each with the already-trained model. A clear drop in the recent segment
    flags temporal-decay risk.

    Parameters
    ----------
    model:
        Trained model object.
    X_val:
        Validation feature data (must be sorted by time).
    y_val:
        Validation labels (must be sorted by time).

    Returns
    -------
    Dict[str, Any]
        Result dictionary containing:
        - skipped: whether the test was skipped
        - segments: per-segment performance results
        - decay_detected: whether temporal decay was detected
        - alerts: warning messages
    """
    if len(X_val) < 30:
        return {
            "skipped": True,
            "reason": f"验证集样本数 {len(X_val)} < 30，无法进行时间衰减测试",
        }
    
    # Split the validation set into thirds: early, middle, recent
    # ("recent" absorbs the remainder rows).
    n = len(X_val)
    segment_size = n // 3
    
    segments = {
        "early": (0, segment_size),
        "middle": (segment_size, 2 * segment_size),
        "recent": (2 * segment_size, n),
    }
    
    segment_results: Dict[str, Dict[str, float]] = {}
    auc_values: List[float] = []
    
    for segment_name, (start, end) in segments.items():
        X_segment = X_val.iloc[start:end]
        y_segment = y_val.iloc[start:end]
        
        if len(X_segment) == 0:
            continue
        
        try:
            # Deep-learning models need at least sequence_len rows per
            # segment; read the requirement from config.params if present.
            sequence_len = 1  # default for non-sequence models
            if hasattr(model, "config") and hasattr(model.config, "params"):
                sequence_len = model.config.params.get("sequence_len", 1)
            
            if len(X_segment) < sequence_len:
                logger.warning(
                    f"时间段 {segment_name} 样本数 {len(X_segment)} < 序列长度 {sequence_len}，跳过"
                )
                continue
            
            y_pred = model.predict_proba(X_segment)
            
            # Align labels with predictions (deep-learning models may return
            # fewer predictions than inputs).
            from ..utils import align_predictions
            y_segment_aligned, y_pred_aligned = align_predictions(
                y_segment.values,
                y_pred,
                model,
            )
            
            # Defensive length check after alignment.
            if len(y_segment_aligned) != len(y_pred_aligned):
                logger.warning(
                    f"时间段 {segment_name} 对齐后数据长度不一致: "
                    f"标签 {len(y_segment_aligned)} vs 预测 {len(y_pred_aligned)}，跳过"
                )
                continue
            
            metrics = calculate_model_metrics(y_segment_aligned, y_pred_aligned, None)
            
            segment_results[segment_name] = {
                "auc": float(metrics.auc),
                "accuracy": float(metrics.accuracy) if metrics.accuracy is not None else None,
                "f1": float(metrics.f1) if metrics.f1 is not None else None,
                "sample_count": len(X_segment),
            }
            auc_values.append(float(metrics.auc))
            
        except Exception as exc:
            # Any segment failure aborts the whole decay test.
            logger.warning(
                "时间衰减测试失败（时间段 %s）: %s", segment_name, exc
            )
            return {
                "skipped": True,
                "reason": f"时间段 {segment_name} 测试失败: {exc}",
            }
    
    # All three segments are required for the decay comparison.
    if len(segment_results) < 3:
        return {
            "skipped": True,
            "reason": "无法计算所有时间段的性能",
        }
    
    # Decay check: is recent performance clearly below the early/middle
    # average? (0.5 defaults are unreachable here given the length check.)
    early_auc = segment_results.get("early", {}).get("auc", 0.5)
    middle_auc = segment_results.get("middle", {}).get("auc", 0.5)
    recent_auc = segment_results.get("recent", {}).get("auc", 0.5)
    
    avg_early_middle = (early_auc + middle_auc) / 2
    decay_ratio = (avg_early_middle - recent_auc) / max(avg_early_middle, 1e-6)
    
    alerts: List[str] = []
    decay_detected = False
    
    if decay_ratio > 0.10:  # recent performance dropped by > 10%
        decay_detected = True
        alerts.append(
            f"时间衰减检测：近期性能（AUC={recent_auc:.3f}）明显低于早期/中期平均（AUC={avg_early_middle:.3f}），"
            f"下降幅度 {decay_ratio:.2%} > 10%，模型可能存在时间衰减风险"
        )
    
    return {
        "skipped": False,
        "segments": segment_results,
        "decay_detected": decay_detected,
        "decay_ratio": float(decay_ratio),
        "alerts": alerts,
    }


def test_prediction_stability(
    model,
    X_val: pd.DataFrame,
    n_samples: int = 10,
    variance_threshold: float = 0.1,
) -> Dict[str, Any]:
    """测试预测稳定性（多次预测方差）.
    
    对同一输入进行多次预测（启用 dropout），计算预测方差。
    预测方差>0.1时标记模型不稳定。
    
    Parameters
    ----------
    model:
        已训练的模型对象（必须是深度学习模型）
    X_val:
        验证特征数据
    n_samples:
        重复预测次数（默认10次）
    variance_threshold:
        方差阈值（默认0.1）
        
    Returns
    -------
    Dict[str, Any]
        包含测试结果的字典：
        - skipped: 是否跳过测试
        - mean_variance: 平均预测方差
        - max_variance: 最大预测方差
        - instability_detected: 是否检测到不稳定
        - alerts: 警告信息列表
    """
    # 只对深度学习模型进行测试（需要 dropout）
    model_name = getattr(model, "config", {}).name if hasattr(model, "config") else ""
    if model_name not in {"lstm", "gru", "temporal_cnn", "cnn_1d", "mlp"}:
        return {
            "skipped": True,
            "reason": f"模型 {model_name} 不是深度学习模型，跳过预测稳定性测试",
        }
    
    if len(X_val) == 0:
        return {
            "skipped": True,
            "reason": "验证集为空",
        }
    
    # 选择少量样本进行测试（避免计算成本过高）
    test_samples = min(50, len(X_val))
    X_test = X_val.iloc[:test_samples]
    
    # 多次预测（启用 dropout，模型处于训练模式）
    predictions_list: List[np.ndarray] = []
    
    try:
        # 对于 PyTorch 模型，需要设置为训练模式以启用 dropout
        if hasattr(model, "model") and hasattr(model.model, "train"):
            model.model.train()  # 启用 dropout
        
        for _ in range(n_samples):
            y_pred = model.predict_proba(X_test)
            predictions_list.append(y_pred)
        
        # 恢复评估模式
        if hasattr(model, "model") and hasattr(model.model, "eval"):
            model.model.eval()
            
    except Exception as exc:
        logger.warning("预测稳定性测试失败: %s", exc)
        return {
            "skipped": True,
            "reason": f"预测失败: {exc}",
        }
    
    if not predictions_list:
        return {
            "skipped": True,
            "reason": "无法获取预测结果",
        }
    
    # 计算每个样本的预测方差
    predictions_array = np.array(predictions_list)  # shape: (n_samples, n_test_samples)
    variances = np.var(predictions_array, axis=0)  # shape: (n_test_samples,)
    
    mean_variance = float(np.mean(variances))
    max_variance = float(np.max(variances))
    
    alerts: List[str] = []
    instability_detected = False
    
    if mean_variance > variance_threshold:
        instability_detected = True
        alerts.append(
            f"预测稳定性检测：平均预测方差 {mean_variance:.3f} > {variance_threshold}，"
            f"模型预测不稳定（最大方差: {max_variance:.3f}）"
        )
    
    return {
        "skipped": False,
        "mean_variance": mean_variance,
        "max_variance": max_variance,
        "instability_detected": instability_detected,
        "alerts": alerts,
        "test_samples": test_samples,
        "n_predictions": n_samples,
    }

