"""模型评估相关的通用工具函数."""

from __future__ import annotations

from typing import Iterable, Tuple

import numpy as np


def infer_sequence_length(model: object, default: int = 30) -> int:
    """Infer the training sequence length from a model's configuration.

    Lookup order (first hit wins):
      1. ``model.config.params["sequence_len"]`` — dataclass-style configs.
      2. ``model.config["params"]["sequence_len"]`` — dict configs with a
         nested ``params`` mapping.
      3. ``model.config["sequence_len"]`` — flat dict configs.

    Returns *default* when no configuration or key is found.
    """
    cfg = getattr(model, "config", None)
    if cfg is None:
        return default

    # Collect candidate mappings in priority order, then scan them once.
    candidates = []
    attr_params = getattr(cfg, "params", None)
    if isinstance(attr_params, dict):
        candidates.append(attr_params)
    if isinstance(cfg, dict):
        candidates.append(cfg.get("params") or {})
        candidates.append(cfg)

    for mapping in candidates:
        if "sequence_len" in mapping:
            return int(mapping["sequence_len"])
    return default


def align_predictions(
    y_true: Iterable[float],
    y_pred_proba: Iterable[float],
    model: object,
    default_sequence_len: int = 30,
) -> Tuple[np.ndarray, np.ndarray]:
    """Align label and prediction-probability lengths for evaluation.

    Sequence models consume a warm-up window of observations and therefore
    emit fewer predictions than there are labels. When predictions are the
    shorter array, the leading labels are dropped (by the model's inferred
    sequence length when it fits, otherwise by the raw length difference)
    before both arrays are truncated to a common length.
    """
    labels = np.asarray(list(y_true))
    probs = np.asarray(list(y_pred_proba))

    # Nothing to align when either side is empty.
    if len(labels) == 0 or len(probs) == 0:
        return labels, probs

    if len(probs) < len(labels):
        seq_len = infer_sequence_length(model, default_sequence_len)
        if 0 < seq_len < len(labels):
            # Drop the warm-up labels the sequence model never predicted.
            labels = labels[seq_len:]
        else:
            # Fall back to keeping only the trailing labels that have
            # a matching prediction.
            labels = labels[-len(probs):]

    common = min(len(labels), len(probs))
    return labels[:common], probs[:common]


def safe_binary_metrics(y_true: np.ndarray, y_pred_proba: np.ndarray) -> dict[str, float | None]:
    """Compute binary-classification metrics (AUC / accuracy / F1).

    Each metric is ``None`` when it cannot be computed — e.g. empty input,
    or a single-class ``y_true`` for which ROC-AUC is undefined.
    """
    from sklearn.metrics import accuracy_score, f1_score, roc_auc_score

    results: dict[str, float | None] = {"auc": None, "accuracy": None, "f1": None}
    if y_true.size == 0 or y_pred_proba.size == 0:
        return results

    # Hard labels via the conventional 0.5 probability threshold.
    hard_labels = (y_pred_proba >= 0.5).astype(int)

    scorers = {
        "auc": lambda: roc_auc_score(y_true, y_pred_proba),
        "accuracy": lambda: accuracy_score(y_true, hard_labels),
        "f1": lambda: f1_score(y_true, hard_labels),
    }
    for name, scorer in scorers.items():
        try:
            results[name] = float(scorer())
        except ValueError:
            # Metric undefined for this label/prediction combination.
            results[name] = None
    return results

