"""稳健性分析."""

from __future__ import annotations

import logging
from dataclasses import dataclass, replace
from typing import Dict, List, Sequence, Tuple

import numpy as np
import pandas as pd

from ..models.base import BaseQuantModel
from .config import BacktestConfig
from .metrics import BacktestMetrics, calculate_backtest_metrics
from .walk_forward import BacktestResult, WalkForwardBacktester

logger = logging.getLogger(__name__)


@dataclass
class SensitivityResult:
    """Result of running one sensitivity-analysis scenario."""

    config_name: str
    metrics: BacktestMetrics
    model_rankings: Dict[str, int]  # model name -> rank (1 = best, from composite score)


@dataclass
class TimeSegmentStats:
    """Backtest metrics computed over one chronological segment."""

    segment: str  # segment label (e.g. first / middle / last third)
    metrics: BacktestMetrics


@dataclass
class MarketRegimeStats:
    """Backtest metrics computed over one market regime."""

    regime: str  # regime label (up / down / range-bound market)
    metrics: BacktestMetrics


@dataclass
class RobustnessReport:
    """Aggregated robustness analysis report."""

    # Sensitivity results grouped by analysis dimension (thresholds, risk, windows).
    sensitivity_results: Dict[str, List[SensitivityResult]]
    # Per-model ranking-stability score; higher = more stable across scenarios.
    model_stability: Dict[str, float]
    # Per-metric parameter-robustness scores plus an "overall" entry.
    parameter_robustness: Dict[str, float]
    # Metrics for each chronological third of the backtest period.
    time_segments: List[TimeSegmentStats]
    # std / |mean| of annualized returns across the time segments.
    time_segment_variability: float
    # Metrics grouped by market regime (up / down / range-bound).
    market_regimes: List[MarketRegimeStats]
    # Top features per model and their cross-model intersection.
    feature_consistency: Dict[str, List[str]]
    # Human-readable risk warnings detected from the return series.
    risk_alerts: List[str]


class SensitivityAnalyzer:
    """Parameter sensitivity analyzer.

    Re-runs walk-forward backtests under perturbed configurations (signal
    thresholds, stop-loss/take-profit levels, window length/step) to gauge
    how sensitive performance and model rankings are to each parameter.
    """

    def __init__(self, base_config: BacktestConfig) -> None:
        self.base_config = base_config

    def analyze_threshold_sensitivity(
        self,
        models: Dict[str, BaseQuantModel],
        X: pd.DataFrame,
        prices: pd.DataFrame,
        scenarios: List[Dict[str, float]] | None = None,
    ) -> List[SensitivityResult]:
        """Analyze sensitivity to buy/sell signal thresholds.

        Parameters
        ----------
        models:
            Mapping of model name to model instance.
        X:
            Feature data.
        prices:
            Price data.
        scenarios:
            Threshold scenarios, each containing ``buy_threshold`` and
            ``sell_threshold`` (plus an optional ``name``). When None,
            three default scenarios (A/B/C) are used.

        Returns
        -------
        List of sensitivity results, one per scenario that produced metrics.
        """
        if scenarios is None:
            scenarios = [
                {"name": "方案A (宽松)", "buy_threshold": 0.55, "sell_threshold": 0.45},
                {"name": "方案B (默认)", "buy_threshold": 0.60, "sell_threshold": 0.40},
                {"name": "方案C (严格)", "buy_threshold": 0.65, "sell_threshold": 0.35},
            ]
        return self._run_scenarios(models, X, prices, scenarios)

    def analyze_stop_take_sensitivity(
        self,
        models: Dict[str, BaseQuantModel],
        X: pd.DataFrame,
        prices: pd.DataFrame,
        scenarios: List[Dict[str, float]] | None = None,
    ) -> List[SensitivityResult]:
        """Analyze sensitivity to stop-loss / take-profit levels.

        Each scenario carries ``stop_loss_pct`` and ``take_profit_pct``
        (plus an optional ``name``); defaults cover three risk profiles.
        """
        if scenarios is None:
            scenarios = [
                {"name": "风控方案1 (-3/+8)", "stop_loss_pct": 0.03, "take_profit_pct": 0.08},
                {"name": "风控方案2 (-5/+10)", "stop_loss_pct": 0.05, "take_profit_pct": 0.10},
                {"name": "风控方案3 (-7/+12)", "stop_loss_pct": 0.07, "take_profit_pct": 0.12},
            ]
        return self._run_scenarios(models, X, prices, scenarios)

    def analyze_window_sensitivity(
        self,
        models: Dict[str, BaseQuantModel],
        X: pd.DataFrame,
        prices: pd.DataFrame,
        scenarios: List[Dict[str, int]] | None = None,
    ) -> List[SensitivityResult]:
        """Analyze sensitivity to walk-forward window length / step size.

        Each scenario carries ``window_length`` and ``step_size`` (plus an
        optional ``name``); defaults cover three window configurations.
        """
        if scenarios is None:
            scenarios = [
                {"name": "窗口方案1 (40/15)", "window_length": 40, "step_size": 15},
                {"name": "窗口方案2 (60/20)", "window_length": 60, "step_size": 20},
                {"name": "窗口方案3 (80/25)", "window_length": 80, "step_size": 25},
            ]
        return self._run_scenarios(models, X, prices, scenarios)

    def _run_scenarios(
        self,
        models: Dict[str, BaseQuantModel],
        X: pd.DataFrame,
        prices: pd.DataFrame,
        scenarios: Sequence[Dict[str, float]],
    ) -> List[SensitivityResult]:
        """Run one walk-forward backtest per scenario and collect results.

        Every scenario key except ``name`` is applied as an override to the
        base config via ``dataclasses.replace``.  Scenarios where no model
        produced any backtest windows are skipped.  Each result records the
        metrics of the best-ranked model together with the full ranking.
        """
        outputs: List[SensitivityResult] = []
        for scenario in scenarios:
            name = scenario.get("name", "敏感性方案")
            overrides = {k: v for k, v in scenario.items() if k != "name"}
            config = replace(self.base_config, **overrides)

            logger.info("执行敏感性分析: %s", name)
            backtester = WalkForwardBacktester(config)
            backtest_results = backtester.walk_forward_backtest(models, X, prices)

            model_metrics: Dict[str, BacktestMetrics] = {}
            for model_name, result_list in backtest_results.items():
                if result_list:
                    model_metrics[model_name] = calculate_backtest_metrics(result_list, config)

            if not model_metrics:
                continue

            model_rankings = self._rank_models(model_metrics)
            # Best model = smallest rank number (1 = best).
            best_model = min(model_rankings.items(), key=lambda x: x[1])[0]
            best_metrics = model_metrics[best_model]

            outputs.append(
                SensitivityResult(
                    config_name=name,
                    metrics=best_metrics,
                    model_rankings=model_rankings,
                )
            )
        return outputs

    def _rank_models(self, model_metrics: Dict[str, BacktestMetrics]) -> Dict[str, int]:
        """Rank models by composite score.

        Parameters
        ----------
        model_metrics:
            Mapping of model name to its backtest metrics.

        Returns
        -------
        Mapping of model name to rank; lower is better (1 = best).
        """
        if not model_metrics:
            return {}

        # Composite score: Sharpe ratio + information ratio - |max drawdown|.
        scores = {}
        for model_name, metrics in model_metrics.items():
            score = metrics.sharpe_ratio + metrics.information_ratio - abs(metrics.max_drawdown)
            scores[model_name] = score

        # Sort descending by score and assign 1-based ranks.
        sorted_models = sorted(scores.items(), key=lambda x: x[1], reverse=True)
        rankings = {model_name: rank + 1 for rank, (model_name, _) in enumerate(sorted_models)}

        return rankings


class RobustnessEvaluator:
    """Aggregates sensitivity runs into an overall robustness assessment."""

    def evaluate_model_stability(
        self, sensitivity_results: List[SensitivityResult]
    ) -> Dict[str, float]:
        """Score how stable each model's ranking is across scenarios.

        Parameters
        ----------
        sensitivity_results:
            Sensitivity analysis results carrying per-scenario rankings.

        Returns
        -------
        Mapping of model name to stability score in (0, 1]; higher means the
        ranking varied less (score = 1 / (1 + std of observed ranks)).
        """
        if not sensitivity_results:
            return {}

        # Group the observed ranks per model across all scenarios.
        observed_ranks: Dict[str, List[int]] = {}
        for result in sensitivity_results:
            for model_name, rank in result.model_rankings.items():
                observed_ranks.setdefault(model_name, []).append(rank)

        return {
            model_name: float(1.0 / (1.0 + np.std(ranks)))
            for model_name, ranks in observed_ranks.items()
        }

    def evaluate_parameter_robustness(
        self, sensitivity_results: List[SensitivityResult]
    ) -> Dict[str, float]:
        """Score how robust headline metrics are to parameter changes.

        Computes the coefficient of variation (std / |mean|) of the Sharpe
        ratio, |max drawdown| and annualized return across scenarios, and
        maps each CV to a score via 1 / (1 + CV).

        Parameters
        ----------
        sensitivity_results:
            Sensitivity analysis results (at least two are required).

        Returns
        -------
        Per-metric robustness scores plus an ``overall`` entry; empty dict
        when fewer than two scenarios are supplied.
        """
        if len(sensitivity_results) < 2:
            return {}

        def coefficient_of_variation(values: List[float]) -> float:
            # Degenerate inputs (empty or zero mean) map to infinite CV,
            # which yields a robustness score of 0.
            if not values or np.mean(values) == 0:
                return float("inf")
            return float(np.std(values) / abs(np.mean(values)))

        all_metrics = [result.metrics for result in sensitivity_results]
        cvs = {
            "sharpe_ratio": coefficient_of_variation([m.sharpe_ratio for m in all_metrics]),
            "max_drawdown": coefficient_of_variation([abs(m.max_drawdown) for m in all_metrics]),
            "annualized_return": coefficient_of_variation([m.annualized_return for m in all_metrics]),
        }

        scores = {key: 1.0 / (1.0 + value) for key, value in cvs.items()}
        mean_cv = (cvs["sharpe_ratio"] + cvs["max_drawdown"] + cvs["annualized_return"]) / 3
        scores["overall"] = float(1.0 / (1.0 + mean_cv))
        return scores

    def generate_report(
        self,
        sensitivity_buckets: Dict[str, List[SensitivityResult]],
        best_model_results: List[BacktestResult],
        config: BacktestConfig,
        price_series: pd.Series,
        feature_importances: Dict[str, Dict[str, float]],
    ) -> RobustnessReport:
        """Assemble the full robustness report from all analysis components."""
        # Flatten all buckets for cross-scenario stability/robustness scoring.
        flattened: List[SensitivityResult] = []
        for bucket in sensitivity_buckets.values():
            flattened.extend(bucket)

        segments, segment_variability = TimeSegmentAnalyzer().analyze(
            best_model_results, config
        )
        regimes = MarketRegimeAnalyzer().analyze(best_model_results, price_series, config)

        return RobustnessReport(
            sensitivity_results=sensitivity_buckets,
            model_stability=self.evaluate_model_stability(flattened),
            parameter_robustness=self.evaluate_parameter_robustness(flattened),
            time_segments=segments,
            time_segment_variability=float(segment_variability),
            market_regimes=regimes,
            feature_consistency=analyze_feature_consistency(feature_importances),
            risk_alerts=detect_risk_alerts(best_model_results, price_series),
        )


class TimeSegmentAnalyzer:
    """Evaluates robustness across consecutive time segments."""

    def analyze(
        self,
        results: List[BacktestResult],
        config: BacktestConfig,
    ) -> Tuple[List[TimeSegmentStats], float]:
        """Split merged daily returns into thirds and score each segment.

        Returns the per-segment stats plus a variability measure
        (std / |mean| of the segments' annualized returns); ``([], 0.0)``
        when no daily returns are available.
        """
        daily = merge_daily_returns(results)
        if daily.empty:
            return [], 0.0

        total = len(daily)
        # Cut points are clamped so each slice is non-degenerate for tiny inputs.
        first_cut = max(1, total // 3)
        second_cut = max(first_cut + 1, 2 * total // 3)

        labelled_slices = (
            ("前 1/3", daily.iloc[:first_cut]),
            ("中间 1/3", daily.iloc[first_cut:second_cut]),
            ("后 1/3", daily.iloc[second_cut:]),
        )

        segment_stats: List[TimeSegmentStats] = []
        annual_returns: List[float] = []
        for label, chunk in labelled_slices:
            if chunk.empty:
                continue
            chunk_metrics = metrics_from_returns(chunk, config)
            segment_stats.append(TimeSegmentStats(segment=label, metrics=chunk_metrics))
            annual_returns.append(chunk_metrics.annualized_return)

        if annual_returns and np.mean(annual_returns) != 0:
            variability = np.std(annual_returns) / abs(np.mean(annual_returns))
        else:
            variability = 0.0
        return segment_stats, variability


class MarketRegimeAnalyzer:
    """Evaluates robustness under different market regimes."""

    def analyze(
        self,
        results: List[BacktestResult],
        price_series: pd.Series | pd.DataFrame,
        config: BacktestConfig,
    ) -> List[MarketRegimeStats]:
        """Bucket trading days by cumulative market move and score each bucket.

        Days are classified by the close price's percentage change relative
        to the first aligned day: >= +10% counts as an up market, <= -10%
        as a down market, anything in between as range-bound.
        """
        daily = merge_daily_returns(results)
        closes = extract_close_series(price_series)
        if daily.empty or closes.empty:
            return []

        # Align prices to the return index (nearest match), dropping gaps.
        aligned = closes.reindex(daily.index, method="nearest").dropna()
        if aligned.empty:
            return []

        # Cumulative move relative to the first aligned price.
        cumulative_move = aligned / aligned.iloc[0] - 1

        regime_days = {
            "上涨市场": cumulative_move[cumulative_move >= 0.10].index,
            "下跌市场": cumulative_move[cumulative_move <= -0.10].index,
            "震荡市场": cumulative_move[(cumulative_move > -0.10) & (cumulative_move < 0.10)].index,
        }

        regime_stats: List[MarketRegimeStats] = []
        for label, days in regime_days.items():
            subset = daily.loc[days]
            if subset.empty:
                continue
            regime_stats.append(
                MarketRegimeStats(regime=label, metrics=metrics_from_returns(subset, config))
            )
        return regime_stats


def merge_daily_returns(results: List[BacktestResult]) -> pd.Series:
    """Concatenate per-window daily returns into one deduplicated series.

    Windows without daily returns are skipped; on overlapping dates the
    value from the later window wins. Returns an empty float series when
    nothing is available.
    """
    collected = []
    for result in results:
        daily = result.daily_returns
        if daily is None or daily.empty:
            continue
        collected.append(daily)

    if not collected:
        return pd.Series(dtype=float)

    combined = pd.concat(collected).sort_index()
    duplicate_mask = combined.index.duplicated(keep="last")
    return combined[~duplicate_mask]


def metrics_from_returns(returns: pd.Series, config: BacktestConfig) -> BacktestMetrics:
    """Run the standard metrics pipeline on a bare daily-return series.

    Wraps the returns in a synthetic single-window ``BacktestResult`` with
    no trades/positions; an empty input yields a flat one-point equity
    curve at the initial capital.
    """
    if returns.empty:
        equity = pd.Series([config.initial_capital])
        daily = pd.Series(dtype=float)
    else:
        # Compound the returns into an equity curve scaled by initial capital.
        equity = (1 + returns).cumprod() * config.initial_capital
        daily = returns

    synthetic = BacktestResult(
        equity_curve=equity,
        trades=[],
        positions=[],
        daily_returns=daily,
        total_trades=0,
        win_rate=0.0,
        stop_loss_count=0,
        take_profit_count=0,
    )
    return calculate_backtest_metrics([synthetic], config)


def analyze_feature_consistency(feature_importances: Dict[str, Dict[str, float]]) -> Dict[str, List[str]]:
    """Summarise each model's top-5 features and their cross-model overlap.

    Models with empty importance dicts are skipped. Returns a dict with
    ``top_features_per_model`` (one "model: f1, f2, ..." string per model)
    and ``common_features`` (sorted intersection of every model's top-5).
    """
    per_model: Dict[str, List[str]] = {}
    for model, scores in feature_importances.items():
        if not scores:
            continue
        ranked = sorted(scores, key=scores.get, reverse=True)
        per_model[model] = ranked[:5]

    feature_sets = [set(names) for names in per_model.values()]
    shared = sorted(set.intersection(*feature_sets)) if feature_sets else []
    summaries = [f"{model}: {', '.join(names)}" for model, names in per_model.items()]

    return {
        "top_features_per_model": summaries,
        "common_features": shared,
    }


def detect_risk_alerts(
    results: List[BacktestResult],
    price_series: pd.Series | pd.DataFrame,
    drop_threshold: float = -0.05,
) -> List[str]:
    """Scan merged daily returns (and optional volume data) for risk warnings.

    Parameters
    ----------
    results:
        Backtest windows whose daily returns are merged and scanned.
    price_series:
        Price data; when it is a DataFrame with a ``volume`` column, a
        low-liquidity check is also performed.
    drop_threshold:
        Single-day return below which a crash alert is raised.

    Returns
    -------
    List of human-readable alert strings (empty when nothing triggers or
    no daily returns are available).
    """
    alerts: List[str] = []
    returns = merge_daily_returns(results)
    if returns.empty:
        return alerts

    # Single-day crash check against the threshold.
    min_return = returns.min()
    if min_return < drop_threshold:
        alerts.append(f"存在单日跌幅 {min_return:.2%}，超过 -5% 阈值")

    # Track the longest losing streak and the worst cumulative loss within
    # any single streak. NOTE(review): longest_run and max_loss are global
    # maxima that may come from *different* streaks, so the alert below can
    # pair a streak length with a loss from another streak — confirm this
    # is the intended semantics.
    longest_run = 0
    current_run = 0
    cumulative_loss = 0.0
    max_loss = 0.0
    for r in returns:
        if r < 0:
            current_run += 1
            cumulative_loss += r
            max_loss = min(max_loss, cumulative_loss)
            longest_run = max(longest_run, current_run)
        else:
            current_run = 0
            cumulative_loss = 0.0
    if longest_run >= 10 and max_loss <= -0.15:
        alerts.append(
            f"出现连续 {longest_run} 日亏损，累计跌幅 {max_loss:.2%}，需关注连续亏损风险"
        )

    # Low-volume check (only when volume data is available): flag if the
    # mean of the 10 lowest-volume rows is under 20% of the overall mean.
    if isinstance(price_series, pd.DataFrame) and "volume" in price_series.columns:
        lowest = price_series.nsmallest(min(10, len(price_series)), "volume")
        if not lowest.empty and lowest["volume"].mean() < price_series["volume"].mean() * 0.2:
            alerts.append("检测到低成交量时段，可能存在流动性风险")

    return alerts


def extract_close_series(price_data: pd.Series | pd.DataFrame) -> pd.Series:
    """Return the close-price series from a Series or an OHLC-style DataFrame."""
    if not isinstance(price_data, pd.DataFrame):
        # Already a bare series of close prices.
        return price_data
    return price_data["close"]
