"""
基于因果推断的根因分析（CIRCA）
实现了基于回归假设检验和后代调整的评分器
"""

from typing import Callable, Dict, Sequence, Tuple, List, Any

import numpy as np

from .anm import ANMRegressor
from .base import Regressor
from ..base import Score
from ..base import Scorer
from ..common import DecomposableScorer
from ..common import zscore_conf
from ...model.case import CaseData
from ...model.graph import Graph
from ...model.graph import Node


class RHTScorer(DecomposableScorer):
    """
    Regression-based hypothesis testing scorer.

    Regresses each node on its parents (honoring per-edge time lags), then
    measures how anomalous the node's test-window values are relative to the
    fitted model, yielding a z-score and a confidence.
    """

    def __init__(
            self,
            tau_max: int = 0,
            regressor: Regressor = None,
            use_confidence: bool = False,
            **kwargs,
    ):
        """
        Initialize the scorer.

        Args:
            tau_max: fallback time lag used when an edge carries no
                "time_lag" attribute (or it is None); clamped to >= 0
            regressor: regressor instance; defaults to ANMRegressor
            use_confidence: if True, use the confidence instead of the
                z-score as the final score
        """
        super().__init__(**kwargs)
        self._tau_max = max(tau_max, 0)
        self._regressor = regressor if regressor else ANMRegressor()
        self._use_confidence = use_confidence

    def _resolve_lag(self, edge_info: Dict[str, Any]) -> int:
        """
        Return the edge's time lag, falling back to tau_max when the
        "time_lag" attribute is absent or explicitly None.
        """
        lag = edge_info.get("time_lag", self._tau_max)
        return self._tau_max if lag is None else lag

    @staticmethod
    def _split_train_test(
            series_x: np.ndarray,  # feature matrix, shape (num_samples, num_parents)
            series_y: np.ndarray,  # target series, shape (num_samples,)
            train_window: int,  # number of leading samples used for training
            test_window: int,  # number of trailing samples used for testing
    ):
        """
        Split the data chronologically: the first train_window samples train
        the model, the last test_window samples are evaluated.

        Uses end-anchored positive indices for the test split so that
        test_window == 0 yields EMPTY test arrays; the previous
        ``series[-test_window:]`` form returned the whole series when
        test_window was 0.
        """
        num_samples = len(series_y)
        test_start = max(num_samples - test_window, 0)
        train_x: np.ndarray = series_x[:train_window, :]
        train_y: np.ndarray = series_y[:train_window]
        test_x: np.ndarray = series_x[test_start:, :]
        test_y: np.ndarray = series_y[test_start:]
        return train_x, test_x, train_y, test_y

    def split_data(
            self,
            data: Dict[Node, Sequence[float]],  # time series for all nodes
            node: Node,  # target node
            parents_with_edge_info: Dict[Node, Dict[str, Any]],  # parent -> edge attributes
            case_data: CaseData,  # case data (provides train/test window sizes)
    ) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
        """
        Prepare train/test data using each parent edge's "time_lag" attribute.

        Steps:
        1. Compute the maximum time lag over all parent edges.
        2. For each parent, extract its series shifted by its own lag so all
           columns align with the target at time t.
        3. Build the feature matrix and the target series.
        4. Split chronologically into train and test windows.

        NOTE(review): assumes every parent series has the same length as the
        target node's series — confirm against the data loader.
        """
        T = len(data[node])  # total series length

        # Maximum time lag across all parent edges (0 when there are no parents)
        if parents_with_edge_info:
            max_time_lag = max(
                self._resolve_lag(edge_info)
                for edge_info in parents_with_edge_info.values()
            )
        else:
            max_time_lag = 0

        # Number of usable (lag-aligned) samples; clamp to 0 so the window
        # arithmetic below never goes negative
        num_samples = max(T - max_time_lag, 0)
        if num_samples == 0:
            # Series too short to support max_time_lag: return empty arrays
            series_x = np.zeros((0, len(parents_with_edge_info)))
            series_y = np.array([])
        else:
            if parents_with_edge_info:
                series_x = np.zeros((num_samples, len(parents_with_edge_info)))
                for i, (parent, edge_info) in enumerate(parents_with_edge_info.items()):
                    lag = self._resolve_lag(edge_info)
                    parent_series = np.array(data[parent])
                    # Parent's value at t - lag, for t in [max_time_lag, T);
                    # start index is non-negative because lag <= max_time_lag
                    series_x[:, i] = parent_series[(max_time_lag - lag):(T - lag)]
            else:
                # No parents: empty feature matrix with the right row count
                series_x = np.zeros((num_samples, 0))
            # Target series starts once all lagged features are available
            series_y = np.array(data[node][max_time_lag:])

        # Shrink the configured windows to what the data can support
        train_window = min(case_data.train_window, num_samples)
        test_window = min(case_data.test_window, num_samples - train_window)
        return self._split_train_test(
            series_x=series_x,
            series_y=series_y,
            train_window=train_window,
            test_window=test_window,
        )

    def score_node(
            self,
            graph: Graph,  # dependency graph
            series: Dict[str, Dict[Node, Sequence[float]]],  # "normalized" and "original" series
            node: Node,  # node to score
            data: CaseData,  # case data
    ) -> Score:
        """
        Score a single node.

        Flow:
        1. Fetch the node's parents with their edge attributes.
        2. Prepare lag-aligned train/test data.
        3. Compute z-scores with the regressor and aggregate them.
        4. Derive a confidence and assemble the Score.
        """
        parents_with_edge_info = graph.parents_with_edge_data(node)

        # NOTE(review): the original code selected series["normalized"] in
        # BOTH branches of an if/else on whether the node has parents, even
        # though its comment said root nodes should use the raw series.
        # Behavior is preserved here (normalized data always); confirm
        # whether series["original"] was intended for parentless nodes.
        current_series_for_model = series["normalized"]

        # Prepare data using the parents' edge attributes
        train_x, test_x, train_y, test_y = self.split_data(
            current_series_for_model, node, parents_with_edge_info, data
        )

        # Per-sample z-scores from the regression residuals
        z_scores = self._regressor.score(
            train_x=train_x, test_x=test_x, train_y=train_y, test_y=test_y
        )

        # Aggregate absolute z-scores into one value
        z_score = self._aggregator(abs(z_scores))

        # Map the z-score to a confidence
        confidence = zscore_conf(z_score)

        # Choose the final score per configuration
        if self._use_confidence:
            score = Score(confidence)
            # Break confidence ties by the underlying z-score
            score.key = (score.score, z_score)
        else:
            score = Score(z_score)

        # Keep both quantities for downstream inspection
        score["z-score"] = z_score
        score["Confidence"] = confidence

        return score


class DAScorer(Scorer):
    """
    Descendant-adjustment scorer.

    Boosts a node's score with the aggregated scores of its anomalous
    descendants, so that upstream causes of many symptoms rank higher.
    """

    def __init__(
            self,
            threshold: float = 0,
            aggregator: Callable[[Sequence[float]], float] = max,
            **kwargs,
    ):
        """
        Initialize the scorer.

        Args:
            threshold: only nodes scoring at least this much receive the
                descendant adjustment; negative values are clamped to 0
            aggregator: reduces the collected descendant scores to a single
                number (defaults to max)
        """
        super().__init__(**kwargs)
        self._threshold = max(threshold, 0.0)
        self._aggregator = aggregator

    def score(
            self,
            graph: Graph,
            data: CaseData,
            current: float,
            scores: Dict[Node, Score] = None,
    ) -> Dict[Node, Score]:
        """
        Adjust node scores with descendant information and set ranking keys.

        Flow:
        1. Restrict the graph's topological layers to scored nodes and record
           each node's layer index.
        2. Walk bottom-up, collecting descendant scores per node.
        3. Add the aggregated descendant score to every node at or above the
           threshold.
        4. Set each score's ranking key.
        """
        # Topological layers, keeping only nodes that actually have scores
        layers: List[set] = []
        for level in graph.topological_sort:
            layers.append({item for item in level if item in scores})

        # Step 1: record each node's topological depth for tie-breaking later
        for depth, level in enumerate(layers):
            for item in level:
                scores[item]["index"] = depth

        # Step 2: bottom-up collection of descendant scores. A child whose
        # score falls below the threshold is treated as transparent: the
        # scores it collected from its own descendants are merged in too.
        collected: Dict[Node, Dict[Node, float]] = {}
        for level in reversed(layers):
            for item in level:
                bucket: Dict[Node, float] = {}
                for child in graph.children(item):
                    if child not in scores:
                        continue
                    bucket[child] = scores[child].score
                    if scores[child].score < self._threshold:
                        bucket.update(collected.get(child, {}))
                collected[item] = bucket

        # Step 3: adjust nodes at or above the threshold
        for item, item_score in scores.items():
            if item_score.score < self._threshold:
                continue
            bucket = collected[item]
            if not bucket:
                continue
            adjustment = self._aggregator(bucket.values())
            item_score.score += adjustment
            item_score["child_score"] = adjustment

        # Step 4: ranking key — score first, earlier (shallower) layers win
        # ties, then the raw z-score when present
        for item_score in scores.values():
            item_score.key = (
                item_score.score,
                -item_score["index"],
                item_score.get("z-score", 0),
            )
        return scores
