"""
Define the data structure for algorithms as context
"""

import datetime
from typing import Dict
from typing import Sequence
from typing import Set
from typing import Tuple
from typing import Optional

import numpy as np

from .data_loader import DataLoader, MemoryDataLoader
from .graph import Graph
from .graph import Node


class CaseData:
    # pylint: disable=too-many-instance-attributes
    """
    Case data that algorithms can access
    """

    def __init__(
        self,
        data_loader: DataLoader,
        detect_time: float,
        interval: datetime.timedelta = datetime.timedelta(minutes=5),
        prune: bool = False,
        original_data_loader: Optional[DataLoader] = None,
        sli: Optional[Node] = None,
    ):
        # pylint: disable=too-many-arguments
        """
        Parameters:
            data_loader: loader for the (normalized) metric data
            detect_time: Unix timestamp when the SLI violation was detected
            interval: sampling interval of the time series
            prune: if True, load_data drops constant (zero-variance) series
            original_data_loader: loader for the raw, un-normalized data;
                falls back to data_loader when not provided
            sli: the violated service level indicator node, if known
        """
        self._data_loader = data_loader
        # Fall back to the normalized loader when no raw loader is supplied
        self._original_data_loader = original_data_loader or data_loader
        self._detect_time = detect_time
        self._interval = interval
        self._prune = prune
        # BUG FIX: _sli was never assigned before, so the `sli` property
        # unconditionally raised AttributeError; accept it as an optional
        # constructor argument (defaulting to None keeps callers working).
        self._sli = sli

        self._train_window = 0
        self._test_window = 0
        # Total time span of the available data, in seconds
        self._lookup_window_seconds = 0.0

        if isinstance(data_loader, MemoryDataLoader) and data_loader._data:
            # NOTE(review): this reaches into MemoryDataLoader's private
            # `_data` attribute and assumes all series share the same
            # timestamps and length, so the first series is representative
            # of the whole — confirm against the loader implementation.
            first_key = next(iter(data_loader._data))
            time_series = data_loader._data[first_key]
            # Each entry is assumed to be a (timestamp, value) tuple;
            # collect just the timestamps for window computation.
            timestamps = np.array([point[0] for point in time_series])
            interval_seconds = self._interval.total_seconds()
            # Small tolerance for floating-point timestamp comparison
            epsilon = interval_seconds / 10.0

            # Cast to int: np.sum yields np.int64, but the train_window /
            # test_window properties are annotated as plain int.
            # _train_window: number of points strictly before detect_time
            self._train_window = int(
                np.sum(timestamps < (self._detect_time - epsilon))
            )

            # _test_window: number of points at or after detect_time
            self._test_window = int(
                np.sum(timestamps >= (self._detect_time - epsilon))
            )

            # _lookup_window_seconds: span of the whole data set, padded by
            # one interval so the last point's own interval is included
            self._lookup_window_seconds = float(
                timestamps[-1] - timestamps[0]
            ) + interval_seconds

    @property
    def data_loader(self) -> DataLoader:
        """
        Loader for accessing the normalized data
        """
        return self._data_loader

    @property
    def original_data_loader(self) -> DataLoader:
        """
        Loader for accessing the raw, un-normalized data
        """
        return self._original_data_loader

    @property
    def sli(self) -> Optional[Node]:
        """
        The service level indicator (SLI) that is violated, if known
        """
        return self._sli

    @property
    def detect_time(self) -> float:
        """
        Unix timestamp when the service level indicator (SLI) is violated
        """
        return self._detect_time

    @property
    def train_window(self) -> int:
        """
        Number of data points for learning the normal pattern (before detect_time)
        """
        return self._train_window

    @property
    def test_window(self) -> int:
        """
        Number of data points for analyzing the fault (at and after detect_time)
        """
        return self._test_window

    def load_data(
        self, graph: Optional[Graph] = None
    ) -> Dict[str, Dict[Node, Sequence[float]]]:
        """
        Prepare normalized and original data around the detection time.

        Parameters:
            graph: optional graph used to restrict which nodes are loaded;
                when None, all nodes known to the loader are used

        Returns:
            A dict with both views of the data:
            {
                'normalized': {node1: [norm_values], node2: [norm_values], ...},
                'original': {node1: [orig_values], node2: [orig_values], ...}
            }
        """

        def _load_from_loader(loader: DataLoader) -> Dict[Node, Sequence[float]]:
            # Load one series per node from `loader`, windowed around
            # detect_time by the precomputed train/test windows.
            nodes = loader.nodes if graph is None else graph.nodes
            interval_seconds = self._interval.total_seconds()

            # Start timestamp: _train_window points before detect_time
            start_load_ts = self._detect_time - (
                self._train_window * interval_seconds
            )

            if self._test_window > 0:
                # End timestamp: (_test_window - 1) intervals after detect_time
                end_load_ts = self._detect_time + (
                    (self._test_window - 1) * interval_seconds
                )
            elif self._train_window > 0:
                # Training data only: load up to the point before detect_time
                end_load_ts = self._detect_time - interval_seconds
            else:
                # No training and no test data: collapse the range to
                # detect_time so at most a single point is loaded
                start_load_ts = self._detect_time
                end_load_ts = self._detect_time

            series: Dict[Node, Sequence[float]] = {}
            for node in nodes:
                # The loader is responsible for clipping precisely to
                # [start_load_ts, end_load_ts]
                node_data = loader.load(
                    entity=node.entity,
                    metric=node.metric,
                    start=start_load_ts,
                    end=end_load_ts,
                    interval=self._interval,
                )
                if not node_data:
                    # Loader returned no valid data for this range; skip
                    continue
                if self._prune and len(set(node_data)) <= 1:
                    # A constant series carries no signal; drop it when pruning
                    continue
                series[node] = node_data
            return series

        return {
            'normalized': _load_from_loader(self._data_loader),
            'original': _load_from_loader(self._original_data_loader),
        }


class Case:
    """
    Case data for evaluation
    """

    def __init__(self, data: CaseData, answer: Set[Node], graph: Graph):
        """
        Parameters:
            data: CaseData holding both normalized and original series
            answer: set of ground-truth root-cause nodes
            graph: causal graph for this case
        """
        self._graph = graph
        self._answer = answer
        self._data = data

    @property
    def data(self) -> CaseData:
        """
        Case data (contains both normalized and original series)
        """
        return self._data

    @property
    def answer(self) -> Set[Node]:
        """
        Ground truth for this case
        """
        return self._answer

    @property
    def graph(self) -> Graph:
        """
        Causal graph associated with this case
        """
        return self._graph
