"""
Simulation with a vector auto-regression model
"""

import datetime
import os
import pickle
from typing import Callable
from typing import Dict
from typing import List
from typing import Sequence
from typing import Set

import networkx as nx
import numpy as np
from sklearn.preprocessing import StandardScaler

from ...graph.common import StaticGraphFactory
from ...model.case import Case
from ...model.case import CaseData
from ...model.data_loader import MemoryDataLoader
from ...model.graph import MemoryGraph
from ...model.graph import Node
from ...utils import ENCODING
from ...utils import dump_csv
from ...utils import dump_json
from ...utils import load_csv
from ...utils import load_json

_SLI = 0
ENTITY = "ingress&svc_duration"


class SimCase(Case):
    """
    Simulated case data for evaluation.

    Wraps a standardized time-series matrix (and optionally its raw,
    un-standardized counterpart) together with the ground-truth root causes,
    and lazily materializes a :class:`CaseData` on first access.
    """

    DATA_FILENAME = "data.csv"
    ORIGINAL_DATA_FILENAME = "original_data.csv"  # raw (un-standardized) data
    INFO_FILENAME = "info.json"
    DETAIL_FILENAME = "details.pk"

    def __init__(
            self,
            data: np.ndarray,
            causes: Set[Sequence],  # each cause is an (entity, metric) pair
            length_normal: int,
            interval: datetime.timedelta = datetime.timedelta(minutes=1),
            case_data_params: dict = None,
            details: dict = None,
            graph: MemoryGraph = None,
            nodes: List[Node] = None,
            original_data: np.ndarray = None,
    ):
        """
        Parameters:
            data: standardized time series, shaped (num_time, num_node)
            causes: ground-truth root causes, each an (entity, metric) pair
            length_normal: number of leading fault-free samples
            interval: sampling interval between consecutive samples
            case_data_params: extra keyword arguments forwarded to CaseData
            details: auxiliary information to be pickled on dump
            graph: the causal graph behind the data, if available
            nodes: node for each column of data, aligned by column index
            original_data: raw (un-standardized) counterpart of data
        """
        # pylint: disable=too-many-arguments
        self._time_series = data
        self._original_time_series = original_data
        self._causes = causes
        self._length_normal = length_normal
        self._interval = interval
        self._case_data_params = {} if case_data_params is None else case_data_params
        self._details = details
        self._nodes = nodes
        self._data = None  # cached CaseData, built lazily by the data property

        answer = {Node(entity=cause[0], metric=str(cause[1])) for cause in causes}

        # The parent receives data=None on purpose: the data property below
        # overrides access and creates the CaseData on demand.
        super().__init__(
            data=None,
            answer=answer,
            graph=graph,
        )

    def _series_mapping(self, array: np.ndarray) -> Dict[str, list]:
        """
        Map each column of array to a list of (timestamp, value) pairs.

        Keys are "entity|metric" when node information is available for every
        column, and the plain column index otherwise.
        """
        _, num_node = array.shape
        interval = self._interval.total_seconds()
        if self._nodes and len(self._nodes) == num_node:
            # Use the actual node information as keys
            return {
                f"{node.entity}|{node.metric}": [
                    (i * interval, value)
                    for i, value in enumerate(array[:, index])
                ]
                for index, node in enumerate(self._nodes)
            }
        # Without node information, fall back to numeric column indexes
        return {
            str(index): [
                (i * interval, value)
                for i, value in enumerate(array[:, index])
            ]
            for index in range(num_node)
        }

    @property
    def data(self) -> CaseData:
        """
        CaseData holding the standardized series and, when available, the
        original (un-standardized) series
        """
        if self._data is None:
            # Build the data loader for the original data only when present
            original_data_loader = None
            if self._original_time_series is not None:
                original_data_loader = MemoryDataLoader(
                    data=self._series_mapping(self._original_time_series)
                )

            self._data = CaseData(
                data_loader=MemoryDataLoader(
                    data=self._series_mapping(self._time_series)
                ),
                original_data_loader=original_data_loader,
                # The fault is injected right after the normal phase
                detect_time=self._length_normal * self._interval.total_seconds(),
                **self._case_data_params,
            )
        return self._data

    @classmethod
    def load(cls, folder: str, **kwargs) -> "SimCase":
        """
        Load from a folder

        Parameters:
            folder: where the case is dumped
            **kwargs: Other parameters will be passed to construct SimCase
        """
        filename = os.path.join(folder, cls.DATA_FILENAME)
        data = np.array(list(load_csv(filename))).astype(float)

        # The original (un-standardized) data is optional
        original_data = None
        original_filename = os.path.join(folder, cls.ORIGINAL_DATA_FILENAME)
        if os.path.exists(original_filename):
            original_data = np.array(list(load_csv(original_filename))).astype(float)

        info: dict = load_json(os.path.join(folder, cls.INFO_FILENAME))
        return cls(
            data=data,
            original_data=original_data,
            # Causes are serialized as JSON arrays; convert each back to a
            # hashable tuple so they can live in a set
            causes=set(tuple(cause) for cause in info["causes"]),
            length_normal=info["length_normal"],
            **kwargs,
        )

    def dump(self, folder: str):
        """
        Dump into a folder
        """
        # Dump data into csv
        os.makedirs(folder, exist_ok=True)
        dump_csv(
            filename=os.path.join(folder, self.DATA_FILENAME), data=self._time_series
        )

        # Also persist the original data when it is available
        if self._original_time_series is not None:
            dump_csv(
                filename=os.path.join(folder, self.ORIGINAL_DATA_FILENAME),
                data=self._original_time_series,
            )

        dump_json(
            filename=os.path.join(folder, self.INFO_FILENAME),
            data=dict(causes=list(self._causes), length_normal=self._length_normal),
        )
        if self._details:
            with open(os.path.join(folder, self.DETAIL_FILENAME), "wb") as obj:
                pickle.dump(self._details, obj)


class SimDataset:
    """
    A causal graph bundled with the simulated cases generated from it
    """

    _INDEX_FILENAME = "index"
    GRAPH_FILENAME = "graph.json"
    CASES_FOLDER = "cases"

    def __init__(self, graph: MemoryGraph, cases: List[SimCase]):
        self._graph = graph
        self._cases = cases
        self._graph_factory = StaticGraphFactory(graph=graph)

    @property
    def cases(self) -> List[Case]:
        """
        Simulated cases
        """
        return self._cases

    @property
    def graph_factory(self) -> StaticGraphFactory:
        """
        The causal graph in the data generation process
        """
        return self._graph_factory

    @classmethod
    def load(cls, folder: str, **kwargs) -> "SimDataset":
        """
        Load a simulated dataset from a folder

        Parameters:
            folder: where the dataset is dumped
            **kwargs: extra parameters forwarded to the SimCase constructor
        """
        # Restore the causal graph first
        graph = MemoryGraph.load(os.path.join(folder, cls.GRAPH_FILENAME))

        cases_folder = os.path.join(folder, cls.CASES_FOLDER)
        # The index file's first line records how many cases were dumped
        with open(
                os.path.join(cases_folder, cls._INDEX_FILENAME), encoding=ENCODING
        ) as index_file:
            num_cases = int(next(index_file))

        # Each case lives in a sub-folder named by its index (0, 1, 2, ...)
        cases = [
            SimCase.load(os.path.join(cases_folder, str(case_id)), **kwargs)
            for case_id in range(num_cases)
        ]

        return SimDataset(graph=graph, cases=cases)

    def dump(self, folder: str):
        """
        Dump the dataset into a folder
        """
        cases_folder = os.path.join(folder, self.CASES_FOLDER)
        os.makedirs(cases_folder, exist_ok=True)
        self._graph.dump(os.path.join(folder, self.GRAPH_FILENAME))

        # Record the number of cases so load() knows how many to read back
        index_path = os.path.join(cases_folder, self._INDEX_FILENAME)
        with open(index_path, "w", encoding=ENCODING) as index_file:
            index_file.write(str(len(self._cases)))
        for case_id, case in enumerate(self._cases):
            case.dump(os.path.join(cases_folder, str(case_id)))


def _normal_weight(rng: np.random.Generator) -> float:
    weight = rng.standard_normal()
    return np.sign(weight) * (abs(weight) + 0.2)


def _uniform_weight(rng: np.random.Generator) -> float:
    segments = [(-2.0, -0.5), (0.5, 2.0)]
    low, high = rng.choice(segments)
    return rng.uniform(low=low, high=high)


def generate_sedag(
        num_node: int,
        num_edge: int,
        weight_generator: Callable[[np.random.Generator], float] = _uniform_weight,
        rng: np.random.Generator = None,
) -> np.ndarray:
    """
    Generate a weighted directed acyclic graph with a single end node.

    Parameters:
        num_node: number of nodes
        num_edge: number of edges
        weight_generator: callable producing edge weights; defaults to
            _uniform_weight, sampling from [-2.0, -0.5] or [0.5, 2.0]
        rng: random generator, for reproducibility

    Returns:
        An adjacency matrix where matrix[i, j] != 0 means that
        node j is a cause of node i
    """
    # Clamp the edge count: at least num_node - 1 edges keep the graph
    # connected, while a DAG can hold at most n * (n - 1) / 2 edges
    num_edge = min(max(num_edge, num_node - 1), int(num_node * (num_node - 1) / 2))

    if rng is None:
        rng = np.random.default_rng()

    matrix = np.zeros((num_node, num_node))

    # Spanning phase: give every node except the sink (node 0) one edge
    # toward a lower-numbered node, so each node has a path to node 0
    for cause in range(1, num_node):
        result = rng.integers(low=0, high=cause)
        matrix[result, cause] = weight_generator(rng)

    # Fill phase: keep sampling node pairs (cause > result preserves the
    # DAG property) until the requested number of extra edges is placed
    remaining = num_edge - (num_node - 1)
    while remaining > 0:
        cause = rng.integers(low=1, high=num_node)
        result = rng.integers(low=0, high=cause)
        if not matrix[result, cause]:
            matrix[result, cause] = weight_generator(rng)
            remaining -= 1

    return matrix


def generate_case(
        weight: np.ndarray,
        length_normal: int = 1440,  # length of the normal phase, 1440 points (one day) by default
        fault_duration: int = 2,  # how many points the fault lasts
        length_abnormal: int = 10,  # length of the whole abnormal phase
        beta: float = 1e-1,  # temporal decay, controls the influence of the previous state
        tau: float = 3,  # fault-significance threshold, in multiples of the SLI's std
        sigmas: np.ndarray = None,  # per-node noise scales
        fault: np.ndarray = None,  # predefined fault vector, sampled if None
        rng: np.random.Generator = None,  # random generator, for reproducibility
) -> SimCase:
    """
    Generate a case with a normal phase followed by an abnormal phase.

    The series follows the VAR model x(t) = W (beta * x(t-1) + eps(t)),
    where W is the cumulative weight matrix and eps is Gaussian noise.
    During the fault window an extra offset (the fault vector) is added to
    the noise of the cause nodes.

    Parameters:
        weight: cumulative weight matrix covering all propagation paths

    Returns:
        A SimCase whose causes are (ENTITY, node_index) pairs, matching the
        (entity, metric) pairs that SimCase expects for its answer nodes.
    """
    # pylint: disable=too-many-arguments,too-many-locals
    if rng is None:
        rng = np.random.default_rng()
    # The abnormal phase must at least cover the fault itself
    length_abnormal = max(length_abnormal, fault_duration)

    num_node, _ = weight.shape
    # Preallocate the full series instead of np.append in a loop (O(n) vs O(n^2))
    data: np.ndarray = np.zeros((length_normal + length_abnormal, num_node))
    if sigmas is None:
        sigmas = rng.standard_exponential(num_node)

    # Initial node states, perturbed with random noise
    values = rng.standard_normal(num_node) * sigmas

    # Normal phase: x(t) = W (beta * x(t-1) + eps(t))
    for step in range(length_normal):
        values = weight @ (beta * values + rng.standard_normal(num_node) * sigmas)
        data[step, :] = values

    # SLI statistics over the normal phase, used to judge fault significance
    sli_mean: float = data[:length_normal, _SLI].mean()
    sli_sigma: float = data[:length_normal, _SLI].std()

    # Inject the fault.
    # e.g., fault = [0, 0, 0, 100, 0] means node 3 receives an offset of 100
    if fault is None:
        # Pick 1+ fault sources (Poisson-distributed, capped at num_node)
        num_causes = min(rng.poisson(1) + 1, num_node)
        causes = rng.choice(num_node, size=num_causes, replace=False)
        fault = np.zeros(num_node)
        alpha = rng.standard_exponential(size=num_causes)  # fault magnitudes
        epsilon = rng.standard_normal(num_node)  # fixed noise sample
        # Double the magnitudes until the fault visibly moves the SLI.
        # NOTE(review): this assumes the causes influence the SLI through
        # weight[_SLI, :] — guaranteed a path exists by generate_sedag's
        # spanning construction, but near-zero cancellation would loop long.
        while True:
            fault[causes] = alpha
            sli_value: float = np.dot(
                weight[_SLI, :], beta * values + (epsilon + fault) * sigmas
            )
            if abs(sli_value - sli_mean) > tau * sli_sigma:
                break
            alpha *= 2
    else:  # use the predefined fault
        causes: np.ndarray = np.where(fault)[0]
        assert causes.size  # there must be at least one fault source

    # Faulty steps: the fault offset is added to the noise
    for step in range(length_normal, length_normal + fault_duration):
        values = weight @ (
                beta * values + (rng.standard_normal(num_node) + fault) * sigmas
        )
        data[step, :] = values
    # Recovery steps after the fault ends
    for step in range(length_normal + fault_duration, length_normal + length_abnormal):
        values = weight @ (beta * values + rng.standard_normal(num_node) * sigmas)
        data[step, :] = values

    # Keep a copy of the raw series before standardization
    original_data = data.copy()

    # Standardize using statistics of the normal phase only
    scaler = StandardScaler().fit(data[:length_normal, :])
    data = np.around(scaler.transform(data), decimals=3)

    details = dict(
        fault=fault,  # fault vector
        sigmas=sigmas,  # noise scales
        stds=scaler.scale_,  # standardization scales
        weight=weight,  # weight matrix
    )

    # BUG FIX: causes must be (entity, metric) pairs — SimCase.__init__
    # builds Node(entity=cause[0], metric=str(cause[1])), so passing bare
    # node indexes (the previous behavior) raised a TypeError.
    return SimCase(
        data=data,
        original_data=original_data,
        causes={(ENTITY, int(cause)) for cause in causes},
        length_normal=length_normal,
        details=details,
    )


def generate(
        num_node: int, num_edge: int, num_cases: int = 100, rng: np.random.Generator = None
) -> SimDataset:
    """
    Generate a dataset consisting of a causal graph and several cases.

    Parameters:
        num_node: number of nodes in the graph
        num_edge: number of edges in the graph
        num_cases: number of cases to generate, 100 by default
        rng: random generator, for reproducibility
    """
    if rng is None:
        rng = np.random.default_rng()

    # Sample a weighted DAG, where matrix[i, j] != 0 means node j causes node i
    matrix = generate_sedag(num_node=num_node, num_edge=num_edge, rng=rng)

    # Accumulate weight = I + A + A^2 + ... + A^(num_node - 1), so the
    # weight matrix captures causal influence along paths of every length
    power = np.eye(num_node)
    weight = np.eye(num_node)
    for _ in range(1, num_node):
        power = power @ matrix
        weight = weight + power

    # Every case shares the same causal structure
    cases = [generate_case(weight=weight, rng=rng) for _ in range(num_cases)]

    # Translate nonzero adjacency entries into (cause, result) node pairs
    edges = (
        (
            Node(entity=ENTITY, metric=str(cause)),
            Node(entity=ENTITY, metric=str(result)),
        )
        for result, cause in zip(*np.where(matrix))
    )

    return SimDataset(cases=cases, graph=MemoryGraph(nx.DiGraph(edges)))
