"""
Common utilities
"""
from concurrent.futures import ProcessPoolExecutor
from concurrent.futures import as_completed
import logging
from multiprocessing import Process
from multiprocessing import Queue
import os
import queue
from typing import Dict
from typing import List
from typing import Sequence
from typing import Set
from typing import Tuple

import numpy as np
from scipy.stats import norm
from sklearn.preprocessing import StandardScaler

from .base import Score
from .base import Scorer
from ..graph import GraphFactory
from ..model.case import Case
from ..model.case import CaseData
from ..model.graph import Graph
from ..model.graph import LoadingInvalidGraphException
from ..model.graph import Node
from ..utils import dump_json
from ..utils import load_json
from ..utils import dump_csv  # add dump_csv import
from ..utils import load_csv  # add load_csv import
from ..utils import require_logging
from tqdm.auto import tqdm
import time
from .Evaluation import Evaluation

def pearson(series_a: np.ndarray, series_b: np.ndarray) -> float:
    """
    Pearson correlation coefficient between two series.

    Returns 0 when either series is constant (zero standard deviation),
    where the coefficient would otherwise be undefined.
    """
    std_first = series_a.std()
    std_second = series_b.std()
    if std_first == 0 or std_second == 0:
        return 0
    # Sum of products of deviations, normalized by both scales and the length
    deviation_products: np.ndarray = (series_a - series_a.mean()) * (
        series_b - series_b.mean()
    )
    return deviation_products.sum() / (std_first * std_second * len(series_a))


def zscore(train_y: np.ndarray, test_y: np.ndarray) -> np.ndarray:
    """
    Measure how strongly each value in test_y violates the normal
    distribution estimated from train_y, in units of standard deviations.
    """
    # Fit mean/scale on the training data, then standardize the test data
    normalizer = StandardScaler()
    normalizer.fit(train_y.reshape(-1, 1))
    return normalizer.transform(test_y.reshape(-1, 1))[:, 0]


def zscore_conf(score: float) -> float:
    """
    Two-sided confidence that the given z-score is abnormal.

    Equals 2 * Phi(|score|) - 1: 0 for score == 0, approaching 1 as
    |score| grows.
    """
    tail_probability = norm.cdf(-abs(score))
    return 1 - 2 * tail_probability


class DecomposableScorer(Scorer):
    """
    Base class for scorers whose total score decomposes per node.

    Each node's anomaly degree is evaluated independently, so the work
    can be split across worker processes.
    """

    def score_node(
        self,
        graph: Graph,            # causal graph
        series: Dict[Node, Sequence[float]],  # time series of each node
        node: Node,             # node to be scored
        data: CaseData,         # case data
    ) -> Score:
        """
        Evaluate the anomaly degree of a single node.

        Subclasses implement the concrete scoring strategy.
        """
        raise NotImplementedError

    def _score(
        self,
        candidates: Sequence[Node],    # nodes to be scored
        series: Dict[Node, Sequence[float]],  # time series of each node
        graph: Graph,                  # causal graph
        data: CaseData,               # case data
    ) -> Dict[Node, Score]:
        """
        Score a batch of nodes one by one.

        Used as the sub-task body when scoring is parallelized; nodes whose
        score is None are omitted from the result.
        """
        results: Dict[Node, Score] = {}
        # Evaluate each node with a tqdm progress bar (original comment
        # claims the configuration is process-safe)
        with tqdm(
            candidates,
            desc="Scoring nodes",
            ncols=100,
            position=0,
            leave=True,
            mininterval=0.5
        ) as pbar:
            for node in pbar:
                score = self.score_node(graph, series, node, data)
                if score is not None:
                    results[node] = score
                # Show the metric of the node just processed
                pbar.set_postfix_str(f"Node: {node.metric}")
        return results

    def score(
        self,
        graph: Graph,
        data: CaseData,
        current: float,
        scores: Dict[Node, Score] = None,
    ) -> Dict[Node, Score]:
        """
        Score all candidate nodes.

        Workflow:
        1. Load the time series data.
        2. Determine the candidate nodes (all nodes, or the keys of the
           previous scorer's result).
        3. Score them in parallel or serially, depending on _max_workers.
        4. Return new scores, or fold them into the given ones.
        """
        series = data.load_data(graph)
        # NOTE(review): candidates come from series['normalized'], yet
        # score_node implementations below index `series` by Node directly --
        # confirm the structure returned by data.load_data.
        candidates = list(series['normalized'].keys()) if scores is None else list(scores.keys())

        # Parallel when at least two workers are configured
        if self._max_workers >= 2:
            results: Dict[Node, Score] = {}
            with ProcessPoolExecutor(max_workers=self._max_workers) as executor:
                # Round-robin split of the candidates across the workers
                tasks = [
                    executor.submit(
                        require_logging(self._score),
                        candidates=candidates[i :: self._max_workers],
                        series=series,
                        graph=graph,
                        data=data,
                    )
                    for i in range(self._max_workers)
                ]
                # Merge partial results as each worker finishes
                for task in as_completed(tasks):
                    results.update(task.result())
        else:  # serial fallback
            results = self._score(
                candidates=candidates, series=series, graph=graph, data=data
            )

        # Fresh run: return the new scores directly
        if scores is None:
            return results
        # Otherwise merge into the existing scores via Score.update
        return {node: scores[node].update(score) for node, score in results.items()}


class NSigmaScorer(DecomposableScorer):
    """
    N-sigma scorer.

    Standard-deviation (sigma) based anomaly detection for time series:

    1. The training window establishes a baseline of normal behaviour.
    2. The test window is converted into z-scores against that baseline.
    3. An aggregation function condenses the absolute z-scores into one
       anomaly score.
    """

    def score_node(
        self,
        graph: Graph,                           # causal graph
        series: Dict[Node, Sequence[float]],    # time series of every node
        node: Node,                             # node to be scored
        data: CaseData,                         # case data
    ) -> Score:
        values = np.array(series[node])
        # Leading training window: baseline of normal behaviour
        baseline: np.ndarray = values[: data.train_window]
        # Trailing test window: the part checked for anomalies
        window: np.ndarray = values[-data.test_window :]
        # Aggregate (e.g. max or mean) the absolute z-scores into one value
        aggregated = self._aggregator(abs(zscore(baseline, window)))
        result = Score(aggregated)
        # Keep the raw aggregated z-score for downstream consumers
        result["z-score"] = aggregated
        return result


class Model:
    """
    A complete root-cause-analysis model: a graph factory combined with a
    pipeline of scorers.
    """

    def __init__(
        self,
        graph_factory: GraphFactory,    # produces the causal graph
        scorers: Sequence[Scorer],      # scorers applied in order
        names: Tuple[str, ...] = None,  # optional custom names: (graph, scorer1, ...)
    ):
        """
        Compose the model, naming it "GraphFactory-Scorer1-Scorer2-...".

        Missing custom names fall back to the corresponding class names.

        Raises:
            ValueError: if scorers is empty.
        """
        if not scorers:
            raise ValueError("Please provide at least one scorer")
        self._graph_factory = graph_factory
        self._scorers = scorers
        # Pad the custom names with None: one slot for the graph factory
        # plus one per scorer
        padded = list(names) if names is not None else []
        padded += [None] * (1 + len(scorers) - len(padded))
        # Graph factory name (falsy custom name falls back to the class name)
        self._name_graph = padded[0] or type(graph_factory).__name__
        # Scorer names (only a None custom name falls back)
        self._names_scorer = []
        for label, scorer in zip(padded[1:], scorers):
            self._names_scorer.append(
                type(scorer).__name__ if label is None else label
            )
        self._name = "-".join([self._name_graph, *self._names_scorer])

    @staticmethod
    def dump(scores: Dict[Node, Score], filename: str):
        """
        Serialize node scores into a JSON file.

        scores: mapping from node to its score
        filename: path of the output file
        """
        records = [
            {"node": node.asdict(), "score": score.asdict()}
            for node, score in scores.items()
        ]
        dump_json(filename=filename, data=records)

    @staticmethod
    def load(filename: str) -> Dict[Node, Score]:
        """
        Deserialize node scores from a JSON file written by dump.

        filename: path of the input file
        Returns: mapping from node to its score
        """
        records: List[Dict[str, dict]] = load_json(filename)
        return {
            Node(**record["node"]): Score(**record["score"])
            for record in records
        }

    @property
    def name(self) -> str:
        """The composed model name."""
        return self._name

    def analyze(
        self, case: Case, current: float, output_dir: str = None
    ) -> List[Tuple[Node, Score]]:
        """
        Run root cause analysis for a single case.

        Parameters:
            case: the case to analyze
            current: the current timestamp
            output_dir: reserved for caching intermediate results

        Returns:
            (node, score) pairs sorted by score in descending order.
        """
        data = case.data
        graph = case.graph

        # Apply the scorers in sequence; each one may refine the scores
        # produced by its predecessor
        scores: Dict[Node, Score] = None
        for scorer in self._scorers:
            scores = scorer.score(
                graph=graph, data=data, current=current, scores=scores
            )
        # Rank by descending score
        return sorted(scores.items(), key=lambda item: item[1].key, reverse=True)


def _analyze(
    consumer: Queue, model: Model, case: Case, current: float, output_dir: str
):
    try:
        ranks = model.analyze(
            case=case,
            current=current,
            output_dir=output_dir,
        )
        # 确保在进程结束前数据被写入队列
        consumer.put(ranks)
        # 添加一个小延迟确保数据写入
        time.sleep(0.1)
    except LoadingInvalidGraphException:
        consumer.put([])
        time.sleep(0.1)
    except Exception as e:
        logger = logging.getLogger(__name__)
        logger.error("Analysis failed: %s", str(e))
        consumer.put([])
        time.sleep(0.1)

def evaluate(
    model: Model,
    cases: Sequence[Case],
    delay: int = 300,
    output_dir: str = None,
    timeout: int = 5,
) -> Evaluation:
    """
    Evaluate the performance of a scorer combination over a set of cases.

    Workflow:
    1. Try to load previously cached results.
    2. Analyze each case in a child process, bounded by `timeout`.
    3. Collect the rankings into an Evaluation report, persisting it
       incrementally when output_dir is given.

    Parameters:
        model: the model to evaluate
        cases: the test cases
        delay: seconds added to each case's detect_time to pick the analysis time
        output_dir: directory for cached results; None disables caching
        timeout: seconds to wait for a case's result before giving up
    """
    # Logger name includes the model name to tell concurrent runs apart
    logger = logging.getLogger(
        ".".join([evaluate.__module__, evaluate.__name__, model.name])
    )
    # Fresh evaluation report
    report = Evaluation()
    
    # If an output directory is given, try to load cached results first
    if output_dir is not None:
        # A CSV file is the primary storage format for results
        output_filename = os.path.join(output_dir, f"{model.name}.csv")
        
        if os.path.exists(output_filename):
            # Cached results exist: load and return them directly
            report.load(output_filename, [case.answer for case in cases])
            return report
        # Ensure the output directory exists
        os.makedirs(output_dir, exist_ok=True)

    # Analyze every test case
    for index, case in enumerate(cases):
        logger.debug("Analyze case %d", index)
        case_output_dir = None
        if output_dir is not None:
            case_output_dir = os.path.join(output_dir, str(index))

        # Queue for receiving the ranking from the child process
        consumer = Queue()
        # Run the analysis in a child process so a hung case can be killed
        task = Process(
            target=require_logging(_analyze),
            kwargs=dict(
                consumer=consumer,
                model=model,
                case=case,
                current=case.data.detect_time + delay,
                output_dir=case_output_dir,
            ),
        )
        
        # Start the analysis process
        task.start()
        
        try:
            # Fetch the result first (this is what the timeout bounds) ...
            ranks = consumer.get(timeout=timeout)
            # ... then reap the child process.
            # NOTE(review): if this join times out, the child keeps running
            # unterminated -- confirm whether it should be killed here
            task.join(timeout=1)
        except queue.Empty:
            # No result within the timeout: kill the child process
            logger.warning("Timeout for case %d", index)
            task.terminate()
            task.join()
            ranks = []
        except Exception as e:
            # Any other failure while waiting for the result
            logger.error("Error processing case %d: %s", index, str(e))
            task.terminate()
            task.join()
            ranks = []
        
        # Pass the full (node, score) tuples so score values are preserved
        report(ranks=ranks, answers=case.answer)
        
        # Persist the report incrementally after every case
        if output_dir is not None:
            report.dump(output_filename)
    
    return report
