"""
Compare algorithm combinations
"""

from concurrent.futures import ProcessPoolExecutor
from concurrent.futures import as_completed
import logging
from multiprocessing import Process
import os
from typing import Dict
from typing import List
from typing import Optional
from typing import Set
from typing import Tuple

from ...alg.common import Evaluation
from ...alg.common import Model
from ...alg.common import evaluate
from ...graph import GraphFactory
from ...model.case import Case
from ...utils import Timer
from ...utils import dump_csv
from ...utils import dump_json
from ...utils import load_csv
from ...utils import require_logging


# Method name used for the ground-truth "ideal" baseline row in reports.
IDEAL_METHOD = "GT-Ideal"



def report_evaluation(name: str, report: "Evaluation", duration: Optional[float] = None):
    """
    Return a tuple of evaluation metrics.

    (name, AC@1, AC@3, AC@5, Avg@5, EP-AC@1, EP-AC@3, EP-AC@5, EP-Avg@5,
     Avg-Rank, Prefix-First-Pos[, duration])

    Args:
        name: display name of the evaluated method.
        report: accumulated evaluation results to read the metrics from.
        duration: optional average per-case runtime in seconds; appended as
            the last element only when provided.

    Returns:
        The tuple of metric values listed above.
    """
    score = (
        name,
        report.accuracy(1),
        report.accuracy(3),
        report.accuracy(5),
        report.average(5),
        report.entity_prefix_accuracy(1),
        report.entity_prefix_accuracy(3),
        report.entity_prefix_accuracy(5),
        report.entity_prefix_average(5),
        report.avg_rank_position,
        report.avg_prefix_first_position,
    )
    if duration is not None:
        score += (duration,)
    return score


def report_ideal(cases: List[Case], name: str = IDEAL_METHOD):
    """
    Build the score tuple of a hypothetical ideal algorithm.

    The ideal algorithm ranks exactly the ground-truth answers for every
    case, so its scores are an upper bound for all real methods.
    """
    evaluation = Evaluation()
    for case in cases:
        evaluation(ranks=list(case.answer), answers=case.answer)
    return report_evaluation(name, evaluation)


def _wrap_ideal_report(scores: List[tuple], cases: List[Case]):
    """Ensure the score list contains the ideal-algorithm baseline row.

    Args:
        scores: existing score tuples; the first element of each tuple is
            the method name.
        cases: test cases used to compute the ideal baseline when missing.

    Returns:
        ``scores`` unchanged if the baseline is already present, otherwise
        a new list equal to ``scores`` plus the ideal baseline row.
    """
    already_present = any(score[0] == IDEAL_METHOD for score in scores)
    if already_present:
        return scores
    return scores + [report_ideal(cases=cases, name=IDEAL_METHOD)]


def _save_scores(scores: List[tuple], report_filename: str):
    """Write the cumulative score tuples to ``report_filename`` as CSV.

    Only the first 11 columns of each score tuple are written; a trailing
    per-model duration (when present) is dropped from the report.
    """
    # os.makedirs("") raises FileNotFoundError, so only create the directory
    # when the filename actually has one (e.g. not for a bare "report.csv").
    directory = os.path.dirname(report_filename)
    if directory:
        os.makedirs(directory, exist_ok=True)

    # Remove a stale report before writing the fresh one.
    if os.path.exists(report_filename):
        os.remove(report_filename)

    dump_csv(
        filename=report_filename,
        data=[score[:11] for score in scores],  # drop the trailing duration column
        headers=[
            "method",
            "AC@1",
            "AC@3",
            "AC@5",
            "Avg@5",
            "EP-AC@1",
            "EP-AC@3",
            "EP-AC@5",
            "EP-Avg@5",
            "Avg-Rank",
            "Prefix-First-Pos",
        ],
    )


def _evaluate(models: List[Model], cases: List[Case], report_filename: str, **kwargs):
    """Evaluate each model on the given cases and persist scores incrementally.

    Args:
        models: models to evaluate, in order.
        cases: test cases forwarded to ``evaluate``.
        report_filename: CSV file the cumulative scores are written to after
            every model, so partial results survive interruption.
        **kwargs: extra keyword arguments forwarded to ``evaluate``.

    Returns:
        The list of score tuples produced by ``report_evaluation``, one per
        model, each ending with the average per-case duration in seconds.
    """
    scores: List[tuple] = []
    # Use at least 1 to avoid division by zero when there are no cases.
    num_cases = max(len(cases), 1)

    for model in models:
        name = model.name
        with Timer(name=name) as timer:
            report = evaluate(model, cases, **kwargs)
            duration = timer.duration
        scores.append(report_evaluation(name, report, duration.total_seconds() / num_cases))
        # Persist after each model so a crash loses at most one result.
        _save_scores(scores, report_filename)
    return scores


def _load_cached_report(report_filename: str, models: List[Model], cases: List[Case]):
    """Load previously computed evaluation scores, if any.

    Prefers the aggregated report file; when it is absent, falls back to
    rebuilding scores from each model's individual ``<name>.csv`` file in
    the same directory.

    Returns:
        A ``(scores, remaining_models)`` pair: the cached score tuples and
        the models that still need to be evaluated.
    """
    scores: List[tuple] = []
    seen: Set[str] = set()
    directory = os.path.dirname(report_filename)

    if os.path.exists(report_filename):
        # Aggregated report exists: read it back directly.
        reader = load_csv(report_filename)
        next(reader)  # skip the header row
        for row in reader:
            # Keep the method name as-is; convert every other field to float.
            converted = tuple(
                value if index == 0 else float(value)
                for index, value in enumerate(row)
            )
            scores.append(converted)
            seen.add(row[0])
    else:
        # Rebuild scores from the per-model CSV files.
        for model in models:
            csv_path = os.path.join(directory, f"{model.name}.csv")
            if not os.path.exists(csv_path):
                continue
            report = Evaluation()
            report.load(csv_path, [case.answer for case in cases])

            raw = report_evaluation(model.name, report)
            # Normalize every metric value (but not the name) to float.
            converted_values = [raw[0]]
            for value in raw[1:]:
                converted_values.append(float(value) if value is not None else value)
            scores.append(tuple(converted_values))
            seen.add(model.name)

    remaining = [model for model in models if model.name not in seen]
    return scores, remaining


def run(
    models: List[Model],  # models to compare
    cases: List[Case],  # test cases to evaluate on
    output_dir: str = None,  # output directory for intermediate results
    report_filename: str = "report.csv",  # path of the summary CSV report
    delay: int = 300,  # seconds between fault detection and analysis start
    max_workers: int = 1,  # number of worker processes
    timeout: int = 3600,  # per-task timeout in seconds
):
    """
    Compare the performance of the given models on the same test cases.

    Evaluates every model (optionally in parallel across processes), saves
    the cumulative summary report incrementally, and finally appends the
    ideal (ground-truth) baseline row before the last save.

    Metric notes:
    - AC@K: top-K accuracy — fraction of cases whose true root cause appears
      in the model's top-K results.
    - Avg@5: average over the top-5 ranks, reflecting ranking quality.
    - avg duration: average per-case runtime, reflecting efficiency.

    Returns:
        The list of score tuples, including the ideal baseline row.
    """
    # Common keyword arguments forwarded to every evaluation call.
    params = dict(delay=delay, output_dir=output_dir, timeout=timeout)

    # NOTE(review): cached-report loading via _load_cached_report was disabled
    # here, so every model is re-evaluated from scratch on each run.
    scores = []

    if max_workers >= 2:
        # Parallel evaluation: split the models round-robin across processes.
        # NOTE(review): each worker also writes report_filename incrementally
        # inside _evaluate; concurrent partial writes may transiently clobber
        # each other, but the saves below restore the full picture.
        with ProcessPoolExecutor(max_workers=max_workers) as executor:
            tasks = [
                executor.submit(
                    require_logging(_evaluate),
                    models=models[i::max_workers],
                    cases=cases,
                    report_filename=report_filename,
                    **params,
                )
                for i in range(max_workers)
            ]
            for task in as_completed(tasks):
                scores += task.result()
                # Persist after each finished batch of models.
                _save_scores(scores, report_filename)
    else:
        # Single-process evaluation; _evaluate already saves incrementally.
        scores += _evaluate(
            models=models,
            cases=cases,
            report_filename=report_filename,
            **params,
        )

    # Append the ideal (ground-truth) baseline and save the final report.
    scores = _wrap_ideal_report(scores, cases)
    _save_scores(scores, report_filename)
    return scores
