import logging
import yaml
import os
import copy
from pathlib import Path
from typing import Dict, Any, Optional, Union, List, Tuple, Set
from concurrent.futures import ThreadPoolExecutor, as_completed
import pandas as pd
import json
import threading

from source.data_processor.loaders.base_loader import BaseDataLoader, AnomalyRecord
from source.detect.detection_manager import DetectionManager
from source.localize.graph.CompositeCausalGraph import CompositeCausalGraph
from source.localize.circa.runner import CircaRunner
from source.data_processor.loaders.loader_factory import LoaderFactory
from source.representation.art_miner import ARTMiner
from source.representation.system_feature import SystemFeatureVector

logger = logging.getLogger(__name__)
# Lock used to serialise log output across worker threads
log_lock = threading.Lock()


def extract_art_features(experiment_data, graph_provider, art_output_dir=None):
    """Extract SLD features using ARTMiner.

    Args:
        experiment_data: Experiment data object to analyse.
        graph_provider: Graph-structure provider; must expose a
            ``get_interface_dependencies`` method.
        art_output_dir: Optional directory where ART feature output is written.

    Returns:
        The ARTMiner analysis result on success, otherwise None.
    """
    if art_output_dir:
        os.makedirs(art_output_dir, exist_ok=True)

    # Bail out early if the provider cannot supply interface dependencies.
    if not hasattr(graph_provider, "get_interface_dependencies"):
        logger.warning(
            "图结构提供者缺少get_interface_dependencies方法，无法提取ART特征"
        )
        return None

    # Build the miner with its hyper-parameters and run the full pipeline.
    miner = ARTMiner(
        config={
            "batch_size": 32,
            "epochs": 1000,  # epoch budget chosen to keep processing time down
            "window_size": 6,
            "max_gap": 60,
        },
        output_dir=art_output_dir,
    )

    result = miner.analyze(experiment_data, graph_provider)

    # Only hand back the result when the miner reports success.
    if result.get("success", False):
        return result

    logger.warning(f"ART特征提取失败: {result.get('error', '未知错误')}")
    return None


class ProcessingWorkflow:
    """
    Workflow class coordinating the project's end-to-end data processing.

    Main capabilities:
    1. Data loading      - load anomaly records and experiment data from each source
    2. Anomaly detection - run metric and trace anomaly detection on experiment data
    3. Graph building    - build system-dependency and causal-relationship graphs
    4. Root cause        - run CIRCA root-cause localization
    5. Feature analysis  - extract system and instance features with ARTMiner

    Pipeline:
    └── Initialise configuration and output directories
        └── Process all data sources
            └── Load each source's anomaly records
                └── Process a single anomaly record
                    └── Load experiment data
                        └── Anomaly detection
                        └── Graph building
                        └── CIRCA root-cause analysis
                        └── ART feature extraction and analysis
    """

    def __init__(self, config: Dict[str, Any], output_base_dir: Union[str, Path]):
        """
        Initialise the processing workflow.

        Args:
            config: Configuration dict (data sources, processing parameters, ...).
            output_base_dir: Base directory under which all output is written.
        """
        logger.info("开始初始化处理工作流")
        self.config = config
        self.exp_id = config.get("exp_id", "default")
        self.output_dir = Path(output_base_dir) / f"run_{self.exp_id}"
        self.output_dir.mkdir(parents=True, exist_ok=True)

        # ===== Database (Neo4j) connection configuration =====
        neo4j_cfg = config.get("neo4j", {})
        self.neo4j_uri = neo4j_cfg.get("uri", "bolt://localhost:7687")
        self.neo4j_user = neo4j_cfg.get("user", "neo4j")
        self.neo4j_password = neo4j_cfg.get("password", "password")

        # ===== Processing-module configuration =====
        self.data_processing_config = config.get("data_processing", {})
        self.anomaly_detection_config = config.get("anomaly_detection", {})
        self.num_workers = self.data_processing_config.get("num_workers", 4)

        # ===== ART feature-extraction configuration =====
        self.art_mining_config = config.get(
            "art_mining",
            {
                "batch_size": 32,
                "epochs": 50,
                "window_size": 6,
                "max_gap": 60,
                "method": "num",
                "t_value": 3,
                "enabled": True,
                "return_vector": True,  # return a feature-vector object by default
            },
        )

        # ===== Parallel-processing configuration =====
        self.num_loader_workers = max(1, self.num_workers // 2)
        self.num_processor_workers = self.num_workers

        # ===== Organise data sources by dataset name =====
        self.organized_data_sources = self._organize_data_sources_by_name()

        logger.info(f"处理工作流初始化完成，实验ID: {self.exp_id}")

    def _organize_data_sources_by_name(self) -> Dict[str, List[Dict[str, Any]]]:
        """
        Group the configured data sources by dataset name (last path component).

        Returns:
            Dict[str, List[Dict[str, Any]]]: data-source configs keyed by dataset name.
        """
        organized_sources = {}
        data_sources = self.config.get("data_sources", [])

        for ds_config in data_sources:
            base_dir = ds_config.get("base_dir", "")
            if not base_dir:
                logger.warning(f"数据源配置缺少base_dir字段，跳过: {ds_config}")
                continue

            # Dataset name = last component of the base directory path
            dataset_name = Path(base_dir).name

            # Append this source under its dataset name
            if dataset_name not in organized_sources:
                organized_sources[dataset_name] = []
            organized_sources[dataset_name].append(ds_config)

            logger.info(f"数据集 '{dataset_name}' 添加了数据源: {base_dir}")

        logger.info(f"共组织了 {len(organized_sources)} 个数据集")
        return organized_sources

    #
    # ===== Part 1: data loading =====
    #

    def _load_datasource_anomaly_records(
        self, ds_config: Dict[str, Any], dataset_name: str
    ) -> Tuple[List[AnomalyRecord], Optional[BaseDataLoader], Path]:
        """
        Load the anomaly records of a single data source.

        Args:
            ds_config: Data-source configuration dict.
            dataset_name: Name of the dataset this source belongs to.

        Returns:
            Tuple[List[AnomalyRecord], Optional[BaseDataLoader], Path]:
            (anomaly records, loader instance or None, output directory)
        """
        ds_type = ds_config.get("type", "").lower()
        base_dir = ds_config.get("base_dir")

        # Name the output directory after the dataset, not the source type
        ds_output_dir = self.output_dir / dataset_name
        ds_output_dir.mkdir(parents=True, exist_ok=True)

        with log_lock:
            logger.info(f"开始加载数据集 {dataset_name} (类型: {ds_type}) 的异常记录")

        loader = LoaderFactory.create_loader(
            ds_config, ds_output_dir, self.data_processing_config
        )
        if not loader:
            with log_lock:
                logger.error(
                    f"无法为数据集 {dataset_name} (类型: {ds_type}) 创建加载器，跳过"
                )
            return [], None, ds_output_dir

        # NOTE(review): calls the loader's private _load_anomaly_records —
        # consider exposing a public accessor on BaseDataLoader.
        anomaly_records = loader._load_anomaly_records()

        if not anomaly_records:
            with log_lock:
                logger.warning(
                    f"数据集 {dataset_name} (类型: {ds_type}) 未找到任何异常记录"
                )
            return [], loader, ds_output_dir

        with log_lock:
            logger.info(
                f"数据集 {dataset_name} (类型: {ds_type}) 成功加载 {len(anomaly_records)} 个异常记录"
            )
        return anomaly_records, loader, ds_output_dir

    #
    # ===== Part 2: anomaly processing =====
    #

    def _process_single_task(
        self,
        loader: BaseDataLoader,
        anomaly_record: AnomalyRecord,
        ds_output_dir: Path,
        dataset_name: str,
    ) -> Tuple[str, Optional[Tuple]]:
        """
        Process one anomaly-record task.

        Args:
            loader: Data-loader instance.
            anomaly_record: Anomaly record to process.
            ds_output_dir: Output directory of the data source.
            dataset_name: Name of the dataset.

        Returns:
            Tuple[str, Optional[Tuple]]: (dataset name, result tuple or None)
        """
        anomaly_id = anomaly_record.anomaly_id

        with log_lock:
            logger.debug(f"[{dataset_name}] 开始处理异常 {anomaly_id}")

        experiment_data = loader.load_experiment_data_from_record(anomaly_record)
        if not experiment_data:
            with log_lock:
                logger.error(f"[{dataset_name}] 无法为异常 {anomaly_id} 加载实验数据")
            return dataset_name, None

        result = self._process_single_experiment_data(
            experiment_data, ds_output_dir, dataset_name
        )

        with log_lock:
            logger.info(f"[{dataset_name}] 异常 {anomaly_id} 处理完成")
        return dataset_name, result

    #
    # ===== Part 3: experiment-data processing =====
    #

    def _process_single_experiment_data(
        self,
        experiment_data: Any,
        data_source_output_dir: Path,
        dataset_name: str,
    ) -> Optional[Tuple]:
        """
        Process a single experiment-data object (one anomaly case).

        Args:
            experiment_data: Experiment data object.
            data_source_output_dir: Output directory of the data source.
            dataset_name: Name of the dataset.

        Returns:
            Optional[Tuple]: (metric detection result, trace detection result)
            or None when the data object is missing its anomaly_id.
        """
        if not hasattr(experiment_data, "anomaly_id"):
            with log_lock:
                logger.error(
                    f"[{dataset_name}] experiment_data 对象缺少 anomaly_id 属性"
                )
            return None

        anomaly_id = experiment_data.anomaly_id
        log_prefix = f"[{dataset_name}] "

        # ----- Step 1: create output directories -----
        anomaly_output_dir = data_source_output_dir / anomaly_id
        anomaly_output_dir.mkdir(parents=True, exist_ok=True)
        detection_output_dir = anomaly_output_dir / "detection_results"
        detection_output_dir.mkdir(parents=True, exist_ok=True)

        # ----- Step 2: anomaly detection -----
        logger.info(f"{log_prefix}开始为异常 {anomaly_id} 进行异常检测")

        detection_manager = DetectionManager(
            output_dir=str(detection_output_dir),
            metric_detector_params=self.anomaly_detection_config.get(
                "metric_detector", {}
            ),
            trace_detector_params=self.anomaly_detection_config.get(
                "trace_detector", {}
            ),
        )
        metric_result, trace_result = detection_manager.detect_anomalies(
            experiment_data
        )

        # ----- Step 3: build the composite causal graph -----
        graph_cache_dir = anomaly_output_dir / "cache"
        graph_cache_dir.mkdir(parents=True, exist_ok=True)

        with CompositeCausalGraph(
            uri=self.neo4j_uri,
            user=self.neo4j_user,
            password=self.neo4j_password,
            cache_dir=str(graph_cache_dir),
            output_dir=str(anomaly_output_dir),
        ) as composite_builder:
            logger.info(f"{log_prefix}开始为异常 {anomaly_id} 构建综合因果图")

            graph_built = composite_builder.build_composite_graph(
                experiment_data,
                metric_result,
                trace_result,
            )

            # No graph -> skip localization, but still return detection results
            if not graph_built:
                with log_lock:
                    logger.warning(
                        f"{log_prefix}未能为异常 {anomaly_id} 构建综合因果图，跳过后续定位"
                    )
                return (metric_result, trace_result)

            logger.info(f"{log_prefix}异常 {anomaly_id} 的综合因果图构建完成")

            # ----- Step 4: CIRCA root-cause analysis -----
            circa_output_dir = anomaly_output_dir / "circa"
            circa_output_dir.mkdir(parents=True, exist_ok=True)

            # Ground-truth root cause, if the record carries one
            ground_truth_causes = set()
            if hasattr(experiment_data, "anomaly_component") and hasattr(
                experiment_data, "anomaly_reason"
            ):
                ground_truth_causes = {
                    (experiment_data.anomaly_component, experiment_data.anomaly_reason)
                }
                with log_lock:
                    logger.info(
                        f"{log_prefix}异常 {anomaly_id} 的真实根因: {experiment_data.anomaly_component} - {experiment_data.anomaly_reason}"
                    )

            # CIRCA parameter configuration shared by both runs
            circa_cfg = self.config.get("circa", {})
            base_circa_params = {
                "ground_truth_causes": ground_truth_causes,
                "output_dir": str(circa_output_dir),
                "report_dir": str(circa_output_dir),
                "max_workers": circa_cfg.get(
                    "max_workers", self.num_processor_workers // 2 or 1
                ),
                "model_params_path": circa_cfg.get("model_params_path"),
                "seed": circa_cfg.get("seed", 42),
                "comparison_delay": circa_cfg.get("comparison_delay"),
                "cuda": circa_cfg.get("cuda", False),
            }

            # Run CIRCA on the pre-anomaly graph
            if composite_builder.pre_anomaly_graph:
                with log_lock:
                    logger.info(
                        f"{log_prefix}开始为异常 {anomaly_id} 执行CIRCA根因定位 (故障前图)"
                    )

                runner_pre = CircaRunner(
                    experiment_data=experiment_data,
                    causal_graph=composite_builder.pre_anomaly_graph,
                    **base_circa_params,
                )
                runner_pre.run_experiment()

                with log_lock:
                    logger.info(
                        f"{log_prefix}异常 {anomaly_id} 的CIRCA根因定位完成 (故障前图)"
                    )

            # Run CIRCA on the post-anomaly graph
            if composite_builder.post_anomaly_graph:
                logger.info(
                    f"{log_prefix}开始为异常 {anomaly_id} 执行CIRCA根因定位 (故障后图)"
                )
                # Deep-copy so the post run gets its own suffixed anomaly_id
                # without mutating the original experiment data
                post_experiment_data = copy.deepcopy(experiment_data)
                post_experiment_data.anomaly_id = f"{anomaly_id}_post_anomaly"
                runner_post = CircaRunner(
                    experiment_data=post_experiment_data,
                    causal_graph=composite_builder.post_anomaly_graph,
                    **base_circa_params,
                )
                runner_post.run_experiment()
                logger.info(
                    f"{log_prefix}异常 {anomaly_id} 的CIRCA根因定位完成 (故障后图)"
                )

            # ----- Step 5: ART feature extraction -----
            if (
                self.art_mining_config.get("enabled", True)
                and composite_builder.art_features is None
            ):
                logger.info(f"{log_prefix}开始使用ARTMiner提取系统状态特征")

                art_output_dir = anomaly_output_dir / "art_features"
                art_output_dir.mkdir(parents=True, exist_ok=True)

                # Choose which graph acts as the graph-structure provider
                graph_provider = None
                if composite_builder.pre_anomaly_graph and hasattr(
                    composite_builder.pre_anomaly_graph, "get_interface_dependencies"
                ):
                    graph_provider = composite_builder.pre_anomaly_graph
                    with log_lock:
                        logger.info(
                            f"{log_prefix}使用故障前图作为ART特征提取的图结构提供者"
                        )

                if graph_provider:
                    # Extract ART features
                    art_features = extract_art_features(
                        experiment_data,
                        graph_provider,
                        art_output_dir=art_output_dir,
                    )

                    # Store features on composite_builder for compatibility
                    composite_builder.art_features = art_features
                else:
                    logger.warning(
                        f"{log_prefix}未找到有效的图结构提供者，无法提取ART特征"
                    )

                logger.info(f"{log_prefix}ARTMiner系统状态特征提取完成")

        return (metric_result, trace_result)

    #
    # ===== Part 4: main processing flow =====
    #

    def _process_dataset(
        self, dataset_name: str, data_sources: List[Dict[str, Any]]
    ) -> Tuple[str, List[Tuple]]:
        """
        Process a single dataset; designed to run on a worker thread.

        Args:
            dataset_name: Name of the dataset.
            data_sources: List of data-source configuration dicts.

        Returns:
            Tuple[str, List[Tuple]]: (dataset name, list of processing results)
        """
        with log_lock:
            logger.info(f"开始处理数据集: {dataset_name}")

        # Gather records from every data source of this dataset
        all_records = []
        current_loader = None
        ds_output_dir = None

        for ds_config in data_sources:
            records, loader, output_dir = self._load_datasource_anomaly_records(
                ds_config, dataset_name
            )
            if records and loader:
                all_records.extend(records)
                current_loader = loader  # keep the last successful loader
                ds_output_dir = output_dir

        if not all_records or not current_loader or not ds_output_dir:
            with log_lock:
                logger.warning(
                    f"数据集 {dataset_name} 没有有效的异常记录或加载器，跳过"
                )
            return dataset_name, []

        # Process every anomaly record
        results = []

        # Process anomaly records in parallel on a thread pool
        with ThreadPoolExecutor(max_workers=self.num_processor_workers) as executor:
            # Submit all tasks
            future_to_record = {
                executor.submit(
                    self._process_single_task,
                    current_loader,
                    record,
                    ds_output_dir,
                    dataset_name,
                ): record
                for record in all_records
            }

            # Collect results as they complete
            for future in as_completed(future_to_record):
                _, result = future.result()
                if result is not None:
                    results.append(result)

        with log_lock:
            logger.info(f"数据集 {dataset_name} 处理完成，共 {len(results)} 个结果")

        return dataset_name, results

    def process_all_data_sources(self) -> Dict[str, List[Tuple]]:
        """
        Process the anomaly records of all data sources - main entry point.

        Flow:
        1. Load all data sources' anomaly records, grouped by dataset name.
        2. Process each dataset in parallel.
        3. Collect and summarise the processing results.

        Returns:
            Dict[str, List[Tuple]]: processing results grouped by dataset name.
        """
        logger.info("开始处理所有数据集")
        results = {}

        if not self.organized_data_sources:
            logger.warning("未找到任何数据集")
            return results

        # Cap the worker count at the number of datasets
        effective_workers = min(
            self.num_loader_workers, len(self.organized_data_sources)
        )

        # Process datasets in parallel on a thread pool
        with ThreadPoolExecutor(max_workers=effective_workers) as executor:
            # Submit all tasks
            future_to_dataset = {
                executor.submit(
                    self._process_dataset, dataset_name, data_sources
                ): dataset_name
                for dataset_name, data_sources in self.organized_data_sources.items()
            }

            # Collect results as they complete
            for future in as_completed(future_to_dataset):
                dataset_name, dataset_results = future.result()
                if dataset_results:
                    results[dataset_name] = dataset_results

        # Emit summary statistics
        total_anomalies = sum(len(ds_results) for ds_results in results.values())
        logger.info(
            f"所有数据集处理完成，共 {len(results)} 个数据集，{total_anomalies} 个处理结果"
        )

        for dataset_name, dataset_results in results.items():
            logger.info(f"数据集 '{dataset_name}': {len(dataset_results)} 个处理结果")

        return results


# Module-level factory kept outside the class so callers can build a
# workflow directly from a YAML configuration file path.
def create_processing_workflow(
    config_path: Union[str, Path], output_base_dir: Union[str, Path]
) -> Optional[ProcessingWorkflow]:
    """Create a ProcessingWorkflow instance from a YAML configuration file.

    Args:
        config_path: Path to the YAML configuration file.
        output_base_dir: Base directory for all workflow output.

    Returns:
        A configured ProcessingWorkflow, or None if the configuration file
        is missing, unreadable, malformed, or empty.
    """
    config_path = Path(config_path)
    # Declared return type is Optional: report config problems as None
    # instead of letting I/O or parse errors escape to the caller.
    try:
        with open(config_path, "r", encoding="utf-8") as f:
            config = yaml.safe_load(f)
    except (OSError, yaml.YAMLError) as e:
        logger.error(f"无法读取配置文件 {config_path}: {e}")
        return None

    if not config:
        logger.error(f"配置文件 {config_path} 为空或格式无效")
        return None

    return ProcessingWorkflow(config=config, output_base_dir=Path(output_base_dir))
