import logging
import os
from linecache import cache
import numpy as np
import json

import pandas as pd
import pickle
import threading
from abc import ABC, abstractmethod
from typing import List, Any, Optional, Dict, Union, Set, Tuple
from pathlib import Path
from datetime import datetime, timedelta


from source.data_processor.utils.experiment_data import ExperimentData
from source.public.config_manager import ConfigManager
from source.data_processor.utils.time_utils import ensure_timezone
from concurrent.futures import ThreadPoolExecutor, as_completed


# Basic logging configuration for this module.
# NOTE(review): calling basicConfig at import time configures the root logger
# for the whole process — applications embedding this module may want to own
# logging setup themselves; confirm before changing.
logging.basicConfig(
    level=logging.INFO, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s"
)
logger = logging.getLogger(__name__)


class AnomalyRecord:
    """
    Normalized fault/anomaly record passed between the different data loaders.

    Carries the identifying information of one fault occurrence plus optional
    metadata such as the affected component ID, the related metric and the
    raw-data folder path.
    """

    def __init__(
        self,
        anomaly_id: str,
        unix_timestamp: int,
        datetime_obj: datetime,
        level: str,
        reason: str,
        component: str,
        cmdb_id: Optional[str] = None,
        metric: Optional[str] = None,
        path: Optional[str] = None,
        extra_info: Optional[Dict] = None,
    ):
        """
        Initialize an anomaly record.

        Args:
            anomaly_id: Anomaly ID, usually "<component>_<timestamp>".
            unix_timestamp: Fault occurrence time as a UNIX timestamp (seconds).
            datetime_obj: Fault occurrence time as a datetime object.
            level: Fault level (container, service, node, ...).
            reason: Fault reason.
            component: Component involved in the fault.
            cmdb_id: Optional component identifier.
            metric: Optional metric related to the fault.
            path: Optional path of the raw-data folder for this fault.
            extra_info: Optional extra metadata; defaults to an empty dict.
        """
        self.anomaly_id = anomaly_id
        self.unix_timestamp = unix_timestamp
        self.datetime_obj = datetime_obj
        self.level = level
        self.reason = reason
        self.component = component
        self.cmdb_id = cmdb_id
        self.metric = metric
        self.path = path  # raw-data folder path for this fault, if known
        self.extra_info = extra_info or {}

    # Attribute names exported by to_dict, in canonical order.
    _FIELDS = (
        "anomaly_id",
        "unix_timestamp",
        "datetime_obj",
        "level",
        "reason",
        "component",
        "cmdb_id",
        "metric",
        "path",
        "extra_info",
    )

    def to_dict(self) -> Dict:
        """Return the record as a plain dict keyed by attribute name."""
        return {name: getattr(self, name) for name in self._FIELDS}

    def __str__(self) -> str:
        """Compact human-readable representation."""
        return "AnomalyRecord(id={}, timestamp={}, component={})".format(
            self.anomaly_id, self.unix_timestamp, self.component
        )

    # repr mirrors str for easier debugging.
    __repr__ = __str__


class BaseDataLoader(ABC):
    """
    Abstract base class for dataset-specific data loaders.

    Concrete subclasses implement load_metrics/load_dependencies; this base
    class provides observation-window orchestration, missing-timepoint
    filling, dataset-level details.json bookkeeping, and per-anomaly pickle
    caching. Instance-level locks (file_lock, details_lock) serialize cache
    and details.json access within one loader instance.
    """

    def __init__(
        self,
        base_data_path: str,
        output_dir: str,
        use_cache: bool = True,
        cache_prefix: str = "base_loader",
        record_file_name: str = "record.csv",
    ):
        """
        Initialize the data loader.

        Args:
            base_data_path: Root directory of the raw dataset.
            output_dir: Directory where processed data and caches are written.
            use_cache: Whether to read/write pickle caches of experiment data.
            cache_prefix: File-name prefix for per-anomaly cache files.
            record_file_name: Name of the anomaly record file, 'record.csv' by default.

        Raises:
            FileNotFoundError: If base_data_path does not exist.
        """
        self.base_data_path = Path(base_data_path)
        # Validate input before any side effects: previously the output
        # directory was created first, so a bad base path still left a stray
        # empty output directory behind.
        if not self.base_data_path.exists():
            raise FileNotFoundError(f"基础数据路径不存在: {self.base_data_path}")

        self.output_dir = Path(output_dir)
        self.output_dir.mkdir(parents=True, exist_ok=True)
        self.use_cache = use_cache
        self.cache_prefix = cache_prefix
        self.record_path = self.base_data_path / record_file_name
        self.details_path = self.base_data_path / "details.json"
        self.config_manager = ConfigManager()  # shared accessor for window/sampling settings

        # Instance-specific locks so independent dataset loaders never contend.
        self.file_lock = threading.Lock()
        self.details_lock = threading.Lock()

        # Unique instance identifier, used to disambiguate log lines.
        self.instance_id = f"{self.__class__.__name__}_{id(self)}"

        logger.info(
            f"BaseDataLoader已初始化。实例ID: {self.instance_id}, 输出目录: {self.output_dir}, 使用缓存: {self.use_cache}, 故障记录文件: {self.record_path}"
        )

    @abstractmethod
    def load_metrics(
        self,
        start_time: datetime,
        end_time: datetime,
        record: AnomalyRecord,
        dependencies_df: Optional[pd.DataFrame] = None,
    ) -> pd.DataFrame:
        """
        Load metric data for the given observation window.

        Args:
            start_time: Window start.
            end_time: Window end.
            record: Anomaly record the window was built around.
            dependencies_df: Optional dependency data; implementations may
                use it to scope which metrics to load.

        Returns:
            A metrics DataFrame for the window (long format expected by
            _fill_missing_timepoints: timestamp/cmdb_id/metric_name/value/
            entity_type columns).
        """
        pass

    @abstractmethod
    def load_dependencies(
        self, start_time: datetime, end_time: datetime, record: AnomalyRecord
    ) -> pd.DataFrame:
        """
        Load dependency (topology) data for the given observation window.

        Args:
            start_time: Window start.
            end_time: Window end.
            record: Anomaly record the window was built around.

        Returns:
            A DataFrame of dependency relations for the window.
        """
        pass

    def _load_anomaly_records(self) -> List[AnomalyRecord]:
        """
        Load the anomaly records for this dataset.

        The base implementation returns an empty list; subclasses are
        expected to override it with dataset-specific parsing of
        ``self.record_path``.

        Returns:
            List[AnomalyRecord]: Parsed anomaly records (empty by default).
        """
        # The previous implementation fell through and implicitly returned
        # None, violating the declared return type and forcing callers to
        # None-check. An empty list is the safe, typed default.
        return []

    def _fill_missing_timepoints(
        self,
        metrics_df: pd.DataFrame,
        time_col: str = "timestamp",
        cmdb_id_col: str = "cmdb_id",
        metric_col: str = "metric_name",
        value_col: str = "value",
        entity_type_col: str = "entity_type",
        freq: Optional[str] = None,
    ) -> pd.DataFrame:
        """
        Fill missing timepoints so each metric series is continuous over the
        window's full time range.

        Every (entity_type, cmdb_id, metric_name) group is reindexed onto a
        regular time grid spanning the global min/max timestamp of the whole
        frame. Missing entries are filled with 0 for `value_col`, the group
        mean (or 0) for 'original_value', and forward-then-backward fill for
        all other columns.

        NOTE(review): converts metrics_df[time_col] to datetime in place when
        needed — callers passing a shared frame will observe that mutation.

        Args:
            metrics_df: Metrics DataFrame in long format.
            time_col: Timestamp column name.
            cmdb_id_col: CMDB ID column name.
            metric_col: Metric-name column name.
            value_col: Value column name.
            entity_type_col: Entity-type column name.
            freq: Sampling frequency; when None, read from config key
                "data_processing.sampling_rate" with default "60s".

        Returns:
            The filled DataFrame, or the input unchanged when nothing can be
            filled (None/empty input, missing grouping columns, NaT bounds).
        """
        if metrics_df is None or metrics_df.empty:
            return metrics_df

        # No freq given: fall back to the configured sampling rate or "60s".
        if freq is None:
            freq = self.config_manager.get("data_processing.sampling_rate", "60s")
            logger.debug(f"使用配置的采样频率: {freq}")

        required = [time_col, cmdb_id_col, metric_col, value_col, entity_type_col]
        if not all(col in metrics_df.columns for col in required):
            logger.warning(
                f"填充缺失: DataFrame缺少必要列: {required}。可用列: {metrics_df.columns}"
            )
            # Only the grouping columns are truly mandatory; continue if they
            # are all present (value_col may legitimately be absent here).
            if not all(
                col in metrics_df.columns
                for col in [time_col, cmdb_id_col, metric_col, entity_type_col]
            ):
                return metrics_df

        # Ensure time_col is datetime so date_range/reindex work correctly.
        if not pd.api.types.is_datetime64_any_dtype(metrics_df[time_col]):
            metrics_df[time_col] = pd.to_datetime(metrics_df[time_col])

        min_time, max_time = metrics_df[time_col].min(), metrics_df[time_col].max()
        if pd.isna(min_time) or pd.isna(max_time):
            return metrics_df  # no valid time bounds to anchor the grid on

        # Build the complete time grid shared by all groups.
        full_timerange = pd.date_range(start=min_time, end=max_time, freq=freq)
        if (
            full_timerange.empty and not metrics_df[time_col].empty
        ):  # degenerate case: a single timepoint
            full_timerange = pd.DatetimeIndex([metrics_df[time_col].min()])

        all_filled_dfs = []
        for (entity, cmdb, metric), group in metrics_df.groupby(
            [entity_type_col, cmdb_id_col, metric_col]
        ):
            group = group.set_index(time_col).sort_index()

            # Drop duplicate timestamps before reindexing (keep the latest),
            # since reindex requires a unique index.
            group = group[~group.index.duplicated(keep="last")]

            # Reindex this series onto the full time grid.
            reindexed_group = group.reindex(full_timerange)

            # Fill 'value' with 0; 'original_value' with the group mean (or 0
            # when the mean itself is NaN, i.e. the group was all-NaN).
            if value_col in reindexed_group.columns:
                reindexed_group[value_col] = reindexed_group[value_col].fillna(0)
            if "original_value" in reindexed_group.columns:
                mean_original = group["original_value"].mean()
                reindexed_group["original_value"] = reindexed_group[
                    "original_value"
                ].fillna(mean_original if not pd.isna(mean_original) else 0)

            # Restore the identifier columns on the newly created rows.
            reindexed_group[cmdb_id_col] = cmdb
            reindexed_group[metric_col] = metric
            reindexed_group[entity_type_col] = entity

            # Other columns: forward-fill then backward-fill.
            for col in group.columns:
                if col not in [
                    value_col,
                    "original_value",
                    cmdb_id_col,
                    metric_col,
                    entity_type_col,
                ]:
                    reindexed_group[col] = reindexed_group[col].ffill().bfill()

            all_filled_dfs.append(
                reindexed_group.reset_index().rename(columns={"index": time_col})
            )

        if not all_filled_dfs:
            return metrics_df

        logger.debug(f"使用采样频率 {freq} 填充了 {len(all_filled_dfs)} 个指标组")
        final_df = pd.concat(all_filled_dfs, ignore_index=True)
        return final_df

    def validate_dataframe(
        self, df: Optional[pd.DataFrame], required_cols: List[str]
    ) -> bool:
        """
        Check that a DataFrame contains a set of required columns.

        Args:
            df: DataFrame to validate; may be None.
            required_cols: Column names that must be present.

        Returns:
            True when validation passes, False otherwise.

        Rules:
            - No required columns: any input (including None/empty) passes.
            - Required columns but None/empty input: fails.
            - Otherwise: all required columns must be present.
        """
        # Nothing required — even a missing/empty frame is acceptable.
        if not required_cols:
            return True

        # Something is required, so a missing/empty frame cannot satisfy it.
        if df is None or df.empty:
            logger.debug(f"DataFrame为None或为空，但需要这些列: {required_cols}")
            return False

        missing = [col for col in required_cols if col not in df.columns]
        if not missing:
            return True

        logger.error(
            f"DataFrame缺少必要的列: {missing}。可用列: {df.columns.tolist()}"
        )
        return False

    def remove_nan_rows(
        self, df: Optional[pd.DataFrame], entity_name: str = "数据"
    ) -> Optional[pd.DataFrame]:
        """
        Drop rows containing NaN values (including "nan"/"" placeholders).

        Args:
            df: DataFrame to clean; may be None.
            entity_name: Label used in the log message.

        Returns:
            The cleaned DataFrame, or the input unchanged when None/empty.
        """
        if df is None or df.empty:
            return df

        # Normalize string placeholders to real NaN before dropping.
        normalized = df.replace(["nan", ""], np.nan)
        cleaned = normalized.dropna(how="any")

        dropped_rows = len(normalized) - len(cleaned)
        if dropped_rows:
            logger.info(f"删除了包含NaN值的 {dropped_rows} 行{entity_name}")

        return cleaned

    def handle_details_json(
        self, dataset_experiment_data: Dict[str, ExperimentData]
    ) -> Dict[str, Any]:
        """
        Process the dataset-level details.json so that all time windows share
        a consistent set of entities and metrics.

        If details.json exists, its content is read and merged (union) with
        the entities/metrics observed in the current data; otherwise it is
        built from the current data and saved. The merged information is then
        used to back-fill series missing from dataset_experiment_data, and
        the updated experiment data is written back to the cache.

        The merge/update happens under self.details_lock for thread safety.

        Args:
            dataset_experiment_data: Experiment data keyed by anomaly ID.

        Returns:
            Dict[str, Any]: The (possibly updated) details.json content.
        """
        logger.info(f"[{self.instance_id}] 开始处理details.json文件")

        # Read any existing details.json first (the read itself is lock-protected).
        details = self.read_details_json()

        # Entity/metric information present in the current data.
        current_details = self.extract_entity_metrics_from_data(dataset_experiment_data)

        # Guard the merge-and-write of details.json with the instance lock.
        with self.details_lock:
            # No usable details.json: build one from the current data.
            if details is None:
                logger.info(
                    f"[{self.instance_id}] 未找到有效的details.json，基于当前数据构建"
                )
                details = current_details

                # Persist the freshly built details.json.
                self.write_details_json(details)
            else:
                logger.info(f"[{self.instance_id}] 已读取existing details.json文件")

                # Merge in entity types not seen before.
                has_updates = False
                for entity_type in current_details.get("entity_types", []):
                    if entity_type not in details.get("entity_types", []):
                        details.setdefault("entity_types", []).append(entity_type)
                        details["entity_types"].sort()
                        has_updates = True

                # Merge in new entity IDs per entity type.
                for entity_type, ids in current_details.get("entity_ids", {}).items():
                    if entity_type not in details.get("entity_ids", {}):
                        details.setdefault("entity_ids", {})[entity_type] = ids
                        has_updates = True
                    else:
                        for entity_id in ids:
                            if entity_id not in details["entity_ids"][entity_type]:
                                details["entity_ids"][entity_type].append(entity_id)
                                details["entity_ids"][entity_type].sort()
                                has_updates = True

                # Merge in new metric names per entity type.
                for entity_type, metrics in current_details.get(
                    "entity_metrics", {}
                ).items():
                    if entity_type not in details.get("entity_metrics", {}):
                        details.setdefault("entity_metrics", {})[entity_type] = metrics
                        has_updates = True
                    else:
                        for metric_name in metrics:
                            if (
                                metric_name
                                not in details["entity_metrics"][entity_type]
                            ):
                                details["entity_metrics"][entity_type].append(
                                    metric_name
                                )
                                details["entity_metrics"][entity_type].sort()
                                has_updates = True

                # Persist only when something actually changed.
                if has_updates:
                    logger.info(f"[{self.instance_id}] details.json有更新，正在保存")
                    # NOTE(review): called while holding details_lock; the
                    # write method itself takes no additional lock.
                    self.write_details_json(details)

        # Back-fill each experiment using the merged entity/metric sets.
        self.complete_experiment_data(dataset_experiment_data, details)

        # Write the updated experiment data back to the cache.
        if self.use_cache:
            for anomaly_id, exp_data in dataset_experiment_data.items():
                self.save_to_cache(exp_data)
                logger.debug(
                    f"[{self.instance_id}] 已将补充后的实验数据 {anomaly_id} 保存回缓存"
                )  # debug level to keep log volume down

        logger.info(
            f"[{self.instance_id}] details.json处理完成，共处理 {len(dataset_experiment_data)} 个实验数据"
        )
        return details

    def get_details_path(self) -> Path:
        """Return the path of the dataset-level details.json file."""
        return self.details_path

    def read_details_json(self) -> Optional[Dict[str, Any]]:
        """
        Read the details.json file under the instance details lock.

        Returns:
            Dict[str, Any] or None: The parsed content if the file exists and
            is valid JSON; None when it is missing or unreadable.
        """
        details_path = self.get_details_path()

        # The lock guards the existence check plus the read as one unit, so a
        # concurrent writer cannot interleave between them.
        with self.details_lock:
            if not details_path.exists():
                logger.info(
                    f"[{self.instance_id}] details.json文件不存在: {details_path}"
                )
                return None

            try:
                with open(details_path, "r", encoding="utf-8") as f:
                    details = json.load(f)
            except (OSError, ValueError) as e:
                # OSError: I/O failure; ValueError: malformed JSON/encoding
                # (json.JSONDecodeError and UnicodeDecodeError are subclasses).
                logger.error(f"[{self.instance_id}] 读取details.json文件失败: {e}")
                return None

            logger.info(
                f"[{self.instance_id}] 成功读取details.json文件: {details_path}"
            )
            # json.load builds a brand-new object graph on every call, so the
            # previous defensive .copy() was redundant and has been removed.
            return details

    def write_details_json(self, details: Dict[str, Any]) -> bool:
        """
        Write the details content to the details.json file.

        NOTE(review): contrary to what the original comments claimed, this
        method takes NO lock of its own — callers are expected to hold
        self.details_lock around the call (handle_details_json does exactly
        that). Calling it unlocked from multiple threads can interleave writes.

        Args:
            details: The details content to serialize.

        Returns:
            bool: True if the write succeeded, False otherwise.
        """
        details_path = self.get_details_path()

        # Shallow copy of the top level only; nested lists/dicts are still
        # shared with the caller, so concurrent mutation of those can still
        # race with serialization.
        details_copy = details.copy()

        try:
            with open(details_path, "w", encoding="utf-8") as f:
                json.dump(details_copy, f, ensure_ascii=False, indent=2)
            logger.info(
                f"[{self.instance_id}] 成功写入details.json文件: {details_path}"
            )
            return True
        except Exception as e:
            logger.error(f"[{self.instance_id}] 写入details.json文件失败: {e}")
            return False

    def _extract_from_single_experiment(
        self, anomaly_id: str, exp_data: ExperimentData
    ) -> Tuple[Set[str], Dict[str, Set[str]], Dict[str, Set[str]]]:
        """
        Extract entity types, entity IDs and metric names from one experiment.

        Per-item unit of work for the (possibly multi-threaded) aggregation
        in extract_entity_metrics_from_data. Bug fix vs. the previous
        version: this no longer mutates exp_data.metrics_df (it used to add
        an 'entity_type' column to the shared DataFrame from worker threads);
        a missing column is now handled on a local, defaulted copy.

        Args:
            anomaly_id: Anomaly ID (used for logging only).
            exp_data: Experiment data to inspect.

        Returns:
            Tuple of (entity-type set, entity-ID dict, entity-metric dict);
            the dicts are keyed by the known entity buckets plus 'unknown'.
        """
        buckets = ("container", "node", "service", "interface", "unknown")
        local_entity_types: Set[str] = set()
        local_entity_ids: Dict[str, Set[str]] = {b: set() for b in buckets}
        local_entity_metrics: Dict[str, Set[str]] = {b: set() for b in buckets}

        metrics_df = exp_data.metrics_df
        if metrics_df is None or metrics_df.empty:
            return local_entity_types, local_entity_ids, local_entity_metrics

        if "entity_type" not in metrics_df.columns:
            logger.warning(f"实验数据 {anomaly_id} 的指标DataFrame缺少entity_type列")
            # assign() returns a new frame — the shared DataFrame stays
            # untouched, which matters when this runs in a worker thread.
            metrics_df = metrics_df.assign(entity_type="unknown")

        # Collect types, IDs and metric names per entity type.
        for entity_type, entity_df in metrics_df.groupby("entity_type"):
            local_entity_types.add(entity_type)
            # Unrecognized entity types fall into the 'unknown' bucket.
            bucket = entity_type if entity_type in local_entity_ids else "unknown"

            if "cmdb_id" in entity_df.columns:
                local_entity_ids[bucket].update(entity_df["cmdb_id"].unique())

            if "metric_name" in entity_df.columns:
                local_entity_metrics[bucket].update(entity_df["metric_name"].unique())

        return local_entity_types, local_entity_ids, local_entity_metrics

    def extract_entity_metrics_from_data(
        self, experiment_data_dict: Dict[str, ExperimentData]
    ) -> Dict[str, Any]:
        """
        Collect all entity IDs and metric names across a set of experiments.

        Small inputs are processed serially; larger ones fan out over a
        thread pool, with per-experiment failures logged rather than raised.

        Args:
            experiment_data_dict: Experiment data keyed by anomaly ID.

        Returns:
            Dict[str, Any]: Sorted lists of entity types, entity IDs per type
            and metric names per type (empty buckets omitted).
        """
        buckets = ("container", "node", "service", "interface", "unknown")
        entity_types: Set[str] = set()
        entity_ids: Dict[str, Set[str]] = {b: set() for b in buckets}
        entity_metrics: Dict[str, Set[str]] = {b: set() for b in buckets}

        def merge(local_types, local_ids, local_metrics):
            # Fold one experiment's partial result into the global sets.
            entity_types.update(local_types)
            for bucket, ids in local_ids.items():
                entity_ids[bucket].update(ids)
            for bucket, metrics in local_metrics.items():
                entity_metrics[bucket].update(metrics)

        # Scale worker count with the amount of data.
        num_workers = min(os.cpu_count() or 4, len(experiment_data_dict))

        if num_workers <= 1 or len(experiment_data_dict) <= 1:
            # Small workload: run in the calling thread.
            for anomaly_id, exp_data in experiment_data_dict.items():
                merge(*self._extract_from_single_experiment(anomaly_id, exp_data))
        else:
            # Larger workload: extract in parallel and merge as results arrive.
            with ThreadPoolExecutor(max_workers=num_workers) as executor:
                pending = [
                    executor.submit(
                        self._extract_from_single_experiment, anomaly_id, exp_data
                    )
                    for anomaly_id, exp_data in experiment_data_dict.items()
                ]
                for future in as_completed(pending):
                    try:
                        merge(*future.result())
                    except Exception as e:
                        logger.error(f"提取实体和指标信息时出错: {e}")

        # Deterministic output: alphabetically sorted lists, empty sets dropped.
        return {
            "entity_types": sorted(entity_types),
            "entity_ids": {k: sorted(v) for k, v in entity_ids.items() if v},
            "entity_metrics": {k: sorted(v) for k, v in entity_metrics.items() if v},
        }

    def save_anomaly_details_json(self, record: AnomalyRecord) -> bool:
        """
        Persist the anomaly's metadata as details.json in its output folder.

        Existing content is merged (top-level keys updated) rather than
        overwritten; file access is serialized via self.file_lock.

        Args:
            record: The anomaly record to persist.

        Returns:
            bool: True on success, False when the write fails.
        """
        # Per-anomaly folder directly under the output directory (the cache
        # subfolder lives at the same level).
        anomaly_dir = self.output_dir / record.anomaly_id
        anomaly_dir.mkdir(parents=True, exist_ok=True)
        details_path = anomaly_dir / "details.json"

        payload = record.to_dict()
        # datetime objects are not JSON-serializable — store ISO-8601 text.
        if "datetime_obj" in payload:
            payload["datetime_obj"] = payload["datetime_obj"].isoformat()

        current_details = {
            "anomaly_details": payload,
            "processing_time": datetime.now().isoformat(),
            "data_source": str(self.base_data_path),
            "loader_type": self.__class__.__name__,
        }

        with self.file_lock:
            if details_path.exists():
                # Merge into the existing file rather than clobbering it.
                try:
                    with open(details_path, "r", encoding="utf-8") as f:
                        existing = json.load(f)
                    existing.update(current_details)
                    current_details = existing
                except Exception as e:
                    logger.warning(f"[{self.instance_id}] 读取已有故障元信息失败，将创建新文件: {e}")

            try:
                with open(details_path, "w", encoding="utf-8") as f:
                    json.dump(current_details, f, ensure_ascii=False, indent=2)
            except Exception as e:
                logger.error(f"[{self.instance_id}] 保存故障元信息失败: {e}")
                return False

            logger.info(f"[{self.instance_id}] 成功保存故障元信息到: {details_path}")
            return True

    def load_experiment_data_from_record(
        self, record: AnomalyRecord
    ) -> Optional[ExperimentData]:
        """
        Build (or load from cache) the ExperimentData for one anomaly record.

        The observation time is the anomaly time plus a configured delay; the
        processed window is [observation - normal_before, observation +
        anomaly_after] (minutes, all from config). Dependencies and metrics
        are loaded for that window, NaN rows dropped, missing timepoints
        filled, and the result cached when caching is enabled.

        Args:
            record: The anomaly record to process.

        Returns:
            The assembled (or cached) ExperimentData.
        """
        output_path = self.output_dir / record.anomaly_id
        output_path.mkdir(parents=True, exist_ok=True)

        # Persist the anomaly metadata as details.json in the output folder.
        self.save_anomaly_details_json(record)

        if self.use_cache:
            cached_data = self.load_from_cache(record.anomaly_id)
            if cached_data is not None:
                # Cache hit: still refresh the metadata (processing_time).
                self.save_anomaly_details_json(record)
                return cached_data

        logger.info(f"缓存未命中或已禁用: {record.anomaly_id}，从源数据处理")
        # Window parameters come from the config manager created in __init__.
        delay_minutes = self.config_manager.get("data_processing.window.delay", 10)
        normal_before_minutes = self.config_manager.get(
            "data_processing.window.normal_before", 20
        )
        anomaly_after_minutes = self.config_manager.get(
            "data_processing.window.anomaly_after", 20
        )

        anomaly_dt = ensure_timezone(record.datetime_obj)
        observation_dt = anomaly_dt + timedelta(minutes=delay_minutes)
        start_time = observation_dt - timedelta(minutes=normal_before_minutes)
        end_time = observation_dt + timedelta(minutes=anomaly_after_minutes)

        logger.info(
            f"处理异常 {record.anomaly_id}：时间窗口 {start_time} 至 {end_time}"
        )

        dependencies_df = self.load_dependencies(start_time, end_time, record)
        # Dependency rows with NaN values are useless downstream — drop them.
        dependencies_df = self.remove_nan_rows(dependencies_df, "依赖关系数据")

        metrics_df = self.load_metrics(start_time, end_time, record, dependencies_df)
        if metrics_df is not None and not metrics_df.empty:
            # _fill_missing_timepoints needs a datetime 'timestamp' column.
            if (
                "timestamp" in metrics_df.columns
                and not pd.api.types.is_datetime64_any_dtype(metrics_df["timestamp"])
            ):
                metrics_df["timestamp"] = pd.to_datetime(metrics_df["timestamp"])

            # Sampling frequency for the gap-filling grid.
            sampling_rate = self.config_manager.get(
                "data_processing.sampling_rate", "60s"
            )
            metrics_df = self._fill_missing_timepoints(metrics_df, freq=sampling_rate)

        logs_df = pd.DataFrame()  # placeholder: log data is not loaded yet

        exp_data = ExperimentData(
            anomaly_id=record.anomaly_id,
            anomaly_timestamp=record.unix_timestamp,
            anomaly_datetime=record.datetime_obj,
            anomaly_level=record.level,
            anomaly_reason=record.reason,
            anomaly_component=record.component,
            observation_time=int(observation_dt.timestamp()),
            metrics_df=metrics_df,
            dependencies_df=dependencies_df,
            logs_df=logs_df,
        )
        exp_data.set_time_window(start_time, end_time)
        logger.info(
            f"异常 {record.anomaly_id} 处理完成。指标数: {len(metrics_df if metrics_df is not None else [])}, 依赖关系数: {len(dependencies_df if dependencies_df is not None else [])}"
        )

        if exp_data and self.use_cache:
            self.save_to_cache(exp_data)
        return exp_data

    def get_cache_path(self, anomaly_id: str) -> Path:
        """Return the pickle cache path for an anomaly, creating its cache dir."""
        cache_dir = self.output_dir / anomaly_id / "cache"
        # Guarantee the cache subdirectory exists before handing out the path.
        cache_dir.mkdir(parents=True, exist_ok=True)
        return cache_dir / (self.cache_prefix + ".pkl")

    @staticmethod
    def get_anomaly_id(component: str, timestamp: Union[int, str]) -> str:
        """Build the canonical anomaly identifier: '<component>_<timestamp>'."""
        return "{}_{}".format(component, timestamp)

    def save_to_cache(self, experiment_data: ExperimentData) -> bool:
        """
        Pickle an ExperimentData object to its per-anomaly cache file.

        Returns:
            bool: True on success; False when caching is disabled or the
            write fails.
        """
        if not self.use_cache:
            return False
        cache_path = self.get_cache_path(experiment_data.anomaly_id)

        # file_lock serializes cache reads/writes within this instance.
        with self.file_lock:
            try:
                with open(cache_path, "wb") as f:
                    pickle.dump(experiment_data, f)
            except Exception as e:
                logger.error(
                    f"[{self.instance_id}] Failed to save cache for {experiment_data.anomaly_id}: {e}"
                )
                return False

            logger.info(
                f"[{self.instance_id}] Experiment data cached: {cache_path}"
            )
            return True

    def load_from_cache(self, anomaly_id: str) -> Optional[ExperimentData]:
        """
        Load a cached ExperimentData for an anomaly.

        Returns:
            The unpickled object, or None when the cache file is missing or
            cannot be read (the caller then re-processes from source).
        """
        cache_path = self.get_cache_path(anomaly_id)

        # file_lock serializes cache reads/writes within this instance.
        with self.file_lock:
            if not cache_path.exists():
                logger.debug(f"[{self.instance_id}] Cache does not exist: {cache_path}")
                return None

            try:
                with open(cache_path, "rb") as f:
                    data = pickle.load(f)
            except Exception as e:
                logger.warning(
                    f"[{self.instance_id}] Failed to load from cache {cache_path}: {e}. Re-processing."
                )
                return None

            logger.info(
                f"[{self.instance_id}] Loaded experiment data from cache: {cache_path}"
            )
            return data

    def _complete_single_experiment_data(
        self,
        anomaly_id: str,
        exp_data: ExperimentData,
        entity_ids: Dict[str, List[str]],
        entity_metrics: Dict[str, List[str]],
    ) -> None:
        """
        Back-fill one experiment with zero-valued series for every
        (entity_type, cmdb_id, metric_name) combination from details.json
        that the experiment is missing. Per-item unit of work for the
        (possibly multi-threaded) complete_experiment_data.

        Args:
            anomaly_id: Anomaly ID (used for logging only).
            exp_data: Experiment data to complete in place (metrics_df is
                replaced when anything was added).
            entity_ids: entity_type -> required cmdb_ids.
            entity_metrics: entity_type -> required metric names.
        """
        metrics_df = exp_data.metrics_df
        if metrics_df is None or metrics_df.empty:
            logger.warning(f"实验数据 {anomaly_id} 没有指标数据，跳过补充")
            return

        if "timestamp" not in metrics_df.columns:
            logger.error(
                f"实验数据 {anomaly_id} 的指标DataFrame缺少timestamp列，跳过补充"
            )
            return

        # Supplement rows reuse the experiment's own timestamps, so the
        # column must be datetime first.
        if not pd.api.types.is_datetime64_any_dtype(metrics_df["timestamp"]):
            metrics_df["timestamp"] = pd.to_datetime(metrics_df["timestamp"])

        all_timestamps = metrics_df["timestamp"].unique()
        if len(all_timestamps) == 0:
            logger.warning(f"实验数据 {anomaly_id} 没有有效的时间戳，跳过补充")
            return

        existing_entity_types = (
            set(metrics_df["entity_type"].unique())
            if "entity_type" in metrics_df.columns
            else set()
        )

        # (entity_type, cmdb_id, metric_name) triples already present.
        # Built with zip over the deduplicated key columns instead of the
        # previous iterrows loop — same result, considerably faster.
        existing_combinations: Set[Tuple[Any, Any, Any]] = set()
        key_cols = ["entity_type", "cmdb_id", "metric_name"]
        if all(col in metrics_df.columns for col in key_cols):
            deduped = metrics_df[key_cols].drop_duplicates()
            existing_combinations = set(
                zip(deduped["entity_type"], deduped["cmdb_id"], deduped["metric_name"])
            )

        complete_dfs = []
        for entity_type, ids in entity_ids.items():
            # Skip entity types that neither appear in this experiment nor
            # belong to the known core types.
            if entity_type not in existing_entity_types and entity_type not in (
                "container",
                "node",
                "service",
                "interface",
            ):
                continue

            metrics = entity_metrics.get(entity_type, [])
            if not metrics:
                continue

            # Check every required (cmdb_id, metric_name) pair for this type.
            for cmdb_id in ids:
                for metric_name in metrics:
                    if (entity_type, cmdb_id, metric_name) in existing_combinations:
                        continue

                    # One zero-filled row per existing timestamp; scalars are
                    # broadcast against the timestamp array by the constructor.
                    supplement_df = pd.DataFrame(
                        {
                            "timestamp": all_timestamps,
                            "cmdb_id": cmdb_id,
                            "metric_name": metric_name,
                            "value": 0.0,
                            "original_value": 0.0,
                            "entity_type": entity_type,
                        }
                    )
                    complete_dfs.append(supplement_df)
                    logger.debug(
                        f"为实验数据 {anomaly_id} 补充 {entity_type} 类型的 {cmdb_id}/{metric_name}"
                    )

        if complete_dfs:
            # concat copies its inputs, so the previous explicit
            # metrics_df.copy() (and the single-element existing_dfs list)
            # were redundant and have been dropped.
            combined_df = pd.concat([metrics_df] + complete_dfs, ignore_index=True)
            exp_data.metrics_df = combined_df
            logger.info(
                f"实验数据 {anomaly_id} 补充完成，从 {len(metrics_df)} 行增加到 {len(combined_df)} 行"
            )
        else:
            logger.info(f"实验数据 {anomaly_id} 无需补充")

    def complete_experiment_data(
        self,
        dataset_experiment_data: Dict[str, ExperimentData],
        details: Dict[str, Any],
    ) -> None:
        """
        Ensure every experiment contains all entity IDs and metric names
        declared in details.json, adding zero-filled series for missing ones.

        Small inputs are processed serially; larger ones fan out over a
        thread pool. Per-experiment failures are logged, not raised.

        Args:
            dataset_experiment_data: Experiment data keyed by anomaly ID.
            details: Parsed details.json content.
        """
        logger.info("开始补充实验数据")

        # Bail out early when details.json lacks the reference sets.
        if not details or any(
            key not in details for key in ("entity_ids", "entity_metrics")
        ):
            logger.warning("details.json缺少必要的信息，无法补充实验数据")
            return

        entity_ids = details.get("entity_ids", {})
        entity_metrics = details.get("entity_metrics", {})

        # Scale worker count with the amount of data.
        num_workers = min(os.cpu_count() or 4, len(dataset_experiment_data))

        if num_workers <= 1 or len(dataset_experiment_data) <= 1:
            # Small workload: run in the calling thread.
            for anomaly_id, exp_data in dataset_experiment_data.items():
                self._complete_single_experiment_data(
                    anomaly_id, exp_data, entity_ids, entity_metrics
                )
        else:
            # Larger workload: complete experiments in parallel.
            with ThreadPoolExecutor(max_workers=num_workers) as executor:
                pending = [
                    executor.submit(
                        self._complete_single_experiment_data,
                        anomaly_id,
                        exp_data,
                        entity_ids,
                        entity_metrics,
                    )
                    for anomaly_id, exp_data in dataset_experiment_data.items()
                ]

                # Surface (but do not propagate) any per-experiment failure.
                for future in as_completed(pending):
                    try:
                        future.result()
                    except Exception as e:
                        logger.error(f"补充实验数据时出错: {e}")

        logger.info(f"完成 {len(dataset_experiment_data)} 个实验数据的补充")
