import pandas as pd
import logging
import numpy as np
import json
import pickle
import os
from datetime import datetime
from pathlib import Path
from neo4j import GraphDatabase
from neo4j.exceptions import Neo4jError
from tqdm.auto import tqdm
from collections import defaultdict, Counter
from typing import Dict, List, Set, Tuple, Optional
import multiprocessing as mp
from functools import partial
from concurrent.futures import ThreadPoolExecutor, ProcessPoolExecutor
import bisect
from itertools import groupby
from operator import itemgetter

from .neoGraph import (
    NeoGraph,
)  # Assuming neoGraph.py is in the same directory or package
from source.data_processor.utils.data_utils import (
    ensure_datetime_column,
    filter_dataframe_by_time,
)
from source.data_processor.utils.time_utils import convert_timestamp_to_datetime

# Configure module-level logging
logging.basicConfig(
    format="%(asctime)s [%(levelname)s] %(name)s: %(message)s",
    datefmt="%Y-%m-%d %H:%M:%S",
    level=logging.INFO,
)
logger = logging.getLogger(__name__)


class SystemDependencyGraphBuilder(NeoGraph):
    """
    优化的系统资源依赖图构建器，使用高效算法处理大量Trace数据

    主要优化策略：
    1. 向量化操作替代循环 - 使用pandas的向量化操作提高性能
    2. 并行处理大数据集 - 利用多进程处理提高吞吐量
    3. 内存友好的批处理 - 分批处理数据避免内存溢出
    4. 高效的数据结构和索引 - 使用优化的数据结构和索引加速查询
    5. 智能缓存和预计算 - 缓存常用数据减少重复计算

    主要功能：
    - 构建服务依赖关系图
    - 处理和分析trace数据
    - 计算服务调用统计
    - 管理实体和指标数据
    """

    def __init__(
        self,
        uri,
        user,
        password,
        cache_dir=None,
        batch_size=10000,
        active_graph_id: str = None,
        output_dir: str = None,
        n_workers: int = None,
        memory_limit_mb: int = 1024,
    ):
        """
        Initialize the optimized system dependency graph builder.

        Args:
            uri: Neo4j database URI used for graph storage.
            user: Neo4j username for authentication.
            password: Neo4j password for authentication.
            cache_dir: Directory for intermediate results and the graph cache.
            batch_size: Rows handled per batch (default 10000).
            active_graph_id: Identifier of the graph currently being built.
            output_dir: Directory for processing results and logs.
            n_workers: Number of parallel workers; defaults to min(CPU count, 8).
            memory_limit_mb: Soft memory budget in MB.
        """
        super().__init__(uri, user, password, cache_dir, batch_size, active_graph_id)
        self.output_dir = output_dir

        # Default to min(cpu_count, 8); an explicit non-positive value is
        # clamped to a single worker.
        requested = n_workers or min(mp.cpu_count(), 8)
        self.n_workers = requested if requested > 0 else 1
        self.memory_limit_mb = memory_limit_mb

        # Reset every in-memory mapping, index and cache.
        self._init_optimized_structures()

        # Counters reported at the end of a build.
        self.perf_stats = {
            "total_traces_processed": 0,
            "total_spans_processed": 0,
            "processing_time": 0,
            "memory_peak_mb": 0,
        }

    def _init_optimized_structures(self):
        """
        (Re)create every in-memory mapping, index and cache used by a build.

        Attributes populated here:
            entity_types: entity_id -> entity type string.
            interface_containers: service name -> set of container ids.
            container_interfaces: container id -> set of service names.
            interface_calls: (caller, callee) -> {"call_count", "total_latency"}.
            entity_metrics: entity_id -> [(metric_id, name, value, ts), ...].
            span_to_interface: span_id -> (service_name, container_id).
            _span_index: span_id -> (trace_id, service, container, timestamp).
            _trace_index: trace_id -> [(span_id, timestamp, parent_id), ...].
            _service_containers_cache / _call_relationships_cache: lookup caches.
        """
        # Entity typing and metric storage.
        self.entity_types = {}
        self.entity_metrics = defaultdict(list)

        # Bidirectional service <-> container mappings.
        self.interface_containers = defaultdict(set)
        self.container_interfaces = defaultdict(set)

        # Per call-pair aggregates; entries are created lazily on first access.
        self.interface_calls = defaultdict(
            lambda: {"call_count": 0, "total_latency": 0.0}
        )

        # Span-level lookups.
        self.span_to_interface = {}
        self._span_index = {}
        self._trace_index = {}

        # Memoization caches.
        self._service_containers_cache = {}
        self._call_relationships_cache = {}

    def build_graph(
        self,
        experiment_data,
        metric_result=None,
        custom_graph_id=None,
        time_range=None,
    ):
        """
        Build the system dependency graph.

        Pipeline:
            1. Try to load a previously cached graph.
            2. Build base structures and indices from trace dependencies.
            3. Build service call relationships (optionally time-filtered).
            4. Process metric data and attach it to entities.
            5. Persist the graph and record performance statistics.

        Args:
            experiment_data: Experiment data holding dependencies and metrics.
            metric_result: Optional metric analysis result (anomaly markers).
            custom_graph_id: Graph id override; defaults to
                experiment_data.anomaly_id when None.
            time_range: Optional (start_time, end_time) filter tuple.

        Returns:
            (bool, Optional[str]): success flag and cache path (None on failure).
        """
        start_time = datetime.now()
        graph_id = custom_graph_id if custom_graph_id else experiment_data.anomaly_id
        self.active_graph_id = graph_id

        logger.info(
            f"开始构建优化的系统依赖图, graph_id={graph_id}, 并行工作数={self.n_workers}"
        )

        # Fast path: reuse a cached graph when one exists.
        if self.cache_dir and self.load_graph_from_cache(graph_id):
            if hasattr(self, "loaded_from_cache_perf_stats"):
                self.perf_stats = self.loaded_from_cache_perf_stats
            logger.info(f"图 {graph_id} 从缓存加载成功。")
            return True, os.path.join(self.cache_dir, f"graph_{graph_id}")

        self._init_optimized_structures()  # Start from a clean state.

        try:
            # Phase 1: efficient preprocessing and index construction.
            logger.info("第一阶段：构建高效索引和基础结构")
            if not self._build_optimized_basic_structure(experiment_data):
                return False, None

            # Phase 2: build service call relationships (parallel internally).
            logger.info("第二阶段：并行构建服务调用关系")
            if not self._build_optimized_service_relationships(
                experiment_data, time_range
            ):
                # A False here means missing columns; treated as non-fatal.
                logger.warning("服务关系构建部分返回False，但继续处理")

            # Phase 3: batch metric processing.
            logger.info("第三阶段：批量处理指标数据")
            self._process_optimized_metrics(
                experiment_data, time_range, graph_id, metric_result
            )

            # Persist the graph and collect final statistics.
            saved_pkl_path = self._finalize_graph_build(graph_id)

            self.perf_stats["processing_time"] = (
                datetime.now() - start_time
            ).total_seconds()
            nodes_count = (
                len(self.nodes)
                if hasattr(self, "nodes") and self.nodes is not None
                else 0
            )
            relationships_count = (
                len(self.relationships)
                if hasattr(self, "relationships") and self.relationships is not None
                else 0
            )
            logger.info(f"图构建完成: {nodes_count}个节点, {relationships_count}个关系")
            logger.info(f"性能统计: {self.perf_stats}")

            return True, saved_pkl_path

        except Exception as e:
            # logger.exception logs the message plus the full traceback in one
            # call, replacing the manual `import traceback` / format_exc pair.
            logger.exception(f"构建系统依赖图时发生错误: {e}")
            return False, None

    def _build_optimized_basic_structure(self, experiment_data) -> bool:
        """
        Build the base structures (indices and mappings) from dependency data.

        Steps:
            1. Validate that dependency data exists.
            2. Clean and normalize it with vectorized operations.
            3. Build span/trace indices (parallel for large datasets).
            4. Extract service-container mappings in bulk.
            5. Supplement global service information.

        Args:
            experiment_data: Experiment data object exposing dependencies_df.

        Returns:
            bool: True when the structure was built (empty input is delegated
            to the empty-dependencies handler), its result otherwise.
        """
        raw_df = experiment_data.dependencies_df
        if raw_df is None or raw_df.empty:
            return self._handle_empty_dependencies(experiment_data)

        cleaned = self._vectorized_preprocess(raw_df)
        if cleaned.empty:
            logger.warning("向量化预处理后数据为空，可能影响后续步骤。")
            return self._handle_empty_dependencies(experiment_data)

        self.perf_stats["total_spans_processed"] = len(cleaned)

        # Index construction, bulk mapping extraction, then global enrichment.
        self._build_parallel_indices(cleaned)
        self._extract_bulk_mappings(cleaned)
        self._supplement_global_info(experiment_data)

        return True

    def _vectorized_preprocess(self, df: pd.DataFrame) -> pd.DataFrame:
        """
        Clean and normalize dependency data without per-row loops.

        Steps:
            1. Verify required columns are present.
            2. Coerce the timestamp column to datetime when needed.
            3. Drop rows missing spanId/serviceName/traceId.
            4. Sort by traceId (and timestamp when usable).

        Args:
            df: Raw dependency DataFrame.

        Returns:
            pd.DataFrame: Cleaned frame, or an empty frame when a required
            column is missing.
        """
        work = df.copy()

        # Abort early when a mandatory column is absent.
        missing = [c for c in ("spanId", "serviceName", "traceId") if c not in work.columns]
        if missing:
            logger.error(f"必需列 {missing[0]} 在依赖数据中缺失。")
            return pd.DataFrame()

        # Coerce timestamps once; unparseable values become NaT.
        if "timestamp" in work.columns:
            if not pd.api.types.is_datetime64_any_dtype(work["timestamp"]):
                work["timestamp"] = pd.to_datetime(work["timestamp"], errors="coerce")
        else:
            logger.warning("依赖数据中缺少 'timestamp' 列。某些功能可能受影响。")

        # Keep only rows where every key field is present.
        keep = (
            work["spanId"].notna()
            & work["serviceName"].notna()
            & work["traceId"].notna()
        )
        work = work[keep].copy()

        # Sort so downstream grouping walks each trace contiguously.
        if "timestamp" in work.columns and work["timestamp"].notna().any():
            work = work.sort_values(["traceId", "timestamp"], na_position="last")
        else:
            work = work.sort_values(["traceId"])

        logger.info(f"向量化预处理完成: {len(work)} 条有效记录")
        return work

    def _build_parallel_indices(self, df: pd.DataFrame):
        """
        Build span/trace indices, choosing parallel or sequential processing.

        Traces are processed concurrently when there are more than 1000 of
        them and more than one worker is configured; otherwise a simple
        sequential pass is used.

        Args:
            df: Preprocessed dependency DataFrame.
        """
        if df.empty:
            logger.warning("用于构建索引的DataFrame为空。")
            return

        grouped = df.groupby("traceId")
        self.perf_stats["total_traces_processed"] = len(grouped)

        # Concurrency only pays off for large trace counts.
        use_parallel = len(grouped) > 1000 and self.n_workers > 1
        if use_parallel:
            self._build_indices_multiprocess(grouped)
        else:
            self._build_indices_sequential(grouped)

    def _build_indices_multiprocess(self, trace_groups):
        """
        Build indices for many traces concurrently.

        NOTE: despite the historical name, this uses a ThreadPoolExecutor
        (threads, not processes). The trace groups are split into roughly
        equal chunks, each chunk is indexed concurrently, and the partial
        indices are merged into the instance-level ones. On any failure the
        partial state is discarded and the sequential builder runs instead.

        Args:
            trace_groups: pandas GroupBy of spans keyed by traceId.
        """
        items = list(trace_groups)
        per_chunk = max(1, len(items) // self.n_workers)
        chunks = [
            items[start : start + per_chunk]
            for start in range(0, len(items), per_chunk)
        ]

        try:
            with ThreadPoolExecutor(max_workers=self.n_workers) as pool:
                partials = list(pool.map(self._process_trace_chunk, chunks))

            # Merge each worker's index fragments.
            for span_part, trace_part in partials:
                self._span_index.update(span_part)
                self._trace_index.update(trace_part)
        except Exception as e:
            logger.error(f"多进程构建索引时发生错误: {e}. 尝试顺序构建。")
            # Drop any partially merged state before retrying sequentially.
            self._span_index.clear()
            self._trace_index.clear()
            self._build_indices_sequential(trace_groups)

    def _build_indices_sequential(self, trace_groups):
        """
        Index traces one at a time (small datasets, or the parallel fallback).

        Args:
            trace_groups: pandas GroupBy of spans keyed by traceId.
        """
        progress = tqdm(trace_groups, desc="Sequential Index Building")
        for trace_id, spans in progress:
            self._process_single_trace_group(trace_id, spans)

    def _process_trace_chunk(
        self, trace_chunk: List[Tuple[str, pd.DataFrame]]
    ) -> Tuple[dict, dict]:
        """
        Index one chunk of traces (runs inside a worker thread).

        For every span this records
        ``span_index[span_id] = (trace_id, service_name, container_id, timestamp)``
        and ``trace_index[trace_id] = [(span_id, timestamp, parent_id), ...]``
        sorted by timestamp.

        Improvements over the previous version:
        - ``itertuples()`` replaces the much slower per-row ``iterrows()`` loop.
        - Missing timestamps use ``pd.Timestamp.min`` as the sort placeholder
          instead of ``datetime.min``: datetime.min (year 1) is outside the
          nanosecond Timestamp bounds and is not safely comparable against
          pd.Timestamp values on older pandas versions.

        Args:
            trace_chunk: List of (trace_id, span DataFrame) pairs.

        Returns:
            Tuple[dict, dict]: (span index fragment, trace index fragment).
        """
        span_index_chunk = {}
        trace_index_chunk = {}

        for trace_id, group in trace_chunk:
            spans_data = []

            # itertuples avoids the per-row Series construction of iterrows.
            for row in group.itertuples(index=False):
                span_id = row.spanId
                service_name = row.serviceName
                container_id = getattr(row, "cmdb_id", None)  # optional column
                timestamp = getattr(row, "timestamp", None)  # optional column
                parent_id = getattr(row, "parentId", None)  # optional column

                span_index_chunk[span_id] = (
                    trace_id,
                    service_name,
                    container_id,
                    timestamp,
                )

                # Use a comparable minimum placeholder for NaT/None timestamps
                # so the sort below never mixes incomparable types.
                sort_ts = timestamp if pd.notna(timestamp) else pd.Timestamp.min
                spans_data.append((span_id, sort_ts, parent_id))

            # Order spans chronologically within the trace.
            spans_data.sort(key=itemgetter(1))
            trace_index_chunk[trace_id] = spans_data

        return span_index_chunk, trace_index_chunk

    def _process_single_trace_group(self, trace_id: str, group: pd.DataFrame):
        """
        Index the spans of a single trace into the instance-level indices.

        Mirrors _process_trace_chunk but writes directly into
        self._span_index and self._trace_index.

        Improvements over the previous version (kept consistent with
        _process_trace_chunk):
        - ``itertuples()`` replaces the slower ``iterrows()`` loop.
        - ``pd.Timestamp.min`` replaces ``datetime.min`` as the placeholder
          for missing timestamps; datetime.min is outside Timestamp's
          nanosecond bounds and not safely comparable on older pandas.

        Args:
            trace_id: Unique trace identifier.
            group: DataFrame containing every span of that trace.
        """
        spans_data = []

        for row in group.itertuples(index=False):
            span_id = row.spanId
            service_name = row.serviceName
            container_id = getattr(row, "cmdb_id", None)  # optional column
            timestamp = getattr(row, "timestamp", None)  # optional column
            parent_id = getattr(row, "parentId", None)  # optional column

            self._span_index[span_id] = (
                trace_id,
                service_name,
                container_id,
                timestamp,
            )

            # Comparable minimum placeholder for NaT/None timestamps.
            sort_ts = timestamp if pd.notna(timestamp) else pd.Timestamp.min
            spans_data.append((span_id, sort_ts, parent_id))

        # Store spans ordered chronologically.
        spans_data.sort(key=itemgetter(1))
        self._trace_index[trace_id] = spans_data

    def _extract_bulk_mappings(self, df: pd.DataFrame):
        """
        Extract service-container mappings in bulk with vectorized pandas ops.

        Populates interface_containers, container_interfaces, entity_types and
        span_to_interface from the unique (serviceName, cmdb_id) pairs found
        in the dependency data.

        Args:
            df: Dependency DataFrame; must contain serviceName and cmdb_id.
        """
        if df.empty or not {"serviceName", "cmdb_id"}.issubset(df.columns):
            logger.warning("无法提取服务-容器映射：DataFrame为空或缺少必需列。")
            return

        # Unique, fully populated (service, container) pairs.
        pairs = df[["serviceName", "cmdb_id"]].dropna().drop_duplicates()

        for service, container in zip(pairs["serviceName"], pairs["cmdb_id"]):
            self.interface_containers[service].add(container)
            self.container_interfaces[container].add(service)
            self.entity_types[service] = "service"
            # cmdb_id values are treated as container identifiers here.
            self.entity_types[container] = "container"

        # Map each span to its (service, container) pair; the container id may
        # be None for some spans. _span_index entries are
        # (trace_id, service_name, container_id, timestamp).
        for span_id, (_, service_name, container_id, _) in self._span_index.items():
            if service_name:
                self.span_to_interface[span_id] = (service_name, container_id)

        logger.info(f"批量映射完成: {len(pairs)} 个服务-容器对")

    def _build_optimized_service_relationships(
        self, experiment_data, time_range=None
    ) -> bool:
        """
        Build service call relationships from the dependency data.

        Empty dependency data is not an error (nothing to do); missing key
        columns (parentId/spanId/traceId) are.

        Args:
            experiment_data: Experiment data object exposing dependencies_df.
            time_range: Optional (start, end) datetime filter.

        Returns:
            bool: False only when required columns are missing.
        """
        deps = experiment_data.dependencies_df

        if deps is None or deps.empty:
            logger.info("依赖数据为空，跳过服务关系构建。")
            return True

        # The relationship pass cannot run without these columns.
        if not {"parentId", "spanId", "traceId"}.issubset(deps.columns):
            logger.error("构建服务关系所需列（parentId, spanId, traceId）缺失。")
            return False

        # Optionally narrow the window, then derive caller/callee edges.
        self._build_call_relationships_optimized(
            self._apply_time_filter(deps, time_range)
        )
        return True

    def _apply_time_filter(
        self, df: pd.DataFrame, time_range: Optional[Tuple[datetime, datetime]]
    ) -> pd.DataFrame:
        """
        Restrict a DataFrame to rows whose 'timestamp' lies within time_range.

        Rows with missing or unparseable timestamps are excluded from the
        result. When no range is given, the frame is empty, or there is no
        'timestamp' column, the input is returned unchanged.

        Args:
            df: DataFrame to filter.
            time_range: Optional inclusive (start_time, end_time) tuple.

        Returns:
            pd.DataFrame: Filtered frame (or the original when no filtering
            is possible).
        """
        if not time_range or df.empty:
            return df

        if "timestamp" not in df.columns:
            logger.warning("时间过滤需要 'timestamp' 列，但该列不存在。返回原始数据。")
            return df

        start_time, end_time = time_range

        # Work on a copy so the caller's frame is never mutated.
        candidate = df.copy()
        if not pd.api.types.is_datetime64_any_dtype(candidate["timestamp"]):
            candidate["timestamp"] = pd.to_datetime(
                candidate["timestamp"], errors="coerce"
            )

        ts = candidate["timestamp"]
        # The notna() term drops rows whose timestamp coerced to NaT.
        in_range = ts.notna() & (ts >= start_time) & (ts <= end_time)
        kept = candidate[in_range]

        logger.info(f"时间过滤: {len(kept)}/{len(df)} 条记录保留")
        return kept

    def _build_call_relationships_optimized(self, df: pd.DataFrame):
        """
        Derive caller->callee service relationships from parent/child spans.

        For every span that references a parentId, both spans are looked up
        in the prebuilt _span_index. Pairs with a missing index entry, an
        unknown service name, or identical caller/callee services are
        skipped; the remaining pairs are aggregated in one batch.

        Improvements over the previous version:
        - ``itertuples()`` replaces the slow ``iterrows()`` loop.
        - Removed the unused per-row ``trace_id`` local — the trace id stored
          in the relationship always came from the parent's index entry.
        - Dropped a needless ``.copy()``: the filtered frame is only iterated,
          never mutated.

        Args:
            df: DataFrame of spans (must contain 'parentId' to be useful).
        """
        if df.empty or "parentId" not in df.columns:
            logger.info("用于构建调用关系的DataFrame为空或缺少'parentId'。")
            return

        # Only rows that reference a parent span can form a call edge.
        parent_child_df = df[df["parentId"].notna()]

        if parent_child_df.empty:
            logger.info("没有找到父子调用关系。")
            return

        call_relationships_data = []

        for row in parent_child_df.itertuples(index=False):
            span_id = row.spanId
            parent_id = row.parentId

            # Fast lookups via the prebuilt index. Entries are
            # (trace_id, service_name, container_id, timestamp).
            parent_info = self._span_index.get(parent_id)
            child_info = self._span_index.get(span_id)
            if parent_info is None or child_info is None:
                continue  # one side was never indexed

            caller_service = parent_info[1]
            callee_service = child_info[1]

            if caller_service is None or callee_service is None:
                continue  # cannot attribute the call to a service
            if caller_service == callee_service:
                continue  # skip service self-calls

            call_relationships_data.append(
                {
                    "caller": caller_service,
                    "callee": callee_service,
                    "trace_id": parent_info[0],
                    "span_id": span_id,
                    "parent_id": parent_id,
                }
            )

        # Aggregate counts and latencies in one pass.
        self._batch_update_call_stats(call_relationships_data)

        logger.info(
            f"优化调用关系构建完成: {len(call_relationships_data)} 个潜在调用对被处理"
        )

    def _batch_update_call_stats(self, call_relationships_data: List[Dict]):
        """
        Aggregate call relationships into self.interface_calls.

        Counts calls per (caller, callee) pair with a Counter and accumulates
        positive latencies. The number of latency observations contributing
        to the sum is kept under 'latency_observation_count' so an average
        can be derived later.

        Args:
            call_relationships_data: Dicts with caller/callee/trace_id/
                span_id/parent_id keys.
        """
        if not call_relationships_data:
            return

        pair_counts = Counter()
        latency_acc = defaultdict(lambda: {"sum": 0.0, "count": 0})

        for rel in call_relationships_data:
            pair = (rel["caller"], rel["callee"])
            pair_counts[pair] += 1

            # Latency of the child span relative to its parent, in ms.
            latency = self._calculate_latency_fast(
                rel["trace_id"], rel["span_id"], rel["parent_id"]
            )
            if latency is not None and latency > 0:
                acc = latency_acc[pair]
                acc["sum"] += latency
                acc["count"] += 1

        # Fold the batch into the persistent per-pair statistics.
        for pair, count in pair_counts.items():
            entry = self.interface_calls[pair]
            entry["call_count"] += count

            acc = latency_acc.get(pair)
            if acc and acc["count"] > 0:
                entry["total_latency"] += acc["sum"]
                entry.setdefault("latency_observation_count", 0)
                entry["latency_observation_count"] += acc["count"]

        logger.info(f"调用统计更新完成: {len(self.interface_calls)} 类接口调用关系")

    def _calculate_latency_fast(
        self, trace_id: str, span_id: str, parent_id: str
    ) -> Optional[float]:
        """
        Compute the parent->child latency in milliseconds from indexed spans.

        Both timestamps come from self._span_index; the trace_id argument is
        kept for interface compatibility but is not needed for the lookup.

        Args:
            trace_id: Trace identifier (unused; timestamps live in _span_index).
            span_id: Child span id.
            parent_id: Parent span id.

        Returns:
            Optional[float]: Latency in ms, or None when either span is not
            indexed, either timestamp is not a datetime, or the child starts
            before the parent (negative latency is treated as invalid data).
        """
        child = self._span_index.get(span_id)
        parent = self._span_index.get(parent_id)
        if child is None or parent is None:
            return None

        # Index entries are (trace_id, service, container, timestamp).
        child_ts = child[3]
        parent_ts = parent[3]
        if not (isinstance(child_ts, datetime) and isinstance(parent_ts, datetime)):
            return None
        if child_ts < parent_ts:
            # Child appears to start before its parent: data issue (or an
            # async model we don't account for) — report as unknown.
            return None

        return (child_ts - parent_ts).total_seconds() * 1000.0

    def _process_optimized_metrics(
        self, experiment_data, time_range, graph_id, metric_result
    ):
        """
        Process metric data in batches and attach it to entities.

        Steps: time-filter the metrics, collect anomalous
        (cmdb_id, metric_name) pairs from metric_result, then run the
        vectorized metric pass.

        Args:
            experiment_data: Experiment data object exposing metrics_df.
            time_range: Optional (start, end) datetime filter.
            graph_id: Identifier of the graph being built.
            metric_result: Metric analysis result carrying anomaly intervals.
        """
        metrics_df = experiment_data.metrics_df
        if metrics_df is None or metrics_df.empty:
            logger.warning("指标数据为空，跳过指标处理")
            return

        # Filter on a copy so the experiment data stays untouched.
        filtered_metrics = self._apply_time_filter(metrics_df.copy(), time_range)
        if filtered_metrics.empty:
            logger.warning("时间过滤后指标数据为空。")
            return

        anomaly_metrics = self._extract_anomaly_metrics_fast(metric_result)
        self._process_metrics_vectorized(filtered_metrics, graph_id, anomaly_metrics)

    def _extract_anomaly_metrics_fast(self, metric_result) -> Set[Tuple[str, str]]:
        """
        Collect anomalous (cmdb_id, metric_name) pairs from a metric result.

        Defensive about shape: metric_result may be None, and the
        anomaly_intervals_by_cmdb attribute, its per-cmdb values, and the
        individual interval entries are all type-checked before use.

        Args:
            metric_result: Metric analysis result object (or None).

        Returns:
            Set[Tuple[str, str]]: Anomalous (cmdb_id, metric_name) pairs;
            cmdb ids are normalized to strings.
        """
        intervals_by_cmdb = getattr(metric_result, "anomaly_intervals_by_cmdb", None)
        if (
            not metric_result
            or not isinstance(intervals_by_cmdb, dict)
            or not intervals_by_cmdb
        ):
            return set()

        return {
            (str(cmdb_id), interval["metric_name"])
            for cmdb_id, intervals in intervals_by_cmdb.items()
            if isinstance(intervals, list)
            for interval in intervals
            if isinstance(interval, dict) and "metric_name" in interval
        }

    def _process_metrics_vectorized(
        self, metrics_df: pd.DataFrame, graph_id: str, anomaly_metrics: Set
    ):
        """
        Reduce metric rows to the latest value per (cmdb_id, metric_name)
        and store them in self.entity_metrics.

        Steps:
            1. Drop rows missing cmdb_id/metric_name/value.
            2. When usable timestamps exist, sort by timestamp and keep the
               last row per (cmdb_id, metric_name); otherwise keep the last
               row in the current frame order.
            3. Infer a type for previously unseen entities and record each
               metric as (metric_id, name, value, timestamp).

        Args:
            metrics_df: Metric data with at least cmdb_id/metric_name/value.
            graph_id: Identifier used when generating metric ids.
            anomaly_metrics: Set of anomalous (cmdb_id, metric_name) pairs.
                NOTE(review): currently collected but unused below — the
                anomaly-flag line is commented out.
        """
        if not ({"cmdb_id", "metric_name", "value"} <= set(metrics_df.columns)):
            logger.error("指标数据缺少 'cmdb_id', 'metric_name', 或 'value' 列。")
            return

        # Drop incomplete rows; copy so later in-place ops don't touch the input.
        clean_metrics = metrics_df.dropna(
            subset=["cmdb_id", "metric_name", "value"]
        ).copy()

        # Pick the latest value per (cmdb_id, metric_name) group.
        if (
            "timestamp" in clean_metrics.columns
            and clean_metrics["timestamp"].notna().any()
        ):
            # Coerce to datetime if an upstream step has not already done so.
            if not pd.api.types.is_datetime64_any_dtype(clean_metrics["timestamp"]):
                clean_metrics["timestamp"] = pd.to_datetime(
                    clean_metrics["timestamp"], errors="coerce"
                )

            # Rows whose timestamp failed coercion cannot be ordered — drop them.
            clean_metrics.dropna(subset=["timestamp"], inplace=True)
            if clean_metrics.empty:
                logger.warning("所有指标数据因无效时间戳被移除。")
                return

            latest_metrics = (
                clean_metrics.sort_values("timestamp")
                .groupby(["cmdb_id", "metric_name"], as_index=False)
                .last()
            )
        else:  # No timestamp column, or every timestamp is NaN.
            logger.warning(
                "指标数据中缺少有效时间戳，将取分组后的最后一个值作为最新值。"
            )
            latest_metrics = clean_metrics.groupby(
                ["cmdb_id", "metric_name"], as_index=False
            ).last()

        # Snapshot of known entities so each unseen id is inferred only once.
        known_entities = set(self.entity_types.keys())

        for _, row in latest_metrics.iterrows():
            cmdb_id = str(row["cmdb_id"])  # normalize ids to strings
            metric_name = row["metric_name"]
            value = row["value"]
            # Timestamp may be absent when grouping ran without one; fall back to now.
            timestamp_val = row.get("timestamp", datetime.now())
            # Guarantee a datetime object before storing the observation.
            if not isinstance(timestamp_val, datetime):
                timestamp_val = pd.to_datetime(timestamp_val, errors="coerce")
                if pd.isna(timestamp_val):  # conversion failed — use current time
                    timestamp_val = datetime.now()

            # Infer a type for entities not seen in the dependency data.
            if cmdb_id not in known_entities:
                entity_type = self._infer_entity_type_fast(cmdb_id)
                self.entity_types[cmdb_id] = entity_type
                known_entities.add(
                    cmdb_id
                )  # remember so later rows don't re-infer

            # Build the metric id and store the observation.
            metric_id = self.generate_metric_id(cmdb_id, metric_name, graph_id)
            # is_anomalous = (cmdb_id, metric_name) in anomaly_metrics  # kept for future use

            self.entity_metrics[cmdb_id].append(
                (metric_id, metric_name, value, timestamp_val)
            )

        logger.info(f"向量化处理了 {len(latest_metrics)} 个指标")

    def _infer_entity_type_fast(self, entity_id: str) -> str:
        """
        快速推断实体类型 - 使用预编译的模式匹配

        主要功能：
        1. 分析实体ID特征
        2. 匹配实体类型模式
        3. 处理特殊情况
        4. 提供默认分类

        参数:
            entity_id: 实体ID

        返回:
            str: 推断出的实体类型
        """
        entity_id_lower = str(entity_id).lower()  # Ensure input is string

        # 使用更高效的字符串匹配
        if (
            "docker" in entity_id_lower or "container" in entity_id_lower
        ):  # cmdb_id for container
            return "container"
        elif (
            "node" in entity_id_lower or "host" in entity_id_lower
        ):  # cmdb_id for node/host
            return "node"
        # serviceName is usually the service identifier, cmdb_id for service might follow other patterns
        # This inference based on cmdb_id containing 'service' might be too broad
        # but kept for consistency if this was the previous logic.
        elif (
            "service" in entity_id_lower
            or "svc" in entity_id_lower
            or "db" in entity_id_lower
        ):
            return "service"
        else:
            # If cmdb_id doesn't give a clue, it might be a pod, or other resource type
            # Defaulting to 'unknown' is safe. Specific projects might have better heuristics.
            return "unknown"

    def _handle_empty_dependencies(self, experiment_data) -> bool:
        """Fallback handling for an empty dependencies DataFrame.

        Tries to salvage a basic graph skeleton from global service
        information exposed by ``experiment_data``; the resulting graph
        is necessarily lower quality than one built from real
        dependency data.

        Args:
            experiment_data: Experiment data object, optionally exposing
                ``get_all_services()``.

        Returns:
            bool: True if a basic structure could be built from global
            info, False if nothing was available.
        """
        logger.warning("依赖数据 (dependencies_df) 为空。")

        # Global service info is the only remaining fallback source.
        all_services = None
        if hasattr(experiment_data, "get_all_services"):
            all_services = experiment_data.get_all_services()

        if not all_services:
            logger.error("依赖数据为空且无全局服务信息，无法构建图。")
            return False

        logger.info("依赖数据为空，尝试使用全局服务信息构建基本结构。")
        self._build_from_global_info_optimized(experiment_data)
        return True

    def _build_from_global_info_optimized(self, experiment_data):
        """
        优化的全局信息构建

        主要功能：
        1. 提取全局服务信息
        2. 构建服务容器映射
        3. 设置实体类型
        4. 优化数据结构更新

        参数:
            experiment_data: 实验数据对象
        """
        all_services = (
            experiment_data.get_all_services()
            if hasattr(experiment_data, "get_all_services")
            else []
        )
        all_containers = (
            experiment_data.get_all_containers()
            if hasattr(experiment_data, "get_all_containers")
            else []
        )
        service_container_mapping = (
            experiment_data.get_service_container_mapping()
            if hasattr(experiment_data, "get_service_container_mapping")
            else {}
        )

        # 批量设置实体类型
        for service_name in all_services:
            self.entity_types[service_name] = "service"

        for container_id in all_containers:
            self.entity_types[container_id] = (
                "container"  # Assuming cmdb_id for container
            )

        # 批量构建映射 (service to its containers, and container to its services)
        if isinstance(service_container_mapping, dict):
            for service, containers in service_container_mapping.items():
                if isinstance(containers, (list, set)):  # Ensure containers is iterable
                    self.interface_containers[service].update(containers)
                    for container in containers:
                        self.container_interfaces[container].add(service)
                        if (
                            container not in self.entity_types
                        ):  # Ensure container type is recorded
                            self.entity_types[container] = "container"
                if service not in self.entity_types:  # Ensure service type is recorded
                    self.entity_types[service] = "service"

    def _supplement_global_info(self, experiment_data):
        """
        补充全局信息 - 优化版本

        主要功能：
        1. 补充缺失服务信息
        2. 更新映射关系
        3. 维护实体类型
        4. 优化数据更新过程

        参数:
            experiment_data: 实验数据对象
        """
        all_services = (
            experiment_data.get_all_services()
            if hasattr(experiment_data, "get_all_services")
            else None
        )
        service_container_mapping = (
            experiment_data.get_service_container_mapping()
            if hasattr(experiment_data, "get_service_container_mapping")
            else None
        )

        if not all_services:  # No global service information to supplement with
            return

        # Identify services from global list that are not yet in entity_types or interface_containers
        # current_services_in_graph = set(self.entity_types.keys()) # More direct way to get all known entities
        # Or specifically services:
        current_services_in_graph = set(self.interface_containers.keys())
        for call_key in self.interface_calls.keys():
            current_services_in_graph.add(call_key[0])
            current_services_in_graph.add(call_key[1])

        missing_services = (
            set(all_services) - current_services_in_graph
        )  # Services in global list but not in graph yet

        if missing_services:
            logger.info(
                f"补充 {len(missing_services)} 个在全局列表但不在追踪数据中的服务。"
            )

            for service in missing_services:
                if (
                    service not in self.entity_types
                ):  # Add if not already typed (e.g. from metrics)
                    self.entity_types[service] = "service"

                # If service-container mapping exists globally, add this info
                if service_container_mapping and service in service_container_mapping:
                    containers = service_container_mapping[service]
                    if isinstance(containers, (list, set)):
                        self.interface_containers[service].update(
                            containers
                        )  # Add to service's containers
                        for container in containers:
                            self.container_interfaces[container].add(
                                service
                            )  # Add to container's services
                            if container not in self.entity_types:
                                self.entity_types[container] = "container"

    def _finalize_graph_build(self, graph_id: str) -> Optional[str]:
        """
        完成图构建的最终步骤

        主要功能：
        1. 保存图到缓存
        2. 更新状态文件
        3. 记录构建结果
        4. 处理错误情况

        参数:
            graph_id: 图ID

        返回:
            Optional[str]: 缓存文件路径，如果保存失败则返回None
        """
        saved_pkl_path = None

        if self.cache_dir:
            # Before saving, ensure nodes/relationships are populated if this class is responsible
            # Or if they are managed by the parent NeoGraph class based on these structures.
            # For now, assume they are ready or save_graph_to_cache handles it.
            self.save_graph_to_cache(
                graph_id
            )  # This method saves self.nodes, self.relationships etc.
            saved_pkl_path = os.path.join(
                self.cache_dir, f"graph_{graph_id}", "graph_data.pkl"
            )  # Corrected path

            # Update JSON output file
            self._update_output_json(graph_id, "dependency_graph_built", saved_pkl_path)

        return saved_pkl_path

    def _update_output_json(
        self, graph_id, build_phase, pkl_cache_file_path, causal_data=None
    ):
        """
        Create or update the JSON status file under ``output_dir``.

        Writes/refreshes ``<output_dir>/graph/<graph_id>.json`` with the
        current build phase, performance statistics, a summary of the
        dependency graph, the serialized dependency-graph data and,
        when ``build_phase`` is "causal_graph_built", causal-edge data.

        Args:
            graph_id: Graph identifier (also used as the JSON file stem).
            build_phase: Build-phase marker (e.g. "dependency_graph_built",
                "causal_graph_built").
            pkl_cache_file_path: Path to the pickled graph cache file.
            causal_data: Optional dict; its "causal_edges" entry is
                serialized when the causal phase is being recorded.
        """
        if not self.output_dir:
            logger.warning("Output directory not set. Skipping _update_output_json.")
            return

        try:
            output_graph_dir = os.path.join(self.output_dir, "graph")
            os.makedirs(output_graph_dir, exist_ok=True)
            output_json_path = os.path.join(output_graph_dir, f"{graph_id}.json")

            # Start from the existing file (if parseable) so that earlier
            # phases' data is preserved and only updated keys change.
            json_data = {}
            if os.path.exists(output_json_path):
                try:
                    with open(output_json_path, "r") as f:
                        json_data = json.load(f)
                except json.JSONDecodeError:
                    logger.warning(
                        f"无法解析已存在的JSON文件: {output_json_path}。将创建新的文件。"
                    )

            json_data["graph_id"] = graph_id
            json_data["build_phase"] = build_phase
            json_data["pkl_cache_file_path"] = (
                pkl_cache_file_path  # Path to the .pkl file itself
            )
            json_data["last_updated"] = datetime.now().isoformat()

            json_data["performance_stats"] = self.perf_stats

            # nodes/relationships live on the NeoGraph parent; guard with
            # hasattr in case they were never populated.
            nodes_count = (
                len(self.nodes)
                if hasattr(self, "nodes") and self.nodes is not None
                else 0
            )
            relationships_count = (
                len(self.relationships)
                if hasattr(self, "relationships") and self.relationships is not None
                else 0
            )
            dependency_summary = {
                "nodes_count": nodes_count,
                "relationships_count": relationships_count,
                "entity_types_count": len(self.entity_types),
                "interface_containers_count": len(self.interface_containers),
                "container_interfaces_count": len(self.container_interfaces),
                "interface_calls_count": len(self.interface_calls),
                "span_to_interface_count": len(self.span_to_interface),
                "entity_metrics_count": len(self.entity_metrics),
                "span_index_count": len(self._span_index),
                "trace_index_count": len(self._trace_index),
                "total_traces_processed": self.perf_stats.get(
                    "total_traces_processed", 0
                ),
                "total_spans_processed": self.perf_stats.get(
                    "total_spans_processed", 0
                ),
            }
            json_data["dependency_graph_summary"] = dependency_summary

            # Convert the in-memory structures (sets, tuple keys, ...) to
            # JSON-serializable equivalents.
            dependency_graph_data = {}

            container_interfaces_dict = {
                k: list(v) for k, v in self.container_interfaces.items()
            }
            dependency_graph_data["container_interfaces"] = container_interfaces_dict

            interface_containers_dict = {
                k: list(v) for k, v in self.interface_containers.items()
            }
            dependency_graph_data["interface_containers"] = interface_containers_dict

            # (caller, callee) tuple keys become "caller:callee" strings.
            interface_calls_dict = {}
            for (src, dst), stats in self.interface_calls.items():
                key = f"{str(src)}:{str(dst)}"  # Ensure src/dst are strings
                interface_calls_dict[key] = stats
            dependency_graph_data["interface_calls"] = interface_calls_dict

            # Metric tuples -> dicts; values/timestamps coerced to
            # JSON-friendly float/ISO-string forms where possible.
            entity_metrics_dict = {}
            for entity_id, metrics_list in self.entity_metrics.items():
                serializable_metrics = []
                for metric_tuple in metrics_list:
                    if len(metric_tuple) == 4:
                        metric_id, metric_name, value, timestamp = metric_tuple
                        serializable_metrics.append(
                            {
                                "metric_id": metric_id,
                                "metric_name": metric_name,
                                "value": (
                                    float(value)
                                    if pd.notna(value)
                                    and isinstance(value, (int, float))
                                    else str(value)
                                ),  # Handle non-floatable
                                "timestamp": (
                                    timestamp.isoformat()
                                    if isinstance(timestamp, datetime)
                                    else str(timestamp)
                                ),
                            }
                        )
                entity_metrics_dict[str(entity_id)] = (
                    serializable_metrics  # Ensure entity_id is string
                )
            dependency_graph_data["entity_metrics"] = entity_metrics_dict

            json_data["dependency_graph_data"] = dependency_graph_data

            # Causal-edge data is only (re)written during the causal phase.
            if build_phase == "causal_graph_built" and causal_data:
                causal_edges_list = []
                if (
                    "causal_edges" in causal_data
                    and causal_data["causal_edges"] is not None
                ):
                    for edge in causal_data["causal_edges"]:
                        if hasattr(edge, "to_dict") and callable(
                            getattr(edge, "to_dict")
                        ):
                            causal_edges_list.append(edge.to_dict())
                        else:
                            logger.warning(
                                f"CausalityEdge 对象 {edge}缺少to_dict方法，将尝试直接序列化"
                            )
                            causal_edges_list.append(
                                vars(edge) if hasattr(edge, "__dict__") else str(edge)
                            )
                json_data["causal_graph_data"] = {
                    "causal_edges_count": len(causal_edges_list),
                    "causal_edges": causal_edges_list,
                }
            elif (
                "causal_graph_data" in json_data and build_phase != "causal_graph_built"
            ):
                # Keep previously written causal data untouched.
                pass

            with open(output_json_path, "w") as f:
                json.dump(
                    json_data, f, indent=2, default=str
                )  # default=str handles non-serializable types

            logger.info(f"Output JSON状态文件已更新: {output_json_path}")

        except Exception as e:
            logger.error(f"更新Output JSON状态文件时发生错误: {e}")
            import traceback

            logger.error(traceback.format_exc())

    def get_pod_interfaces(self) -> Dict[str, List[str]]:
        """
        Return the mapping from pod/container IDs to their service names.

        "Interface" here means a service name associated with the
        container/pod. The mapping is derived directly from
        ``self.container_interfaces`` (container_id -> set of service
        names), which is cheap enough that no extra caching is needed.

        Returns:
            Dict[str, List[str]]: container_id -> list of associated
            service names; empty dict when no graph is active.
        """
        if not self.active_graph_id:  # Graph must be built or loaded first.
            logger.error(
                "active_graph_id is not set. Call build_graph() or load_graph_from_cache() first."
            )
            return {}

        # FIX: the previous implementation probed
        # ``_service_containers_cache`` here, but that cache maps
        # service -> containers, was never populated for this getter, and
        # the lookup always fell through with ``pass`` — dead code.
        # Compute the result directly instead.
        return {
            container_id: list(service_names)
            for container_id, service_names in self.container_interfaces.items()
        }

    def get_interface_dependencies(self, reverse: bool = False) -> Dict[str, List[str]]:
        """
        Return the call-dependency mapping between service interfaces.

        Only pairs with at least one recorded call are included. Results
        are memoized per (graph, direction) in
        ``self._call_relationships_cache``.

        Args:
            reverse: When True, map each callee to its callers
                ("who depends on me"); otherwise map each caller to its
                callees ("what I depend on").

        Returns:
            Dict[str, List[str]]: service -> unique list of related
            services; empty dict when no graph is active.
        """
        if not self.active_graph_id:
            logger.error(
                "active_graph_id is not set. Call build_graph() or load_graph_from_cache() first."
            )
            return {}

        # Direction is part of the cache key so both variants coexist.
        cache_key = f"interface_deps_{self.active_graph_id}_{str(reverse)}"
        cached = self._call_relationships_cache.get(cache_key)
        if cached is not None:
            return cached

        deps = defaultdict(set)
        for (caller, callee), stats in self.interface_calls.items():
            if stats.get("call_count", 0) > 0:  # Skip zero-call entries.
                src, dst = (callee, caller) if reverse else (caller, callee)
                deps[src].add(dst)

        # Plain dict with de-duplicated lists for output and caching.
        result = {service: list(targets) for service, targets in deps.items()}
        self._call_relationships_cache[cache_key] = result
        return result

    def generate_metric_id(
        self, entity_id: str, metric_name: str, graph_id: str
    ) -> str:
        """
        Build the canonical unique ID for a metric.

        The ID is the underscore-joined string forms of the entity ID,
        metric name and graph ID, keeping metric IDs unique per
        (entity, metric, graph) triple.

        Args:
            entity_id: ID of the entity owning the metric.
            metric_name: Name of the metric.
            graph_id: Graph the metric belongs to.

        Returns:
            str: "<entity_id>_<metric_name>_<graph_id>".
        """
        # str() coerces non-string components (including None) safely.
        return "_".join((str(entity_id), str(metric_name), str(graph_id)))

    def save_graph_to_cache(self, graph_id):
        """
        Save the graph structures to the on-disk cache (optimized version).

        Writes two files under ``<cache_dir>/graph_<graph_id>/``:
        ``metadata.json`` (summary counts and performance stats for quick
        inspection) and ``graph_data.pkl`` (the pickled graph structures
        and indices). The layout must stay in sync with
        ``load_graph_from_cache``.

        Args:
            graph_id: Unique identifier of the graph.

        Returns:
            bool: True on success; False when no cache dir is configured
            or an error occurred (errors are logged, not raised).
        """
        if not self.cache_dir:
            logger.warning("缓存目录未设置，跳过保存到缓存。")
            return False

        try:
            # Path for this specific graph's cache files (e.g., /cache_dir/graph_mygraphid/)
            cache_path_for_graph_id = os.path.join(self.cache_dir, f"graph_{graph_id}")
            os.makedirs(cache_path_for_graph_id, exist_ok=True)

            # nodes/relationships live on the NeoGraph parent; guard with
            # hasattr in case they were never populated.
            nodes_count = (
                len(self.nodes)
                if hasattr(self, "nodes") and self.nodes is not None
                else 0
            )
            relationships_count = (
                len(self.relationships)
                if hasattr(self, "relationships") and self.relationships is not None
                else 0
            )
            # Metadata includes performance stats for quick inspection
            # without unpickling the full graph data.
            metadata = {
                "graph_id": graph_id,
                "nodes_count": nodes_count,
                "relationships_count": relationships_count,
                "created_at": datetime.now().isoformat(),
                "performance_stats": self.perf_stats,  # Include performance stats
                # Add other summary counts if useful for quick inspection from metadata
                "entity_types_count": len(self.entity_types),
                "interface_calls_count": len(self.interface_calls),
            }

            with open(os.path.join(cache_path_for_graph_id, "metadata.json"), "w") as f:
                json.dump(metadata, f, indent=2)

            # Graph data includes optimized structures. defaultdicts are
            # converted to plain dicts for a stable pickled form; the
            # loader re-wraps them with their default factories.
            graph_data = {
                "nodes": (
                    self.nodes if hasattr(self, "nodes") else {}
                ),  # From NeoGraph parent
                "relationships": (
                    self.relationships if hasattr(self, "relationships") else []
                ),  # From NeoGraph parent
                "entity_types": self.entity_types,
                "interface_containers": dict(
                    self.interface_containers
                ),  # Convert defaultdict
                "container_interfaces": dict(
                    self.container_interfaces
                ),  # Convert defaultdict
                "interface_calls": dict(self.interface_calls),  # Convert defaultdict
                "entity_metrics": dict(self.entity_metrics),  # Convert defaultdict
                "span_to_interface": self.span_to_interface,
                # Optimized indices
                "_span_index": self._span_index,
                "_trace_index": self._trace_index,
                # Caches that might be useful to persist if they are expensive to rebuild
                # "_service_containers_cache": self._service_containers_cache,
                # "_call_relationships_cache": self._call_relationships_cache,
                # Persist perf_stats also within the pickle for full state restoration
                "perf_stats": self.perf_stats,
            }

            with open(
                os.path.join(cache_path_for_graph_id, "graph_data.pkl"), "wb"
            ) as f:
                pickle.dump(graph_data, f, protocol=pickle.HIGHEST_PROTOCOL)

            logger.info(f"优化图结构已保存到缓存: {cache_path_for_graph_id}")
            return True

        except Exception as e:
            logger.error(f"保存图结构到缓存时发生错误: {e}")
            import traceback

            logger.error(traceback.format_exc())
            return False

    def load_graph_from_cache(self, graph_id):
        """
        Load graph structures from the on-disk cache (optimized version).

        Reads ``metadata.json`` and ``graph_data.pkl`` from
        ``<cache_dir>/graph_<graph_id>/`` (the layout written by
        ``save_graph_to_cache``) and restores all graph structures,
        defaultdicts, indices and performance statistics. Sets
        ``self.active_graph_id`` on success.

        Args:
            graph_id: Unique identifier of the graph to load.

        Returns:
            bool: True when the cache existed and loaded cleanly,
            False otherwise (errors are logged, not raised).
        """
        if not self.cache_dir:
            return False  # No cache directory to load from

        try:
            cache_path_for_graph_id = os.path.join(self.cache_dir, f"graph_{graph_id}")
            metadata_file = os.path.join(cache_path_for_graph_id, "metadata.json")
            graph_data_file = os.path.join(cache_path_for_graph_id, "graph_data.pkl")

            # Both files must exist for the cache to be considered valid.
            if not (os.path.exists(metadata_file) and os.path.exists(graph_data_file)):
                logger.info(f"图 {graph_id} 的缓存不存在于: {cache_path_for_graph_id}")
                return False

            with open(metadata_file, "r") as f:
                metadata = json.load(f)
            logger.info(f"发现图 {graph_id} 的缓存元数据: {metadata_file}")

            # NOTE(review): pickle.load assumes the cache directory is
            # trusted — unpickling attacker-controlled files executes
            # arbitrary code.
            with open(graph_data_file, "rb") as f:
                graph_data = pickle.load(f)

            # Restore graph structures (nodes and relationships are from NeoGraph parent)
            if hasattr(self, "nodes") and "nodes" in graph_data:
                self.nodes = graph_data["nodes"]
            if hasattr(self, "relationships") and "relationships" in graph_data:
                self.relationships = graph_data["relationships"]

            self.entity_types = graph_data.get("entity_types", {})
            # Re-wrap plain dicts as defaultdicts with their original
            # default factories (the saver stripped them for pickling).
            self.interface_containers = defaultdict(
                set, graph_data.get("interface_containers", {})
            )
            self.container_interfaces = defaultdict(
                set, graph_data.get("container_interfaces", {})
            )

            default_call_stats = lambda: {
                "call_count": 0,
                "total_latency": 0.0,
            }  # Match initialization
            self.interface_calls = defaultdict(
                default_call_stats, graph_data.get("interface_calls", {})
            )

            self.entity_metrics = defaultdict(
                list, graph_data.get("entity_metrics", {})
            )
            self.span_to_interface = graph_data.get("span_to_interface", {})

            # Restore optimized indices
            self._span_index = graph_data.get("_span_index", {})
            self._trace_index = graph_data.get("_trace_index", {})

            # Restore caches if they were saved
            # self._service_containers_cache = graph_data.get("_service_containers_cache", {})
            # self._call_relationships_cache = graph_data.get("_call_relationships_cache", {})

            # Restore performance statistics
            self.perf_stats = graph_data.get(
                "perf_stats", self.perf_stats
            )  # Fallback to default if not in cache
            self.loaded_from_cache_perf_stats = (
                self.perf_stats
            )  # Store it for build_graph to use

            self.active_graph_id = graph_id  # Set active_graph_id upon successful load
            logger.info(f"成功从缓存加载优化图结构, 图ID: {graph_id}")
            return True

        except FileNotFoundError:
            logger.info(f"图 {graph_id} 的缓存文件未找到于: {cache_path_for_graph_id}")
            return False
        except Exception as e:
            logger.error(f"从缓存加载图结构时发生错误 for graph {graph_id}: {e}")
            import traceback

            logger.error(traceback.format_exc())
            return False
