import logging
import numpy as np
import pandas as pd
from typing import Dict, List, Tuple, Any, Optional, Set, FrozenSet
from datetime import datetime
import threading
import json
from pathlib import Path

logger = logging.getLogger(__name__)
log_lock = threading.Lock()


class TraceDetectionResult:
    """
    链路异常检测结果类，封装了整个故障 (anomaly_id) 下所有时间窗口的链路异常检测结果，
    并提供存储和恢复功能。
    保存两个文件：进度文件和异常结果文件。
    结果文件包含所有窗口的服务分数和异常链路。
    模式排名文件将包含基于服务调用图结构的模式。

    ID约定说明:
        - global_anomaly_id: 全局异常ID, 用于标识整个故障。
        - window_id: 窗口ID, 格式为"{global_anomaly_id}_window_{索引}"

    文件命名约定 (基于 global_anomaly_id):
        - 进度文件: "{base_output_dir}/trace_results/{global_anomaly_id}_progress.json"
        - 结果文件: "{base_output_dir}/trace_results/{global_anomaly_id}_results.json"
        - 模式排名文件: "{base_output_dir}/trace_results/{global_anomaly_id}_patterns.json"
    """

    def __init__(
        self,
        anomaly_id: str,
        output_dir_base: Optional[str] = None,
    ):
        if not anomaly_id:
            raise ValueError("anomaly_id 不能为空")
        self.anomaly_id = anomaly_id
        self.base_output_dir = Path(output_dir_base) if output_dir_base else Path("./")
        self.results_storage_dir = self.base_output_dir / "trace_results"
        self.windows_data: Dict[str, Dict[str, Any]] = {}
        self.progress_data: Dict[str, Any] = {
            "anomaly_id": self.anomaly_id,
            "status": "pending",
            "first_window_start_overall": None,
            "last_window_end_overall": None,
            "last_update_timestamp": None,
            "last_processed_window_index": -1,
            "total_windows_expected": 0,
            "error_message": None,
        }
        self.trace_patterns: Dict[str, Dict[str, Any]] = (
            {}
        )  # Stores graph-based patterns
        logger.debug(
            f"TraceDetectionResult initialized for {anomaly_id} at {self.base_output_dir}"
        )

    @property
    def is_overall_complete(self) -> bool:
        """True once the overall status has been marked as "complete"."""
        status = self.progress_data.get("status")
        return status == "complete"

    def set_expected_total_windows(self, total_windows: int):
        """Record how many windows this anomaly is expected to produce."""
        self.progress_data.update(total_windows_expected=total_windows)
        logger.debug(
            f"[{self.anomaly_id}] Expected total windows set to: {total_windows}"
        )

    def add_or_update_window_result(
        self,
        window_id: str,
        window_start_time: int,
        window_end_time: int,
        anomalous_traces: List[Dict],
        service_scores: Dict,
        max_anomaly_metric_ratio: float,
        detection_complete_for_window: bool,
    ):
        """Store (or overwrite) one window's detection result and refresh
        the overall progress bookkeeping (time span, last processed index)."""
        if not window_id.startswith(self.anomaly_id):
            logger.warning(
                f"Window ID '{window_id}' 与 global anomaly_id '{self.anomaly_id}' 前缀不匹配。"
            )
        self.windows_data[window_id] = {
            "window_id": window_id,
            "window_start_time": window_start_time,
            "window_end_time": window_end_time,
            "anomalous_traces": anomalous_traces,
            "service_scores": service_scores or {},
            "max_anomaly_metric_ratio": max_anomaly_metric_ratio,
            "detection_complete_for_window": detection_complete_for_window,
            "last_update_for_window": datetime.now().timestamp(),
        }
        logger.debug(
            f"[{self.anomaly_id}] Window '{window_id}' result added/updated. Anomalous traces: {len(anomalous_traces)}"
        )

        progress = self.progress_data
        progress["status"] = "in_progress"
        # Widen the overall [first_start, last_end] time span if needed.
        first_start = progress["first_window_start_overall"]
        if first_start is None or window_start_time < first_start:
            progress["first_window_start_overall"] = window_start_time
        last_end = progress["last_window_end_overall"]
        if last_end is None or window_end_time > last_end:
            progress["last_window_end_overall"] = window_end_time
        progress["last_update_timestamp"] = datetime.now().timestamp()
        # window_id convention is "{anomaly_id}_window_{index}".
        try:
            window_index = int(window_id.split("_window_")[-1])
        except ValueError:
            logger.warning(
                f"无法从 window_id '{window_id}' 解析窗口索引 for progress update."
            )
        else:
            progress["last_processed_window_index"] = max(
                progress["last_processed_window_index"], window_index
            )

    def save(self) -> None:
        with log_lock:
            try:
                self.results_storage_dir.mkdir(parents=True, exist_ok=True)
                progress_file, results_file = self._get_default_paths(
                    self.results_storage_dir, self.anomaly_id
                )
                patterns_file = (
                    self.results_storage_dir / f"{self.anomaly_id}_patterns.json"
                )

                progress_temp_file = progress_file.with_suffix(".tmp")
                self.progress_data["last_update_timestamp"] = datetime.now().timestamp()
                progress_data_to_save = self._serialize_dict(self.progress_data)
                with open(progress_temp_file, "w", encoding="utf-8") as f:
                    json.dump(progress_data_to_save, f, ensure_ascii=False, indent=4)
                self._validate_json_file(progress_temp_file)
                self._safe_replace_file(progress_temp_file, progress_file)
                logger.info(f"'{self.anomaly_id}' 的检测进度已保存到 {progress_file}")

                sorted_window_results = sorted(
                    list(self.windows_data.values()),
                    key=lambda x: x.get("window_start_time", 0),
                )
                compact_window_results = []
                for window in sorted_window_results:
                    compact_window = window.copy()
                    if "anomalous_traces" in compact_window:
                        compact_traces = []
                        for trace in compact_window["anomalous_traces"]:
                            compact_trace = {
                                "trace_id": trace.get("trace_id"),
                                "latency": trace.get("latency"),
                                "metric_ratio": trace.get("metric_ratio"),
                            }
                            if "normal_traces" in trace:
                                compact_normal_traces = []
                                for normal_trace in trace["normal_traces"]:
                                    compact_normal_traces.append(
                                        {
                                            "trace_id": normal_trace.get("trace_id"),
                                            "latency": normal_trace.get("latency"),
                                        }
                                    )
                                compact_trace["normal_traces"] = compact_normal_traces
                            compact_traces.append(compact_trace)
                        compact_window["anomalous_traces"] = compact_traces
                    compact_window_results.append(compact_window)

                results_to_save = {
                    "global_anomaly_id": self.anomaly_id,
                    "windows": self._serialize_list(compact_window_results),
                }
                results_temp_file = results_file.with_suffix(".tmp")
                with open(results_temp_file, "w", encoding="utf-8") as f:
                    json.dump(results_to_save, f, ensure_ascii=False, indent=4)
                self._validate_json_file(results_temp_file)
                self._safe_replace_file(results_temp_file, results_file)

                if self.trace_patterns:  # Check if there are patterns to save
                    patterns_temp_file = patterns_file.with_suffix(".tmp")
                    # Ensure trace_ids within patterns are lists for JSON serialization
                    # The generate_trace_patterns method now ensures trace_ids are sorted lists
                    patterns_to_save = {
                        "global_anomaly_id": self.anomaly_id,
                        "patterns": self._serialize_dict(self.trace_patterns),
                    }
                    with open(patterns_temp_file, "w", encoding="utf-8") as f:
                        json.dump(patterns_to_save, f, ensure_ascii=False, indent=4)
                    self._validate_json_file(patterns_temp_file)
                    self._safe_replace_file(patterns_temp_file, patterns_file)
                    logger.info(
                        f"'{self.anomaly_id}' 的链路模式排名已保存到 {patterns_file}"
                    )

                window_count = len(sorted_window_results)
                anomaly_count = sum(
                    len(window.get("anomalous_traces", []))
                    for window in sorted_window_results
                )
                pattern_count = len(self.trace_patterns)
                logger.info(
                    f"'{self.anomaly_id}' 的检测数据已保存: {window_count}个窗口, {anomaly_count}个异常链路, {pattern_count}个模式, 文件路径: {results_file}"
                )
            except Exception as e:
                logger.error(
                    f"保存 '{self.anomaly_id}' 的链路检测结果时出错: {e}", exc_info=True
                )
                self._try_recover_from_backup(progress_file, results_file)

    def _validate_json_file(self, file_path: Path) -> bool:
        try:
            if not file_path.exists():
                logger.warning(f"无法验证不存在的文件: {file_path}")
                return False
            with open(file_path, "r", encoding="utf-8") as f:
                json.load(f)
            return True
        except json.JSONDecodeError as e:
            logger.error(f"JSON文件 {file_path} 验证失败: {e}")
            return False
        except Exception as e:
            logger.error(f"验证文件 {file_path} 时发生错误: {e}")
            return False

    def _safe_replace_file(self, source_file: Path, target_file: Path) -> bool:
        try:
            backup_file = None
            if target_file.exists():
                backup_file = target_file.with_suffix(".bak")
                target_file.replace(backup_file)
            source_file.replace(target_file)
            if target_file.exists() and self._validate_json_file(target_file):
                if backup_file and backup_file.exists():
                    backup_file.unlink(missing_ok=True)
                return True
            else:
                if backup_file and backup_file.exists():
                    logger.warning(
                        f"目标文件 {target_file} 替换后验证失败，正在从备份恢复"
                    )
                    backup_file.replace(target_file)
                return False
        except Exception as e:
            logger.error(f"安全替换文件时发生错误: {e}", exc_info=True)
            return False

    def _try_recover_from_backup(self, progress_file: Path, results_file: Path) -> None:
        try:
            progress_backup = progress_file.with_suffix(".bak")
            if progress_backup.exists() and self._validate_json_file(progress_backup):
                progress_backup.replace(progress_file)
                logger.info(f"已从备份恢复进度文件: {progress_file}")
            results_backup = results_file.with_suffix(".bak")
            if results_backup.exists() and self._validate_json_file(results_backup):
                results_backup.replace(results_file)
                logger.info(f"已从备份恢复结果文件: {results_file}")
        except Exception as e:
            logger.error(f"尝试恢复备份文件时发生错误: {e}", exc_info=True)

    @classmethod
    def load(
        cls, anomaly_id: str, output_dir_base: Optional[str] = None
    ) -> Optional["TraceDetectionResult"]:
        if not anomaly_id:
            logger.warning("尝试加载结果但 anomaly_id 为空。")
            return None
        base_dir = Path(output_dir_base) if output_dir_base else Path("./")
        results_storage_dir = base_dir / "trace_results"
        progress_file, results_file = cls._get_default_paths(
            results_storage_dir, anomaly_id
        )
        patterns_file = results_storage_dir / f"patterns.json"
        instance = cls(anomaly_id, str(base_dir))
        loaded_progress_file = False
        if progress_file.exists():
            try:
                with open(progress_file, "r", encoding="utf-8") as f:
                    instance.progress_data = json.load(f)
                logger.info(f"从 {progress_file} 加载 '{anomaly_id}' 的进度数据成功。")
                loaded_progress_file = True
            except Exception as e:
                logger.error(f"加载进度文件 {progress_file} 出错: {e}", exc_info=True)
        else:
            logger.info(f"进度文件 {progress_file} 未找到 for '{anomaly_id}'.")

        loaded_results_file = False
        if results_file.exists():
            try:
                with open(results_file, "r", encoding="utf-8") as f:
                    loaded_results_data = json.load(f)
                if loaded_results_data.get("global_anomaly_id") != anomaly_id:
                    logger.warning(
                        f"结果文件 {results_file} 中的 global_anomaly_id "
                        f"('{loaded_results_data.get('global_anomaly_id')}') "
                        f"与请求的 anomaly_id ('{anomaly_id}') 不匹配。结果文件数据未加载。"
                    )
                else:
                    for window_dict in loaded_results_data.get("windows", []):
                        if window_id := window_dict.get("window_id"):
                            instance.windows_data[window_id] = window_dict
                    logger.info(
                        f"从 {results_file} 加载 '{anomaly_id}' 的 {len(instance.windows_data)} 个窗口结果成功。"
                    )
                    loaded_results_file = True
            except Exception as e:
                logger.error(f"加载结果文件 {results_file} 出错: {e}", exc_info=True)
        else:
            logger.info(f"结果文件 {results_file} 未找到 for '{anomaly_id}'.")

        loaded_patterns_file = False
        if patterns_file.exists():
            try:
                with open(patterns_file, "r", encoding="utf-8") as f:
                    loaded_patterns_data = json.load(f)
                if loaded_patterns_data.get("global_anomaly_id") != anomaly_id:
                    logger.warning(
                        f"模式排名文件 {patterns_file} 中的 global_anomaly_id "
                        f"('{loaded_patterns_data.get('global_anomaly_id')}') "
                        f"与请求的 anomaly_id ('{anomaly_id}') 不匹配。模式排名数据未加载。"
                    )
                else:
                    # Deserialize trace_ids from list back to set if necessary for internal logic,
                    # but patterns are generally read and used as-is.
                    # The save method stores them as lists.
                    instance.trace_patterns = loaded_patterns_data.get("patterns", {})
                    logger.info(
                        f"从 {patterns_file} 加载 '{anomaly_id}' 的 {len(instance.trace_patterns)} 个模式排名成功。"
                    )
                    loaded_patterns_file = True
            except Exception as e:
                logger.error(
                    f"加载模式排名文件 {patterns_file} 出错: {e}", exc_info=True
                )
        else:
            logger.info(f"模式排名文件 {patterns_file} 未找到 for '{anomaly_id}'.")

        if (
            not loaded_progress_file
            and not loaded_results_file
            and not loaded_patterns_file
        ):
            logger.info(
                f"'{anomaly_id}' 的进度、结果和模式排名文件均未找到或加载失败。将创建新实例。"
            )
            return None
        return instance

    def get_last_processed_window_index(self) -> int:
        """Highest window index processed so far; -1 when nothing has run."""
        last_index: int = self.progress_data.get("last_processed_window_index", -1)
        return last_index

    def is_window_processed(self, window_id: str) -> bool:
        """True iff *window_id* has a stored result flagged as complete."""
        window = self.windows_data.get(window_id)
        if not window:
            return False
        return bool(window.get("detection_complete_for_window", False))

    def mark_overall_complete(self, error_message: Optional[str] = None) -> None:
        """Finalize the run: status becomes "error" when *error_message* is
        given, otherwise "complete". An existing error status is never
        overwritten by "complete"."""
        if error_message:
            self.progress_data["status"] = "error"
            self.progress_data["error_message"] = error_message
            logger.error(f"'{self.anomaly_id}' 检测因错误而终止: {error_message}")
        elif self.progress_data.get("status") != "error":
            self.progress_data["status"] = "complete"
            logger.info(f"'{self.anomaly_id}' 的所有窗口检测已成功完成。")
        self.progress_data["last_update_timestamp"] = datetime.now().timestamp()

    def get_summary(self) -> Dict[str, Any]:
        """Build a flat, report-friendly summary of the whole detection run.

        Aggregates over all stored windows: anomalous trace counts, average
        latency of anomalous traces vs. their normal reference traces, the
        highest metric ratio seen, the number of services whose score exceeded
        0.5, and the top-3 trace patterns ranked by ``avg_metric_ratio``.
        Includes an ``error_message`` key only when the run recorded one.
        """
        total_anomalous_traces = 0
        anomalous_window_count = 0
        unique_anomalous_services = set()
        max_metric_ratio_overall = 0.0
        all_anomalous_latencies = []
        all_normal_latencies = []

        # Single pass over every window: count anomalies, collect latency
        # samples and the services scoring above the threshold.
        for window_data in self.windows_data.values():
            anomalous_traces_in_window = window_data.get("anomalous_traces", [])
            if anomalous_traces_in_window:
                total_anomalous_traces += len(anomalous_traces_in_window)
                anomalous_window_count += 1
                for trace_info in anomalous_traces_in_window:
                    if trace_info.get("metric_ratio", 0) > max_metric_ratio_overall:
                        max_metric_ratio_overall = trace_info.get("metric_ratio", 0)
                    if "latency" in trace_info:
                        all_anomalous_latencies.append(trace_info["latency"])
                    # Each anomalous trace may carry its baseline traces.
                    normal_traces = trace_info.get("normal_traces", [])
                    for normal_trace in normal_traces:
                        if "latency" in normal_trace:
                            all_normal_latencies.append(normal_trace["latency"])
            service_scores_data = window_data.get("service_scores")
            if service_scores_data:
                for service, scores in service_scores_data.items():
                    if scores.get("score", 0) > 0.5:  # Example threshold
                        unique_anomalous_services.add(service)

        # Averages default to 0 when no samples were collected.
        avg_anomalous_latency = (
            sum(all_anomalous_latencies) / len(all_anomalous_latencies)
            if all_anomalous_latencies
            else 0
        )
        avg_normal_latency = (
            sum(all_normal_latencies) / len(all_normal_latencies)
            if all_normal_latencies
            else 0
        )
        total_patterns = len(self.trace_patterns)
        top_ranked_patterns = []
        avg_pattern_scores = []

        # Top-3 patterns by avg_metric_ratio, descending.
        for pattern_id, pattern_info in sorted(
            self.trace_patterns.items(),
            key=lambda x: x[1].get(
                "avg_metric_ratio", 0
            ),  # Use existing avg_metric_ratio
            reverse=True,
        )[:3]:
            top_ranked_patterns.append(
                {
                    "pattern_id": pattern_id,  # Use the actual pattern_id key
                    "rank": pattern_info.get("rank", 0),
                    "avg_metric_ratio": pattern_info.get("avg_metric_ratio", 0),
                    "trace_count": pattern_info.get("trace_count", 0),
                }
            )
            avg_pattern_scores.append(pattern_info.get("avg_metric_ratio", 0))

        # Average score across the selected (top-ranked) patterns only.
        overall_avg_pattern_score = (
            sum(avg_pattern_scores) / len(avg_pattern_scores)
            if avg_pattern_scores
            else 0
        )
        summary = {
            "global_anomaly_id": self.anomaly_id,
            "status": self.progress_data.get("status"),
            "total_windows_processed": self.progress_data.get(
                "last_processed_window_index", -1
            )
            + 1,
            "total_windows_expected": self.progress_data.get(
                "total_windows_expected", 0
            ),
            "first_window_start_overall": self.progress_data.get(
                "first_window_start_overall"
            ),
            "last_window_end_overall": self.progress_data.get(
                "last_window_end_overall"
            ),
            "total_anomalous_traces_found": total_anomalous_traces,
            "windows_with_anomalies": anomalous_window_count,
            "max_metric_ratio_overall": max_metric_ratio_overall,
            "avg_anomalous_latency": avg_anomalous_latency,
            "avg_normal_latency": avg_normal_latency,
            "count_unique_anomalous_services_score_gt_0.5": len(
                unique_anomalous_services
            ),
            "total_trace_patterns": total_patterns,
            "top_ranked_patterns": top_ranked_patterns,
            "avg_pattern_score": overall_avg_pattern_score,  # Avg score of top patterns
            "last_update_timestamp": self.progress_data.get("last_update_timestamp"),
        }
        if self.progress_data.get("error_message"):
            summary["error_message"] = self.progress_data["error_message"]
        return summary

    def _serialize_value(self, value: Any) -> Any:
        if isinstance(value, dict):
            return self._serialize_dict(value)
        if isinstance(value, list):
            return self._serialize_list(value)
        if isinstance(value, set):  # Sets (like trace_ids before sorting)
            return sorted(list(value))
        if isinstance(
            value, frozenset
        ):  # For graph_repr if it were directly serialized
            return sorted(
                [str(item) for item in value]
            )  # Example: convert to sorted list of strings
        if isinstance(
            value,
            (
                np.int64,
                np.int32,
                np.int16,
                np.int8,
                np.uint64,
                np.uint32,
                np.uint16,
                np.uint8,
            ),
        ):
            return int(value)
        if isinstance(value, (np.float64, np.float32, np.float16)):
            if np.isnan(value):
                return None
            if np.isinf(value):
                return "Infinity" if value > 0 else "-Infinity"
            return float(value)
        if isinstance(value, np.ndarray):
            return value.tolist()
        if isinstance(value, pd.Timestamp):
            return value.isoformat()
        if isinstance(value, Path):
            return str(value)
        if pd.isna(value):
            return None
        return value

    def _serialize_dict(self, data: Optional[Dict]) -> Optional[Dict]:
        """Serialize every key and value of *data*; None passes through."""
        if data is None:
            return None
        serialized: Dict = {}
        for key, value in data.items():
            serialized[self._serialize_value(key)] = self._serialize_value(value)
        return serialized

    def _serialize_list(self, data: Optional[List]) -> Optional[List]:
        """Serialize each element of *data*; None passes through."""
        if data is None:
            return None
        return list(map(self._serialize_value, data))

    @staticmethod
    def _get_default_paths(
        results_storage_dir: Path, anomaly_id: str
    ) -> Tuple[Path, Path]:
        if not anomaly_id:
            raise ValueError("anomaly_id 不能为空以确定文件路径。")
        progress_file = results_storage_dir / f"progress.json"
        results_file = results_storage_dir / f"results.json"
        return progress_file, results_file

    @classmethod
    def repair_incomplete_json(
        cls, anomaly_id: str, output_dir_base: Optional[str] = None
    ) -> bool:
        base_dir = Path(output_dir_base) if output_dir_base else Path("./")
        results_storage_dir = base_dir / "trace_results"
        _, results_file = cls._get_default_paths(
            results_storage_dir, anomaly_id
        )  # Also consider progress and patterns files

        files_to_check = [
            results_file,
            results_storage_dir / f"progress.json",
            results_storage_dir / f"patterns.json",
        ]
        all_repaired = True
        for file_to_repair in files_to_check:
            if not file_to_repair.exists():
                logger.info(f"文件 {file_to_repair} 不存在，跳过修复。")
                continue  # Or set all_repaired to False if existence is mandatory

            logger.info(f"开始尝试修复文件: {file_to_repair}")
            backup_file = file_to_repair.with_suffix(".bak")
            repaired_this_file = False
            if backup_file.exists():
                try:
                    with open(backup_file, "r", encoding="utf-8") as f:
                        json.load(f)
                    backup_file.replace(file_to_repair)
                    logger.info(f"成功使用备份文件修复 {file_to_repair}")
                    repaired_this_file = True
                except Exception as e:
                    logger.warning(f"备份文件 {backup_file} 也损坏或无法使用: {e}")

            if not repaired_this_file:
                try:
                    with open(file_to_repair, "r+", encoding="utf-8") as f:
                        file_content = f.read()
                        try:
                            json.loads(file_content)
                            logger.info(f"文件 {file_to_repair} JSON有效，无需修复。")
                            repaired_this_file = True
                        except json.JSONDecodeError as json_err:
                            logger.info(
                                f"检测到 {file_to_repair} JSON不完整，尝试修复: {json_err.msg} at pos {json_err.pos}"
                            )

                            # Create a pre-repair backup
                            repair_backup_path = file_to_repair.with_suffix(
                                f".repair_bak_{datetime.now().strftime('%Y%m%d%H%M%S')}"
                            )
                            with open(
                                repair_backup_path, "w", encoding="utf-8"
                            ) as bak_f:
                                bak_f.write(file_content)
                            logger.info(
                                f"为 {file_to_repair} 创建修复前备份: {repair_backup_path}"
                            )

                            fixed_content = cls._balance_json_brackets(file_content)
                            f.seek(0)
                            f.write(fixed_content)
                            f.truncate()

                        # Validate after attempting fix
                        f.seek(0)
                        json.load(f)  # Will raise error if still invalid
                        logger.info(f"成功修复文件 {file_to_repair}")
                        repaired_this_file = True

                except json.JSONDecodeError as final_json_err:
                    logger.error(
                        f"修复尝试失败 {file_to_repair}, JSON仍然无效: {final_json_err.msg}"
                    )
                    # Optionally, restore from .repair_bak if one was made and is preferred over potentially more corrupt state
                    all_repaired = False
                except Exception as e:
                    logger.error(
                        f"修复文件 {file_to_repair} 时发生未预期的错误: {e}",
                        exc_info=True,
                    )
                    all_repaired = False
            if not repaired_this_file:  # If neither backup restore nor balancing worked
                all_repaired = False

        return all_repaired

    @staticmethod
    def _balance_json_brackets(content: str) -> str:
        # Trim trailing whitespace or commas that might break parsing before final bracket
        content = content.rstrip(", \t\n\r")

        open_braces = content.count("{")
        close_braces = content.count("}")
        open_brackets = content.count("[")
        close_brackets = content.count("]")

        fixed_content = content

        # Simplistic: attempt to close based on the outermost unclosed structure
        # This is highly heuristic. A proper parser or incremental JSON fixer is better.

        # Try to fix unterminated strings first (very basic)
        if fixed_content.count('"') % 2 != 0:
            # If it looks like a value was being written, try to close it
            # Example: ..., "key": "unterminated_val
            last_char = fixed_content[-1]
            if fixed_content.rfind('": "') > fixed_content.rfind(
                '",'
            ):  # likely unterminated string value
                if last_char != '"':
                    fixed_content += '"'

        # Add missing closing brackets/braces
        # This order might matter: if inside an array of objects, close object first, then array.
        # The current simple approach just appends.
        if open_brackets > close_brackets:
            fixed_content += "]" * (open_brackets - close_brackets)
            logger.info(f"尝试添加 {open_brackets - close_brackets} 个 ']'")

        if open_braces > close_braces:
            fixed_content += "}" * (open_braces - close_braces)
            logger.info(f"尝试添加 {open_braces - close_braces} 个 '}}'")

        # A common issue: trailing comma before a closing bracket/brace
        # if fixed_content.endswith(',') and (fixed_content.endswith(",]") or fixed_content.endswith(",}")):
        #    pass # This is valid in some parsers, but strict JSON disallows
        # elif fixed_content.endswith(','): # If just ends with comma, might need a structure close
        #    # This is too presumptive.
        #    pass

        return fixed_content

    def _build_service_level_graph_representation(
        self,
        trace_spans_df: pd.DataFrame,
    ) -> FrozenSet[Tuple[str, str]]:
        """
        构建单个追踪的服务级别调用图的规范表示。
        图表示为 (调用者服务, 被调用者服务) 元组的 frozenset。
        此表示是规范且可哈希的，适用于字典键。
        假设 trace_spans_df 包含列: spanId, parentId, serviceName
        """
        if trace_spans_df.empty:
            return frozenset()

        span_to_service: Dict[str, str] = {}
        for _, row in trace_spans_df.iterrows():
            span_id = row["spanId"]
            # 使用 "UnknownService" 作为服务名称缺失时的默认值
            service_name = row.get("serviceName", "UnknownService")
            span_to_service[span_id] = service_name

        service_edges = set()
        for _, span_row in trace_spans_df.iterrows():
            parent_id = span_row.get("parentId")
            current_span_id = span_row["spanId"]

            # parentId 必须是有效的 span ID (不是 None, "0", NaN 等)
            if parent_id and parent_id != "0" and pd.notna(parent_id):
                caller_service = span_to_service.get(
                    str(parent_id)
                )  # Ensure parent_id is string if read as numeric
                callee_service = span_to_service.get(str(current_span_id))

                if caller_service and callee_service:
                    service_edges.add((caller_service, callee_service))

        return frozenset(service_edges)

    def generate_trace_patterns(
        self, dependencies_df: pd.DataFrame
    ) -> Dict[str, Dict[str, Any]]:
        """
        [REFACTORED] Generate the trace-pattern ranking based on service-level
        call-graph structure.

        A pattern is defined by one unique service call-graph structure.
        Scoring: each pattern's score is the mean metric_ratio of the
        anomalous traces it contains. The ranked result is stored in
        ``self.trace_patterns`` and returned.

        Args:
            dependencies_df: span-level data; must contain a 'traceId'
                column plus the columns consumed by
                ``_build_service_level_graph_representation``. The frame is
                never mutated (the original implementation rewrote its
                'traceId' column in place).

        Returns:
            Mapping of pattern_id -> pattern info (graph_structure,
            trace_count, avg_metric_ratio, trace_ids, rank) ranked by
            avg_metric_ratio descending; empty dict when nothing can be built.
        """
        logger.info(f"'{self.anomaly_id}': 开始生成链路模式排名 (基于图结构)")

        # Collect the anomalous traces from every processed window.
        all_anomalous_traces_data = []
        for window_data in self.windows_data.values():
            all_anomalous_traces_data.extend(window_data.get("anomalous_traces", []))

        if not all_anomalous_traces_data:
            logger.warning(f"'{self.anomaly_id}': 未找到异常链路，无法生成模式排名。")
            self.trace_patterns = {}
            return {}

        trace_id_to_metric_ratio_map = {
            trace.get("trace_id"): float(trace.get("metric_ratio", 0.0))
            for trace in all_anomalous_traces_data
            if trace.get("trace_id")
        }
        anomalous_trace_ids_set = set(trace_id_to_metric_ratio_map.keys())

        if dependencies_df.empty or "traceId" not in dependencies_df.columns:
            logger.warning(
                f"'{self.anomaly_id}': dependencies_df 为空或缺少 'traceId' 列，无法构建路径。"
            )
            self.trace_patterns = {}
            return {}

        # Align the dtype of 'traceId' with the anomalous trace ids so the
        # isin() filter does not come up empty on str/int mismatches (a
        # common source of silently empty joins). Work on a local copy via
        # assign(): never mutate the caller's DataFrame.
        deps_df = dependencies_df
        try:
            if anomalous_trace_ids_set and isinstance(
                next(iter(anomalous_trace_ids_set)), str
            ):
                deps_df = dependencies_df.assign(
                    traceId=dependencies_df["traceId"].astype(str)
                )
        except Exception as e:
            logger.warning(f"Could not ensure traceId type compatibility: {e}")

        relevant_spans_df = deps_df[deps_df["traceId"].isin(anomalous_trace_ids_set)]

        if relevant_spans_df.empty:
            logger.warning(
                f"'{self.anomaly_id}': 未在 dependencies_df 中找到与异常链路相关的span。"
            )
            self.trace_patterns = {}
            return {}

        # Map: canonical_graph_representation (FrozenSet) -> set of trace_ids
        graph_to_trace_ids: Dict[FrozenSet[Tuple[str, str]], Set[str]] = {}

        # Group by traceId so each trace's spans are processed independently.
        for trace_id, spans_for_trace_df in relevant_spans_df.groupby("traceId"):
            if trace_id not in anomalous_trace_ids_set:  # defensive check
                continue

            service_graph_repr = self._build_service_level_graph_representation(
                spans_for_trace_df
            )
            graph_to_trace_ids.setdefault(service_graph_repr, set()).add(trace_id)

        # Build one pattern entry per distinct graph structure.
        patterns_result: Dict[str, Dict[str, Any]] = {}
        for pattern_idx, (graph_repr, related_trace_ids_set) in enumerate(
            graph_to_trace_ids.items(), start=1
        ):
            metric_ratios_for_pattern = [
                trace_id_to_metric_ratio_map.get(tid, 0.0)
                for tid in related_trace_ids_set
            ]
            avg_score = (
                sum(metric_ratios_for_pattern) / len(metric_ratios_for_pattern)
                if metric_ratios_for_pattern
                else 0.0
            )

            # Human-readable, JSON-friendly edge list (sorted for stability).
            readable_graph_edges = sorted(
                f"{caller}->{callee}" for caller, callee in graph_repr
            )

            patterns_result[f"pattern_{pattern_idx}"] = {
                "graph_structure": readable_graph_edges,
                "trace_count": len(related_trace_ids_set),
                "avg_metric_ratio": avg_score,
                # Sorted list so JSON output is deterministic.
                "trace_ids": sorted(related_trace_ids_set),
            }

        # Rank patterns by average score, descending.
        sorted_patterns_list = sorted(
            patterns_result.items(),
            key=lambda item: item[1]["avg_metric_ratio"],
            reverse=True,
        )

        # Assign ranks and convert back to a dict keyed by the original ids.
        final_ranked_patterns: Dict[str, Dict[str, Any]] = {}
        for rank, (p_id, pattern_info) in enumerate(sorted_patterns_list, start=1):
            pattern_info["rank"] = rank
            final_ranked_patterns[p_id] = pattern_info

        self.trace_patterns = final_ranked_patterns
        logger.info(
            f"'{self.anomaly_id}': 成功生成 {len(self.trace_patterns)} 个链路模式排名 (基于图结构)"
        )
        return self.trace_patterns

    def get_pattern_rankings(self) -> Dict[str, Dict[str, Any]]:
        return self.trace_patterns

    def get_pattern_traces(self, pattern_id: str) -> Dict[str, Any]:
        """
        获取特定模式的所有Trace详细信息。
        模式现在由其服务级别的调用图结构定义。
        """
        if pattern_id not in self.trace_patterns:
            logger.warning(f"模式ID '{pattern_id}' 不存在")
            return {}

        pattern_info = self.trace_patterns[pattern_id]
        # trace_ids is already a list from generate_trace_patterns
        trace_ids_in_pattern: List[str] = pattern_info.get("trace_ids", [])

        traces_detail = []
        # 为快速查找创建trace_id到其详细信息的映射
        trace_id_to_detail_map: Dict[str, Dict[str, Any]] = {}
        for window_data in self.windows_data.values():
            anomalous_traces_in_window = window_data.get("anomalous_traces", [])
            for trace in anomalous_traces_in_window:
                t_id = trace.get("trace_id")
                if (
                    t_id
                    and t_id
                    in trace_ids_in_pattern  # Efficiently check if t_id is in the list
                    and t_id not in trace_id_to_detail_map  # 确保只添加一次
                ):
                    trace_id_to_detail_map[t_id] = {
                        "trace_id": t_id,
                        "latency": trace.get("latency"),
                        "metric_ratio": trace.get("metric_ratio"),
                        "window_id": window_data.get("window_id"),
                        "window_start_time": window_data.get("window_start_time"),
                        "window_end_time": window_data.get("window_end_time"),
                        # 'normal_traces': trace.get('normal_traces') # 可选
                    }

        # 按原始pattern_info中的trace_ids顺序收集（如果顺序重要）
        for t_id in trace_ids_in_pattern:  # trace_ids_in_pattern is already a list
            if t_id in trace_id_to_detail_map:
                traces_detail.append(trace_id_to_detail_map[t_id])
            else:
                logger.warning(
                    f"Trace ID '{t_id}' 来自模式 '{pattern_id}' 但在窗口数据中未找到详细信息。"
                )

        result = {
            "pattern_id": pattern_id,
            "graph_structure": pattern_info.get(
                "graph_structure", []
            ),  # 从 "path" 更改
            "rank": pattern_info.get("rank", 0),
            "avg_metric_ratio": pattern_info.get("avg_metric_ratio", 0),
            "trace_count": pattern_info.get("trace_count", 0),
            "traces": traces_detail,
        }
        return result
