import logging
import numpy as np
import pandas as pd
from typing import Dict, List, Tuple, Any, Optional, Set
from datetime import datetime
import threading  # Not strictly used by TraceDetector itself now, but kept if TraceDetectionResult uses it
from pathlib import Path
from scipy.cluster.hierarchy import linkage, fcluster  # Hierarchical clustering
from sklearn.cluster import KMeans  # K-Means clustering
from .trace_detection_result import TraceDetectionResult

logger = logging.getLogger(__name__)


# --- End of Dummy TraceDetectionResult ---


class TraceDetector:
    """
    Sliding-window anomaly detector for traces in a service-call topology.

    Implements a TraceRank-style two-stage clustering pipeline: Ward
    hierarchical clustering groups traces by call structure, then an
    iterative K-Means (k=2, with small-outlier removal) splits each
    structural group by total latency.  Traces in the high-latency
    sub-cluster are flagged anomalous when the relative centroid gap
    exceeds a threshold, and the normal traces of that structural group
    are nested inside each anomalous trace's output record.  Per-window
    results are checkpointed through ``TraceDetectionResult`` so an
    interrupted run can resume from the last processed window.
    """

    def __init__(
        self,
        step_minutes: int = 5,
        alpha: float = 3.0,
        min_traces_for_kmeans_initial: int = 5,
        hierarchical_cluster_distance_threshold: float = 2.0,
        k_means_anomaly_threshold_factor: float = 2.0,
        kmeans_outlier_percentage_threshold: float = 0.01,
        output_dir: Optional[str] = None,
    ):
        """
        Args:
            step_minutes: Sliding-window step size, in minutes.
            alpha: Window-size multiplier; window = step_minutes * alpha.
            min_traces_for_kmeans_initial: Minimum number of traces a
                structural cluster needs before K-Means is attempted
                (floored at 2, since k=2 clustering needs two samples).
            hierarchical_cluster_distance_threshold: ``fcluster`` distance
                cutoff used to cut the Ward dendrogram into flat clusters.
            k_means_anomaly_threshold_factor: Minimum relative centroid gap
                ((high - low) / low) for a latency split to count as
                anomalous.
            kmeans_outlier_percentage_threshold: A K-Means sub-cluster
                smaller than this fraction of the active samples is
                discarded as outliers before re-clustering.
            output_dir: Base directory for persisted results; defaults to
                ``./result_data_detector``.
        """
        self.step_minutes = step_minutes
        self.alpha = alpha
        self.window_size_minutes = int(step_minutes * alpha)
        # K-Means with k=2 cannot run on fewer than 2 samples.
        self.min_traces_for_kmeans_initial = max(2, min_traces_for_kmeans_initial)
        self.hierarchical_distance_threshold = hierarchical_cluster_distance_threshold
        self.k_means_anomaly_threshold_factor = k_means_anomaly_threshold_factor
        self.kmeans_outlier_percentage_threshold = kmeans_outlier_percentage_threshold
        self.output_dir_base = (
            Path(output_dir) if output_dir else Path("./result_data_detector")
        )
        logger.info(
            f"初始化链路异常检测器，参数: "
            f"step_minutes={step_minutes}, alpha={alpha}, window_size_minutes={self.window_size_minutes}, "
            f"min_traces_for_kmeans_initial={self.min_traces_for_kmeans_initial}, "
            f"hierarchical_distance_threshold={self.hierarchical_distance_threshold}, "
            f"k_means_anomaly_threshold_factor={self.k_means_anomaly_threshold_factor}, "
            f"kmeans_outlier_percentage_threshold={self.kmeans_outlier_percentage_threshold}, "
            f"output_dir_base: {self.output_dir_base}"
        )

    def _prepare_structural_and_latency_data(
        self, window_df: pd.DataFrame, all_unique_services_list: List[str]
    ) -> Tuple[Optional[np.ndarray], Optional[np.ndarray], Optional[List[str]]]:
        """Build per-trace structural vectors and total latencies for a window.

        Args:
            window_df: Spans within the current window; must contain
                ``traceId``, ``cmdb_id`` and ``latency`` columns.
            all_unique_services_list: Ordered list of unique service ids in
                the window; defines the structural-vector columns.

        Returns:
            ``(structural_vectors, trace_latencies, ordered_trace_ids)``
            where ``structural_vectors[i, j]`` counts spans of trace *i*
            hitting service *j* and ``trace_latencies[i]`` is the sum of
            span latencies of trace *i*; ``(None, None, None)`` when the
            window yields no usable data.
        """
        if window_df.empty or not all_unique_services_list:
            return None, None, None

        # Aggregate with 'sum' (not 'first') so each trace's latency is the
        # total over all of its spans.
        trace_level_agg = (
            window_df.groupby("traceId").agg(latency=("latency", "sum")).reset_index()
        )
        logger.debug(
            f"计算每个trace的总延迟 (所有span的latency之和)，共 {len(trace_level_agg)} 条trace"
        )

        if (
            trace_level_agg.empty
            or "latency" not in trace_level_agg.columns
            or trace_level_agg["latency"].isnull().all()
        ):
            logger.warning("聚合后未找到有效的链路级数据或所有延迟数据为空。")
            return None, None, None
        trace_level_agg.dropna(subset=["latency"], inplace=True)
        if trace_level_agg.empty:
            logger.warning("所有链路在聚合后均无有效延迟数据。")
            return None, None, None
        ordered_trace_ids = trace_level_agg["traceId"].tolist()
        trace_latencies = trace_level_agg["latency"].values
        num_traces = len(ordered_trace_ids)
        service_to_idx = {
            service: i for i, service in enumerate(all_unique_services_list)
        }
        num_unique_services = len(all_unique_services_list)
        structural_vectors = np.zeros((num_traces, num_unique_services), dtype=int)
        valid_spans_df = window_df[["traceId", "cmdb_id"]].copy()
        valid_spans_df.dropna(subset=["cmdb_id"], inplace=True)
        # Defensive check; with the two-column projection above, cmdb_id
        # should always survive dropna.
        if "cmdb_id" in valid_spans_df.columns:
            valid_spans_df["cmdb_id"] = valid_spans_df["cmdb_id"].astype(str)
        else:
            logger.warning(
                "_prepare_structural_and_latency_data: cmdb_id column missing after dropna."
            )
            return None, None, None
        service_counts_per_trace = valid_spans_df.groupby(["traceId", "cmdb_id"]).size()
        trace_id_to_row_idx = {
            trace_id: i for i, trace_id in enumerate(ordered_trace_ids)
        }
        # Fill the structural matrix; skip (trace, service) pairs that were
        # dropped during latency aggregation or are not in the service list.
        for (
            trace_id_count,
            cmdb_id_count,
        ), count_val in service_counts_per_trace.items():
            if (
                trace_id_count in trace_id_to_row_idx
                and cmdb_id_count in service_to_idx
            ):
                structural_vectors[
                    trace_id_to_row_idx[trace_id_count], service_to_idx[cmdb_id_count]
                ] = count_val
        return structural_vectors, trace_latencies, ordered_trace_ids

    def _perform_hierarchical_clustering(
        self, structural_vectors: np.ndarray
    ) -> Optional[np.ndarray]:
        """Cluster traces by structural vector using Ward linkage.

        Returns flat cluster labels from ``fcluster`` (distance criterion
        at ``self.hierarchical_distance_threshold``), or ``None`` on fewer
        than 2 samples or on failure.
        """
        if structural_vectors is None or structural_vectors.shape[0] < 2:
            logger.warning("层次聚类至少需要2个有效样本（链路）。")
            return None
        try:
            linkage_matrix = linkage(
                structural_vectors, method="ward", metric="euclidean"
            )
            return fcluster(
                linkage_matrix,
                t=self.hierarchical_distance_threshold,
                criterion="distance",
            )
        except Exception as e:
            logger.error(f"层次聚类失败: {e}", exc_info=True)
            return None

    def _detect_anomalies_with_kmeans(
        self,
        all_trace_latencies_in_hc_input: np.ndarray,
        all_trace_ids_in_hc_input: List[str],
    ) -> Tuple[Set[str], Set[str], float]:
        """Iteratively split one structural cluster's latencies with K-Means.

        Each iteration fits k=2 K-Means on the remaining latencies.  If the
        smaller sub-cluster is below the outlier-percentage threshold it is
        removed and clustering repeats; otherwise the relative centroid gap
        decides whether the high-latency sub-cluster is anomalous.

        Returns:
            ``(anomalous_ids, normal_ids_in_anomalous_group, max_metric_ratio)``.
        """
        anomalous_trace_ids_for_this_hc: Set[str] = set()
        normal_trace_ids_in_anomalous_hc: Set[str] = set()
        max_metric_ratio_for_this_hc: float = 0.0
        # Work on copies so outlier removal never mutates the caller's data.
        active_latencies = all_trace_latencies_in_hc_input.copy()
        active_trace_ids = list(all_trace_ids_in_hc_input)

        while True:
            num_current_active_traces = len(active_latencies)
            if num_current_active_traces < 2:
                logger.debug(
                    f"  K-Means迭代：样本数 ({num_current_active_traces}) 过少，停止迭代。"
                )
                break
            # Dynamic floor: stop once outlier removal has shrunk the set
            # well below the configured initial minimum.
            if num_current_active_traces < max(
                2, int(self.min_traces_for_kmeans_initial * 0.1)
            ):
                logger.debug(
                    f"  K-Means迭代：样本数 ({num_current_active_traces}) 已低于动态启发式阈值，停止迭代。"
                )
                break
            latencies_for_kmeans_iter = active_latencies.reshape(-1, 1)
            try:
                kmeans = KMeans(n_clusters=2, random_state=42, n_init="auto")
                sub_cluster_labels_iter = kmeans.fit_predict(latencies_for_kmeans_iter)
            except (
                Exception
            ) as e_km:  # Catching generic Exception as KMeans can raise various issues
                logger.error(
                    f"  K-Means迭代时发生错误 (样本数: {num_current_active_traces}): {e_km}",
                    exc_info=True,
                )
                break
            size_sub_cluster0 = np.sum(sub_cluster_labels_iter == 0)
            size_sub_cluster1 = np.sum(sub_cluster_labels_iter == 1)
            if size_sub_cluster0 == 0 or size_sub_cluster1 == 0:
                logger.debug(
                    f"  K-Means迭代：一个子簇为空 (大小: {size_sub_cluster0}, {size_sub_cluster1})。停止迭代。"
                )
                break
            smaller_sub_cluster_size = min(size_sub_cluster0, size_sub_cluster1)
            if (
                smaller_sub_cluster_size
                < num_current_active_traces * self.kmeans_outlier_percentage_threshold
            ):
                # The smaller sub-cluster is a negligible fraction: treat it
                # as outliers, drop it, and re-cluster the remainder.
                outlier_sub_label = 0 if size_sub_cluster0 < size_sub_cluster1 else 1
                keep_indices = np.where(sub_cluster_labels_iter != outlier_sub_label)[0]
                if (
                    len(keep_indices) == num_current_active_traces
                    or len(keep_indices) == 0
                ):
                    logger.debug(
                        f"  K-Means迭代：离群点移除未改变数据或移除所有数据。停止。"
                    )
                    break
                active_latencies = active_latencies[keep_indices]
                active_trace_ids = [active_trace_ids[i] for i in keep_indices]
            else:
                # Final split: decide anomaly status from the centroid gap.
                if len(active_latencies) < 2:
                    break
                final_centroids = kmeans.cluster_centers_.flatten()
                final_centroids.sort()
                low_latency_centroid, high_latency_centroid = (
                    final_centroids[0],
                    final_centroids[1],
                )
                # Guard against division by (near-)zero centroids.
                min_centroid_for_ratio = max(low_latency_centroid, 1e-9)
                current_metric_ratio = (
                    high_latency_centroid - low_latency_centroid
                ) / min_centroid_for_ratio
                max_metric_ratio_for_this_hc = current_metric_ratio
                if current_metric_ratio > self.k_means_anomaly_threshold_factor:
                    # Map K-Means labels back to high/low latency groups
                    # using the unsorted centroids.
                    original_final_centroids = kmeans.cluster_centers_.flatten()
                    high_latency_final_sub_cluster_label = (
                        1
                        if original_final_centroids[1] > original_final_centroids[0]
                        else 0
                    )
                    low_latency_final_sub_cluster_label = (
                        1 - high_latency_final_sub_cluster_label
                    )
                    for idx_in_active_set, final_sub_label in enumerate(
                        sub_cluster_labels_iter
                    ):
                        if final_sub_label == high_latency_final_sub_cluster_label:
                            anomalous_trace_ids_for_this_hc.add(
                                active_trace_ids[idx_in_active_set]
                            )
                        elif final_sub_label == low_latency_final_sub_cluster_label:
                            normal_trace_ids_in_anomalous_hc.add(
                                active_trace_ids[idx_in_active_set]
                            )
                break
        return (
            anomalous_trace_ids_for_this_hc,
            normal_trace_ids_in_anomalous_hc,
            max_metric_ratio_for_this_hc,
        )

    def detect_anomalies(
        self, dependencies_df: pd.DataFrame, anomaly_id: str
    ) -> TraceDetectionResult:
        """Run windowed anomaly detection over the given span data.

        Args:
            dependencies_df: Raw span data; must contain the columns
                timestamp, traceId, spanId, parentId, cmdb_id, latency.
            anomaly_id: Identifier used for result persistence/resume.

        Returns:
            The (possibly resumed) ``TraceDetectionResult`` for this id.

        Raises:
            ValueError: If ``anomaly_id`` is empty.
        """
        if not anomaly_id:
            logger.error("anomaly_id 不能为空。")
            # Fix: the original returned [] here, violating the declared
            # TraceDetectionResult return type; fail fast instead.
            raise ValueError("anomaly_id 不能为空。")

        # Check whether a result file exists and is valid JSON; attempt a
        # repair when it is truncated/incomplete.
        try:
            results_dir = Path(self.output_dir_base) / "trace_results"
            results_file = results_dir / f"{anomaly_id}_results.json"
            if results_file.exists():
                try:
                    # Validate file integrity by parsing it.
                    import json

                    with open(results_file, "r", encoding="utf-8") as f:
                        json.load(f)
                except json.JSONDecodeError:
                    logger.warning(
                        f"发现不完整的JSON结果文件: {results_file}，尝试修复..."
                    )
                    # Attempt to repair the incomplete JSON file.
                    TraceDetectionResult.repair_incomplete_json(
                        anomaly_id, str(self.output_dir_base)
                    )
        except Exception as e:
            logger.warning(f"检查或修复结果文件时出错: {e}")

        overall_detection_result = TraceDetectionResult.load(
            anomaly_id, str(self.output_dir_base)
        )
        if overall_detection_result is None:
            overall_detection_result = TraceDetectionResult(
                anomaly_id, str(self.output_dir_base)
            )
            logger.info(
                f"为 anomaly_id '{anomaly_id}' 创建了新的 TraceDetectionResult 对象。"
            )
        elif overall_detection_result.is_overall_complete:
            logger.info(
                f"anomaly_id '{anomaly_id}' 的检测已完成并已加载。跳过重新检测。"
            )
            return overall_detection_result
        else:
            logger.info(
                f"为 anomaly_id '{anomaly_id}' 加载了已有的 TraceDetectionResult 对象，将从断点继续。"
            )

        if dependencies_df.empty:
            logger.warning(f"'{anomaly_id}': 链路数据为空。")
            overall_detection_result.mark_overall_complete(
                error_message="输入链路数据为空"
            )
            overall_detection_result.save()
            return overall_detection_result
        required_input_cols = [
            "timestamp",
            "traceId",
            "spanId",
            "parentId",
            "cmdb_id",
            "latency",
        ]
        if not all(col in dependencies_df.columns for col in required_input_cols):
            msg = f"输入链路数据缺少必需列: {[c for c in required_input_cols if c not in dependencies_df.columns]}"
            logger.error(f"'{anomaly_id}': {msg}")
            overall_detection_result.mark_overall_complete(error_message=msg)
            overall_detection_result.save()
            return overall_detection_result
        trace_df = dependencies_df.copy()
        try:
            if not pd.api.types.is_datetime64_any_dtype(trace_df["timestamp"]):
                trace_df["timestamp"] = pd.to_datetime(trace_df["timestamp"])
        except Exception as e:
            msg = f"无法将'timestamp'列转换为datetime类型: {e}"
            logger.error(f"'{anomaly_id}': {msg}", exc_info=True)
            overall_detection_result.mark_overall_complete(error_message=msg)
            overall_detection_result.save()
            return overall_detection_result

        logger.info(
            f"'{anomaly_id}': 开始链路异常检测，共有 {len(trace_df)} 条原始span数据。"
        )
        min_time, max_time = trace_df["timestamp"].min(), trace_df["timestamp"].max()
        if pd.isna(min_time) or pd.isna(max_time) or min_time >= max_time:
            err_msg = f"数据时间范围无效 (min: {min_time}, max: {max_time})"
            if min_time == max_time and not pd.isna(min_time):
                err_msg = f"数据时间范围过小，min_time ({min_time}) 等于 max_time ({max_time})。"
            elif min_time > max_time:
                err_msg = f"数据时间范围反转，min_time ({min_time}) 大于 max_time ({max_time})。"
            logger.error(f"'{anomaly_id}': {err_msg}")
            overall_detection_result.mark_overall_complete(error_message=err_msg)
            overall_detection_result.save()
            return overall_detection_result

        step, window_size = pd.Timedelta(minutes=self.step_minutes), pd.Timedelta(
            minutes=self.window_size_minutes
        )
        # Enumerate window start times; the fallback keeps at least one
        # window when the whole data range is shorter than window_size.
        start_times = []
        current_scan_time = min_time
        while current_scan_time <= max_time:
            if current_scan_time + window_size <= max_time + step:
                start_times.append(current_scan_time)
            elif not start_times and min_time + window_size >= max_time:
                start_times.append(min_time)
                break
            current_scan_time += step
        if not start_times:
            warn_msg = f"'{anomaly_id}': 无法生成有效滑动窗口。数据时间范围 ({min_time} to {max_time}) 可能过小或步长不当。"
            logger.warning(warn_msg)
            overall_detection_result.set_expected_total_windows(0)
            overall_detection_result.mark_overall_complete(
                error_message="数据不足或配置不当以生成滑动窗口"
            )
            overall_detection_result.save()
            return overall_detection_result

        overall_detection_result.set_expected_total_windows(len(start_times))
        last_processed_idx = overall_detection_result.get_last_processed_window_index()
        logger.info(
            f"'{anomaly_id}': 预计处理 {len(start_times)} 个窗口。上次处理到索引: {last_processed_idx}。"
        )

        try:
            # tqdm is optional; when present, show a progress bar starting
            # at last_processed_idx + 1 (resume point).
            from tqdm import tqdm

            pbar = tqdm(
                total=len(start_times),
                initial=last_processed_idx + 1,
                desc=f"处理 {anomaly_id} 的窗口",
                unit="窗口",
                ncols=100,
            )
        except ImportError:
            logger.warning(
                "未找到tqdm库，将不显示进度条。可以通过pip install tqdm安装。"
            )
            pbar = None

        for i, window_start_dt in enumerate(start_times):
            window_id = f"{anomaly_id}_window_{i}"
            # Resume support: skip windows that were already processed.
            if (
                i <= last_processed_idx
                and overall_detection_result.is_window_processed(window_id)
            ):
                logger.debug(
                    f"窗口 {i + 1}/{len(start_times)} (ID: {window_id}): 已处理并加载，跳过。"
                )
                continue

            window_end_dt = window_start_dt + window_size
            logger.debug(
                f"处理滑动窗口 {i + 1}/{len(start_times)} (ID: {window_id}): {window_start_dt} - {window_end_dt}"
            )
            window_start_ts_int, window_end_ts_int = int(
                window_start_dt.timestamp()
            ), int(window_end_dt.timestamp())
            # Half-open interval [start, end) so adjacent windows don't
            # double-count boundary spans.
            window_df_current = trace_df[
                (trace_df["timestamp"] >= window_start_dt)
                & (trace_df["timestamp"] < window_end_dt)
            ]

            # Final anomalous-trace records emitted for this window.
            current_anomalous_traces_info_for_output: List[Dict[str, Any]] = []
            max_anomaly_metric_ratio_for_window: float = 0.0
            current_service_scores = {}
            window_detection_completed_flag = True

            # All traceIds judged anomalous in this window, used later for
            # the service-score computation.
            all_anomalous_trace_ids_this_window: Set[str] = set()

            if len(window_df_current) < 10:  # heuristic: too few raw spans
                logger.debug(
                    f"窗口 {i + 1}/{len(start_times)} (ID: {window_id}) 的原始span数据不足，标记为空白。"
                )
            else:
                try:
                    all_unique_services_in_window = sorted(
                        list(window_df_current["cmdb_id"].dropna().astype(str).unique())
                    )
                    if not all_unique_services_in_window:
                        logger.debug(
                            f"窗口 {i + 1}/{len(start_times)} (ID: {window_id}): 未找到唯一服务。"
                        )
                    else:
                        structural_vectors, trace_latencies, ordered_trace_ids = (
                            self._prepare_structural_and_latency_data(
                                window_df_current, all_unique_services_in_window
                            )
                        )

                        if (
                            not ordered_trace_ids
                            or len(ordered_trace_ids)
                            < self.min_traces_for_kmeans_initial
                        ):
                            logger.debug(
                                f"窗口 {i + 1}/{len(start_times)} (ID: {window_id}): 有效链路数 ({len(ordered_trace_ids or [])}) 不足。"
                            )
                        else:
                            hierarchical_cluster_labels = (
                                self._perform_hierarchical_clustering(
                                    structural_vectors
                                )
                            )
                            if (
                                hierarchical_cluster_labels is not None
                                and len(hierarchical_cluster_labels) > 0
                            ):
                                # Keep only trace_id -> latency (no service
                                # lists) to limit memory in the output.
                                trace_latency_map = {}
                                for i_trace, trace_id in enumerate(ordered_trace_ids):
                                    trace_latency_map[trace_id] = float(
                                        trace_latencies[i_trace]
                                    )

                                unique_hc_labels_in_window = np.unique(
                                    hierarchical_cluster_labels
                                )
                                for hc_label in unique_hc_labels_in_window:
                                    member_indices = np.where(
                                        hierarchical_cluster_labels == hc_label
                                    )[0]
                                    if (
                                        len(member_indices)
                                        < self.min_traces_for_kmeans_initial
                                    ):
                                        continue

                                    current_hc_trace_latencies = trace_latencies[
                                        member_indices
                                    ]
                                    current_hc_trace_ids = [
                                        ordered_trace_ids[j] for j in member_indices
                                    ]

                                    (
                                        anomalous_ids_this_hc,
                                        normal_ids_in_anom_group_this_hc,
                                        metric_ratio_this_hc,
                                    ) = self._detect_anomalies_with_kmeans(
                                        current_hc_trace_latencies, current_hc_trace_ids
                                    )

                                    max_anomaly_metric_ratio_for_window = max(
                                        max_anomaly_metric_ratio_for_window,
                                        metric_ratio_this_hc,
                                    )
                                    # Collect all anomalous ids for scoring.
                                    all_anomalous_trace_ids_this_window.update(
                                        anomalous_ids_this_hc
                                    )

                                    if anomalous_ids_this_hc:  # this HC cluster has anomalies
                                        # Output records for the normal
                                        # siblings of this HC cluster.
                                        normal_siblings_output_list = []
                                        for (
                                            norm_tid
                                        ) in normal_ids_in_anom_group_this_hc:
                                            normal_siblings_output_list.append(
                                                {
                                                    "trace_id": norm_tid,
                                                    "latency": trace_latency_map.get(
                                                        norm_tid, 0.0
                                                    ),
                                                }
                                            )

                                        # One record per anomalous trace,
                                        # with its normal siblings nested.
                                        for anom_tid in anomalous_ids_this_hc:
                                            current_anomalous_traces_info_for_output.append(
                                                {
                                                    "trace_id": anom_tid,
                                                    "normal_traces": normal_siblings_output_list,
                                                    "metric_ratio": metric_ratio_this_hc,
                                                    "latency": trace_latency_map.get(
                                                        anom_tid, 0.0
                                                    ),
                                                }
                                            )
                            else:
                                logger.debug(
                                    f"窗口 {i + 1}/{len(start_times)} (ID: {window_id}): 层次聚类未产生有效标签。"
                                )

                    if (
                        all_anomalous_trace_ids_this_window
                    ):  # any anomalous trace in the whole window
                        anomalous_df_for_scores = window_df_current[
                            window_df_current["traceId"].isin(
                                all_anomalous_trace_ids_this_window
                            )
                        ]
                        current_service_scores = self._calculate_service_scores(
                            window_df_current, anomalous_df_for_scores
                        )
                    else:
                        logger.debug(
                            f"窗口 {i + 1}/{len(start_times)} (ID: {window_id}): 未检测到任何异常链路。"
                        )

                except Exception as e:
                    logger.error(
                        f"处理窗口 {i + 1}/{len(start_times)} (ID: {window_id}) 时发生主流程错误: {e}",
                        exc_info=True,
                    )
                    window_detection_completed_flag = False

            overall_detection_result.add_or_update_window_result(
                window_id=window_id,
                window_start_time=window_start_ts_int,
                window_end_time=window_end_ts_int,
                anomalous_traces=current_anomalous_traces_info_for_output,
                service_scores=current_service_scores,
                max_anomaly_metric_ratio=max_anomaly_metric_ratio_for_window,
                detection_complete_for_window=window_detection_completed_flag,
            )

            if pbar is not None:
                pbar.update(1)

            # Progress log roughly every 10% of windows (at most every 20).
            log_interval = max(1, min(20, len(start_times) // 10))
            if (i + 1) % log_interval == 0:
                logger.info(
                    f"已处理 {i + 1}/{len(start_times)} 个窗口 ({(i + 1) / len(start_times) * 100:.1f}%)，继续处理..."
                )

        if pbar is not None:
            pbar.close()

        # After all windows: mark the run complete and save once.
        if not overall_detection_result.is_overall_complete:
            try:
                # Generate the trace-pattern ranking.
                logger.info(f"'{anomaly_id}': 开始生成链路模式排名")
                trace_patterns = overall_detection_result.generate_trace_patterns(
                    dependencies_df
                )
                if trace_patterns:
                    # First five patterns (dicts preserve insertion order);
                    # avoids shadowing the window-loop variable `i`.
                    top_patterns = dict(list(trace_patterns.items())[:5])
                    logger.info(
                        f"'{anomaly_id}': 成功生成链路模式排名，前5名模式: {list(top_patterns.keys())}"
                    )
                else:
                    logger.warning(f"'{anomaly_id}': 未能生成有效的链路模式排名")

                overall_detection_result.mark_overall_complete()

                # Final save with a small retry loop to ride out transient
                # filesystem errors.
                max_retry = 3
                retry_count = 0
                save_success = False

                while retry_count < max_retry and not save_success:
                    try:
                        overall_detection_result.save()
                        save_success = True
                        logger.info(
                            f"'{anomaly_id}': 所有窗口处理完成。最终结果已成功保存。"
                        )
                    except Exception as e:
                        retry_count += 1
                        logger.warning(
                            f"保存结果时出错 (尝试 {retry_count}/{max_retry}): {e}"
                        )
                        if retry_count < max_retry:
                            import time

                            time.sleep(1)  # brief pause before retrying

                if not save_success:
                    logger.error(
                        f"'{anomaly_id}': 所有窗口处理完成，但结果保存失败，达到最大重试次数。"
                    )

                logger.info(
                    f"'{anomaly_id}': 所有窗口处理完成。最终结果摘要: {overall_detection_result.get_summary()}"
                )

            except Exception as e:
                logger.error(
                    f"'{anomaly_id}': 标记处理完成时发生错误: {e}", exc_info=True
                )
        else:
            logger.info(
                f"'{anomaly_id}': 所有窗口处理完成。最终结果摘要: {overall_detection_result.get_summary()}"
            )

        return overall_detection_result

    def _calculate_service_scores(
        self, window_df: pd.DataFrame, anomalous_window_df: pd.DataFrame
    ) -> Dict[str, Dict[str, Any]]:
        """Score each service in the window by Ochiai suspiciousness.

        Args:
            window_df: All spans in the window.
            anomalous_window_df: Subset of spans whose traces were flagged
                anomalous.

        Returns:
            Mapping service id -> {score, total_trace_count_with_service,
            anomalous_trace_count_with_service}.
        """
        all_services_in_window = set(window_df["cmdb_id"].dropna().astype(str).unique())
        service_in_total_traces = {service: 0 for service in all_services_in_window}
        service_in_anomalous_traces = {service: 0 for service in all_services_in_window}
        anomalous_trace_ids_in_window = set(anomalous_window_df["traceId"].unique())
        num_anomalous_traces_total = len(anomalous_trace_ids_in_window)

        for service_cmdb_id in all_services_in_window:
            traces_with_service = set(
                window_df[window_df["cmdb_id"] == service_cmdb_id]["traceId"].unique()
            )
            service_in_total_traces[service_cmdb_id] = len(traces_with_service)
            service_in_anomalous_traces[service_cmdb_id] = len(
                traces_with_service.intersection(anomalous_trace_ids_in_window)
            )

        service_scores_simplified = {}
        for service in all_services_in_window:
            # e_f: anomalous traces hitting the service; e_p: normal traces
            # hitting it; n_f: anomalous traces NOT hitting it.
            e_f = service_in_anomalous_traces[service]
            e_p = service_in_total_traces[service] - e_f
            n_f = num_anomalous_traces_total - e_f
            ochiai_score = self._calculate_ochiai_score(e_f, e_p, n_f)
            service_scores_simplified[str(service)] = {
                "score": ochiai_score,
                "total_trace_count_with_service": service_in_total_traces[service],
                "anomalous_trace_count_with_service": e_f,
            }
        return service_scores_simplified

    def _calculate_ochiai_score(self, e_f: int, e_p: int, n_f: int) -> float:
        """Return the Ochiai coefficient e_f / sqrt((e_f + n_f) * (e_f + e_p)).

        Returns 0.0 whenever the numerator or denominator would be zero.
        """
        if e_f == 0:
            return 0.0
        denominator_part1 = e_f + n_f
        denominator_part2 = e_f + e_p
        if denominator_part1 == 0 or denominator_part2 == 0:
            return 0.0
        denominator = np.sqrt(denominator_part1 * denominator_part2)
        if denominator == 0:
            return 0.0
        return e_f / denominator