import pandas as pd
from datetime import datetime
from pathlib import Path
from typing import Dict, List, Optional, Set, Tuple, Union

from source.data_processor.utils.data_utils import (
    ensure_datetime_column,
    filter_dataframe_by_time,
)
from source.data_processor.utils.time_utils import convert_timestamp_to_datetime


class ExperimentData:
    """
    Experiment data object encapsulating all data for one fault case.

    Bundles fault (anomaly) metadata with metric data, dependency data and
    log data, and provides helpers for time-window filtering and for looking
    up service/container mappings derived from that data.
    """

    def __init__(
        self,
        anomaly_id: str,
        anomaly_timestamp: int,
        anomaly_datetime: datetime,
        anomaly_level: str,
        anomaly_reason: str,
        anomaly_component: str,
        observation_time: float,
        metrics_df: Optional[pd.DataFrame] = None,
        dependencies_df: Optional[pd.DataFrame] = None,
        logs_df: Optional[pd.DataFrame] = None,
    ):
        """
        Initialize the experiment data object.

        Args:
            anomaly_id: Fault ID, usually represented by a timestamp.
            anomaly_timestamp: Timestamp (seconds) when the fault occurred.
            anomaly_datetime: Datetime when the fault occurred.
            anomaly_level: Fault level (pod, service, node, ...).
            anomaly_reason: Fault reason.
            anomaly_component: Component involved in the fault.
            observation_time: Timestamp (seconds) when the fault was observed.
            metrics_df: DataFrame with metric data (empty frame if omitted).
            dependencies_df: DataFrame with dependency relations (empty frame if omitted).
            logs_df: DataFrame with log data (empty frame if omitted).
        """
        self.anomaly_id = anomaly_id
        self.anomaly_timestamp = anomaly_timestamp
        self.anomaly_datetime = anomaly_datetime
        self.anomaly_level = anomaly_level
        self.anomaly_reason = anomaly_reason
        self.anomaly_component = anomaly_component
        self.observation_time = observation_time

        # Data frames. Expected metrics_df columns:
        # - timestamp: datetime at which the metric was sampled
        # - cmdb_id: ID of the component (container, node or service) owning the metric
        # - metric_name: name of the monitored metric (CPU usage, memory usage, ...)
        # - value: normalized metric value
        # - original_value: raw metric value, before normalization
        # - entity_type: type of entity the metric belongs to (container, node or service)
        # - type: same as entity_type, used to tag the metric type
        # - ref: for container metrics, the name of the hosting node; empty string otherwise
        self.metrics_df = metrics_df if metrics_df is not None else pd.DataFrame()
        self.dependencies_df = (
            dependencies_df if dependencies_df is not None else pd.DataFrame()
        )
        self.logs_df = logs_df if logs_df is not None else pd.DataFrame()

        # Time window information (set later during processing).
        self.start_time: Optional[datetime] = None
        self.end_time: Optional[datetime] = None

        # Cached global information, computed lazily on first access.
        self._all_services: Optional[Set[str]] = None
        self._all_containers: Optional[Set[str]] = None
        self._service_container_mapping: Optional[Dict[str, Set[str]]] = None
        self._container_service_mapping: Optional[Dict[str, Set[str]]] = None
        # Cache of every known cmdb_id.
        self._all_cmdb_ids: Optional[Set[str]] = None

    def set_time_window(self, start_time: datetime, end_time: datetime):
        """Set the time window covered by this data."""
        self.start_time = start_time
        self.end_time = end_time

    def get_normal_time_range(self) -> Tuple[datetime, datetime]:
        """Return the normal (pre-fault) time range as (start_time, anomaly_datetime).

        Raises:
            ValueError: if the start time or the fault time is not set.
        """
        # Explicit `is None` checks: datetime objects are always truthy, so
        # truthiness tests would never distinguish "unset" from a real value.
        if self.start_time is None or self.anomaly_datetime is None:
            raise ValueError("未设置开始时间或故障时间，无法获取正常时间范围")
        return self.start_time, self.anomaly_datetime

    def get_anomaly_time_range(self) -> Tuple[datetime, datetime]:
        """Return the anomalous (post-fault) time range as (anomaly_datetime, end_time).

        Raises:
            ValueError: if the fault time or the end time is not set.
        """
        if self.anomaly_datetime is None or self.end_time is None:
            raise ValueError("未设置故障时间或结束时间，无法获取异常时间范围")
        return self.anomaly_datetime, self.end_time

    def _ensure_global_mappings(self):
        """Make sure the lazily-computed global mapping caches are populated."""
        if (
            self._all_services is None
            or self._all_containers is None
            or self._service_container_mapping is None
            or self._container_service_mapping is None
        ):
            self._extract_global_mappings()

    def get_all_services(self) -> Set[str]:
        """Return the set of all known service names."""
        self._ensure_global_mappings()
        return self._all_services

    def get_all_containers(self) -> Set[str]:
        """Return the set of all known container IDs."""
        self._ensure_global_mappings()
        return self._all_containers

    def get_all_cmdb_ids(self) -> Set[str]:
        """Return the set of all cmdb_ids seen in the metric and dependency data."""
        # Return the cached value when available.
        if self._all_cmdb_ids is not None:
            return self._all_cmdb_ids

        # Otherwise collect IDs from metrics_df ...
        cmdb_ids = set()
        if not self.metrics_df.empty and "cmdb_id" in self.metrics_df.columns:
            cmdb_ids.update(self.metrics_df["cmdb_id"].dropna().unique())

        # ... and from dependencies_df.
        if not self.dependencies_df.empty and "cmdb_id" in self.dependencies_df.columns:
            cmdb_ids.update(self.dependencies_df["cmdb_id"].dropna().unique())

        # Cache the result for subsequent calls.
        self._all_cmdb_ids = cmdb_ids
        return self._all_cmdb_ids

    def get_service_container_mapping(self) -> Dict[str, Set[str]]:
        """Return the mapping from service name to the set of its container IDs."""
        self._ensure_global_mappings()
        return self._service_container_mapping

    def get_container_service_mapping(self) -> Dict[str, Set[str]]:
        """Return the mapping from container ID to the set of its service names."""
        self._ensure_global_mappings()
        return self._container_service_mapping

    def _extract_global_mappings(self):
        """Extract global service/container mapping information from the dependency data."""
        all_services: Set[str] = set()
        all_containers: Set[str] = set()
        service_container_mapping: Dict[str, Set[str]] = {}
        container_service_mapping: Dict[str, Set[str]] = {}

        # Guard against missing columns: selecting ["serviceName", "cmdb_id"]
        # unconditionally raises KeyError when a non-empty frame lacks either
        # column, so check for both before selecting.
        if not self.dependencies_df.empty and {"serviceName", "cmdb_id"}.issubset(
            self.dependencies_df.columns
        ):
            # Keep only rows where both values are present.
            valid_df = self.dependencies_df[["serviceName", "cmdb_id"]].dropna()

            all_services.update(valid_df["serviceName"].unique())
            all_containers.update(valid_df["cmdb_id"].unique())

            # Build both directional mappings. Iterating the two columns with
            # zip avoids the per-row Series construction of iterrows().
            for service_name, container_id in zip(
                valid_df["serviceName"], valid_df["cmdb_id"]
            ):
                service_container_mapping.setdefault(service_name, set()).add(
                    container_id
                )
                container_service_mapping.setdefault(container_id, set()).add(
                    service_name
                )

        # Supplement container information from the metric data.
        if (
            not self.metrics_df.empty
            and "cmdb_id" in self.metrics_df.columns
            and "entity_type" in self.metrics_df.columns
        ):
            container_metrics = (
                self.metrics_df[self.metrics_df["entity_type"] == "container"][
                    "cmdb_id"
                ]
                .dropna()
                .unique()
            )
            all_containers.update(container_metrics)

        # Update the caches.
        self._all_services = all_services
        self._all_containers = all_containers
        self._service_container_mapping = service_container_mapping
        self._container_service_mapping = container_service_mapping

        # Also refresh the cmdb_id cache.
        self._all_cmdb_ids = set()
        self._all_cmdb_ids.update(all_containers)
        if not self.metrics_df.empty and "cmdb_id" in self.metrics_df.columns:
            self._all_cmdb_ids.update(self.metrics_df["cmdb_id"].dropna().unique())

    def _filter_dataframe_by_time(
        self, df: pd.DataFrame, start_time: datetime, end_time: datetime
    ) -> pd.DataFrame:
        """Return the rows of *df* whose timestamp lies in [start_time, end_time]."""
        if df.empty:
            return df

        # Work on a copy so the caller's frame is never mutated.
        df_copy = ensure_datetime_column(df.copy(), "timestamp")
        return df_copy[
            (df_copy["timestamp"] >= start_time) & (df_copy["timestamp"] <= end_time)
        ]

    def filter_by_time_range(
        self, time_range: Tuple[datetime, datetime]
    ) -> "ExperimentData":
        """Filter all data by the given time range; return a new ExperimentData."""
        start_time, end_time = time_range

        # Create a fresh ExperimentData object carrying the basic fault info.
        filtered_data = ExperimentData(
            anomaly_id=self.anomaly_id,
            anomaly_timestamp=self.anomaly_timestamp,
            anomaly_datetime=self.anomaly_datetime,
            anomaly_level=self.anomaly_level,
            anomaly_reason=self.anomaly_reason,
            anomaly_component=self.anomaly_component,
            observation_time=self.observation_time,
        )

        # Filter each data frame.
        filtered_data.metrics_df = self._filter_dataframe_by_time(
            self.metrics_df, start_time, end_time
        )
        filtered_data.dependencies_df = self._filter_dataframe_by_time(
            self.dependencies_df, start_time, end_time
        )
        filtered_data.logs_df = self._filter_dataframe_by_time(
            self.logs_df, start_time, end_time
        )

        # Record the time window on the copy.
        filtered_data.set_time_window(start_time, end_time)

        # Propagate the global mapping caches when they already exist: they
        # describe the whole experiment, not only the filtered window.
        if self._all_services is not None:
            filtered_data._all_services = self._all_services
            filtered_data._all_containers = self._all_containers
            filtered_data._service_container_mapping = self._service_container_mapping
            filtered_data._container_service_mapping = self._container_service_mapping

        # Also propagate the cmdb_id cache.
        if self._all_cmdb_ids is not None:
            filtered_data._all_cmdb_ids = self._all_cmdb_ids

        return filtered_data

    def create_time_filtered_copy(self, phase: str) -> Optional["ExperimentData"]:
        """Create a time-filtered copy of the experiment data for a given phase.

        Returns None (after printing the error) when the required time
        boundaries are not set or the phase is invalid.
        """
        try:
            if phase in ("normal", "pre_anomaly"):
                time_range = self.get_normal_time_range()
            elif phase in ("anomaly", "post_anomaly"):
                time_range = self.get_anomaly_time_range()
            else:
                # NOTE(review): this ValueError is caught by the handler below,
                # so an invalid phase also yields None instead of raising.
                raise ValueError(
                    f"无效的阶段: {phase}，有效值为 'normal'/'pre_anomaly' 或 'anomaly'/'post_anomaly'"
                )

            return self.filter_by_time_range(time_range)

        except ValueError as e:
            print(f"创建过滤后的实验数据副本失败: {e}")
            return None

    def split_data_by_anomaly_time(
        self, window_size: Optional[int] = None
    ) -> Tuple["ExperimentData", "ExperimentData"]:
        """
        Split the data into pre-fault and post-fault parts, optionally keeping
        window continuity.

        Args:
            window_size: Sliding-window size. When given, the post-fault part
                also includes enough pre-fault samples to keep windows of this
                size contiguous across the fault boundary.

        Returns:
            tuple: (pre-fault data, post-fault data); the post-fault part may
            contain some pre-fault data to preserve window continuity.

        Raises:
            ValueError: if the time window has not been set.
        """
        # The time window must have been set first.
        if self.start_time is None or self.end_time is None:
            raise ValueError("未设置时间窗口，无法分割数据")

        # Pre-fault data covers [start_time, anomaly_datetime].
        pre_anomaly_data = self.filter_by_time_range(
            (self.start_time, self.anomaly_datetime)
        )

        # Default start of the post-fault slice. When a window size is given
        # the fallback is the full range, refined below from the actual
        # metric timestamps.
        post_start_time = self.start_time if window_size else self.anomaly_datetime

        if window_size and not self.metrics_df.empty:
            # Locate the first metric timestamp at or after the fault, then
            # step back (window_size - 1) samples so windows stay contiguous.
            df_copy = ensure_datetime_column(self.metrics_df.copy(), "timestamp")
            all_timestamps = sorted(df_copy["timestamp"].unique())

            anomaly_idx = next(
                (i for i, ts in enumerate(all_timestamps) if ts >= self.anomaly_datetime),
                None,
            )

            # Only shift the start when enough history exists before the fault.
            if anomaly_idx is not None and anomaly_idx >= window_size:
                history_start_idx = anomaly_idx - window_size + 1
                post_start_time = all_timestamps[history_start_idx]

        # Post-fault data, including the required history.
        post_anomaly_data = self.filter_by_time_range((post_start_time, self.end_time))

        # Tag the split parts with derived IDs.
        pre_anomaly_data.anomaly_id = f"{self.anomaly_id}_pre"
        post_anomaly_data.anomaly_id = f"{self.anomaly_id}_post"

        return pre_anomaly_data, post_anomaly_data

    def save_to_directory(self, output_dir: Union[str, Path]):
        """Save the experiment data as CSV files in the given directory.

        Note: the original annotation `str or Path` evaluated to just `str`;
        `Union[str, Path]` expresses the intended contract.
        """
        output_path = Path(output_dir)
        output_path.mkdir(parents=True, exist_ok=True)

        # Persist the fault metadata as a one-row CSV.
        anomaly_info = {
            "anomaly_id": [self.anomaly_id],
            "timestamp": [self.anomaly_timestamp],
            "datetime": [self.anomaly_datetime.strftime("%Y-%m-%d %H:%M:%S")],
            "level": [self.anomaly_level],
            "reason": [self.anomaly_reason],
            "component": [self.anomaly_component],
            "observation_time": [self.observation_time],
        }

        pd.DataFrame(anomaly_info).to_csv(output_path / "anomaly_info.csv", index=False)

        # Persist the non-empty data frames.
        if not self.metrics_df.empty:
            self.metrics_df.to_csv(output_path / "OpenTelemetry.csv", index=False)
        if not self.dependencies_df.empty:
            self.dependencies_df.to_csv(output_path / "dependencies.csv", index=False)
        if not self.logs_df.empty:
            self.logs_df.to_csv(output_path / "logs.csv", index=False)

    def __str__(self):
        """Return a human-readable summary of the experiment data."""
        all_services = self.get_all_services()
        all_containers = self.get_all_containers()
        all_cmdb_ids = self.get_all_cmdb_ids()

        return (
            f"故障ID: {self.anomaly_id}\n"
            f"时间戳: {self.anomaly_timestamp}\n"
            f"日期时间: {self.anomaly_datetime}\n"
            f"故障级别: {self.anomaly_level}\n"
            f"故障原因: {self.anomaly_reason}\n"
            f"故障组件: {self.anomaly_component}\n"
            f"观察时间: {self.observation_time}\n"
            f"指标数据行数: {len(self.metrics_df)}\n"
            f"依赖关系行数: {len(self.dependencies_df)}\n"
            f"日志数据行数: {len(self.logs_df)}\n"
            f"全局服务数: {len(all_services)}\n"
            f"全局容器数: {len(all_containers)}\n"
            f"全局CMDB ID数: {len(all_cmdb_ids)}"
        )

    def __repr__(self):
        """Return a code-style representation of the object."""
        return f"ExperimentData(anomaly_id='{self.anomaly_id}', anomaly_timestamp={self.anomaly_timestamp})"
