import logging
import os

import pandas as pd
from abc import ABC, abstractmethod
from typing import List, Any, Optional, Dict, Tuple, Set
from pathlib import Path
from datetime import datetime, timedelta

from source.data_processor.loaders.base_loader import AnomalyRecord, BaseDataLoader
from source.public.config_manager import ConfigManager

from source.data_processor.utils.data_utils import smooth_timeseries, standardize_data
from source.data_processor.utils.time_utils import ensure_timezone

# Configure basic logging for this module.
logging.basicConfig(
    level=logging.INFO, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s"
)
logger = logging.getLogger(__name__)


class CommonDataLoader(BaseDataLoader):
    """
    Common base class for specific data loaders (Telecom, Market, Bank).

    Handles shared logic for loading telemetry data, processing metrics, and
    building span-level dependency data. Subclasses configure the column
    mappings assigned in ``__init__`` and implement the abstract hooks
    ``_read_single_metric_file`` and ``_prepare_dependencies_dataframe``.
    """

    def __init__(
        self,
        base_data_path: str,
        output_dir: str,
        record_file_name: str = "record.csv",
        sampling_rate: str = "60s",
        smooth_method: str = "linear",
        smooth_limit: int = 5,
        num_workers: int = 1,
        use_cache: bool = True,
        cache_prefix: str = "common_loader",
        trace_file_name: str = "trace_span.csv",
    ):
        """
        Args:
            base_data_path: Root directory of the dataset (contains ``telemetry/``).
            output_dir: Directory for loader output / cache files.
            record_file_name: Name of the anomaly record CSV file.
            sampling_rate: Pandas resampling frequency string (e.g. ``"60s"``).
            smooth_method: Interpolation method used to fill gaps after resampling.
            smooth_limit: Max consecutive NaNs the smoother may fill.
            num_workers: Degree of parallelism (reserved for subclasses).
            use_cache: Whether BaseDataLoader caching is enabled.
            cache_prefix: Prefix used for cache files.
            trace_file_name: Name of the per-day trace CSV file.
        """
        super().__init__(
            base_data_path, output_dir, use_cache, cache_prefix, record_file_name
        )
        self.sampling_rate = sampling_rate
        self.smooth_method = smooth_method
        self.smooth_limit = smooth_limit
        self.num_workers = num_workers
        self.telemetry_path = self.base_data_path / "telemetry"
        self.trace_file_name = trace_file_name  # Common trace file name
        self.config_manager = ConfigManager()
        # Resampling is enabled by default: Telecom data needs it, while
        # Market and Bank data are already sampled at 60s and disable it.
        self.needs_resampling = True

        # These should be overridden by subclasses.
        self.metric_files_info: Dict[str, str] = {}
        self.trace_required_cols: List[str] = []
        self.trace_timestamp_col: str = "timestamp"  # Default, can be 'startTime' etc.
        self.trace_timestamp_unit: str = "ms"  # Default
        self.trace_latency_col_original: str = (
            "latency"  # Default, can be 'duration', 'elapsedTime'
        )
        self.trace_latency_col_target: str = "latency"

        # For generating metrics from trace
        self.trace_service_identifier_col: str = (
            "cmdb_id"  # Default, can be 'dsName', 'operation_name'
        )
        self.trace_span_id_col_for_count: str = "id"  # Default, can be 'span_id'
        self.generated_metrics_entity_type: str = "service"  # Default

        # For processing trace for dependencies
        self.dep_trace_id_col: str = "traceId"  # Target name
        self.dep_span_id_col: str = "spanId"  # Target name
        self.dep_parent_id_col: str = (
            "parentId"  # Target name (can be from source or derived)
        )
        self.dep_cmdb_id_col: str = "cmdb_id"  # Source cmdb_id for the span
        self.dep_latency_col: str = "latency"  # Target name

        if (
            self.record_path and not self.record_path.exists()
        ):  # check after self.record_path is set
            logger.warning(f"Anomaly record file may not exist yet: {self.record_path}")

    def _get_date_folders_for_window(
        self, start_time: datetime, end_time: datetime
    ) -> List[str]:
        """Return the sorted ``YYYY_MM_DD`` telemetry folders that exist for the window."""
        if not self.telemetry_path.exists():
            logger.warning(
                f"Telemetry data directory does not exist: {self.telemetry_path}"
            )
            return []

        start_time_local = ensure_timezone(start_time)
        end_time_local = ensure_timezone(end_time)

        date_folders = set()
        # Walk day by day from the start of the first day through the end of the window.
        current_dt = start_time_local.replace(hour=0, minute=0, second=0, microsecond=0)
        while current_dt <= end_time_local:
            folder_name = current_dt.strftime("%Y_%m_%d")
            if (self.telemetry_path / folder_name).is_dir():
                date_folders.add(folder_name)
            current_dt += timedelta(days=1)

        if not date_folders:
            logger.warning(
                f"No date folders found in {self.telemetry_path} for window {start_time_local} - {end_time_local}"
            )
        return sorted(list(date_folders))

    def _process_loaded_metrics(self, metrics_df: pd.DataFrame) -> pd.DataFrame:
        """
        Clean, (optionally) resample, smooth and standardize raw metric rows.

        Expects columns ``timestamp``, ``cmdb_id``, ``metric_name``, ``value``
        and ideally ``entity_type``. Returns one row per timestamp with the
        standardized ``value`` plus the untouched ``original_value``.
        """
        if metrics_df.empty:
            return pd.DataFrame()

        processed_metrics_list = []
        if "timestamp" not in metrics_df.columns:
            logger.error("Metrics DataFrame missing 'timestamp' column.")
            return pd.DataFrame()

        metrics_df["timestamp"] = pd.to_datetime(
            metrics_df["timestamp"]
        )  # Ensure datetime

        # Ensure 'entity_type' exists
        if "entity_type" not in metrics_df.columns:
            logger.warning(
                "Metrics DataFrame missing 'entity_type', defaulting to 'unknown'."
            )
            metrics_df["entity_type"] = "unknown"

        metrics_df = metrics_df.set_index("timestamp").sort_index()

        for (cmdb_id, metric_name, entity_type), group in metrics_df.groupby(
            ["cmdb_id", "metric_name", "entity_type"]
        ):
            group = group.copy()
            group["value"] = pd.to_numeric(group["value"], errors="coerce")
            group.dropna(subset=["value"], inplace=True)
            if group.empty:
                continue

            # Choose the value series depending on whether this source needs
            # resampling (Telecom: yes; Market/Bank: already on a 60s grid).
            if self.needs_resampling:
                logger.debug(
                    f"对 {entity_type} 类型的 {cmdb_id}/{metric_name} 进行重采样，采样率: {self.sampling_rate}"
                )
                resampled = group["value"].resample(self.sampling_rate).mean()
                if resampled.isna().any():
                    # Fill resampling gaps via interpolation before standardizing.
                    resampled = smooth_timeseries(
                        resampled, method=self.smooth_method, limit=self.smooth_limit
                    )

                if resampled.empty or resampled.isna().all():
                    continue
                value_series = resampled
            else:
                logger.debug(
                    f"{entity_type} 类型的 {cmdb_id}/{metric_name} 已采样，跳过重采样"
                )
                value_series = group["value"]

            standardized_values, original_values = standardize_data(value_series)

            processed_df = pd.DataFrame(
                {
                    "timestamp": value_series.index,
                    "cmdb_id": cmdb_id,
                    "metric_name": metric_name,
                    "value": (
                        standardized_values.values
                        if not standardized_values.empty
                        else 0.0
                    ),
                    "original_value": (
                        original_values.values if not original_values.empty else 0.0
                    ),
                    "entity_type": entity_type,
                }
            )

            # Hook for subclasses to add more columns
            processed_df = self._add_specific_metric_columns(
                processed_df, (cmdb_id, metric_name, entity_type), group
            )

            # Fill NaNs that might have occurred during processing. Plain
            # assignment instead of `inplace=True` on a column selection,
            # which is deprecated chained-assignment behavior in pandas >= 2
            # and a no-op under Copy-on-Write.
            processed_df["value"] = processed_df["value"].fillna(0)
            mean_original = processed_df["original_value"].mean()
            processed_df["original_value"] = processed_df["original_value"].fillna(
                mean_original if pd.notna(mean_original) else 0
            )

            processed_metrics_list.append(processed_df)

        if not processed_metrics_list:
            return pd.DataFrame()
        final_df = pd.concat(processed_metrics_list, ignore_index=True)
        logger.info(
            f"Processed {len(final_df)} metric records (resampled, smoothed, standardized)."
        )
        return final_df

    def _add_specific_metric_columns(
        self,
        processed_df: pd.DataFrame,
        group_key: Tuple,
        original_group_df: pd.DataFrame,
    ) -> pd.DataFrame:
        """Hook for subclasses to add specific columns to processed metrics."""
        return processed_df  # Default implementation does nothing

    @abstractmethod
    def _read_single_metric_file(
        self,
        file_path: Path,
        entity_type: str,
        start_time: datetime,
        end_time: datetime,
    ) -> pd.DataFrame:
        """To be implemented by subclasses for dataset-specific metric file reading."""
        pass

    def _load_and_filter_trace_data_for_window(
        self, date_folders: List[str], start_time: datetime, end_time: datetime
    ) -> pd.DataFrame:
        """
        Load the per-day trace CSVs for ``date_folders``, normalize the
        timestamp/latency column names and keep only rows inside the window.
        """
        all_trace_dfs = []
        start_time_aware = ensure_timezone(start_time)
        end_time_aware = ensure_timezone(end_time)

        for date_folder in date_folders:
            trace_file_path = (
                self.telemetry_path / date_folder / "trace" / self.trace_file_name
            )
            if not trace_file_path.exists():
                logger.debug(f"Trace file not found: {trace_file_path}")
                continue

            try:
                daily_trace_df = pd.read_csv(trace_file_path)
            except Exception as e:
                logger.error(f"Error reading trace file {trace_file_path}: {e}")
                continue

            if not self.validate_dataframe(daily_trace_df, self.trace_required_cols):
                logger.warning(
                    f"Trace file {trace_file_path} has incorrect format. Skipping."
                )
                continue

            df = daily_trace_df[self.trace_required_cols].copy()

            # Timestamp conversion
            ts_col = self.trace_timestamp_col
            if ts_col not in df.columns:
                logger.error(
                    f"Timestamp column '{ts_col}' not found in trace file {trace_file_path}. Available: {df.columns.tolist()}"
                )
                continue

            try:
                if pd.api.types.is_numeric_dtype(df[ts_col]):
                    # Epoch values: interpret with the subclass-configured unit.
                    df[ts_col] = pd.to_datetime(
                        df[ts_col], unit=self.trace_timestamp_unit, utc=True
                    ).dt.tz_convert("Asia/Shanghai")
                else:  # Assuming it's a string that can be parsed
                    df[ts_col] = pd.to_datetime(df[ts_col], utc=True).dt.tz_convert(
                        "Asia/Shanghai"
                    )
            except Exception as e:
                logger.error(
                    f"Error converting timestamp in {trace_file_path} for column {ts_col}: {e}"
                )
                continue

            df.rename(
                columns={ts_col: "timestamp"}, inplace=True
            )  # Standardize to 'timestamp'

            # Latency column
            if self.trace_latency_col_original != self.trace_latency_col_target:
                df.rename(
                    columns={
                        self.trace_latency_col_original: self.trace_latency_col_target
                    },
                    inplace=True,
                )

            df = df[
                (df["timestamp"] >= start_time_aware)
                & (df["timestamp"] <= end_time_aware)
            ]
            if not df.empty:
                all_trace_dfs.append(df)

        if not all_trace_dfs:
            return pd.DataFrame()
        combined = pd.concat(all_trace_dfs, ignore_index=True)
        logger.info(
            f"Loaded {len(combined)} trace records from {len(date_folders)} folders."
        )
        return combined

    def _generate_metrics_from_trace(self, trace_df: pd.DataFrame) -> pd.DataFrame:
        """
        Generate metrics (request count, average latency) from trace data.

        Works both on raw trace data and on the standardized dependency
        DataFrame produced by ``load_dependencies``.

        Args:
            trace_df: Raw trace data or a standardized dependencies DataFrame.

        Returns:
            DataFrame of generated metric rows (long format:
            timestamp / cmdb_id / metric_name / value / entity_type).
        """
        if trace_df.empty:
            return pd.DataFrame()

        generated_metrics_list = []

        # Detect whether trace_df already uses the standardized column schema.
        is_standardized = all(
            col in trace_df.columns
            for col in [
                "timestamp",
                "traceId",
                "spanId",
                "parentId",
                "serviceName",
                "cmdb_id",
                "latency",
            ]
        )

        # Determine the service-identifier column.
        service_id_col = (
            "serviceName" if is_standardized else self.trace_service_identifier_col
        )
        if service_id_col not in trace_df.columns:
            logger.warning(
                f"Trace数据缺少服务标识符列 '{service_id_col}'，无法生成指标"
            )
            return pd.DataFrame()

        # Determine the latency column.
        latency_col = "latency" if is_standardized else self.trace_latency_col_target
        if latency_col not in trace_df.columns:
            logger.error(f"Trace数据缺少延迟列 '{latency_col}'，无法生成指标")
            return pd.DataFrame()

        # Coerce latency to numeric on a shallow copy so the caller's
        # DataFrame is not mutated as a side effect.
        trace_df = trace_df.assign(
            **{latency_col: pd.to_numeric(trace_df[latency_col], errors="coerce")}
        )

        # Determine the span-id column used for request counting.
        count_col = "spanId" if is_standardized else self.trace_span_id_col_for_count
        if count_col not in trace_df.columns:
            logger.warning(f"未找到用于计数的列 '{count_col}'，尝试备用方案")
            # Fallback: pick the first recognizable span-id column.
            potential_id_cols = [
                c for c in ["spanId", "id", "span_id"] if c in trace_df.columns
            ]
            if not potential_id_cols:
                logger.error("无法找到可用于请求计数的列")
                return pd.DataFrame()
            count_col = potential_id_cols[0]
            logger.info(f"使用 '{count_col}' 列进行请求计数")

        for service_id, group in trace_df.groupby(service_id_col):
            # Skip empty/NaN service identifiers.
            if pd.isna(service_id) or not str(service_id).strip():
                continue

            group = group.set_index("timestamp").sort_index()

            # Per-bucket request count and mean latency.
            request_count = group[count_col].resample(self.sampling_rate).count()
            avg_latency = group[latency_col].resample(self.sampling_rate).mean()

            # Fill NaNs introduced by resampling.
            request_count = request_count.fillna(0)
            avg_latency = avg_latency.fillna(0)

            service_metrics_agg = pd.DataFrame(
                {"request_count": request_count, "avg_response_time": avg_latency}
            ).reset_index()

            if not service_metrics_agg.empty:
                # For standardized data, serviceName becomes the new cmdb_id.
                service_metrics_agg["cmdb_id"] = service_id
                melted = service_metrics_agg.melt(
                    id_vars=["timestamp", "cmdb_id"],
                    var_name="metric_name",
                    value_name="value",
                )
                melted["entity_type"] = self.generated_metrics_entity_type
                generated_metrics_list.append(melted)

        if not generated_metrics_list:
            return pd.DataFrame()
        final_df = pd.concat(generated_metrics_list, ignore_index=True)
        logger.info(f"从trace数据生成了 {len(final_df)} 条指标记录")
        return final_df

    @abstractmethod
    def _prepare_dependencies_dataframe(self, trace_df: pd.DataFrame) -> pd.DataFrame:
        """
        Subclasses implement this to prepare the DataFrame for dependency processing.
        This involves:
        1. Selecting necessary columns from raw trace_df.
        2. Renaming columns to standard names: 'timestamp', 'traceId', 'spanId', 'parentId' (if exists),
           'cmdb_id' (original CI of the span), 'latency'.
        3. Deriving 'parentId' if not available or needs adjustment.
        4. Deriving 'serviceName' for the dependency link.
        Should return a DataFrame with columns: ['timestamp', 'traceId', 'spanId', 'parentId', 'serviceName', 'cmdb_id', 'latency']
        """
        pass

    # --- Main public methods for data loading ---
    def load_metrics(
        self,
        start_time: datetime,
        end_time: datetime,
        record: AnomalyRecord,
        dependencies_df: Optional[pd.DataFrame] = None,
    ) -> pd.DataFrame:
        """
        Load metric data for the given time window.

        Args:
            start_time: Window start.
            end_time: Window end.
            record: Anomaly record being processed.
            dependencies_df: Optional standardized trace/dependency data
                (the output of ``load_dependencies``) used to derive
                trace-based metrics.

        Returns:
            Processed metrics DataFrame (see ``_process_loaded_metrics``).
        """
        logger.info(
            f"正在加载 {record.anomaly_id} 的指标数据，时间范围: {start_time} 至 {end_time}"
        )
        all_metrics_dfs = []
        date_folders = self._get_date_folders_for_window(start_time, end_time)
        if not date_folders:
            return pd.DataFrame()

        # 1. Load metrics from the per-day metric files.
        for date_folder in date_folders:
            metric_folder_path = self.telemetry_path / date_folder / "metric"
            if metric_folder_path.is_dir():
                for file_name, entity_type in self.metric_files_info.items():
                    metric_file_path = metric_folder_path / file_name
                    df = self._read_single_metric_file(
                        metric_file_path, entity_type, start_time, end_time
                    )
                    if not df.empty:
                        all_metrics_dfs.append(df)

        # 2. Generate metrics from trace data.
        # dependencies_df is already standardized trace data produced by
        # load_dependencies, with the standard columns:
        # "timestamp", "traceId", "spanId", "parentId", "serviceName", "cmdb_id", "latency"
        if dependencies_df is not None and not dependencies_df.empty:
            trace_derived_metrics = self._generate_metrics_from_trace(dependencies_df)
            if not trace_derived_metrics.empty:
                all_metrics_dfs.append(trace_derived_metrics)

        if not all_metrics_dfs:
            logger.warning("未找到任何来源的指标数据")
            return pd.DataFrame()

        combined_metrics = pd.concat(all_metrics_dfs, ignore_index=True)
        if combined_metrics.empty:
            return pd.DataFrame()

        return self._process_loaded_metrics(combined_metrics)

    def load_dependencies(
        self, start_time: datetime, end_time: datetime, record: AnomalyRecord
    ) -> pd.DataFrame:
        """
        Load dependency (span) data for the given time window.

        Workflow:
        1. Find the date folders covering the window.
        2. Load raw trace data via ``_load_and_filter_trace_data_for_window``.
        3. Convert to the standard schema via ``_prepare_dependencies_dataframe``.
        4. Validate, type-cast and de-duplicate the standardized data.

        Args:
            start_time: Window start.
            end_time: Window end.
            record: Anomaly record being processed.

        Returns:
            Standardized dependencies DataFrame with columns:
            ["timestamp", "traceId", "spanId", "parentId", "serviceName", "cmdb_id", "latency"]
        """
        logger.info(
            f"正在加载 {record.anomaly_id} 的依赖关系数据，时间范围: {start_time} 至 {end_time}"
        )

        # 1. Date folders inside the window.
        date_folders = self._get_date_folders_for_window(start_time, end_time)
        if not date_folders:
            return pd.DataFrame()

        # 2. Raw trace data.
        raw_trace_df = self._load_and_filter_trace_data_for_window(
            date_folders, start_time, end_time
        )
        if raw_trace_df.empty:
            logger.warning("未找到指定时间窗口内的原始trace数据")
            return pd.DataFrame()

        # 3. Convert raw trace data to the standard schema. Each concrete
        # loader subclass implements _prepare_dependencies_dataframe for its
        # own trace format.
        dependencies_df = self._prepare_dependencies_dataframe(raw_trace_df.copy())

        # 4. Validate and post-process the standardized data.
        required_dep_cols = [
            "timestamp",
            "traceId",
            "spanId",
            "parentId",
            "serviceName",
            "cmdb_id",
            "latency",
        ]
        if not self.validate_dataframe(dependencies_df, required_dep_cols):
            logger.error("处理后的依赖关系数据缺少必要列")
            return pd.DataFrame()

        # Enforce correct dtypes.
        dependencies_df["timestamp"] = pd.to_datetime(dependencies_df["timestamp"])
        for col in ["traceId", "spanId", "parentId", "serviceName", "cmdb_id"]:
            dependencies_df[col] = dependencies_df[col].astype(str)
        dependencies_df["latency"] = pd.to_numeric(
            dependencies_df["latency"], errors="coerce"
        )

        # Normalize parentId: any parent not present among the spanIds is
        # treated as a root and rewritten to "0".
        all_span_ids = set(dependencies_df["spanId"].unique())
        mask = ~dependencies_df["parentId"].isin(all_span_ids) & (dependencies_df["parentId"] != "0")
        if mask.any():
            logger.info(f"发现 {mask.sum()} 条记录的parentId不存在于任何spanId中，已将其设置为\"0\"")
            dependencies_df.loc[mask, "parentId"] = "0"

        # Final column selection and de-duplication.
        dependencies_df = (
            dependencies_df[required_dep_cols].drop_duplicates().reset_index(drop=True)
        )

        logger.info(f"从trace数据中处理得到 {len(dependencies_df)} 条依赖关系记录")
        return dependencies_df

    def _load_anomaly_records(self) -> List[AnomalyRecord]:
        """
        Load and parse the anomaly record file into AnomalyRecord objects.

        Implements the BaseDataLoader._load_anomaly_records abstract method;
        the logic mirrors the Market and Telecom loaders.

        Raises:
            FileNotFoundError: If the record file does not exist.
            ValueError: If the record file is missing required columns.
        """
        logger.info(f"正在加载故障记录文件: {self.record_path}")

        # Fail fast with a clear message instead of letting read_csv raise
        # from deep inside pandas (same exception type either way).
        if not self.record_path or not Path(self.record_path).exists():
            raise FileNotFoundError(
                f"Anomaly record file not found: {self.record_path}"
            )

        anomaly_df = pd.read_csv(self.record_path)
        # Adjust to the actual column names of record.csv; assumed to match
        # the market_loader layout.
        required_cols = ["timestamp", "level", "reason", "component"]
        if not self.validate_dataframe(anomaly_df, required_cols):
            logger.error(f"故障记录文件列: {anomaly_df.columns.tolist()}")
            raise ValueError(f"故障记录文件缺少必要列: {required_cols}")

        # Parse timestamps, dropping rows that fail numeric coercion.
        anomaly_df["timestamp"] = pd.to_numeric(
            anomaly_df["timestamp"], errors="coerce"
        )
        anomaly_df.dropna(subset=["timestamp"], inplace=True)
        # Ensure second-resolution integer epoch timestamps.
        anomaly_df["timestamp"] = anomaly_df["timestamp"].astype(int)

        # Convert to timezone-aware datetimes (Asia/Shanghai); the epoch
        # seconds in record.csv are assumed to be UTC — TODO confirm.
        anomaly_df["anomaly_dt"] = pd.to_datetime(
            anomaly_df["timestamp"],
            unit="s",
            utc=True,
        ).dt.tz_convert("Asia/Shanghai")

        records = []
        for _, row in anomaly_df.iterrows():
            anomaly_id = self.get_anomaly_id(row["component"], row["timestamp"])
            record = AnomalyRecord(
                anomaly_id=anomaly_id,
                unix_timestamp=int(row["timestamp"]),
                datetime_obj=row["anomaly_dt"],
                level=row["level"],
                reason=row["reason"],
                component=row["component"],
                cmdb_id=row.get("cmdb_id"),  # Present only if record.csv has it
                # Other fields can be pulled from `row` as needed.
            )
            records.append(record)

        logger.info(f"成功加载 {len(records)} 条故障记录")
        return records

class TelecomDataLoader(CommonDataLoader):
    """
    Loader for the Telecom dataset.

    Telecom metrics arrive at irregular intervals, so they are resampled to
    ``sampling_rate``. Trace spans use 'startTime' (ms epoch), 'elapsedTime'
    for latency, 'id'/'pid' for span/parent ids and 'dsName' as the service
    identifier.
    """

    def __init__(
        self,
        base_data_path: str,
        output_dir: str,
        record_file_name: str = "record.csv",
        sampling_rate: str = "60s",  # Explicit sampling-rate parameter, defaults to 60s
        **kwargs,
    ):
        # Telecom data must be resampled at the requested rate.
        super().__init__(
            base_data_path,
            output_dir,
            record_file_name,
            sampling_rate=sampling_rate,  # Use the caller-supplied rate
            cache_prefix="telecom_loader",
            **kwargs,
        )
        # Explicitly enable resampling (True is already the default).
        self.needs_resampling = True

        # Remaining configuration is Telecom-specific column mapping.
        self.metric_files_info = {
            "metric_container.csv": "container",
            "metric_middleware.csv": "container",  # Or "middleware" if distinct processing
            "metric_node.csv": "node",
            "metric_service.csv": "service",
        }
        self.trace_file_name = (
            "trace_span.csv"  # Explicitly set, though could be default
        )
        self.trace_required_cols = [
            "startTime",
            "elapsedTime",
            "traceId",
            "id",
            "pid",
            "cmdb_id",
            "dsName",
        ]
        self.trace_timestamp_col = "startTime"
        self.trace_timestamp_unit = "ms"
        self.trace_latency_col_original = "elapsedTime"
        self.trace_latency_col_target = "latency"

        # For generating metrics from trace
        self.trace_service_identifier_col = (
            "dsName"  # Service identifier in Telecom trace
        )
        self.trace_span_id_col_for_count = "id"  # Span identifier for counting
        self.generated_metrics_entity_type = (
            "interface"  # Telecom calls these 'interface' metrics
        )

    def _read_single_metric_file(
        self,
        file_path: Path,
        entity_type: str,
        start_time: datetime,
        end_time: datetime,
    ) -> pd.DataFrame:
        """
        Read one Telecom metric CSV, filter to the window and normalize columns.

        Telecom metric files use 'name' for the metric name and a millisecond
        epoch 'timestamp'.
        """
        if not file_path.exists():
            return pd.DataFrame()
        df = pd.read_csv(file_path)

        required = ["timestamp", "cmdb_id", "value", "name"]
        if not self.validate_dataframe(df, required):
            logger.warning(f"Telecom metric file {file_path.name} invalid, skipping.")
            return pd.DataFrame()

        df = df[required].copy()
        df["timestamp"] = pd.to_datetime(
            df["timestamp"], unit="ms", utc=True
        ).dt.tz_convert("Asia/Shanghai")

        start_time_aware = ensure_timezone(start_time)
        end_time_aware = ensure_timezone(end_time)
        df = df[
            (df["timestamp"] >= start_time_aware) & (df["timestamp"] <= end_time_aware)
        ]

        if not df.empty:
            df.rename(columns={"name": "metric_name"}, inplace=True)
            df["entity_type"] = entity_type
        return df

    def _prepare_dependencies_dataframe(self, trace_df: pd.DataFrame) -> pd.DataFrame:
        """
        Map Telecom trace columns onto the standard dependency schema.

        Telecom columns: 'traceId', 'id' (span id), 'pid' (parent id),
        'dsName' (service name), 'cmdb_id' (host/container of the span),
        'elapsedTime' (latency; usually already renamed by the base loader).
        """
        deps = trace_df.rename(
            columns={
                "id": "spanId",  # spanId is 'id' in Telecom trace
                "pid": "parentId",  # parentId is 'pid'
                "dsName": "serviceName",  # serviceName is 'dsName'
            }
        )

        # Root spans (or malformed rows) may lack a parent. Ensure the column
        # exists and replace *any* missing value with "0" — previously only a
        # fully-null column was handled, so partially-null parents leaked
        # through as the string "nan" after the later astype(str) cast.
        if "parentId" not in deps.columns:
            deps["parentId"] = "0"
        else:
            deps["parentId"] = deps["parentId"].fillna("0")

        # Rename the latency column if the generic trace loading has not
        # already done so.
        if (
            self.trace_latency_col_target not in deps.columns
            and self.trace_latency_col_original in deps.columns
        ):
            deps.rename(
                columns={
                    self.trace_latency_col_original: self.trace_latency_col_target
                },
                inplace=True,
            )

        # 'cmdb_id' (host/container of the span) is kept as-is; 'serviceName'
        # already comes from 'dsName'.
        return deps


class MarketDataLoader(CommonDataLoader):
    """
    Loader for the Market dataset.

    Market data is already sampled at 60s, so resampling is disabled. Trace
    spans use 'timestamp' (ms epoch), 'duration' for latency,
    'span_id'/'parent_span' for span/parent ids and 'operation_name' as the
    service identifier.
    """

    def __init__(
        self,
        base_data_path: str,
        output_dir: str,
        record_file_name: str = "record.csv",
        **kwargs,
    ):
        # Market data is already sampled at 60s; force that rate and skip
        # re-resampling in the shared processing pipeline.
        kwargs["sampling_rate"] = "60s"
        super().__init__(
            base_data_path,
            output_dir,
            record_file_name,
            cache_prefix="market_loader",
            **kwargs,
        )
        self.needs_resampling = False  # Disable resampling

        self.metric_files_info = {
            "metric_container.csv": "container",
            "metric_node.csv": "node",
        }
        # Trace columns for Market: timestamp,cmdb_id,span_id,trace_id,duration,operation_name,parent_span
        self.trace_required_cols = [
            "timestamp",
            "cmdb_id",
            "span_id",
            "trace_id",
            "duration",
            "operation_name",
            "parent_span",
        ]
        self.trace_timestamp_col = "timestamp"  # Already named 'timestamp'
        self.trace_timestamp_unit = "ms"
        self.trace_latency_col_original = "duration"
        self.trace_latency_col_target = "latency"

        self.trace_service_identifier_col = (
            "operation_name"  # Service is 'operation_name'
        )
        self.trace_span_id_col_for_count = "span_id"
        self.generated_metrics_entity_type = "service"

    def _read_single_metric_file(
        self,
        file_path: Path,
        entity_type: str,
        start_time: datetime,
        end_time: datetime,
    ) -> pd.DataFrame:
        """
        Read one Market metric CSV, filter to the window and normalize columns.

        Market metric files use 'kpi_name' for the metric name and a
        second-resolution epoch 'timestamp'. Container rows carry a combined
        'node.container' cmdb_id that is split into 'node_cmdb_id'/'cmdb_id'.
        """
        if not file_path.exists():
            return pd.DataFrame()
        df = pd.read_csv(file_path)

        required = ["timestamp", "cmdb_id", "value", "kpi_name"]
        if not self.validate_dataframe(df, required):
            logger.warning(f"Market metric file {file_path.name} invalid, skipping.")
            return pd.DataFrame()

        df = df[required].copy()
        # Epoch seconds -> tz-aware datetimes. unit="s" is equivalent to the
        # previous `df["timestamp"] * 1000` with unit="ms", without the extra
        # multiplication.
        df["timestamp"] = pd.to_datetime(
            df["timestamp"], unit="s", utc=True
        ).dt.tz_convert("Asia/Shanghai")

        if (
            entity_type == "container"
        ):  # Market specific cmdb_id splitting for containers
            split_cmdb = df["cmdb_id"].str.split(".", n=1, expand=True)
            if split_cmdb.shape[1] == 2:
                df[["node_cmdb_id", "cmdb_id"]] = split_cmdb
                df.dropna(subset=["cmdb_id", "node_cmdb_id"], inplace=True)
            else:  # Handle cases where split doesn't produce two columns
                logger.warning(
                    f"Could not split 'cmdb_id' in {file_path.name} as expected. Keeping original 'cmdb_id'."
                )
                df["node_cmdb_id"] = None  # Or some default

        start_time_aware = ensure_timezone(start_time)
        end_time_aware = ensure_timezone(end_time)
        df = df[
            (df["timestamp"] >= start_time_aware) & (df["timestamp"] <= end_time_aware)
        ]

        if not df.empty:
            df.rename(columns={"kpi_name": "metric_name"}, inplace=True)
            df["entity_type"] = entity_type
        return df

    def _add_specific_metric_columns(
        self,
        processed_df: pd.DataFrame,
        group_key: Tuple,
        original_group_df: pd.DataFrame,
    ) -> pd.DataFrame:
        """Attach the node_cmdb_id column (set by _read_single_metric_file) to the group."""
        if "node_cmdb_id" in original_group_df.columns:
            # Use a single consistent node_cmdb_id value for this group.
            node_id_series = original_group_df["node_cmdb_id"].dropna()
            if not node_id_series.empty:
                processed_df["node_cmdb_id"] = node_id_series.iloc[0]
        return processed_df

    def _prepare_dependencies_dataframe(self, trace_df: pd.DataFrame) -> pd.DataFrame:
        """
        Map Market trace columns onto the standard dependency schema.

        Market columns: timestamp, cmdb_id, span_id, trace_id, duration,
        operation_name, parent_span.
        """
        deps = trace_df.rename(
            columns={
                "parent_span": "parentId",
                "operation_name": "serviceName",
                "trace_id": "traceId",
                "span_id": "spanId",
            }
        )

        # Root spans may carry a null parent_span; normalize missing parents
        # to "0" so they are treated as roots downstream instead of becoming
        # the string "nan" after the later astype(str) cast.
        if "parentId" in deps.columns:
            deps["parentId"] = deps["parentId"].fillna("0")

        # Ensure latency column is named as per trace_latency_col_target
        if (
            self.trace_latency_col_target not in deps.columns
            and self.trace_latency_col_original in deps.columns
        ):
            deps.rename(
                columns={
                    self.trace_latency_col_original: self.trace_latency_col_target
                },
                inplace=True,
            )

        return deps


class BankDataLoader(CommonDataLoader):
    """Data loader for the Bank dataset.

    Bank telemetry is pre-sampled at 60s, so the common resampling step is
    disabled. Only container metrics are read from file; service-level
    metrics are derived from trace spans, where each service is identified
    as ``<container_cmdb_id>_svc``.
    """

    def __init__(
        self,
        base_data_path: str,
        output_dir: str,
        record_file_name: str = "record.csv",
        **kwargs,
    ):
        """Initialize the Bank loader.

        Args:
            base_data_path: Root directory of the Bank dataset.
            output_dir: Directory for processed output / cache files.
            record_file_name: Name of the anomaly record CSV file.
            **kwargs: Forwarded to CommonDataLoader. ``sampling_rate`` is
                always overridden to "60s"; ``cache_prefix`` defaults to
                "bank_loader" but may be overridden by the caller.
        """
        # Bank data is already sampled at 60s -- force the rate so the
        # shared pipeline does not resample it again.
        kwargs["sampling_rate"] = "60s"
        # setdefault avoids a duplicate-keyword TypeError when callers pass
        # cache_prefix themselves (the old code passed it explicitly).
        kwargs.setdefault("cache_prefix", "bank_loader")
        super().__init__(
            base_data_path,
            output_dir,
            record_file_name,
            **kwargs,
        )
        self.needs_resampling = False  # data is pre-sampled; skip resampling

        # Bank only ships container metrics as files; service metrics are
        # generated from traces.
        self.metric_files_info = {
            "metric_container.csv": "container"
        }
        # Trace schema: timestamp(float ms), cmdb_id, parent_id, span_id,
        # trace_id, duration
        self.trace_required_cols = [
            "timestamp",
            "cmdb_id",
            "parent_id",
            "span_id",
            "trace_id",
            "duration",
        ]
        self.trace_timestamp_col = "timestamp"  # already named 'timestamp'
        self.trace_timestamp_unit = "ms"  # float milliseconds in Bank traces
        self.trace_latency_col_original = "duration"
        self.trace_latency_col_target = "latency"

        # For generating metrics from trace (Bank specific: service id is
        # cmdb_id + "_svc").
        self.trace_service_identifier_col = "cmdb_id"  # base for service name
        self.trace_span_id_col_for_count = "span_id"
        self.generated_metrics_entity_type = "service"

    def _read_single_metric_file(
        self,
        file_path: Path,
        entity_type: str,
        start_time: datetime,
        end_time: datetime,
    ) -> pd.DataFrame:
        """Read one Bank metric CSV and return rows within [start, end].

        Bank metric files carry second-resolution epoch timestamps and a
        'kpi_name' column, which is renamed to 'metric_name'. Returns an
        empty DataFrame if the file is missing or fails validation.
        """
        if not file_path.exists():
            return pd.DataFrame()
        df = pd.read_csv(file_path)

        required = ["timestamp", "cmdb_id", "kpi_name", "value"]
        if not self.validate_dataframe(df, required):
            logger.warning(f"Bank metric file {file_path.name} invalid, skipping.")
            return pd.DataFrame()

        df = df[required].copy()
        # Epoch seconds -> tz-aware timestamps in Asia/Shanghai.
        df["timestamp"] = pd.to_datetime(
            df["timestamp"], unit="s", utc=True
        ).dt.tz_convert("Asia/Shanghai")

        start_time_aware = ensure_timezone(start_time)
        end_time_aware = ensure_timezone(end_time)
        df = df[
            (df["timestamp"] >= start_time_aware) & (df["timestamp"] <= end_time_aware)
        ]

        if not df.empty:
            df.rename(columns={"kpi_name": "metric_name"}, inplace=True)
            df["entity_type"] = entity_type  # expected to be 'container'
        return df

    def _generate_metrics_from_trace(self, trace_df: pd.DataFrame) -> pd.DataFrame:
        """Bank-specific override: derive service metrics from trace spans.

        For raw trace data the service id is built as
        ``<container_cmdb_id>_svc``. Already-standardized dependency data
        (recognized by its column set) is forwarded to the common
        implementation unchanged, since its serviceName is already in that
        format.
        """
        if trace_df.empty:
            return pd.DataFrame()

        trace_df_copy = trace_df.copy()

        # Detect already-standardized dependency data by its column set.
        standardized_cols = [
            "timestamp",
            "traceId",
            "spanId",
            "parentId",
            "serviceName",
            "cmdb_id",
            "latency",
        ]
        is_standardized = all(col in trace_df.columns for col in standardized_cols)

        if is_standardized:
            return super()._generate_metrics_from_trace(trace_df_copy)

        # Raw data: Bank-specific service id creation requires cmdb_id.
        if "cmdb_id" not in trace_df_copy.columns:
            logger.warning("无法为Bank数据创建服务ID：缺少cmdb_id列")
            return pd.DataFrame()

        # Build the service id column used by the parent implementation.
        trace_df_copy["service_cmdb_id_for_metrics"] = trace_df_copy["cmdb_id"] + "_svc"

        # Temporarily point the identifier column at the derived id; restore
        # it in a finally block so an exception in the parent implementation
        # cannot leave the loader in an inconsistent state (the old code
        # skipped the restore on error).
        original_identifier_col = self.trace_service_identifier_col
        self.trace_service_identifier_col = "service_cmdb_id_for_metrics"
        try:
            generated_df = super()._generate_metrics_from_trace(trace_df_copy)
        finally:
            self.trace_service_identifier_col = original_identifier_col

        return generated_df

    def _prepare_dependencies_dataframe(self, trace_df: pd.DataFrame) -> pd.DataFrame:
        """Normalize Bank trace columns to the standard dependency schema.

        Renames id columns to camelCase, builds serviceName as
        cmdb_id + "_svc", marks self-parenting spans as roots, and names the
        latency column per ``trace_latency_col_target``.
        """
        # Bank trace: timestamp(float ms), cmdb_id, parent_id, span_id,
        # trace_id, duration
        deps = trace_df.rename(
            columns={
                "parent_id": "parentId",  # Bank trace has 'parent_id'
                "span_id": "spanId",
                "trace_id": "traceId",
            }
        )

        # Bank specific: serviceName is cmdb_id + "_svc"
        deps["serviceName"] = deps["cmdb_id"] + "_svc"

        # A span whose parentId equals its own spanId is a root; mark it
        # with parentId '0'.
        # BUGFIX: the old guard tested for the pre-rename column 'span_id',
        # which never exists after the rename above, so this clean-up was
        # dead code and never ran.
        if "spanId" in deps.columns and "parentId" in deps.columns:
            deps.loc[deps["parentId"] == deps["spanId"], "parentId"] = "0"

        # Ensure the latency column is named per trace_latency_col_target.
        if (
            self.trace_latency_col_target not in deps.columns
            and self.trace_latency_col_original in deps.columns
        ):
            deps.rename(
                columns={
                    self.trace_latency_col_original: self.trace_latency_col_target
                },
                inplace=True,
            )

        return deps
