import logging
import pandas as pd
from datetime import datetime, timedelta
from pathlib import Path
from typing import List, Dict, Any, Optional, Tuple, Set
import concurrent.futures
import threading
import os
import json

# 导入基类和工具类
from .base_loader import BaseDataLoader, AnomalyRecord
from ..utils.data_utils import standardize_data, smooth_timeseries
from ..utils.time_utils import ensure_timezone
from ..utils.experiment_data import ExperimentData
from source.public.config_manager import ConfigManager

# Module-level logger for this loader module.
logger = logging.getLogger(__name__)
# NOTE(review): log_lock is not used anywhere in this module's visible code —
# presumably intended to serialize log output across worker threads
# (num_workers > 1); confirm before removing.
log_lock = threading.Lock()


class TelemetryLoader(BaseDataLoader):
    """用于处理OpenTelemetry数据集的专用数据加载器。"""

    def __init__(
        self,
        base_data_path: str,
        output_dir: str,
        record_file_name: str = "record.csv",
        sampling_rate: str = "60s",  # kept for interface compatibility; input data is pre-sampled at 15s
        smooth_method: str = "linear",
        smooth_limit: int = 5,
        num_workers: int = 1,
        use_cache: bool = True,
    ):
        """Initialize the telemetry loader and verify the anomaly record file exists.

        Raises:
            FileNotFoundError: if the record CSV is missing under base_data_path.
        """
        super().__init__(
            base_data_path=base_data_path,
            output_dir=output_dir,
            use_cache=use_cache,
            cache_prefix="telemetry_loader",
        )
        # Processing knobs; sampling_rate is informational only in this loader.
        self.sampling_rate = sampling_rate
        self.smooth_method = smooth_method
        self.smooth_limit = smooth_limit
        self.num_workers = num_workers
        self.record_path = Path(self.base_data_path) / record_file_name
        self.config_manager = ConfigManager()

        if not self.record_path.exists():
            raise FileNotFoundError(f"故障记录文件不存在: {self.record_path}")

        logger.info("Telemetry 数据加载器初始化完毕。")
        logger.info(f"故障记录文件: {self.record_path}")
        # Make it explicit in the logs that no resampling happens here.
        logger.info(
            f"注意: 输入数据已按15秒采样，将直接使用此采样率进行处理。传入的 sampling_rate ('{self.sampling_rate}') 在此加载器中不用于重采样。"
        )
        logger.info(f"平滑方法: {self.smooth_method} (limit: {self.smooth_limit})")

    def _load_anomaly_records(self) -> List[AnomalyRecord]:
        """Load the anomaly record CSV and convert each row to an AnomalyRecord.

        Returns:
            List[AnomalyRecord]: parsed records; an empty list (instead of an
            exception) on any top-level failure so callers can keep running.
        """
        logger.info(f"正在加载故障记录文件: {self.record_path}")

        try:
            anomaly_df = pd.read_csv(self.record_path)

            # Fail fast when the record file is missing any required column.
            required_cols = ["exp_name", "metric", "cmdb_id", "timestamp", "datetime", "load"]
            if not self.validate_dataframe(anomaly_df, required_cols):
                logger.error(f"故障记录文件列: {anomaly_df.columns.tolist()}")
                raise ValueError(f"故障记录文件缺少必要列: {required_cols}")

            # "datetime" holds a unix timestamp in seconds; drop unparseable rows.
            anomaly_df["datetime"] = pd.to_numeric(anomaly_df["datetime"], errors="coerce")
            anomaly_df.dropna(subset=["datetime"], inplace=True)
            anomaly_df["datetime"] = anomaly_df["datetime"].astype(int)

            # "timestamp" is a human-readable local time; localize to Asia/Shanghai.
            anomaly_df["datetime_obj"] = pd.to_datetime(
                anomaly_df["timestamp"],
                format="%Y-%m-%d %H:%M:%S",
                errors="coerce",
            ).dt.tz_localize("Asia/Shanghai")

            # Discard rows whose timestamp failed to parse (NaT).
            anomaly_df.dropna(subset=["datetime_obj"], inplace=True)

            records = []
            # FIX: the loop index was previously bound to "_" (conventionally
            # "unused") yet referenced in the warning below; use a real name.
            for idx, row in anomaly_df.iterrows():
                try:
                    component = row["cmdb_id"]
                    level = "container"  # all records are treated as container-level for now
                    reason = row["metric"]  # the affected metric doubles as the failure reason

                    anomaly_id = self.get_anomaly_id(component, row["datetime"])

                    # Extra context (e.g. load level) travels with the record.
                    extra_info = {"load": row["load"]}

                    record = AnomalyRecord(
                        anomaly_id=anomaly_id,
                        unix_timestamp=int(row["datetime"]),
                        datetime_obj=row["datetime_obj"],
                        level=level,
                        reason=reason,
                        component=component,
                        cmdb_id=row["cmdb_id"],
                        metric=row["metric"],
                        path=row["exp_name"],
                        extra_info=extra_info,
                    )

                    records.append(record)

                except Exception as e:
                    # Skip malformed rows but keep processing the rest.
                    logger.warning(f"处理行 {idx} 时出错: {e}")
                    continue

            logger.info(f"成功加载 {len(records)} 条故障记录")
            return records

        except Exception as e:
            logger.error(f"加载故障记录时出错: {e}")
            # Deliberate best-effort: callers get an empty list rather than a crash.
            return []

    def get_anomaly_records(self) -> List[AnomalyRecord]:
        """Return every anomaly record parsed from the record file.

        Returns:
            List[AnomalyRecord]: all loaded anomaly records.
        """
        records = self._load_anomaly_records()
        return records

    def load_dataset(self) -> Dict[str, ExperimentData]:
        """Load the whole dataset: one ExperimentData entry per anomaly record.

        Returns:
            Dict[str, ExperimentData]: mapping of anomaly_id -> experiment data.
        """
        dataset: Dict[str, ExperimentData] = {}

        # Per-record failures are logged and skipped; they do not abort the load.
        for record in self._load_anomaly_records():
            try:
                exp_data = self.load_experiment_data_from_record(record)
                if exp_data:
                    dataset[record.anomaly_id] = exp_data
            except Exception as e:
                logger.error(f"加载实验数据 {record.anomaly_id} 时出错: {e}")

        logger.info(f"成功加载 {len(dataset)} 条实验数据")

        # Align entities/metric sets across time windows via details.json post-processing.
        if dataset:
            self.handle_details_json(dataset)

        return dataset

    def _get_experiment_folder(self, record: AnomalyRecord) -> Optional[Path]:
        """Resolve the experiment folder for a record, falling back to a tree search.

        Returns:
            Optional[Path]: the folder, or None when it cannot be located.
        """
        relative_path = record.path
        if not relative_path:
            logger.error("AnomalyRecord 中的 path 为空，无法定位实验文件夹")
            return None

        # Preferred: the record's relative path joined onto the dataset root.
        full_path = Path(self.base_data_path) / relative_path
        if full_path.exists():
            return full_path

        logger.warning(f"按路径 {full_path} 未找到实验文件夹，尝试回退搜索")

        # Fallback: walk the tree for any directory whose name matches the leaf.
        target_dir_name = Path(relative_path).name
        for root, dirs, _files in os.walk(self.base_data_path):
            if target_dir_name in dirs:
                candidate = Path(root) / target_dir_name
                logger.info(f"通过模糊匹配找到实验文件夹: {candidate}")
                return candidate

        logger.error(f"未找到与故障记录匹配的实验文件夹，path: {relative_path}")
        return None

    def _read_metric_file(
        self,
        file_path: Path,
        entity_type: str,
        start_time: datetime,
        end_time: datetime,
    ) -> pd.DataFrame:
        """Read one metric CSV, filter it to the time window, and normalize columns.

        Args:
            file_path: metric CSV to read.
            entity_type: logical entity type ("container", "node", "service", ...)
                stamped onto the output rows (entity_type and type columns).
            start_time: inclusive window start (made timezone-aware).
            end_time: inclusive window end (made timezone-aware).

        Returns:
            pd.DataFrame: rows with timestamp/cmdb_id/metric_name/value plus
            entity_type/type/ref columns; empty when the file is missing,
            malformed, or has no rows in the window.
        """
        if not file_path.exists():
            logger.debug(f"指标文件不存在: {file_path}")
            return pd.DataFrame()

        df = pd.read_csv(file_path)

        # FIX: all entity types share the same schema requirement — the previous
        # per-type if/else was dead code (both branches were identical).
        required_cols = ["timestamp", "cmdb_id", "metric", "value"]
        if not self.validate_dataframe(df, required_cols):
            logger.warning(f"指标文件 {file_path.name} 缺少列或格式不正确，跳过。")
            return pd.DataFrame()

        # Timestamps are epoch milliseconds; convert to Asia/Shanghai local time.
        df["timestamp"] = pd.to_datetime(
            df["timestamp"], unit="ms", utc=True
        ).dt.tz_convert("Asia/Shanghai")

        # Keep only rows inside the requested (inclusive) window.
        start_time_aware = ensure_timezone(start_time)
        end_time_aware = ensure_timezone(end_time)
        df = df[
            (df["timestamp"] >= start_time_aware) & (df["timestamp"] <= end_time_aware)
        ]

        if not df.empty:
            # Rename to the loader's standard column name and tag the entity type.
            df.rename(columns={"metric": "metric_name"}, inplace=True)
            df["entity_type"] = entity_type
            df["type"] = entity_type

            # "ref" links a container to its hosting node when that info is present;
            # other entity types get an empty ref.
            if entity_type == "container" and "node" in df.columns:
                df["ref"] = df["node"]
            else:
                df["ref"] = ""

            # Defensive re-check — presumably redundant with validate_dataframe
            # above; confirm that helper checks column presence before removing.
            if "value" not in df.columns:
                logger.warning(f"指标文件缺少value列: {file_path}")
                return pd.DataFrame()

            # Coerce values to numeric and drop anything unparseable.
            df["value"] = pd.to_numeric(df["value"], errors="coerce")
            df.dropna(subset=["value"], inplace=True)

        return df

    def load_metrics(
        self,
        start_time: datetime,
        end_time: datetime,
        record: AnomalyRecord,
        dependencies_df: Optional[pd.DataFrame] = None,
    ) -> pd.DataFrame:
        """Load and process all metric data inside the given time window.

        Args:
            start_time: inclusive window start.
            end_time: inclusive window end.
            record: anomaly record used to locate the experiment folder.
            dependencies_df: unused here; kept for interface compatibility.

        Returns:
            pd.DataFrame: the processed metrics, or an empty DataFrame when the
            experiment folder or all metric files are missing.
        """
        experiment_folder = self._get_experiment_folder(record)
        logger.info(
            f"开始加载指标数据，时间窗口: {start_time} 到 {end_time}，实验文件夹: {experiment_folder}"
        )

        # BUG FIX: _get_experiment_folder may return None; the original called
        # .exists() on it unconditionally and crashed with AttributeError.
        if experiment_folder is None or not experiment_folder.exists():
            logger.warning(f"实验文件夹不存在: {experiment_folder}")
            return pd.DataFrame()

        # (file name, entity type, label used in the "missing file" warning) —
        # the three copy-pasted stanzas collapsed into one table-driven loop.
        metric_sources = [
            ("metric.csv", "container", "容器指标"),
            ("node_metric.csv", "node", "节点指标"),
            ("svc_metric.csv", "service", "服务指标"),
        ]

        all_metrics_dfs = []
        for file_name, entity_type, label in metric_sources:
            metric_path = experiment_folder / file_name
            if not metric_path.exists():
                logger.warning(f"{label}文件不存在: {metric_path}")
                continue
            entity_df = self._read_metric_file(
                metric_path, entity_type, start_time, end_time
            )
            if not entity_df.empty:
                all_metrics_dfs.append(entity_df)

        if not all_metrics_dfs:
            logger.warning("没有找到任何指标数据")
            return pd.DataFrame()

        # Merge the per-file frames, then smooth/standardize downstream.
        combined_metrics_df = pd.concat(all_metrics_dfs, ignore_index=True)
        logger.info(f"合并后共有 {len(combined_metrics_df)} 条原始指标记录")

        return self._process_loaded_metrics(combined_metrics_df)

    def _process_loaded_metrics(self, metrics_df: pd.DataFrame) -> pd.DataFrame:
        """Smooth, standardize, and zero-fill raw metric data (pre-sampled at 15s).

        Pipeline:
          1. Coerce timestamp/value dtypes and index the frame by timestamp.
          2. Per entity type, pick the densest (cmdb_id, metric) series as the
             reference time index.
          3. Per (cmdb_id, metric_name, entity_type) group: drop NaNs, smooth
             (see NOTE below), standardize via standardize_data, and emit a
             frame carrying both standardized and original values.
          4. Zero-fill every (cmdb_id, metric) combination of an entity type
             that produced no data, on that type's reference index.

        Returns:
            pd.DataFrame: the concatenated processed frame, or an empty
            DataFrame on empty/invalid input or a failed merge.
        """
        if metrics_df.empty:
            logger.warning("输入的指标数据为空")
            return pd.DataFrame()

        # A timestamp column is required for everything below.
        if "timestamp" not in metrics_df.columns:
            logger.error("指标DataFrame缺少 'timestamp' 列")
            return pd.DataFrame()

        # Coerce columns to their expected dtypes.
        try:
            metrics_df["timestamp"] = pd.to_datetime(metrics_df["timestamp"])
            metrics_df["value"] = pd.to_numeric(metrics_df["value"], errors="coerce")
        except Exception as e:
            logger.error(f"转换数据类型失败: {e}")
            return pd.DataFrame()

        # Index by timestamp and sort chronologically.
        if metrics_df.index.name != "timestamp":
            metrics_df = metrics_df.set_index("timestamp").sort_index()
        elif not isinstance(metrics_df.index, pd.DatetimeIndex):
            metrics_df.index = pd.to_datetime(metrics_df.index)
            metrics_df = metrics_df.sort_index()

        # All distinct entity types present in the data.
        all_entity_types = metrics_df["entity_type"].unique()

        # Metric names observed per entity type.
        entity_metrics = {}
        # Component ids (cmdb_id) observed per entity type.
        entity_cmdb_ids = {}

        for entity_type in all_entity_types:
            # Slice once per entity type and collect its metric names...
            entity_df = metrics_df[metrics_df["entity_type"] == entity_type]
            entity_metrics[entity_type] = entity_df["metric_name"].unique()

            # ...and its component ids.
            entity_cmdb_ids[entity_type] = entity_df["cmdb_id"].unique()

            logger.debug(
                f"实体类型 {entity_type} 有 {len(entity_metrics[entity_type])} 个不同指标，"
                f"{len(entity_cmdb_ids[entity_type])} 个组件"
            )

        # Summary of the raw data before processing.
        logger.info(
            f"原始数据: {len(metrics_df)}行, "
            f"{sum(len(ids) for ids in entity_cmdb_ids.values())}个实体类型-组件组合, "
            f"{sum(len(ms) for ms in entity_metrics.values())}个实体类型-指标组合, "
            f"{len(all_entity_types)}个实体类型"
        )

        # For each entity type, take the densest series' timestamps as the
        # reference index used later for zero-filling.
        time_indices = {}
        for entity_type in all_entity_types:
            entity_df = metrics_df[metrics_df["entity_type"] == entity_type]
            if not entity_df.empty:
                # The (cmdb_id, metric) series with the most points wins.
                group_counts = entity_df.groupby(["cmdb_id", "metric_name"]).size()
                if not group_counts.empty:
                    max_count_idx = group_counts.idxmax()
                    max_group = entity_df[
                        (entity_df["cmdb_id"] == max_count_idx[0])
                        & (entity_df["metric_name"] == max_count_idx[1])
                    ]
                    time_indices[entity_type] = max_group.index.unique()
                    logger.debug(
                        f"实体类型 {entity_type} 基准索引有 {len(time_indices[entity_type])} 个时间点"
                    )

        # Process every combination that actually has data.
        processed_metrics_list = []
        processed_combinations = set()  # combinations already emitted

        # One pass per (cmdb_id, metric_name, entity_type) group.
        for (cmdb_id, metric_name, entity_type), group in metrics_df.groupby(
            ["cmdb_id", "metric_name", "entity_type"]
        ):
            try:
                group = group.copy()
                group.dropna(subset=["value"], inplace=True)

                if group.empty:
                    logger.debug(f"组件 {cmdb_id}, 指标 {metric_name} 数据为空，跳过")
                    continue

                # Gap-smoothing pass.
                # NOTE(review): NaNs in "value" were dropped just above, so this
                # isna().any() guard is always False and smooth_timeseries never
                # actually runs — confirm whether dropping first is intended.
                current_series = group["value"]
                if current_series.isna().any():
                    smoothed_series = smooth_timeseries(
                        current_series,
                        method=self.smooth_method,
                        limit=self.smooth_limit,
                    )
                    current_series = smoothed_series

                if current_series.empty or current_series.isna().all():
                    logger.debug(
                        f"组件 {cmdb_id}, 指标 {metric_name} 平滑后数据全为NaN，跳过"
                    )
                    continue

                # Standardize; returns (standardized, original) series.
                standardized_values, original_values = standardize_data(current_series)

                # Timestamps that survived standardization become the row index.
                valid_indices = standardized_values.dropna().index
                if len(valid_indices) == 0:
                    logger.warning(
                        f"组件 {cmdb_id}, 指标 {metric_name} 标准化后无有效数据，使用零填充"
                    )
                    # Zero-fill on the entity type's reference index when available.
                    if (
                        entity_type in time_indices
                        and len(time_indices[entity_type]) > 0
                    ):
                        base_index = time_indices[entity_type]
                        standardized_values = pd.Series(0.0, index=base_index)
                        original_values = pd.Series(0.0, index=base_index)
                        valid_indices = base_index

                        # Zero-filled frame, including the type and ref columns.
                        processed_df = pd.DataFrame(
                            {
                                "timestamp": valid_indices,
                                "cmdb_id": cmdb_id,
                                "metric_name": metric_name,
                                "value": 0.0,
                                "original_value": 0.0,
                                "entity_type": entity_type,
                                "type": entity_type,  # fall back to entity_type for "type"
                                "ref": "",  # no reference info for filled rows
                            }
                        )
                    else:
                        continue
                else:
                    # Normal path: one row per surviving timestamp.
                    processed_df = pd.DataFrame(
                        {
                            "timestamp": valid_indices,
                            "cmdb_id": cmdb_id,
                            "metric_name": metric_name,
                            "value": standardized_values.loc[valid_indices].values,
                            "original_value": original_values.loc[valid_indices].values,
                            "entity_type": entity_type,
                            "type": group["type"].iloc[0] if "type" in group.columns else entity_type,  # preserve "type" when present
                            "ref": group["ref"].iloc[0] if "ref" in group.columns else "",  # preserve "ref" when present
                        }
                    )

                # Final NaN scrub: zeros for standardized values, the column mean
                # for original values.
                # NOTE(review): fillna(..., inplace=True) on a column selection can
                # trigger pandas chained-assignment warnings on newer versions.
                if (
                    processed_df["value"].isna().any()
                    or processed_df["original_value"].isna().any()
                ):
                    mean_original = (
                        processed_df["original_value"].mean()
                        if not processed_df.empty
                        else 0
                    )
                    processed_df["value"].fillna(0, inplace=True)
                    processed_df["original_value"].fillna(mean_original, inplace=True)

                processed_metrics_list.append(processed_df)
                processed_combinations.add((cmdb_id, metric_name, entity_type))
                logger.debug(
                    f"成功处理: 组件 {cmdb_id}, 指标 {metric_name}, 实体类型 {entity_type}, 数据点 {len(processed_df)}"
                )

            except Exception as e:
                logger.error(f"处理组件 {cmdb_id}, 指标 {metric_name} 时出错: {e}")
                continue

        # Zero-fill combinations that exist in the schema but produced no data.
        fill_count = 0
        for entity_type in all_entity_types:
            if entity_type not in time_indices or len(time_indices[entity_type]) == 0:
                logger.warning(f"实体类型 {entity_type} 没有有效的时间索引，跳过补齐")
                continue

            base_index = time_indices[entity_type]
            metric_names = entity_metrics[entity_type]

            # Only fill metrics for cmdb_ids that belong to this entity type.
            for cmdb_id in entity_cmdb_ids[entity_type]:
                for metric_name in metric_names:
                    if (cmdb_id, metric_name, entity_type) in processed_combinations:
                        continue  # already emitted above

                    # Zero-valued rows on the reference index.
                    filled_df = pd.DataFrame(
                        {
                            "timestamp": base_index,
                            "cmdb_id": cmdb_id,
                            "metric_name": metric_name,
                            "value": 0.0,  # zero fill
                            "original_value": 0.0,
                            "entity_type": entity_type,
                            "type": entity_type,  # fall back to entity_type for "type"
                            "ref": "",  # no reference info for filled rows
                        }
                    )

                    processed_metrics_list.append(filled_df)
                    fill_count += 1
                    logger.debug(
                        f"为实体类型 {entity_type} 的组件 {cmdb_id} 填充指标 {metric_name}"
                    )

        if fill_count > 0:
            logger.info(f"已为 {fill_count} 个缺失的组件-指标组合填充零值")

        if not processed_metrics_list:
            logger.warning("处理后没有有效的指标数据")
            return pd.DataFrame()

        # Concatenate everything into the final frame.
        try:
            final_df = pd.concat(processed_metrics_list, ignore_index=True)

            # Per-entity-type summary of the processed data.
            for entity_type in all_entity_types:
                entity_result = final_df[final_df["entity_type"] == entity_type]
                entity_cmdb_count = entity_result["cmdb_id"].nunique()
                entity_metric_count = entity_result["metric_name"].nunique()
                logger.info(
                    f"实体类型 {entity_type}: {len(entity_result)} 行数据, "
                    f"{entity_cmdb_count} 个组件, {entity_metric_count} 个指标"
                )

            logger.info(
                f"成功处理 {len(final_df)} 条指标记录，总共包含 {final_df['cmdb_id'].nunique()} 个组件, "
                f"{final_df['metric_name'].nunique()} 个指标"
            )
            return final_df
        except Exception as e:
            logger.error(f"合并处理后的指标数据失败: {e}")
            return pd.DataFrame()

    def load_dependencies(
        self, start_time: datetime, end_time: datetime, record: AnomalyRecord
    ) -> pd.DataFrame:
        """Load trace/span dependency data for the record inside the time window.

        Reads request_durations.csv from the experiment folder, filters it to
        [start_time, end_time], and maps it to the loader's standard dependency
        schema (timestamp/traceId/spanId/parentId/serviceName/cmdb_id/latency).

        Args:
            start_time: inclusive window start.
            end_time: inclusive window end.
            record: anomaly record used to locate the experiment folder.

        Returns:
            pd.DataFrame: standardized dependencies, or an empty DataFrame on a
            missing folder/file/column or when no rows fall in the window.
        """
        experiment_folder = self._get_experiment_folder(record)
        # FIX: the original log message was copy-pasted from load_metrics and
        # wrongly claimed to be loading metric data.
        logger.info(
            f"开始加载依赖关系数据，时间窗口: {start_time} 到 {end_time}，实验文件夹: {experiment_folder}"
        )

        # BUG FIX: _get_experiment_folder may return None; the original then
        # crashed on `None / "request_durations.csv"` with TypeError.
        if experiment_folder is None:
            logger.warning(f"实验文件夹不存在: {experiment_folder}")
            return pd.DataFrame()

        request_durations_path = experiment_folder / "request_durations.csv"
        if not request_durations_path.exists():
            logger.warning(f"请求持续时间文件不存在: {request_durations_path}")
            return pd.DataFrame()

        # Read the raw span records and validate the expected columns.
        request_df = pd.read_csv(request_durations_path)
        required_cols = [
            "Timestamp",
            "Trace ID",
            "Span ID",
            "Parent ID",
            "Pod Name",
            "Service Name",
            "API",
            "Duration",
        ]

        if not self.validate_dataframe(request_df, required_cols):
            logger.warning("请求持续时间文件缺少必要的列，跳过。")
            return pd.DataFrame()

        # Timestamps are epoch microseconds; convert to Asia/Shanghai local time.
        request_df["timestamp"] = pd.to_datetime(
            request_df["Timestamp"], unit="us", utc=True
        ).dt.tz_convert("Asia/Shanghai")

        # Keep only spans inside the requested (inclusive) window.
        start_time_aware = ensure_timezone(start_time)
        end_time_aware = ensure_timezone(end_time)
        request_df = request_df[
            (request_df["timestamp"] >= start_time_aware)
            & (request_df["timestamp"] <= end_time_aware)
        ]

        if request_df.empty:
            logger.warning("过滤后的请求持续时间数据为空")
            return pd.DataFrame()

        # Map to the standard dependency schema; Pod Name doubles as cmdb_id.
        dependencies_df = pd.DataFrame(
            {
                "timestamp": request_df["timestamp"],
                "traceId": request_df["Trace ID"],
                "spanId": request_df["Span ID"],
                "parentId": request_df["Parent ID"],
                "serviceName": request_df["API"],
                "cmdb_id": request_df["Pod Name"],
                "latency": request_df["Duration"],
            }
        )

        logger.info(f"成功处理 {len(dependencies_df)} 条依赖关系记录")
        return dependencies_df
