import concurrent.futures
import glob
import json
import logging
import os
import threading
from datetime import datetime, timedelta
from pathlib import Path
from typing import List, Dict, Any, Optional, Tuple

import pandas as pd

# Import the base loader and utility helpers
from .base_loader import BaseDataLoader, AnomalyRecord
from ..utils.data_utils import standardize_data, smooth_timeseries
from ..utils.time_utils import ensure_timezone
from ..utils.experiment_data import ExperimentData
from public.config_manager import ConfigManager

logger = logging.getLogger(__name__)
# Lock intended to serialize logging across worker threads.
# NOTE(review): declared but not used anywhere in this visible portion of
# the file — confirm whether other parts of the module acquire it.
log_lock = threading.Lock()


class TelemetryLoader(BaseDataLoader):
    """用于处理SN数据集的专用数据加载器。"""

    def __init__(
        self,
        base_data_path: str,
        output_dir: str,
        record_file_name: str = "record.csv",
        sampling_rate: str = "60s",  # kept for interface compatibility; data is pre-sampled at 15s
        smooth_method: str = "linear",
        smooth_limit: int = 5,
        num_workers: int = 1,
        use_cache: bool = True,
    ):
        """Initialize the SN telemetry data loader.

        Args:
            base_data_path: Root directory holding the fault record files and
                per-experiment data folders.
            output_dir: Directory where processed outputs are written.
            record_file_name: Kept for interface compatibility (not read here).
            sampling_rate: Nominal sampling rate; NOT used for resampling in
                this loader because the input data is already sampled at 15s.
            smooth_method: Method name forwarded to the time-series smoother.
            smooth_limit: Maximum consecutive gap length to interpolate.
            num_workers: Worker count for parallel processing.
            use_cache: Whether to reuse cached intermediate results.

        Raises:
            FileNotFoundError: If ``base_data_path`` does not exist.
        """
        super().__init__(
            base_data_path=base_data_path,
            output_dir=output_dir,
            use_cache=use_cache,
            cache_prefix="telemetry_loader",
        )
        self.record_path = Path(self.base_data_path)
        # User-specified value is stored but only echoed in logs below.
        self.sampling_rate = sampling_rate
        self.num_workers = num_workers
        self.smooth_method = smooth_method
        self.smooth_limit = smooth_limit
        self.config_manager = ConfigManager()

        if not self.record_path.exists():
            raise FileNotFoundError(f"故障记录文件不存在: {self.record_path}")

        logger.info("Telemetry 数据加载器初始化完毕。")
        logger.info(f"故障记录文件: {self.record_path}")
        # Make it explicit in the logs that the 15s sampling of the input is
        # used as-is and sampling_rate does not trigger a resample.
        logger.info(
            f"注意: 输入数据已按15秒采样，将直接使用此采样率进行处理。传入的 sampling_rate ('{self.sampling_rate}') 在此加载器中不用于重采样。"
        )
        logger.info(f"平滑方法: {self.smooth_method} (limit: {self.smooth_limit})")

    def _load_anomaly_records(self) -> List[AnomalyRecord]:
        """Load and normalize anomaly (fault) records from ``self.record_path``.

        Reads every ``SN.fault-*.json`` file under the record directory,
        flattens each file's ``faults`` array into rows, and converts the
        rows into :class:`AnomalyRecord` objects with Asia/Shanghai
        timezone-aware timestamps.

        Returns:
            List of AnomalyRecord objects; empty list when the path is
            missing or no fault files are found.

        Raises:
            ValueError: If the loaded records lack the required columns.
        """
        if self.record_path is None or not self.record_path.exists():
            logger.error(
                f"Record path is not set or file does not exist: {self.record_path}"
            )
            return []
        logger.info(f"Loading anomaly records from: {self.record_path}")

        # Read the fault files one group at a time.
        json_files = glob.glob(os.path.join(self.record_path, "SN.fault-*.json"))
        dfs = []
        for json_file in json_files:
            with open(json_file, "r") as f:
                data = json.load(f)
            # File name "SN.fault-<exp>" maps to experiment name "SN.<exp>".
            base_name = os.path.splitext(os.path.basename(json_file))[0]
            exp_name = base_name.replace("fault-", "", 1)

            # Flatten the `faults` array; each fault becomes one row.
            # FIX: the original called a bare `json_normalize`, which is an
            # unresolved name here — use the pandas entry point.
            df = pd.json_normalize(data, "faults")
            df["exp_name"] = exp_name
            dfs.append(df)

        # FIX: pd.concat([]) raises ValueError; bail out cleanly instead.
        if not dfs:
            logger.warning(f"No SN.fault-*.json files found under {self.record_path}")
            return []

        anomaly_df = pd.concat(dfs, ignore_index=True)

        # Rename columns to the names the rest of the pipeline expects.
        anomaly_df.rename(
            columns={
                "start": "timestamp",  # fault start time
                "name": "cmdb_id",  # fault target component
            },
            inplace=True,
        )

        # Validate required columns before any conversions.
        required_cols = ["timestamp", "cmdb_id"]
        if not self.validate_dataframe(anomaly_df, required_cols):
            logger.error(
                f"Missing required columns. Actual columns: {anomaly_df.columns.tolist()}"
            )
            raise ValueError(
                f"Anomaly record file {self.record_path} missing required columns"
            )

        # Timestamps may be seconds or milliseconds; try seconds first and
        # fall back to milliseconds when the result overflows.
        anomaly_df["datetime_numeric"] = pd.to_numeric(
            anomaly_df["timestamp"], errors="coerce"
        )
        try:
            anomaly_df["datetime_obj"] = pd.to_datetime(
                anomaly_df["datetime_numeric"], unit="s", utc=True
            )
        except pd.errors.OutOfBoundsDatetime:
            anomaly_df["datetime_obj"] = pd.to_datetime(
                anomaly_df["datetime_numeric"], unit="ms", utc=True
            )

        anomaly_df["datetime_obj"] = anomaly_df["datetime_obj"].dt.tz_convert(
            "Asia/Shanghai"
        )
        anomaly_df["unix_timestamp"] = (
            anomaly_df["datetime_obj"].astype("int64") // 10**9
        ).astype(int)

        # Build record objects from the normalized rows.
        records = []
        for _, row in anomaly_df.iterrows():
            records.append(
                AnomalyRecord(
                    anomaly_id=self.get_anomaly_id(
                        row["cmdb_id"], row["unix_timestamp"]
                    ),
                    unix_timestamp=row["unix_timestamp"],
                    datetime_obj=row["datetime_obj"],
                    level=row.get("level", "unknown"),
                    reason=row.get("fault", "unknown"),  # fault type as reason
                    component=row["cmdb_id"],  # same as cmdb_id
                    exp_name=row.get("exp_name"),
                    cmdb_id=row["cmdb_id"],
                    metric=row.get("fault"),
                    extra_info={
                        "duration": row.get("duration"),  # keep extra fields
                        "original_data": row.to_dict(),
                    },
                )
            )

        logger.info(f"Successfully loaded {len(records)} anomaly records.")
        return records

    def _get_experiment_folder(self, record: AnomalyRecord) -> Optional[Path]:
        """Resolve the experiment data folder for a fault record.

        Tries an exact directory-name match on ``record.exp_name`` first,
        then falls back to a substring match; returns ``None`` when no
        directory under ``base_data_path`` matches.
        """
        exp_name = record.exp_name
        base = Path(self.base_data_path)
        subdirs = [entry for entry in base.iterdir() if entry.is_dir()]

        # Pass 1: exact name match.
        for candidate in subdirs:
            if candidate.name == exp_name:
                logger.info(f"找到完全匹配的实验文件夹: {candidate}")
                return candidate

        # Pass 2: substring match as a fallback.
        for candidate in subdirs:
            if exp_name in candidate.name:
                logger.warning(f"找到部分匹配的实验文件夹: {candidate}")
                return candidate

        logger.error(f"未找到与故障记录匹配的实验文件夹，exp_name: {exp_name}")
        return None

    def _read_metric_file(
        self,
        file_path: Path,
        entity_type: str,
        start_time: datetime,
        end_time: datetime,
    ) -> pd.DataFrame:
        """Read one metric CSV and return the rows inside the time window.

        The file must provide ``timestamp`` (epoch milliseconds),
        ``cmdb_id``, ``metric`` and ``value`` columns. The returned frame
        renames ``metric`` to ``metric_name``, adds an ``entity_type``
        column, and drops rows whose value is not numeric. An empty frame
        is returned for missing files or schema mismatches.
        """
        if not file_path.exists():
            logger.debug(f"指标文件不存在: {file_path}")
            return pd.DataFrame()

        frame = pd.read_csv(file_path)

        # Reject files that do not carry the expected schema.
        expected_cols = ["timestamp", "cmdb_id", "metric", "value"]
        if not self.validate_dataframe(frame, expected_cols):
            logger.warning(f"指标文件 {file_path.name} 缺少列或格式不正确，跳过。")
            return pd.DataFrame()

        # Epoch-millisecond timestamps -> timezone-aware Shanghai time.
        frame["timestamp"] = pd.to_datetime(
            frame["timestamp"], unit="ms", utc=True
        ).dt.tz_convert("Asia/Shanghai")

        # Keep only rows inside [start_time, end_time].
        window_start = ensure_timezone(start_time)
        window_end = ensure_timezone(end_time)
        in_window = (frame["timestamp"] >= window_start) & (
            frame["timestamp"] <= window_end
        )
        frame = frame[in_window]

        if frame.empty:
            return frame

        # Standardize column names and tag the entity type.
        frame = frame.rename(columns={"metric": "metric_name"})
        frame["entity_type"] = entity_type

        # Defensive: the schema check above should already guarantee this.
        if "value" not in frame.columns:
            logger.warning(f"指标文件缺少value列: {file_path}")
            return pd.DataFrame()

        # Coerce values to numeric and drop unparsable rows.
        frame["value"] = pd.to_numeric(frame["value"], errors="coerce")
        frame = frame.dropna(subset=["value"])

        return frame

    def load_metrics(
        self,
        start_time: datetime,
        end_time: datetime,
        record: AnomalyRecord,
        dependencies_df: Optional[pd.DataFrame] = None,
    ) -> pd.DataFrame:
        """Load and process all metric data inside the given time window.

        Reads the container / node / service metric CSVs from the experiment
        folder matching ``record``, filters them to [start_time, end_time],
        concatenates them, and runs the smoothing/standardization pipeline.

        Args:
            start_time: Window start (made timezone-aware downstream).
            end_time: Window end.
            record: Fault record used to locate the experiment folder.
            dependencies_df: Unused here; kept for interface compatibility.

        Returns:
            Processed metric DataFrame, or an empty DataFrame when the
            experiment folder or all metric files are missing.
        """
        experiment_folder = self._get_experiment_folder(record)
        logger.info(
            f"开始加载指标数据，时间窗口: {start_time} 到 {end_time}，实验文件夹: {experiment_folder}"
        )

        # FIX: _get_experiment_folder returns None when nothing matches; the
        # original code crashed with AttributeError on `.exists()`.
        if experiment_folder is None or not experiment_folder.exists():
            logger.warning(f"实验文件夹不存在: {experiment_folder}")
            return pd.DataFrame()

        # (file name, entity type, missing-file message) for each source;
        # replaces three copy-pasted read blocks.
        metric_sources = [
            ("metric.csv", "container", "容器指标文件不存在"),
            ("node_metric.csv", "node", "节点指标文件不存在"),
            ("svc_metric.csv", "service", "服务指标文件不存在"),
        ]

        all_metrics_dfs = []
        for file_name, entity_type, missing_msg in metric_sources:
            metric_path = experiment_folder / file_name
            if not metric_path.exists():
                logger.warning(f"{missing_msg}: {metric_path}")
                continue
            metric_df = self._read_metric_file(
                metric_path, entity_type, start_time, end_time
            )
            if not metric_df.empty:
                all_metrics_dfs.append(metric_df)

        if not all_metrics_dfs:
            logger.warning("没有找到任何指标数据")
            return pd.DataFrame()

        # Merge all sources and hand off to the processing pipeline.
        combined_metrics_df = pd.concat(all_metrics_dfs, ignore_index=True)
        logger.info(f"合并后共有 {len(combined_metrics_df)} 条原始指标记录")

        return self._process_loaded_metrics(combined_metrics_df)

    def _process_loaded_metrics(self, metrics_df: pd.DataFrame) -> pd.DataFrame:
        """Smooth, standardize and gap-fill raw metric data (already sampled at 15s).

        Per (cmdb_id, metric_name, entity_type) group: drop NaN values,
        optionally interpolate, standardize via ``standardize_data``, then
        zero-fill every component/metric combination that produced no data,
        so each entity type is dense over its reference time index.

        Returns an empty DataFrame on empty input, missing/unconvertible
        columns, or a failed final concat.
        """
        if metrics_df.empty:
            logger.warning("输入的指标数据为空")
            return pd.DataFrame()

        # A timestamp column is mandatory before any conversion.
        if "timestamp" not in metrics_df.columns:
            logger.error("指标DataFrame缺少 'timestamp' 列")
            return pd.DataFrame()

        # Coerce columns to their proper dtypes.
        try:
            metrics_df["timestamp"] = pd.to_datetime(metrics_df["timestamp"])
            metrics_df["value"] = pd.to_numeric(metrics_df["value"], errors="coerce")
        except Exception as e:
            logger.error(f"转换数据类型失败: {e}")
            return pd.DataFrame()

        # Use timestamp as a sorted index.
        if metrics_df.index.name != "timestamp":
            metrics_df = metrics_df.set_index("timestamp").sort_index()
        elif not isinstance(metrics_df.index, pd.DatetimeIndex):
            metrics_df.index = pd.to_datetime(metrics_df.index)
            metrics_df = metrics_df.sort_index()

        # Unique entity types present in the data.
        all_entity_types = metrics_df["entity_type"].unique()

        # Metric names observed per entity type.
        entity_metrics = {}
        # cmdb_ids observed per entity type.
        entity_cmdb_ids = {}

        for entity_type in all_entity_types:
            # All metric names under this entity type.
            entity_df = metrics_df[metrics_df["entity_type"] == entity_type]
            entity_metrics[entity_type] = entity_df["metric_name"].unique()

            # All cmdb_ids under this entity type.
            entity_cmdb_ids[entity_type] = entity_df["cmdb_id"].unique()

            logger.debug(
                f"实体类型 {entity_type} 有 {len(entity_metrics[entity_type])} 个不同指标，"
                f"{len(entity_cmdb_ids[entity_type])} 个组件"
            )

        # Log a summary of the raw data.
        logger.info(
            f"原始数据: {len(metrics_df)}行, "
            f"{sum(len(ids) for ids in entity_cmdb_ids.values())}个实体类型-组件组合, "
            f"{sum(len(ms) for ms in entity_metrics.values())}个实体类型-指标组合, "
            f"{len(all_entity_types)}个实体类型"
        )

        # For each entity type pick the densest series' time index as the
        # reference index used later for zero-filling.
        time_indices = {}
        for entity_type in all_entity_types:
            entity_df = metrics_df[metrics_df["entity_type"] == entity_type]
            if not entity_df.empty:
                # The (cmdb_id, metric_name) group with the most points
                # defines the reference index.
                group_counts = entity_df.groupby(["cmdb_id", "metric_name"]).size()
                if not group_counts.empty:
                    max_count_idx = group_counts.idxmax()
                    max_group = entity_df[
                        (entity_df["cmdb_id"] == max_count_idx[0])
                        & (entity_df["metric_name"] == max_count_idx[1])
                    ]
                    time_indices[entity_type] = max_group.index.unique()
                    logger.debug(
                        f"实体类型 {entity_type} 基准索引有 {len(time_indices[entity_type])} 个时间点"
                    )

        # Process every (cmdb_id, metric_name, entity_type) combination.
        processed_metrics_list = []
        processed_combinations = set()  # combinations already produced

        # Group the data per combination.
        for (cmdb_id, metric_name, entity_type), group in metrics_df.groupby(
            ["cmdb_id", "metric_name", "entity_type"]
        ):
            try:
                group = group.copy()
                group.dropna(subset=["value"], inplace=True)

                if group.empty:
                    logger.debug(f"组件 {cmdb_id}, 指标 {metric_name} 数据为空，跳过")
                    continue

                # Optional smoothing step.
                # NOTE(review): values were just dropna()'d above, so
                # isna().any() is always False here and smooth_timeseries
                # never runs — confirm whether the dropna or the smoothing
                # was the intended behavior.
                current_series = group["value"]
                if current_series.isna().any():
                    smoothed_series = smooth_timeseries(
                        current_series,
                        method=self.smooth_method,
                        limit=self.smooth_limit,
                    )
                    current_series = smoothed_series

                if current_series.empty or current_series.isna().all():
                    logger.debug(
                        f"组件 {cmdb_id}, 指标 {metric_name} 平滑后数据全为NaN，跳过"
                    )
                    continue

                # Standardize (returns standardized and original series).
                standardized_values, original_values = standardize_data(current_series)

                # Align on the indices that survived standardization.
                valid_indices = standardized_values.dropna().index
                if len(valid_indices) == 0:
                    logger.warning(
                        f"组件 {cmdb_id}, 指标 {metric_name} 标准化后无有效数据，使用零填充"
                    )
                    # Fall back to zeros over the reference index.
                    if (
                        entity_type in time_indices
                        and len(time_indices[entity_type]) > 0
                    ):
                        base_index = time_indices[entity_type]
                        standardized_values = pd.Series(0.0, index=base_index)
                        original_values = pd.Series(0.0, index=base_index)
                        valid_indices = base_index
                    else:
                        continue

                # Assemble the processed frame for this combination.
                processed_df = pd.DataFrame(
                    {
                        "timestamp": valid_indices,
                        "cmdb_id": cmdb_id,
                        "metric_name": metric_name,
                        "value": standardized_values.loc[valid_indices].values,
                        "original_value": original_values.loc[valid_indices].values,
                        "entity_type": entity_type,
                    }
                )

                # Last-resort NaN cleanup before emitting the frame.
                if (
                    processed_df["value"].isna().any()
                    or processed_df["original_value"].isna().any()
                ):
                    mean_original = (
                        processed_df["original_value"].mean()
                        if not processed_df.empty
                        else 0
                    )
                    processed_df["value"].fillna(0, inplace=True)
                    processed_df["original_value"].fillna(mean_original, inplace=True)

                processed_metrics_list.append(processed_df)
                processed_combinations.add((cmdb_id, metric_name, entity_type))
                logger.debug(
                    f"成功处理: 组件 {cmdb_id}, 指标 {metric_name}, 实体类型 {entity_type}, 数据点 {len(processed_df)}"
                )

            except Exception as e:
                logger.error(f"处理组件 {cmdb_id}, 指标 {metric_name} 时出错: {e}")
                continue

        # Zero-fill metrics that are missing for some components.
        fill_count = 0
        for entity_type in all_entity_types:
            if entity_type not in time_indices or len(time_indices[entity_type]) == 0:
                logger.warning(f"实体类型 {entity_type} 没有有效的时间索引，跳过补齐")
                continue

            base_index = time_indices[entity_type]
            metric_names = entity_metrics[entity_type]

            # Only fill cmdb_ids that belong to this entity type.
            for cmdb_id in entity_cmdb_ids[entity_type]:
                for metric_name in metric_names:
                    if (cmdb_id, metric_name, entity_type) in processed_combinations:
                        continue  # already produced above

                    # Build a zero-valued frame over the reference index.
                    filled_df = pd.DataFrame(
                        {
                            "timestamp": base_index,
                            "cmdb_id": cmdb_id,
                            "metric_name": metric_name,
                            "value": 0.0,  # filled with zeros
                            "original_value": 0.0,
                            "entity_type": entity_type,
                        }
                    )

                    processed_metrics_list.append(filled_df)
                    fill_count += 1
                    logger.debug(
                        f"为实体类型 {entity_type} 的组件 {cmdb_id} 填充指标 {metric_name}"
                    )

        if fill_count > 0:
            logger.info(f"已为 {fill_count} 个缺失的组件-指标组合填充零值")

        if not processed_metrics_list:
            logger.warning("处理后没有有效的指标数据")
            return pd.DataFrame()

        # Concatenate all processed frames.
        try:
            final_df = pd.concat(processed_metrics_list, ignore_index=True)

            # Per-entity-type summary of the processed data.
            for entity_type in all_entity_types:
                entity_result = final_df[final_df["entity_type"] == entity_type]
                entity_cmdb_count = entity_result["cmdb_id"].nunique()
                entity_metric_count = entity_result["metric_name"].nunique()
                logger.info(
                    f"实体类型 {entity_type}: {len(entity_result)} 行数据, "
                    f"{entity_cmdb_count} 个组件, {entity_metric_count} 个指标"
                )

            logger.info(
                f"成功处理 {len(final_df)} 条指标记录，总共包含 {final_df['cmdb_id'].nunique()} 个组件, "
                f"{final_df['metric_name'].nunique()} 个指标"
            )
            return final_df
        except Exception as e:
            logger.error(f"合并处理后的指标数据失败: {e}")
            return pd.DataFrame()

    def load_dependencies(
        self, start_time: datetime, end_time: datetime, record: AnomalyRecord
    ) -> pd.DataFrame:
        """Load call-dependency (trace span) data inside the given window.

        Reads ``request_durations.csv`` from the experiment folder matching
        ``record`` and converts it into the standard dependency schema
        (traceId / spanId / parentId / serviceName / cmdb_id / latency).

        Returns:
            Dependency DataFrame, or an empty DataFrame when the folder or
            file is missing, the schema is wrong, or the window is empty.
        """
        experiment_folder = self._get_experiment_folder(record)
        # FIX: the original log message was copy-pasted from load_metrics
        # and claimed metrics were being loaded.
        logger.info(
            f"开始加载依赖数据，时间窗口: {start_time} 到 {end_time}，实验文件夹: {experiment_folder}"
        )

        # FIX: _get_experiment_folder may return None; the original code
        # would crash on the `/` path join below.
        if experiment_folder is None:
            logger.warning("未找到实验文件夹，无法加载依赖数据")
            return pd.DataFrame()

        request_durations_path = experiment_folder / "request_durations.csv"
        if not request_durations_path.exists():
            logger.warning(f"请求持续时间文件不存在: {request_durations_path}")
            return pd.DataFrame()

        # Read the request-duration file and check its schema.
        request_df = pd.read_csv(request_durations_path)
        required_cols = [
            "Timestamp",
            "Trace ID",
            "Span ID",
            "Parent ID",
            "Pod Name",
            "Service Name",
            "API",
            "Duration",
        ]

        if not self.validate_dataframe(request_df, required_cols):
            logger.warning("请求持续时间文件缺少必要的列，跳过。")
            return pd.DataFrame()

        # Timestamps are epoch microseconds; convert to Shanghai time.
        request_df["timestamp"] = pd.to_datetime(
            request_df["Timestamp"], unit="us", utc=True
        ).dt.tz_convert("Asia/Shanghai")

        # Keep only rows inside the requested window.
        start_time_aware = ensure_timezone(start_time)
        end_time_aware = ensure_timezone(end_time)
        request_df = request_df[
            (request_df["timestamp"] >= start_time_aware)
            & (request_df["timestamp"] <= end_time_aware)
        ]

        if request_df.empty:
            logger.warning("过滤后的请求持续时间数据为空")
            return pd.DataFrame()

        # Map into the standard dependency schema.
        dependencies_df = pd.DataFrame(
            {
                "timestamp": request_df["timestamp"],
                "traceId": request_df["Trace ID"],
                "spanId": request_df["Span ID"],
                "parentId": request_df["Parent ID"],
                "serviceName": request_df["API"],
                "cmdb_id": request_df["Pod Name"],  # Pod Name doubles as cmdb_id
                "latency": request_df["Duration"],
            }
        )

        logger.info(f"成功处理 {len(dependencies_df)} 条依赖关系记录")
        return dependencies_df

    def load_trace_metric(
        self,
        record: Optional[AnomalyRecord] = None,
        pod_names: Optional[List[str]] = None,
        from_time: int = 0,
        to_time: int = 0,
        metric_get_pod_name=None,
    ) -> List[List[Any]]:
        """Parse a Jaeger-style ``spans.json`` dump into flat span rows.

        FIX: the original body referenced several names that were never
        defined (``record``, ``json_file``, ``pod_names``, ``from_time``,
        ``to_time``, ``metric_get_pod_name``) and built a file path it never
        opened, so every call raised ``NameError``. Those names are now
        explicit parameters with safe defaults, and the spans file is
        actually opened. It also compared process tag keys against the
        literal string ``"'k8s.pod.name'"`` (with embedded quotes), which
        could never match.

        Args:
            record: Fault record whose experiment folder holds spans.json.
            pod_names: Known pod names used to map service names to pods.
            from_time / to_time: Window in microseconds, forwarded (as
                seconds) to the pod-lookup fallback.
            metric_get_pod_name: Optional callable
                ``(start_s, end_s, service) -> list[str]`` used when a pod
                name cannot be derived from the span itself.

        Returns:
            Rows ``[startTime, traceId, spanId, parentId, pod, service,
            operation, duration]`` for the kept spans: server/producer
            spans, DB spans, and 'SQL Statement' / 'Connection (pooled)'
            operations. Skipped spans pass their parent id through to their
            children.
        """
        result_list: List[List[Any]] = []
        if record is None:
            logger.error("load_trace_metric 需要提供 record 参数")
            return result_list
        pod_names = pod_names or []

        trace_path = Path(self.base_data_path) / record.exp_name / "spans.json"
        if not trace_path.exists():
            logger.warning(f"spans 文件不存在: {trace_path}")
            return result_list

        with open(trace_path, "r") as f:
            traces = json.load(f)

        for trace in traces:
            processes = trace.get("processes", {})
            spans = trace.get("spans", [])

            # Index every span by spanID, enriched with derived attributes.
            nodes: Dict[str, Dict[str, Any]] = {}
            for span in spans:
                kind = db_system = peer_service_name = server_address = ""
                pod_name = ""
                process = processes.get(span["processID"], {})
                for tag in span.get("tags", []):
                    if tag["key"] == "span.kind":
                        kind = tag["value"]
                    elif tag["key"] == "db.system":
                        db_system = tag["value"]
                    elif tag["key"] == "peer.service":
                        peer_service_name = tag["value"]
                    elif tag["key"] == "server.address":
                        server_address = tag["value"]

                # Pod name from the process tags (fixed tag-key literal).
                for ptag in process.get("tags", []):
                    if ptag["key"] in ("k8s.pod.name", "host.name"):
                        pod_name = ptag["value"]
                        break
                if pod_name == "":
                    # Fall back to matching the service name against the
                    # known pod names.
                    service = process.get("serviceName", "unknown")
                    for item in pod_names:
                        if service in item:
                            pod_name = item
                            break

                nodes[span["spanID"]] = {
                    "traceId": span["traceID"],
                    "startTime": span["startTime"],
                    "span": span,
                    "process": process,
                    "kind": kind,
                    "db_system": db_system,
                    "peer_service_name": peer_service_name,
                    "pod_name": pod_name,
                    "server_address": server_address,
                    "children": [],
                }

            # Wire up parent/child relationships via CHILD_OF references.
            for span in spans:
                for ref in span.get("references", []):
                    if ref.get("refType") == "CHILD_OF":
                        parent_node = nodes.get(ref["spanID"])
                        if parent_node:
                            parent_node["children"].append(nodes[span["spanID"]])

            # Roots are spans without a CHILD_OF reference.
            root_nodes = [
                nodes[span["spanID"]]
                for span in spans
                if not any(
                    ref.get("refType") == "CHILD_OF"
                    for ref in span.get("references", [])
                )
            ]

            def walk(node, level=0, parent_id=0):
                """Depth-first walk that keeps only 'interesting' spans."""
                op = node["span"]["operationName"]
                keep = (
                    node["kind"] in ("server", "producer")
                    or node["db_system"] != ""
                    or op in ("SQL Statement", "Connection (pooled)")
                )
                if keep:
                    span_id = node["span"]["spanID"]
                    pod = node["pod_name"]
                    service = node["process"].get("serviceName", "unknown")
                    if node["peer_service_name"] != "":
                        service = node["peer_service_name"]
                        for item in pod_names:
                            if service in item:
                                pod = item
                                break
                    if node["db_system"] != "":
                        service = node["server_address"]
                        for item in pod_names:
                            if service in item:
                                pod = item
                                break
                    if op in ("SQL Statement", "Connection (pooled)"):
                        service = "otel-mysql"
                        pod = ""
                    if pod == "" and metric_get_pod_name is not None:
                        # Last resort: resolve the pod from metrics over the
                        # requested window (microseconds -> seconds).
                        pods = metric_get_pod_name(
                            from_time / 1000000, to_time / 1000000, service
                        )
                        if len(pods) > 0:
                            pod = pods[0]
                    result_list.append(
                        [
                            node["startTime"],
                            node["traceId"],
                            span_id,
                            parent_id,
                            pod,
                            service,
                            op,
                            node["span"]["duration"],
                        ]
                    )
                    new_parent_id = span_id
                    next_level = level + 1
                else:
                    # Drop this node; children inherit the current parent
                    # and the level stays unchanged.
                    new_parent_id = parent_id
                    next_level = level

                for child in node["children"]:
                    walk(child, next_level, new_parent_id)

            for root in root_nodes:
                walk(root)

        return result_list

