import json
import logging
from datetime import datetime
from pathlib import Path
from typing import List, Dict, Optional

import pandas as pd

from data_processor.loaders.base_loader import AnomalyRecord
from .common_loader import CommonDataLoader  # CommonDataLoader is assumed to live in the common_loader module

# Module-level logger; requires `import logging` at the top of the file.
logger = logging.getLogger(__name__)

class SNDataLoader(CommonDataLoader):
    """Data loader for SN telemetry.

    Reads Jaeger-style JSON trace files and converts them into the
    standardized dependency DataFrame used by the rest of the pipeline
    (columns: timestamp, traceId, spanId, parentId, serviceName,
    cmdb_id, latency).
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Fields every Jaeger span record must carry for the processing below.
        self.trace_required_cols = [
            "traceID", "spanID", "references",
            "startTime", "duration", "processID",
            "operationName",
        ]

    @staticmethod
    def _extract_parent_span_id(span) -> str:
        """Return the CHILD_OF parent spanID of *span*, or "0" for a root span."""
        for ref in span.get("references", []):
            if ref.get("refType") == "CHILD_OF":
                return ref["spanID"]
        return "0"

    def _prepare_dependencies_dataframe(self, trace_df: pd.DataFrame) -> pd.DataFrame:
        """Convert SN-specific Jaeger-format spans into the standard dependency frame.

        Expects one row per span, each row carrying its own trace's
        ``processes`` dict (as produced by
        ``_load_and_filter_trace_data_for_window``).

        Returns an empty DataFrame when there is nothing to process.
        """
        if trace_df.empty:
            return pd.DataFrame()

        processed_spans = []
        for _, span in trace_df.iterrows():
            # Resolve processID -> serviceName from THIS span's own trace
            # process table. Bug fix: the previous implementation used only
            # the first row's table, which mislabeled services whenever
            # spans from several traces were concatenated into one frame.
            process_map = {
                pid: proc["serviceName"]
                for pid, proc in span["processes"].items()
            }
            service_name = process_map.get(
                span["processID"],
                f"unknown_{span['processID']}",
            )

            processed_spans.append({
                "traceId": span["traceID"],
                "spanId": span["spanID"],
                # Root spans get parentId "0".
                "parentId": self._extract_parent_span_id(span),
                # Jaeger startTime is a microsecond-resolution epoch timestamp.
                "timestamp": pd.to_datetime(
                    span["startTime"],
                    unit="us",
                    utc=True,
                ).tz_convert("Asia/Shanghai"),
                "latency": span["duration"] / 1000,  # microseconds -> milliseconds
                "serviceName": service_name,
                "operationName": span["operationName"],
                "cmdb_id": service_name,  # service name doubles as cmdb_id
            })

        if not processed_spans:
            return pd.DataFrame()

        # Select and order the standard output columns (operationName is
        # intentionally dropped; the original's identity rename was a no-op
        # and has been removed).
        deps = pd.DataFrame(processed_spans)[
            ["timestamp", "traceId", "spanId", "parentId",
             "serviceName", "cmdb_id", "latency"]
        ]

        # Defensive: normalize any empty parentId to the root marker "0".
        deps["parentId"] = deps["parentId"].replace({"": "0"})
        return deps

    def _load_and_filter_trace_data_for_window(self, date_folders, start_time, end_time):
        """Override the parent method to load JSON-format trace files.

        For each date folder, reads ``<telemetry_path>/<date>/trace/<file>``,
        flattens every trace to one row per span, and attaches the trace's
        process table to each row so downstream code can resolve service
        names per trace. Missing files are skipped silently.
        """
        all_trace = []
        for date_folder in date_folders:
            trace_file = self.telemetry_path / date_folder / "trace" / self.trace_file_name
            if not trace_file.exists():
                continue

            with open(trace_file, "r") as f:
                trace_data = json.load(f)

            for trace in trace_data:
                spans = pd.json_normalize(trace["spans"])
                # Every span row carries its own trace's process table.
                spans["processes"] = [trace["processes"]] * len(spans)
                all_trace.append(spans)

        if not all_trace:
            return pd.DataFrame()

        full_df = pd.concat(all_trace, ignore_index=True)
        return self._filter_by_time(full_df, start_time, end_time)

    def _filter_by_time(self, df: pd.DataFrame, start: datetime, end: datetime) -> pd.DataFrame:
        """Return rows whose startTime lies within [start, end] (inclusive).

        ``start``/``end`` must be timezone-aware (comparison happens in
        Asia/Shanghai). Bug fix: the previous version wrote a scratch
        ``_ts`` column onto the caller's DataFrame and leaked it into the
        result; this version has no side effects on the input.
        """
        if df.empty:
            return df

        # Jaeger startTime is a microsecond epoch value.
        ts = (
            pd.to_datetime(df["startTime"], unit="us", utc=True)
            .dt.tz_convert("Asia/Shanghai")
        )
        mask = (ts >= start) & (ts <= end)
        return df.loc[mask].copy()