import json
import logging
import os
import re
import uuid
from datetime import datetime
from pathlib import Path
from typing import Any, Dict, List, Optional, Set, Tuple, Union

import pandas as pd

# Configure module-wide logging: timestamped records at INFO level.
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s - %(name)s - %(levelname)s - %(message)s"
)
logger = logging.getLogger(__name__)

def generate_scenario_id() -> str:
    """Return a unique scenario ID of the form ``scenario_<8 hex chars>``."""
    random_suffix = uuid.uuid4().hex[:8]
    return "scenario_" + random_suffix

def load_details_json(folder_path: str) -> Dict[str, Any]:
    """
    Load fault information from the ``details.json`` file in a scenario folder.

    Args:
        folder_path: Path to the scenario folder.

    Returns:
        The parsed fault-information dict, or an empty dict when the file is
        missing or cannot be read/parsed.
    """
    details_path = Path(folder_path) / "details.json"
    if not details_path.exists():
        logger.warning(f"details.json不存在: {details_path}")
        return {}

    try:
        # Explicit UTF-8: the platform default encoding may differ, and the
        # file can contain non-ASCII fault descriptions.
        with open(details_path, 'r', encoding='utf-8') as f:
            return json.load(f)
    except (OSError, UnicodeDecodeError, json.JSONDecodeError) as e:
        logger.error(f"读取details.json失败: {details_path}, 错误: {e}")
        return {}

def parse_folder_name(folder_name: str) -> Tuple[Optional[str], Optional[str]]:
    """
    Extract the fault component and fault reason from a scenario folder name.

    Expected format: ``{prefix}_{timestamp}_{component}-{reason}``, where the
    timestamp is four underscore-separated digit groups; a looser
    ``..._{component}-{reason}`` form is tried as a fallback.
    """
    patterns = (
        r'.*_\d+_\d+_\d+_\d+_(\w+)-(\w+)$',  # strict: full timestamp present
        r'.*_(\w+)-(\w+)$',                  # loose fallback
    )
    try:
        for pattern in patterns:
            match = re.match(pattern, folder_name)
            if match is not None:
                return match.group(1), match.group(2)
    except Exception as e:
        logger.warning(f"解析文件夹名称失败: {folder_name}, 错误: {e}")

    return None, None

def reconstruct_traces(dependencies_df: pd.DataFrame) -> Dict[str, List]:
    """
    Rebuild all traces from the dependency DataFrame.

    Args:
        dependencies_df: DataFrame with one row per span; must contain the
            columns listed in ``required_cols`` below.

    Returns:
        Mapping of ``trace_id -> [span dicts]``, each trace's spans sorted by
        ``startTime``; an empty dict when input is missing or malformed.
    """
    if dependencies_df is None or dependencies_df.empty:
        return {}

    # Make sure the required columns exist before grouping.
    required_cols = ["traceId", "spanId", "parentSpanId", "cmdb_id", "startTime", "endTime"]
    if not all(col in dependencies_df.columns for col in required_cols):
        logger.warning(f"依赖关系DataFrame缺少必要列: {required_cols}")
        return {}

    base_cols = ("spanId", "parentSpanId", "cmdb_id", "startTime", "endTime")
    traces: Dict[str, List] = {}
    for trace_id, group in dependencies_df.groupby("traceId"):
        spans = []
        # to_dict("records") is far cheaper than iterrows(): no per-row
        # Series is materialized.
        for row in group.to_dict("records"):
            # Required columns are always included, even when NaN.
            span = {col: row[col] for col in base_cols}
            # Carry over any extra columns (including traceId) holding a value.
            for col, value in row.items():
                if col not in span and not pd.isna(value):
                    span[col] = value
            spans.append(span)
        spans.sort(key=lambda s: s["startTime"])
        traces[trace_id] = spans

    return traces

def trace_contains_component(spans: List[Dict], component: str) -> bool:
    """Return True when any span in the trace runs on the given component."""
    return any(
        "cmdb_id" in span and span["cmdb_id"] == component
        for span in spans
    )

def get_trace_length(exp_data, trace_id: str) -> int:
    """Return the number of spans in the given trace (0 when unavailable)."""
    df = exp_data.dependencies_df
    if df is None or df.empty or "traceId" not in df.columns:
        return 0
    return int((df["traceId"] == trace_id).sum())

def find_scenario_by_id(scenarios: List, scenario_id: str):
    """Return the first scenario whose ``scenario_id`` matches, or None."""
    hits = (s for s in scenarios if s.scenario_id == scenario_id)
    return next(hits, None)

def ensure_dir(directory: Union[str, Path]) -> Path:
    """
    Ensure *directory* exists, creating it (and any missing parents) if needed.

    Args:
        directory: Target directory as a string or ``Path``.

    Returns:
        The directory as a ``Path`` object.
    """
    # NOTE: the annotation must be Union[str, Path] — `str or Path` evaluates
    # to just `str` at runtime and misleads type checkers.
    path = Path(directory)
    path.mkdir(parents=True, exist_ok=True)
    return path

def write_json_file(file_path: Union[str, Path], data: Any) -> bool:
    """
    Serialize *data* to *file_path* as pretty-printed UTF-8 JSON.

    Args:
        file_path: Destination path as a string or ``Path``.
        data: JSON-serializable object to write.

    Returns:
        True on success; False when the file cannot be written or *data*
        is not JSON-serializable (the error is logged).
    """
    try:
        with open(file_path, 'w', encoding='utf-8') as f:
            json.dump(data, f, ensure_ascii=False, indent=2)
        return True
    # OSError: I/O failures; TypeError/ValueError: unserializable data.
    except (OSError, TypeError, ValueError) as e:
        logger.error(f"写入JSON文件失败: {file_path}, 错误: {e}")
        return False
