"""
Dashboard 数据访问层
从数据库读取历史数据，提供聚合和统计功能
"""

import json
from datetime import datetime, timedelta, timezone
from typing import Any, Dict, List, Optional, Tuple

import pandas as pd
from sqlalchemy import and_, desc, func

import sys
from pathlib import Path

# Add the project root directory to the Python path
sys.path.insert(0, str(Path(__file__).parent.parent))

from database.db_manager import DatabaseManager
from database.schema import (
    Service, ServiceMetric, Endpoint, EndpointMetric,
    Trace, CollectionLog
)


class DashboardDataAccess:
    """Dashboard 数据访问类"""
    
    def __init__(self, db_manager: DatabaseManager):
        """
        Initialize the data access layer.

        Args:
            db_manager: Database manager that provides sessions to the
                metrics store; every query in this class goes through it.
        """
        self.db_manager = db_manager
    
    # ==================== Services ====================
    
    def get_all_services(self) -> List[Dict[str, Any]]:
        """
        Return all known services, ordered by name.

        Returns:
            List[Dict]: one dict per service with keys 'service_id', 'name',
                'layer', 'group' and 'collected_at' ('collected_at' is a
                datetime, so values are not all strings).
        """
        with self.db_manager.get_session() as session:
            services = session.query(Service).order_by(Service.name).all()
            # Materialize plain dicts before the session closes so no
            # detached ORM instances escape this method.
            return [
                {
                    'service_id': s.service_id,
                    'name': s.name,
                    'layer': s.layer,
                    'group': s.group,
                    'collected_at': s.collected_at
                }
                for s in services
            ]
    
    def get_service_metrics_dataframe(
        self,
        service_name: str,
        start_time: datetime,
        end_time: datetime,
        metric_names: Optional[List[str]] = None
    ) -> pd.DataFrame:
        """
        Fetch service metrics as a DataFrame.

        Args:
            service_name: External service id (matched against ``Service.service_id``).
            start_time: Inclusive lower bound on the metric timestamp.
            end_time: Inclusive upper bound on the metric timestamp.
            metric_names: Restrict to these metric names; None means all metrics.

        Returns:
            pd.DataFrame: columns ``timestamp``, ``metric_name``, ``value``,
            sorted by timestamp. Empty — but with the same columns — when the
            service is unknown or no rows match (the original returned a
            column-less frame in the no-rows case, which broke column access).
        """
        columns = ['timestamp', 'metric_name', 'value']
        with self.db_manager.get_session() as session:
            # Resolve the external service id to the internal primary key.
            service = session.query(Service).filter_by(service_id=service_name).first()
            if not service:
                return pd.DataFrame(columns=columns)

            query = session.query(ServiceMetric).filter(ServiceMetric.service_id == service.id)

            if metric_names:
                query = query.filter(ServiceMetric.metric_name.in_(metric_names))
            if start_time:
                query = query.filter(ServiceMetric.timestamp >= start_time)
            if end_time:
                query = query.filter(ServiceMetric.timestamp <= end_time)

            metrics = query.order_by(ServiceMetric.timestamp).all()

            # Copy plain values out before the session closes (avoids
            # touching detached ORM instances later).
            data = [
                {
                    'timestamp': m.timestamp,
                    'metric_name': m.metric_name,
                    'value': m.value
                }
                for m in metrics
            ]

        # Explicit columns keep the empty-result shape consistent with the
        # unknown-service branch above.
        df = pd.DataFrame(data, columns=columns)
        if not df.empty:
            df = df.sort_values('timestamp')

        return df
    
    def get_service_statistics(
        self,
        service_name: str,
        start_time: datetime,
        end_time: datetime
    ) -> Dict[str, Any]:
        """
        Build a statistics summary for one service.

        Combines the database manager's base statistics with response-time,
        throughput and SLA aggregates computed from the metric history.

        Returns:
            Dict: statistics keyed by category; aggregate keys are only
            present when the corresponding metric has data.
        """
        stats = self.db_manager.get_service_statistics(
            service_name,
            start_time,
            end_time
        )

        df = self.get_service_metrics_dataframe(
            service_name,
            start_time,
            end_time,
            metric_names=['service_resp_time', 'service_cpm', 'service_sla']
        )

        if df.empty:
            return stats

        # Response-time percentiles.
        values = df.loc[df['metric_name'] == 'service_resp_time', 'value']
        if not values.empty:
            stats['response_time'] = {
                'avg': values.mean(),
                'min': values.min(),
                'max': values.max(),
                'p50': values.quantile(0.5),
                'p95': values.quantile(0.95),
                'p99': values.quantile(0.99)
            }

        # Throughput (calls per minute).
        values = df.loc[df['metric_name'] == 'service_cpm', 'value']
        if not values.empty:
            stats['throughput'] = {
                'avg': values.mean(),
                'max': values.max(),
                'total': values.sum()
            }

        # SLA.
        values = df.loc[df['metric_name'] == 'service_sla', 'value']
        if not values.empty:
            stats['sla'] = {
                'avg': values.mean(),
                'min': values.min()
            }

        return stats
    
    # ==================== Endpoints ====================
    
    def get_service_endpoints(self, service_name: str) -> List[Dict[str, Any]]:
        """
        List every endpoint recorded for a service, ordered by name.

        Returns:
            List[Dict]: dicts with 'endpoint_id', 'name' and 'collected_at';
            empty list when the service is unknown.
        """
        with self.db_manager.get_session() as session:
            service = session.query(Service).filter_by(service_id=service_name).first()
            if service is None:
                return []

            rows = (
                session.query(Endpoint)
                .filter_by(service_id=service.id)
                .order_by(Endpoint.name)
                .all()
            )

            # Extract plain values while the session is still open.
            result = []
            for row in rows:
                result.append({
                    'endpoint_id': row.endpoint_id,
                    'name': row.name,
                    'collected_at': row.collected_at
                })
            return result
    
    def get_endpoint_metrics_dataframe(
        self,
        endpoint_id: str,
        start_time: datetime,
        end_time: datetime,
        metric_names: Optional[List[str]] = None
    ) -> pd.DataFrame:
        """
        Fetch endpoint metrics as a DataFrame.

        Args:
            endpoint_id: External endpoint id (matched against ``Endpoint.endpoint_id``).
            start_time: Inclusive lower bound on the metric timestamp.
            end_time: Inclusive upper bound on the metric timestamp.
            metric_names: Restrict to these metric names; None means all metrics.

        Returns:
            pd.DataFrame: columns ``timestamp``, ``metric_name``, ``value``,
            sorted by timestamp. Empty — but with the same columns — when the
            endpoint is unknown or no rows match (the original returned a
            column-less frame in the no-rows case, which broke column access).
        """
        columns = ['timestamp', 'metric_name', 'value']
        with self.db_manager.get_session() as session:
            # Resolve the external endpoint id to the internal primary key.
            endpoint = session.query(Endpoint).filter_by(endpoint_id=endpoint_id).first()
            if not endpoint:
                return pd.DataFrame(columns=columns)

            query = session.query(EndpointMetric).filter(EndpointMetric.endpoint_id == endpoint.id)

            if metric_names:
                query = query.filter(EndpointMetric.metric_name.in_(metric_names))
            if start_time:
                query = query.filter(EndpointMetric.timestamp >= start_time)
            if end_time:
                query = query.filter(EndpointMetric.timestamp <= end_time)

            metrics = query.order_by(EndpointMetric.timestamp).all()

            # Copy plain values out before the session closes.
            data = [
                {
                    'timestamp': m.timestamp,
                    'metric_name': m.metric_name,
                    'value': m.value
                }
                for m in metrics
            ]

        # Explicit columns keep the empty-result shape consistent with the
        # unknown-endpoint branch above.
        df = pd.DataFrame(data, columns=columns)
        if not df.empty:
            df = df.sort_values('timestamp')

        return df
    
    def get_top_slow_endpoints(
        self,
        service_name: str,
        start_time: datetime,
        end_time: datetime,
        top_n: int = 10
    ) -> List[Dict[str, Any]]:
        """
        Rank a service's endpoints by average response time.

        Returns:
            List[Dict]: at most ``top_n`` entries, slowest first, each with
            endpoint id/name, avg/max/min response time and sample count.
        """
        rankings = []
        for ep in self.get_service_endpoints(service_name):
            df = self.get_endpoint_metrics_dataframe(
                ep['endpoint_id'],
                start_time,
                end_time,
                metric_names=['endpoint_resp_time']
            )
            if df.empty:
                # Endpoints without samples in the window are not ranked.
                continue

            values = df['value']
            rankings.append({
                'endpoint_id': ep['endpoint_id'],
                'endpoint_name': ep['name'],
                'avg_response_time': values.mean(),
                'max_response_time': values.max(),
                'min_response_time': values.min(),
                'request_count': len(df)
            })

        # Highest average response time (slowest) first.
        return sorted(
            rankings,
            key=lambda entry: entry['avg_response_time'],
            reverse=True
        )[:top_n]
    
    def get_endpoint_response_time_distribution(
        self,
        service_name: str,
        start_time: datetime,
        end_time: datetime,
        bins: int = 20
    ) -> Tuple[List[float], List[int]]:
        """
        Histogram of endpoint response times across one service's endpoints.

        Args:
            service_name: Service whose endpoints are aggregated.
            start_time: Inclusive lower time bound.
            end_time: Inclusive upper time bound.
            bins: Number of histogram buckets.

        Returns:
            Tuple: (bin_edges, counts); both lists are empty when no
            samples exist in the window.
        """
        samples: List[float] = []
        for ep in self.get_service_endpoints(service_name):
            df = self.get_endpoint_metrics_dataframe(
                ep['endpoint_id'],
                start_time,
                end_time,
                metric_names=['endpoint_resp_time']
            )
            if not df.empty:
                samples.extend(df['value'].tolist())

        if not samples:
            return ([], [])

        # pd.cut buckets the samples into `bins` equal-width intervals;
        # value_counts + sort_index yields per-bucket counts in bin order.
        categories, edges = pd.cut(samples, bins=bins, retbins=True)
        counts = categories.value_counts().sort_index().tolist()

        return (edges.tolist(), counts)
    
    # ==================== Traces ====================
    
    def get_traces_dataframe(
        self,
        service_name: str = None,
        start_time: datetime = None,
        end_time: datetime = None,
        is_error: bool = None,
        limit: int = 100
    ) -> pd.DataFrame:
        """
        Fetch traces as a DataFrame, newest first.

        Returns:
            pd.DataFrame: columns trace_id, segment_id, duration, start_time,
            is_error, endpoint_names, endpoint_names_str, collected_at.
        """
        empty_columns = ['trace_id', 'segment_id', 'duration', 'start_time',
                         'is_error', 'endpoint_names', 'endpoint_names_str', 'collected_at']
        with self.db_manager.get_session() as session:
            query = session.query(Trace)

            if service_name:
                service = session.query(Service).filter_by(service_id=service_name).first()
                if not service:
                    # Unknown service: empty frame with the full column set.
                    return pd.DataFrame(columns=empty_columns)
                query = query.filter(Trace.service_id == service.id)

            if start_time:
                query = query.filter(Trace.start_time >= start_time)
            if end_time:
                query = query.filter(Trace.start_time <= end_time)
            if is_error is not None:
                query = query.filter(Trace.is_error == is_error)

            rows = query.order_by(desc(Trace.start_time)).limit(limit).all()

            # Decode the JSON endpoint-name payload and copy values out
            # while the session is still open.
            records = []
            for row in rows:
                names = json.loads(row.endpoint_names) if row.endpoint_names else []
                records.append({
                    'trace_id': row.trace_id,
                    'segment_id': row.segment_id,
                    'duration': row.duration,
                    'start_time': row.start_time,
                    'is_error': row.is_error,
                    'endpoint_names': names,
                    'endpoint_names_str': ', '.join(names) if names else 'N/A',
                    'collected_at': row.collected_at
                })

        df = pd.DataFrame(records)
        if not df.empty:
            df = df.sort_values('start_time', ascending=False)

        return df
    
    def get_trace_duration_distribution(
        self,
        service_name: str,
        start_time: datetime,
        end_time: datetime,
        bins: int = 20
    ) -> Tuple[List[float], List[int]]:
        """
        Histogram of trace durations for one service.

        Returns:
            Tuple: (bin_edges, counts); both lists are empty when no
            traces exist in the window.
        """
        traces = self.get_traces_dataframe(
            service_name=service_name,
            start_time=start_time,
            end_time=end_time,
            limit=10000  # raised cap so the histogram sees enough samples
        )

        if traces.empty:
            return ([], [])

        # Bucket durations into `bins` equal-width intervals and count
        # samples per bucket, in bin order.
        categories, edges = pd.cut(traces['duration'], bins=bins, retbins=True)
        counts = categories.value_counts().sort_index().tolist()

        return (edges.tolist(), counts)
    
    def get_error_trace_statistics(
        self,
        service_name: str,
        start_time: datetime,
        end_time: datetime
    ) -> Dict[str, Any]:
        """
        Summarize error statistics for a service's traces.

        Fetches a single batch of traces and derives totals, error/success
        counts, rates and mean duration from it. The original issued a second
        error-only query; with both queries capped at 10000 rows the two
        samples could disagree (error count not a subset of the total), and
        the extra round trip was unnecessary.

        Returns:
            Dict: total/error/success trace counts, error/success rates in
            percent, and average duration in ms (None when there are no traces).
        """
        traces_df = self.get_traces_dataframe(
            service_name=service_name,
            start_time=start_time,
            end_time=end_time,
            limit=10000  # large cap; all stats come from this one sample
        )

        total_count = len(traces_df)

        error_count = 0
        avg_duration = None
        if not traces_df.empty:
            if 'is_error' in traces_df.columns:
                # Mirrors the original SQL filter `is_error == True`:
                # NULL/NaN flags are not counted as errors.
                error_count = int((traces_df['is_error'] == True).sum())  # noqa: E712
            if 'duration' in traces_df.columns:
                avg_duration = traces_df['duration'].mean()

        return {
            'total_traces': total_count,
            'error_traces': error_count,
            'success_traces': total_count - error_count,
            'error_rate': (error_count / total_count * 100) if total_count > 0 else 0,
            'success_rate': ((total_count - error_count) / total_count * 100) if total_count > 0 else 0,
            'avg_duration': avg_duration
        }
    
    # ==================== Collection logs ====================
    
    def get_collection_logs_dataframe(
        self,
        start_time: datetime = None,
        end_time: datetime = None,
        status: str = None,
        limit: int = 100
    ) -> pd.DataFrame:
        """
        Fetch collection logs as a DataFrame, newest first.

        Returns:
            pd.DataFrame: one row per collection run with status, counters
            and the decoded error list.
        """
        with self.db_manager.get_session() as session:
            query = session.query(CollectionLog)

            if start_time:
                query = query.filter(CollectionLog.collected_at >= start_time)
            if end_time:
                query = query.filter(CollectionLog.collected_at <= end_time)
            if status:
                query = query.filter(CollectionLog.status == status)

            rows = query.order_by(desc(CollectionLog.collected_at)).limit(limit).all()

            # Decode the JSON error payload and copy values out while the
            # session is still open.
            records = []
            for row in rows:
                details = json.loads(row.error_details) if row.error_details else {}
                errors = details.get('errors', [])
                records.append({
                    'collected_at': row.collected_at,
                    'status': row.status,
                    'message': row.message,
                    'services_collected': row.services_collected,
                    'endpoints_collected': row.endpoints_collected,
                    'traces_collected': row.traces_collected,
                    'metrics_collected': row.metrics_collected,
                    'error_count': len(errors),
                    'errors': errors
                })

        frame = pd.DataFrame(records)
        if not frame.empty:
            frame = frame.sort_values('collected_at', ascending=False)

        return frame
    
    def get_latest_collection_status(self) -> Optional[Dict[str, Any]]:
        """
        Return the most recent collection log entry.

        Returns:
            Dict with the run's status, counters and decoded error list,
            or None when no collection has been logged yet.
        """
        with self.db_manager.get_session() as session:
            latest = (
                session.query(CollectionLog)
                .order_by(desc(CollectionLog.collected_at))
                .first()
            )
            if latest is None:
                return None

            # Decode the JSON error payload while the session is open.
            details = json.loads(latest.error_details) if latest.error_details else {}

            return {
                'collected_at': latest.collected_at,
                'status': latest.status,
                'message': latest.message,
                'services_collected': latest.services_collected,
                'endpoints_collected': latest.endpoints_collected,
                'traces_collected': latest.traces_collected,
                'metrics_collected': latest.metrics_collected,
                'errors': details.get('errors', [])
            }
    
    # ==================== Time-range helpers ====================
    
    @staticmethod
    def get_time_range_preset(preset: str) -> Tuple[datetime, datetime]:
        """
        获取预设时间范围
        
        Args:
            preset: 预设名称，如 '1h', '6h', '24h', '7d', '30d'
        
        Returns:
            Tuple[datetime, datetime]: (start_time, end_time)
        """
        end_time = datetime.utcnow()
        
        if preset == '1h':
            start_time = end_time - timedelta(hours=1)
        elif preset == '6h':
            start_time = end_time - timedelta(hours=6)
        elif preset == '24h':
            start_time = end_time - timedelta(hours=24)
        elif preset == '7d':
            start_time = end_time - timedelta(days=7)
        elif preset == '30d':
            start_time = end_time - timedelta(days=30)
        else:
            # 默认 24 小时
            start_time = end_time - timedelta(hours=24)
        
        return (start_time, end_time)
    
    @staticmethod
    def format_duration(milliseconds: float) -> str:
        """
        格式化时长
        
        Args:
            milliseconds: 毫秒数
        
        Returns:
            str: 格式化后的时长字符串
        """
        if milliseconds < 1000:
            return f"{milliseconds:.0f} ms"
        elif milliseconds < 60000:
            return f"{milliseconds / 1000:.2f} s"
        else:
            return f"{milliseconds / 60000:.2f} min"
    
    # ==================== Streamlit helpers ====================
    
    def get_service_metrics_history(
        self,
        service_id: str,
        start_time: datetime,
        end_time: datetime
    ) -> List[Dict[str, Any]]:
        """
        Service metric history as a list of records (Streamlit-friendly).

        Returns:
            List[Dict]: records with 'timestamp', 'metric_name' and 'value';
            empty list when no data exists.
        """
        frame = self.get_service_metrics_dataframe(service_id, start_time, end_time)
        return [] if frame.empty else frame.to_dict('records')
    
    def get_endpoints_by_service(self, service_id: str) -> List[Dict[str, Any]]:
        """
        Endpoints of a service in Streamlit-friendly form.

        Returns:
            List[Dict]: [{'endpoint_id': ..., 'endpoint_name': ...}, ...];
            empty list when the service is unknown.
        """
        with self.db_manager.get_session() as session:
            service = session.query(Service).filter_by(service_id=service_id).first()
            if service is None:
                return []

            # Build plain dicts while the session is still open.
            return [
                {'endpoint_id': ep.endpoint_id, 'endpoint_name': ep.name}
                for ep in session.query(Endpoint).filter_by(service_id=service.id).all()
            ]
    
    def get_endpoint_metrics_history(
        self,
        endpoint_id: str,
        start_time: datetime,
        end_time: datetime
    ) -> List[Dict[str, Any]]:
        """
        Endpoint metric history as a list of records (Streamlit-friendly).

        Returns:
            List[Dict]: records with 'metric_name', 'value' and 'timestamp',
            ordered by timestamp; empty list when the endpoint is unknown.
        """
        with self.db_manager.get_session() as session:
            endpoint = session.query(Endpoint).filter_by(endpoint_id=endpoint_id).first()
            if endpoint is None:
                return []

            query = session.query(EndpointMetric).filter(EndpointMetric.endpoint_id == endpoint.id)
            if start_time:
                query = query.filter(EndpointMetric.timestamp >= start_time)
            if end_time:
                query = query.filter(EndpointMetric.timestamp <= end_time)

            # Build plain dicts while the session is still open.
            return [
                {
                    'metric_name': row.metric_name,
                    'value': row.value,
                    'timestamp': row.timestamp
                }
                for row in query.order_by(EndpointMetric.timestamp).all()
            ]
    
    def get_traces_list(
        self,
        service_id: str,
        start_time: datetime,
        end_time: datetime,
        limit: int = 50
    ) -> List[Dict[str, Any]]:
        """
        Traces of a service as a list of dicts (for Streamlit display).

        Returns:
            List[Dict]: newest-first trace records; empty list when the
            service is unknown.
        """
        with self.db_manager.get_session() as session:
            service = session.query(Service).filter_by(service_id=service_id).first()
            if service is None:
                return []

            query = session.query(Trace).filter(Trace.service_id == service.id)
            if start_time:
                query = query.filter(Trace.start_time >= start_time)
            if end_time:
                query = query.filter(Trace.start_time <= end_time)

            # Build plain dicts while the session is still open.
            return [
                {
                    'trace_id': row.trace_id,
                    'segment_id': row.segment_id,
                    'endpoint_names': row.endpoint_names,
                    'duration': row.duration,
                    'start_time': row.start_time,
                    'is_error': row.is_error
                }
                for row in query.order_by(desc(Trace.start_time)).limit(limit).all()
            ]
    
    # ==================== CSV export ====================
    
    def export_services_to_csv(
        self,
        service_name: Optional[str] = None,
        start_time: Optional[datetime] = None,
        end_time: Optional[datetime] = None
    ) -> Tuple[pd.DataFrame, pd.DataFrame]:
        """
        Export service rows and their metric time series as DataFrames.

        Args:
            service_name: Service-name substring filter (SQL LIKE); None
                exports every service.
            start_time: Window start; only applied together with end_time.
            end_time: Window end; only applied together with start_time.

        Returns:
            Tuple[pd.DataFrame, pd.DataFrame]: (service list, service metric
            time series). The metrics frame is empty and column-less when no
            service matches.
        """
        with self.db_manager.get_session() as session:
            # Fetch the matching services.
            service_query = session.query(Service)
            
            if service_name:
                service_query = service_query.filter(Service.name.like(f'%{service_name}%'))
            
            # NOTE: the time filter only applies when BOTH bounds are given.
            if start_time and end_time:
                service_query = service_query.filter(
                    and_(
                        Service.collected_at >= start_time,
                        Service.collected_at <= end_time
                    )
                )
            
            services = service_query.all()
            
            # Convert the service rows to plain dicts, collecting the internal
            # primary keys for the metrics query below.
            services_data = []
            service_ids = []
            for s in services:
                services_data.append({
                    'service_id': s.service_id,
                    'service_name': s.name,
                    'layer': s.layer or 'N/A',
                    'collected_at': s.collected_at
                })
                service_ids.append(s.id)
            
            services_df = pd.DataFrame(services_data)
            
            # Fetch the metric time series for those services.
            # NOTE(review): Service columns are listed first while Service is
            # also the explicit join target -- this relies on SQLAlchemy
            # inferring ServiceMetric as the FROM entity; confirm against the
            # SQLAlchemy version in use.
            if service_ids:
                metrics_query = session.query(
                    Service.service_id,
                    Service.name,
                    ServiceMetric.metric_name,
                    ServiceMetric.value,
                    ServiceMetric.timestamp
                ).join(Service, ServiceMetric.service_id == Service.id).filter(
                    Service.id.in_(service_ids)
                )
                
                if start_time and end_time:
                    metrics_query = metrics_query.filter(
                        and_(
                            ServiceMetric.timestamp >= start_time,
                            ServiceMetric.timestamp <= end_time
                        )
                    )
                
                metrics = metrics_query.order_by(
                    Service.name,
                    ServiceMetric.metric_name,
                    ServiceMetric.timestamp
                ).all()
                
                metrics_data = []
                for m in metrics:
                    metrics_data.append({
                        'service_id': m.service_id,
                        'service_name': m.name,
                        'metric_name': m.metric_name,
                        'value': m.value,
                        'timestamp': m.timestamp
                    })
                
                metrics_df = pd.DataFrame(metrics_data)
            else:
                # No services matched: empty (column-less) metrics frame.
                metrics_df = pd.DataFrame()
        
        return services_df, metrics_df
    
    def export_endpoints_to_csv(
        self,
        service_name: Optional[str] = None,
        start_time: Optional[datetime] = None,
        end_time: Optional[datetime] = None
    ) -> Tuple[pd.DataFrame, pd.DataFrame]:
        """
        Export endpoint rows and their metric time series as DataFrames.

        Args:
            service_name: Service-name substring filter (SQL LIKE); None
                exports endpoints of every service.
            start_time: Window start; only applied together with end_time.
            end_time: Window end; only applied together with start_time.

        Returns:
            Tuple[pd.DataFrame, pd.DataFrame]: (endpoint list, endpoint metric
            time series). The metrics frame is empty and column-less when no
            endpoint matches.
        """
        with self.db_manager.get_session() as session:
            # Fetch the endpoint list. The internal primary key is selected
            # up front, which removes the previous N+1 pattern of re-querying
            # each endpoint by its external id just to recover Endpoint.id.
            endpoint_query = session.query(
                Endpoint.id.label('internal_id'),
                Endpoint.endpoint_id,
                Service.service_id,
                Service.name.label('service_name'),
                Endpoint.name.label('endpoint_name'),
                Endpoint.collected_at
            ).join(Service, Endpoint.service_id == Service.id)
            
            if service_name:
                endpoint_query = endpoint_query.filter(Service.name.like(f'%{service_name}%'))
            
            # NOTE: the time filter only applies when BOTH bounds are given.
            if start_time and end_time:
                endpoint_query = endpoint_query.filter(
                    and_(
                        Endpoint.collected_at >= start_time,
                        Endpoint.collected_at <= end_time
                    )
                )
            
            endpoints = endpoint_query.all()
            
            # Convert to plain dicts and collect the internal ids for the
            # metrics query below.
            endpoints_data = []
            endpoint_internal_ids = []
            for e in endpoints:
                endpoints_data.append({
                    'endpoint_id': e.endpoint_id,
                    'service_id': e.service_id,
                    'service_name': e.service_name,
                    'endpoint_name': e.endpoint_name,
                    'collected_at': e.collected_at
                })
                endpoint_internal_ids.append(e.internal_id)
            
            endpoints_df = pd.DataFrame(endpoints_data)
            
            # Fetch the endpoint metric time series.
            if endpoint_internal_ids:
                metrics_query = session.query(
                    Endpoint.endpoint_id,
                    Service.name.label('service_name'),
                    Endpoint.name.label('endpoint_name'),
                    EndpointMetric.metric_name,
                    EndpointMetric.value,
                    EndpointMetric.timestamp
                ).join(Endpoint, EndpointMetric.endpoint_id == Endpoint.id).join(
                    Service, Endpoint.service_id == Service.id
                ).filter(
                    Endpoint.id.in_(endpoint_internal_ids)
                )
                
                if start_time and end_time:
                    metrics_query = metrics_query.filter(
                        and_(
                            EndpointMetric.timestamp >= start_time,
                            EndpointMetric.timestamp <= end_time
                        )
                    )
                
                metrics = metrics_query.order_by(
                    Service.name,
                    Endpoint.name,
                    EndpointMetric.metric_name,
                    EndpointMetric.timestamp
                ).all()
                
                metrics_data = []
                for m in metrics:
                    metrics_data.append({
                        'endpoint_id': m.endpoint_id,
                        'service_name': m.service_name,
                        'endpoint_name': m.endpoint_name,
                        'metric_name': m.metric_name,
                        'value': m.value,
                        'timestamp': m.timestamp
                    })
                
                metrics_df = pd.DataFrame(metrics_data)
            else:
                # No endpoints matched: empty (column-less) metrics frame.
                metrics_df = pd.DataFrame()
        
        return endpoints_df, metrics_df
    
    def export_traces_to_csv(
        self,
        service_name: Optional[str] = None,
        start_time: Optional[datetime] = None,
        end_time: Optional[datetime] = None
    ) -> pd.DataFrame:
        """
        Export trace records to a DataFrame, newest first.

        Args:
            service_name: Service-name substring filter (SQL LIKE); None
                exports traces of every service.
            start_time: Window start; only applied together with end_time.
            end_time: Window end; only applied together with start_time.

        Returns:
            pd.DataFrame: one row per trace.
        """
        with self.db_manager.get_session() as session:
            query = session.query(
                Trace.trace_id,
                Trace.segment_id,
                Service.name.label('service_name'),
                Trace.endpoint_names,
                Trace.duration,
                Trace.start_time,
                Trace.is_error
            ).join(Service, Trace.service_id == Service.id)

            if service_name:
                query = query.filter(Service.name.like(f'%{service_name}%'))

            # The time filter only applies when both bounds are given.
            if start_time and end_time:
                query = query.filter(
                    and_(
                        Trace.start_time >= start_time,
                        Trace.start_time <= end_time
                    )
                )

            rows = query.order_by(desc(Trace.start_time)).all()

            # Copy values out while the session is still open.
            records = [
                {
                    'trace_id': row.trace_id,
                    'segment_id': row.segment_id,
                    'service_name': row.service_name,
                    'endpoint_names': row.endpoint_names or 'N/A',
                    'duration_ms': row.duration,
                    'start_time': row.start_time,
                    'is_error': row.is_error
                }
                for row in rows
            ]

        return pd.DataFrame(records)

