#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
日志查询服务
支持 Spring API 日志和 Minio 审计日志的查询统计
"""
import pymysql
import logging
import time
from datetime import datetime, timedelta
from typing import Dict, List, Any, Optional, Tuple
from app.utils.mysql_db import get_db_cursor

# 配置日志
logger = logging.getLogger(__name__)


class LogService:
    """日志查询服务类"""
    
    def __init__(self):
        """初始化日志服务，默认连接StarRocks 65环境"""
        self.default_config_id = None  # 将在首次使用时动态获取
    
    def _get_starrocks_connection(self):
        """
        Open a connection to the StarRocks 65 environment ("tds" database).

        Looks up the connection parameters from the `service_config` table
        via the app's MySQL helper, then connects with pymysql, retrying up
        to 3 times with escalating back-off (30s after the first failure,
        60s after the second).

        Returns:
            pymysql.Connection: an open connection to the `tds` database
                using a DictCursor.

        Raises:
            Exception: if no active StarRocks-65 config row exists, or if
                all connection attempts fail (the last pymysql error is
                re-raised).
        """
        with get_db_cursor() as cursor:
            # Fetch the StarRocks-65 connection config from service_config.
            cursor.execute("""
                SELECT id, host, port, username, password
                FROM service_config
                WHERE service_type = 'starrocks'
                  AND (config_name LIKE '%65%' OR config_name LIKE '%STARROCKS(65)%')
                  AND is_active = 1
                  AND is_deleted = 0
                LIMIT 1
            """)
            config = cursor.fetchone()
            
            if not config:
                raise Exception("未找到StarRocks 65环境配置，请先在设置中配置")
            
            # Cache the config id for later reuse by other queries.
            if not self.default_config_id:
                self.default_config_id = config.get('id')
            
            logger.info(f"连接StarRocks 65环境: {config.get('host')}:{config.get('port')}/tds")
            
            # Retry loop (kept consistent with the MySQL/StarRocks helpers).
            max_retries = 3
            connection = None
            last_error = None
            
            for retry in range(max_retries):
                try:
                    if retry > 0:
                        logger.info(f"🔄 重试StarRocks连接 (尝试{retry+1}/{max_retries})")
                    
                    connection = pymysql.connect(
                        host=config.get('host'),
                        port=int(config.get('port')),
                        user=config.get('username'),
                        password=config.get('password'),
                        database='tds',
                        charset='utf8mb4',
                        cursorclass=pymysql.cursors.DictCursor
                    )
                    
                    logger.info(f"✅ StarRocks连接成功")
                    return connection
                    
                except Exception as e:
                    last_error = e
                    logger.error(f"StarRocks连接失败 (尝试{retry+1}/{max_retries}): {e}")
                    
                    if retry < max_retries - 1:
                        # Escalating wait: 30s after the 1st failure, 60s after the 2nd.
                        wait_time = 30 if retry == 0 else 60
                        logger.info(f"⏰ 等待{wait_time}秒后重试...")
                        time.sleep(wait_time)
                    else:
                        # All attempts exhausted; fall through to the raise below.
                        logger.error(f"❌ StarRocks连接重试{max_retries}次均失败")
            
            # Final failure: surface the last pymysql error to the caller.
            raise last_error
    
    def _parse_time_range(self, start_time: Optional[str] = None, end_time: Optional[str] = None) -> Tuple[str, str]:
        """
        解析时间范围，设置默认值
        
        Args:
            start_time: 开始时间字符串，格式：'YYYY-MM-DD HH:MM:SS'
            end_time: 结束时间字符串，格式：'YYYY-MM-DD HH:MM:SS'
        
        Returns:
            tuple: (start_time, end_time)
        """
        if not end_time:
            end_time = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        
        if not start_time:
            # 默认查询今日数据
            start_time = datetime.now().strftime('%Y-%m-%d 00:00:00')
        
        return start_time, end_time
    
    def _build_time_filter(self, start_time: Optional[str] = None, end_time: Optional[str] = None, 
                          time_field: str = 'log_timestamp') -> Tuple[str, List]:
        """
        构建可选的时间过滤SQL条件
        
        Args:
            start_time: 开始时间字符串，格式：'YYYY-MM-DD HH:MM:SS'，None表示不过滤
            end_time: 结束时间字符串，格式：'YYYY-MM-DD HH:MM:SS'，None表示不过滤
            time_field: 时间字段名，默认为'log_timestamp'
        
        Returns:
            tuple: (SQL WHERE条件字符串, 参数列表)
                   如果没有时间过滤，返回 ('', [])
        """
        conditions = []
        params = []
        
        # 如果两个时间参数都没有，返回空条件
        if not start_time and not end_time:
            return '', []
        
        # 如果有开始时间
        if start_time:
            conditions.append(f"{time_field} >= %s")
            params.append(start_time)
        
        # 如果有结束时间
        if end_time:
            conditions.append(f"{time_field} <= %s")
            params.append(end_time)
        
        # 组合条件
        if conditions:
            return ' AND ' + ' AND '.join(conditions), params
        
        return '', []
    
    def _format_datetime(self, dt) -> str:
        """
        格式化日期时间，统一为秒级精度
        
        Args:
            dt: datetime对象或字符串
        
        Returns:
            str: 格式化后的时间字符串 (YYYY-MM-DD HH:MM:SS)，如果为None则返回None
        """
        if dt is None:
            return None
        
        if isinstance(dt, datetime):
            return dt.strftime('%Y-%m-%d %H:%M:%S')
        
        # 如果是字符串，截取前19个字符（到秒）
        if isinstance(dt, str):
            return dt[:19] if len(dt) >= 19 else dt
        
        return str(dt)[:19]
    
    # ========== Spring API 日志查询方法 ==========
    
    def get_service_list(self) -> List[str]:
        """
        List the distinct service names present in the Spring API log table.

        Returns:
            List[str]: non-empty service names, sorted alphabetically.
        """
        conn = self._get_starrocks_connection()
        try:
            with conn.cursor() as cursor:
                sql = """
                SELECT DISTINCT service_name
                FROM tds_spring_api_log
                WHERE service_name IS NOT NULL
                  AND service_name != ''
                ORDER BY service_name
                """
                cursor.execute(sql)
                rows = cursor.fetchall()

            # Drop rows whose name is still falsy, defensively.
            names = [row['service_name'] for row in rows if row.get('service_name')]
            logger.info(f"获取到 {len(names)} 个服务")
            return names
        finally:
            conn.close()
    
    def get_api_overview(self, start_time: Optional[str] = None, end_time: Optional[str] = None, 
                        service_name: Optional[str] = None) -> Dict[str, Any]:
        """
        Aggregate overview statistics for the Spring API log.

        Args:
            start_time: range start 'YYYY-MM-DD HH:MM:SS' (None = unbounded).
            end_time: range end 'YYYY-MM-DD HH:MM:SS' (None = unbounded).
            service_name: restrict the stats to one service (optional).

        Returns:
            dict: total_requests, avg_response_time, success_rate (%),
                error_count, qps and the echoed time_range. qps is 0
                unless both time bounds were supplied.

        Raises:
            Exception: re-raised after logging on any connection/query failure.
        """
        conn = None
        
        try:
            conn = self._get_starrocks_connection()
            cursor = conn.cursor()
            
            # Optional time filter on log_timestamp (' AND ...' fragment + params).
            time_filter, time_params = self._build_time_filter(start_time, end_time, 'log_timestamp')
            
            # QPS = rows / range length in seconds; only computable with both
            # bounds, otherwise a literal 0 column is selected instead.
            if start_time and end_time:
                qps_sql = f"COUNT(*) / NULLIF(TIMESTAMPDIFF(SECOND, %s, %s), 0) as qps"
                qps_params = [start_time, end_time]
            else:
                qps_sql = "0 as qps"
                qps_params = []
            
            sql = f"""
            SELECT 
                COUNT(*) as total_requests,
                AVG(response_time) as avg_response_time,
                COUNT(CASE WHEN success = 1 THEN 1 END) * 100.0 / NULLIF(COUNT(*), 0) as success_rate,
                COUNT(CASE WHEN success = 0 THEN 1 END) as error_count,
                {qps_sql}
            FROM tds_spring_api_log
            WHERE 1=1
            {time_filter}
            """
            
            # Placeholder order matters: the qps placeholders sit in the
            # SELECT list, before the time-filter ones in the WHERE clause.
            params = qps_params + time_params
            
            if service_name:
                sql += " AND service_name = %s"
                params.append(service_name)
            
            cursor.execute(sql, params)
            result = cursor.fetchone()
            
            # Normalize NULL aggregates (empty result set) to zeros.
            return {
                'total_requests': int(result.get('total_requests') or 0),
                'avg_response_time': round(float(result.get('avg_response_time') or 0), 2),
                'success_rate': round(float(result.get('success_rate') or 0), 2),
                'error_count': int(result.get('error_count') or 0),
                'qps': round(float(result.get('qps') or 0), 2),
                'time_range': {
                    'start': start_time if start_time else 'all',
                    'end': end_time if end_time else 'all'
                }
            }
        
        except Exception as e:
            logger.error(f"获取API概览统计失败: {e}")
            raise
        finally:
            if conn:
                conn.close()
    
    def get_service_health(self, start_time: Optional[str] = None, end_time: Optional[str] = None) -> Dict[str, Any]:
        """
        Per-service health ranking from the Spring API log.

        Args:
            start_time: range start (None = unbounded).
            end_time: range end (None = unbounded).

        Returns:
            dict: {'services': [...]} sorted by error_count then average
                response time (both descending). Each entry carries request
                and error counts, qps (0 without a full time range), success
                rate, last request time and a derived health_status:
                'healthy' (<1% errors), 'warning' (<5%), otherwise 'critical'.

        Raises:
            Exception: re-raised after logging on any connection/query failure.
        """
        conn = None
        
        try:
            conn = self._get_starrocks_connection()
            cursor = conn.cursor()
            
            # Optional time filter on log_timestamp.
            time_filter, time_params = self._build_time_filter(start_time, end_time, 'log_timestamp')
            
            # QPS is only computable when both time bounds are present.
            if start_time and end_time:
                qps_sql = "COUNT(*) / NULLIF(TIMESTAMPDIFF(SECOND, %s, %s), 0) as qps"
                qps_params = [start_time, end_time]
            else:
                qps_sql = "0 as qps"
                qps_params = []
            
            sql = f"""
            SELECT 
                service_name,
                COUNT(*) as total_requests,
                {qps_sql},
                COUNT(CASE WHEN success = 1 THEN 1 END) * 100.0 / NULLIF(COUNT(*), 0) as success_rate,
                AVG(response_time) as avg_response_time,
                COUNT(CASE WHEN success = 0 THEN 1 END) as error_count,
                MAX(log_timestamp) as last_request_time,
                CASE 
                    WHEN COUNT(CASE WHEN success = 0 THEN 1 END) * 100.0 / NULLIF(COUNT(*), 0) < 1 THEN 'healthy'
                    WHEN COUNT(CASE WHEN success = 0 THEN 1 END) * 100.0 / NULLIF(COUNT(*), 0) < 5 THEN 'warning'
                    ELSE 'critical'
                END as health_status
            FROM tds_spring_api_log
            WHERE 1=1
            {time_filter}
            GROUP BY service_name
            ORDER BY error_count DESC, avg_response_time DESC
            """
            
            # qps placeholders come first in the SQL text (SELECT list),
            # the time-filter ones second (WHERE clause).
            params = qps_params + time_params
            cursor.execute(sql, params)
            services = cursor.fetchall()
            
            # Normalize DB values for the API response.
            formatted_services = []
            for svc in services:
                formatted_services.append({
                    'service_name': svc.get('service_name'),
                    'total_requests': int(svc.get('total_requests') or 0),
                    'qps': round(float(svc.get('qps') or 0), 2),
                    'success_rate': round(float(svc.get('success_rate') or 0), 2),
                    'avg_response_time': round(float(svc.get('avg_response_time') or 0), 2),
                    'error_count': int(svc.get('error_count') or 0),
                    'last_request_time': str(svc.get('last_request_time') or ''),
                    'health_status': svc.get('health_status') or 'unknown'
                })
            
            return {'services': formatted_services}
        
        except Exception as e:
            logger.error(f"获取服务健康度排行失败: {e}")
            raise
        finally:
            if conn:
                conn.close()
    
    def get_api_trend(self, start_time: Optional[str] = None, end_time: Optional[str] = None,
                     service_names: Optional[List[str]] = None, granularity: str = 'hour') -> Dict[str, Any]:
        """
        Request-volume trend, bucketed by time and service.

        Args:
            start_time: range start (None = unbounded).
            end_time: range end (None = unbounded).
            service_names: restrict to these services (optional).
            granularity: bucket size: 'hour', 'minute' or 'day'; any other
                value falls back to 'hour'.

        Returns:
            dict: {'trends': [{'time': <bucket>, <service_name>:
                {request_count, avg_response_time, error_count}, ...}, ...]}
                ordered by bucket then service.

        Raises:
            Exception: re-raised after logging on any connection/query failure.
        """
        conn = None
        
        try:
            conn = self._get_starrocks_connection()
            cursor = conn.cursor()
            
            # Optional time filter on log_timestamp.
            time_filter, time_params = self._build_time_filter(start_time, end_time, 'log_timestamp')
            
            # date_format pattern per granularity. '%' is doubled because the
            # final SQL string goes through pymysql's %-style parameter
            # substitution, which collapses '%%' back to '%'.
            time_format = {
                'hour': '%%Y-%%m-%%d %%H:00:00',
                'minute': '%%Y-%%m-%%d %%H:%%i:00',
                'day': '%%Y-%%m-%%d 00:00:00'
            }.get(granularity, '%%Y-%%m-%%d %%H:00:00')
            
            sql = f"""
            SELECT 
                date_format(log_timestamp, '{time_format}') as time_bucket,
                service_name,
                COUNT(*) as request_count,
                AVG(response_time) as avg_response_time,
                COUNT(CASE WHEN success = 0 THEN 1 END) as error_count
            FROM tds_spring_api_log
            WHERE 1=1
            {time_filter}
            """
            
            params = time_params
            
            # One %s placeholder per requested service name.
            if service_names:
                placeholders = ','.join(['%s'] * len(service_names))
                sql += f" AND service_name IN ({placeholders})"
                params.extend(service_names)
            
            sql += " GROUP BY time_bucket, service_name ORDER BY time_bucket, service_name"
            
            cursor.execute(sql, params)
            results = cursor.fetchall()
            
            # Pivot rows into one dict per time bucket, keyed by service name.
            trends_dict = {}
            for row in results:
                time_bucket = str(row.get('time_bucket'))
                service_name = row.get('service_name')
                
                if time_bucket not in trends_dict:
                    trends_dict[time_bucket] = {'time': time_bucket}
                
                trends_dict[time_bucket][service_name] = {
                    'request_count': int(row.get('request_count') or 0),
                    'avg_response_time': round(float(row.get('avg_response_time') or 0), 2),
                    'error_count': int(row.get('error_count') or 0)
                }
            
            trends = list(trends_dict.values())
            
            return {'trends': trends}
        
        except Exception as e:
            logger.error(f"获取请求趋势失败: {e}")
            raise
        finally:
            if conn:
                conn.close()
    
    def get_service_apis(self, start_time: Optional[str] = None, end_time: Optional[str] = None,
                        service_name: Optional[str] = None, top_n: int = 20) -> Dict[str, Any]:
        """
        Per-service API call statistics.

        Args:
            start_time: range start (None = unbounded).
            end_time: range end (None = unbounded).
            service_name: restrict to one service (optional).
            top_n: maximum number of APIs returned per service.

        Returns:
            dict: {'services': [{'service_name', 'total_apis', 'apis'}]}.
                'apis' holds at most top_n entries (ordered by request count,
                descending); 'total_apis' is the full number of distinct
                (path, method) pairs seen for the service.

        Raises:
            Exception: re-raised after logging on any connection/query failure.
        """
        # Normalize top_n early so an odd value cannot yield surprising
        # slicing behavior below.
        top_n = max(int(top_n), 0)
        
        conn = None
        
        try:
            conn = self._get_starrocks_connection()
            cursor = conn.cursor()
            
            # Optional time filter on log_timestamp.
            time_filter, time_params = self._build_time_filter(start_time, end_time, 'log_timestamp')
            
            sql = f"""
            SELECT 
                service_name,
                api_path,
                http_method,
                COUNT(*) as request_count,
                AVG(response_time) as avg_response_time,
                MIN(response_time) as min_response_time,
                MAX(response_time) as max_response_time,
                COUNT(CASE WHEN success = 0 THEN 1 END) as error_count,
                COUNT(CASE WHEN success = 1 THEN 1 END) * 100.0 / NULLIF(COUNT(*), 0) as success_rate
            FROM tds_spring_api_log
            WHERE 1=1
            {time_filter}
            """
            
            # Copy so the helper's list is never mutated in place.
            params = list(time_params)
            
            if service_name:
                sql += " AND service_name = %s"
                params.append(service_name)
            
            sql += " GROUP BY service_name, api_path, http_method"
            sql += " ORDER BY service_name, request_count DESC"
            
            cursor.execute(sql, params)
            results = cursor.fetchall()
            
            # Group rows by service, keeping only the top_n APIs per service
            # while counting every distinct API in total_apis.
            services_dict = {}
            for row in results:
                svc_name = row.get('service_name')
                if svc_name not in services_dict:
                    services_dict[svc_name] = {
                        'service_name': svc_name,
                        'total_apis': 0,
                        'apis': []
                    }
                
                entry = services_dict[svc_name]
                # BUGFIX: count every API row, not just the ones kept below,
                # so total_apis reflects the true number of distinct APIs
                # instead of being capped at top_n.
                entry['total_apis'] += 1
                
                if len(entry['apis']) < top_n:
                    entry['apis'].append({
                        'api_path': row.get('api_path'),
                        'http_method': row.get('http_method'),
                        'request_count': int(row.get('request_count') or 0),
                        'avg_response_time': round(float(row.get('avg_response_time') or 0), 2),
                        'min_response_time': round(float(row.get('min_response_time') or 0), 2),
                        'max_response_time': round(float(row.get('max_response_time') or 0), 2),
                        'error_count': int(row.get('error_count') or 0),
                        'success_rate': round(float(row.get('success_rate') or 0), 2)
                    })
            
            return {'services': list(services_dict.values())}
        
        except Exception as e:
            logger.error(f"获取服务接口调用情况失败: {e}")
            raise
        finally:
            if conn:
                conn.close()
    
    def get_api_errors(self, start_time: Optional[str] = None, end_time: Optional[str] = None,
                      service_name: Optional[str] = None, top_n: int = 20) -> Dict[str, Any]:
        """
        Top error groups from the Spring API log.

        Groups failed requests (success = 0) by service, path, method,
        status code and message, ordered by frequency.

        Args:
            start_time: range start (None = unbounded).
            end_time: range end (None = unbounded).
            service_name: restrict to one service (optional).
            top_n: maximum number of error groups to return.

        Returns:
            dict: {'errors': [...]} where each entry carries the occurrence
                count, distinct client IPs, distinct trace ids and the last
                time the error was seen.

        Raises:
            Exception: re-raised after logging on any connection/query failure.
        """
        # top_n is interpolated into the LIMIT clause as text, so coerce it
        # to a non-negative integer to rule out SQL injection via this path.
        top_n = max(int(top_n), 0)
        
        conn = None
        
        try:
            conn = self._get_starrocks_connection()
            cursor = conn.cursor()
            
            # Optional time filter on log_timestamp.
            time_filter, time_params = self._build_time_filter(start_time, end_time, 'log_timestamp')
            
            sql = f"""
            SELECT 
                service_name,
                api_path,
                http_method,
                status_code,
                message,
                COUNT(*) as error_count,
                COUNT(DISTINCT client_ip) as affected_clients,
                COUNT(DISTINCT trace_id) as affected_traces,
                MAX(log_timestamp) as last_error_time
            FROM tds_spring_api_log
            WHERE success = 0
            {time_filter}
            """
            
            # Copy so the helper's list is never mutated in place.
            params = list(time_params)
            
            if service_name:
                sql += " AND service_name = %s"
                params.append(service_name)
            
            sql += " GROUP BY service_name, api_path, http_method, status_code, message"
            sql += " ORDER BY error_count DESC"
            sql += f" LIMIT {top_n}"
            
            cursor.execute(sql, params)
            errors = cursor.fetchall()
            
            # Normalize DB values for the API response.
            formatted_errors = []
            for err in errors:
                formatted_errors.append({
                    'service_name': err.get('service_name') or '',
                    'api_path': err.get('api_path') or '',
                    'http_method': err.get('http_method') or '',
                    'status_code': int(err.get('status_code') or 0),
                    'message': err.get('message') or '',
                    'error_count': int(err.get('error_count') or 0),
                    'affected_clients': int(err.get('affected_clients') or 0),
                    'affected_traces': int(err.get('affected_traces') or 0),
                    'last_error_time': self._format_datetime(err.get('last_error_time')) or ''
                })
            
            return {'errors': formatted_errors}
        
        except Exception as e:
            logger.error(f"获取错误分析失败: {e}")
            raise
        finally:
            if conn:
                conn.close()
    
    def get_api_logs(self, filters: Dict[str, Any], page: int = 1, page_size: int = 50, 
                     sort_field: str = 'log_timestamp', sort_order: str = 'desc') -> Dict[str, Any]:
        """
        Paged API log listing with statistics over the filtered set.

        Args:
            filters: optional keys: start_time, end_time, service_name,
                api_path (substring), http_method, status_code, success,
                environment, request_body (substring), response_body
                (substring).
            page: 1-based page number (values < 1 are clamped to 1).
            page_size: rows per page (negative values are clamped to 0).
            sort_field: one of log_timestamp / response_time / status_code;
                anything else falls back to log_timestamp.
            sort_order: 'asc' or 'desc' (case-insensitive, default 'desc').

        Returns:
            dict: total, page, page_size, logs (formatted rows) and stats
                (success/failed/slow counts over the whole filtered set,
                where 'slow' means response_time > 1000).

        Raises:
            Exception: re-raised after logging on any connection/query failure.
        """
        start_time = filters.get('start_time')
        end_time = filters.get('end_time')
        
        # Sort-field whitelist (the field name is interpolated into SQL).
        allowed_sort_fields = ['log_timestamp', 'response_time', 'status_code']
        if sort_field not in allowed_sort_fields:
            sort_field = 'log_timestamp'
        
        # Sort-direction whitelist.
        sort_order = sort_order.lower()
        if sort_order not in ['asc', 'desc']:
            sort_order = 'desc'
        
        # BUGFIX: LIMIT/OFFSET are interpolated into the SQL as text, so
        # coerce both to safe integers and clamp page to >= 1 (page <= 0
        # previously produced a negative OFFSET and a SQL error).
        page = max(int(page), 1)
        page_size = max(int(page_size), 0)
        
        conn = None
        
        try:
            conn = self._get_starrocks_connection()
            cursor = conn.cursor()
            
            # Assemble WHERE conditions and bound params in matching order.
            where_conditions = []
            params = []
            
            # Optional time filter; strip the leading ' AND ' so the fragment
            # can be joined uniformly with the other conditions.
            time_filter, time_params = self._build_time_filter(start_time, end_time, 'log_timestamp')
            if time_filter:
                where_conditions.append(time_filter[len(' AND '):])
                params.extend(time_params)
            
            if filters.get('service_name'):
                where_conditions.append("service_name = %s")
                params.append(filters['service_name'])
            
            if filters.get('api_path'):
                where_conditions.append("api_path LIKE %s")
                params.append(f"%{filters['api_path']}%")
            
            if filters.get('http_method'):
                where_conditions.append("http_method = %s")
                params.append(filters['http_method'])
            
            if filters.get('status_code'):
                where_conditions.append("status_code = %s")
                params.append(filters['status_code'])
            
            # success may legitimately be False, so test presence explicitly
            # instead of truthiness.
            if 'success' in filters and filters['success'] is not None:
                where_conditions.append("success = %s")
                params.append(1 if filters['success'] else 0)
            
            if filters.get('environment'):
                where_conditions.append("environment = %s")
                params.append(filters['environment'])
            
            # Body searches match either the raw cast or the JSON rendering.
            if filters.get('request_body'):
                where_conditions.append("(CAST(request_body AS VARCHAR) LIKE %s OR json_string(request_body) LIKE %s)")
                params.append(f"%{filters['request_body']}%")
                params.append(f"%{filters['request_body']}%")
            
            if filters.get('response_body'):
                where_conditions.append("(CAST(response_body AS VARCHAR) LIKE %s OR json_string(response_body) LIKE %s)")
                params.append(f"%{filters['response_body']}%")
                params.append(f"%{filters['response_body']}%")
            
            where_clause = " AND ".join(where_conditions) if where_conditions else "1=1"
            
            # Total row count for pagination.
            count_sql = f"SELECT COUNT(*) as total FROM tds_spring_api_log WHERE {where_clause}"
            cursor.execute(count_sql, params)
            total = cursor.fetchone().get('total', 0)
            
            # Aggregate stats over the whole filtered set (not just this page).
            stats_sql = f"""
            SELECT 
                COUNT(CASE WHEN success = 1 THEN 1 END) as success_count,
                COUNT(CASE WHEN success = 0 THEN 1 END) as failed_count,
                COUNT(CASE WHEN response_time > 1000 THEN 1 END) as slow_count
            FROM tds_spring_api_log 
            WHERE {where_clause}
            """
            cursor.execute(stats_sql, params)
            stats = cursor.fetchone()
            success_count = int(stats.get('success_count', 0))
            failed_count = int(stats.get('failed_count', 0))
            slow_count = int(stats.get('slow_count', 0))
            
            # Fetch the requested page; JSON-ish columns are cast to VARCHAR
            # up front so the rows serialize cleanly.
            offset = (page - 1) * page_size
            data_sql = f"""
            SELECT 
                log_timestamp,
                trace_id,
                service_name,
                api_path,
                http_method,
                api_status,
                status_code,
                response_time,
                success,
                client_ip,
                user_agent,
                server_ip,
                server_port,
                environment,
                cluster,
                node_name,
                host_name,
                host_ip,
                log_level,
                logger_name,
                app_name,
                log_type,
                log_path,
                CAST(request_body AS VARCHAR) as request_body,
                CAST(response_body AS VARCHAR) as response_body,
                CAST(request_headers AS VARCHAR) as request_headers,
                CAST(response_headers AS VARCHAR) as response_headers,
                CAST(tags AS VARCHAR) as tags,
                message
            FROM tds_spring_api_log
            WHERE {where_clause}
            ORDER BY {sort_field} {sort_order}
            LIMIT {page_size} OFFSET {offset}
            """
            
            cursor.execute(data_sql, params)
            logs = cursor.fetchall()
            
            # Normalize row values for the API response (NULL -> '' / 0).
            formatted_logs = []
            for log in logs:
                formatted_logs.append({
                    'log_timestamp': self._format_datetime(log.get('log_timestamp')) or '',
                    'trace_id': log.get('trace_id') or '',
                    'service_name': log.get('service_name') or '',
                    'api_path': log.get('api_path') or '',
                    'http_method': log.get('http_method') or '',
                    'api_status': log.get('api_status') or '',
                    'status_code': int(log.get('status_code') or 0),
                    'response_time': int(log.get('response_time') or 0),
                    'success': bool(log.get('success') or 0),
                    'client_ip': log.get('client_ip') or '',
                    'user_agent': log.get('user_agent') or '',
                    'server_ip': log.get('server_ip') or '',
                    'server_port': log.get('server_port') or '',
                    'environment': log.get('environment') or '',
                    'cluster': log.get('cluster') or '',
                    'node_name': log.get('node_name') or '',
                    'host_name': log.get('host_name') or '',
                    'host_ip': log.get('host_ip') or '',
                    'log_level': log.get('log_level') or '',
                    'logger_name': log.get('logger_name') or '',
                    'app_name': log.get('app_name') or '',
                    'log_type': log.get('log_type') or '',
                    'log_path': log.get('log_path') or '',
                    'request_body': log.get('request_body'),
                    'response_body': log.get('response_body'),
                    'request_headers': log.get('request_headers'),
                    'response_headers': log.get('response_headers'),
                    'tags': log.get('tags'),
                    'message': log.get('message') or ''
                })
            
            return {
                'total': int(total),
                'page': page,
                'page_size': page_size,
                'logs': formatted_logs,
                'stats': {
                    'success_count': success_count,
                    'failed_count': failed_count,
                    'slow_count': slow_count
                }
            }
        
        except Exception as e:
            logger.error(f"获取日志列表失败: {e}")
            raise
        finally:
            if conn:
                conn.close()
    
    # ========== Minio 审计日志查询方法 ==========
    
    def get_minio_overview(self, start_time: Optional[str] = None, end_time: Optional[str] = None) -> Dict[str, Any]:
        """
        Aggregate overview statistics for the Minio audit log.

        Args:
            start_time: range start (None = unbounded).
            end_time: range end (None = unbounded).

        Returns:
            dict: request total, average response time, success rate (%),
                error count, uploaded/downloaded byte totals and the echoed
                time_range.

        Raises:
            Exception: re-raised after logging on any connection/query failure.
        """
        conn = None

        try:
            conn = self._get_starrocks_connection()
            cursor = conn.cursor()

            # Time filter on etl_date (already stored in UTC+8).
            time_filter, time_params = self._build_time_filter(start_time, end_time, 'etl_date')

            sql = f"""
            SELECT 
                COUNT(*) as total_requests,
                AVG(time_to_response/1000000) as avg_response_time,
                COUNT(CASE WHEN status_code < 400 THEN 1 END) * 100.0 / NULLIF(COUNT(*), 0) as success_rate,
                COUNT(CASE WHEN status_code >= 400 THEN 1 END) as error_count,
                SUM(rx_bytes) as total_upload_bytes,
                SUM(tx_bytes) as total_download_bytes
            FROM tds_minio_audit_log
            WHERE 1=1
            {time_filter}
            """

            cursor.execute(sql, time_params)
            row = cursor.fetchone()

            # Normalize NULL aggregates (empty result set) to zeros.
            overview = {
                'total_requests': int(row.get('total_requests') or 0),
                'avg_response_time': round(float(row.get('avg_response_time') or 0), 2),
                'success_rate': round(float(row.get('success_rate') or 0), 2),
                'error_count': int(row.get('error_count') or 0),
                'total_upload_bytes': int(row.get('total_upload_bytes') or 0),
                'total_download_bytes': int(row.get('total_download_bytes') or 0)
            }
            overview['time_range'] = {
                'start': start_time or 'all',
                'end': end_time or 'all'
            }
            return overview

        except Exception as e:
            logger.error(f"获取Minio概览统计失败: {e}")
            raise
        finally:
            if conn:
                conn.close()
    
    def get_minio_api_stats(self, start_time: Optional[str] = None, end_time: Optional[str] = None) -> Dict[str, Any]:
        """
        Request statistics per Minio API name.

        Args:
            start_time: range start (None = unbounded).
            end_time: range end (None = unbounded).

        Returns:
            dict: {'apis': [...]} ordered by request_count descending, each
                entry with request/error counts, average response time and
                downloaded byte total.

        Raises:
            Exception: re-raised after logging on any connection/query failure.
        """
        conn = None

        try:
            conn = self._get_starrocks_connection()
            cursor = conn.cursor()

            # Time filter on etl_date (already stored in UTC+8).
            time_filter, time_params = self._build_time_filter(start_time, end_time, 'etl_date')

            sql = f"""
            SELECT 
                api_name,
                COUNT(*) as request_count,
                COUNT(CASE WHEN status_code >= 400 THEN 1 END) as error_count,
                AVG(time_to_response/1000000) as avg_response_time,
                SUM(tx_bytes) as total_bytes
            FROM tds_minio_audit_log
            WHERE 1=1
            {time_filter}
            GROUP BY api_name
            ORDER BY request_count DESC
            """

            cursor.execute(sql, time_params)

            # Normalize DB values for the API response.
            formatted_apis = [
                {
                    'api_name': row.get('api_name') or '',
                    'request_count': int(row.get('request_count') or 0),
                    'error_count': int(row.get('error_count') or 0),
                    'avg_response_time': round(float(row.get('avg_response_time') or 0), 2),
                    'total_bytes': int(row.get('total_bytes') or 0)
                }
                for row in cursor.fetchall()
            ]

            return {'apis': formatted_apis}

        except Exception as e:
            logger.error(f"获取Minio API统计失败: {e}")
            raise
        finally:
            if conn:
                conn.close()
    
    def get_minio_bucket_stats(self, start_time: Optional[str] = None, end_time: Optional[str] = None) -> Dict[str, Any]:
        """
        Per-bucket traffic statistics from the Minio audit log.

        Heuristic SQL filters strip out values that are clearly not bucket
        names (paths, URL fragments, encoded strings, pure numbers).

        Args:
            start_time: Optional inclusive lower bound (None = unbounded).
            end_time: Optional inclusive upper bound (None = unbounded).

        Returns:
            dict: {'buckets': [...]} ordered by request count, descending.
        """
        conn = None

        try:
            conn = self._get_starrocks_connection()
            cursor = conn.cursor()

            # Time filter targets etl_date (already UTC+8 at ingestion time).
            time_filter, time_params = self._build_time_filter(start_time, end_time, 'etl_date')

            sql = f"""
            SELECT 
                bucket,
                COUNT(*) as request_count,
                SUM(rx_bytes) as upload_bytes,
                SUM(tx_bytes) as download_bytes,
                COUNT(CASE WHEN status_code >= 400 THEN 1 END) as error_count,
                COUNT(CASE WHEN status_code < 400 THEN 1 END) as success_count
            FROM tds_minio_audit_log
            WHERE 1=1
            {time_filter}
              AND bucket IS NOT NULL
              AND bucket != ''
              AND bucket NOT LIKE '%%.%%'  -- 排除包含点号的（通常是文件名）
              AND bucket NOT LIKE '%%/%%'  -- 排除包含斜杠的（通常是路径）
              AND bucket NOT LIKE '%%http%%'  -- 排除包含http的（URL片段）
              AND bucket NOT LIKE '%%25%%'  -- 排除包含URL编码的
              AND LENGTH(bucket) > 2       -- 排除过短的名称
              AND LENGTH(bucket) < 50      -- 排除过长的异常值
              AND bucket NOT IN ('api', 'v2', 'v1', 'json', 'xml')  -- 排除常见的非桶名
              AND bucket NOT REGEXP '^[0-9]+$'  -- 排除纯数字
              AND bucket REGEXP '^[a-zA-Z][a-zA-Z0-9_-]*$'  -- 只保留字母开头、包含字母数字下划线横线的标准桶名
            GROUP BY bucket
            HAVING COUNT(CASE WHEN status_code < 400 THEN 1 END) > 0  -- 只保留至少有一个成功请求的桶
            ORDER BY request_count DESC
            """

            cursor.execute(sql, time_params)
            rows = cursor.fetchall()

            # Normalize NULL aggregates to zeros for a stable JSON shape.
            stats = [
                {
                    'bucket': row.get('bucket') or '',
                    'request_count': int(row.get('request_count') or 0),
                    'upload_bytes': int(row.get('upload_bytes') or 0),
                    'download_bytes': int(row.get('download_bytes') or 0),
                    'error_count': int(row.get('error_count') or 0)
                }
                for row in rows
            ]

            return {'buckets': stats}

        except Exception as e:
            logger.error(f"获取Minio桶统计失败: {e}")
            raise
        finally:
            if conn:
                conn.close()
    
    def get_minio_trend(self, start_time: Optional[str] = None, end_time: Optional[str] = None,
                       granularity: str = 'hour') -> Dict[str, Any]:
        """
        Request trend over time from the Minio audit log.

        Args:
            start_time: Optional inclusive lower bound (None = unbounded).
            end_time: Optional inclusive upper bound (None = unbounded).
            granularity: Bucketing granularity ('hour', 'minute' or 'day');
                anything else falls back to hourly.

        Returns:
            dict: {'trends': [...]} ordered by time bucket.
        """
        conn = None

        try:
            conn = self._get_starrocks_connection()
            cursor = conn.cursor()

            # Time filter targets etl_date (already UTC+8 at ingestion time).
            time_filter, time_params = self._build_time_filter(start_time, end_time, 'etl_date')

            # %% survives the driver's %-interpolation as a literal %.
            formats = {
                'hour': '%%Y-%%m-%%d %%H:00:00',
                'minute': '%%Y-%%m-%%d %%H:%%i:00',
                'day': '%%Y-%%m-%%d 00:00:00'
            }
            time_format = formats.get(granularity, '%%Y-%%m-%%d %%H:00:00')

            sql = f"""
            SELECT 
                date_format(etl_date, '{time_format}') as time_bucket,
                COUNT(*) as request_count,
                SUM(rx_bytes) as total_rx_bytes,
                SUM(tx_bytes) as total_tx_bytes,
                AVG(time_to_response/1000000) as avg_response_ms
            FROM tds_minio_audit_log
            WHERE 1=1
            {time_filter}
            GROUP BY time_bucket
            ORDER BY time_bucket
            """

            cursor.execute(sql, time_params)
            rows = cursor.fetchall()

            # Normalize NULL aggregates to zeros for a stable JSON shape.
            trends = [
                {
                    'time': str(row.get('time_bucket')),
                    'request_count': int(row.get('request_count') or 0),
                    'total_rx_bytes': int(row.get('total_rx_bytes') or 0),
                    'total_tx_bytes': int(row.get('total_tx_bytes') or 0),
                    'avg_response_ms': round(float(row.get('avg_response_ms') or 0), 2)
                }
                for row in rows
            ]

            return {'trends': trends}

        except Exception as e:
            logger.error(f"获取Minio趋势失败: {e}")
            raise
        finally:
            if conn:
                conn.close()
    
    def get_minio_logs(self, filters: Dict[str, Any], page: int = 1, page_size: int = 50,
                       sort_field: str = 'etl_date', sort_order: str = 'desc') -> Dict[str, Any]:
        """
        Fetch a paginated list of Minio audit logs.

        NOTE: the frontend displays etl_date (UTC+8 ingestion time).
        event_time is the raw Minio event timestamp (UTC); it is kept in the
        table but not used for display.

        Args:
            filters: Filter dict (start_time/end_time/api/bucket/object_key/
                user_agent/status_code).
            page: 1-based page number.
            page_size: Rows per page.
            sort_field: Sort column, default etl_date.
            sort_order: 'desc' or 'asc', default 'desc'.

        Returns:
            dict: Total count, page info, log rows and global stats.
        """
        # page/page_size are interpolated directly into LIMIT/OFFSET below,
        # so force them to safe positive ints first — prevents SQL injection
        # (and nonsense values) if a caller passes strings.
        try:
            page = max(1, int(page))
        except (TypeError, ValueError):
            page = 1
        try:
            page_size = max(1, int(page_size))
        except (TypeError, ValueError):
            page_size = 50
        
        # Sort-field whitelist (prevents SQL injection).
        # Frontend compatibility: it sends event_time, which maps to etl_date.
        allowed_sort_fields = ['event_time', 'etl_date', 'time_to_response', 'status_code']
        if sort_field not in allowed_sort_fields:
            sort_field = 'etl_date'
        
        # Field mapping: frontend event_time -> backend etl_date.
        if sort_field == 'event_time':
            sort_field = 'etl_date'
        
        # Sort-direction whitelist.
        sort_order = sort_order.lower()
        if sort_order not in ['asc', 'desc']:
            sort_order = 'desc'
        
        conn = None
        
        try:
            conn = self._get_starrocks_connection()
            cursor = conn.cursor()
            
            # Build parameterized WHERE clauses.
            where_clauses = []
            params = []
            
            # Time window on etl_date (already UTC+8).
            start_time = filters.get('start_time')
            end_time = filters.get('end_time')
            
            if start_time:
                where_clauses.append('etl_date >= %s')
                params.append(start_time)
            if end_time:
                where_clauses.append('etl_date <= %s')
                params.append(end_time)
            
            if 'api' in filters:
                where_clauses.append('api_name = %s')
                params.append(filters['api'])
            
            if 'bucket' in filters:
                where_clauses.append('bucket LIKE %s')
                params.append(f'%{filters["bucket"]}%')
            
            if 'object_key' in filters:
                where_clauses.append('object_key LIKE %s')
                params.append(f'%{filters["object_key"]}%')
            
            if 'user_agent' in filters:
                where_clauses.append('user_agent LIKE %s')
                params.append(f'%{filters["user_agent"]}%')
            
            if 'status_code' in filters:
                where_clauses.append('status_code = %s')
                params.append(filters['status_code'])
            
            where_sql = ' AND '.join(where_clauses) if where_clauses else '1=1'
            
            # Total row count under the current filters.
            count_sql = f"""
            SELECT COUNT(*) as total
            FROM tds_minio_audit_log
            WHERE {where_sql}
            """
            
            cursor.execute(count_sql, params)
            total = cursor.fetchone().get('total', 0)
            
            # Global stats under the current filters.
            # NOTE: %% escapes the LIKE wildcard for the driver's %-formatting.
            stats_sql = f"""
            SELECT 
                COUNT(CASE WHEN api_name LIKE '%%Get%%' THEN 1 END) as get_count,
                COUNT(CASE WHEN api_name LIKE '%%Put%%' THEN 1 END) as put_count,
                COUNT(CASE WHEN time_to_response > 1000000 THEN 1 END) as slow_count
            FROM tds_minio_audit_log
            WHERE {where_sql}
            """
            cursor.execute(stats_sql, params)
            stats = cursor.fetchone()
            get_count = int(stats.get('get_count', 0))
            put_count = int(stats.get('put_count', 0))
            slow_count = int(stats.get('slow_count', 0))
            
            # One page of rows (etl_date is the display timestamp).
            offset = (page - 1) * page_size
            list_sql = f"""
            SELECT 
                etl_date,
                version,
                deployment_id,
                api_name,
                bucket,
                object_key,
                status_code,
                api_status,
                rx_bytes,
                tx_bytes,
                tx_headers,
                time_to_response,
                remote_host,
                request_id,
                user_agent,
                event,
                trigger,
                request_header,
                response_header,
                tags,
                access_key,
                parent_user
            FROM tds_minio_audit_log
            WHERE {where_sql}
            ORDER BY {sort_field} {sort_order}
            LIMIT {page_size} OFFSET {offset}
            """
            
            cursor.execute(list_sql, params)
            logs = cursor.fetchall()
            
            # Normalize rows for the API response (NULLs -> '' / 0).
            formatted_logs = []
            for log in logs:
                formatted_logs.append({
                    'event_time': self._format_datetime(log.get('etl_date')) or '',  # etl_date is the display time
                    'version': log.get('version') or '',
                    'deployment_id': log.get('deployment_id') or '',
                    'api': log.get('api_name') or '',
                    'bucket': log.get('bucket') or '',
                    'object_key': log.get('object_key') or '',
                    'status_code': int(log.get('status_code') or 0),
                    'api_status': log.get('api_status') or '',
                    'rx_bytes': int(log.get('rx_bytes') or 0),
                    'tx_bytes': int(log.get('tx_bytes') or 0),
                    'tx_headers': int(log.get('tx_headers') or 0),
                    'time_to_response': int(log.get('time_to_response') or 0),
                    'remote_host': log.get('remote_host') or '',
                    'request_id': log.get('request_id') or '',
                    'user_agent': log.get('user_agent') or '',
                    'event': log.get('event') or '',
                    'trigger': log.get('trigger') or '',
                    'request_header': log.get('request_header'),
                    'response_header': log.get('response_header'),
                    'tags': log.get('tags'),
                    'access_key': log.get('access_key') or '',
                    'parent_user': log.get('parent_user') or ''
                })
            
            return {
                'total': int(total),
                'page': page,
                'page_size': page_size,
                'logs': formatted_logs,
                'stats': {
                    'get_count': get_count,
                    'put_count': put_count,
                    'slow_count': slow_count
                }
            }
        
        except Exception as e:
            logger.error(f"获取Minio日志列表失败: {e}")
            raise
        finally:
            if conn:
                conn.close()
    
    # ========== Flink Job 日志查询方法 ==========
    
    def get_flink_job_list(self) -> List[str]:
        """
        Return the distinct Flink job names (the project column), sorted.

        Returns:
            List[str]: Job names in ascending order.
        """
        conn = self._get_starrocks_connection()
        try:
            with conn.cursor() as cursor:
                sql = """
                SELECT DISTINCT project
                FROM tds_flink_job_logs
                WHERE project IS NOT NULL
                  AND project != ''
                ORDER BY project
                """
                cursor.execute(sql)
                rows = cursor.fetchall()

                # Keep only truthy project values, preserving order.
                jobs = []
                for row in rows:
                    name = row.get('project')
                    if name:
                        jobs.append(row['project'])
                logger.info(f"获取到 {len(jobs)} 个 Flink Job")
                return jobs

        finally:
            conn.close()
    
    def get_flink_job_overview(self, start_time: Optional[str] = None, end_time: Optional[str] = None,
                               job_name: Optional[str] = None) -> Dict[str, Any]:
        """
        Overview statistics for Flink job logs.

        Args:
            start_time: Optional inclusive lower bound (None = unbounded).
            end_time: Optional inclusive upper bound (None = unbounded).
            job_name: Optional job name (matched against the project column).

        Returns:
            dict: Log totals, per-level counts, error rate and time range.
        """
        conn = None
        
        try:
            conn = self._get_starrocks_connection()
            cursor = conn.cursor()
            
            # Time filter on details_timestamp.
            time_filter, time_params = self._build_time_filter(start_time, end_time, 'details_timestamp')
            
            sql = f"""
            SELECT 
                COUNT(*) as total_logs,
                COUNT(DISTINCT project) as total_jobs,
                COUNT(CASE WHEN details_level = 'ERROR' THEN 1 END) as error_count,
                COUNT(CASE WHEN details_level = 'WARN' THEN 1 END) as warn_count,
                COUNT(CASE WHEN details_level = 'INFO' THEN 1 END) as info_count
            FROM tds_flink_job_logs
            WHERE 1=1
            {time_filter}
            """
            
            # Copy before appending so we never mutate the list returned by
            # _build_time_filter (matches the list(time_params) convention
            # used by the StarRocks error-log overview).
            params = list(time_params)
            
            if job_name:
                sql += " AND project = %s"
                params.append(job_name)
            
            cursor.execute(sql, params)
            result = cursor.fetchone()
            
            total_logs = int(result.get('total_logs') or 0)
            error_count = int(result.get('error_count') or 0)
            
            # Error rate as a percentage; guard against division by zero.
            error_rate = (error_count / total_logs * 100) if total_logs > 0 else 0
            
            return {
                'total_logs': total_logs,
                'total_jobs': int(result.get('total_jobs') or 0),
                'error_count': error_count,
                'warn_count': int(result.get('warn_count') or 0),
                'info_count': int(result.get('info_count') or 0),
                'error_rate': round(error_rate, 2),
                'time_range': {
                    'start': start_time if start_time else 'all',
                    'end': end_time if end_time else 'all'
                }
            }
        
        except Exception as e:
            logger.error(f"获取 Flink Job 概览统计失败: {e}")
            raise
        finally:
            if conn:
                conn.close()
    
    def get_flink_job_health(self, start_time: Optional[str] = None, end_time: Optional[str] = None) -> Dict[str, Any]:
        """
        Rank logging classes by health (error volume and error rate).

        Args:
            start_time: Optional inclusive lower bound (None = unbounded).
            end_time: Optional inclusive upper bound (None = unbounded).

        Returns:
            dict: {'classes': [...]} — top 20 classes by error count.
        """
        conn = None

        try:
            conn = self._get_starrocks_connection()
            cursor = conn.cursor()

            # Time filter on details_timestamp.
            time_filter, time_params = self._build_time_filter(start_time, end_time, 'details_timestamp')

            sql = f"""
            SELECT 
                COALESCE(details_class_path, details_class) as class_path,
                MAX(details_class) as class_name,
                COUNT(*) as total_logs,
                COUNT(CASE WHEN details_level = 'ERROR' THEN 1 END) as error_count,
                COUNT(CASE WHEN details_level = 'WARN' THEN 1 END) as warn_count,
                COUNT(CASE WHEN details_level = 'ERROR' THEN 1 END) * 100.0 / NULLIF(COUNT(*), 0) as error_rate,
                MAX(details_timestamp) as last_log_time,
                CASE 
                    WHEN COUNT(CASE WHEN details_level = 'ERROR' THEN 1 END) = 0 THEN 'healthy'
                    WHEN COUNT(CASE WHEN details_level = 'ERROR' THEN 1 END) * 100.0 / NULLIF(COUNT(*), 0) < 5 THEN 'warning'
                    ELSE 'critical'
                END as health_status
            FROM tds_flink_job_logs
            WHERE 1=1
            {time_filter}
            GROUP BY COALESCE(details_class_path, details_class)
            ORDER BY error_count DESC, total_logs DESC
            LIMIT 20
            """

            cursor.execute(sql, time_params)
            rows = cursor.fetchall()

            # Normalize NULLs so the API always returns complete records.
            ranked = [
                {
                    'class_name': row.get('class_name') or '',
                    'class_path': row.get('class_path') or '',
                    'total_logs': int(row.get('total_logs') or 0),
                    'error_count': int(row.get('error_count') or 0),
                    'warn_count': int(row.get('warn_count') or 0),
                    'error_rate': round(float(row.get('error_rate') or 0), 2),
                    'last_log_time': str(row.get('last_log_time') or ''),
                    'health_status': row.get('health_status') or 'unknown'
                }
                for row in rows
            ]

            return {'classes': ranked}

        except Exception as e:
            logger.error(f"获取 Flink Job 健康度排行失败: {e}")
            raise
        finally:
            if conn:
                conn.close()
    
    def get_flink_job_trend(self, start_time: Optional[str] = None, end_time: Optional[str] = None,
                           job_names: Optional[List[str]] = None, granularity: str = 'hour') -> Dict[str, Any]:
        """
        Log-volume trend over time, grouped by log level per time bucket.

        Args:
            start_time: Optional inclusive lower bound (None = unbounded).
            end_time: Optional inclusive upper bound (None = unbounded).
            job_names: Deprecated; kept for signature compatibility only.
            granularity: Bucketing granularity ('hour', 'minute' or 'day');
                anything else falls back to hourly.

        Returns:
            dict: {'trends': [...]} — one entry per time bucket, with a
                per-level nested count dict.
        """
        conn = None

        try:
            conn = self._get_starrocks_connection()
            cursor = conn.cursor()

            # Time filter on details_timestamp.
            time_filter, time_params = self._build_time_filter(start_time, end_time, 'details_timestamp')

            # %% survives the driver's %-interpolation as a literal %.
            formats = {
                'hour': '%%Y-%%m-%%d %%H:00:00',
                'minute': '%%Y-%%m-%%d %%H:%%i:00',
                'day': '%%Y-%%m-%%d 00:00:00'
            }
            time_format = formats.get(granularity, '%%Y-%%m-%%d %%H:00:00')

            sql = f"""
            SELECT 
                date_format(details_timestamp, '{time_format}') as time_bucket,
                details_level,
                COUNT(*) as log_count
            FROM tds_flink_job_logs
            WHERE 1=1
            {time_filter}
            GROUP BY time_bucket, details_level
            ORDER BY time_bucket, details_level
            """

            cursor.execute(sql, time_params)
            rows = cursor.fetchall()

            # Pivot (bucket, level, count) rows into one dict per bucket.
            buckets: Dict[str, Dict[str, Any]] = {}
            for row in rows:
                key = str(row.get('time_bucket'))
                entry = buckets.setdefault(key, {'time': key})
                entry[row.get('details_level')] = {
                    'log_count': int(row.get('log_count') or 0)
                }

            return {'trends': list(buckets.values())}

        except Exception as e:
            logger.error(f"获取 Flink Job 日志趋势失败: {e}")
            raise
        finally:
            if conn:
                conn.close()
    
    def get_flink_job_errors(self, start_time: Optional[str] = None, end_time: Optional[str] = None,
                            job_name: Optional[str] = None, log_level: str = 'ERROR', top_n: int = 20) -> Dict[str, Any]:
        """
        Error/warning/exception analysis for Flink job logs.

        All three levels (ERROR, WARN, EXCEPTION) are fetched in a single
        aggregated query and then split per level in Python.

        Args:
            start_time: Optional inclusive lower bound (None = unbounded).
            end_time: Optional inclusive upper bound (None = unbounded).
            job_name: Optional job name (project column).
            log_level: Kept for API compatibility; all levels are returned.
            top_n: Number of entries kept per level.

        Returns:
            dict: {'errors_by_level': {'ERROR': [...], 'WARN': [...], 'EXCEPTION': [...]}}
        """
        conn = None
        
        try:
            conn = self._get_starrocks_connection()
            cursor = conn.cursor()
            
            # Time filter on details_timestamp.
            time_filter, time_params = self._build_time_filter(start_time, end_time, 'details_timestamp')
            
            # Single aggregated query for all levels; message text is
            # synthesized later to avoid an expensive per-class lookup.
            sql = f"""
            SELECT 
                details_level,
                COALESCE(details_class_path, details_class) as class_path,
                MAX(details_class) as class_name,
                COUNT(*) as error_count,
                MAX(details_timestamp) as last_error_time,
                MIN(details_timestamp) as first_error_time
            FROM tds_flink_job_logs
            WHERE details_level IN ('ERROR', 'WARN', 'EXCEPTION')
            {time_filter}
            """
            
            # Copy before appending so we never mutate the list returned by
            # _build_time_filter (matches the list(time_params) convention
            # used by the StarRocks error-log overview).
            params = list(time_params)
            
            if job_name:
                sql += " AND project = %s"
                params.append(job_name)
            
            sql += " GROUP BY details_level, COALESCE(details_class_path, details_class)"
            sql += " ORDER BY error_count DESC"
            
            # Debug logging for query diagnosis.
            logger.info(f"执行错误分析查询（所有级别）, 参数: {params}")
            
            cursor.execute(sql, params)
            all_results = cursor.fetchall()
            
            logger.info(f"查询结果总数: {len(all_results)}")
            
            # Split rows per log level; unknown levels are dropped.
            errors_by_level = {'ERROR': [], 'WARN': [], 'EXCEPTION': []}
            
            for err in all_results:
                level = err.get('details_level')
                if level in errors_by_level:
                    errors_by_level[level].append({
                        'class_name': err.get('class_name') or '',
                        'class_path': err.get('class_path') or '',
                        'message': f"{err.get('class_name')} 相关错误",  # synthesized summary, avoids extra queries
                        'error_count': int(err.get('error_count') or 0),
                        'last_error_time': self._format_datetime(err.get('last_error_time')) or '',
                        'first_error_time': self._format_datetime(err.get('first_error_time')) or ''
                    })
            
            # Keep only the Top N per level (defensive re-sort: SQL already
            # orders globally, not per level).
            for level in errors_by_level:
                errors_by_level[level] = sorted(errors_by_level[level], key=lambda x: x['error_count'], reverse=True)[:top_n]
            
            logger.info(f"各级别数量 - ERROR: {len(errors_by_level['ERROR'])}, WARN: {len(errors_by_level['WARN'])}, EXCEPTION: {len(errors_by_level['EXCEPTION'])}")
            
            return {'errors_by_level': errors_by_level}
        
        except Exception as e:
            logger.error(f"获取 Flink Job 错误分析失败: {e}")
            raise
        finally:
            if conn:
                conn.close()
    
    def get_flink_job_logs(self, filters: Dict[str, Any], page: int = 1, page_size: int = 50,
                          sort_field: str = 'log_timestamp', sort_order: str = 'desc') -> Dict[str, Any]:
        """
        Fetch a paginated list of Flink job logs.

        Args:
            filters: Filter dict (start_time/end_time/job_name/job_id/env/
                log_level/logger_name/class_name/message).
            page: 1-based page number.
            page_size: Rows per page.
            sort_field: Sort column (frontend name), default log_timestamp.
            sort_order: 'desc' or 'asc', default 'desc'.

        Returns:
            dict: Total count, page info, log rows and level stats.
        """
        start_time = filters.get('start_time')
        end_time = filters.get('end_time')
        
        # page/page_size are interpolated directly into LIMIT/OFFSET below,
        # so force them to safe positive ints first — prevents SQL injection
        # (and nonsense values) if a caller passes strings.
        try:
            page = max(1, int(page))
        except (TypeError, ValueError):
            page = 1
        try:
            page_size = max(1, int(page_size))
        except (TypeError, ValueError):
            page_size = 50
        
        # Sort-field whitelist (prevents SQL injection) mapped to real columns.
        field_mapping = {'log_timestamp': 'details_timestamp', 'log_level': 'details_level'}
        if sort_field in field_mapping:
            sort_field = field_mapping[sort_field]
        else:
            sort_field = 'details_timestamp'
        
        # Sort-direction whitelist.
        sort_order = sort_order.lower()
        if sort_order not in ['asc', 'desc']:
            sort_order = 'desc'
        
        conn = None
        
        try:
            conn = self._get_starrocks_connection()
            cursor = conn.cursor()
            
            # Build parameterized WHERE conditions.
            where_conditions = []
            params = []
            
            # Optional time window on details_timestamp.
            time_filter, time_params = self._build_time_filter(start_time, end_time, 'details_timestamp')
            if time_filter:
                # time_filter starts with ' AND '; strip it before appending.
                where_conditions.append(time_filter[5:])
                params.extend(time_params)
            
            if filters.get('job_name'):
                where_conditions.append("project = %s")
                params.append(filters['job_name'])
            
            if filters.get('job_id'):
                # NOTE(review): job_id filters on the env column, identical to
                # the 'env' filter below — confirm this mapping is intentional.
                where_conditions.append("env = %s")
                params.append(filters['job_id'])
            
            if filters.get('env'):
                where_conditions.append("env = %s")
                params.append(filters['env'])
            
            if filters.get('log_level'):
                where_conditions.append("details_level = %s")
                params.append(filters['log_level'])
            
            if filters.get('logger_name'):
                where_conditions.append("details_class LIKE %s")
                params.append(f"%{filters['logger_name']}%")
            
            if filters.get('class_name'):
                where_conditions.append("details_class LIKE %s")
                params.append(f"%{filters['class_name']}%")
            
            if filters.get('message'):
                where_conditions.append("details_message LIKE %s")
                params.append(f"%{filters['message']}%")
            
            where_clause = " AND ".join(where_conditions) if where_conditions else "1=1"
            
            # Total row count under the current filters.
            count_sql = f"SELECT COUNT(*) as total FROM tds_flink_job_logs WHERE {where_clause}"
            cursor.execute(count_sql, params)
            total = cursor.fetchone().get('total', 0)
            
            # Global per-level stats under the current filters.
            stats_sql = f"""
            SELECT 
                COUNT(CASE WHEN details_level = 'ERROR' THEN 1 END) as error_count,
                COUNT(CASE WHEN details_level = 'WARN' THEN 1 END) as warn_count,
                COUNT(CASE WHEN details_level = 'INFO' THEN 1 END) as info_count
            FROM tds_flink_job_logs 
            WHERE {where_clause}
            """
            cursor.execute(stats_sql, params)
            stats = cursor.fetchone()
            error_count = int(stats.get('error_count', 0))
            warn_count = int(stats.get('warn_count', 0))
            info_count = int(stats.get('info_count', 0))
            
            # One page of rows.
            offset = (page - 1) * page_size
            data_sql = f"""
            SELECT 
                log_id,
                details_timestamp,
                details_level,
                details_class,
                details_class_path,
                details_message,
                message,
                project,
                component,
                env,
                host_name,
                host_ip,
                log_type,
                cluster,
                error_type,
                log_file_path,
                tags
            FROM tds_flink_job_logs
            WHERE {where_clause}
            ORDER BY {sort_field} {sort_order}
            LIMIT {page_size} OFFSET {offset}
            """
            
            cursor.execute(data_sql, params)
            logs = cursor.fetchall()
            
            # Normalize rows into the frontend field names (NULLs -> '').
            formatted_logs = []
            for log in logs:
                formatted_logs.append({
                    'log_id': log.get('log_id') or '',
                    'log_timestamp': self._format_datetime(log.get('details_timestamp')) or '',
                    'job_name': log.get('project') or '',
                    'env': log.get('env') or '',
                    'component': log.get('component') or '',
                    'log_level': log.get('details_level') or '',
                    'logger_name': log.get('details_class') or '',
                    'class_path': log.get('details_class_path') or '',
                    'message': log.get('details_message') or '',
                    'full_message': log.get('message') or '',
                    'exception_class': log.get('error_type') or '',
                    'host_name': log.get('host_name') or '',
                    'host_ip': log.get('host_ip') or '',
                    'log_type': log.get('log_type') or '',
                    'cluster': log.get('cluster') or '',
                    'log_file_path': log.get('log_file_path') or '',
                    'tags': log.get('tags')
                })
            
            return {
                'total': int(total),
                'page': page,
                'page_size': page_size,
                'logs': formatted_logs,
                'stats': {
                    'error_count': error_count,
                    'warn_count': warn_count,
                    'info_count': info_count
                }
            }
        
        except Exception as e:
            logger.error(f"获取 Flink Job 日志列表失败: {e}")
            raise
        finally:
            if conn:
                conn.close()

    # ========== StarRocks 错误日志查询方法 ==========

    def get_starrocks_error_clusters(self) -> Dict[str, Any]:
        """Return the cluster values present in the StarRocks error-log table (etl/api only)."""
        conn = self._get_starrocks_connection()
        try:
            with conn.cursor() as cursor:
                cursor.execute(
                    """
                    SELECT DISTINCT COALESCE(cluster, 'unknown') AS cluster
                    FROM tds_starrocks_error_logs
                    WHERE cluster IN ('etl', 'api')
                    ORDER BY cluster
                    """
                )
                rows = cursor.fetchall()
                # Defensive fallback mirrors the SQL-side COALESCE.
                names = []
                for row in rows:
                    names.append(row.get('cluster') or 'unknown')
                return {'clusters': names}
        finally:
            conn.close()

    def get_starrocks_error_overview(self, start_time: Optional[str] = None, end_time: Optional[str] = None,
                                     cluster: Optional[str] = None) -> Dict[str, Any]:
        """Overview statistics for StarRocks error logs (WARN/ERROR/FATAL only).

        Args:
            start_time: Optional start of the time range (passed to
                self._build_time_filter).
            end_time: Optional end of the time range.
            cluster: Optional cluster name filter ('etl' or 'api').

        Returns:
            Dict with 'totals', 'level_breakdown', 'cluster_distribution',
            'component_distribution' and 'time_range'.
        """
        conn = None

        def _with_cluster(sql: str, base_params: List[Any]) -> Tuple[str, List[Any]]:
            # Append the optional cluster equality filter; always returns a
            # fresh params list so the four queries never share mutable state.
            if cluster:
                return sql + " AND cluster = %s", base_params + [cluster]
            return sql, list(base_params)

        try:
            conn = self._get_starrocks_connection()
            cursor = conn.cursor()

            time_filter, time_params = self._build_time_filter(start_time, end_time, 'details_timestamp')
            # Base filter: only WARN/ERROR/FATAL levels, and cluster must be
            # 'etl' or 'api' (guards against dirty data).
            base_where = "WHERE details_level IN ('WARN', 'ERROR', 'FATAL') AND cluster IN ('etl', 'api')"

            # Headline totals in a single scan.
            overview_sql = (
                "SELECT "
                "COUNT(*) AS total_logs, "
                "COUNT(CASE WHEN details_level = 'ERROR' THEN 1 END) AS error_count, "
                "COUNT(CASE WHEN details_level = 'WARN' THEN 1 END) AS warn_count, "
                "COUNT(CASE WHEN details_level = 'FATAL' THEN 1 END) AS fatal_count, "
                "COUNT(DISTINCT COALESCE(cluster, 'unknown')) AS cluster_count, "
                "COUNT(DISTINCT COALESCE(component, 'unknown')) AS component_count, "
                "COUNT(DISTINCT COALESCE(node_ip, 'unknown')) AS node_count, "
                "MAX(details_timestamp) AS last_log_time "
                "FROM tds_starrocks_error_logs "
                f"{base_where} "
                f"{time_filter}"
            )
            overview_sql, params = _with_cluster(overview_sql, list(time_params))
            cursor.execute(overview_sql, params)
            overview = cursor.fetchone() or {}

            # Log level distribution.
            level_sql = (
                "SELECT details_level, COUNT(*) AS level_count "
                "FROM tds_starrocks_error_logs "
                f"{base_where} "
                f"{time_filter} "
            )
            level_sql, level_params = _with_cluster(level_sql, list(time_params))
            level_sql += " GROUP BY details_level"
            cursor.execute(level_sql, level_params)
            level_rows = cursor.fetchall()

            level_breakdown = {
                row.get('details_level') or 'UNKNOWN': int(row.get('level_count') or 0)
                for row in level_rows
            }

            # Cluster distribution (under the same filters).
            cluster_sql = (
                "SELECT COALESCE(cluster, 'unknown') AS cluster, COUNT(*) AS log_count "
                "FROM tds_starrocks_error_logs "
                f"{base_where} "
                f"{time_filter} "
            )
            cluster_sql, cluster_params = _with_cluster(cluster_sql, list(time_params))
            cluster_sql += " GROUP BY COALESCE(cluster, 'unknown') ORDER BY log_count DESC"
            cursor.execute(cluster_sql, cluster_params)
            cluster_rows = cursor.fetchall()
            cluster_distribution = [
                {
                    'cluster': row.get('cluster') or 'unknown',
                    'log_count': int(row.get('log_count') or 0)
                }
                for row in cluster_rows
            ]

            # Component distribution.
            component_sql = (
                "SELECT COALESCE(component, 'unknown') AS component, COUNT(*) AS log_count "
                "FROM tds_starrocks_error_logs "
                f"{base_where} "
                f"{time_filter} "
            )
            component_sql, component_params = _with_cluster(component_sql, list(time_params))
            component_sql += " GROUP BY COALESCE(component, 'unknown') ORDER BY log_count DESC"
            cursor.execute(component_sql, component_params)
            component_rows = cursor.fetchall()
            component_distribution = [
                {
                    'component': row.get('component') or 'unknown',
                    'log_count': int(row.get('log_count') or 0)
                }
                for row in component_rows
            ]

            return {
                'totals': {
                    'total_logs': int(overview.get('total_logs') or 0),
                    'error_count': int(overview.get('error_count') or 0),
                    'warn_count': int(overview.get('warn_count') or 0),
                    'fatal_count': int(overview.get('fatal_count') or 0),
                    'cluster_count': int(overview.get('cluster_count') or 0),
                    'component_count': int(overview.get('component_count') or 0),
                    'node_count': int(overview.get('node_count') or 0),
                    'last_log_time': self._format_datetime(overview.get('last_log_time')) or ''
                },
                'level_breakdown': level_breakdown,
                'cluster_distribution': cluster_distribution,
                'component_distribution': component_distribution,
                'time_range': {
                    'start': start_time if start_time else 'all',
                    'end': end_time if end_time else 'all'
                }
            }

        except Exception as e:
            logger.error(f"获取 StarRocks 错误日志概览统计失败: {e}")
            raise
        finally:
            if conn:
                conn.close()

    def get_starrocks_error_trend(self, start_time: Optional[str] = None, end_time: Optional[str] = None,
                                  cluster: Optional[str] = None, granularity: str = 'hour') -> Dict[str, Any]:
        """Trend data for StarRocks error logs, bucketed by time and level."""
        conn = None

        try:
            conn = self._get_starrocks_connection()
            cursor = conn.cursor()

            time_filter, time_params = self._build_time_filter(start_time, end_time, 'details_timestamp')
            # Restrict to WARN/ERROR/FATAL rows from the 'etl'/'api' clusters
            # (guards against dirty data).
            base_where = "WHERE details_level IN ('WARN', 'ERROR', 'FATAL') AND cluster IN ('etl', 'api')"

            # '%' is doubled so pymysql's client-side parameter substitution
            # leaves a literal '%' for date_format.
            bucket_formats = {
                'minute': '%%Y-%%m-%%d %%H:%%i:00',
                'hour': '%%Y-%%m-%%d %%H:00:00',
                'day': '%%Y-%%m-%%d 00:00:00'
            }
            fmt = bucket_formats.get(granularity, '%%Y-%%m-%%d %%H:00:00')

            sql = (
                "SELECT "
                f"date_format(details_timestamp, '{fmt}') AS time_bucket, "
                "details_level, "
                "COUNT(*) AS log_count "
                "FROM tds_starrocks_error_logs "
                f"{base_where} "
                f"{time_filter} "
            )

            query_params: List[Any] = list(time_params)
            if cluster:
                sql += " AND cluster = %s"
                query_params.append(cluster)

            sql += " GROUP BY time_bucket, details_level ORDER BY time_bucket, details_level"

            cursor.execute(sql, query_params)

            # Fold rows into one entry per time bucket, keyed by level.
            buckets: Dict[str, Dict[str, Any]] = {}
            for record in cursor.fetchall():
                bucket_key = str(record.get('time_bucket'))
                level_name = record.get('details_level') or 'UNKNOWN'
                entry = buckets.setdefault(bucket_key, {'time': bucket_key})
                entry[level_name] = {'log_count': int(record.get('log_count') or 0)}

            return {
                'trends': list(buckets.values()),
                'time_range': {
                    'start': start_time if start_time else 'all',
                    'end': end_time if end_time else 'all'
                },
                'granularity': granularity
            }

        except Exception as e:
            logger.error(f"获取 StarRocks 错误日志趋势失败: {e}")
            raise
        finally:
            if conn:
                conn.close()

    def get_starrocks_error_fingerprints(self, start_time: Optional[str] = None, end_time: Optional[str] = None,
                                         cluster: Optional[str] = None, level: Optional[str] = None,
                                         top_n: int = 20) -> Dict[str, Any]:
        """Top-N StarRocks errors aggregated by error fingerprint."""
        conn = None

        try:
            conn = self._get_starrocks_connection()
            cursor = conn.cursor()

            time_filter, time_params = self._build_time_filter(start_time, end_time, 'details_timestamp')
            # Only WARN/ERROR/FATAL rows from the 'etl'/'api' clusters
            # (guards against dirty data).
            base_where = "WHERE details_level IN ('WARN', 'ERROR', 'FATAL') AND cluster IN ('etl', 'api')"

            # Fall back to location + message when no precomputed fingerprint.
            sql = (
                "SELECT "
                "COALESCE(error_fingerprint, CONCAT(COALESCE(details_location, 'unknown'), '#', details_message)) AS fingerprint, "
                "MAX(details_message) AS sample_message, "
                "MAX(details_level) AS details_level, "
                "COALESCE(cluster, 'unknown') AS cluster, "
                "COALESCE(component, 'unknown') AS component, "
                "COUNT(*) AS log_count, "
                "MAX(details_timestamp) AS last_seen "
                "FROM tds_starrocks_error_logs "
                f"{base_where} "
                f"{time_filter} "
            )

            query_params: List[Any] = list(time_params)

            if cluster:
                sql += " AND cluster = %s"
                query_params.append(cluster)

            normalized_level = level.upper() if level else None
            if normalized_level in {'WARN', 'ERROR', 'FATAL'}:
                sql += " AND details_level = %s"
                query_params.append(normalized_level)

            sql += (
                " GROUP BY fingerprint, COALESCE(cluster, 'unknown'), COALESCE(component, 'unknown')"
                " ORDER BY log_count DESC LIMIT %s"
            )
            query_params.append(top_n)

            cursor.execute(sql, query_params)

            fingerprints = []
            for row in cursor.fetchall():
                fingerprints.append({
                    'fingerprint': row.get('fingerprint') or '',
                    'message': row.get('sample_message') or '',
                    'details_level': row.get('details_level') or 'UNKNOWN',
                    'cluster': row.get('cluster') or 'unknown',
                    'component': row.get('component') or 'unknown',
                    'log_count': int(row.get('log_count') or 0),
                    'last_seen': self._format_datetime(row.get('last_seen')) or ''
                })

            return {'fingerprints': fingerprints}

        except Exception as e:
            logger.error(f"获取 StarRocks 错误指纹聚合失败: {e}")
            raise
        finally:
            if conn:
                conn.close()

    def get_starrocks_error_logs(self, filters: Dict[str, Any], page: int = 1, page_size: int = 50,
                                 sort_field: str = 'details_timestamp', sort_order: str = 'desc') -> Dict[str, Any]:
        """Fetch a paginated list of StarRocks error logs.

        Only WARN/ERROR/FATAL rows from the 'etl'/'api' clusters are returned
        (see base_where below).

        Args:
            filters: Filter dict; supported keys: start_time, end_time,
                cluster, component, details_level, node_type, node_ip,
                message (substring match) and fingerprint (exact match).
            page: 1-based page number.
            page_size: Rows per page.
            sort_field: Public sort-field name; mapped through a whitelist to
                a physical column (defaults to 'details_timestamp').
            sort_order: 'asc' or 'desc' (defaults to 'desc' on invalid input).

        Returns:
            Dict with 'total', 'page', 'page_size', 'logs' and 'stats'
            (error_count / warn_count / fatal_count).
        """
        
        logger.info(f"[StarRocks错误日志] 开始查询 - filters={filters}, page={page}, page_size={page_size}")

        # Sort-field whitelist: maps public aliases to physical columns and
        # keeps the interpolated ORDER BY clause injection-safe.
        field_mapping = {
            'details_timestamp': 'details_timestamp',
            'log_timestamp': 'details_timestamp',
            'details_level': 'details_level',
            'log_level': 'details_level'
        }
        sort_field = field_mapping.get(sort_field, 'details_timestamp')

        # Validate the sort direction (also interpolated into SQL).
        sort_order = sort_order.lower()
        if sort_order not in ['asc', 'desc']:
            sort_order = 'desc'

        start_time = filters.get('start_time')
        end_time = filters.get('end_time')

        conn = None

        try:
            conn = self._get_starrocks_connection()
            logger.info(f"[StarRocks错误日志] 成功连接到 StarRocks 数据库")
            cursor = conn.cursor()

            time_filter, time_params = self._build_time_filter(start_time, end_time, 'details_timestamp')
            # Base filter: only WARN/ERROR/FATAL levels, and cluster must be
            # 'etl' or 'api' (guards against dirty data).
            base_where = "WHERE details_level IN ('WARN', 'ERROR', 'FATAL') AND cluster IN ('etl', 'api')"

            where_clauses = []
            params: List[Any] = []

            if time_filter:
                # time_filter starts with ' AND '; strip that prefix before
                # collecting it alongside the other clauses.
                where_clauses.append(time_filter[5:])
                params.extend(time_params)

            # Optional equality/LIKE filters; each appends one parameterized
            # clause so user input never reaches the SQL text directly.
            if filters.get('cluster'):
                where_clauses.append("cluster = %s")
                params.append(filters['cluster'])

            if filters.get('component'):
                where_clauses.append("component = %s")
                params.append(filters['component'])

            if filters.get('details_level'):
                where_clauses.append("details_level = %s")
                params.append(filters['details_level'])

            if filters.get('node_type'):
                where_clauses.append("node_type = %s")
                params.append(filters['node_type'])

            if filters.get('node_ip'):
                where_clauses.append("node_ip = %s")
                params.append(filters['node_ip'])

            if filters.get('message'):
                where_clauses.append("details_message LIKE %s")
                params.append(f"%{filters['message']}%")

            if filters.get('fingerprint'):
                where_clauses.append("error_fingerprint = %s")
                params.append(filters['fingerprint'])

            where_sql = ' AND '.join(where_clauses)
            if where_sql:
                where_sql = f" AND {where_sql}"

            # Total row count under the same filters (for pagination).
            count_sql = (
                "SELECT COUNT(*) AS total "
                "FROM tds_starrocks_error_logs "
                f"{base_where}"
                f"{where_sql}"
            )
            logger.info(f"[StarRocks错误日志] 执行COUNT查询: {count_sql}")
            logger.info(f"[StarRocks错误日志] 查询参数: {params}")
            cursor.execute(count_sql, params)
            total = int(cursor.fetchone().get('total', 0))
            logger.info(f"[StarRocks错误日志] 查询结果总数: {total}")

            # Per-level counts over the full (unpaginated) result set.
            stats_sql = (
                "SELECT "
                "COUNT(CASE WHEN details_level = 'ERROR' THEN 1 END) AS error_count, "
                "COUNT(CASE WHEN details_level = 'WARN' THEN 1 END) AS warn_count, "
                "COUNT(CASE WHEN details_level = 'FATAL' THEN 1 END) AS fatal_count "
                "FROM tds_starrocks_error_logs "
                f"{base_where}"
                f"{where_sql}"
            )
            cursor.execute(stats_sql, params)
            stats = cursor.fetchone() or {}

            # Page of rows; sort_field/sort_order were whitelisted above.
            offset = (page - 1) * page_size
            list_sql = (
                "SELECT "
                "log_id, details_timestamp, details_level, details_message, details_location, "
                "component, cluster, node_type, node_ip, host_name, host_hostname, host_ip, "
                "error_fingerprint, log_file_path, log_file_name, detected_log_level, message, "
                "metadata_beat, metadata_version "
                "FROM tds_starrocks_error_logs "
                f"{base_where}"
                f"{where_sql} "
                f"ORDER BY {sort_field} {sort_order} "
                "LIMIT %s OFFSET %s"
            )

            list_params = list(params)
            list_params.extend([page_size, offset])

            logger.info(f"[StarRocks错误日志] 执行LIST查询: {list_sql}")
            logger.info(f"[StarRocks错误日志] 查询参数: {list_params}")
            cursor.execute(list_sql, list_params)
            rows = cursor.fetchall()
            logger.info(f"[StarRocks错误日志] 查询到 {len(rows)} 条记录")

            # Normalize NULLs to '' / 'unknown' so the API payload is
            # uniformly typed.
            logs = [
                {
                    'log_id': row.get('log_id') or '',
                    'log_timestamp': self._format_datetime(row.get('details_timestamp')) or '',
                    'details_level': row.get('details_level') or '',
                    'details_message': row.get('details_message') or '',
                    'details_location': row.get('details_location') or '',
                    'component': row.get('component') or 'unknown',
                    'cluster': row.get('cluster') or 'unknown',
                    'node_type': row.get('node_type') or '',
                    'node_ip': row.get('node_ip') or '',
                    'host_name': row.get('host_name') or '',
                    'host_hostname': row.get('host_hostname') or '',
                    'host_ip': row.get('host_ip') or '',
                    'error_fingerprint': row.get('error_fingerprint') or '',
                    'log_file_path': row.get('log_file_path') or '',
                    'log_file_name': row.get('log_file_name') or '',
                    'detected_log_level': row.get('detected_log_level') or '',
                    'message': row.get('message') or '',
                    'metadata_beat': row.get('metadata_beat') or '',
                    'metadata_version': row.get('metadata_version') or ''
                }
                for row in rows
            ]

            return {
                'total': total,
                'page': page,
                'page_size': page_size,
                'logs': logs,
                'stats': {
                    'error_count': int(stats.get('error_count') or 0),
                    'warn_count': int(stats.get('warn_count') or 0),
                    'fatal_count': int(stats.get('fatal_count') or 0)
                }
            }

        except Exception as e:
            logger.error(f"获取 StarRocks 错误日志列表失败: {e}")
            raise
        finally:
            if conn:
                conn.close()


    # ========== StarRocks 查询日志接口 ==========
    
    def get_starrocks_query_logs(self, filters: Dict[str, Any], page: int = 1, page_size: int = 50,
                                  sort_field: str = 'query_timestamp', sort_order: str = 'desc') -> Dict[str, Any]:
        """
        Fetch a paginated list of StarRocks query logs.

        Args:
            filters: Filter dict; supported keys: start_time, end_time,
                cluster, query_user, query_database, query_state,
                is_slow_query ('1'/'0'), is_query ('1'/'0') and
                query_stmt (substring match).
            page: 1-based page number.
            page_size: Rows per page.
            sort_field: Sort column. 'query_timestamp' is accepted as an
                alias for the physical 'timestamp' column; any unknown value
                falls back to 'timestamp'.
            sort_order: 'asc' or 'desc' (defaults to 'desc' on invalid input).

        Returns:
            Dict with 'total', 'page', 'page_size', 'logs' and 'stats'
            (slow_query_count / success_count / error_count).
        """
        conn = None
        try:
            conn = self._get_starrocks_connection()
            cursor = conn.cursor()
            
            # Sort-field whitelist expressed as an alias mapping, consistent
            # with get_starrocks_error_logs: the public name 'query_timestamp'
            # resolves to the physical 'timestamp' column, and unknown values
            # fall back to 'timestamp'. Keeps the interpolated ORDER BY
            # injection-safe.
            field_mapping = {
                'timestamp': 'timestamp',
                'query_timestamp': 'timestamp',
                'perf_time_ms': 'perf_time_ms',
                'perf_scan_rows': 'perf_scan_rows',
                'perf_return_rows': 'perf_return_rows'
            }
            sort_field = field_mapping.get(sort_field, 'timestamp')
            
            # Validate the sort direction (also interpolated into SQL).
            sort_order = sort_order.lower()
            if sort_order not in ['asc', 'desc']:
                sort_order = 'desc'
            
            # Base WHERE clause (time range).
            start_time = filters.get('start_time')
            end_time = filters.get('end_time')
            time_filter, time_params = self._build_time_filter(start_time, end_time, 'timestamp')
            
            base_where = "WHERE 1=1"
            params = []
            
            if time_filter:
                base_where += time_filter
                params.extend(time_params)
            
            # Remaining optional filters; each appends one parameterized
            # clause so user input never reaches the SQL text directly.
            where_conditions = []
            
            if filters.get('cluster'):
                where_conditions.append("cluster = %s")
                params.append(filters['cluster'])
            
            if filters.get('query_user'):
                where_conditions.append("query_user = %s")
                params.append(filters['query_user'])
            
            if filters.get('query_database'):
                where_conditions.append("query_database = %s")
                params.append(filters['query_database'])
            
            if filters.get('query_state'):
                where_conditions.append("query_state = %s")
                params.append(filters['query_state'])
            
            # Slow-query filter ('1' = slow, '0' = not slow).
            if filters.get('is_slow_query'):
                where_conditions.append("is_slow_query = %s")
                params.append(1 if filters['is_slow_query'] == '1' else 0)
            
            # Query-type filter ('1' = query, '0' = non-query statement).
            if filters.get('is_query'):
                where_conditions.append("is_query = %s")
                params.append(1 if filters['is_query'] == '1' else 0)
            
            # Fuzzy search on the SQL statement text.
            if filters.get('query_stmt'):
                where_conditions.append("CAST(query_stmt AS VARCHAR) LIKE %s")
                params.append(f"%{filters['query_stmt']}%")
            
            where_sql = ""
            if where_conditions:
                where_sql = " AND " + " AND ".join(where_conditions)
            
            # Total row count under the same filters (for pagination).
            count_sql = f"""
                SELECT COUNT(*) AS total 
                FROM tds_starrocks_query_logs 
                {base_where}
                {where_sql}
            """
            cursor.execute(count_sql, params)
            total = int(cursor.fetchone().get('total', 0))
            
            # Aggregate stats over the full (unpaginated) result set.
            stats_sql = f"""
                SELECT 
                    COUNT(CASE WHEN is_slow_query = 1 THEN 1 END) AS slow_query_count,
                    COUNT(CASE WHEN query_state = 'OK' THEN 1 END) AS success_count,
                    COUNT(CASE WHEN query_state = 'ERROR' THEN 1 END) AS error_count
                FROM tds_starrocks_query_logs 
                {base_where}
                {where_sql}
            """
            cursor.execute(stats_sql, params)
            stats = cursor.fetchone() or {}
            
            # Page of rows; big-number columns are CAST to VARCHAR so they
            # survive JSON serialization without precision loss.
            offset = (page - 1) * page_size
            list_sql = f"""
                SELECT 
                    log_id,
                    query_id,
                    CAST(query_timestamp AS VARCHAR) AS query_timestamp,
                    LEFT(query_log_timestamp, 19) AS query_log_timestamp,
                    query_client,
                    query_user,
                    query_authorized_user,
                    query_database,
                    query_catalog,
                    query_state,
                    query_error_code,
                    query_stmt,
                    query_digest,
                    CAST(perf_time_ms AS VARCHAR) AS perf_time_ms,
                    CAST(perf_scan_bytes AS VARCHAR) AS perf_scan_bytes,
                    CAST(perf_scan_rows AS VARCHAR) AS perf_scan_rows,
                    CAST(perf_return_rows AS VARCHAR) AS perf_return_rows,
                    resource_group,
                    warehouse,
                    is_query,
                    is_forward_to_leader,
                    is_slow_query,
                    cluster,
                    component,
                    node_type,
                    node_ip,
                    host_name,
                    host_hostname,
                    host_ip,
                    log_file_path,
                    log_file_name,
                    message
                FROM tds_starrocks_query_logs 
                {base_where}
                {where_sql}
                ORDER BY {sort_field} {sort_order}
                LIMIT %s OFFSET %s
            """
            
            list_params = list(params)
            list_params.extend([page_size, offset])
            
            cursor.execute(list_sql, list_params)
            logs = cursor.fetchall()
            
            return {
                'total': total,
                'page': page,
                'page_size': page_size,
                'logs': logs,
                'stats': {
                    'slow_query_count': int(stats.get('slow_query_count') or 0),
                    'success_count': int(stats.get('success_count') or 0),
                    'error_count': int(stats.get('error_count') or 0)
                }
            }
            
        except Exception as e:
            logger.error(f"获取 StarRocks 查询日志列表失败: {e}")
            raise
        finally:
            if conn:
                conn.close()
    
    
    def get_starrocks_query_users(self) -> List[str]:
        """Return the distinct user names seen in StarRocks query logs (capped at 1000)."""
        conn = None
        try:
            conn = self._get_starrocks_connection()
            cursor = conn.cursor()

            query = """
                SELECT DISTINCT query_user 
                FROM tds_starrocks_query_logs 
                WHERE query_user IS NOT NULL AND query_user != ''
                ORDER BY query_user
                LIMIT 1000
            """
            cursor.execute(query)

            users: List[str] = []
            for row in cursor.fetchall():
                name = row.get('query_user')
                if name:
                    users.append(name)
            return users

        except Exception as e:
            logger.error(f"获取 StarRocks 查询用户列表失败: {e}")
            return []
        finally:
            if conn:
                conn.close()
    
    
    def get_starrocks_query_databases(self) -> List[str]:
        """Return the distinct database names seen in StarRocks query logs (capped at 1000)."""
        conn = None
        try:
            conn = self._get_starrocks_connection()
            cursor = conn.cursor()

            query = """
                SELECT DISTINCT query_database 
                FROM tds_starrocks_query_logs 
                WHERE query_database IS NOT NULL AND query_database != ''
                ORDER BY query_database
                LIMIT 1000
            """
            cursor.execute(query)

            databases: List[str] = []
            for row in cursor.fetchall():
                name = row.get('query_database')
                if name:
                    databases.append(name)
            return databases

        except Exception as e:
            logger.error(f"获取 StarRocks 数据库列表失败: {e}")
            return []
        finally:
            if conn:
                conn.close()
    
    
    def get_starrocks_query_clusters(self) -> List[str]:
        """Return the distinct cluster names seen in StarRocks query logs."""
        conn = None
        try:
            conn = self._get_starrocks_connection()
            cursor = conn.cursor()

            query = """
                SELECT DISTINCT cluster 
                FROM tds_starrocks_query_logs 
                WHERE cluster IS NOT NULL AND cluster != ''
                ORDER BY cluster
            """
            cursor.execute(query)

            clusters: List[str] = []
            for row in cursor.fetchall():
                name = row.get('cluster')
                if name:
                    clusters.append(name)
            return clusters

        except Exception as e:
            logger.error(f"获取 StarRocks 查询集群列表失败: {e}")
            return []
        finally:
            if conn:
                conn.close()
    
    
    def get_starrocks_query_overview(self, cluster: Optional[str] = None, 
                                     start_time: Optional[str] = None, 
                                     end_time: Optional[str] = None) -> Dict[str, Any]:
        """Overview statistics for StarRocks query logs.

        Args:
            cluster: Optional cluster name filter.
            start_time: Optional start of the time range (passed to
                self._build_time_filter).
            end_time: Optional end of the time range.

        Returns:
            Dict with 'totals', 'cluster_distribution',
            'database_distribution' and 'time_range'.
        """
        conn = None
        try:
            conn = self._get_starrocks_connection()
            cursor = conn.cursor()
            
            # Build the time-range filter.
            time_filter, time_params = self._build_time_filter(start_time, end_time, 'timestamp')
            base_where = "WHERE 1=1"
            
            params = []
            if time_filter:
                base_where += time_filter
                params.extend(time_params)
            
            # Optional cluster filter (parameterized).
            cluster_filter = ""
            if cluster:
                cluster_filter = " AND cluster = %s"
                params.append(cluster)
            
            # Overall statistics, deduplicated by query_id to avoid the
            # duplicate rows a DUPLICATE KEY table can hold.
            # NOTE(review): AVG/SUM below aggregate over raw rows, not
            # DISTINCT query_id, so duplicated rows would still skew
            # avg_exec_time / total_scan_* — confirm whether that is intended.
            overview_sql = f"""
                SELECT 
                    COUNT(DISTINCT query_id) AS total_queries,
                    COUNT(DISTINCT CASE WHEN is_slow_query = 1 THEN query_id END) AS slow_query_count,
                    COUNT(DISTINCT CASE WHEN query_state = 'OK' THEN query_id END) AS success_count,
                    COUNT(DISTINCT CASE WHEN query_state = 'ERROR' THEN query_id END) AS error_count,
                    COUNT(DISTINCT query_user) AS user_count,
                    COUNT(DISTINCT query_database) AS database_count,
                    COUNT(DISTINCT cluster) AS cluster_count,
                    MAX(timestamp) AS last_query_time,
                    AVG(CAST(perf_time_ms AS DOUBLE)) AS avg_exec_time,
                    SUM(CAST(perf_scan_bytes AS BIGINT)) AS total_scan_bytes,
                    SUM(CAST(perf_scan_rows AS BIGINT)) AS total_scan_rows
                FROM tds_starrocks_query_logs
                {base_where}
                {cluster_filter}
            """
            cursor.execute(overview_sql, params)
            overview = cursor.fetchone() or {}
            
            # Cluster distribution (deduplicated by query_id).
            cluster_sql = f"""
                SELECT 
                    COALESCE(cluster, 'unknown') AS cluster,
                    COUNT(DISTINCT query_id) AS query_count
                FROM tds_starrocks_query_logs
                {base_where}
                {cluster_filter}
                GROUP BY COALESCE(cluster, 'unknown')
                ORDER BY query_count DESC
            """
            cursor.execute(cluster_sql, params)
            cluster_rows = cursor.fetchall()
            cluster_distribution = [
                {
                    'cluster': row.get('cluster') or 'unknown',
                    'query_count': int(row.get('query_count') or 0)
                }
                for row in cluster_rows
            ]
            
            # Database distribution; 'database' is a reserved word, hence the
            # backticked alias. Deduplicated by query_id, top 20 only.
            database_sql = f"""
                SELECT 
                    COALESCE(query_database, 'unknown') AS `database`,
                    COUNT(DISTINCT query_id) AS query_count,
                    COUNT(DISTINCT CASE WHEN is_slow_query = 1 THEN query_id END) AS slow_query_count
                FROM tds_starrocks_query_logs
                {base_where}
                {cluster_filter}
                GROUP BY COALESCE(query_database, 'unknown')
                ORDER BY query_count DESC
                LIMIT 20
            """
            cursor.execute(database_sql, params)
            database_rows = cursor.fetchall()
            database_distribution = [
                {
                    'database': row.get('database') or 'unknown',
                    'query_count': int(row.get('query_count') or 0),
                    'slow_query_count': int(row.get('slow_query_count') or 0)
                }
                for row in database_rows
            ]
            
            return {
                'totals': {
                    'total_queries': int(overview.get('total_queries') or 0),
                    'slow_query_count': int(overview.get('slow_query_count') or 0),
                    'success_count': int(overview.get('success_count') or 0),
                    'error_count': int(overview.get('error_count') or 0),
                    'user_count': int(overview.get('user_count') or 0),
                    'database_count': int(overview.get('database_count') or 0),
                    'cluster_count': int(overview.get('cluster_count') or 0),
                    'last_query_time': self._format_datetime(overview.get('last_query_time')) or '',
                    'avg_exec_time': round(float(overview.get('avg_exec_time') or 0), 2),
                    'total_scan_bytes': int(overview.get('total_scan_bytes') or 0),
                    'total_scan_rows': int(overview.get('total_scan_rows') or 0)
                },
                'cluster_distribution': cluster_distribution,
                'database_distribution': database_distribution,
                'time_range': {
                    'start': start_time if start_time else 'all',
                    'end': end_time if end_time else 'all'
                }
            }
            
        except Exception as e:
            logger.error(f"获取 StarRocks 查询日志概览统计失败: {e}")
            raise
        finally:
            if conn:
                conn.close()
    
    
    def get_starrocks_query_trend(self, cluster: Optional[str] = None,
                                  start_time: Optional[str] = None, 
                                  end_time: Optional[str] = None,
                                  granularity: str = 'hour') -> Dict[str, Any]:
        """Aggregate StarRocks query-log activity into time buckets.

        Groups distinct query ids by minute / hour / day and reports, per
        bucket, the total count plus the slow-query and ERROR-state counts.

        Args:
            cluster: Optional exact-match cluster filter.
            start_time: Optional lower time bound; ``None`` means unbounded
                and is echoed back as ``'all'``.
            end_time: Optional upper time bound, same convention.
            granularity: ``'minute'`` / ``'hour'`` / ``'day'``; any other
                value falls back to hourly buckets.

        Returns:
            Dict with ``'trends'`` (per-bucket counters), ``'time_range'``
            and the echoed ``'granularity'``.

        Raises:
            Exception: Re-raised after logging when the query fails.
        """
        connection = None
        try:
            connection = self._get_starrocks_connection()
            cur = connection.cursor()

            # Optional time-range predicate on the `timestamp` column.
            time_filter, time_args = self._build_time_filter(start_time, end_time, 'timestamp')
            base_where = "WHERE 1=1"

            # date_format patterns use doubled '%' so pymysql's parameter
            # interpolation leaves a single literal '%' in the final SQL.
            bucket_formats = {
                'minute': '%%Y-%%m-%%d %%H:%%i:00',
                'hour': '%%Y-%%m-%%d %%H:00:00',
                'day': '%%Y-%%m-%%d 00:00:00'
            }
            time_format = bucket_formats.get(granularity, '%%Y-%%m-%%d %%H:00:00')

            sql = f"""
                SELECT 
                    date_format(timestamp, '{time_format}') AS time_bucket,
                    COUNT(DISTINCT query_id) AS total_count,
                    COUNT(DISTINCT CASE WHEN is_slow_query = 1 THEN query_id END) AS slow_query_count,
                    COUNT(DISTINCT CASE WHEN query_state = 'ERROR' THEN query_id END) AS error_count
                FROM tds_starrocks_query_logs
                {base_where}
                {time_filter}
            """

            sql_args = list(time_args)
            if cluster:
                sql += " AND cluster = %s"
                sql_args.append(cluster)
            sql += " GROUP BY time_bucket ORDER BY time_bucket"

            cur.execute(sql, sql_args)

            trends = []
            for row in cur.fetchall():
                trends.append({
                    'time': str(row.get('time_bucket')),
                    'TOTAL': {'query_count': int(row.get('total_count') or 0)},
                    'SLOW': {'query_count': int(row.get('slow_query_count') or 0)},
                    'ERROR': {'query_count': int(row.get('error_count') or 0)}
                })

            return {
                'trends': trends,
                'time_range': {
                    'start': start_time or 'all',
                    'end': end_time or 'all'
                },
                'granularity': granularity
            }

        except Exception as e:
            logger.error(f"获取 StarRocks 查询趋势失败: {e}")
            raise
        finally:
            if connection:
                connection.close()
    
    
    def get_starrocks_query_user_stats(self, cluster: Optional[str] = None,
                                      start_time: Optional[str] = None, 
                                      end_time: Optional[str] = None,
                                      top_n: int = 20) -> Dict[str, Any]:
        """Per-user aggregates over the StarRocks query log.

        Rows with an empty / NULL ``query_user`` are excluded; the remaining
        users are ranked by distinct-query volume.

        Args:
            cluster: Optional exact-match cluster filter.
            start_time: Optional lower time bound (``None`` → unbounded).
            end_time: Optional upper time bound (``None`` → unbounded).
            top_n: Number of users to return (SQL ``LIMIT``).

        Returns:
            Dict with ``'user_stats'`` (totals, slow/success counts,
            success rate %, average exec time, last query time per user)
            and the echoed ``'time_range'``.

        Raises:
            Exception: Re-raised after logging when the query fails.
        """
        connection = None
        try:
            connection = self._get_starrocks_connection()
            cur = connection.cursor()

            # Optional time-range predicate on the `timestamp` column.
            time_clause, time_args = self._build_time_filter(start_time, end_time, 'timestamp')

            base_where = "WHERE query_user IS NOT NULL AND query_user != ''"
            sql_args = []
            if time_clause:
                base_where += time_clause
                sql_args.extend(time_args)
            if cluster:
                base_where += " AND cluster = %s"
                sql_args.append(cluster)

            sql = f"""
                SELECT 
                    query_user,
                    COUNT(DISTINCT query_id) AS total_queries,
                    COUNT(DISTINCT CASE WHEN is_slow_query = 1 THEN query_id END) AS slow_query_count,
                    COUNT(DISTINCT CASE WHEN query_state = 'OK' THEN query_id END) AS success_count,
                    AVG(CAST(perf_time_ms AS DOUBLE)) AS avg_exec_time,
                    MAX(timestamp) AS last_query_time
                FROM tds_starrocks_query_logs
                {base_where}
                GROUP BY query_user
                ORDER BY total_queries DESC
                LIMIT %s
            """
            sql_args.append(top_n)

            cur.execute(sql, sql_args)

            user_stats = []
            for row in cur.fetchall():
                total = int(row.get('total_queries') or 0)
                ok = int(row.get('success_count') or 0)
                user_stats.append({
                    'user': row.get('query_user'),
                    'total_queries': total,
                    'slow_query_count': int(row.get('slow_query_count') or 0),
                    'success_count': ok,
                    # Guard against a zero denominator (0 → 1) like the rest
                    # of the file's ratio computations.
                    'success_rate': round((ok / (total or 1)) * 100, 2),
                    'avg_exec_time': round(float(row.get('avg_exec_time') or 0), 2),
                    'last_query_time': self._format_datetime(row.get('last_query_time')) or ''
                })

            return {
                'user_stats': user_stats,
                'time_range': {
                    'start': start_time or 'all',
                    'end': end_time or 'all'
                }
            }

        except Exception as e:
            logger.error(f"获取 StarRocks 查询用户统计失败: {e}")
            raise
        finally:
            if connection:
                connection.close()
    
    
    def get_starrocks_query_database_stats(self, cluster: Optional[str] = None,
                                          start_time: Optional[str] = None, 
                                          end_time: Optional[str] = None,
                                          top_n: int = 20) -> Dict[str, Any]:
        """Per-database aggregates over the StarRocks query log.

        Rows with an empty / NULL ``query_database`` are excluded; the
        remaining databases are ranked by distinct-query volume.

        Args:
            cluster: Optional exact-match cluster filter.
            start_time: Optional lower time bound (``None`` → unbounded).
            end_time: Optional upper time bound (``None`` → unbounded).
            top_n: Number of databases to return (SQL ``LIMIT``).

        Returns:
            Dict with ``'database_stats'`` (query / slow-query counts,
            average exec time, scanned bytes, last query time per database)
            and the echoed ``'time_range'``.

        Raises:
            Exception: Re-raised after logging when the query fails.
        """
        connection = None
        try:
            connection = self._get_starrocks_connection()
            cur = connection.cursor()

            # Optional time-range predicate on the `timestamp` column.
            time_clause, time_args = self._build_time_filter(start_time, end_time, 'timestamp')

            base_where = "WHERE query_database IS NOT NULL AND query_database != ''"
            sql_args = []
            if time_clause:
                base_where += time_clause
                sql_args.extend(time_args)
            if cluster:
                base_where += " AND cluster = %s"
                sql_args.append(cluster)

            sql = f"""
                SELECT 
                    query_database,
                    COUNT(DISTINCT query_id) AS total_queries,
                    COUNT(DISTINCT CASE WHEN is_slow_query = 1 THEN query_id END) AS slow_query_count,
                    AVG(CAST(perf_time_ms AS DOUBLE)) AS avg_exec_time,
                    SUM(CAST(perf_scan_bytes AS BIGINT)) AS total_scan_bytes,
                    MAX(timestamp) AS last_query_time
                FROM tds_starrocks_query_logs
                {base_where}
                GROUP BY query_database
                ORDER BY total_queries DESC
                LIMIT %s
            """
            sql_args.append(top_n)

            cur.execute(sql, sql_args)

            database_stats = []
            for row in cur.fetchall():
                database_stats.append({
                    'database': row.get('query_database'),
                    'total_queries': int(row.get('total_queries') or 0),
                    'slow_query_count': int(row.get('slow_query_count') or 0),
                    'avg_exec_time': round(float(row.get('avg_exec_time') or 0), 2),
                    'total_scan_bytes': int(row.get('total_scan_bytes') or 0),
                    'last_query_time': self._format_datetime(row.get('last_query_time')) or ''
                })

            return {
                'database_stats': database_stats,
                'time_range': {
                    'start': start_time or 'all',
                    'end': end_time or 'all'
                }
            }

        except Exception as e:
            logger.error(f"获取 StarRocks 数据库统计失败: {e}")
            raise
        finally:
            if connection:
                connection.close()
    
    
    def get_starrocks_query_cluster_distribution(self, start_time: Optional[str] = None, 
                                                 end_time: Optional[str] = None) -> Dict[str, Any]:
        """Distribution of StarRocks query activity across clusters.

        A NULL cluster is reported under the label ``'unknown'``; clusters
        are ordered by descending query count.

        Args:
            start_time: Optional lower time bound (``None`` → unbounded).
            end_time: Optional upper time bound (``None`` → unbounded).

        Returns:
            Dict with ``'cluster_distribution'`` (query / slow-query counts
            and average exec time per cluster) and the echoed
            ``'time_range'``.

        Raises:
            Exception: Re-raised after logging when the query fails.
        """
        connection = None
        try:
            connection = self._get_starrocks_connection()
            cur = connection.cursor()

            # Optional time-range predicate on the `timestamp` column.
            time_clause, time_args = self._build_time_filter(start_time, end_time, 'timestamp')

            base_where = "WHERE 1=1"
            sql_args = []
            if time_clause:
                base_where += time_clause
                sql_args.extend(time_args)

            sql = f"""
                SELECT 
                    COALESCE(cluster, 'unknown') AS cluster,
                    COUNT(DISTINCT query_id) AS query_count,
                    COUNT(DISTINCT CASE WHEN is_slow_query = 1 THEN query_id END) AS slow_query_count,
                    AVG(CAST(perf_time_ms AS DOUBLE)) AS avg_exec_time
                FROM tds_starrocks_query_logs
                {base_where}
                GROUP BY COALESCE(cluster, 'unknown')
                ORDER BY query_count DESC
            """

            cur.execute(sql, sql_args)

            cluster_distribution = []
            for row in cur.fetchall():
                cluster_distribution.append({
                    'cluster': row.get('cluster') or 'unknown',
                    'query_count': int(row.get('query_count') or 0),
                    'slow_query_count': int(row.get('slow_query_count') or 0),
                    'avg_exec_time': round(float(row.get('avg_exec_time') or 0), 2)
                })

            return {
                'cluster_distribution': cluster_distribution,
                'time_range': {
                    'start': start_time or 'all',
                    'end': end_time or 'all'
                }
            }

        except Exception as e:
            logger.error(f"获取 StarRocks 集群分布失败: {e}")
            raise
        finally:
            if connection:
                connection.close()
    
    
    def get_starrocks_query_top_queries(self, cluster: Optional[str] = None,
                                       start_time: Optional[str] = None, 
                                       end_time: Optional[str] = None,
                                       top_n: int = 20,
                                       order_by: str = 'time') -> Dict[str, Any]:
        """Return the top-N individual StarRocks queries for SQL analysis.

        System-level noise is filtered out (USE/SET/SHOW commands, version
        and database() probes, comment-only statements, separator lines,
        statements of 20 characters or less, rows without a positive
        execution time), then the heaviest remaining queries are returned.

        Args:
            cluster: Optional exact-match cluster filter.
            start_time: Optional lower time bound (``None`` → unbounded).
            end_time: Optional upper time bound (``None`` → unbounded).
            top_n: Number of queries to return (SQL ``LIMIT``).
            order_by: Sort dimension — ``'time'`` (execution time, default),
                ``'scan_bytes'``, ``'scan_rows'`` or ``'return_rows'``;
                unknown values fall back to ``'time'``. Previously this
                argument was accepted but silently ignored; it is now
                applied through a column whitelist.

        Returns:
            Dict with ``'top_queries'``, the echoed ``'time_range'`` and
            ``'order_by'``.

        Raises:
            Exception: Re-raised after logging when the query fails.
        """
        conn = None
        try:
            conn = self._get_starrocks_connection()
            cursor = conn.cursor()
            
            # Optional time-range predicate on the `timestamp` column.
            time_filter, time_params = self._build_time_filter(start_time, end_time, 'timestamp')
            # Filter out system-level simple commands and comments so only
            # business queries remain ('%%' survives pymysql interpolation
            # as a literal '%').
            base_where = "WHERE query_stmt IS NOT NULL AND query_stmt != '' " \
                "AND perf_time_ms IS NOT NULL " \
                "AND CAST(perf_time_ms AS DOUBLE) > 0 " \
                "AND UPPER(TRIM(query_stmt)) NOT LIKE 'USE %%' " \
                "AND UPPER(TRIM(query_stmt)) NOT LIKE 'SET %%' " \
                "AND UPPER(TRIM(query_stmt)) NOT LIKE 'SHOW %%' " \
                "AND UPPER(query_stmt) NOT LIKE '%%VERSION()%%' " \
                "AND UPPER(query_stmt) NOT LIKE '%%@@VERSION%%' " \
                "AND UPPER(query_stmt) NOT LIKE '%%DATABASE()%%' " \
                "AND UPPER(TRIM(query_stmt)) NOT LIKE 'SELECT 1' " \
                "AND TRIM(query_stmt) NOT LIKE '-%%' " \
                "AND TRIM(query_stmt) NOT LIKE '/*%%' " \
                "AND query_stmt NOT LIKE '%%============%%' " \
                "AND LENGTH(query_stmt) > 20"
            
            params = []
            if time_filter:
                base_where += time_filter
                params.extend(time_params)
            
            if cluster:
                base_where += " AND cluster = %s"
                params.append(cluster)
            
            # Map the public order_by value onto a whitelisted SELECT alias;
            # interpolating only whitelisted identifiers keeps the ORDER BY
            # clause safe from SQL injection. Unknown values keep the
            # historical behavior (slowest queries first).
            order_column = {
                'time': 'exec_time',
                'scan_bytes': 'scan_bytes',
                'scan_rows': 'scan_rows',
                'return_rows': 'return_rows',
            }.get(order_by, 'exec_time')
            
            top_queries_sql = f"""
                SELECT 
                    log_id,
                    query_id,
                    query_stmt AS sample_query,
                    query_user,
                    query_database,
                    query_state,
                    CAST(perf_time_ms AS DOUBLE) AS exec_time,
                    CAST(perf_scan_bytes AS BIGINT) AS scan_bytes,
                    CAST(perf_scan_rows AS BIGINT) AS scan_rows,
                    CAST(perf_return_rows AS BIGINT) AS return_rows,
                    is_slow_query,
                    cluster,
                    timestamp AS exec_time_str
                FROM tds_starrocks_query_logs
                {base_where}
                ORDER BY {order_column} DESC
                LIMIT %s
            """
            params.append(top_n)
            
            cursor.execute(top_queries_sql, params)
            rows = cursor.fetchall()
            
            top_queries = [
                {
                    'query_digest': row.get('query_id'),  # query_id doubles as the unique key
                    'sample_query': row.get('sample_query'),
                    'total_queries': 1,  # each row is a single execution
                    'slow_query_count': 1 if row.get('is_slow_query') else 0,
                    'slow_query_rate': 100.0 if row.get('is_slow_query') else 0.0,
                    'avg_exec_time': round(float(row.get('exec_time') or 0), 2),
                    'max_exec_time': round(float(row.get('exec_time') or 0), 2),
                    'last_exec_time': self._format_datetime(row.get('exec_time_str')) or '',
                    'query_user': row.get('query_user'),
                    'query_database': row.get('query_database'),
                    'query_state': row.get('query_state'),
                    'scan_bytes': int(row.get('scan_bytes') or 0),
                    'scan_rows': int(row.get('scan_rows') or 0),
                    'return_rows': int(row.get('return_rows') or 0),
                    'cluster': row.get('cluster')
                }
                for row in rows
            ]
            
            return {
                'top_queries': top_queries,
                'time_range': {
                    'start': start_time if start_time else 'all',
                    'end': end_time if end_time else 'all'
                },
                'order_by': order_by
            }
            
        except Exception as e:
            logger.error(f"获取 StarRocks SQL执行分析失败: {e}")
            raise
        finally:
            if conn:
                conn.close()