"""
System Monitoring Service
系统监控服务 - 系统监控、统计、日志管理
"""

import uuid
import psutil
import asyncio
from typing import Optional, Dict, Any, List
from datetime import datetime, timedelta

from sqlalchemy.ext.asyncio import AsyncSession
from sqlalchemy import and_, case, delete, func, or_, select, text
from sqlalchemy.orm import selectinload
import structlog

from ..models.user import User
from ..models.system_log import SystemLog, LogLevel, LogCategory, create_log_entry
from ..models.generated_content import GeneratedContent
from ..models.creation_request import CreationRequest
from ..models.task_queue import TaskQueue, TaskStatus
from ..core.database import DatabaseSession
from ..core.config import settings
from ..core.exceptions import ValidationError


class SimpleCache:
    """A minimal in-memory TTL cache guarded by an asyncio lock.

    Each value is stored together with an absolute expiry timestamp.
    Stale entries are evicted lazily on read, or in bulk via
    :meth:`cleanup_expired`.
    """

    def __init__(self, default_ttl: int = 300):
        # key -> (value, absolute expiry time)
        self._cache: Dict[str, tuple[Any, datetime]] = {}
        self.default_ttl = default_ttl
        self._lock = asyncio.Lock()

    async def get(self, key: str) -> Optional[Any]:
        """Return the cached value for *key*, or None if missing or expired."""
        async with self._lock:
            # EAFP: a single dict lookup instead of membership test + lookup.
            try:
                value, expiry = self._cache[key]
            except KeyError:
                return None
            if datetime.utcnow() < expiry:
                return value
            # Lazily evict the stale entry so it cannot be returned again.
            del self._cache[key]
        return None

    async def set(self, key: str, value: Any, ttl: Optional[int] = None) -> None:
        """Store *value* under *key* for *ttl* seconds (default TTL if falsy)."""
        expiry = datetime.utcnow() + timedelta(seconds=ttl or self.default_ttl)
        async with self._lock:
            self._cache[key] = (value, expiry)

    async def clear(self) -> None:
        """Drop every cached entry."""
        async with self._lock:
            self._cache.clear()

    async def cleanup_expired(self) -> int:
        """Remove all expired entries and return how many were removed."""
        now = datetime.utcnow()
        async with self._lock:
            # Collect keys first; deleting while iterating a dict is illegal.
            expired_keys = [
                key for key, (_, expiry) in self._cache.items() if now >= expiry
            ]
            for key in expired_keys:
                del self._cache[key]
        return len(expired_keys)


# 配置结构化日志
logger = structlog.get_logger(__name__)


class SystemMonitoringService:
    """系统监控服务类"""

    def __init__(self, db_session: DatabaseSession):
        """Bind the service to a database session factory and reset state."""
        self.db_session = db_session
        # Rolling snapshot written by _collect_system_metrics().
        self.monitoring_data: Dict[str, Any] = {}
        self.is_monitoring = False
        # Local in-memory cache with a 5-minute default TTL.
        self._cache = SimpleCache(default_ttl=300)

    async def log_system_event(
        self,
        level: str,
        message: str,
        category: str = LogCategory.SYSTEM,
        user_id: Optional[uuid.UUID] = None,
        **kwargs,
    ) -> Optional[SystemLog]:
        """Persist a system event to the database log table.

        Args:
            level: Log level (e.g. ``LogLevel.INFO``).
            message: Human-readable log message.
            category: Log category, defaults to ``LogCategory.SYSTEM``.
            user_id: Optional ID of the user associated with the event.
            **kwargs: Extra fields forwarded to ``create_log_entry``.

        Returns:
            The created ``SystemLog`` row, or ``None`` if persistence failed.
        """
        try:
            async with self.db_session as session:
                log_entry = create_log_entry(
                    level, message, category, user_id, **kwargs
                )
                session.add(log_entry)
                await session.commit()
                await session.refresh(log_entry)
                return log_entry
        except Exception as e:
            logger.error("记录系统事件失败", error=str(e), level=level, message=message)
            # DB write failed: fall back to the structured logger so the event
            # is not lost. BUGFIX: structlog loggers expose level-named methods
            # (info/warning/error/...); the previous logger.log(level.lower(),
            # ...) call passed a string where stdlib logging expects an int
            # level and would itself raise inside this handler.
            fallback = getattr(logger, level.lower(), logger.info)
            fallback(message, **kwargs)
            return None

    async def log_error(
        self,
        message: str,
        error_type: Optional[str] = None,
        error_code: Optional[str] = None,
        stack_trace: Optional[str] = None,
        user_id: Optional[uuid.UUID] = None,
        **kwargs,
    ) -> Optional[SystemLog]:
        """Record an error event in the system log.

        Convenience wrapper around ``log_system_event`` that files the entry
        under ``LogLevel.ERROR`` / ``LogCategory.ERROR``.

        Args:
            message: Error message.
            error_type: Error type / classification, if known.
            error_code: Application-specific error code, if any.
            stack_trace: Captured stack trace, if available.
            user_id: ID of the user associated with the error.
            **kwargs: Extra fields forwarded to the log entry.

        Returns:
            The created ``SystemLog`` row, or ``None`` if persistence failed
            (``log_system_event`` swallows DB errors and returns ``None``).
        """
        # Error-specific fields take precedence over same-named kwargs.
        kwargs.update(
            {
                "error_type": error_type,
                "error_code": error_code,
                "stack_trace": stack_trace,
            }
        )
        return await self.log_system_event(
            LogLevel.ERROR, message, LogCategory.ERROR, user_id, **kwargs
        )

    async def log_performance_event(
        self,
        action: str,
        execution_time_ms: int,
        memory_usage_mb: int = None,
        cpu_usage_percent: int = None,
        user_id: Optional[uuid.UUID] = None,
        **kwargs,
    ) -> SystemLog:
        """Record a performance measurement in the system log.

        Args:
            action: Name of the measured operation.
            execution_time_ms: Wall-clock execution time in milliseconds.
            memory_usage_mb: Memory consumption in MB, if measured.
            cpu_usage_percent: CPU utilisation percentage, if measured.
            user_id: ID of the user who triggered the operation.
            **kwargs: Extra fields forwarded to the log entry.

        Returns:
            SystemLog: The created log row.
        """
        metrics = {
            "action": action,
            "execution_time_ms": execution_time_ms,
            "memory_usage_mb": memory_usage_mb,
            "cpu_usage_percent": cpu_usage_percent,
        }
        # Metric fields override any same-named caller kwargs.
        return await self.log_system_event(
            LogLevel.INFO,
            f"性能事件: {action}",
            LogCategory.PERFORMANCE,
            user_id,
            **{**kwargs, **metrics},
        )

    async def log_security_event(
        self,
        message: str,
        security_level: str = "medium",
        security_category: str = None,
        user_id: Optional[uuid.UUID] = None,
        client_ip: str = None,
        **kwargs,
    ) -> SystemLog:
        """Record a security-related event at WARNING level.

        Args:
            message: Description of the security event.
            security_level: Severity bucket (defaults to "medium").
            security_category: Security sub-category, if any.
            user_id: ID of the user involved, if known.
            client_ip: Originating client IP address, if known.
            **kwargs: Extra fields forwarded to the log entry.

        Returns:
            SystemLog: The created log row.
        """
        security_fields = {
            "security_level": security_level,
            "security_category": security_category,
            "client_ip": client_ip,
        }
        # Security fields override any same-named caller kwargs.
        return await self.log_system_event(
            LogLevel.WARNING,
            message,
            LogCategory.SECURITY,
            user_id,
            **{**kwargs, **security_fields},
        )

    async def get_system_logs(
        self,
        level: Optional[str] = None,
        category: Optional[str] = None,
        user_id: Optional[uuid.UUID] = None,
        start_time: Optional[datetime] = None,
        end_time: Optional[datetime] = None,
        limit: int = 100,
        offset: int = 0,
    ) -> List[SystemLog]:
        """Fetch system log rows, newest first, with optional filters.

        Args:
            level: Only return rows with this log level.
            category: Only return rows with this category.
            user_id: Only return rows for this user.
            start_time: Inclusive lower bound on the timestamp.
            end_time: Inclusive upper bound on the timestamp.
            limit: Maximum number of rows to return.
            offset: Number of rows to skip (for pagination).

        Returns:
            List[SystemLog]: Matching rows ordered by timestamp descending.
        """
        # Collect only the filters that were actually supplied.
        conditions = []
        if level:
            conditions.append(SystemLog.level == level)
        if category:
            conditions.append(SystemLog.category == category)
        if user_id:
            conditions.append(SystemLog.user_id == user_id)
        if start_time:
            conditions.append(SystemLog.timestamp >= start_time)
        if end_time:
            conditions.append(SystemLog.timestamp <= end_time)

        stmt = select(SystemLog)
        if conditions:
            # Multiple where() criteria are AND-ed together.
            stmt = stmt.where(*conditions)
        stmt = stmt.order_by(SystemLog.timestamp.desc()).offset(offset).limit(limit)

        async with self.db_session as session:
            result = await session.execute(stmt)
            return result.scalars().all()

    async def get_system_statistics(self) -> Dict[str, Any]:
        """Collect aggregate system statistics, cached for 5 minutes.

        Returns:
            Dict[str, Any]: user/content/task/log/resource statistics plus
            application metadata and a generation timestamp.
        """
        # Shared cache layer (local import avoids a module-level cycle).
        from ..core.cache import cache_manager, create_cache_key

        cache_key = create_cache_key("system_statistics")
        cache = (
            cache_manager.get_cache("system_stats") or cache_manager.get_default_cache()
        )

        # Serve from cache when a fresh enough snapshot exists.
        cached_result = await cache.get(cache_key)
        if cached_result is not None:
            logger.debug("返回缓存的系统统计信息")
            return cached_result

        async with self.db_session as session:
            # BUGFIX: a single SQLAlchemy AsyncSession must not be used from
            # multiple concurrent tasks; the previous asyncio.create_task +
            # gather approach raced the session. Run the queries sequentially.
            user_stats = await self._get_user_statistics(session)
            content_stats = await self._get_content_statistics(session)
            task_stats = await self._get_task_statistics(session)
            log_stats = await self._get_log_statistics(session)

        # Host resource sampling does not touch the DB session.
        resource_stats = await self._get_system_resource_stats()

        result = {
            "timestamp": datetime.utcnow().isoformat(),
            "users": user_stats,
            "content": content_stats,
            "tasks": task_stats,
            "logs": log_stats,
            "system_resources": resource_stats,
            "application": {
                "name": settings.app_name,
                "version": settings.app_version,
                "environment": settings.app_env,
                "debug": settings.debug,
            },
        }

        # Cache the snapshot for 5 minutes.
        await cache.set(cache_key, result, ttl=300)

        return result

    async def get_user_activity_stats(self, days: int = 30) -> Dict[str, Any]:
        """Summarise user activity over the last *days* days.

        Derived from ``SystemLog`` rows in the USER_ACTIVITY category.

        Args:
            days: Size of the look-back window in days.

        Returns:
            Dict[str, Any]: distinct active-user count, per-day activity,
            top-10 most active users and the average daily active count.
        """
        cutoff_date = datetime.utcnow() - timedelta(days=days)

        # Common filter: activity rows with a user, inside the window.
        activity_filter = and_(
            SystemLog.user_id.isnot(None),
            SystemLog.timestamp >= cutoff_date,
            SystemLog.category == LogCategory.USER_ACTIVITY,
        )

        async with self.db_session as session:
            # Distinct users active at any point in the window.
            active_users_result = await session.execute(
                select(func.count(func.distinct(SystemLog.user_id))).where(
                    activity_filter
                )
            )
            active_users = active_users_result.scalar()

            # Distinct active users per calendar day.
            daily_activity_result = await session.execute(
                select(
                    func.date(SystemLog.timestamp).label("date"),
                    func.count(func.distinct(SystemLog.user_id)),
                )
                .where(activity_filter)
                .group_by(func.date(SystemLog.timestamp))
                .order_by(func.date(SystemLog.timestamp))
            )
            daily_activity = [
                {"date": str(date), "active_users": count}
                for date, count in daily_activity_result.all()
            ]

            # Ten users with the most activity rows in the window.
            top_users_result = await session.execute(
                select(SystemLog.user_id, func.count(SystemLog.id))
                .where(activity_filter)
                .group_by(SystemLog.user_id)
                .order_by(func.count(SystemLog.id).desc())
                .limit(10)
            )
            top_users = [
                {"user_id": str(user_id), "activity_count": count}
                for user_id, count in top_users_result.all()
            ]

            # BUGFIX: replaced the fragile `len(...) > 0 and X or 0` and/or
            # hack with an explicit conditional expression.
            if daily_activity:
                average_daily = sum(
                    item["active_users"] for item in daily_activity
                ) / len(daily_activity)
            else:
                average_daily = 0

            return {
                "period_days": days,
                "total_active_users": active_users or 0,
                "daily_activity": daily_activity,
                "top_active_users": top_users,
                "average_daily_active": average_daily,
            }

    async def get_performance_metrics(self, hours: int = 24) -> Dict[str, Any]:
        """Summarise performance log entries over the last *hours* hours.

        Args:
            hours: Size of the look-back window in hours.

        Returns:
            Dict[str, Any]: average/max response times plus a fast/medium/slow
            request distribution (<100ms / <1000ms / rest).
        """
        cutoff_time = datetime.utcnow() - timedelta(hours=hours)

        # Common filter: timed performance rows inside the window.
        perf_filter = and_(
            SystemLog.execution_time_ms.isnot(None),
            SystemLog.timestamp >= cutoff_time,
            SystemLog.category == LogCategory.PERFORMANCE,
        )

        async with self.db_session as session:
            # Average response time.
            avg_response_time = (
                await session.execute(
                    select(func.avg(SystemLog.execution_time_ms)).where(perf_filter)
                )
            ).scalar()

            # Worst-case response time.
            max_response_time = (
                await session.execute(
                    select(func.max(SystemLog.execution_time_ms)).where(perf_filter)
                )
            ).scalar()

            # BUGFIX: CASE is a SQL expression, not a callable function --
            # the previous func.case(...) rendered invalid SQL ("CASE(...)").
            # Use sqlalchemy.case instead.
            speed_bucket = case(
                (SystemLog.execution_time_ms < 100, "fast"),
                (SystemLog.execution_time_ms < 1000, "medium"),
                else_="slow",
            ).label("speed_category")
            distribution_result = await session.execute(
                select(speed_bucket, func.count(SystemLog.id))
                .where(perf_filter)
                .group_by("speed_category")
            )
            response_distribution = dict(distribution_result.all())

            return {
                "period_hours": hours,
                "average_response_time_ms": avg_response_time or 0,
                "max_response_time_ms": max_response_time or 0,
                "response_distribution": response_distribution,
                "fast_requests": response_distribution.get("fast", 0),
                "medium_requests": response_distribution.get("medium", 0),
                "slow_requests": response_distribution.get("slow", 0),
            }

    async def get_error_statistics(self, hours: int = 24) -> Dict[str, Any]:
        """Summarise ERROR/CRITICAL log entries over the last *hours* hours.

        Args:
            hours: Size of the look-back window in hours.

        Returns:
            Dict[str, Any]: total error count plus breakdowns by error type
            and by log category, both sorted by descending count.
        """
        cutoff_time = datetime.utcnow() - timedelta(hours=hours)

        # Shared filter fragments reused by all three queries below.
        is_error = SystemLog.level.in_([LogLevel.ERROR, LogLevel.CRITICAL])
        in_window = SystemLog.timestamp >= cutoff_time

        async with self.db_session as session:
            # Overall count of error-level rows in the window.
            total_errors = (
                await session.execute(
                    select(func.count(SystemLog.id)).where(and_(is_error, in_window))
                )
            ).scalar()

            # Breakdown by recorded error_type (rows without a type excluded).
            type_rows = await session.execute(
                select(SystemLog.error_type, func.count(SystemLog.id))
                .where(and_(is_error, SystemLog.error_type.isnot(None), in_window))
                .group_by(SystemLog.error_type)
                .order_by(func.count(SystemLog.id).desc())
            )
            error_types = [
                {"error_type": etype, "count": cnt} for etype, cnt in type_rows.all()
            ]

            # Breakdown by log category.
            category_rows = await session.execute(
                select(SystemLog.category, func.count(SystemLog.id))
                .where(and_(is_error, in_window))
                .group_by(SystemLog.category)
                .order_by(func.count(SystemLog.id).desc())
            )
            error_categories = [
                {"category": cat, "count": cnt} for cat, cnt in category_rows.all()
            ]

            return {
                "period_hours": hours,
                "total_errors": total_errors or 0,
                "error_types": error_types,
                "error_categories": error_categories,
            }

    async def cleanup_old_logs(self, max_age_days: int = 90) -> int:
        """Delete log rows older than *max_age_days* (capped at 10000/call).

        Args:
            max_age_days: Retention window in days.

        Returns:
            int: Number of rows deleted in this pass.
        """
        cutoff_date = datetime.utcnow() - timedelta(days=max_age_days)

        async with self.db_session as session:
            # Fetch only the primary keys (still capped at 10000 per run),
            # then issue ONE bulk DELETE instead of loading full ORM objects
            # and deleting them row by row.
            old_ids_result = await session.execute(
                select(SystemLog.id)
                .where(SystemLog.timestamp < cutoff_date)
                .limit(10000)
            )
            old_ids = list(old_ids_result.scalars().all())

            deleted_count = 0
            if old_ids:
                await session.execute(
                    delete(SystemLog).where(SystemLog.id.in_(old_ids))
                )
                deleted_count = len(old_ids)

            await session.commit()

            logger.info(
                "清理旧日志完成", deleted_count=deleted_count, max_age_days=max_age_days
            )
            return deleted_count

    async def start_monitoring(self, interval_seconds: int = 60) -> None:
        """Run the periodic monitoring loop until ``stop_monitoring`` is called.

        Args:
            interval_seconds: Seconds to sleep between monitoring passes.
        """
        # Guard: only one loop instance at a time.
        if self.is_monitoring:
            logger.warning("系统监控已在运行中")
            return

        self.is_monitoring = True
        logger.info("开始系统监控", interval_seconds=interval_seconds)

        while self.is_monitoring:
            try:
                # One monitoring pass: sample metrics, then persist status.
                await self._collect_system_metrics()
                await self._log_system_status()
            except Exception as e:
                # Keep the loop alive; a single failed pass is logged only.
                logger.error("系统监控循环出错", error=str(e))
            await asyncio.sleep(interval_seconds)

    async def stop_monitoring(self) -> None:
        """Signal the monitoring loop to exit after its current pass."""
        logger.info("停止系统监控")
        self.is_monitoring = False

    # 私有辅助方法

    async def _get_user_statistics(self, session: AsyncSession) -> Dict[str, Any]:
        """Aggregate user counts: total, per-status, per-role and new today.

        BUGFIX: the queries now run sequentially -- a single AsyncSession must
        not be shared between concurrent tasks, so the previous asyncio.gather
        over one session was unsafe.
        """
        today = datetime.utcnow().date()

        total_users = (
            await session.execute(select(func.count(User.id)))
        ).scalar()
        by_status = dict(
            (
                await session.execute(
                    select(User.status, func.count(User.id)).group_by(User.status)
                )
            ).all()
        )
        by_role = dict(
            (
                await session.execute(
                    select(User.role, func.count(User.id)).group_by(User.role)
                )
            ).all()
        )
        today_users = (
            await session.execute(
                select(func.count(User.id)).where(func.date(User.created_at) == today)
            )
        ).scalar()

        return {
            "total_users": total_users or 0,
            "today_new_users": today_users or 0,
            "by_status": by_status,
            "by_role": by_role,
        }

    async def _get_content_statistics(self, session: AsyncSession) -> Dict[str, Any]:
        """Aggregate content counts: total, per-status, per-type and new today.

        BUGFIX: the queries now run sequentially -- a single AsyncSession must
        not be shared between concurrent tasks, so the previous asyncio.gather
        over one session was unsafe.
        """
        today = datetime.utcnow().date()

        total_content = (
            await session.execute(select(func.count(GeneratedContent.id)))
        ).scalar()
        by_status = dict(
            (
                await session.execute(
                    select(
                        GeneratedContent.status, func.count(GeneratedContent.id)
                    ).group_by(GeneratedContent.status)
                )
            ).all()
        )
        by_type = dict(
            (
                await session.execute(
                    select(
                        GeneratedContent.content_type, func.count(GeneratedContent.id)
                    ).group_by(GeneratedContent.content_type)
                )
            ).all()
        )
        today_content = (
            await session.execute(
                select(func.count(GeneratedContent.id)).where(
                    func.date(GeneratedContent.created_at) == today
                )
            )
        ).scalar()

        return {
            "total_content": total_content or 0,
            "today_new_content": today_content or 0,
            "by_status": by_status,
            "by_type": by_type,
        }

    async def _get_task_statistics(self, session: AsyncSession) -> Dict[str, Any]:
        """Aggregate task-queue counts: total, per-status and new today.

        BUGFIX: the queries now run sequentially -- a single AsyncSession must
        not be shared between concurrent tasks, so the previous asyncio.gather
        over one session was unsafe.
        """
        today = datetime.utcnow().date()

        total_tasks = (
            await session.execute(select(func.count(TaskQueue.id)))
        ).scalar()
        by_status = dict(
            (
                await session.execute(
                    select(TaskQueue.status, func.count(TaskQueue.id)).group_by(
                        TaskQueue.status
                    )
                )
            ).all()
        )
        today_tasks = (
            await session.execute(
                select(func.count(TaskQueue.id)).where(
                    func.date(TaskQueue.created_at) == today
                )
            )
        ).scalar()

        return {
            "total_tasks": total_tasks or 0,
            "today_new_tasks": today_tasks or 0,
            "by_status": by_status,
        }

    async def _get_log_statistics(self, session: AsyncSession) -> Dict[str, Any]:
        """Aggregate today's log counts: total, per-level and per-category."""
        today = datetime.utcnow().date()
        # Same calendar-day filter for all three queries.
        today_filter = func.date(SystemLog.timestamp) == today

        # Count per log level.
        by_level = dict(
            (
                await session.execute(
                    select(SystemLog.level, func.count(SystemLog.id))
                    .where(today_filter)
                    .group_by(SystemLog.level)
                )
            ).all()
        )

        # Count per log category.
        by_category = dict(
            (
                await session.execute(
                    select(SystemLog.category, func.count(SystemLog.id))
                    .where(today_filter)
                    .group_by(SystemLog.category)
                )
            ).all()
        )

        # Total rows written today.
        total_logs = (
            await session.execute(
                select(func.count(SystemLog.id)).where(today_filter)
            )
        ).scalar()

        return {
            "today_logs": total_logs or 0,
            "today_by_level": by_level,
            "today_by_category": by_category,
        }

    async def _get_system_resource_stats(self) -> Dict[str, Any]:
        """Sample host CPU/memory/disk/load without blocking the event loop.

        Returns:
            Dict[str, Any]: usage percentages, free memory/disk and 1/5/15
            minute load averages; all zeros if sampling fails.
        """
        try:
            # BUGFIX: psutil.cpu_percent(interval=1) blocks the calling thread
            # for a full second; run it in a worker thread so the event loop
            # keeps serving other tasks during the sample window.
            cpu_percent = await asyncio.to_thread(psutil.cpu_percent, 1)

            # Memory usage.
            memory = psutil.virtual_memory()
            memory_percent = memory.percent
            memory_available_mb = memory.available / (1024 * 1024)

            # Disk usage of the root filesystem.
            disk = psutil.disk_usage("/")
            disk_percent = disk.percent
            disk_free_gb = disk.free / (1024 * 1024 * 1024)

            # getloadavg is unavailable on some platforms (e.g. Windows).
            load_avg = (
                psutil.getloadavg() if hasattr(psutil, "getloadavg") else (0, 0, 0)
            )

            return {
                "cpu_usage_percent": cpu_percent,
                "memory_usage_percent": memory_percent,
                "memory_available_mb": round(memory_available_mb, 2),
                "disk_usage_percent": disk_percent,
                "disk_free_gb": round(disk_free_gb, 2),
                "load_average": {
                    "1min": load_avg[0],
                    "5min": load_avg[1],
                    "15min": load_avg[2],
                },
            }
        except Exception as e:
            logger.error("获取系统资源统计失败", error=str(e))
            # Degrade to zeros rather than failing the caller.
            return {
                "cpu_usage_percent": 0,
                "memory_usage_percent": 0,
                "memory_available_mb": 0,
                "disk_usage_percent": 0,
                "disk_free_gb": 0,
                "load_average": {"1min": 0, "5min": 0, "15min": 0},
            }

    async def _collect_system_metrics(self) -> None:
        """Sample DB connection count and host resources into monitoring_data."""
        try:
            # Count active PostgreSQL backends.
            async with self.db_session as session:
                row = await session.execute(
                    text("SELECT count(*) FROM pg_stat_activity WHERE state = 'active'")
                )
                active_connections = row.scalar()

            # Merge the fresh snapshot into the rolling monitoring state.
            snapshot = {
                "timestamp": datetime.utcnow().isoformat(),
                "active_db_connections": active_connections,
                "system_resources": await self._get_system_resource_stats(),
            }
            self.monitoring_data.update(snapshot)

        except Exception as e:
            logger.error("收集系统指标失败", error=str(e))

    async def _log_system_status(self) -> None:
        """Persist a snapshot of current system statistics as an INFO log."""
        try:
            stats = await self.get_system_statistics()

            # Only the operational sections are persisted as metadata.
            snapshot = {
                key: stats[key]
                for key in ("users", "content", "tasks", "system_resources")
            }
            await self.log_system_event(
                LogLevel.INFO,
                "系统状态监控",
                LogCategory.SYSTEM,
                metadata=snapshot,
            )

        except Exception as e:
            logger.error("记录系统状态失败", error=str(e))

    async def get_health_status(self) -> Dict[str, Any]:
        """Run health checks: database connectivity plus resource thresholds.

        Returns:
            Dict[str, Any]: overall ``status`` ("healthy"/"unhealthy"), a
            timestamp, and per-check results. Resource usage above 90% is
            reported as "warning" without flipping the overall status; only
            check failures mark the system "unhealthy".
        """
        health_status = {
            "status": "healthy",
            "timestamp": datetime.utcnow().isoformat(),
            "checks": {},
        }

        try:
            # Database connectivity probe.
            async with self.db_session as session:
                await session.execute(text("SELECT 1"))
                health_status["checks"]["database"] = {"status": "healthy"}
        except Exception as e:
            health_status["checks"]["database"] = {
                "status": "unhealthy",
                "error": str(e),
            }
            health_status["status"] = "unhealthy"

        try:
            resources = await self._get_system_resource_stats()

            # Same 90%-usage warning rule for CPU, memory and disk --
            # replaces three copy-pasted if/else blocks.
            for check_name, metric_key in (
                ("cpu", "cpu_usage_percent"),
                ("memory", "memory_usage_percent"),
                ("disk", "disk_usage_percent"),
            ):
                usage = resources[metric_key]
                health_status["checks"][check_name] = {
                    "status": "warning" if usage > 90 else "healthy",
                    "usage": usage,
                }

        except Exception as e:
            health_status["checks"]["system_resources"] = {
                "status": "unhealthy",
                "error": str(e),
            }
            health_status["status"] = "unhealthy"

        return health_status
