"""
GPU资源相关的数据模型定义
"""
from datetime import datetime
from typing import Optional, List, Dict, Any
from pydantic import BaseModel, Field, field_validator
from enum import Enum


class GPUStatus(str, Enum):
    """Lifecycle states a GPU can report.

    Inherits from ``str`` so members compare equal to their raw string
    values and serialize transparently in Pydantic models / JSON.
    """
    AVAILABLE = "available"      # idle and ready for allocation
    BUSY = "busy"                # currently running one or more tasks
    ERROR = "error"              # faulted; needs operator attention
    MAINTENANCE = "maintenance"  # deliberately taken out of rotation
    OFFLINE = "offline"          # not reachable / powered down


class TaskPriority(str, Enum):
    """Scheduling priority levels for AI tasks (str-valued for JSON I/O)."""
    LOW = "low"
    NORMAL = "normal"      # default priority used by the task models
    HIGH = "high"
    CRITICAL = "critical"  # highest urgency; scheduled ahead of all others


class TaskStatus(str, Enum):
    """Lifecycle states of a scheduled AI task (str-valued for JSON I/O)."""
    PENDING = "pending"      # queued, waiting for resources
    RUNNING = "running"      # actively executing on a GPU
    COMPLETED = "completed"  # finished successfully
    FAILED = "failed"        # terminated with an error
    CANCELLED = "cancelled"  # stopped before completion by request


class GPUInfo(BaseModel):
    """Snapshot of a single GPU's identity, memory, load and health telemetry."""
    gpu_id: int = Field(..., description="GPU ID")
    name: str = Field(..., description="GPU名称")
    memory_total: int = Field(..., description="总显存(MB)")
    memory_used: int = Field(0, description="已用显存(MB)")
    memory_free: int = Field(..., description="可用显存(MB)")
    utilization: float = Field(0.0, ge=0, le=100, description="GPU利用率(%)")
    temperature: Optional[float] = Field(None, description="温度(°C)")
    power_usage: Optional[float] = Field(None, description="功耗(W)")
    status: GPUStatus = Field(GPUStatus.AVAILABLE, description="GPU状态")
    driver_version: Optional[str] = Field(None, description="驱动版本")
    cuda_version: Optional[str] = Field(None, description="CUDA版本")

    @field_validator('memory_free', mode='before')
    @classmethod
    def calculate_memory_free(cls, v, info):
        """Derive free memory as total - used so the three fields stay consistent.

        Bug fix: the previous truthiness check (`if info.data.get(...)`) skipped
        the derivation whenever memory_used was 0 — a legitimate value and the
        field's own default. Explicit `is not None` checks handle that case.
        Falls back to the supplied value when either input is missing
        (e.g. because its own validation failed).
        """
        total = info.data.get('memory_total')
        used = info.data.get('memory_used')
        if total is not None and used is not None:
            return total - used
        return v

    model_config = {
        "json_schema_extra": {
            "example": {
                "gpu_id": 0,
                "name": "NVIDIA GeForce RTX 4090",
                "memory_total": 24576,
                "memory_used": 8192,
                "memory_free": 16384,
                "utilization": 75.5,
                "temperature": 68.0,
                "power_usage": 350.0,
                "status": "busy",
                "driver_version": "535.86.05",
                "cuda_version": "12.2"
            }
        }
    }


class GPUResourceRequest(BaseModel):
    """Resource requirements a task declares when asking for GPU capacity."""
    # Required GPU memory in MB; must be strictly positive.
    memory_required: int = Field(..., gt=0, description="所需显存(MB)")
    compute_capability: Optional[str] = Field(default=None, description="计算能力要求")
    exclusive: bool = Field(default=False, description="是否独占GPU")
    max_duration: Optional[int] = Field(default=None, description="最大使用时长(秒)")
    preferred_gpu_ids: Optional[List[int]] = Field(default=None, description="首选GPU ID列表")

    # Example payload surfaced in the generated OpenAPI/JSON schema.
    model_config = {
        "json_schema_extra": {
            "example": {
                "memory_required": 4096,
                "compute_capability": "8.6",
                "exclusive": False,
                "max_duration": 3600,
                "preferred_gpu_ids": [0, 1]
            }
        }
    }


class GPUResourceAllocation(BaseModel):
    """Record of a concrete GPU grant handed out for a resource request."""
    allocation_id: str = Field(..., description="分配ID")
    gpu_id: int = Field(..., description="分配的GPU ID")
    memory_allocated: int = Field(..., description="分配的显存(MB)")
    task_id: Optional[str] = Field(default=None, description="关联的任务ID")
    allocated_at: datetime = Field(..., description="分配时间")
    # None means the allocation has no expiry and must be released explicitly.
    expires_at: Optional[datetime] = Field(default=None, description="过期时间")
    exclusive: bool = Field(default=False, description="是否独占")

    # Example payload surfaced in the generated OpenAPI/JSON schema.
    model_config = {
        "json_schema_extra": {
            "example": {
                "allocation_id": "alloc_123456",
                "gpu_id": 0,
                "memory_allocated": 4096,
                "task_id": "task_789",
                "allocated_at": "2024-01-01T12:00:00Z",
                "expires_at": "2024-01-01T13:00:00Z",
                "exclusive": False
            }
        }
    }


class AITask(BaseModel):
    """Full state of an AI workload: identity, scheduling info, resources, progress."""
    task_id: str = Field(..., description="任务ID")
    task_type: str = Field(..., description="任务类型")
    priority: TaskPriority = Field(default=TaskPriority.NORMAL, description="任务优先级")
    status: TaskStatus = Field(default=TaskStatus.PENDING, description="任务状态")
    resource_request: GPUResourceRequest = Field(..., description="资源需求")
    # Populated once the scheduler grants a GPU; None while still queued.
    allocation: Optional[GPUResourceAllocation] = Field(default=None, description="资源分配")
    created_at: datetime = Field(..., description="创建时间")
    started_at: Optional[datetime] = Field(default=None, description="开始时间")
    completed_at: Optional[datetime] = Field(default=None, description="完成时间")
    progress: float = Field(default=0.0, ge=0, le=100, description="进度(%)")
    error_message: Optional[str] = Field(default=None, description="错误信息")
    metadata: Optional[Dict[str, Any]] = Field(default=None, description="任务元数据")

    # Example payload surfaced in the generated OpenAPI/JSON schema.
    model_config = {
        "json_schema_extra": {
            "example": {
                "task_id": "task_123456",
                "task_type": "person_detection",
                "priority": "high",
                "status": "running",
                "resource_request": {
                    "memory_required": 4096,
                    "exclusive": False
                },
                "created_at": "2024-01-01T12:00:00Z",
                "started_at": "2024-01-01T12:01:00Z",
                "progress": 45.5
            }
        }
    }


class GPUClusterInfo(BaseModel):
    """Aggregated view over every GPU in the cluster plus queue depth."""
    total_gpus: int = Field(..., description="GPU总数")
    available_gpus: int = Field(..., description="可用GPU数")
    busy_gpus: int = Field(..., description="忙碌GPU数")
    error_gpus: int = Field(..., description="错误GPU数")
    total_memory: int = Field(..., description="总显存(MB)")
    used_memory: int = Field(..., description="已用显存(MB)")
    free_memory: int = Field(..., description="可用显存(MB)")
    average_utilization: float = Field(..., description="平均利用率(%)")
    pending_tasks: int = Field(..., description="待处理任务数")
    running_tasks: int = Field(..., description="运行中任务数")

    # Example payload surfaced in the generated OpenAPI/JSON schema.
    model_config = {
        "json_schema_extra": {
            "example": {
                "total_gpus": 4,
                "available_gpus": 2,
                "busy_gpus": 2,
                "error_gpus": 0,
                "total_memory": 98304,
                "used_memory": 32768,
                "free_memory": 65536,
                "average_utilization": 62.5,
                "pending_tasks": 5,
                "running_tasks": 3
            }
        }
    }


class ResourceUsageStats(BaseModel):
    """One timestamped telemetry sample for a single GPU (time-series point)."""
    gpu_id: int = Field(..., description="GPU ID")
    timestamp: datetime = Field(..., description="时间戳")
    utilization: float = Field(..., description="利用率(%)")
    memory_used: int = Field(..., description="已用显存(MB)")
    temperature: Optional[float] = Field(default=None, description="温度(°C)")
    power_usage: Optional[float] = Field(default=None, description="功耗(W)")

    # Example payload surfaced in the generated OpenAPI/JSON schema.
    model_config = {
        "json_schema_extra": {
            "example": {
                "gpu_id": 0,
                "timestamp": "2024-01-01T12:00:00Z",
                "utilization": 85.5,
                "memory_used": 12288,
                "temperature": 72.0,
                "power_usage": 380.0
            }
        }
    }


class TaskScheduleRequest(BaseModel):
    """Inbound request asking the scheduler to queue a new AI task."""
    task_type: str = Field(..., description="任务类型")
    priority: TaskPriority = Field(TaskPriority.NORMAL, description="任务优先级")
    resource_request: GPUResourceRequest = Field(..., description="资源需求")
    metadata: Optional[Dict[str, Any]] = Field(None, description="任务元数据")
    callback_url: Optional[str] = Field(None, description="回调URL")

    # Bug fix: "callback_url" previously sat OUTSIDE "example" (a stray "}}"
    # closed both "metadata" and "example" early), so it appeared as a bogus
    # sibling key of "example" inside json_schema_extra instead of being part
    # of the documented example payload.
    model_config = {
        "json_schema_extra": {
            "example": {
                "task_type": "person_detection",
                "priority": "high",
                "resource_request": {
                    "memory_required": 4096,
                    "exclusive": False
                },
                "metadata": {
                    "camera_id": "cam_001",
                    "video_url": "rtsp://example.com/stream"
                },
                "callback_url": "http://example.com/callback"
            }
        }
    }


class TaskScheduleResponse(BaseModel):
    """Scheduler's reply to a schedule request: assigned id and queue position."""
    task_id: str = Field(..., description="任务ID")
    status: TaskStatus = Field(..., description="任务状态")
    estimated_start_time: Optional[datetime] = Field(default=None, description="预计开始时间")
    queue_position: Optional[int] = Field(default=None, description="队列位置")

    # Example payload surfaced in the generated OpenAPI/JSON schema.
    model_config = {
        "json_schema_extra": {
            "example": {
                "task_id": "task_123456",
                "status": "pending",
                "estimated_start_time": "2024-01-01T12:05:00Z",
                "queue_position": 3
            }
        }
    }


class ResourceOptimizationSuggestion(BaseModel):
    """A single actionable recommendation produced by resource analysis."""
    suggestion_type: str = Field(..., description="建议类型")
    description: str = Field(..., description="建议描述")
    priority: str = Field(..., description="建议优先级")
    potential_improvement: str = Field(..., description="潜在改进")
    implementation_steps: List[str] = Field(..., description="实施步骤")

    # Example payload surfaced in the generated OpenAPI/JSON schema.
    model_config = {
        "json_schema_extra": {
            "example": {
                "suggestion_type": "memory_optimization",
                "description": "GPU 0的显存使用率过高，建议优化内存分配",
                "priority": "high",
                "potential_improvement": "可提升20%的任务处理能力",
                "implementation_steps": [
                    "分析当前内存使用模式",
                    "实施内存池优化",
                    "调整任务调度策略"
                ]
            }
        }
    }


class GPUHealthCheck(BaseModel):
    """Result of a health probe on one GPU: score plus categorized findings."""
    gpu_id: int = Field(..., description="GPU ID")
    health_score: float = Field(..., ge=0, le=100, description="健康评分")
    # default_factory instead of a literal [] — the Pydantic-idiomatic way to
    # declare mutable defaults (each instance gets its own fresh list).
    issues: List[str] = Field(default_factory=list, description="发现的问题")
    warnings: List[str] = Field(default_factory=list, description="警告信息")
    recommendations: List[str] = Field(default_factory=list, description="建议")
    last_check: datetime = Field(..., description="最后检查时间")

    # Example payload surfaced in the generated OpenAPI/JSON schema.
    model_config = {
        "json_schema_extra": {
            "example": {
                "gpu_id": 0,
                "health_score": 85.5,
                "issues": ["温度偏高"],
                "warnings": ["显存使用率超过80%"],
                "recommendations": ["增加散热", "优化内存使用"],
                "last_check": "2024-01-01T12:00:00Z"
            }
        }
    }


class LoadBalancingConfig(BaseModel):
    """Tunable parameters controlling how tasks are balanced across GPUs."""
    strategy: str = Field("round_robin", description="负载均衡策略")
    # default_factory instead of a literal {} — the Pydantic-idiomatic way to
    # declare mutable defaults (each instance gets its own fresh dict).
    weight_factors: Dict[str, float] = Field(default_factory=dict, description="权重因子")
    max_queue_size: int = Field(100, description="最大队列大小")
    timeout_seconds: int = Field(300, description="超时时间(秒)")
    enable_preemption: bool = Field(False, description="是否启用抢占")

    # Example payload surfaced in the generated OpenAPI/JSON schema.
    model_config = {
        "json_schema_extra": {
            "example": {
                "strategy": "weighted_round_robin",
                "weight_factors": {
                    "memory_usage": 0.4,
                    "utilization": 0.3,
                    "temperature": 0.2,
                    "queue_length": 0.1
                },
                "max_queue_size": 50,
                "timeout_seconds": 600,
                "enable_preemption": True
            }
        }
    }