"""
模型相关的Pydantic模式
"""
from datetime import datetime
from typing import Dict, List, Optional, Any
from pydantic import BaseModel, Field, ConfigDict

from app.models.model import ModelStatus, ModelFormat, DeploymentStatus


class ModelBase(BaseModel):
    """Shared fields for all model schemas.

    NOTE: Pydantic v2 reserves the attribute name ``model_config`` for the
    model configuration, so declaring a *field* with that name raises a
    ``PydanticUserError`` at class-creation time. The payload key
    ``"model_config"`` is therefore exposed on the Python side as ``config``
    and mapped back to the original wire name through a field alias.
    """
    # populate_by_name lets callers construct instances with either the
    # attribute name ("config") or the alias ("model_config"); FastAPI
    # serializes responses with by_alias=True by default, so the JSON wire
    # format is unchanged. protected_namespaces=() suppresses the Pydantic v2
    # warnings for the other "model_*" field names below.
    model_config = ConfigDict(populate_by_name=True, protected_namespaces=())

    name: str = Field(..., min_length=1, max_length=100, description="模型名称")
    version: str = Field(..., max_length=20, description="模型版本")
    description: Optional[str] = Field(None, max_length=1000, description="模型描述")
    model_type: str = Field(..., max_length=50, description="模型类型")
    model_format: ModelFormat = Field(default=ModelFormat.PYTORCH, description="模型格式")
    # BUG FIX: was ``model_config: Dict[str, Any]`` — an illegal field name in
    # Pydantic v2. The alias keeps the external JSON key identical.
    config: Dict[str, Any] = Field(default_factory=dict, alias="model_config", description="模型配置")
    input_schema: Dict[str, Any] = Field(default_factory=dict, description="输入模式")
    output_schema: Dict[str, Any] = Field(default_factory=dict, description="输出模式")
    preprocessing_config: Dict[str, Any] = Field(default_factory=dict, description="预处理配置")
    model_tags: List[str] = Field(default_factory=list, description="模型标签")


class ModelCreate(ModelBase):
    """Request schema for creating a model.

    Extends :class:`ModelBase` with the ownership / lineage fields that are
    only supplied at creation time (project, source training job, parent
    model for fine-tuned variants).
    """
    project_id: int = Field(..., description="项目ID")
    training_job_id: Optional[int] = Field(None, description="训练任务ID")
    # Lineage: set when this model was derived from another registered model.
    parent_model_id: Optional[int] = Field(None, description="父模型ID")
    base_model_name: Optional[str] = Field(None, max_length=100, description="基础模型名称")
    is_fine_tuned: bool = Field(default=False, description="是否为微调模型")
    
    # Pydantic model configuration: the example below is surfaced in the
    # generated OpenAPI docs via json_schema_extra.
    model_config = ConfigDict(
        json_schema_extra={
            "example": {
                "name": "智慧照明预测模型",
                "version": "v1.0.0",
                "description": "基于Transformer的照明亮度预测模型",
                "model_type": "transformer",
                "model_format": "pytorch",
                "model_config": {
                    "hidden_size": 512,
                    "num_layers": 6,
                    "num_heads": 8,
                    "dropout": 0.1
                },
                "input_schema": {
                    "features": ["temperature", "humidity", "light_level"],
                    "sequence_length": 24,
                    "data_type": "float32"
                },
                "output_schema": {
                    "brightness_level": {"type": "float", "range": [0, 100]}
                },
                "model_tags": ["智慧照明", "预测", "transformer"],
                "project_id": 1,
                "training_job_id": 1
            }
        }
    )


class ModelUpdate(BaseModel):
    """Partial-update schema: every field is optional; ``None`` means
    "leave this attribute unchanged".

    NOTE: Pydantic v2 reserves the attribute name ``model_config``, so the
    "model_config" payload field is exposed as ``config`` with the original
    wire name preserved through an alias (FastAPI serializes by alias).
    """
    model_config = ConfigDict(populate_by_name=True, protected_namespaces=())

    description: Optional[str] = Field(None, max_length=1000, description="模型描述")
    # BUG FIX: was ``model_config: Optional[Dict[str, Any]]`` — an illegal
    # field name in Pydantic v2; the alias keeps the JSON key identical.
    config: Optional[Dict[str, Any]] = Field(None, alias="model_config", description="模型配置")
    preprocessing_config: Optional[Dict[str, Any]] = Field(None, description="预处理配置")
    model_tags: Optional[List[str]] = Field(None, description="模型标签")
    status: Optional[ModelStatus] = Field(None, description="模型状态")
    quality_score: Optional[float] = Field(None, ge=0, le=100, description="质量评分")


class ModelInDB(ModelBase):
    """Full database representation of a model — mirrors the ORM entity.

    ``from_attributes=True`` allows building this schema directly from an
    ORM object (attribute access instead of dict keys).
    """
    # Identity and ownership
    id: int
    status: ModelStatus
    project_id: int
    training_job_id: Optional[int]
    created_by: int
    # Lineage (fine-tuning provenance)
    parent_model_id: Optional[int]
    base_model_name: Optional[str]
    is_fine_tuned: bool
    # Artifact size / storage locations (None until the artifact is persisted)
    model_size_mb: Optional[float]
    parameter_count: Optional[int]
    model_path: Optional[str]
    checkpoint_path: Optional[str]
    config_path: Optional[str]
    metadata_path: Optional[str]
    # Evaluation metrics captured per dataset split
    training_metrics: Dict[str, Any]
    validation_metrics: Dict[str, Any]
    test_metrics: Dict[str, Any]
    # Runtime performance characteristics
    inference_time_ms: Optional[float]
    throughput_qps: Optional[float]
    # Deployment state
    deployment_status: DeploymentStatus
    deployment_config: Dict[str, Any]
    endpoint_url: Optional[str]
    deployment_instances: int
    # Usage counters
    inference_count: int
    last_inference_at: Optional[datetime]
    download_count: int
    quality_score: Optional[float]
    # Lifecycle timestamps
    trained_at: Optional[datetime]
    deployed_at: Optional[datetime]
    created_at: datetime
    updated_at: datetime
    
    model_config = ConfigDict(from_attributes=True)


class Model(ModelBase):
    """API response schema for a model.

    Same shape as :class:`ModelInDB` minus the internal storage paths and
    deployment_config (``*_path`` fields are not exposed to clients).
    """
    # Identity and ownership
    id: int
    status: ModelStatus
    project_id: int
    training_job_id: Optional[int]
    created_by: int
    # Lineage (fine-tuning provenance)
    parent_model_id: Optional[int]
    base_model_name: Optional[str]
    is_fine_tuned: bool
    # Artifact size
    model_size_mb: Optional[float]
    parameter_count: Optional[int]
    # Evaluation metrics per dataset split
    training_metrics: Dict[str, Any]
    validation_metrics: Dict[str, Any]
    test_metrics: Dict[str, Any]
    # Runtime performance characteristics
    inference_time_ms: Optional[float]
    throughput_qps: Optional[float]
    # Deployment state
    deployment_status: DeploymentStatus
    endpoint_url: Optional[str]
    deployment_instances: int
    # Usage counters
    inference_count: int
    last_inference_at: Optional[datetime]
    download_count: int
    quality_score: Optional[float]
    # Lifecycle timestamps
    trained_at: Optional[datetime]
    deployed_at: Optional[datetime]
    created_at: datetime
    updated_at: datetime
    
    model_config = ConfigDict(from_attributes=True)


class ModelSummary(BaseModel):
    """Compact model view used in list endpoints (see :class:`ModelList`)."""
    id: int
    name: str
    version: str
    model_type: str
    status: ModelStatus
    deployment_status: DeploymentStatus
    quality_score: Optional[float]
    inference_count: int
    created_at: datetime
    
    # Allow construction straight from ORM rows.
    model_config = ConfigDict(from_attributes=True)


class ModelList(BaseModel):
    """Paginated list response: one page of summaries plus paging metadata."""
    models: List[ModelSummary]
    # Total number of matching records across all pages (not len(models)).
    total: int
    page: int
    page_size: int

class ModelDeploy(BaseModel):
    """Request schema for deploying a model.

    ``instances`` is bounded to 1–10 replicas; the free-form config dicts
    are passed through to the deployment backend.
    """
    deployment_config: Dict[str, Any] = Field(..., description="部署配置")
    instances: int = Field(default=1, ge=1, le=10, description="实例数量")
    resource_requirements: Dict[str, Any] = Field(default_factory=dict, description="资源需求")
    
    # OpenAPI example shown in the generated docs.
    model_config = ConfigDict(
        json_schema_extra={
            "example": {
                "deployment_config": {
                    "environment": "production",
                    "auto_scaling": True,
                    "max_instances": 5,
                    "health_check_interval": 30
                },
                "instances": 2,
                "resource_requirements": {
                    "cpu": "1000m",
                    "memory": "2Gi",
                    "gpu": 0
                }
            }
        }
    )


class ModelInference(BaseModel):
    """Request schema for a single inference call."""
    input_data: Dict[str, Any] = Field(..., description="输入数据")
    # When True the server applies the model's preprocessing_config first.
    preprocessing: bool = Field(default=True, description="是否预处理")
    return_confidence: bool = Field(default=False, description="返回置信度")


class ModelInferenceResult(BaseModel):
    """Response schema for a single inference call."""
    prediction: Dict[str, Any] = Field(..., description="预测结果")
    # Only populated when the request set return_confidence=True — TODO confirm against handler.
    confidence: Optional[float] = Field(None, description="置信度")
    inference_time_ms: float = Field(..., description="推理时间(毫秒)")
    model_version: str = Field(..., description="模型版本")
    timestamp: datetime = Field(..., description="推理时间戳")


class ModelComparison(BaseModel):
    """Request schema for comparing 2–5 models on a set of metric names."""
    # BUG FIX: min_items/max_items are the deprecated Pydantic v1 spellings;
    # in Pydantic v2 (which this file uses — ConfigDict, from_attributes)
    # list length constraints are min_length/max_length.
    models: List[int] = Field(..., min_length=2, max_length=5, description="对比模型ID列表")
    metrics: List[str] = Field(..., description="对比指标")


class ModelComparisonResult(BaseModel):
    """Response schema for a model comparison run."""
    # Free-form per-model metric table produced by the comparison service.
    comparison_data: Dict[str, Any] = Field(..., description="对比数据")
    best_model_id: int = Field(..., description="最佳模型ID")
    recommendations: List[str] = Field(..., description="建议")


class ModelExport(BaseModel):
    """Request schema for exporting a model artifact in a chosen format."""
    export_format: ModelFormat = Field(..., description="导出格式")
    include_config: bool = Field(default=True, description="包含配置文件")
    include_metadata: bool = Field(default=True, description="包含元数据")
    optimize_for_inference: bool = Field(default=False, description="优化推理性能")


class ModelStats(BaseModel):
    """Aggregate statistics across all models (dashboard/overview endpoint)."""
    total_models: int
    deployed_models: int
    training_models: int
    failed_models: int
    # Milliseconds, averaged across models — presumably over recorded
    # inference_time_ms values; verify against the aggregation query.
    avg_inference_time: float
    total_inferences: int
    # Count of models keyed by model_type.
    models_by_type: Dict[str, int]
    # Fraction (or percentage — confirm with producer) of successful deployments.
    deployment_success_rate: float