from pydantic import BaseModel, Field, ConfigDict, field_validator, model_validator, field_serializer
from typing import Optional, List, Dict, Any, TypeVar, Generic, Literal
from datetime import datetime
import json

# 通用分页相关模型
class PaginationParams(BaseModel):
    """Common pagination query parameters; pagination is opt-in."""
    # When False (the default) endpoints return the full, unpaged result set.
    enable_pagination: bool = Field(default=False, description="是否启用分页，默认不启用")
    page: int = Field(default=1, ge=1, description="当前页数，从1开始")
    page_size: int = Field(default=20, ge=1, le=100, description="每页数量，最多100条")

# 训练任务相关
class TrainingTaskCreate(BaseModel):
    """Payload for creating a training task.

    Cross-field rules (enforced by the model validator below): cloud + edge
    node counts must equal ``total_nodes``; the "增量学习" (incremental
    learning) method forbids edge nodes and requires at least one cloud node.
    Existence checks against the dataset/model/node tables are the service
    layer's responsibility.
    """
    model_config = ConfigDict(protected_namespaces=())

    name: str = Field(..., description="任务名称")
    description: str = Field(default="", description="任务描述")

    # 1. Dataset selection
    dataset_name: str = Field(..., description="数据集名称，必须在数据集数据表中且文件不为空")

    # 2. Training method. NOTE: ``choices`` is not a valid Pydantic Field
    # keyword (extra kwargs are deprecated in v2); the allowed values are
    # surfaced in the JSON schema via json_schema_extra and enforced by
    # validate_training_method below.
    training_method: str = Field(..., description="训练方法",
                                 json_schema_extra={"choices": ["样本协同", "知识协同", "模型协同", "增量学习"]})

    # 3. Model selection
    model_name: str = Field(..., description="模型名称，必须在数据表中且模型类型为yolo")

    # 4. Node selection
    total_nodes: int = Field(..., ge=1, description="总节点数量，必须大于等于1")
    cloud_node_ids: List[str] = Field(..., description="云侧节点ID列表，必须在节点表中")
    edge_node_ids: List[str] = Field(..., description="边侧节点ID列表，必须在节点表中，增量学习时必须为空")

    # 5. Method-specific parameter bag (contents vary per training method)
    training_params: Dict[str, Any] = Field(..., description="训练参数，根据训练方法不同包含不同参数")

    # Legacy fields kept for backward compatibility
    model_type: str = Field(default="yolov8l", description="模型类型")
    task_type: str = Field(default="detection", description="任务类型")
    type: str = Field(default="standard_training", description="训练类型")
    tags: Optional[str] = Field(
        default=None, 
        description="标签，只能选择其中一个：人员安全、环境巡视、设备运检I、设备运检S"
    )

    # Base hyper-parameters (may be overridden via training_params)
    epochs: int = Field(default=100, description="训练轮数")
    batch_size: int = Field(default=16, description="批次大小")
    learning_rate: float = Field(default=0.001, description="学习率")

    @field_validator('training_method')
    @classmethod
    def validate_training_method(cls, v):
        """Restrict training_method to the four supported methods."""
        allowed_methods = ["样本协同", "知识协同", "模型协同", "增量学习"]
        if v not in allowed_methods:
            raise ValueError(f"训练方法必须是以下之一: {allowed_methods}")
        return v

    @field_validator('total_nodes')
    @classmethod
    def validate_total_nodes(cls, v):
        """Redundant with ge=1 on the field; kept for the explicit message."""
        if v < 1:
            raise ValueError("总节点数量必须大于等于1")
        return v

    @field_validator('cloud_node_ids', 'edge_node_ids')
    @classmethod
    def validate_node_lists(cls, v):
        """Ensure node-id fields arrive as lists."""
        if not isinstance(v, list):
            raise ValueError("节点ID必须是列表格式")
        return v

    @field_validator('tags')
    @classmethod
    def validate_tags(cls, v):
        """Allow only one of the four predefined scenario tags (or None)."""
        if v is not None:
            allowed_tags = ["人员安全", "环境巡视", "设备运检I", "设备运检S"]
            if v not in allowed_tags:
                raise ValueError(f"标签必须是以下之一: {allowed_tags}")
        return v

    @model_validator(mode='after')
    def validate_node_count_consistency(self):
        """Check node counts against total_nodes plus incremental-learning rules."""
        cloud_count = len(self.cloud_node_ids) if self.cloud_node_ids else 0
        edge_count = len(self.edge_node_ids) if self.edge_node_ids else 0
        total_count = cloud_count + edge_count

        # Incremental learning runs cloud-side only.
        if self.training_method == "增量学习" and edge_count > 0:
            raise ValueError("增量学习训练方法不允许使用边侧节点")

        if total_count != self.total_nodes:
            raise ValueError(f"总节点数量({self.total_nodes})与云侧节点({cloud_count})和边侧节点({edge_count})数量之和不一致")

        if cloud_count == 0 and edge_count == 0:
            raise ValueError("必须至少选择一个云侧或边侧节点")

        # Incremental learning needs at least one cloud node.
        if self.training_method == "增量学习" and cloud_count == 0:
            raise ValueError("增量学习训练方法必须至少选择一个云侧节点")

        return self

class TrainingTaskResponse(BaseModel):
    """API representation of a training task.

    Accepts either a plain dict (DB row) or an ORM object; JSON-encoded
    columns (tags, node-id lists, training params) are decoded by the
    ``mode='before'`` validator. ``startTime``/``endTime``/``duration`` are
    derived from ``start_time``/``end_time`` in ``__init__`` so they serialize
    like regular fields.

    NOTE(review): ``__init__`` runs only for direct construction
    (``TrainingTaskResponse(**data)``); ``model_validate`` may bypass it in
    Pydantic v2, leaving the derived fields None — confirm against callers.
    """
    model_config = ConfigDict(protected_namespaces=(), from_attributes=True, populate_by_name=True)

    # Original columns
    task_id: str
    name: str
    type: str
    model_type: str
    task_type: str
    dataset_name: str
    status: str
    progress: int
    description: str
    tags: Optional[str] = None  # a single tag string (legacy rows may hold a JSON list)
    cloud_nodes: int
    edge_nodes: int
    accuracy: Optional[float] = None
    start_time: Optional[datetime] = None
    end_time: Optional[datetime] = None
    log_path: Optional[str] = None
    model_output_path: Optional[str] = None
    error_message: Optional[str] = None
    created_at: datetime

    # Extended columns
    training_method: Optional[str] = None
    model_name: Optional[str] = None
    total_nodes: Optional[int] = None
    cloud_node_ids: Optional[List[str]] = None
    edge_node_ids: Optional[List[str]] = None
    training_params: Optional[Dict[str, Any]] = None

    # Derived, frontend-facing fields (filled in __init__)
    startTime: Optional[str] = Field(default=None, description="格式化的开始时间")
    endTime: Optional[str] = Field(default=None, description="格式化的结束时间")
    duration: Optional[str] = Field(default=None, description="训练持续时间")

    @model_validator(mode='before')
    @classmethod
    def parse_json_fields(cls, data):
        """Decode JSON-string columns coming from the database.

        Handles two input shapes: a plain dict (row mapping) and an ORM
        object exposing ``*_list`` / ``*_dict`` helper attributes.
        """
        if isinstance(data, dict):
            # tags: stored as a single string, but legacy rows may contain a
            # JSON-encoded list — keep only the first element in that case.
            if 'tags' in data:
                tags_value = data['tags']
                if isinstance(tags_value, str) and tags_value:
                    try:
                        parsed_tags = json.loads(tags_value)
                        if isinstance(parsed_tags, list) and len(parsed_tags) > 0:
                            data['tags'] = parsed_tags[0]
                        elif isinstance(parsed_tags, str):
                            data['tags'] = parsed_tags
                        else:
                            data['tags'] = None
                    except (json.JSONDecodeError, TypeError):
                        # Not JSON — treat the raw string as the tag.
                        data['tags'] = tags_value
                elif not tags_value:
                    data['tags'] = None

            # Node-id lists: JSON string -> list; empty/invalid -> [].
            for key in ('cloud_node_ids', 'edge_node_ids'):
                if key in data:
                    value = data[key]
                    if isinstance(value, str) and value:
                        try:
                            data[key] = json.loads(value)
                        except (json.JSONDecodeError, TypeError):
                            data[key] = []
                    elif not value:
                        data[key] = []

            # training_params: JSON string -> dict; empty/invalid -> {}.
            if 'training_params' in data:
                value = data['training_params']
                if isinstance(value, str) and value:
                    try:
                        data['training_params'] = json.loads(value)
                    except (json.JSONDecodeError, TypeError):
                        data['training_params'] = {}
                elif not value:
                    data['training_params'] = {}

        # ORM object: prefer its pre-parsed helper attributes when present.
        elif hasattr(data, '__dict__'):
            if hasattr(data, 'cloud_node_ids_list'):
                data.__dict__['cloud_node_ids'] = data.cloud_node_ids_list
            if hasattr(data, 'edge_node_ids_list'):
                data.__dict__['edge_node_ids'] = data.edge_node_ids_list
            if hasattr(data, 'training_params_dict'):
                data.__dict__['training_params'] = data.training_params_dict
            if hasattr(data, 'tags') and data.tags:
                try:
                    tags_str = data.tags
                    if isinstance(tags_str, str):
                        try:
                            parsed_tags = json.loads(tags_str)
                            if isinstance(parsed_tags, list) and len(parsed_tags) > 0:
                                data.__dict__['tags'] = parsed_tags[0]
                            elif isinstance(parsed_tags, str):
                                data.__dict__['tags'] = parsed_tags
                        except (json.JSONDecodeError, TypeError):
                            data.__dict__['tags'] = tags_str
                except (json.JSONDecodeError, TypeError, AttributeError):
                    pass

        return data

    def __init__(self, **data):
        super().__init__(**data)

        # Compute derived display fields from the validated datetimes.
        if self.start_time:
            self.startTime = self.start_time.strftime('%Y-%m-%d %H:%M:%S')
        if self.end_time:
            self.endTime = self.end_time.strftime('%Y-%m-%d %H:%M:%S')
        if self.start_time and self.end_time:
            duration = self.end_time - self.start_time
            total_seconds = int(duration.total_seconds())
            hours = total_seconds // 3600
            minutes = (total_seconds % 3600) // 60
            seconds = total_seconds % 60
            if hours > 0:
                self.duration = f"{hours}h {minutes}m {seconds}s"
            elif minutes > 0:
                self.duration = f"{minutes}m {seconds}s"
            else:
                self.duration = f"{seconds}s"

    # Frontend-friendly aliases (the original defined id/dataset/model twice;
    # the duplicates were dead code and have been removed).
    @property
    def id(self) -> str:
        return self.task_id

    @property
    def dataset(self) -> str:
        return self.dataset_name

    @property
    def model(self) -> str:
        return self.model_type

    @property
    def cloudNodes(self) -> int:
        return self.cloud_nodes

    @property
    def edgeNodes(self) -> int:
        return self.edge_nodes

class TrainingTaskUpdate(BaseModel):
    """Partial-update payload for a training task; every field is optional."""
    model_config = ConfigDict(protected_namespaces=())
    
    name: Optional[str] = None
    status: Optional[str] = None
    progress: Optional[int] = None
    type: Optional[str] = None
    model_type: Optional[str] = None
    task_type: Optional[str] = None
    dataset_name: Optional[str] = None
    description: Optional[str] = None
    # NOTE(review): a list of tags here, but TrainingTaskCreate/Response use a
    # single string — confirm whether updates should also be single-valued.
    tags: Optional[List[str]] = Field(
        default=None, 
        description="标签，只能选择：人员安全、环境巡视、设备运检I、设备运检S"
    )
    cloud_nodes: Optional[int] = None
    edge_nodes: Optional[int] = None
    accuracy: Optional[float] = None
    start_time: Optional[datetime] = None
    end_time: Optional[datetime] = None  # end time of the run
    epochs: Optional[int] = None
    batch_size: Optional[int] = None
    learning_rate: Optional[float] = None
    
    @field_validator('tags')
    @classmethod
    def validate_tags(cls, v):
        # Every provided tag must be one of the four predefined scenarios.
        if v is not None:
            allowed_tags = ["人员安全", "环境巡视", "设备运检I", "设备运检S"]
            for tag in v:
                if tag not in allowed_tags:
                    raise ValueError(f"标签 '{tag}' 不在允许的选项中。允许的标签: {allowed_tags}")
        return v

class TrainingTaskProgressUpdate(BaseModel):
    """Progress-only update for a running training task."""
    model_config = ConfigDict(protected_namespaces=())
    
    # Percentage complete, constrained to [0, 100].
    progress: int = Field(ge=0, le=100, description="训练进度百分比")
    # Accuracy is a ratio in [0, 1], not a percentage.
    accuracy: Optional[float] = Field(None, ge=0, le=1, description="当前准确率")

class TrainingTaskListResponse(BaseModel):
    """List response for training tasks, with optional paging metadata."""
    training_tasks: List[TrainingTaskResponse]
    total_count: int
    pagination: Optional[Dict[str, Any]] = Field(default=None, description="分页信息")

    @classmethod
    def create_response(cls, training_tasks: List[TrainingTaskResponse], total_count: int, pagination_params: Optional[PaginationParams] = None):
        """Assemble the response; paging info only when pagination is enabled."""
        pagination_info = None
        if pagination_params is not None and pagination_params.enable_pagination:
            page = pagination_params.page
            size = pagination_params.page_size
            total_pages = -(-total_count // size)  # ceiling division
            pagination_info = {
                "current_page": page,
                "page_size": size,
                "total_pages": total_pages,
                "has_next": page < total_pages,
                "has_prev": page > 1,
            }
        return cls(training_tasks=training_tasks, total_count=total_count,
                   pagination=pagination_info)

# 样本传输相关
class SampleTransferRequest(BaseModel):
    """Request to transfer a set of samples from one node to another."""
    source_node: str = Field(description="源节点")
    target_node: str = Field(description="目标节点")
    sample_ids: List[str] = Field(description="样本ID列表")
    # Default mode; any other values are interpreted downstream (not validated here).
    transfer_mode: str = Field(default="incremental", description="传输模式")

# 修复命名不匹配 - 添加SampleCollectRequest
class SampleCollectRequest(BaseModel):
    """Request to collect samples from a node (name matches API callers)."""
    node_id: str = Field(description="节点ID")
    collection_type: str = Field(description="归集类型")
    sample_count: int = Field(description="样本数量")
    filters: Dict[str, Any] = Field(description="过滤条件")

# 微调相关
class FineTuneRequest(BaseModel):
    """Request to fine-tune an existing model on a dataset."""
    model_config = ConfigDict(protected_namespaces=())
    
    model_id: str = Field(description="模型ID")
    dataset_id: str = Field(description="数据集ID")
    epochs: int = Field(default=50, description="微调轮数")
    batch_size: int = Field(default=8, description="批次大小")
    learning_rate: float = Field(default=0.0001, description="学习率")
    # Number of layers to freeze during fine-tuning.
    freeze_layers: int = Field(default=10, description="冻结层数")

class FineTuneResponse(BaseModel):
    """Result of a fine-tuning run: new model id, artifact paths, metrics."""
    model_config = ConfigDict(protected_namespaces=())
    
    new_model_id: str
    dataset_id: str
    model_weight_path: str
    model_log_path: str
    # Metric name -> value mapping produced by evaluation.
    evaluation_results: Dict[str, float]

# 模型查询相关
class ModelPerformance(BaseModel):
    """Model performance metrics (camelCase to match the frontend)."""
    precision: Optional[float] = None
    recall: Optional[float] = None
    f1Score: Optional[float] = None

class ModelDeploymentInfo(BaseModel):
    """Where a model is deployed: cloud and/or edge node lists."""
    cloudNodes: Optional[List[str]] = None
    edgeNodes: Optional[List[str]] = None

class ModelInfo(BaseModel):
    """Model record merging frontend camelCase fields with DB columns."""
    model_config = ConfigDict(protected_namespaces=(), from_attributes=True)
    
    id: str
    name: str
    type: str
    architecture: Optional[str] = None
    version: Optional[str] = None
    status: Optional[str] = None
    accuracy: Optional[float] = None
    size: Optional[str] = None
    framework: Optional[str] = None
    createdAt: Optional[str] = None
    lastTrained: Optional[str] = None
    deploymentType: Optional[str] = None
    description: Optional[str] = None
    tags: Optional[List[str]] = None
    performance: Optional[ModelPerformance] = None
    deploymentInfo: Optional[ModelDeploymentInfo] = None
    
    # Underlying model-table columns
    model_id: Optional[str] = None
    model_name: Optional[str] = None
    dataset_name: Optional[str] = None
    updated_at: Optional[datetime] = None
    evaluation_results: Optional[Dict[str, float]] = None
    base_model_name: Optional[str] = None
    map_50: Optional[float] = None
    map_95: Optional[float] = None

class ModelListResponse(BaseModel):
    """Envelope for the model-list endpoint: models + summary + paging."""
    success: bool = True
    message: str = ""
    timestamp: str = Field(default_factory=lambda: datetime.utcnow().isoformat())
    data: Dict[str, Any]  # holds "models" and "summary"
    total_count: int
    pagination: Optional[Dict[str, Any]] = None

    @classmethod
    def create_response(cls, models: List[ModelInfo], total_count: int, summary: Dict[str, int], pagination_params: Optional[PaginationParams] = None, message: str = "查询成功"):
        """Assemble the envelope; paging info only when pagination is enabled."""
        pagination_info = None
        if pagination_params is not None and pagination_params.enable_pagination:
            page = pagination_params.page
            size = pagination_params.page_size
            total_pages = -(-total_count // size)  # ceiling division
            pagination_info = {
                "current_page": page,
                "page_size": size,
                "total_pages": total_pages,
                "has_next": page < total_pages,
                "has_prev": page > 1,
            }

        payload = {"models": models, "summary": summary}
        return cls(success=True, message=message, data=payload,
                   total_count=total_count, pagination=pagination_info)

# 数据集模型
class DatasetBase(BaseModel):
    """Shared dataset fields used by the create and response models."""
    dataset_id: str = Field(..., description="数据集唯一标识")
    name: str = Field(..., description="数据集名称")
    description: Optional[str] = Field(None, description="数据集描述")
    uploader: Optional[str] = Field(None, description="上传者")

    # Learning / annotation state
    is_learned: bool = Field(default=False, description="是否已学习")
    is_annotated: bool = Field(default=False, description="是否已标注")

    # Data information
    data_size: Optional[str] = Field(None, description="数据大小，如 '2.3GB'")
    data_format: str = Field(default="IMAGE", description="数据格式：IMAGE, SEQUENCE 等")
    sample_count: int = Field(default=0, description="样本数量")
    upload_time: Optional[str] = Field(None, description="上传时间，格式如 '2024-01-15'")

    # Temporal metadata
    time_range: Optional[str] = Field(None, description="时间范围，如 '2023-2024年'")
    update_frequency: Optional[str] = Field(None, description="更新频率，如 '每日'、'每周'")

    # Dataset characteristic flags
    is_forgetting: bool = Field(default=False, description="是否遗忘")
    is_quantization: bool = Field(default=False, description="是否量化")
    is_incremental: bool = Field(default=False, description="是否增量")
    is_base: bool = Field(default=True, description="是否基础数据集")

    # Path and scenario information
    folder_path: Optional[str] = Field(None, description="文件夹路径")
    scenario: Optional[Literal["设备运检I", "设备运检S", "环境巡视", "人员安全"]] = Field(None, description="应用场景")

    # Lineage / origin
    base_dataset_id: Optional[int] = Field(None, description="基础数据集ID")
    origin: Literal["cloud", "edge"] = Field(default="cloud", description="数据来源")

    # Legacy fields
    dataset_type: str = Field(..., description="数据集类型")
    status: Optional[str] = Field(default="active", description="状态")

    @field_validator('scenario')
    @classmethod
    def validate_scenario(cls, v):
        # Belt-and-braces with the Literal annotation above.
        allowed_scenarios = ["设备运检I", "设备运检S", "环境巡视", "人员安全"]
        if v is not None and v not in allowed_scenarios:
            raise ValueError(f"scenario字段必须是以下值之一: {allowed_scenarios}")
        return v

    @field_validator('origin')
    @classmethod
    def validate_origin(cls, v):
        # Only the two node sides are valid origins.
        if v in ("cloud", "edge"):
            return v
        raise ValueError("origin字段必须是 'cloud' 或 'edge'")

class DatasetCreate(DatasetBase):
    """Payload for creating a dataset; identical to DatasetBase."""
    pass

class DatasetUpdate(BaseModel):
    """Partial-update payload for a dataset; every field is optional."""
    dataset_id: Optional[str] = Field(None, description="数据集唯一标识")
    name: Optional[str] = Field(None, description="数据集名称")
    description: Optional[str] = Field(None, description="数据集描述")
    uploader: Optional[str] = Field(None, description="上传者")

    # Learning / annotation state
    is_learned: Optional[bool] = Field(None, description="是否已学习")
    is_annotated: Optional[bool] = Field(None, description="是否已标注")

    # Data information
    data_size: Optional[str] = Field(None, description="数据大小，如 '2.3GB'")
    data_format: Optional[str] = Field(None, description="数据格式：IMAGE, SEQUENCE 等")
    sample_count: Optional[int] = Field(None, description="样本数量")
    upload_time: Optional[str] = Field(None, description="上传时间，格式如 '2024-01-15'")

    # Temporal metadata
    time_range: Optional[str] = Field(None, description="时间范围，如 '2023-2024年'")
    update_frequency: Optional[str] = Field(None, description="更新频率，如 '每日'、'每周'")

    # Dataset characteristic flags
    is_forgetting: Optional[bool] = Field(None, description="是否遗忘")
    is_quantization: Optional[bool] = Field(None, description="是否量化")
    is_incremental: Optional[bool] = Field(None, description="是否增量")
    is_base: Optional[bool] = Field(None, description="是否基础数据集")

    # Path and scenario information
    folder_path: Optional[str] = Field(None, description="文件夹路径")
    scenario: Optional[Literal["设备运检I", "设备运检S", "环境巡视", "人员安全"]] = Field(None, description="应用场景")

    # Lineage / origin
    base_dataset_id: Optional[int] = Field(None, description="基础数据集ID")
    origin: Optional[Literal["cloud", "edge"]] = Field(None, description="数据来源")

    # Legacy fields
    dataset_type: Optional[str] = Field(None, description="数据集类型")
    status: Optional[str] = Field(None, description="状态")

    @field_validator('scenario')
    @classmethod
    def validate_scenario(cls, v):
        # Belt-and-braces with the Literal annotation above.
        allowed_scenarios = ["设备运检I", "设备运检S", "环境巡视", "人员安全"]
        if v is not None and v not in allowed_scenarios:
            raise ValueError(f"scenario字段必须是以下值之一: {allowed_scenarios}")
        return v

    @field_validator('origin')
    @classmethod
    def validate_origin(cls, v):
        # None means "no change"; otherwise only the two node sides are valid.
        if v is None or v in ("cloud", "edge"):
            return v
        raise ValueError("origin字段必须是 'cloud' 或 'edge'")

class DatasetResponse(DatasetBase):
    """Dataset record returned by the API, including DB-generated fields."""
    id: int = Field(..., description="数据集ID")
    file_count: int = Field(default=0, description="文件数量")
    total_size: int = Field(default=0, description="总大小(字节)")
    annotated_samples: int = Field(default=0, description="已标注样本数")
    created_at: Optional[datetime] = Field(None, description="创建时间")
    updated_at: Optional[datetime] = Field(None, description="更新时间")
    
    # Display name of the base dataset this one derives from, if any.
    base_dataset_name: Optional[str] = Field(None, description="基础数据集名称")

    model_config = ConfigDict(from_attributes=True)
    
    @model_validator(mode='before')
    @classmethod
    def process_enum_values(cls, data):
        """Normalize ORM objects and dicts before field validation.

        Converts enum-valued ``scenario``/``origin`` to their raw values and
        pulls ``base_dataset.name`` into ``base_dataset_name``. Both Enum
        members and already-plain strings are accepted (the plain-string case
        previously crashed on ``.value`` in the ORM branch).
        """
        if hasattr(data, '__dict__'):
            # SQLAlchemy object: copy the public instance attributes.
            data_dict = {key: value for key, value in data.__dict__.items()
                         if not key.startswith('_')}
            
            for attr in ('scenario', 'origin'):
                value = getattr(data, attr, None)
                if value:
                    # Enum member -> raw value; plain string passes through.
                    data_dict[attr] = value.value if hasattr(value, 'value') else value
            base_dataset = getattr(data, 'base_dataset', None)
            if base_dataset:
                data_dict['base_dataset_name'] = base_dataset.name
            
            return data_dict
        elif isinstance(data, dict):
            # Dict input: unwrap any enum members in place.
            for key in ('scenario', 'origin'):
                if key in data and hasattr(data[key], 'value'):
                    data[key] = data[key].value
            return data
        return data

# 使用通用的ListResponse，不需要专门的DatasetListResponse
T = TypeVar('T')
class ListResponse(BaseModel, Generic[T]):
    """Generic list-response envelope: data plus count, paging and summary."""
    success: bool = True
    message: str = ""
    # NOTE(review): datetime.utcnow() is deprecated in Python 3.12; kept as-is
    # for output compatibility (naive UTC ISO string without offset).
    timestamp: str = Field(default_factory=lambda: datetime.utcnow().isoformat())
    data: List[T]
    total_count: int
    pagination: Optional[Dict[str, Any]] = None
    summary: Optional[Dict[str, Any]] = None

class DatasetListResponse(ListResponse[DatasetResponse]):
    """List response specialized for datasets."""
    pass

# 模型传输相关 - 保留正在使用的模型
class ModelPathResponse(BaseModel):
    """Location and existence info for a model's weight file."""
    model_config = ConfigDict(protected_namespaces=())
    
    model_id: str
    model_path: str
    file_size: int  # in bytes — TODO confirm against the producer
    exists: bool

class ModelTransferRequest(BaseModel):
    """Request to transfer a model artifact to another endpoint."""
    model_config = ConfigDict(protected_namespaces=())
    
    model_id: str
    model_path: str
    transfer_protocol: str = Field(default="http", description="传输协议")
    target_endpoint: Optional[str] = Field(default=None, description="目标端点")

class ModelTransferResponse(BaseModel):
    """Outcome of a model transfer: download URL, size, timing and status."""
    model_config = ConfigDict(protected_namespaces=())
    
    model_id: str
    transfer_url: str
    file_size: int
    transfer_time: float  # seconds — TODO confirm unit against the producer
    status: str

# 节点相关
class NodeInfo(BaseModel):
    """Node record (cloud or edge compute node) as exposed by the API."""
    model_config = ConfigDict(protected_namespaces=(), from_attributes=True)
    
    node_id: str
    name: str
    node_type: str  # 'cloud' or 'edge'
    location: Optional[str] = "N/A"
    status: str
    service_ip: Optional[str] = "N/A"  # service endpoint IP
    cpu: Optional[str] = "N/A"
    memory: Optional[str] = "N/A"
    gpu: Optional[str] = "N/A"
    bandwidth: Optional[str] = "N/A"
    
    # Extended fields
    running_services: Optional[List[str]] = None
    deployed_models: Optional[List[str]] = None
    collected_sample_count: int = 0
    uncollected_sample_count: int = 0
    cpu_load: float = 0.0
    memory_load: float = 0.0
    network_load: float = 0.0
    
    created_at: datetime
    updated_at: datetime
    
    # The two per-field validators were byte-identical; merged into one.
    @field_validator('running_services', 'deployed_models', mode='before')
    @classmethod
    def parse_json_string_list(cls, v):
        """Decode JSON-encoded list columns; empty or invalid strings become None."""
        if isinstance(v, str):
            try:
                return json.loads(v) if v else None
            except (json.JSONDecodeError, TypeError):
                return None
        return v

# 节点模型 (Base, Create, Update)
class NodeBase(BaseModel):
    """Base node model shared by the create payload."""
    node_id: str
    name: str
    node_type: str = Field(default="edge", description="节点类型 'cloud' 或 'edge'")
    location: Optional[str] = None
    status: str = Field(default="unknown", description="节点状态")
    service_ip: Optional[str] = None
    cpu: Optional[str] = None
    memory: Optional[str] = None
    gpu: Optional[str] = None
    bandwidth: Optional[str] = None
    running_services: Optional[List[str]] = None
    deployed_models: Optional[List[str]] = None

    @field_validator('node_type')
    @classmethod
    def validate_node_type(cls, v):
        """Only 'cloud' and 'edge' are valid node types."""
        if v in ('cloud', 'edge'):
            return v
        raise ValueError(f"节点类型必须是 'cloud' 或 'edge'，不能是 '{v}'")

class NodeCreate(NodeBase):
    """Payload for creating a node; identical to NodeBase."""
    pass

class NodeUpdate(BaseModel):
    """Partial-update payload for a node; every field is optional."""
    name: Optional[str] = None
    node_type: Optional[str] = None
    location: Optional[str] = None
    status: Optional[str] = None
    service_ip: Optional[str] = None
    cpu: Optional[str] = None
    memory: Optional[str] = None
    gpu: Optional[str] = None
    bandwidth: Optional[str] = None
    running_services: Optional[List[str]] = None
    deployed_models: Optional[List[str]] = None

class NodeListResponse(ListResponse[NodeInfo]):
    """List response specialized for nodes."""

    @classmethod
    def create_response(cls, nodes: List[NodeInfo], total_count: int, pagination_params: Optional[PaginationParams] = None, message: str = "查询成功"):
        """Assemble the envelope; paging info only when pagination is enabled."""
        pagination_info = None
        if pagination_params is not None and pagination_params.enable_pagination:
            page = pagination_params.page
            size = pagination_params.page_size
            total_pages = -(-total_count // size)  # ceiling division
            pagination_info = {
                "current_page": page,
                "page_size": size,
                "total_pages": total_pages,
                "has_next": page < total_pages,
                "has_prev": page > 1,
            }
        return cls(success=True, message=message, data=nodes,
                   total_count=total_count, pagination=pagination_info)

class NodeUpdateRequest(BaseModel):
    """Node status/monitoring update; every field is optional."""
    status: Optional[str] = None
    service_ip: Optional[str] = None
    cpu: Optional[str] = None
    memory: Optional[str] = None
    gpu: Optional[str] = None
    bandwidth: Optional[str] = None
    running_services: Optional[List[str]] = None
    deployed_models: Optional[List[str]] = None
    collected_sample_count: Optional[int] = None
    uncollected_sample_count: Optional[int] = None
    cpu_load: Optional[float] = None
    memory_load: Optional[float] = None
    network_load: Optional[float] = None

class NodeCreateRequest(BaseModel):
    """Request to register a node with full hardware/load details."""
    node_id: str
    name: str
    node_type: str = Field(default="edge", description="节点类型")
    location: str
    status: str = Field(default="online", description="节点状态")
    service_ip: str
    cpu: str
    memory: str
    gpu: str
    bandwidth: str
    running_services: Optional[List[str]] = None
    deployed_models: Optional[List[str]] = None
    collected_sample_count: Optional[int] = 0
    uncollected_sample_count: Optional[int] = 0
    cpu_load: Optional[float] = 0.0
    memory_load: Optional[float] = 0.0
    network_load: Optional[float] = 0.0

# 节点扩展监控信息
class NodeCPUInfo(BaseModel):
    """CPU monitoring details."""
    usage: float = Field(default=0.0, description="CPU使用率百分比")
    cores: int = Field(default=1, description="CPU核心数")
    temperature: float = Field(default=0.0, description="CPU温度")

class NodeMemoryInfo(BaseModel):
    """Memory monitoring details (GB / percent)."""
    used: float = Field(default=0.0, description="已使用内存(GB)")
    total: float = Field(default=1.0, description="总内存(GB)")
    usage: float = Field(default=0.0, description="内存使用率百分比")

class NodeGPUInfo(BaseModel):
    """GPU monitoring details."""
    usage: float = Field(default=0.0, description="GPU使用率百分比")
    memory_used: float = Field(default=0.0, description="GPU已使用显存(GB)")
    memory_total: float = Field(default=1.0, description="GPU总显存(GB)")
    temperature: float = Field(default=0.0, description="GPU温度")
    count: int = Field(default=0, description="GPU数量")

class NodeNetworkInfo(BaseModel):
    """Network monitoring details (Mbps / ms)."""
    bandwidth_in: float = Field(default=0.0, description="入网带宽(Mbps)")
    bandwidth_out: float = Field(default=0.0, description="出网带宽(Mbps)")
    latency: float = Field(default=0.0, description="网络延迟(ms)")

class NodeStorageInfo(BaseModel):
    """Storage monitoring details (GB / percent)."""
    used: float = Field(default=0.0, description="已使用存储(GB)")
    total: float = Field(default=1.0, description="总存储(GB)")
    usage: float = Field(default=0.0, description="存储使用率百分比")

class NodeMonitoringInfo(BaseModel):
    """Extended node info with detailed monitoring data."""
    model_config = ConfigDict(protected_namespaces=(), from_attributes=True)
    
    # Base fields mirrored from NodeInfo
    node_id: str
    name: str
    node_type: str  # 'cloud' or 'edge'
    location: Optional[str] = "N/A"
    status: str
    service_ip: Optional[str] = "N/A"
    bandwidth: Optional[str] = "N/A"
    
    # Extended fields
    running_services: Optional[List[str]] = None
    deployed_models: Optional[List[str]] = None
    collected_sample_count: int = 0
    uncollected_sample_count: int = 0
    cpu_load: float = 0.0
    memory_load: float = 0.0
    network_load: float = 0.0
    
    created_at: datetime
    updated_at: datetime

    # Next free node ids for frontend-driven registration of new nodes.
    next_edge_id: str = Field(default="", description="下一个边缘节点ID")
    next_cloud_id: str = Field(default="", description="下一个云端节点ID")
    
    # Detailed monitoring sections
    cpu: NodeCPUInfo = Field(default_factory=NodeCPUInfo, description="CPU详细信息")
    memory: NodeMemoryInfo = Field(default_factory=NodeMemoryInfo, description="内存详细信息")
    gpu: NodeGPUInfo = Field(default_factory=NodeGPUInfo, description="GPU详细信息")
    network: NodeNetworkInfo = Field(default_factory=NodeNetworkInfo, description="网络详细信息")
    storage: NodeStorageInfo = Field(default_factory=NodeStorageInfo, description="存储详细信息")
    timestamp: str = Field(default_factory=lambda: datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S"), description="监控数据时间戳")
    
    # The two per-field validators were byte-identical; merged into one.
    @field_validator('running_services', 'deployed_models', mode='before')
    @classmethod
    def parse_json_string_list(cls, v):
        """Decode JSON-encoded list columns; empty or invalid strings become None."""
        if isinstance(v, str):
            try:
                return json.loads(v) if v else None
            except (json.JSONDecodeError, TypeError):
                return None
        return v

# 通用响应
class GenericResponse(BaseModel):
    """Generic API envelope: success flag, human-readable message, optional payload."""
    success: bool
    message: str
    data: Optional[Any] = None

class DataResponse(BaseModel, Generic[T]):
    """Typed response envelope carrying a single payload of type T."""
    success: bool = True
    message: str = ""
    # NOTE(review): datetime.utcnow() is deprecated since Python 3.12; consider
    # datetime.now(timezone.utc).isoformat() — confirm clients accept the offset suffix.
    timestamp: str = Field(default_factory=lambda: datetime.utcnow().isoformat())
    data: T

class ModelCreateRequest(BaseModel):
    """Request payload for creating a model record."""
    name: str
    architecture: Optional[str] = None
    version: Optional[str] = None
    status: str = "draft"
    framework: Optional[str] = None
    deploymentType: str = "both"
    description: Optional[str] = None
    tags: Optional[List[str]] = Field(
        default=None, 
        description="标签，只能选择：人员安全、环境巡视、设备运检I、设备运检S"
    )
    performance: Optional[ModelPerformance] = None
    deploymentInfo: Optional[ModelDeploymentInfo] = None
    weight_file_path: Optional[str] = Field(default=None, description="模型权重文件路径")
    
    @field_validator('tags')
    @classmethod
    def validate_tags(cls, v):
        """Reject any tag outside the fixed whitelist; None means "no tags"."""
        if v is None:
            return v
        allowed_tags = ["人员安全", "环境巡视", "设备运检I", "设备运检S"]
        permitted = set(allowed_tags)
        for tag in v:
            if tag not in permitted:
                raise ValueError(f"标签 '{tag}' 不在允许的选项中。允许的标签: {allowed_tags}")
        return v

class ModelUpdateRequest(BaseModel):
    """Partial-update request for a model record; every field is optional."""
    name: Optional[str] = None
    architecture: Optional[str] = None
    version: Optional[str] = None
    status: Optional[str] = None
    framework: Optional[str] = None
    lastTrained: Optional[str] = None
    deploymentType: Optional[str] = None
    description: Optional[str] = None
    tags: Optional[List[str]] = Field(
        default=None, 
        description="标签，只能选择：人员安全、环境巡视、设备运检I、设备运检S"
    )
    performance: Optional[ModelPerformance] = None
    deploymentInfo: Optional[ModelDeploymentInfo] = None
    weight_file_path: Optional[str] = Field(default=None, description="模型权重文件路径")
    
    @field_validator('tags')
    @classmethod
    def validate_tags(cls, v):
        """Reject any tag outside the fixed whitelist; None means "leave tags unchanged"."""
        if v is None:
            return v
        allowed_tags = ["人员安全", "环境巡视", "设备运检I", "设备运检S"]
        permitted = set(allowed_tags)
        for tag in v:
            if tag not in permitted:
                raise ValueError(f"标签 '{tag}' 不在允许的选项中。允许的标签: {allowed_tags}")
        return v

# 数据集预览相关
class DatasetSample(BaseModel):
    """Metadata for a single dataset sample (one image plus its optional label file)."""
    image_name: str = Field(..., description="图片文件名")
    image_url: str = Field(..., description="图片访问URL")
    image_path: str = Field(..., description="图片相对路径")
    has_label: bool = Field(default=False, description="是否有标注文件")
    label_path: Optional[str] = Field(None, description="标注文件路径")
    file_size: Optional[int] = Field(None, description="文件大小（字节）")

class DatasetPreviewResponse(BaseModel):
    """Dataset preview response: a sampled subset of a dataset's images."""
    dataset_id: str = Field(..., description="数据集ID")
    dataset_name: str = Field(..., description="数据集名称")
    total_samples: int = Field(..., description="总样本数")
    preview_count: int = Field(..., description="预览样本数")
    samples: List[DatasetSample] = Field(..., description="样本列表")

# 日志相关模型
class LogEntry(BaseModel):
    """One parsed log record."""
    timestamp: str = Field(..., description="时间戳")
    level: str = Field(..., description="日志级别")
    logger: str = Field(..., description="日志记录器名称")
    message: str = Field(..., description="日志消息")
    module: Optional[str] = Field(None, description="模块名")
    function: Optional[str] = Field(None, description="函数名")
    line: Optional[int] = Field(None, description="行号")

class TaskLogResponse(BaseModel):
    """Task log response: file metadata plus the requested slice of log records."""
    task_id: str = Field(..., description="任务ID")
    log_file_path: str = Field(..., description="日志文件路径")
    total_lines: int = Field(..., description="总行数")
    file_size: str = Field(..., description="文件大小")
    logs: List[LogEntry] = Field(..., description="日志记录列表")
    last_modified: Optional[str] = Field(None, description="最后修改时间")

class TaskLogRequest(BaseModel):
    """Task log request: line range, cap, and optional level filter."""
    start_line: Optional[int] = Field(1, description="起始行号，默认从第1行开始")
    end_line: Optional[int] = Field(None, description="结束行号，为空则读取到文件末尾")
    max_lines: Optional[int] = Field(1000, description="最大返回行数，默认1000行")
    level_filter: Optional[str] = Field(None, description="日志级别过滤：INFO, DEBUG, WARNING, ERROR")

# 边侧样本归集相关模型
class EdgeSampleCollectionRequest(BaseModel):
    """Edge-side sample collection request (includes the target node_id)."""
    node_id: str = Field(..., description="边侧节点ID")
    scenario: str = Field(..., description="应用场景")
    difficulty_threshold: Optional[float] = Field(None, ge=0.0, le=1.0, description="难度阈值")
    time_start: Optional[str] = Field(None, description="开始时间")
    time_end: Optional[str] = Field(None, description="结束时间")
    max_samples: int = Field(1000, gt=0, le=10000, description="最大样本数")
    base_dataset_id: Optional[str] = Field(None, description="基础数据集ID")

class SampleCollectionRequest(BaseModel):
    """Sample collection request used by API routes (node_id is supplied elsewhere,
    e.g. from the URL path, so it is deliberately absent here)."""
    scenario: str = Field(..., description="应用场景")
    difficulty_threshold: Optional[float] = Field(None, ge=0.0, le=1.0, description="难度阈值")
    time_start: Optional[str] = Field(None, description="开始时间")
    time_end: Optional[str] = Field(None, description="结束时间")
    max_samples: int = Field(1000, gt=0, le=10000, description="最大样本数")
    base_dataset_id: Optional[str] = Field(None, description="基础数据集ID")

class EdgeNodeSampleInfo(BaseModel):
    """Per-edge-node sample statistics combined with basic node/resource info."""
    # Node identity and status
    node_id: str
    name: str
    location: Optional[str] = None
    status: str
    service_ip: Optional[str] = None
    # Hardware summary (free-form strings)
    cpu: Optional[str] = None
    memory: Optional[str] = None
    gpu: Optional[str] = None
    bandwidth: Optional[str] = None
    # Current load metrics
    cpu_load: float = 0.0
    memory_load: float = 0.0
    network_load: float = 0.0
    # Sample counters
    collected_sample_count: int = 0
    uncollected_sample_count: int = 0
    total_sample_count: int = 0
    uninferred_sample_count: int = 0
    samples_info: Optional[Dict[str, Any]] = None
    created_at: Optional[str] = None
    updated_at: Optional[str] = None

class EdgeNodesWithSamplesResponse(BaseModel):
    """Response listing edge nodes with their sample statistics."""
    message: str
    nodes: List[EdgeNodeSampleInfo]
    total_count: int
    summary: Dict[str, Any]

class SampleCollectionResult(BaseModel):
    """Outcome of a sample-collection run; dataset creation may fail independently,
    in which case dataset is None and dataset_error explains why."""
    collection_result: Dict[str, Any]
    dataset: Optional[Dict[str, Any]] = None
    dataset_error: Optional[str] = None

class BaseDatasetInfo(BaseModel):
    """Summary info for a base dataset."""
    dataset_id: str
    name: str
    description: Optional[str] = None
    sample_count: int = 0
    data_size: Optional[str] = None
    created_at: Optional[str] = None

# 调度策略相关模型
class SchedulingStrategyConfig(BaseModel):
    """Scheduling strategy configuration (stored as a JSON column in the DB)."""
    priority: int = Field(default=1, description="优先级")
    maxConcurrentTasks: int = Field(default=1, description="最大并发任务数")
    resourceAllocation: Dict[str, int] = Field(default_factory=dict, description="资源分配")

class SchedulingStrategyMetrics(BaseModel):
    """Scheduling strategy performance metrics (stored as a JSON column in the DB)."""
    totalExecutions: int = Field(default=0, description="总执行次数")
    successRate: float = Field(default=0.0, description="成功率")
    avgExecutionTime: str = Field(default="0分钟", description="平均执行时间")

class SchedulingStrategyBase(BaseModel):
    """Shared fields for scheduling strategies (base for create/response models)."""
    name: str = Field(..., description="策略名称")
    description: str = Field(..., description="策略描述")
    type: str = Field(..., description="策略类型，如 priority_based, load_balanced")
    scenario: str = Field(..., description="应用场景")
    status: str = Field(default="draft", description="策略状态：draft, active, paused, deprecated")
    priority: int = Field(default=1, description="策略优先级，数字越小优先级越高")
    max_concurrent_tasks: int = Field(default=1, description="最大并发任务数")
    cloud_nodes: int = Field(default=0, description="分配的云端节点数")
    edge_nodes: int = Field(default=0, description="分配的边缘节点数")
    config: Optional[SchedulingStrategyConfig] = Field(default=None, description="策略配置")
    metrics: Optional[SchedulingStrategyMetrics] = Field(default=None, description="性能指标")
    last_executed: Optional[datetime] = Field(default=None, description="最后执行时间")
    next_execution: Optional[datetime] = Field(default=None, description="下次执行时间")

class SchedulingStrategyCreate(SchedulingStrategyBase):
    """Create-scheduling-strategy request; identical to the base fields."""
    pass

class SchedulingStrategyUpdate(BaseModel):
    """Partial-update request for a scheduling strategy; only supplied fields change."""
    name: Optional[str] = None
    description: Optional[str] = None
    type: Optional[str] = None
    scenario: Optional[str] = None
    status: Optional[str] = None
    priority: Optional[int] = None
    max_concurrent_tasks: Optional[int] = None
    cloud_nodes: Optional[int] = None
    edge_nodes: Optional[int] = None
    config: Optional[SchedulingStrategyConfig] = None
    metrics: Optional[SchedulingStrategyMetrics] = None
    last_executed: Optional[datetime] = None
    next_execution: Optional[datetime] = None

class SchedulingStrategyResponse(SchedulingStrategyBase):
    """Scheduling-strategy response model.

    Extends SchedulingStrategyBase with identifiers, DB timestamps, and
    pre-formatted display strings for the frontend.
    """
    model_config = ConfigDict(from_attributes=True)
    
    id: str = Field(..., description="策略ID")
    strategy_id: str = Field(..., description="策略唯一标识")
    created_at: datetime = Field(..., description="创建时间")
    updated_at: datetime = Field(..., description="更新时间")
    # Display mirrors of the datetime fields; filled in automatically below.
    createdAt: Optional[str] = Field(default=None, description="格式化的创建时间")
    lastExecuted: Optional[str] = Field(default=None, description="格式化的最后执行时间")
    nextExecution: Optional[str] = Field(default=None, description="格式化的下次执行时间")
    
    @model_validator(mode='before')
    @classmethod
    def parse_json_fields(cls, data):
        """Decode the config/metrics columns, which the DB stores as JSON strings."""
        if isinstance(data, dict):
            if 'config' in data and isinstance(data['config'], str):
                try:
                    data['config'] = SchedulingStrategyConfig(**json.loads(data['config']))
                except (json.JSONDecodeError, TypeError):
                    data['config'] = None
            if 'metrics' in data and isinstance(data['metrics'], str):
                try:
                    data['metrics'] = SchedulingStrategyMetrics(**json.loads(data['metrics']))
                except (json.JSONDecodeError, TypeError):
                    data['metrics'] = None
        else:
            # SQLAlchemy-object path.
            # NOTE(review): this mutates the ORM instance in place; confirm no
            # caller relies on the raw JSON string surviving on the object.
            if hasattr(data, 'config') and isinstance(data.config, str):
                try:
                    data.config = SchedulingStrategyConfig(**json.loads(data.config))
                except (json.JSONDecodeError, TypeError):
                    data.config = None
            if hasattr(data, 'metrics') and isinstance(data.metrics, str):
                try:
                    data.metrics = SchedulingStrategyMetrics(**json.loads(data.metrics))
                except (json.JSONDecodeError, TypeError):
                    data.metrics = None
        return data
    
    @model_validator(mode='after')
    def _format_display_times(self):
        """Populate createdAt/lastExecuted/nextExecution on every validation path.

        Fix: the previous implementation did this in an overridden __init__,
        which pydantic v2 skips when the model is built via model_validate()
        — the exact path used with from_attributes=True — so the formatted
        fields stayed None for ORM-loaded rows. An after-validator runs for
        both Model(**data) and model_validate().
        """
        fmt = '%Y-%m-%d %H:%M:%S'
        if self.created_at:
            self.createdAt = self.created_at.strftime(fmt)
        if self.last_executed:
            self.lastExecuted = self.last_executed.strftime(fmt)
        if self.next_execution:
            self.nextExecution = self.next_execution.strftime(fmt)
        return self

class SchedulingStrategyListResponse(ListResponse[SchedulingStrategyResponse]):
    """Scheduling-strategy list response."""
    
    @classmethod
    def create_response(cls, strategies: List[SchedulingStrategyResponse], total_count: int, pagination_params: Optional[PaginationParams] = None, message: str = "查询成功"):
        """Build the standard list payload with optional pagination and a per-status summary."""
        pagination = None
        if pagination_params is not None and pagination_params.enable_pagination:
            size = pagination_params.page_size
            pagination = {
                "page": pagination_params.page,
                "page_size": size,
                "total_count": total_count,
                # Ceiling division: number of pages needed for total_count items.
                "total_pages": (total_count + size - 1) // size,
            }
        
        # Count strategies in each lifecycle state for the dashboard summary.
        summary = {"total": total_count}
        for state in ("active", "draft", "paused", "deprecated"):
            summary[state] = sum(1 for s in strategies if s.status == state)
        
        return cls(
            message=message,
            data=strategies,
            total_count=total_count,
            pagination=pagination,
            summary=summary,
        )

# 数据集创建相关的新schema
class FileUploadResponse(BaseModel):
    """Response for a file upload: where the temp file landed and how big it is."""
    temp_path: str = Field(..., description="临时文件路径")
    original_filename: str = Field(..., description="原始文件名")
    temp_filename: str = Field(..., description="临时文件名")
    file_size: int = Field(..., description="文件大小(字节)")
    size_mb: float = Field(..., description="文件大小(MB)")
    extracted_path: Optional[str] = Field(None, description="解压后的文件夹路径")

class DatasetCreationRequest(BaseModel):
    """Request payload for creating a dataset from uploaded sample/label folders."""
    name: str = Field(..., description="数据集名称")
    description: Optional[str] = Field(None, description="数据集描述")
    base_dataset_id: str = Field(..., description="基础数据集ID")
    samples_path: str = Field(..., description="样本文件夹路径")
    labels_path: str = Field(..., description="标签文件夹路径")
    uploader: str = Field(..., description="上传者")
    scenario: Optional[Literal["设备运检I", "设备运检S", "环境巡视", "人员安全"]] = Field(None, description="应用场景")
    dataset_type: str = Field(default="detection", description="数据集类型")
    
    @field_validator('scenario')
    @classmethod
    def validate_scenario(cls, v):
        """Defensive re-check of the scenario whitelist (the Literal annotation already enforces it)."""
        allowed = ("设备运检I", "设备运检S", "环境巡视", "人员安全")
        if v is None or v in allowed:
            return v
        raise ValueError("场景必须是：设备运检I, 设备运检S, 环境巡视, 人员安全 中的一个")

class BaseDatasetListResponse(BaseModel):
    """List response for base datasets."""
    success: bool = True
    message: str = ""
    # NOTE(review): datetime.utcnow() is deprecated since Python 3.12; consider
    # datetime.now(timezone.utc).isoformat() — confirm clients accept the offset suffix.
    timestamp: str = Field(default_factory=lambda: datetime.utcnow().isoformat())
    data: List[BaseDatasetInfo]
    total_count: int

# 评测集相关模型
class OriginDatasetItem(BaseModel):
    """One origin-dataset entry: which dataset to draw from and what fraction to take."""
    dataset_id: str = Field(..., description="数据集ID")
    dataset_name: str = Field(..., description="数据集名称")
    sample_ratio: float = Field(..., ge=0.0, le=1.0, description="抽取比例，0.0-1.0之间")

class TestSetBase(BaseModel):
    """Shared fields for test sets (base for create/response models)."""
    name: str = Field(..., description="评测集名称")
    description: Optional[str] = Field(None, description="评测集描述")
    tag: str = Field(..., description="标签，如'设备运检I'等")
    origin_datasets: List[OriginDatasetItem] = Field(..., description="原始数据集列表及其抽取比例")
    
    @field_validator('tag')
    @classmethod
    def validate_tag(cls, v):
        """The tag must be one of the four fixed scenario labels."""
        allowed_tags = ["人员安全", "环境巡视", "设备运检I", "设备运检S"]
        if v in allowed_tags:
            return v
        raise ValueError(f"标签必须是以下之一: {allowed_tags}")
    
    @field_validator('origin_datasets')
    @classmethod
    def validate_origin_datasets(cls, v):
        """Require at least one origin dataset and forbid duplicate dataset IDs."""
        if not v:
            raise ValueError("至少需要包含一个原始数据集")
        
        seen = set()
        for item in v:
            if item.dataset_id in seen:
                raise ValueError("原始数据集ID不能重复")
            seen.add(item.dataset_id)
        
        return v

class TestSetCreate(TestSetBase):
    """Create-test-set request; adds the default-flag on top of the base fields."""
    is_default: bool = Field(default=False, description="是否设为默认评测集")

class TestSetUpdate(BaseModel):
    """Partial-update request for a test set; only supplied fields change."""
    name: Optional[str] = Field(None, description="评测集名称")
    description: Optional[str] = Field(None, description="评测集描述")
    origin_datasets: Optional[List[OriginDatasetItem]] = Field(None, description="原始数据集列表及其抽取比例")
    is_default: Optional[bool] = Field(None, description="是否设为默认评测集")
    
    @field_validator('origin_datasets')
    @classmethod
    def validate_origin_datasets(cls, v):
        """When supplied, the list must be non-empty and free of duplicate dataset IDs."""
        if v is None:
            return v
        if not v:
            raise ValueError("至少需要包含一个原始数据集")
        
        seen = set()
        for item in v:
            if item.dataset_id in seen:
                raise ValueError("原始数据集ID不能重复")
            seen.add(item.dataset_id)
        
        return v

class TestSetResponse(TestSetBase):
    """Test-set response model with identifiers, size info, and DB timestamps."""
    model_config = ConfigDict(from_attributes=True)
    
    test_set_id: str = Field(..., description="评测集ID")
    is_default: bool = Field(..., description="是否为默认评测集")
    total_samples: int = Field(default=0, description="总样本数")
    total_size: Optional[str] = Field(None, description="总数据大小")
    created_at: datetime = Field(..., description="创建时间")
    updated_at: datetime = Field(..., description="更新时间")
    
    @model_validator(mode='before')
    @classmethod
    def parse_json_fields(cls, data):
        """Decode the origin_datasets column, which the DB stores as a JSON string."""
        if isinstance(data, dict) and 'origin_datasets' in data:
            raw = data['origin_datasets']
            if isinstance(raw, str) and raw:
                try:
                    data['origin_datasets'] = json.loads(raw)
                except (json.JSONDecodeError, TypeError):
                    # Malformed JSON degrades to an empty list rather than failing.
                    data['origin_datasets'] = []
            elif not raw:
                # None / empty string / empty list all normalize to [].
                data['origin_datasets'] = []
        return data

class TestSetListResponse(ListResponse[TestSetResponse]):
    """Test-set list response."""
    
    @classmethod
    def create_response(cls, test_sets: List[TestSetResponse], total_count: int, pagination_params: Optional[PaginationParams] = None, message: str = "查询成功"):
        """Build the test-set list payload with optional pagination info and a summary."""
        pagination_info = None
        if pagination_params is not None and pagination_params.enable_pagination:
            page = pagination_params.page
            size = pagination_params.page_size
            pagination_info = {
                "current_page": page,
                "page_size": size,
                # Ceiling division: pages needed for total_count items.
                "total_pages": (total_count + size - 1) // size,
                "total_count": total_count,
                "has_next": page * size < total_count,
                "has_prev": page > 1,
            }
        
        # Aggregate counts and the distinct tags present in this page of results.
        summary = {
            "total_test_sets": total_count,
            "default_test_sets": sum(1 for ts in test_sets if ts.is_default),
            "tags": list({ts.tag for ts in test_sets}),
        }
        
        return cls(
            success=True,
            message=message,
            data=test_sets,
            total_count=total_count,
            pagination=pagination_info,
            summary=summary,
        )

# 模型评估结果相关模型
class ModelEvaluationFile(BaseModel):
    """One evaluation artifact file (plot/image) and its serving URL."""
    filename: str = Field(..., description="文件名")
    file_path: str = Field(..., description="文件路径")
    exists: bool = Field(..., description="文件是否存在")
    file_size: Optional[str] = Field(None, description="文件大小")
    last_modified: Optional[str] = Field(None, description="最后修改时间")
    image_url: str = Field(..., description="图片访问URL")

class ModelEvaluationResultsResponse(BaseModel):
    """Evaluation results for a model: artifact files plus a metrics summary."""
    model_config = ConfigDict(from_attributes=True)
    
    model_id: str = Field(..., description="模型ID")
    model_name: str = Field(..., description="模型名称")
    training_task_id: str = Field(..., description="关联的训练任务ID")
    task_status: str = Field(..., description="训练任务状态")
    evaluation_files: List[ModelEvaluationFile] = Field(..., description="评估结果文件列表")
    base_path: str = Field(..., description="评估结果文件基础路径")
    created_at: Optional[str] = Field(None, description="模型创建时间")
    
    # Aggregated metric values (free-form dict)
    evaluation_summary: Optional[Dict[str, Any]] = Field(None, description="评估指标摘要")

# 在线评测请求/响应
class ModelEvaluateRequest(BaseModel):
    """Parameters for launching an online model evaluation run."""
    dataset_name: str = Field(..., description="用于评测的数据集名称")
    max_samples: Optional[int] = Field(200, gt=0, le=10000, description="使用的样本数量上限")
    conf: Optional[float] = Field(0.25, ge=0.0, le=1.0, description="置信度阈值")
    iou: Optional[float] = Field(0.7, ge=0.0, le=1.0, description="IoU阈值")
    img_size: Optional[int] = Field(640, ge=64, le=2048, description="输入尺寸(imgsz)")
    device: Optional[str] = Field("cpu", description="设备：cpu、cuda、cuda:0 等")
    shuffle: Optional[bool] = Field(True, description="是否随机抽样")
    seed: Optional[int] = Field(None, description="随机种子(设置后可复现抽样)")

class ModelEvaluateResponse(BaseModel):
    """Result of an online model evaluation run."""
    model_id: str = Field(..., description="模型ID")
    model_name: str = Field(..., description="模型名称")
    dataset_id: str = Field(..., description="数据集ID")
    dataset_name: str = Field(..., description="数据集名称")
    used_samples: int = Field(..., description="实际参与评测的样本数")
    metrics: Dict[str, Any] = Field(..., description="评测指标，包含map_50、map_95、precision、recall、f1_score")
    eval_dir: str = Field(..., description="评测临时目录")
    preds_dir: Optional[str] = Field(None, description="YOLO可视化输出目录(绝对路径)")
    preds_subdir: Optional[str] = Field(None, description="可视化目录名，例如 preds 或 preds2")
    params: Dict[str, Any] = Field(..., description="评测时使用的参数")
    eval_id: str = Field(..., description="评测临时目录的ID(目录名)")
    predicted_images: List[ModelEvaluationFile] = Field(default_factory=list, description="带框预测图片列表")

# 评测任务列表相关模型
class EvaluationTaskItem(BaseModel):
    """One row in the evaluation-task list (a model plus its headline metrics)."""
    model_config = ConfigDict(from_attributes=True)
    
    task_name: Optional[str] = Field(None, description="任务名称（仅来自训练任务的模型有此字段）")
    model_name: str = Field(..., description="模型名称")
    model_id: str = Field(..., description="模型ID")
    dataset_name: Optional[str] = Field(None, description="数据集名称")
    model_accuracy: Optional[float] = Field(None, description="模型精度")
    forgetting_rate: Optional[float] = Field(None, description="模型遗忘率（基于recall和precision计算）")
    tags: Optional[List[str]] = Field(None, description="模型的tag标签")
    created_at: str = Field(..., description="模型创建时间")
    
    # Detailed metric breakdown
    has_training_task: bool = Field(default=False, description="是否来自训练任务")
    precision: Optional[float] = Field(None, description="精确率")
    recall: Optional[float] = Field(None, description="召回率")
    map_50: Optional[float] = Field(None, description="mAP@0.5")
    map_95: Optional[float] = Field(None, description="mAP@0.5:0.95")

class EvaluationTaskListResponse(BaseModel):
    """List response for evaluation tasks."""
    success: bool = True
    message: str = ""
    # NOTE(review): datetime.utcnow() is deprecated since Python 3.12; consider
    # datetime.now(timezone.utc).isoformat() — confirm clients accept the offset suffix.
    timestamp: str = Field(default_factory=lambda: datetime.utcnow().isoformat())
    data: List[EvaluationTaskItem]
    total_count: int
    pagination: Optional[Dict[str, Any]] = None
    summary: Optional[Dict[str, Any]] = None

# 场景趋势分析相关模型
class ScenarioTrendRequest(BaseModel):
    """Request for trend analysis of one application scenario."""
    scenario: str = Field(..., description="应用场景，可选值：人员安全、环境巡视、设备运检I、设备运检S")
    
    @field_validator('scenario')
    @classmethod
    def validate_scenario(cls, v):
        """Only the four fixed scenario labels are accepted."""
        allowed_scenarios = ["人员安全", "环境巡视", "设备运检I", "设备运检S"]
        if v in allowed_scenarios:
            return v
        raise ValueError(f"场景必须是以下之一: {allowed_scenarios}")

class ScenarioTrendResponse(BaseModel):
    """Trend analysis result for one scenario: two five-point series."""
    scenario: str = Field(..., description="应用场景")
    forget: List[float] = Field(..., description="遗忘率趋势，包含5个float值，呈下降趋势")
    precision: List[float] = Field(..., description="准确率趋势，包含5个float值，呈上升趋势")
    
    @field_validator('forget', 'precision')
    @classmethod
    def validate_trend_length(cls, v):
        """Both trend series must contain exactly five data points."""
        if len(v) == 5:
            return v
        raise ValueError("趋势数据必须包含5个值")
