"""
模型服务模块
负责模型的管理、创建、更新、删除和微调等功能
"""

import uuid
import json
import shutil
import threading
from pathlib import Path
from typing import Dict, List, Optional, Tuple
from datetime import datetime
from sqlalchemy.orm import Session
from fastapi import UploadFile

from ultralytics import YOLO
from database import Model
from config import config
from exceptions import (
    ModelException, ValidationException, DatasetException,
    ErrorCode, ExceptionHandler, NotFoundException
)
from utils import (
    LogManager, ValidationUtils, DatasetUtils, ModelEvaluationUtils, FileManager
)
import random
# 导入其他服务
from services.training_service import TrainingService
from services.node_service import NodeService


# 初始化日志系统
LogManager.initialize_logging(config.log.level)
logger = LogManager.get_logger(__name__)


class ModelService:
    """模型服务"""
    
    def __init__(self):
        """Set up the service with the shared config and a module-scoped logger."""
        self.logger = LogManager.get_logger(__name__)
        # Single, unified configuration object shared across services.
        self.config = config
        # Make sure every configured directory exists before any file work.
        self.config.ensure_directories()
    
    def _format_file_size(self, file_path: str) -> str:
        """格式化文件大小为可读字符串"""
        if not file_path:
            return "未知"
        return FileManager.format_file_size(Path(file_path))
    
    def _determine_model_type(self, file_path: str) -> str:
        """根据文件大小判断模型类型"""
        try:
            if not file_path or not Path(file_path).exists():
                return "small"  # 默认为小模型
            
            # 使用统一的工具方法获取文件大小
            size_bytes = FileManager.get_file_size(Path(file_path))
            size_mb = size_bytes / (1024 * 1024)  # 转换为MB
            
            # 大于50MB为大模型，否则为小模型
            return "large" if size_mb > 50 else "small"
            
        except Exception as e:
            self.logger.warning(f"判断模型类型失败: {e}")
            return "small"  # 默认为小模型
    
    def _safe_parse_json_field(self, field_value, default=None):
        """安全解析JSON字段，处理各种边缘情况
        
        Args:
            field_value: 数据库字段值
            default: 解析失败时的默认值
            
        Returns:
            解析后的Python对象或默认值
        """
        if not field_value:
            return default
        
        # 如果字段值是空字符串，返回默认值
        if field_value.strip() == "":
            return default
        
        try:
            # 尝试解析JSON
            return json.loads(field_value)
        except (json.JSONDecodeError, TypeError) as e:
            self.logger.warning(f"JSON解析失败，字段值: {repr(field_value)}, 错误: {e}")
            
            # 如果不是有效JSON，尝试处理特殊情况
            if isinstance(field_value, str):
                # 如果是单个字符串值，尝试转换为合适的格式
                if default == []:
                    # tags字段，将单个字符串转换为列表
                    return [field_value.strip()]
                elif default == {}:
                    # deployment_info字段，返回空字典
                    return {}
            
            # 如果都失败了，返回默认值
            return default
    
    def initialize_pretrained_models(self, db: Session):
        """Seed the database with records for the bundled pretrained YOLO weights.

        Scans the configured pretrained-models directory for known YOLOv8
        weight files and inserts a ``Model`` row for each file whose path is
        not yet registered.

        Args:
            db: Active SQLAlchemy session.

        Returns:
            Number of newly added model records (0 when the directory is
            missing or every file is already registered).

        Raises:
            Exception: Any database error is re-raised after rollback
                (deliberate fail-fast behavior).
        """
        try:
            self.logger.info("开始初始化预训练模型...")
            
            # Locate the pretrained weights folder; bail out quietly if absent.
            pretrained_dir = self.config.paths.pretrained_models_dir
            if not pretrained_dir.exists():
                self.logger.warning(f"预训练模型目录不存在: {pretrained_dir}")
                return 0
            
            # Supported YOLOv8 variants shipped with the project.
            yolo_models = ["yolov8n", "yolov8s", "yolov8m", "yolov8l", "yolov8x"]
            initialized_count = 0
            
            for model_type in yolo_models:
                model_file = pretrained_dir / f"{model_type}.pt"
                if model_file.exists():
                    # Deduplicate on the absolute weight-file path.
                    existing_model = db.query(Model).filter(
                        Model.weight_file_path == str(model_file)
                    ).first()
                    
                    if not existing_model:
                        # Build a fresh record for this pretrained model.
                        model_id = str(uuid.uuid4())
                        
                        # Derive display size and small/large classification
                        # from the weight file itself.
                        file_size = self._format_file_size(str(model_file))
                        model_type_calculated = self._determine_model_type(str(model_file))
                        
                        pretrained_model = Model(
                            model_id=model_id,
                            model_name=f"pretrained_{model_type}",
                            model_type=model_type_calculated,
                            base_model_name=model_type,
                            dataset_name="COCO",
                            weight_file_path=str(model_file),
                            training_task_id=None,
                            architecture="YOLO",
                            version="1.0.0",
                            status="published",
                            size=file_size,
                            framework="PyTorch",
                            deployment_type="both",
                            description=f"预训练的{model_type}目标检测模型，文件大小: {file_size}",
                            tags='["环境巡视", "设备运检I"]',
                            deployment_info='{"cloudNodes": [], "edgeNodes": []}',
                            map_50=None,
                            map_95=None,
                            precision=None,
                            recall=None
                        )
                        
                        db.add(pretrained_model)
                        initialized_count += 1
                        self.logger.info(f"添加预训练模型: {model_type}")
                    else:
                        self.logger.info(f"预训练模型路径已存在，跳过: {model_file} (模型类型: {model_type})")
            
            # Commit once for the whole batch rather than per model.
            if initialized_count > 0:
                db.commit()
                self.logger.info(f"预训练模型初始化完成，共添加 {initialized_count} 个模型")
            else:
                self.logger.info("没有新的预训练模型需要添加")
            
            return initialized_count
        
        except Exception as e:
            self.logger.error(f"初始化预训练模型失败: {e}")
            db.rollback()
            raise  # re-raise so startup fails fast instead of hiding the error
    
    def publish_model(self, db: Session, model_id: str) -> dict:
        """Mark a model as published.

        Args:
            db: Active SQLAlchemy session.
            model_id: Business id of the model to publish.

        Returns:
            The published model serialized via ``_convert_model_to_dict``.

        Raises:
            NotFoundException: If no model with ``model_id`` exists.
            ValidationException: If the status update itself fails.
        """
        try:
            model = db.query(Model).filter(Model.model_id == model_id).first()
            if not model:
                raise NotFoundException(f"Model with id {model_id} not found.")
            model.status = "published"
            db.commit()
            db.refresh(model)
            return self._convert_model_to_dict(model)
        except NotFoundException:
            # Bug fix: propagate the 404 unchanged — the original blanket
            # handler re-wrapped it as a ValidationException, masking the
            # not-found semantics from API error handling.
            db.rollback()
            raise
        except Exception as e:
            db.rollback()
            raise ValidationException(f"发布模型失败: {e}") from e

    def _convert_model_to_dict(self, model: Model) -> dict:
        """Serialize a ``Model`` ORM row into the API response dict.

        Falls back to a minimal representation when any field conversion
        raises, so listing endpoints never fail because of one bad row.

        Args:
            model: The ORM model instance to serialize.

        Returns:
            A dict mixing camelCase API fields with extra snake_case
            database fields kept for backward compatibility.
        """
        try:
            # Parse JSON-encoded columns with tolerant fallbacks.
            tags = self._safe_parse_json_field(model.tags, default=[])
            deployment_info = self._safe_parse_json_field(model.deployment_info, default={})
            
            # Derive "accuracy": prefer map_50, fall back to precision.
            accuracy = None
            if model.map_50 is not None:
                accuracy = model.map_50
            elif model.precision is not None:
                accuracy = model.precision
            
            # lastTrained may be stored as a str or a datetime.
            last_trained = None
            if model.last_trained:
                if isinstance(model.last_trained, str):
                    last_trained = model.last_trained
                else:
                    # Convert datetime-like values to an ISO-format string.
                    last_trained = model.last_trained.isoformat() if hasattr(model.last_trained, 'isoformat') else str(model.last_trained)
            
            return {
                "id": model.model_id,
                "name": model.model_name,
                "type": model.model_type or "small",
                "architecture": model.architecture,
                "version": model.version,
                "status": model.status,
                "accuracy": accuracy,
                "size": model.size,
                "framework": model.framework,
                "createdAt": model.created_at.isoformat() if model.created_at else None,
                "lastTrained": last_trained,
                "deploymentType": model.deployment_type,
                "description": model.description,
                "tags": tags,
                # performance is only emitted when at least one metric is set.
                "performance": {
                    "precision": model.precision,
                    "recall": model.recall,
                    "f1Score": model.f1_score,
                    "map50": model.map_50,
                    "map95": model.map_95
                } if any([model.precision, model.recall, model.f1_score, model.map_50, model.map_95]) else None,
                "deploymentInfo": deployment_info,
                # extra raw database fields
                "model_id": model.model_id,
                "model_name": model.model_name,
                "dataset_name": model.dataset_name,
                "updated_at": model.updated_at.isoformat() if model.updated_at else None,
                "base_model_name": model.base_model_name,
                "map_50": model.map_50,
                "map_95": model.map_95
            }
        except Exception as e:
            self.logger.error(f"转换模型对象失败: {e}")
            # Return a minimal shape instead of failing outright; model_id
            # (the business id) stays the unified identifier throughout.
            return {
                "id": model.model_id,  # business id model_id, not the DB primary key
                "name": model.model_name or "未知模型",
                "type": model.model_type or "small",
                "architecture": model.architecture,
                "version": model.version,
                "status": model.status,
                "accuracy": None,
                "size": model.size,
                "framework": model.framework,
                "createdAt": model.created_at.isoformat() if model.created_at else None,
                "lastTrained": None,
                "deploymentType": model.deployment_type,
                "description": model.description,
                "tags": [],
                "performance": None,
                "deploymentInfo": {},
                # keep model_id for backward compatibility
                "model_id": model.model_id,
                "model_name": model.model_name or "未知模型",
                "dataset_name": getattr(model, 'dataset_name', None),
                "updated_at": model.updated_at.isoformat() if hasattr(model, 'updated_at') and model.updated_at else None,
                "base_model_name": getattr(model, 'base_model_name', None),
                "map_50": getattr(model, 'map_50', None),
                "map_95": getattr(model, 'map_95', None)
            }

    def create_model(self, db: Session, model_data: dict, temp_file_to_delete: Path = None) -> dict:
        """Create a new model record, optionally adopting an uploaded temp file.

        Args:
            db: Active SQLAlchemy session.
            model_data: Model attributes; must contain "name" and may contain
                "weight_file_path", "tags", "deploymentInfo", "performance",
                and other optional fields.
            temp_file_to_delete: When set, this uploaded temp file is moved
                into the model's own directory before the record is created.

        Returns:
            The created model as a dict, or a ``{"skipped": True, ...}``
            payload when the weight-file path is already registered.

        Raises:
            ValidationException: If the temp file is missing or the creation
                fails for any reason.
        """
        try:
            # Weight-file path may be absent for metadata-only models.
            weight_file_path = model_data.get("weight_file_path")
            
            # Skip creation when the same weight-file path already exists.
            if weight_file_path:
                existing_model = db.query(Model).filter(Model.weight_file_path == weight_file_path).first()
                if existing_model:
                    self.logger.info(f"模型路径已存在，跳过创建: {weight_file_path}")
                    return {
                        "skipped": True,
                        "message": f"模型路径 '{weight_file_path}' 已存在，自动跳过创建",
                        "existing_model": self._convert_model_to_dict(existing_model)
                    }
            
            model_id = str(uuid.uuid4())
            
            # When created from a temp upload, move the file into place first.
            if temp_file_to_delete and weight_file_path:
                # The temp file must still exist on disk.
                if not temp_file_to_delete.exists():
                    raise ValidationException(f"临时文件不存在: {temp_file_to_delete}")
                
                # Give the model its own directory.
                model_dir = self.config.paths.cloud_models_dir / model_id
                model_dir.mkdir(parents=True, exist_ok=True)
                
                # Move the uploaded file into the model directory.
                final_filename = f"model{temp_file_to_delete.suffix}"
                final_model_path = model_dir / final_filename
                
                shutil.move(str(temp_file_to_delete), str(final_model_path))
                
                # Record the final path back into the payload.
                weight_file_path = str(final_model_path)
                model_data["weight_file_path"] = weight_file_path
            
            # Derive display size and small/large type from the file.
            if weight_file_path:
                calculated_size = self._format_file_size(weight_file_path)
                calculated_type = self._determine_model_type(weight_file_path)
            else:
                calculated_size = "未知"
                calculated_type = "small"
                self.logger.warning(f"创建模型 {model_id} 时未提供模型文件路径，使用默认值")
            
            # Serialize tags / deploymentInfo into JSON string columns.
            tags_json = json.dumps(model_data.get("tags", []), ensure_ascii=False) if model_data.get("tags") else None
            deployment_info_json = json.dumps(model_data.get("deploymentInfo", {}), ensure_ascii=False) if model_data.get("deploymentInfo") else None
            
            new_model = Model(
                model_id=model_id,
                model_name=model_data["name"],
                model_type=calculated_type,
                architecture=model_data.get("architecture"),
                version=model_data.get("version"),
                status=model_data.get("status", "draft"),
                size=calculated_size,
                framework=model_data.get("framework"),
                deployment_type=model_data.get("deploymentType", "both"),
                description=model_data.get("description"),
                tags=tags_json,
                deployment_info=deployment_info_json,
                weight_file_path=weight_file_path,
                created_at=datetime.utcnow()
            )
            
            # Apply optional performance metrics if supplied.
            if "performance" in model_data and model_data["performance"] is not None:
                performance = model_data["performance"]
                new_model.precision = performance.get("precision")
                new_model.recall = performance.get("recall")
                new_model.f1_score = performance.get("f1Score")
            
            db.add(new_model)
            db.commit()
            db.refresh(new_model)
            
            self.logger.info(f"创建模型成功: {model_id}")
            return self._convert_model_to_dict(new_model)
            
        except ValidationException as ve:
            # Bug fix: propagate our own validation errors unchanged instead
            # of re-wrapping them with a duplicated "创建模型失败" prefix.
            db.rollback()
            self.logger.error(f"创建模型失败: {ve}")
            raise
        except Exception as e:
            db.rollback()
            self.logger.error(f"创建模型失败: {e}")
            raise ValidationException(f"创建模型失败: {e}") from e
    
    def update_model(self, db: Session, model_id: str, update_data: dict) -> Optional[dict]:
        """Update an existing model's fields.

        Args:
            db: Active SQLAlchemy session.
            model_id: Business id of the model to update.
            update_data: Partial field map; supports camelCase API keys
                ("name", "tags", "deploymentInfo", "performance",
                "lastTrained", "weight_file_path") plus direct column names.

        Returns:
            The updated model serialized as a dict.

        Raises:
            NotFoundException: If no model with ``model_id`` exists.
            ValidationException: If the update itself fails.
        """
        try:
            # Query the row directly; conversion happens only on success.
            model = db.query(Model).filter(Model.model_id == model_id).first()
            if not model:
                raise NotFoundException(f"Model with id {model_id} not found.")
            
            # A new weight file forces recomputation of size and type.
            if "weight_file_path" in update_data and update_data["weight_file_path"]:
                new_file_path = update_data["weight_file_path"]
                calculated_size = self._format_file_size(new_file_path)
                calculated_type = self._determine_model_type(new_file_path)
                
                # Apply path, size and type together.
                model.weight_file_path = new_file_path
                model.size = calculated_size
                model.model_type = calculated_type
                
                self.logger.info(f"模型 {model_id} 文件路径更新")
            
            # Map the remaining API fields onto ORM columns.
            for field, value in update_data.items():
                if field == "name":
                    model.model_name = value
                elif field == "tags" and value is not None:
                    model.tags = json.dumps(value, ensure_ascii=False)
                elif field == "deploymentInfo" and value is not None:
                    model.deployment_info = json.dumps(value, ensure_ascii=False)
                elif field == "performance" and value is not None:
                    model.precision = value.get("precision")
                    model.recall = value.get("recall")
                    model.f1_score = value.get("f1Score")
                elif field == "lastTrained":
                    model.last_trained = value
                elif field not in ["weight_file_path"] and hasattr(model, field):
                    # weight_file_path was handled above; other known columns
                    # are set directly by attribute name.
                    setattr(model, field, value)
            
            model.updated_at = datetime.utcnow()
            
            db.commit()
            db.refresh(model)
            
            self.logger.info(f"更新模型成功: {model_id}")
            return self._convert_model_to_dict(model)
            
        except NotFoundException:
            # Bug fix: propagate the 404 unchanged — the original blanket
            # handler re-wrapped it as a ValidationException, masking the
            # not-found semantics from API error handling.
            db.rollback()
            raise
        except Exception as e:
            db.rollback()
            self.logger.error(f"更新模型失败: {e}")
            raise ValidationException(f"更新模型失败: {e}") from e
    
    def get_model_by_id(self, db: Session, model_id: str) -> Optional[dict]:
        """Fetch a single model by its business id and serialize it.

        Raises:
            NotFoundException: If no model with ``model_id`` exists.
        """
        found = db.query(Model).filter(Model.model_id == model_id).first()
        if found is None:
            raise NotFoundException(f"Model with id {model_id} not found.")
        return self._convert_model_to_dict(found)
    
    def list_models(self, db: Session, page: int, page_size: int, 
                   model_type: Optional[str] = None, status: Optional[str] = None) -> Tuple[List[dict], int]:
        """Return one page of models plus the total matching row count.

        Optional ``model_type`` / ``status`` narrow the query before paging.
        """
        q = db.query(Model)

        # Narrow the query by the optional filters.
        if model_type:
            q = q.filter(Model.model_type == model_type)
        if status:
            q = q.filter(Model.status == status)

        # Count before slicing so the total reflects all filtered rows.
        total = q.count()

        # Fetch just the requested page.
        offset = (page - 1) * page_size
        rows = q.offset(offset).limit(page_size).all()

        # Serialize every row for the API response.
        return [self._convert_model_to_dict(row) for row in rows], total
    
    def get_model_path(self, db: Session, model_id: str) -> Path:
        """Resolve and validate the on-disk weight-file path for a model.

        Raises:
            NotFoundException: If the model, its stored path, or the file
                itself is missing.
        """
        # Query the row directly; no dict conversion is needed here.
        record = db.query(Model).filter(Model.model_id == model_id).first()
        if record is None:
            raise NotFoundException(f"Model with id {model_id} not found.")
        if not record.weight_file_path:
            raise NotFoundException(f"未找到模型 {model_id} 的路径")

        path = Path(record.weight_file_path)
        if not path.exists():
            raise NotFoundException(f"模型文件不存在: {path}")
        return path

    def transfer_model(self, db: Session, model_id: str, target_node_name: str) -> Dict:
        """
        Transfer a model to a target node (lookup logic implemented; the
        actual file transfer is a placeholder).

        Args:
            db: Active SQLAlchemy session.
            model_id: Business id of the model to transfer.
            target_node_name: Name of the destination node.

        Returns:
            A dict describing the initiated transfer.

        Raises:
            NotFoundException: If the model, its weight file, or the target
                node cannot be found.
        """
        # 1. Resolve and validate the local model file path.
        model_path = self.get_model_path(db, model_id)
        
        # 2. Look up the target node.
        node_service = NodeService()
        target_node = node_service.get_node_by_name(db, target_node_name)
        if not target_node:
            raise NotFoundException(f"目标节点不存在: {target_node_name}")
        
        # Assumes the node exposes a service_ip attribute — TODO confirm;
        # the upload port 8001 is hard-coded here.
        target_url = f"http://{target_node.service_ip}:8001/api/v1/models/upload"  # default port 8001
        
        self.logger.info(f"准备将模型 {model_id} 从 {model_path} 传输到节点 {target_node_name} ({target_url})")
        
        # --- Actual file-transfer logic (placeholder) ---
        # A real implementation would POST the file with requests/httpx:
        # with open(model_path, "rb") as f:
        #     files = {"model_file": (model_path.name, f, "application/octet-stream")}
        #     response = requests.post(target_url, files=files)
        #     response.raise_for_status()  # ensure the upload succeeded
        # -------------------------
        
        # NOTE(review): no transfer has actually happened at this point; the
        # "initiated" status below reflects only the placeholder flow.
        self.logger.info(f"模型 {model_id} 已成功触发向 {target_node_name} 的传输")
        
        return {
            "model_id": model_id,
            "target_node": target_node_name,
            "status": "transfer_initiated"
        }

    def evaluate_model(
        self,
        db: Session,
        model_id: str,
        eval_request: "ModelEvaluateRequest"
    ) -> Dict:
        """Evaluate a model on a named dataset with a bounded sample count.

        Steps:
        1) Locate the model weights via ``model_id``.
        2) Locate the dataset directory via ``dataset_name``
           (falls back to matching ``dataset_id``).
        3) Sample up to ``max_samples`` images into a temporary eval folder.
        4) Generate a temporary dataset.yaml and run YOLO ``val``.
        5) Extract metrics, persist them, and return the full report.

        Args:
            db: Active SQLAlchemy session.
            model_id: Business id of the model to evaluate.
            eval_request: Evaluation parameters (dataset name, sample cap,
                confidence/IoU thresholds, image size, device, shuffle, seed).

        Returns:
            A dict with metrics, sample counts, prediction-image URLs and
            temporary evaluation-directory diagnostics.

        Raises:
            NotFoundException: Missing model, weights, dataset or directory.
            ValidationException: Empty dataset, sample-prep failure, model
                load failure, or a failed validation run.
        """
        try:
            # Deferred imports to avoid module-level side effects.
            # NOTE(review): failures are swallowed here, yet DBDataset is used
            # unconditionally below — an import error would surface later as a
            # NameError. Confirm whether fail-fast would be preferable.
            from database import Dataset as DBDataset, Model as DBModel
            from schemas import ModelEvaluateResponse
        except Exception:
            pass

        # 1. Fetch the model record and validate its weight file.
        model_obj = db.query(Model).filter(Model.model_id == model_id).first()
        if not model_obj:
            raise NotFoundException(f"模型不存在: {model_id}")
        if not model_obj.weight_file_path:
            raise NotFoundException(f"模型权重文件路径缺失: {model_id}")
        weight_path = Path(model_obj.weight_file_path)
        if not weight_path.exists():
            raise NotFoundException(f"模型权重不存在: {weight_path}")

        # 2. Resolve the dataset record.
        dataset = db.query(DBDataset).filter(DBDataset.name == eval_request.dataset_name).first()
        if not dataset:
            # Fallback: allow lookup by dataset_id for compatibility.
            dataset = db.query(DBDataset).filter(DBDataset.dataset_id == eval_request.dataset_name).first()
        if not dataset:
            raise NotFoundException(f"数据集不存在: {eval_request.dataset_name}")

        # Locate the dataset directory on disk.
        if dataset.folder_path:
            dataset_folder = Path(dataset.folder_path)
        else:
            dataset_folder = self.config.paths.datasets_dir / dataset.dataset_id
        if not dataset_folder.exists():
            raise NotFoundException(f"数据集目录不存在: {dataset_folder}")

        images_dir = dataset_folder / "images"
        labels_dir = dataset_folder / "labels"
        if not images_dir.exists():
            raise ValidationException(f"图片目录不存在: {images_dir}")

        # 3. Select the evaluation subset.
        image_extensions = {'.jpg', '.jpeg', '.png', '.bmp', '.tiff', '.tif'}
        all_images = [p for p in images_dir.iterdir() if p.is_file() and p.suffix.lower() in image_extensions]
        if len(all_images) == 0:
            raise ValidationException("数据集中未找到图片")

        # Sort for reproducibility, then shuffle with the seed when requested.
        all_images.sort(key=lambda p: p.name)
        if eval_request.shuffle:
            rnd = random.Random(eval_request.seed)
            rnd.shuffle(all_images)

        selected_images = all_images[: min(eval_request.max_samples or len(all_images), len(all_images))]
        used_samples = len(selected_images)

        # 4. Build the temporary evaluation directory tree.
        eval_dir = self.config.paths.temp_dir / f"eval_{model_id}_{dataset.dataset_id}_{uuid.uuid4().hex[:8]}"
        eval_images = eval_dir / "images"
        eval_labels = eval_dir / "labels"
        eval_preds = eval_dir / "preds"
        eval_images.mkdir(parents=True, exist_ok=True)
        eval_labels.mkdir(parents=True, exist_ok=True)
        eval_preds.mkdir(parents=True, exist_ok=True)

        # Copy sampled images plus matching labels; track label statistics.
        copied = 0
        label_files_copied = 0
        label_class_max = -1
        for img in selected_images:
            try:
                shutil.copy2(img, eval_images / img.name)
                label_path = labels_dir / f"{img.stem}.txt"
                if label_path.exists():
                    shutil.copy2(label_path, eval_labels / label_path.name)
                    label_files_copied += 1
                    # Track the maximum class id seen in the label files.
                    try:
                        with open(label_path, 'r') as lf:
                            for line in lf:
                                line = line.strip()
                                if not line:
                                    continue
                                parts = line.split()
                                cls_id = int(float(parts[0]))
                                if cls_id > label_class_max:
                                    label_class_max = cls_id
                    except Exception:
                        pass
                copied += 1
            except Exception as e:
                self.logger.warning(f"复制样本失败 {img}: {e}")

        if copied == 0:
            raise ValidationException("未能准备评测样本")

        # Read class names from the original dataset.
        class_names: List[str] = []
        try:
            # Prefer the original dataset.yaml when available.
            if dataset.config_path and Path(dataset.config_path).exists():
                import yaml
                with open(dataset.config_path, 'r', encoding='utf-8') as f:
                    base_cfg = yaml.safe_load(f) or {}
                    names = base_cfg.get("names")
                    if isinstance(names, dict):
                        # May be an index -> name mapping.
                        class_names = [name for _, name in sorted(names.items(), key=lambda kv: int(kv[0]))]
                    elif isinstance(names, list):
                        class_names = names
        except Exception as e:
            self.logger.warning(f"读取原数据集类别信息失败: {e}")

        if not class_names:
            class_names = DatasetUtils.get_class_names_from_dataset(dataset_folder)

        # Extend names if label ids exceed the list length, so mAP
        # computation does not silently drop those boxes.
        if label_class_max >= 0 and label_class_max + 1 > len(class_names):
            needed = label_class_max + 1 - len(class_names)
            class_names = class_names + [f"class_{len(class_names)+i}" for i in range(needed)]

        # Write dataset.yaml into the temporary directory.
        eval_yaml_path = Path(DatasetUtils.create_yolo_config(eval_dir, class_names))

        # 5. Load the model and run validation.
        try:
            yolo_model = YOLO(str(weight_path))
        except Exception as e:
            raise ValidationException(f"加载模型失败: {e}")

        preds_subdir = "preds"
        try:
            val_results = yolo_model.val(
                data=str(eval_yaml_path),
                imgsz=eval_request.img_size or 640,
                conf=eval_request.conf if eval_request.conf is not None else 0.1,
                iou=eval_request.iou if eval_request.iou is not None else 0.5,
                device=eval_request.device or "cpu",
                plots=True,
                save_json=False,
                save_hybrid=False,
                project=str(eval_dir),
                name=preds_subdir,  # visual outputs land under eval_dir/preds*
            )
        except Exception as e:
            raise ValidationException(f"评测执行失败: {e}")

        # 6. Extract metrics.
        metrics = ModelEvaluationUtils.extract_evaluation_metrics(val_results)
        # Derive F1 from precision/recall (guarding against zero division).
        if metrics.get("precision") and metrics.get("recall") and (metrics.get("precision") + metrics.get("recall")):
            pr = metrics["precision"]
            rc = metrics["recall"]
            f1_score = 2 * pr * rc / (pr + rc)
        else:
            f1_score = 0.0
        metrics["f1_score"] = float(f1_score)

        # Optional: persist the latest metrics back onto the model row
        # (best effort; failure is silently rolled back).
        try:
            model_obj.precision = float(metrics.get("precision") or 0)
            model_obj.recall = float(metrics.get("recall") or 0)
            model_obj.map_50 = float(metrics.get("map_50") or 0)
            model_obj.map_95 = float(metrics.get("map_95") or 0)
            model_obj.updated_at = datetime.utcnow()
            db.commit()
        except Exception:
            db.rollback()

        # Collect visualization images produced by the validation run.
        predicted_images: List[Dict[str, str]] = []
        try:
            # YOLO val output may land under eval_dir/preds*; common files
            # include val_batch*_pred.jpg, val_batch*_labels.jpg, results.png.
            # YOLO may use preds, preds2 etc.; prefer val_results.save_dir.
            allowed_exts = {'.png', '.jpg', '.jpeg'}
            # Prefer the directory reported by the val results.
            preds_root = None
            try:
                if hasattr(val_results, 'save_dir') and val_results.save_dir:
                    preds_root = Path(str(val_results.save_dir))
            except Exception:
                preds_root = None
            if preds_root is None:
                # Fallback: try preds, then preds2, else the pre-made folder.
                if (eval_dir / preds_subdir).exists():
                    preds_root = eval_dir / preds_subdir
                elif (eval_dir / f"{preds_subdir}2").exists():
                    preds_root = eval_dir / f"{preds_subdir}2"
                else:
                    preds_root = eval_preds

            # Reflect the final directory name in the response.
            preds_subdir = preds_root.name
            # Keep eval_preds in sync for later persistence and the response.
            eval_preds = preds_root

            for p in sorted(preds_root.glob('*')):
                if p.is_file() and p.suffix.lower() in allowed_exts:
                    # Expose an access URL served by the evaluations route.
                    url = f"/api/v1/evaluations/{eval_dir.name}/{p.name}"
                    predicted_images.append({
                        "filename": p.name,
                        "file_path": str(p),
                        "exists": True,
                        "file_size": FileManager.format_file_size(p),
                        "last_modified": datetime.fromtimestamp(p.stat().st_mtime).strftime("%Y-%m-%d %H:%M:%S"),
                        "image_url": url
                    })
        except Exception as e:
            self.logger.warning(f"收集预测图片失败: {e}")

        # Persist the evaluation record (best effort).
        try:
            from database import EvaluationRecord
            import json as _json
            record = EvaluationRecord(
                eval_id=eval_dir.name,
                model_id=model_obj.model_id,
                model_name=model_obj.model_name,
                dataset_id=dataset.dataset_id,
                dataset_name=dataset.name,
                eval_dir=str(eval_dir),
                preds_dir=str(eval_preds),
                preds_subdir=preds_subdir,
                used_samples=used_samples,
                metrics=_json.dumps(metrics, ensure_ascii=False),
                params=_json.dumps({
                    "conf": eval_request.conf,
                    "iou": eval_request.iou,
                    "img_size": eval_request.img_size,
                    "device": eval_request.device,
                    "max_samples": eval_request.max_samples,
                    "shuffle": eval_request.shuffle,
                    "seed": eval_request.seed,
                }, ensure_ascii=False),
                labels_in_eval=label_files_copied,
                max_label_class_id=label_class_max,
                nc=len(class_names),
                device_type=(eval_request.device or "cpu"),
                status="completed",
            )
            db.add(record)
            db.commit()
        except Exception as e:
            self.logger.warning(f"保存评测记录失败: {e}")
            db.rollback()

        response = {
            "model_id": model_obj.model_id,
            "model_name": model_obj.model_name,
            "dataset_id": dataset.dataset_id,
            "dataset_name": dataset.name,
            "used_samples": used_samples,
            "metrics": metrics,
            "eval_dir": str(eval_dir),
            "params": {
                "conf": eval_request.conf,
                "iou": eval_request.iou,
                "img_size": eval_request.img_size,
                "device": eval_request.device,
                "max_samples": eval_request.max_samples,
                "shuffle": eval_request.shuffle,
                "seed": eval_request.seed,
            },
            "eval_id": eval_dir.name,
            "predicted_images": predicted_images,
            "preds_dir": str(eval_preds),
            "preds_subdir": preds_subdir,
            # diagnostics
            "labels_in_eval": label_files_copied,
            "max_label_class_id": label_class_max,
            "nc": len(class_names),
        }
        return response

    def finetune_model(
        self, db: Session, model_file: UploadFile, dataset_name: str, 
        epochs: int, batch_size: int, learning_rate: float
    ) -> Dict:
        """
        Fine-tune an uploaded model file on the named dataset.

        Saves the upload into a unique temp directory, then delegates to
        ``TrainingService`` to create the training task.

        Args:
            db: Active SQLAlchemy session.
            model_file: Uploaded base-model weight file.
            dataset_name: Name of the dataset to fine-tune on.
            epochs: Number of training epochs.
            batch_size: Training batch size.
            learning_rate: Initial learning rate.

        Returns:
            A dict with a status message and the created training task id.
        """
        # 1. Save the uploaded model file into a unique temp directory.
        temp_dir = self.config.paths.temp_dir / f"finetune_{uuid.uuid4()}"
        temp_dir.mkdir(parents=True, exist_ok=True)
        
        base_model_path = temp_dir / model_file.filename
        try:
            with open(base_model_path, "wb") as buffer:
                shutil.copyfileobj(model_file.file, buffer)
        finally:
            # Always release the upload stream, even if the copy failed.
            model_file.file.close()
        
        self.logger.info(f"微调基础模型已保存到: {base_model_path}")
        
        # 2. Instantiate the training service.
        training_service = TrainingService()
        
        # 3. Build the training-task parameters with the proper schema object.
        from schemas import TrainingTaskCreate
        task_data = TrainingTaskCreate(
            model_type="custom",  # custom (uploaded) base model
            task_type="detection",
            dataset_name=dataset_name,
            epochs=epochs,
            batch_size=batch_size,
            learning_rate=learning_rate
        )
        
        # 4. Create the training task.
        # NOTE(review): the uploaded weights at base_model_path are never
        # passed into task_data — confirm the training service actually
        # picks them up, otherwise the upload is unused.
        task = training_service.create_training_task(db, task_data)
        self.logger.info(f"已为微调创建训练任务: {task.task_id}")
        
        return {
            "message": "模型微调任务已成功启动",
            "training_task_id": task.task_id
        }

    def get_models_summary(self, db: Session, model_type: Optional[str] = None, 
                          status: Optional[str] = None) -> dict:
        """Return aggregate model counts grouped by type, status and architecture.

        Args:
            db: active SQLAlchemy session.
            model_type: optional filter — restrict the summary to this model type.
            status: optional filter — restrict the summary to this status.

        Returns:
            dict with per-group count mappings and distinct-group totals.
        """
        from sqlalchemy import func
        from database import Model
        
        def _apply_filters(query):
            # BUGFIX: the original built a filtered query but never used it,
            # so model_type/status had no effect on the returned statistics.
            # Apply the same optional filters to every aggregate query.
            if model_type:
                query = query.filter(Model.model_type == model_type)
            if status:
                query = query.filter(Model.status == status)
            return query
        
        # Count models per type / status / architecture, honoring the filters.
        type_stats = _apply_filters(
            db.query(Model.model_type, func.count(Model.id))
        ).group_by(Model.model_type).all()
        
        status_stats = _apply_filters(
            db.query(Model.status, func.count(Model.id))
        ).group_by(Model.status).all()
        
        architecture_stats = _apply_filters(
            db.query(Model.architecture, func.count(Model.id))
        ).group_by(Model.architecture).all()
        
        # NULL group keys are dropped from the mappings (and from the
        # architecture total), matching the original behavior.
        return {
            "by_type": {item[0]: item[1] for item in type_stats if item[0]},
            "by_status": {item[0]: item[1] for item in status_stats if item[0]},
            "by_architecture": {item[0]: item[1] for item in architecture_stats if item[0]},
            "total_types": len(type_stats),
            "total_statuses": len(status_stats),
            "total_architectures": len([item for item in architecture_stats if item[0]])
        } 

    def get_model_evaluation_results(self, db: Session, model_id: str) -> dict:
        """Collect evaluation artifacts (curve/result images) for a model.

        Resolves the model's training task, searches likely output directories
        for the standard YOLO evaluation images, and returns their metadata
        plus any stored metric summary.

        Args:
            db: active SQLAlchemy session.
            model_id: business id of the model.

        Returns:
            dict with file info list, resolved base path and metric summary.

        Raises:
            NotFoundException: model or training task does not exist.
            ValidationException: model has no training task, or any other
                failure (re-wrapped).
        """
        try:
            # 1. Look up the model.
            model = db.query(Model).filter(Model.model_id == model_id).first()
            if not model:
                raise NotFoundException(f"模型不存在: {model_id}")
            
            # 2. Evaluation images only exist for trained models.
            if not model.training_task_id:
                raise ValidationException(f"模型 {model_id} 没有关联的训练任务，无法获取评估结果")
            
            # 3. Load the associated training task.
            from database import TrainingTask
            training_task = db.query(TrainingTask).filter(
                TrainingTask.task_id == model.training_task_id
            ).first()
            
            if not training_task:
                raise NotFoundException(f"训练任务不存在: {model.training_task_id}")
            
            # 4. Base path: prefer the task's recorded output path, otherwise
            # fall back to the configured default location.
            if training_task.model_output_path:
                base_path = Path(training_task.model_output_path)
            else:
                base_path = self.config.get_cloud_model_path(model.training_task_id)
            
            self.logger.info(f"查找评估结果文件，基础路径: {base_path}")
            
            # 5. Standard YOLO evaluation image names.
            evaluation_file_names = [
                "F1_curve.png",
                "P_curve.png", 
                "PR_curve.png",
                "R_curve.png",
                "results.png"
            ]
            
            # 6. Candidate directories: YOLO may nest outputs under
            # runs/detect/<name> or train* subdirectories.
            evaluation_files = []
            
            possible_subdirs = [
                base_path,  # directly in the output dir
                base_path / "runs" / "detect" / model.training_task_id,  # YOLO default layout
                base_path / model.training_task_id,  # subdir named after the task
            ]
            
            if base_path.exists():
                # Any train* directory directly under the output dir.
                for item in base_path.iterdir():
                    if item.is_dir() and item.name.startswith('train'):
                        possible_subdirs.append(item)
                
                # Every directory under runs/detect/.
                runs_dir = base_path / "runs"
                if runs_dir.exists():
                    for detect_dir in runs_dir.iterdir():
                        if detect_dir.is_dir() and detect_dir.name == "detect":
                            for train_dir in detect_dir.iterdir():
                                if train_dir.is_dir():
                                    possible_subdirs.append(train_dir)
            
            # Pick the first candidate containing at least one expected file.
            found_base_path = None
            
            for subdir in possible_subdirs:
                if not subdir.exists():
                    continue
                    
                found_files = sum(
                    1 for filename in evaluation_file_names
                    if (subdir / filename).exists()
                )
                
                if found_files > 0:
                    found_base_path = subdir
                    self.logger.info(f"找到评估结果目录: {found_base_path}，包含 {found_files} 个文件")
                    break
            
            if not found_base_path:
                # No directory had any file: still answer, marking files absent.
                found_base_path = base_path
                self.logger.warning(f"未在任何目录中找到评估结果文件，使用基础路径: {found_base_path}")
            
            # 7. Build per-file info entries.
            for filename in evaluation_file_names:
                file_path = found_base_path / filename
                file_info = {
                    "filename": filename,
                    "file_path": str(file_path),
                    "exists": file_path.exists(),
                    "file_size": None,
                    "last_modified": None,
                    # BUGFIX: URL previously ended in a literal "(unknown)"
                    # instead of interpolating the image filename.
                    "image_url": f"/api/v1/models/{model_id}/evaluation-images/{filename}"
                }
                
                if file_path.exists():
                    try:
                        file_info["file_size"] = FileManager.format_file_size(file_path)
                        file_info["last_modified"] = datetime.fromtimestamp(
                            file_path.stat().st_mtime
                        ).strftime("%Y-%m-%d %H:%M:%S")
                    except Exception as e:
                        # BUGFIX: log message previously contained a literal
                        # "(unknown)" instead of the filename.
                        self.logger.warning(f"获取文件 {filename} 信息失败: {e}")
                
                evaluation_files.append(file_info)
            
            # 8. Metric summary, only when at least one metric is stored.
            evaluation_summary = None
            if model.precision or model.recall or model.map_50 or model.map_95:
                evaluation_summary = {
                    "precision": model.precision,
                    "recall": model.recall,
                    "f1_score": model.f1_score,
                    "map_50": model.map_50,
                    "map_95": model.map_95
                }
            
            # 9. Assemble the response.
            result = {
                "model_id": model_id,
                "model_name": model.model_name,
                "training_task_id": model.training_task_id,
                "task_status": training_task.status,
                "evaluation_files": evaluation_files,
                "base_path": str(found_base_path),
                "created_at": model.created_at.strftime("%Y-%m-%d %H:%M:%S") if model.created_at else None,
                "evaluation_summary": evaluation_summary
            }
            
            self.logger.info(f"获取模型 {model_id} 评估结果成功，找到 {len([f for f in evaluation_files if f['exists']])} 个文件")
            return result
            
        except (NotFoundException, ValidationException):
            # Domain exceptions pass through unchanged for the API layer.
            raise
        except Exception as e:
            self.logger.error(f"获取模型评估结果失败: {e}")
            raise ValidationException(f"获取模型评估结果失败: {e}")

    def get_evaluation_task_list(self, db: Session, page: int = 1, page_size: int = 20) -> Tuple[List[dict], int]:
        """List evaluation info for models that have an associated training task.

        Each item carries: task name, model name/id, dataset name, derived
        accuracy (mAP@50 preferred, else precision), a derived forgetting
        rate (1 - F1), parsed tags and creation time.

        Args:
            db: active SQLAlchemy session.
            page: 1-based page number.
            page_size: items per page.

        Returns:
            (items, total_count) tuple.

        Raises:
            ValidationException: on any query/processing failure.
        """
        try:
            from database import TrainingTask
            
            # Inner join keeps only models that have a training task; the
            # extra isnot(None) filter is kept for explicitness.
            query = db.query(
                Model,
                TrainingTask.name.label('task_name')
            ).join(
                TrainingTask, Model.training_task_id == TrainingTask.task_id
            ).filter(
                Model.training_task_id.isnot(None)
            )
            
            # Total before pagination.
            total_count = query.count()
            
            # Newest first, then paginate.
            models_with_tasks = query.order_by(Model.created_at.desc()) \
                                    .offset((page - 1) * page_size) \
                                    .limit(page_size) \
                                    .all()
            
            evaluation_tasks = []
            
            for model, task_name in models_with_tasks:
                # Accuracy: prefer mAP@50, fall back to precision.
                model_accuracy = None
                if model.map_50 is not None:
                    model_accuracy = model.map_50
                elif model.precision is not None:
                    model_accuracy = model.precision
                
                # Forgetting rate derived as 1 - F1, clamped to [0, 1].
                # NOTE(review): this is a heuristic definition, not a
                # standard continual-learning forgetting metric.
                forgetting_rate = None
                if model.recall is not None and model.precision is not None:
                    if model.recall > 0 and model.precision > 0:
                        f1_score = 2 * (model.precision * model.recall) / (model.precision + model.recall)
                        forgetting_rate = 1 - f1_score
                        forgetting_rate = max(0.0, min(1.0, forgetting_rate))
                
                # Tags are stored as a JSON list; tolerate plain strings
                # and malformed JSON by wrapping the raw value.
                tags = []
                if model.tags:
                    try:
                        tags = json.loads(model.tags)
                        if not isinstance(tags, list):
                            tags = [model.tags]
                    except (json.JSONDecodeError, TypeError):
                        tags = [model.tags] if model.tags else []
                
                evaluation_task = {
                    "task_name": task_name if task_name else None,
                    "model_name": model.model_name,
                    "model_id": model.model_id,
                    "dataset_name": model.dataset_name,
                    "model_accuracy": model_accuracy,
                    "forgetting_rate": forgetting_rate,
                    "tags": tags,
                    "created_at": model.created_at.strftime("%Y-%m-%d %H:%M:%S") if model.created_at else None,
                    
                    # Supplementary raw metrics.
                    "has_training_task": bool(model.training_task_id),
                    "precision": model.precision,
                    "recall": model.recall,
                    "map_50": model.map_50,
                    "map_95": model.map_95
                }
                
                evaluation_tasks.append(evaluation_task)
            
            self.logger.info(f"获取评测任务列表成功，共 {len(evaluation_tasks)} 个任务，总数 {total_count}")
            return evaluation_tasks, total_count
            
        except Exception as e:
            self.logger.error(f"获取评测任务列表失败: {e}")
            raise ValidationException(f"获取评测任务列表失败: {e}")
    
    def get_evaluation_task_summary(self, db: Session) -> dict:
        """Aggregate statistics for the evaluation-task view.

        Args:
            db: active SQLAlchemy session.

        Returns:
            dict of counts and average metrics; on failure a best-effort
            ``{"error": str}`` is returned instead of raising.
        """
        try:
            from database import TrainingTask
            from sqlalchemy import func
            
            # Total models.
            total_models = db.query(func.count(Model.id)).scalar()
            
            # Models linked to a training task.
            models_with_tasks = db.query(func.count(Model.id)).filter(
                Model.training_task_id.isnot(None)
            ).scalar()
            
            # Counts per status.
            status_stats = db.query(Model.status, func.count(Model.id)).group_by(Model.status).all()
            
            # Counts per architecture.
            architecture_stats = db.query(Model.architecture, func.count(Model.id)).group_by(Model.architecture).all()
            
            # Models that carry any accuracy metric.
            models_with_accuracy = db.query(func.count(Model.id)).filter(
                (Model.map_50.isnot(None)) | (Model.precision.isnot(None))
            ).scalar()
            
            # Average metrics over the models that have them.
            avg_map_50 = db.query(func.avg(Model.map_50)).filter(
                Model.map_50.isnot(None)
            ).scalar()
            
            avg_precision = db.query(func.avg(Model.precision)).filter(
                Model.precision.isnot(None)
            ).scalar()
            
            return {
                "total_models": total_models,
                "models_with_tasks": models_with_tasks,
                "models_without_tasks": total_models - models_with_tasks,
                "models_with_accuracy": models_with_accuracy,
                # BUGFIX: use "is not None" so a legitimate average of 0.0
                # is returned as 0.0 rather than collapsed to None.
                "avg_map_50": float(avg_map_50) if avg_map_50 is not None else None,
                "avg_precision": float(avg_precision) if avg_precision is not None else None,
                "by_status": {item[0]: item[1] for item in status_stats if item[0]},
                "by_architecture": {item[0]: item[1] for item in architecture_stats if item[0]}
            }
            
        except Exception as e:
            # Best-effort endpoint: degrade to an error payload, never raise.
            self.logger.error(f"获取评测任务统计失败: {e}")
            return {"error": str(e)} 