import uuid
import json
import threading
from pathlib import Path
from typing import Dict, List, Optional, Tuple, Any
from datetime import datetime
from sqlalchemy.orm import Session
from contextlib import contextmanager
import time
from ultralytics import YOLO
from database import TrainingTask, Model, SessionLocal, Dataset, Node
from config import config
from exceptions import (
    TrainingException, ValidationException, DatasetException, ModelException,
    ErrorCode, ExceptionHandler, NotFoundException
)
from utils import (
    LogManager, ValidationUtils, DatasetUtils, ModelEvaluationUtils, FileManager
)
from process_manager import process_manager, managed_training_task

# Initialize the application-wide logging system once at import time.
LogManager.initialize_logging(config.log.level)
logger = LogManager.get_logger(__name__)


class TrainingService:
    """模型训练服务"""
    
    def __init__(self):
        """Initialize the training service against the shared configuration."""
        # Hold a reference to the global configuration object.
        self.config = config
        # Guarantee that every configured directory exists before any task runs.
        self.config.ensure_directories()
    
    def _validate_training_parameters(self, task_data: 'TrainingTaskCreate') -> None:
        """验证训练参数的合理性"""
        # 验证epochs
        if task_data.epochs <= 0 or task_data.epochs > 1000:
            raise ValidationException(f"Epochs must be between 1 and 1000, got: {task_data.epochs}")
        
        # 验证batch_size
        if task_data.batch_size <= 0 or task_data.batch_size > 128:
            raise ValidationException(f"Batch size must be between 1 and 128, got: {task_data.batch_size}")
        
        # 验证learning_rate
        if task_data.learning_rate <= 0 or task_data.learning_rate > 1.0:
            raise ValidationException(f"Learning rate must be between 0 and 1.0, got: {task_data.learning_rate}")
        
        # 验证模型类型
        valid_model_types = ['yolov8n', 'yolov8s', 'yolov8m', 'yolov8l', 'yolov8x']
        if task_data.model_type not in valid_model_types:
            raise ValidationException(f"Model type must be one of {valid_model_types}, got: {task_data.model_type}")
    
    def _validate_dataset(self, db: Session, dataset_name: str) -> None:
        """Ensure a dataset exists, contains samples and is ``active``.

        Raises:
            NotFoundException: if no dataset with that name exists.
            ValidationException: if the dataset is empty or not active.
        """
        record = db.query(Dataset).filter(Dataset.name == dataset_name).first()
        if record is None:
            raise NotFoundException(f"数据集 '{dataset_name}' 不存在")

        # An empty dataset cannot be trained on.
        if record.sample_count <= 0:
            raise ValidationException(f"数据集 '{dataset_name}' 文件为空或样本数量为0")

        if record.status != "active":
            raise ValidationException(f"数据集 '{dataset_name}' 状态不可用，当前状态: {record.status}")

        logger.info(f"数据集验证通过: {dataset_name}, 样本数量: {record.sample_count}")

    def _validate_model(self, db: Session, model_name: str) -> None:
        """Ensure the named model exists, is YOLO architecture, and is usable.

        Raises:
            NotFoundException: if no model with that name exists.
            ValidationException: if the architecture is set but not YOLO, or
                the status is neither ``published`` nor ``draft``.
        """
        record = db.query(Model).filter(Model.model_name == model_name).first()
        if record is None:
            raise NotFoundException(f"模型 '{model_name}' 不存在")

        # An unset architecture is tolerated; a non-YOLO one is rejected.
        if record.architecture and record.architecture.upper() != "YOLO":
            raise ValidationException(f"模型 '{model_name}' 不是YOLO架构，当前架构: {record.architecture}")

        if record.status not in ["published", "draft"]:
            raise ValidationException(f"模型 '{model_name}' 状态不可用，当前状态: {record.status}")

        logger.info(f"模型验证通过: {model_name}, 架构: {record.architecture}")

    def _validate_nodes(self, db: Session, cloud_node_ids: List[str], edge_node_ids: List[str]) -> None:
        """Verify every referenced node exists and matches its declared side.

        Cloud IDs must map to ``cloud`` nodes and edge IDs to ``edge`` nodes.
        A node whose status is neither ``online`` nor ``idle`` only triggers a
        warning.

        Raises:
            ValidationException: when no node is selected or a node has the
                wrong type for its list.
            NotFoundException: when one or more node IDs are unknown.
        """
        requested = cloud_node_ids + edge_node_ids
        if not requested:
            raise ValidationException("必须至少选择一个节点")

        found = db.query(Node).filter(Node.node_id.in_(requested)).all()
        found_ids = {n.node_id for n in found}

        unknown = set(requested) - found_ids
        if unknown:
            raise NotFoundException(f"以下节点不存在: {list(unknown)}")

        for n in found:
            # Each node must be on the side its ID list claims.
            if n.node_id in cloud_node_ids and n.node_type != "cloud":
                raise ValidationException(f"节点 '{n.node_id}' 不是云侧节点，当前类型: {n.node_type}")
            if n.node_id in edge_node_ids and n.node_type != "edge":
                raise ValidationException(f"节点 '{n.node_id}' 不是边侧节点，当前类型: {n.node_type}")

            # Off-line nodes are allowed but flagged.
            if n.status not in ["online", "idle"]:
                logger.warning(f"节点 '{n.node_id}' 状态不是在线，当前状态: {n.status}")

        logger.info(f"节点验证通过: 云侧节点 {cloud_node_ids}, 边侧节点 {edge_node_ids}")

    def _validate_training_params(self, training_method: str, training_params: Dict[str, Any]) -> Dict[str, Any]:
        """Merge user training params with per-method defaults and validate them.

        Args:
            training_method: one of the supported collaborative methods.
            training_params: caller overrides; they win over the defaults.

        Returns:
            The merged parameter dict.

        Raises:
            ValidationException: when a method-specific constraint is violated.
        """
        # Per-method default parameter sets.
        method_defaults: Dict[str, Dict[str, Any]] = {
            "增量学习": {
                "base_epochs": 50,
                "fine_tune_epochs": 30,
                "freeze_backbone": True,
                "learning_rate_decay": 0.1,
                "patience": 10
            },
            "样本协同": {
                "aggregation_strategy": "federated_avg",
                "communication_rounds": 10,
                "local_epochs": 5,
                "min_participants": 2,
                "sync_frequency": 1
            },
            "知识协同": {
                "knowledge_transfer_method": "distillation",
                "teacher_weight": 0.7,
                "student_weight": 0.3,
                "temperature": 4.0,
                "alpha": 0.5,
                "transfer_rounds": 12
            },
            "模型协同": {
                "model_fusion_method": "weighted_average",
                "performance_threshold": 0.8,
                "collaboration_rounds": 15,
                "knowledge_distillation": True,
                "temperature": 3.0
            }
        }

        # User-supplied values override the method defaults.
        final_params = {**method_defaults.get(training_method, {}), **training_params}

        # Method-specific sanity checks on the merged values.
        if training_method == "增量学习" and final_params.get("base_epochs", 0) <= 0:
            raise ValidationException("增量学习的base_epochs必须大于0")
        if training_method == "样本协同" and final_params.get("min_participants", 0) < 2:
            raise ValidationException("样本协同的min_participants必须大于等于2")
        if training_method == "知识协同" and not (0 < final_params.get("teacher_weight", 0) <= 1):
            raise ValidationException("知识协同的teacher_weight必须在0和1之间")
        if training_method == "模型协同" and not (0 < final_params.get("performance_threshold", 0) <= 1):
            raise ValidationException("模型协同的performance_threshold必须在0和1之间")

        logger.info(f"训练参数验证通过: {training_method}, 参数: {final_params}")
        return final_params
    
    @contextmanager
    def _get_db_session(self):
        """Yield a fresh SQLAlchemy session; commit on success, roll back on error.

        The session is always closed when the context exits.
        """
        session = SessionLocal()
        try:
            yield session
            session.commit()  # persist changes made inside the context
        except Exception:
            session.rollback()
            raise
        finally:
            session.close()
    
    def create_training_task(self, db: Session, task_data: 'TrainingTaskCreate') -> TrainingTask:
        """Validate, persist and launch a new training task.

        Runs all parameter/dataset/model/node validations, writes the task
        row, then starts the actual training on a daemon-less background
        thread registered with the process manager.

        Args:
            db: an open SQLAlchemy session; committed by this method.
            task_data: the creation payload (pydantic model).

        Returns:
            TrainingTask: the freshly persisted task row.

        Raises:
            ValidationException / NotFoundException: from the validation steps.
        """
        # Validate basic hyper-parameters (epochs, batch size, lr, model type).
        self._validate_training_parameters(task_data)
        
        # 1. The dataset must exist and contain samples.
        self._validate_dataset(db, task_data.dataset_name)
        
        # 2. The base model must exist and be YOLO architecture.
        self._validate_model(db, task_data.model_name)
        
        # 3. All referenced nodes must exist and match their declared side.
        self._validate_nodes(db, task_data.cloud_node_ids, task_data.edge_node_ids)
        
        # 4. Merge method-specific defaults into the training parameters.
        validated_training_params = self._validate_training_params(
            task_data.training_method, task_data.training_params
        )

        task_id = str(uuid.uuid4())
        
        # tags is stored as a single plain string (or NULL when absent).
        tags_str = task_data.tags if task_data.tags else None
        
        # Node id lists are persisted as JSON strings.
        cloud_node_ids_json = json.dumps(task_data.cloud_node_ids, ensure_ascii=False)
        edge_node_ids_json = json.dumps(task_data.edge_node_ids, ensure_ascii=False)
        
        # Training parameters are persisted as a JSON string as well.
        training_params_json = json.dumps(validated_training_params, ensure_ascii=False)
        
        # Legacy compatibility columns store plain node counts.
        cloud_nodes_count = len(task_data.cloud_node_ids)
        edge_nodes_count = len(task_data.edge_node_ids)
        
        # Build the task row from the payload plus configured defaults.
        new_task = TrainingTask(
            task_id=task_id,
            name=task_data.name,
            description=task_data.description,
            type=task_data.type,
            model_type=task_data.model_type,
            task_type=task_data.task_type,
            dataset_name=task_data.dataset_name,
            tags=tags_str,
            
            # Extended fields
            training_method=task_data.training_method,
            model_name=task_data.model_name,
            total_nodes=task_data.total_nodes,
            cloud_node_ids=cloud_node_ids_json,
            edge_node_ids=edge_node_ids_json,
            training_params=training_params_json,
            
            # Legacy compatibility fields
            cloud_nodes=cloud_nodes_count,
            edge_nodes=edge_nodes_count,
            
            # Status and file-system paths
            status="pending",
            progress=0,
            start_time=datetime.utcnow(),
            epochs=task_data.epochs,
            batch_size=task_data.batch_size,
            learning_rate=task_data.learning_rate,
            log_path=str(self.config.get_log_path(task_id)),
            model_output_path=str(self.config.get_cloud_model_path(task_id))
        )
        
        db.add(new_task)
        db.commit()
        db.refresh(new_task)
        
        # Convert the pydantic payload to a plain dict so the background
        # thread does not share a live pydantic object with the caller.
        thread_task_data = task_data.model_dump()
        thread_task_data['validated_training_params'] = validated_training_params
        
        # Launch training asynchronously.
        # NOTE(review): the thread is started *before* it is registered with
        # the process manager -- confirm a very fast task cannot finish before
        # registration.
        thread = threading.Thread(target=self._run_training, args=(task_id, thread_task_data))
        thread.start()
        
        # Register with the process manager for cancellation/cleanup support.
        process_manager.register_training_thread(task_id, thread)
        
        logger.info(f"训练任务创建成功: {task_id}, 训练方法: {task_data.training_method}, 模型: {task_data.model_name}")
        
        return new_task
    
    def get_training_task_by_id(self, db: Session, task_id: str) -> TrainingTask:
        """Return the training task with the given id.

        Raises:
            NotFoundException: if no task with that id exists.
        """
        record = (
            db.query(TrainingTask)
            .filter(TrainingTask.task_id == task_id)
            .first()
        )
        if record is None:
            raise NotFoundException(f"Training task with id {task_id} not found.")
        return record

    def list_training_tasks(
        self, db: Session, page: int, page_size: int, 
        status: Optional[str] = None, model_type: Optional[str] = None
    ) -> Tuple[List[TrainingTask], int]:
        """Return one page of training tasks plus the total match count.

        ``status`` filters by exact match, ``model_type`` by substring;
        results are ordered newest first.
        """
        query = db.query(TrainingTask)
        if status:
            query = query.filter(TrainingTask.status == status)
        if model_type:
            query = query.filter(TrainingTask.model_type.contains(model_type))

        total = query.count()
        offset = (page - 1) * page_size
        rows = (
            query.order_by(TrainingTask.created_at.desc())
            .offset(offset)
            .limit(page_size)
            .all()
        )
        return rows, total

    def update_training_task(self, db: Session, task_id: str, task_update: 'TrainingTaskUpdate') -> TrainingTask:
        """Apply a partial update to a training task (usually called internally).

        Only fields explicitly set on ``task_update`` are written.  A ``tags``
        value is serialized to a JSON string before storage.

        Returns:
            TrainingTask: the refreshed task row.

        Raises:
            NotFoundException: if the task does not exist.
        """
        task = self.get_training_task_by_id(db, task_id)
        update_data = task_update.model_dump(exclude_unset=True)

        # Serialize tags to JSON for storage.  Uses the module-level json
        # import; the previous local `import json` shadowed it redundantly.
        # NOTE(review): create_training_task stores tags as a plain string --
        # confirm both call sites agree on the stored format.
        if 'tags' in update_data and update_data['tags'] is not None:
            update_data['tags'] = json.dumps(update_data['tags'], ensure_ascii=False)

        for key, value in update_data.items():
            setattr(task, key, value)
        db.commit()
        db.refresh(task)
        return task

    def cancel_training_task(self, db: Session, task_id: str):
        """Cancel a pending or running training task.

        Tries to stop the background worker via the process manager and
        records the cancellation on the task row even when the worker could
        not be found.

        Raises:
            NotFoundException: if the task does not exist.
            ValidationException: if the task is not pending/running.
        """
        task = self.get_training_task_by_id(db, task_id)

        if task.status not in ["pending", "running"]:
            raise ValidationException(f"Task is in '{task.status}' state and cannot be canceled.")

        # Best effort: the worker may already have exited on its own.
        if not process_manager.unregister_training_thread(task_id):
            logger.warning(f"Could not find or stop process for task {task_id}. Updating status anyway.")

        task.status = "canceled"
        task.error_message = "Task canceled by user."
        db.commit()
        logger.info(f"Training task {task_id} has been canceled.")

    def delete_training_task(self, db: Session, task_id: str):
        """Delete a training task, its derived model records and artifacts.

        Running tasks must be canceled first.  File-system cleanup failures
        are logged but do not abort the deletion; database failures roll the
        whole transaction back.

        Raises:
            NotFoundException: if the task does not exist.
            ValidationException: if the task is running or deletion fails.
        """
        import shutil

        task = self.get_training_task_by_id(db, task_id)

        # Only non-running tasks may be deleted.
        if task.status in ["running"]:
            # Fix: this message had a pointless f-string prefix (no placeholders).
            raise ValidationException("无法删除正在运行的训练任务。请先取消任务后再删除。")

        logger.info(f"开始删除训练任务: {task_id}")

        try:
            # 1. Stop any background worker still attached to this task.
            stopped = process_manager.unregister_training_thread(task_id)
            if stopped:
                logger.info(f"已停止训练任务 {task_id} 的相关进程")

            # 2. Remove model records produced by this task.
            related_models = db.query(Model).filter(Model.training_task_id == task_id).all()
            deleted_model_count = 0
            for model in related_models:
                logger.info(f"删除与训练任务 {task_id} 相关的模型: {model.model_name} (ID: {model.model_id})")
                db.delete(model)
                deleted_model_count += 1

            # 3. Best-effort cleanup of on-disk training artifacts.
            try:
                # Model output directory (weights etc.).
                model_output_path = self.config.get_cloud_model_path(task_id)
                if model_output_path.exists():
                    shutil.rmtree(model_output_path)
                    logger.info(f"已删除模型输出目录: {model_output_path}")

                # Per-task log file.
                log_path = self.config.get_log_path(task_id)
                if log_path.exists():
                    log_path.unlink()
                    logger.info(f"已删除日志文件: {log_path}")

            except Exception as e:
                logger.warning(f"清理训练任务 {task_id} 的文件时发生错误: {e}")

            # 4. Delete the task row and commit everything atomically.
            db.delete(task)
            db.commit()

            logger.info(f"训练任务 {task_id} 删除成功，同时删除了 {deleted_model_count} 个相关模型")

        except Exception as e:
            db.rollback()
            logger.error(f"删除训练任务 {task_id} 失败: {e}")
            raise ValidationException(f"删除训练任务失败: {str(e)}")

    def _update_task_status(self, db: Session, task_id: str, status: str, **kwargs):
        """Transactionally set a task's status plus any extra attributes.

        ``end_time`` is stamped automatically for terminal states.  Keyword
        attributes that don't exist on the model are silently ignored.
        Rolls back and re-raises on any failure.

        Raises:
            NotFoundException: if the task does not exist.
        """
        try:
            task = db.query(TrainingTask).filter(TrainingTask.task_id == task_id).first()
            if not task:
                raise NotFoundException(f"Training task with id {task_id} not found.")

            task.status = status

            # Terminal states record their completion time.
            # NOTE(review): datetime.utcnow() is naive and deprecated in 3.12;
            # the whole file uses it, so kept for consistency.
            if status in ['completed', 'failed', 'canceled']:
                task.end_time = datetime.utcnow()

            for key, value in kwargs.items():
                if hasattr(task, key):
                    setattr(task, key, value)

            db.commit()
            db.refresh(task)
        except Exception:
            # Fix: the exception binding `as e` was unused.
            db.rollback()
            raise
    
    def update_task_progress(self, db: Session, task_id: str, progress: int, accuracy: Optional[float] = None):
        """Persist a task's progress percentage (and optionally its accuracy).

        Rolls back and re-raises on any database failure.

        Returns:
            TrainingTask: the refreshed task row.

        Raises:
            NotFoundException: if the task does not exist.
        """
        try:
            task = self.get_training_task_by_id(db, task_id)
            task.progress = progress
            if accuracy is not None:
                task.accuracy = accuracy
            db.commit()
            db.refresh(task)
            return task
        except Exception:
            # Fix: the exception binding `as e` was unused.
            db.rollback()
            raise

    def _run_training(self, task_id: str, task_data: dict):
        """Background worker that drives a single training task end to end.

        Runs on the thread started by create_training_task: marks the task
        running, prepares the dataset config and model, trains, evaluates,
        records the produced model, and updates the task status to
        completed/failed.  Every DB interaction uses its own short-lived
        session via _get_db_session.
        """
        # Task-scoped logger so all output for this run lands in one log file.
        task_logger = LogManager.create_task_logger(task_id)
        
        # The process manager context tracks this task for cancellation/cleanup.
        with managed_training_task(task_id):
            try:
                # Mark the task as running (dedicated session).
                with self._get_db_session() as db:
                    self._update_task_status(
                        db, task_id, "running", 
                        log_path=str(self.config.get_log_path(task_id))
                    )
                
                task_logger.info(f"开始训练任务 {task_id}")
                task_logger.info(f"训练参数: {task_data}")
                
                # Resolve the dataset name, falling back to the configured default.
                dataset_name = task_data.get("dataset_name")
                if not dataset_name:
                    default_dataset = getattr(self.config.training, 'default_dataset', None)
                    if not default_dataset:
                        raise ValidationException("Dataset name is required and no default dataset configured")
                    dataset_name = default_dataset
                
                dataset_path = self.config.get_dataset_path(dataset_name)
                
                # Resolve the model type, falling back to the configured default.
                model_type = task_data.get("model_type", self.config.training.model_type)
                
                task_logger.info(f"数据集路径: {dataset_path.absolute()}")
                task_logger.info(f"模型类型: {model_type}")
                
                # The dataset directory must pass structural validation.
                if not ValidationUtils.validate_dataset_path(dataset_path):
                    raise DatasetException(
                        f"无效的数据集路径: {dataset_path.absolute()}",
                        ErrorCode.DATASET_NOT_FOUND,
                        details={"dataset_path": str(dataset_path)}
                    )
                
                # Generate the YOLO dataset config file.
                dataset_config = self._create_dataset_config(dataset_path, task_logger)
                task_logger.info(f"数据集配置文件: {dataset_config}")
                
                # Load the base/pretrained model.
                model = self._initialize_model(task_data, model_type, task_logger)
                
                # Prepare the output directory for weights and artifacts.
                output_dir = self.config.get_cloud_model_path(task_id)
                output_dir.mkdir(parents=True, exist_ok=True)
                task_logger.info(f"模型输出目录: {output_dir}")
                
                # Run the actual training.
                # NOTE(review): `results` is never used afterwards.
                results = self._train_model(model, dataset_config, task_data, output_dir, task_id, task_logger)
                
                # Evaluate the trained model on the validation split.
                evaluation_results = self._evaluate_model(model, task_logger)
                
                # Mark the task completed (dedicated session).
                with self._get_db_session() as db:
                    self._update_task_status(
                        db, task_id, "completed",
                        evaluation_results=json.dumps(evaluation_results),
                        final_accuracy=evaluation_results.get("map_50", 0.0)
                    )
                
                # Persist a Model record for the produced weights (dedicated session).
                with self._get_db_session() as db:
                    self._create_model_record(db, task_id, model_type, dataset_name, evaluation_results, output_dir, task_logger)
                
                task_logger.info(f"训练任务 {task_id} 完成")
                logger.info(f"训练任务 {task_id} 完成")
                
            except (DatasetException, ModelException, ValidationException) as e:
                # Known business exceptions: record the failure message verbatim.
                error_msg = f"训练任务 {task_id} 失败 - {type(e).__name__}: {str(e)}"
                task_logger.error(error_msg)
                logger.error(error_msg)
                
                with self._get_db_session() as db:
                    self._update_task_status(db, task_id, "failed")
                    task = db.query(TrainingTask).filter(TrainingTask.task_id == task_id).first()
                    ExceptionHandler.safe_set_error_message(task, str(e))
                    db.commit()
                
            except Exception as e:
                # Unexpected exceptions: log full details and mark the task failed.
                error_msg = f"训练任务 {task_id} 发生未预期错误: {str(e)}"
                task_logger.error(error_msg)
                task_logger.error(f"错误详情: {repr(e)}")
                logger.error(error_msg)
                
                with self._get_db_session() as db:
                    self._update_task_status(db, task_id, "failed")
                    task = db.query(TrainingTask).filter(TrainingTask.task_id == task_id).first()
                    ExceptionHandler.safe_set_error_message(task, f"Unexpected error: {str(e)}")
                    db.commit()
                
            finally:
                # Release the per-task logger resources.
                LogManager.cleanup_task_logger(task_id)
    
    def _initialize_model(self, task_data: dict, model_type: str, task_logger) -> YOLO:
        """Build the YOLO model to train.

        Prefers an explicit fine-tune base checkpoint from ``task_data``;
        otherwise falls back to the locally stored pretrained weights for
        ``model_type``.

        Raises:
            ModelException: when no local pretrained weights are available.
        """
        task_logger.info(f"初始化模型: {model_type}")

        # Fine-tuning path: a concrete base checkpoint was supplied and exists.
        base_model_path = task_data.get("base_model_path")
        if base_model_path and Path(base_model_path).exists():
            task_logger.info(f"使用指定的微调基础模型: {base_model_path}")
            return YOLO(str(base_model_path))

        # Otherwise resolve pretrained weights through the configuration.
        pretrained_path = self.config.get_model_path(model_type)
        if not pretrained_path.exists():
            raise ModelException(
                f"本地预训练文件不存在: {pretrained_path}，请先下载对应的预训练模型",
                ErrorCode.MODEL_LOAD_FAILED,
                details={"pretrained_path": str(pretrained_path)}
            )

        task_logger.info(f"找到本地预训练权重: {pretrained_path}")
        return YOLO(str(pretrained_path))
    
    def _train_model(self, model: YOLO, dataset_config: str, task_data: dict, 
                    output_dir: Path, task_id: str, task_logger) -> Any:
        """Run YOLO training with progress reporting hooked into the database.

        Installs ultralytics callbacks that write progress/accuracy back to
        the TrainingTask row (with retry + exponential backoff), then
        delegates to model.train().

        Returns:
            The ultralytics training results object.
        """
        task_logger.info("开始模型训练...")
        
        # Custom callbacks keep the DB row's progress in sync with training.
        # NOTE(review): default_callbacks is imported but never used below.
        from ultralytics.utils.callbacks import default_callbacks
        from functools import wraps
        import time
        
        def retry_db_operation(max_retries=3, delay=1):
            """Decorator that retries a DB operation with exponential backoff."""
            def decorator(func):
                @wraps(func)
                def wrapper(*args, **kwargs):
                    for attempt in range(max_retries):
                        try:
                            return func(*args, **kwargs)
                        except Exception as e:
                            if attempt == max_retries - 1:
                                task_logger.error(f"数据库操作失败，已重试{max_retries}次: {e}")
                                raise
                            task_logger.warning(f"数据库操作失败，第{attempt + 1}次重试: {e}")
                            time.sleep(delay * (2 ** attempt))  # exponential backoff
                    return None
                return wrapper
            return decorator
        
        @retry_db_operation(max_retries=3, delay=0.5)
        def safe_update_progress(current_epoch: int, total_epochs: int, accuracy: Optional[float] = None):
            """Write the current epoch's progress (and optional accuracy) to the task row."""
            with self._get_db_session() as db:
                progress = int((current_epoch / total_epochs) * 100)
                task = db.query(TrainingTask).filter(TrainingTask.task_id == task_id).first()
                if task:
                    task.progress = progress
                    if accuracy is not None:
                        task.accuracy = accuracy
                    # The session context manager commits on exit.
                    
                    task_logger.info(f"训练进度更新: Epoch {current_epoch}/{total_epochs} ({progress}%)")
                    if accuracy is not None:
                        task_logger.info(f"当前准确率: {accuracy:.4f}")
                else:
                    raise ValueError(f"Training task {task_id} not found in database")
        
        @retry_db_operation(max_retries=3, delay=0.5)
        def safe_update_task_status(progress: int, end_time: Optional[datetime] = None, accuracy: Optional[float] = None):
            """Write progress and optional end time/accuracy to the task row."""
            with self._get_db_session() as db:
                task = db.query(TrainingTask).filter(TrainingTask.task_id == task_id).first()
                if task:
                    task.progress = progress
                    if end_time is not None:
                        task.end_time = end_time
                    if accuracy is not None:
                        task.accuracy = accuracy
                    # The session context manager commits on exit.
                else:
                    raise ValueError(f"Training task {task_id} not found in database")
        
        def on_train_epoch_end(trainer):
            """Ultralytics callback: record progress after every epoch."""
            try:
                current_epoch = trainer.epoch + 1
                total_epochs = trainer.epochs
                
                # Pull the latest validation metric if available.
                accuracy = None
                if hasattr(trainer, 'metrics') and trainer.metrics:
                    # Prefer mAP@0.5 as the accuracy proxy.
                    if 'metrics/mAP50(B)' in trainer.metrics:
                        accuracy = float(trainer.metrics['metrics/mAP50(B)'])
                    elif 'metrics/mAP50' in trainer.metrics:
                        accuracy = float(trainer.metrics['metrics/mAP50'])
                
                # Retry-wrapped DB update.
                safe_update_progress(current_epoch, total_epochs, accuracy)
                    
            except Exception as e:
                task_logger.error(f"训练回调执行失败: {e}")
                # Swallow the error so a DB hiccup cannot abort training.
        
        def on_train_start(trainer):
            """Ultralytics callback: reset progress to 0% when training starts."""
            try:
                # Retry-wrapped DB update.
                safe_update_task_status(progress=0)
                task_logger.info("训练开始，进度初始化为0%")
            except Exception as e:
                task_logger.error(f"训练开始回调失败: {e}")
                # Swallow the error so a DB hiccup cannot abort training.
        
        def on_train_end(trainer):
            """Ultralytics callback: record 100% progress and the end time."""
            try:
                # Best fitness (if present) doubles as the final accuracy.
                final_accuracy = None
                if hasattr(trainer, 'best_fitness') and trainer.best_fitness is not None:
                    final_accuracy = float(trainer.best_fitness)
                
                # Retry-wrapped DB update.
                safe_update_task_status(
                    progress=100,
                    end_time=datetime.utcnow(),
                    accuracy=final_accuracy
                )
                task_logger.info("训练完成，进度设置为100%，结束时间已记录")
            except Exception as e:
                task_logger.error(f"训练结束回调失败: {e}")
                # Swallow the error, but keep a log trail.
        
        def on_train_interrupt(trainer):
            """Ultralytics callback: log an interruption; status handled by caller."""
            try:
                task_logger.warning("训练被中断")
                # Only log here; the outer _run_training decides the final status.
            except Exception as e:
                task_logger.error(f"训练中断回调失败: {e}")
        
        # Register the callbacks with the model.
        model.add_callback("on_train_epoch_end", on_train_epoch_end)
        model.add_callback("on_train_start", on_train_start)
        model.add_callback("on_train_end", on_train_end)
        model.add_callback("on_train_interrupt", on_train_interrupt)
        
        try:
            return model.train(
                data=dataset_config,
                epochs=task_data.get("epochs", self.config.training.epochs),
                batch=task_data.get("batch_size", self.config.training.batch_size),
                lr0=task_data.get("learning_rate", self.config.training.learning_rate),
                project=str(output_dir.parent),
                name=task_id,
                exist_ok=True,
                verbose=True,
                amp=False  # disable automatic mixed precision to avoid an auto-download
            )
        except Exception as e:
            task_logger.error(f"训练过程中发生异常: {e}")
            # No manual session cleanup needed: sessions are context-managed.
            raise
    
    def _evaluate_model(self, model: YOLO, task_logger) -> Dict:
        """Validate the trained model and return the extracted metric dict."""
        task_logger.info("开始模型评估...")
        raw_results = model.val()
        metrics = ModelEvaluationUtils.extract_evaluation_metrics(raw_results)
        task_logger.info(f"评估结果: {ModelEvaluationUtils.format_evaluation_results(metrics)}")
        return metrics
    
    def _create_dataset_config(self, dataset_path: Path, logger) -> str:
        """Produce a YOLO dataset config via DatasetUtils and return it.

        Raises:
            DatasetException: when class names cannot be read or the config
                cannot be created.
        """
        try:
            # Discover the class names, then delegate config generation.
            class_names = DatasetUtils.get_class_names_from_dataset(dataset_path)
            logger.info(f"数据集类别: {class_names}")
            return DatasetUtils.create_yolo_config(dataset_path, class_names)
        except Exception as e:
            logger.error(f"创建数据集配置失败: {e}")
            raise DatasetException(
                f"创建数据集配置失败: {e}",
                ErrorCode.DATASET_CORRUPTED,
                details={"dataset_path": str(dataset_path)}
            )
    
    def _create_model_tags_json(self, training_task) -> str:
        """
        从训练任务中创建模型的tags JSON字符串
        确保返回的是有效的JSON数组格式
        
        Args:
            training_task: 训练任务对象
            
        Returns:
            str: JSON数组格式的标签字符串
        """
        import json
        
        # 默认标签
        default_tags = [""]
        
        if not training_task or not training_task.tags:
            return json.dumps(default_tags, ensure_ascii=False)
        
        # 尝试解析training_task.tags
        try:
            # 如果training_task.tags已经是JSON数组格式
            if training_task.tags.strip().startswith('[') and training_task.tags.strip().endswith(']'):
                # 验证是否为有效JSON
                parsed_tags = json.loads(training_task.tags)
                if isinstance(parsed_tags, list) and len(parsed_tags) > 0:
                    return training_task.tags  # 直接返回有效的JSON数组
                else:
                    return json.dumps(default_tags, ensure_ascii=False)
            else:
                # 如果是单个字符串，转换为数组
                single_tag = training_task.tags.strip()
                if single_tag:
                    return json.dumps([single_tag], ensure_ascii=False)
                else:
                    return json.dumps(default_tags, ensure_ascii=False)
                    
        except (json.JSONDecodeError, AttributeError, TypeError) as e:
            logger.warning(f"解析训练任务tags字段失败: {training_task.tags}, 错误: {e}")
            # 如果解析失败，尝试将其作为单个字符串处理
            if isinstance(training_task.tags, str) and training_task.tags.strip():
                return json.dumps([training_task.tags.strip()], ensure_ascii=False)
            else:
                return json.dumps(default_tags, ensure_ascii=False)
    
    def _create_model_record(self, db: Session, task_id: str, model_type: str, 
                           dataset_name: str, evaluation_results: Dict, output_dir: Path, task_logger):
        """Create a Model DB record for a completed training task.

        Locates the trained weight file under ``output_dir``, gathers model
        metadata (file size, architecture, tags, dataset FK) and commits a new
        ``Model`` row. Never raises: on any failure the task is marked
        ``completed_with_warnings`` so a finished training run is not lost.

        Args:
            db: Active SQLAlchemy session.
            task_id: Training task identifier (also used as FK on the model).
            model_type: Base model name, e.g. ``yolov8n``.
            dataset_name: Name of the dataset the model was trained on.
            evaluation_results: Metric dict; ``map_50``/``map_95``/``precision``/
                ``recall``/``f1_score`` keys are read if present.
            output_dir: Training output directory containing ``weights/``.
            task_logger: Per-task logger for user-visible progress messages.
        """
        try:
            # Prefer the best checkpoint; fall back to alternatives if missing.
            weight_file_path = output_dir / "weights" / "best.pt"
            if not weight_file_path.exists():
                alternative_paths = [
                    output_dir / "weights" / "last.pt",
                    output_dir / f"{task_id}.pt"
                ]
                
                weight_file_path = None
                for alt_path in alternative_paths:
                    if alt_path.exists():
                        weight_file_path = alt_path
                        break
                
                if weight_file_path is None:
                    error_msg = f"训练完成但未找到权重文件，检查路径: {output_dir / 'weights'}"
                    task_logger.warning(error_msg)
                    
                    # No weights at all: flag the task instead of creating a
                    # model record that points at a nonexistent file.
                    self._update_task_status(
                        db, task_id, "completed_with_warnings",
                        error_message="训练完成但权重文件缺失"
                    )
                    return
            
            # Human-readable model file size; non-fatal if it cannot be computed.
            try:
                file_size = FileManager.format_file_size(weight_file_path)
            except Exception as e:
                task_logger.warning(f"计算文件大小失败: {e}")
                file_size = "未知"
            
            # Derive the architecture family from the model type string.
            architecture = "YOLO"
            if "yolov8" in model_type.lower():
                architecture = "YOLOv8"
            elif "yolov5" in model_type.lower():
                architecture = "YOLOv5"
            
            # Inherit tags from the originating training task (JSON array string).
            training_task = db.query(TrainingTask).filter(TrainingTask.task_id == task_id).first()
            tags_json = self._create_model_tags_json(training_task)
            
            # Resolve the dataset FK; a missing dataset is tolerated (NULL FK).
            dataset = db.query(Dataset).filter(Dataset.name == dataset_name).first()
            if not dataset:
                error_msg = f"无法找到数据集 '{dataset_name}'，模型记录创建可能不完整"
                task_logger.warning(error_msg)
                dataset_id = None
            else:
                dataset_id = dataset.dataset_id
            
            model_id = str(uuid.uuid4())
            model_record = Model(
                model_id=model_id,
                model_name=f"{model_type}_{task_id}",
                model_type=model_type,
                base_model_name=model_type,
                dataset_name=dataset_name,
                dataset_id=dataset_id,  # dataset foreign key (may be None, see above)
                weight_file_path=str(weight_file_path),
                training_task_id=task_id,
                
                # Extended metadata fields
                architecture=architecture,
                version="1.0.0",  # default version for freshly trained models
                status="draft",  # newly trained models start as drafts
                size=file_size,
                framework="PyTorch",  # YOLO models run on PyTorch
                last_trained=datetime.utcnow().strftime("%Y-%m-%d"),  # last training date
                deployment_type="both",  # cloud and edge deployment by default
                description=f"基于{dataset_name}数据集训练的{model_type}模型，任务ID: {task_id}",
                tags=tags_json,
                deployment_info='{"cloudNodes": [], "edgeNodes": []}',  # default deployment info
                
                # Performance metrics (absent keys become NULL)
                map_50=evaluation_results.get("map_50"),
                map_95=evaluation_results.get("map_95"),
                precision=evaluation_results.get("precision"),
                recall=evaluation_results.get("recall"),
                f1_score=evaluation_results.get("f1_score")  # if the evaluation produced one
            )
            
            db.add(model_record)
            db.commit()
            task_logger.info(f"成功创建模型记录: {model_id}, 架构: {architecture}, 大小: {file_size}, 数据集ID: {dataset_id}")
            
        except Exception as e:
            error_msg = f"创建模型记录失败: {e}"
            task_logger.error(error_msg)
            logger.error(error_msg)
            
            # A failed flush/commit leaves the session in an invalid state;
            # roll back first so the status update below can actually succeed.
            db.rollback()
            
            # Mark the task so the UI shows the model record was not created.
            self._update_task_status(
                db, task_id, "completed_with_warnings",
                error_message=f"训练完成但模型记录创建失败: {str(e)}"
            )

    def get_training_tasks_summary(self, db: Session, status: Optional[str] = None, 
                                  model_type: Optional[str] = None) -> dict:
        """Aggregate training-task counts by status, model type and dataset.

        Args:
            db: Active SQLAlchemy session.
            status: If set, only tasks with this status are counted.
            model_type: If set, only tasks of this model type are counted.

        Returns:
            Dict with ``by_status``/``by_model_type``/``by_dataset`` count maps
            (``None`` group keys are dropped) plus the number of distinct
            groups in each dimension.
        """
        from sqlalchemy import func
        from database import TrainingTask
        
        # Build the filter conditions once so ALL aggregations honor them.
        # (Previously the filters were built on a throwaway query and every
        # aggregate below counted the unfiltered table.)
        conditions = []
        if status:
            conditions.append(TrainingTask.status == status)
        if model_type:
            conditions.append(TrainingTask.model_type == model_type)
        
        # Task counts per status.
        status_stats = (db.query(TrainingTask.status, func.count(TrainingTask.id))
                        .filter(*conditions)
                        .group_by(TrainingTask.status).all())
        
        # Task counts per model type.
        type_stats = (db.query(TrainingTask.model_type, func.count(TrainingTask.id))
                      .filter(*conditions)
                      .group_by(TrainingTask.model_type).all())
        
        # Task counts per dataset.
        dataset_stats = (db.query(TrainingTask.dataset_name, func.count(TrainingTask.id))
                         .filter(*conditions)
                         .group_by(TrainingTask.dataset_name).all())
        
        return {
            "by_status": {item[0]: item[1] for item in status_stats if item[0]},
            "by_model_type": {item[0]: item[1] for item in type_stats if item[0]},
            "by_dataset": {item[0]: item[1] for item in dataset_stats if item[0]},
            "total_statuses": len(status_stats),
            "total_model_types": len(type_stats),
            "total_datasets": len(dataset_stats)
        }

    def get_task_logs(self, task_id: str, start_line: int = 1, end_line: Optional[int] = None, 
                      max_lines: int = 1000, level_filter: Optional[str] = None) -> Dict[str, Any]:
        """获取任务日志"""
        try:
            log_path = self.config.get_log_path(task_id)
            
            if not log_path.exists():
                return {
                    "task_id": task_id,
                    "log_file_path": str(log_path),
                    "total_lines": 0,
                    "file_size": "0 B",
                    "logs": [],
                    "last_modified": None
                }
            
            # 获取文件信息
            file_stats = log_path.stat()
            from utils import format_file_size
            file_size = format_file_size(file_stats.st_size)
            last_modified = datetime.fromtimestamp(file_stats.st_mtime).isoformat()
            
            # 读取日志文件
            all_lines = []
            total_lines = 0
            
            try:
                with open(log_path, 'r', encoding='utf-8') as f:
                    all_lines = f.readlines()
                    total_lines = len(all_lines)
            except UnicodeDecodeError:
                # 尝试其他编码
                with open(log_path, 'r', encoding='gbk') as f:
                    all_lines = f.readlines()
                    total_lines = len(all_lines)
            
            # 计算实际的起始和结束行
            actual_start = max(1, start_line)
            if end_line is None:
                actual_end = total_lines
            else:
                actual_end = min(end_line, total_lines)
            
            # 限制最大返回行数
            if actual_end - actual_start + 1 > max_lines:
                actual_end = actual_start + max_lines - 1
            
            # 提取指定范围的日志行
            selected_lines = all_lines[actual_start-1:actual_end]
            
            # 解析日志行
            parsed_logs = []
            for line_num, line in enumerate(selected_lines, start=actual_start):
                parsed_log = self._parse_log_line(line.strip())
                if level_filter and parsed_log['level'] != level_filter:
                    continue
                parsed_logs.append(parsed_log)
            
            return {
                "task_id": task_id,
                "log_file_path": str(log_path),
                "total_lines": total_lines,
                "file_size": file_size,
                "logs": parsed_logs,
                "last_modified": last_modified
            }
            
        except Exception as e:
            logger.error(f"获取任务日志失败: {task_id}, 错误: {str(e)}")
            return {
                "task_id": task_id,
                "log_file_path": "",
                "total_lines": 0,
                "file_size": "0 B",
                "logs": [],
                "last_modified": None,
                "error": str(e)
            }

    def retry_training_task(self, db: Session, task_id: str) -> TrainingTask:
        """Reset a terminal training task and relaunch it in a new thread.

        Args:
            db: Active SQLAlchemy session.
            task_id: Identifier of the task to retry.

        Returns:
            The refreshed ``TrainingTask`` row, now in ``pending`` state.

        Raises:
            ValidationException: If the task is not in a retryable state
                (failed/canceled/completed) or appears to be running already.
        """
        # Fetch the existing task record
        task = self.get_training_task_by_id(db, task_id)
        
        # Only terminal states may be retried
        if task.status not in ["failed", "canceled", "completed"]:
            raise ValidationException(f"Task is in '{task.status}' state and cannot be retried. Only failed, canceled, or completed tasks can be retried.")
        
        # Refuse to retry a task whose worker is still tracked as active.
        # NOTE(review): this reaches into process_manager's private
        # _active_tasks — verify whether a public accessor exists.
        if task_id in process_manager._active_tasks:
            raise ValidationException(f"Task {task_id} is already running and cannot be retried.")
        
        # Reset task state and per-run fields for a fresh attempt
        task.status = "pending"
        task.progress = 0
        task.accuracy = None
        task.error_message = None
        task.end_time = None
        task.start_time = datetime.utcnow()
        task.updated_at = datetime.utcnow()
        
        # Clean up the previous run's log file (best effort, optional)
        log_path = self.config.get_log_path(task_id)
        if log_path.exists():
            try:
                # Back up the old log with a timestamped suffix
                backup_path = log_path.with_suffix(f'.backup.{int(time.time())}.log')
                log_path.rename(backup_path)
                logger.info(f"备份旧日志文件到: {backup_path}")
            except Exception as e:
                logger.warning(f"备份日志文件失败: {e}")
        
        # Persist the reset before launching the worker thread
        db.commit()
        db.refresh(task)
        
        # Rebuild the payload _run_training expects from the stored row
        task_data = {
            "name": task.name,
            "model_type": task.model_type,
            "task_type": task.task_type,
            "type": task.type,
            "dataset_name": task.dataset_name,
            "description": task.description,
            "cloud_nodes": task.cloud_nodes,
            "edge_nodes": task.edge_nodes,
            "epochs": task.epochs,
            "batch_size": task.batch_size,
            "learning_rate": task.learning_rate
        }
        
        # Launch the retry asynchronously in a background thread
        thread = threading.Thread(target=self._run_training, args=(task_id, task_data))
        thread.start()
        
        # Register with the process manager for lifecycle tracking.
        # NOTE(review): registration happens after start(); there is a brief
        # window where the thread runs unregistered — confirm this is safe.
        process_manager.register_training_thread(task_id, thread)
        
        logger.info(f"Training task {task_id} has been queued for retry")
        
        return task

    def _parse_log_line(self, line: str) -> Dict[str, Any]:
        """解析日志行"""
        try:
            # 日志格式: 2025-06-22 10:05:44 - task_xxx - INFO - _run_training:198 - 开始训练任务
            parts = line.split(' - ', 4)
            if len(parts) >= 5:
                timestamp = parts[0]
                logger_name = parts[1]
                level = parts[2]
                location = parts[3]  # function:line
                message = parts[4]
                
                # 解析函数和行号
                function, line_num = None, None
                if ':' in location:
                    function, line_num_str = location.rsplit(':', 1)
                    try:
                        line_num = int(line_num_str)
                    except ValueError:
                        pass
                
                return {
                    "timestamp": timestamp,
                    "level": level,
                    "logger": logger_name,
                    "message": message,
                    "function": function,
                    "line": line_num
                }
            else:
                # 如果无法解析，返回原始行作为消息
                return {
                    "timestamp": "",
                    "level": "UNKNOWN",
                    "logger": "system",
                    "message": line,
                    "function": None,
                    "line": None
                }
        except Exception:
            # 解析失败时返回原始行
            return {
                "timestamp": "",
                "level": "UNKNOWN", 
                "logger": "system",
                "message": line,
                "function": None,
                "line": None
            } 