"""
数据集服务模块
负责数据集的管理、创建、更新和删除等功能
"""

import yaml
from pathlib import Path
from typing import List, Optional, Tuple, Dict, Any
from datetime import datetime
from sqlalchemy.orm import Session, aliased
from sqlalchemy import func
import json
import shutil
import uuid

from database import Dataset, ScenarioEnum, OriginEnum
from config import config
from utils import LogManager
from exceptions import NotFoundException, ValidationException

# 初始化日志系统
LogManager.initialize_logging(config.log.level)
logger = LogManager.get_logger(__name__)


class DatasetService:
    """数据集服务"""
    
    def __init__(self):
        """Initialize the service: bind config, ensure dirs, build defaults.

        ``default_dataset_values`` seeds new Dataset ORM rows and
        ``default_profile_values`` seeds generated profile.yaml files; both
        are copied (never mutated) by the ``_apply_*_defaults`` helpers.
        """
        # Use the unified configuration manager.
        self.config = config
        
        # Make sure all configured directories exist up front.
        self.config.ensure_directories()
        
        # Default values for Dataset records.
        # NOTE(review): datetime.utcnow() is evaluated once, at service
        # construction, so these default timestamps are frozen at startup; a
        # long-running service will stamp stale dates on new datasets —
        # confirm whether per-call timestamps were intended.
        self.default_dataset_values = {
            'description': '自动加载的数据集',
            'dataset_type': 'detection',
            'data_format': 'IMAGE',
            'uploader': 'system',
            'status': 'active',
            'file_count': 0,
            'total_size': 0,
            'sample_count': 0,
            'annotated_samples': 0,
            'upload_time': datetime.utcnow().strftime('%Y-%m-%d'),
            'is_learned': False,
            'is_annotated': False,
            'data_size': '0MB',
            'time_range': '未知',
            'update_frequency': '一次性',
            'is_forgetting': False,
            'is_quantization': False,
            'is_incremental': False,
            'is_base': True,
            'folder_path': '',
            'scenario': None,  # None so callers must specify it explicitly
            'base_dataset_id': None,  # id of the base dataset, if any
            'origin': 'cloud'  # data origin; defaults to the cloud side
        }
        
        # Default values for generated profile.yaml files (camelCase keys
        # mirror the profile file format, not the ORM column names).
        self.default_profile_values = {
            'id': None,  # generated from the folder name when missing
            'name': None,  # generated from the folder name when missing
            'uploader': 'system',
            'isLearned': False,
            'dataSize': '未知',
            'uploadTime': datetime.utcnow().isoformat(),
            'isAnnotated': True,
            'dataFormat': 'yolo',
            'sampleCount': 0,
            'timeRange': '未知',
            'updateFrequency': '一次性',
            'isForgetting': False,
            'isQuantization': False,
            'isIncremental': False,
            'isBase': False
        }
    
    def _apply_profile_defaults(self, profile_data: Dict[str, Any], folder_name: str) -> Dict[str, Any]:
        """为profile数据应用默认值"""
        # 创建一个新的字典以避免修改原始数据
        processed_data = self.default_profile_values.copy()
        
        # 使用文件夹名称作为默认ID和名称
        if not profile_data.get('id'):
            processed_data['id'] = folder_name
        
        if not profile_data.get('name'):
            processed_data['name'] = folder_name
        
        # 覆盖配置文件中提供的值
        processed_data.update(profile_data)
        
        logger.info(f"应用默认值后的配置: {folder_name} -> {processed_data}")
        return processed_data
    
    def _apply_dataset_defaults(self, dataset_data: Dict[str, Any]) -> Dict[str, Any]:
        """为数据集数据应用默认值"""
        # 创建一个新的字典以避免修改原始数据
        processed_data = self.default_dataset_values.copy()
        
        # 覆盖用户提供的值
        processed_data.update(dataset_data)
        
        # 确保必需字段
        if not processed_data.get('name'):
            raise ValidationException("数据集名称是必需的")
            
        if not processed_data.get('dataset_type'):
            processed_data['dataset_type'] = 'detection'
        
        logger.info(f"应用默认值后的数据集数据: {processed_data}")
        return processed_data
    
    def _calculate_dataset_stats(self, folder_path: Path) -> Dict[str, Any]:
        """计算数据集统计信息"""
        stats = {
            'file_count': 0,
            'total_size': 0,
            'sample_count': 0,
            'annotated_samples': 0
        }
        
        if not folder_path.exists():
            return stats
        
        try:
            # 计算图像文件
            images_dir = folder_path / 'images'
            if images_dir.exists():
                image_files = list(images_dir.glob('*.[jJ][pP][gG]')) + \
                             list(images_dir.glob('*.[pP][nN][gG]')) + \
                             list(images_dir.glob('*.[jJ][pP][eE][gG]'))
                stats['sample_count'] = len(image_files)
                stats['file_count'] += len(image_files)
                
                # 计算总大小
                for img_file in image_files:
                    try:
                        stats['total_size'] += img_file.stat().st_size
                    except Exception:
                        continue
            
            # 计算标注文件
            labels_dir = folder_path / 'labels'
            if labels_dir.exists():
                label_files = list(labels_dir.glob('*.txt'))
                stats['annotated_samples'] = len(label_files)
                stats['file_count'] += len(label_files)
                
                # 计算标注文件大小
                for label_file in label_files:
                    try:
                        stats['total_size'] += label_file.stat().st_size
                    except Exception:
                        continue
        
        except Exception as e:
            logger.warning(f"计算数据集统计信息失败 {folder_path}: {str(e)}")
        
        return stats
    
    def load_profiles_from_datasets(self, db: Session) -> int:
        """Scan the datasets directory and import each folder as a Dataset row.

        Every sub-folder of the configured datasets directory is treated as
        one dataset. When it contains a profile.yaml the values are read from
        it; otherwise defaults derived from the folder name are used. Folders
        whose dataset id already exists in the DB are skipped, and a failure
        in one folder does not abort the scan.

        Args:
            db: Active SQLAlchemy session; one commit is issued per dataset.

        Returns:
            Number of newly created dataset records.
        """
        logger.info("开始从datasets文件夹加载profile.yaml文件...")
        
        # Resolve the datasets root from the unified configuration.
        datasets_path = self.config.paths.datasets_dir
        loaded_count = 0
        
        if not datasets_path.exists():
            logger.warning(f"datasets文件夹不存在: {datasets_path}")
            return 0
        
        # Walk every sub-folder; each one is a candidate dataset.
        for folder in datasets_path.iterdir():
            if folder.is_dir():
                folder_name = folder.name
                profile_file = folder / "profile.yaml"
                
                try:
                    # Read profile.yaml when present, else fall back to defaults.
                    profile_data = {}
                    if profile_file.exists():
                        with open(profile_file, 'r', encoding='utf-8') as f:
                            # safe_load returns None for an empty file.
                            profile_data = yaml.safe_load(f) or {}
                    else:
                        logger.info(f"profile.yaml不存在，将使用默认值: {folder_name}")
                    
                    # Fill in defaults (id/name fall back to the folder name).
                    processed_data = self._apply_profile_defaults(profile_data, folder_name)
                    
                    # Count files / bytes / samples actually on disk.
                    stats = self._calculate_dataset_stats(folder)
                    
                    # Skip folders already registered under this dataset id.
                    dataset_id = processed_data.get('id')
                    existing = db.query(Dataset).filter(Dataset.dataset_id == dataset_id).first()
                    
                    if existing:
                        logger.info(f"数据集配置已存在，跳过: {dataset_id}")
                        continue
                    
                    # Build the ORM payload from profile values + disk stats.
                    dataset_dict = {
                        'dataset_id': dataset_id,
                        'name': processed_data.get('name', dataset_id),
                        'description': f"从profile加载: {processed_data.get('name', dataset_id)}",
                        'dataset_type': 'profile',
                        'data_format': processed_data.get('dataFormat', 'IMAGE').upper(),
                        'uploader': processed_data.get('uploader', 'system'),
                        'status': 'active',
                        'file_count': stats['file_count'],
                        'total_size': stats['total_size'],
                        'sample_count': stats['sample_count'],
                        'annotated_samples': stats['annotated_samples'],
                        'config_path': str(folder / 'dataset.yaml') if (folder / 'dataset.yaml').exists() else None,
                        'profile_path': str(profile_file) if profile_file.exists() else None,
                        'upload_time': processed_data.get('uploadTime', datetime.utcnow().strftime('%Y-%m-%d')),
                        'is_learned': processed_data.get('isLearned', False),
                        'is_annotated': processed_data.get('isAnnotated', True),
                        'data_size': processed_data.get('dataSize', '0MB'),
                        'time_range': processed_data.get('timeRange', '未知'),
                        'update_frequency': processed_data.get('updateFrequency', '一次性'),
                        'is_forgetting': processed_data.get('isForgetting', False),
                        'is_quantization': processed_data.get('isQuantization', False),
                        'is_incremental': processed_data.get('isIncremental', False),
                        'is_base': processed_data.get('isBase', True),
                        'folder_path': str(folder),
                        'scenario': processed_data.get('scenario'),  # no default; user must specify
                        'base_dataset_id': processed_data.get('baseDatasetId'),  # newer field
                        'origin': processed_data.get('origin', 'cloud')  # newer field, defaults to cloud
                    }
                    
                    # Overlay service-wide defaults for any missing keys.
                    final_data = self._apply_dataset_defaults(dataset_dict)
                    
                    # Convert string values to their ORM enum counterparts.
                    if final_data.get('scenario'):
                        final_data['scenario'] = self._get_scenario_enum(final_data['scenario'])
                    if final_data.get('origin'):
                        final_data['origin'] = self._get_origin_enum(final_data['origin'])
                    
                    dataset = Dataset(**final_data)
                    db.add(dataset)
                    db.commit()
                    loaded_count += 1
                    
                    logger.info(f"成功加载数据集配置: {dataset_id} - 样本数:{stats['sample_count']}, 标注数:{stats['annotated_samples']}")
                    
                except Exception as e:
                    # Best-effort: log and continue with the next folder.
                    logger.error(f"加载数据集配置失败 {folder_name}: {str(e)}")
                    continue
        
        logger.info(f"完成加载数据集配置，共加载 {loaded_count} 个配置")
        return loaded_count
    
    def create_dataset(self, db: Session, dataset_data: 'DatasetCreate') -> Dataset:
        """Create and persist a new dataset record.

        Args:
            db: Active SQLAlchemy session.
            dataset_data: Pydantic payload describing the dataset.

        Returns:
            The persisted Dataset ORM instance (refreshed).

        Raises:
            ValidationException: if required fields are missing, the
                dataset_id already exists, or base-dataset rules are violated.
        """
        # Convert the Pydantic model into a plain dict.
        dataset_dict = dataset_data.model_dump()
        
        # Apply service-wide defaults (also validates that 'name' is set).
        processed_data = self._apply_dataset_defaults(dataset_dict)
        
        # Guard against a missing id up front: the original code indexed
        # processed_data['dataset_id'] directly and raised a bare KeyError
        # instead of a meaningful validation error.
        dataset_id = processed_data.get('dataset_id')
        if not dataset_id:
            raise ValidationException("dataset_id is required.")
        
        # Reject duplicate dataset ids.
        if db.query(Dataset).filter(Dataset.dataset_id == dataset_id).first():
            raise ValidationException(f"Dataset with id '{dataset_id}' already exists.")
        
        # Enforce the one-base-dataset-per-scenario rule.
        self._validate_base_dataset_rules(db, processed_data)
        
        # Convert string values to their ORM enum counterparts.
        if processed_data.get('scenario'):
            processed_data['scenario'] = self._get_scenario_enum(processed_data['scenario'])
        if processed_data.get('origin'):
            processed_data['origin'] = self._get_origin_enum(processed_data['origin'])
        
        new_dataset = Dataset(**processed_data)
        db.add(new_dataset)
        db.commit()
        db.refresh(new_dataset)
        
        logger.info(f"创建数据集: {new_dataset.id} - {new_dataset.name}")
        return new_dataset
    
    def _validate_base_dataset_rules(self, db: Session, dataset_data: Dict[str, Any]):
        """Ensure at most one base dataset exists per scenario.

        Args:
            db: Active SQLAlchemy session.
            dataset_data: Candidate fields containing 'scenario' and 'is_base'.

        Raises:
            ValidationException: when the scenario already has a base dataset.
        """
        scenario = dataset_data.get('scenario')

        # Only base datasets that carry a scenario are constrained.
        if not (scenario and dataset_data.get('is_base', False)):
            return

        # Look for an existing base dataset registered for the same scenario.
        existing_base = (
            db.query(Dataset)
            .filter(
                Dataset.scenario == self._get_scenario_enum(scenario),
                Dataset.is_base == True,
            )
            .first()
        )

        if existing_base:
            raise ValidationException(f"场景 '{scenario}' 已经存在基础数据集: {existing_base.name}")
    
    def _get_scenario_enum(self, scenario_str: str) -> Optional[ScenarioEnum]:
        """Map a Chinese scenario label to its ScenarioEnum member.

        Returns None for unrecognized labels — callers (e.g. list_datasets)
        rely on that to skip invalid filters — so the return annotation is
        Optional; the original annotation promised a ScenarioEnum
        unconditionally, which was incorrect.
        """
        enum_map = {
            "设备运检I": ScenarioEnum.DEVICE_INSPECTION_I,
            "设备运检S": ScenarioEnum.DEVICE_INSPECTION_S,
            "环境巡视": ScenarioEnum.ENVIRONMENT_PATROL,
            "人员安全": ScenarioEnum.PERSONNEL_SAFETY
        }
        return enum_map.get(scenario_str)
    
    def _get_origin_enum(self, origin_str: str) -> OriginEnum:
        """Map an origin string ('cloud'/'edge') to its OriginEnum member.

        Any unrecognized value falls back to OriginEnum.CLOUD.
        """
        if origin_str == "edge":
            return OriginEnum.EDGE
        # 'cloud' and every unknown value resolve to the cloud origin.
        return OriginEnum.CLOUD
    
    def get_dataset_by_id(self, db: Session, dataset_id: str) -> Dataset:
        """Fetch a dataset by its business id.

        Raises:
            NotFoundException: when no dataset matches dataset_id.
        """
        found = (
            db.query(Dataset)
            .filter(Dataset.dataset_id == dataset_id)
            .first()
        )
        if found is None:
            raise NotFoundException(f"Dataset with id {dataset_id} not found.")
        return found
    
    def get_dataset_by_db_id(self, db: Session, dataset_db_id: int) -> Optional[Dataset]:
        """Fetch a dataset by database primary key; None when absent."""
        query = db.query(Dataset).filter(Dataset.id == dataset_db_id)
        return query.first()
    
    def list_datasets(
        self, db: Session, page: int, page_size: int, 
        name_filter: Optional[str] = None, 
        uploader_filter: Optional[str] = None,
        data_format_filter: Optional[str] = None, 
        dataset_type_filter: Optional[str] = None,
        scenario_filter: Optional[str] = None,
        origin_filter: Optional[str] = None,
        is_base_filter: Optional[bool] = None
    ) -> Tuple[List[Dataset], int]:
        """Return one page of datasets plus the total (pre-pagination) count.

        All filters are optional and AND-ed together; name matching is a
        case-insensitive substring match. Unrecognized scenario/origin
        strings are silently ignored (the enum lookup returns None).

        Side effect: datasets on the returned page whose stored size looks
        stale (zero or placeholder data_size) are re-measured on disk and
        persisted before being returned.

        Args:
            db: Active SQLAlchemy session.
            page: 1-based page number.
            page_size: Items per page.

        Returns:
            (datasets for the requested page, total matching count).
        """
        query = db.query(Dataset)
        
        # Apply the optional filter conditions.
        if name_filter:
            query = query.filter(func.lower(Dataset.name).contains(name_filter.lower()))
        if uploader_filter:
            query = query.filter(Dataset.uploader == uploader_filter)
        if data_format_filter:
            query = query.filter(Dataset.data_format == data_format_filter)
        if dataset_type_filter:
            query = query.filter(Dataset.dataset_type == dataset_type_filter)
        if scenario_filter:
            scenario_enum = self._get_scenario_enum(scenario_filter)
            if scenario_enum:
                query = query.filter(Dataset.scenario == scenario_enum)
        if origin_filter:
            origin_enum = self._get_origin_enum(origin_filter)
            if origin_enum:
                query = query.filter(Dataset.origin == origin_enum)
        if is_base_filter is not None:
            query = query.filter(Dataset.is_base == is_base_filter)
        
        # Total count before pagination, newest first within the page.
        total_count = query.count()
        datasets = query.order_by(Dataset.created_at.desc()) \
                        .offset((page - 1) * page_size) \
                        .limit(page_size) \
                        .all()
        
        # Partition the page into up-to-date datasets and ones whose stored
        # size is zero/placeholder and should be re-measured.
        updated_datasets = []
        need_update_datasets = []
        
        for dataset in datasets:
            # Staleness check: any zero or placeholder size marker.
            if (dataset.total_size == 0 or 
                dataset.data_size == "0MB" or 
                dataset.data_size == "0 B" or 
                dataset.data_size == "未知" or
                not dataset.data_size):
                need_update_datasets.append(dataset)
            else:
                updated_datasets.append(dataset)
        
        # Re-measure the stale ones in a single batch commit.
        # NOTE(review): this changes the page's ordering — refreshed datasets
        # are appended after the up-to-date ones, not kept in created_at
        # order. Confirm callers do not rely on the sort within a page.
        if need_update_datasets:
            updated_batch = self._batch_update_dataset_sizes(db, need_update_datasets)
            updated_datasets.extend(updated_batch)
        
        return updated_datasets, total_count
    
    def _update_dataset_size_if_needed(self, db: Session, dataset: 'Dataset') -> 'Dataset':
        """Re-measure one dataset's on-disk size and persist it if non-zero.

        Best-effort: any failure is logged and the dataset is returned
        unchanged. The record is only written when the recount finds a
        positive total size.

        Args:
            db: Active SQLAlchemy session (committed on successful update).
            dataset: ORM instance to refresh in place.

        Returns:
            The same dataset instance (possibly updated and refreshed).
        """
        try:
            # Resolve the dataset folder: stored path first, else the
            # conventional location under the configured datasets dir.
            if dataset.folder_path:
                folder_path = Path(dataset.folder_path)
            else:
                folder_path = self.config.paths.datasets_dir / dataset.dataset_id
            
            if folder_path.exists():
                # Recount files/bytes/samples from disk.
                stats = self._calculate_dataset_stats(folder_path)
                
                # Persist only when the recount found actual content.
                if stats['total_size'] > 0:
                    dataset.total_size = stats['total_size']
                    dataset.file_count = stats['file_count']
                    dataset.sample_count = stats['sample_count']
                    dataset.annotated_samples = stats['annotated_samples']
                    
                    # Human-readable size for display.
                    from utils import format_file_size
                    dataset.data_size = format_file_size(stats['total_size'])
                    
                    # Touch the modification timestamp.
                    dataset.updated_at = datetime.utcnow()
                    
                    # Commit and reload the row.
                    db.commit()
                    db.refresh(dataset)
                    
                    logger.info(f"自动更新数据集大小: {dataset.dataset_id} - {dataset.data_size}")
            
        except Exception as e:
            # Best-effort: keep serving the stale record on failure.
            logger.warning(f"更新数据集大小失败 {dataset.dataset_id}: {str(e)}")
        
        return dataset
    
    def _batch_update_dataset_sizes(self, db: Session, datasets: List['Dataset']) -> List['Dataset']:
        """Re-measure many datasets' on-disk sizes with a single commit.

        Per-dataset failures are logged and the original (stale) instance is
        still returned; a failure of the final commit rolls everything back.

        Args:
            db: Active SQLAlchemy session.
            datasets: ORM instances to refresh in place.

        Returns:
            The same instances, in input order, some possibly updated.
        """
        updated_datasets = []
        updates_made = 0
        
        try:
            for dataset in datasets:
                try:
                    # Resolve the dataset folder: stored path first, else the
                    # conventional location under the datasets dir.
                    if dataset.folder_path:
                        folder_path = Path(dataset.folder_path)
                    else:
                        folder_path = self.config.paths.datasets_dir / dataset.dataset_id
                    
                    if folder_path.exists():
                        # Recount files/bytes/samples from disk.
                        stats = self._calculate_dataset_stats(folder_path)
                        
                        # Stage the update only when content was found.
                        if stats['total_size'] > 0:
                            dataset.total_size = stats['total_size']
                            dataset.file_count = stats['file_count']
                            dataset.sample_count = stats['sample_count']
                            dataset.annotated_samples = stats['annotated_samples']
                            
                            # Human-readable size for display.
                            from utils import format_file_size
                            dataset.data_size = format_file_size(stats['total_size'])
                            
                            # Touch the modification timestamp.
                            dataset.updated_at = datetime.utcnow()
                            
                            updates_made += 1
                            logger.debug(f"准备更新数据集大小: {dataset.dataset_id} - {dataset.data_size}")
                    
                    updated_datasets.append(dataset)
                    
                except Exception as e:
                    # Return the stale instance rather than dropping it.
                    logger.warning(f"更新单个数据集大小失败 {dataset.dataset_id}: {str(e)}")
                    updated_datasets.append(dataset)  # 即使失败也要返回原始数据集
            
            # Single commit for all staged updates.
            if updates_made > 0:
                db.commit()
                logger.info(f"批量更新数据集大小完成，共更新 {updates_made} 个数据集")
                
                # Reload every ORM-managed instance after the commit.
                for dataset in updated_datasets:
                    if hasattr(dataset, '_sa_instance_state'):
                        db.refresh(dataset)
        
        except Exception as e:
            # Commit failed: undo all staged changes.
            logger.error(f"批量更新数据集大小时发生错误: {str(e)}")
            db.rollback()
        
        return updated_datasets
    
    def refresh_dataset_size(self, db: Session, dataset_id: str) -> 'Dataset':
        """Recalculate and persist size info for one dataset on demand.

        Raises:
            NotFoundException: when dataset_id does not exist.
        """
        return self._update_dataset_size_if_needed(
            db, self.get_dataset_by_id(db, dataset_id)
        )
    
    def refresh_all_dataset_sizes(self, db: Session) -> Dict[str, Any]:
        """Refresh size info for every dataset whose stored size looks stale.

        A dataset is stale when its total_size is zero, its data_size is
        empty, or data_size is one of the placeholder strings written at
        creation time.

        Args:
            db: Active SQLAlchemy session.

        Returns:
            Summary dict with total/updated/skipped counts and a message.
        """
        logger.info("开始刷新所有数据集的大小信息...")
        
        all_datasets = db.query(Dataset).all()
        
        # Collect datasets whose stored size info is missing or zero
        # (same staleness predicate used by list_datasets).
        need_update_datasets = [
            dataset for dataset in all_datasets
            if (dataset.total_size == 0 or
                dataset.data_size in ("0MB", "0 B", "未知") or
                not dataset.data_size)
        ]
        
        if not need_update_datasets:
            return {
                "total_datasets": len(all_datasets),
                "updated_datasets": 0,
                "skipped_datasets": len(all_datasets),
                "message": "所有数据集的大小信息都是最新的"
            }
        
        updated_datasets = self._batch_update_dataset_sizes(db, need_update_datasets)
        # Count only datasets that actually received a non-zero size. The
        # original message reported every *attempted* dataset as refreshed,
        # contradicting the updated_datasets field, which counted successes.
        success_count = len([d for d in updated_datasets if d.total_size > 0])
        return {
            "total_datasets": len(all_datasets),
            "updated_datasets": success_count,
            "skipped_datasets": len(all_datasets) - len(need_update_datasets),
            "message": f"成功刷新 {success_count} 个数据集的大小信息"
        }
    
    def update_dataset(self, db: Session, dataset_id: str, update_data: 'DatasetUpdate') -> Dataset:
        """Apply a partial update to a dataset and persist it.

        Only fields explicitly set on the Pydantic payload are applied, and
        None values are dropped.
        NOTE(review): dropping None means a field can never be cleared via
        this method — confirm that is intended.

        Args:
            db: Active SQLAlchemy session.
            dataset_id: Business id of the dataset to update.
            update_data: Pydantic payload of changed fields.

        Returns:
            The refreshed Dataset instance.

        Raises:
            NotFoundException: when dataset_id does not exist.
            ValidationException: when the update would create a second base
                dataset for a scenario.
        """
        dataset = self.get_dataset_by_id(db, dataset_id)
        
        update_dict = update_data.model_dump(exclude_unset=True)
        
        # Keep only non-None values from the payload.
        filtered_update = {k: v for k, v in update_dict.items() if v is not None}
        
        # Re-check the one-base-per-scenario rule for any combination of
        # scenario / is_base changes.
        # NOTE(review): _validate_base_dataset_rules does not exclude this
        # dataset itself, so re-saving an existing base dataset's scenario
        # may fail against its own record — verify.
        if 'scenario' in filtered_update and 'is_base' in filtered_update:
            temp_data = {'scenario': filtered_update['scenario'], 'is_base': filtered_update['is_base']}
            self._validate_base_dataset_rules(db, temp_data)
        elif 'scenario' in filtered_update and dataset.is_base:
            temp_data = {'scenario': filtered_update['scenario'], 'is_base': True}
            self._validate_base_dataset_rules(db, temp_data)
        elif 'is_base' in filtered_update and filtered_update['is_base'] and dataset.scenario:
            temp_data = {'scenario': dataset.scenario.value, 'is_base': True}
            self._validate_base_dataset_rules(db, temp_data)
        
        # Convert string values to their ORM enum counterparts.
        if 'scenario' in filtered_update:
            filtered_update['scenario'] = self._get_scenario_enum(filtered_update['scenario'])
        if 'origin' in filtered_update:
            filtered_update['origin'] = self._get_origin_enum(filtered_update['origin'])
        
        for key, value in filtered_update.items():
            setattr(dataset, key, value)
        
        dataset.updated_at = datetime.utcnow()
        db.commit()
        db.refresh(dataset)
        
        logger.info(f"更新数据集: {dataset.id} - {dataset.name}")
        return dataset
    
    def delete_dataset(self, db: Session, dataset_id: str):
        """Delete a dataset record by its business id.

        Raises:
            NotFoundException: when no dataset matches dataset_id.
        """
        target = self.get_dataset_by_id(db, dataset_id)
        db.delete(target)
        db.commit()
        logger.info(f"删除数据集: {dataset_id}")
    
    def reload_datasets(self, db: Session) -> Dict:
        """Re-scan the datasets folder and load any newly added profiles."""
        logger.info("开始重新加载数据集配置...")
        count = self.load_profiles_from_datasets(db)
        return {
            "message": "数据集配置重新加载成功",
            "reloaded_datasets": count,
        }

    def create_sample_profile_file(self, folder_path: Path, sample_data: dict = None):
        """Write a sample profile.yaml into folder_path.

        When sample_data is None a default profile derived from the folder
        name is generated via _apply_profile_defaults.
        """
        # Fall back to a generated default profile when none was supplied.
        if sample_data is None:
            sample_data = self._apply_profile_defaults({}, folder_path.name)

        profile_file = folder_path / "profile.yaml"
        with open(profile_file, 'w', encoding='utf-8') as f:
            # allow_unicode keeps the Chinese values readable in the file.
            yaml.dump(sample_data, f, default_flow_style=False, allow_unicode=True)

        logger.info(f"创建示例profile.yaml文件: {profile_file}")

    def get_datasets_summary(self, db: Session, dataset_type: Optional[str] = None, 
                            name: Optional[str] = None, uploader: Optional[str] = None, 
                            data_format: Optional[str] = None) -> dict:
        """Aggregate dataset counts by type, format and uploader.

        The optional filters narrow which datasets are counted.

        Args:
            db: Active SQLAlchemy session.
            dataset_type / name / uploader / data_format: Optional filters;
                name and uploader match case-insensitive substrings.

        Returns:
            Dict with per-type/format/uploader counts plus distinct totals.
        """
        from sqlalchemy import func
        from database import Dataset
        
        # Build the filter conditions once. The original code built a
        # filtered query but never used it — every aggregate below ignored
        # the filters entirely.
        conditions = []
        if name:
            conditions.append(Dataset.name.ilike(f"%{name}%"))
        if uploader:
            conditions.append(Dataset.uploader.ilike(f"%{uploader}%"))
        if data_format:
            conditions.append(Dataset.data_format == data_format)
        if dataset_type:
            conditions.append(Dataset.dataset_type == dataset_type)
        
        def _grouped(column):
            # Count datasets grouped by `column`, honoring the filters.
            stmt = db.query(column, func.count(Dataset.id))
            for cond in conditions:
                stmt = stmt.filter(cond)
            return stmt.group_by(column).all()
        
        type_stats = _grouped(Dataset.dataset_type)
        format_stats = _grouped(Dataset.data_format)
        uploader_stats = _grouped(Dataset.uploader)
        
        return {
            "by_type": {item[0]: item[1] for item in type_stats if item[0]},
            "by_format": {item[0]: item[1] for item in format_stats if item[0]},
            "by_uploader": {item[0]: item[1] for item in uploader_stats if item[0]},
            "total_uploaders": len(uploader_stats),
            "total_types": len(type_stats),
            "total_formats": len(format_stats)
        }

    def get_dataset_preview(self, db: Session, dataset_id: str, limit: int = 10) -> Dict[str, Any]:
        """Return up to ``limit`` preview samples (image + label info).

        NOTE(review): the folder is located by dataset_id, but the image
        URLs/paths below are built from dataset.name — this assumes
        name == dataset_id (true for edge datasets created by this service);
        confirm for manually-created datasets.

        Args:
            db: Active SQLAlchemy session.
            dataset_id: Business id of the dataset.
            limit: Maximum number of samples to include.

        Returns:
            Dict with dataset identity, total sample count and sample list.

        Raises:
            ValidationException: on any failure (including a missing dataset,
                whose NotFoundException is re-wrapped here).
        """
        try:
            # Resolve the dataset record (raises NotFoundException if absent).
            dataset = self.get_dataset_by_id(db, dataset_id)
            
            # Conventional on-disk layout: <datasets>/<id>/{images,labels}.
            dataset_folder = self.config.paths.datasets_dir / dataset_id
            images_folder = dataset_folder / "images"
            labels_folder = dataset_folder / "labels"
            
            if not images_folder.exists():
                logger.warning(f"图片文件夹不存在: {images_folder}")
                return {
                    "dataset_id": dataset_id,
                    "dataset_name": dataset.name,
                    "total_samples": 0,
                    "preview_count": 0,
                    "samples": []
                }
            
            # Collect image files in common formats (both cases).
            image_extensions = ['*.jpg', '*.jpeg', '*.png', '*.bmp', '*.tiff', '*.gif']
            image_files = []
            for ext in image_extensions:
                image_files.extend(images_folder.glob(ext))
                image_files.extend(images_folder.glob(ext.upper()))
            
            # Deterministic ordering by file name.
            image_files.sort(key=lambda x: x.name)
            
            # Cap the preview at `limit` samples.
            preview_files = image_files[:limit]
            
            samples = []
            for img_file in preview_files:
                # A sample counts as labeled when a matching .txt exists.
                label_file = labels_folder / f"{img_file.stem}.txt"
                has_label = label_file.exists()
                
                # Best-effort file size; None when stat fails.
                file_size = None
                try:
                    file_size = img_file.stat().st_size
                except Exception:
                    pass
                
                # Build the image access URL / relative path
                # (relative to the datasets directory, keyed by name).
                relative_path = f"{dataset.name}/images/{img_file.name}"
                image_url = f"/api/v1/datasets/images/{dataset.name}/{img_file.name}"

                sample = {
                    "image_name": img_file.name,
                    "image_url": image_url,
                    "image_path": relative_path,
                    "has_label": has_label,
                    "label_path": f"{dataset.name}/labels/{img_file.stem}.txt" if has_label else None,
                    "file_size": file_size
                }
                samples.append(sample)
            
            logger.info(f"获取数据集预览: {dataset.name}, 总样本数: {len(image_files)}, 预览数: {len(samples)}")
            
            return {
                "dataset_id": dataset_id,
                "dataset_name": dataset.name,
                "total_samples": len(image_files),
                "preview_count": len(samples),
                "samples": samples
            }

        except Exception as e:
            logger.error(f"获取数据集预览失败 {dataset_id}: {str(e)}")
            raise ValidationException(f"获取数据集预览失败: {str(e)}")

    def create_dataset_from_edge_samples(
        self, 
        db: Session, 
        node_id: str, 
        scenario: str, 
        samples_info: Dict[str, Any],
        base_dataset_id: Optional[str] = None
    ) -> Dataset:
        """Create a new incremental dataset from edge-node sample metadata.

        Creates the on-disk folder layout (images/ + labels/), inserts the
        Dataset row, and writes dataset.yaml / profile.yaml config files.
        On any failure the transaction is rolled back and the created folder
        is removed.

        Args:
            db: Active SQLAlchemy session.
            node_id: Id of the edge node that collected the samples.
            scenario: Chinese scenario label (mapped to ScenarioEnum).
            samples_info: Metadata dict; 'sample_count' and
                'total_sample_count' keys are read here.
            base_dataset_id: Optional business id of the base dataset.

        Returns:
            The persisted Dataset instance.

        Raises:
            ValidationException: wrapping any failure during creation.
        """
        try:
            # Unique id derived from the node and the current timestamp.
            new_dataset_id = f"edge_{node_id}_{datetime.now().strftime('%Y%m%d_%H%M%S')}"
            
            # Map the scenario label to its enum.
            # NOTE(review): _get_scenario_enum returns None for unknown
            # labels and nothing rejects that here — the row would be stored
            # with scenario=None. Confirm whether that should be an error.
            scenario_enum = self._get_scenario_enum(scenario)
            
            # Resolve the base dataset when one was requested.
            # NOTE(review): get_dataset_by_id raises NotFoundException when
            # missing, so the `if not base_dataset` branch below appears
            # unreachable — verify.
            base_dataset = None
            if base_dataset_id:
                base_dataset = self.get_dataset_by_id(db, base_dataset_id)
                if not base_dataset:
                    raise ValidationException(f"基础数据集不存在: {base_dataset_id}")
            
            # Create the dataset directory.
            dataset_folder = self.config.paths.datasets_dir / new_dataset_id
            dataset_folder.mkdir(parents=True, exist_ok=True)
            
            # Create the images/ and labels/ sub-directories.
            images_dir = dataset_folder / "images"
            labels_dir = dataset_folder / "labels"
            images_dir.mkdir(exist_ok=True)
            labels_dir.mkdir(exist_ok=True)
            
            # Sample counts come from the edge metadata, not from disk.
            sample_count = samples_info.get("sample_count", 0)
            total_samples = samples_info.get("total_sample_count", sample_count)
            
            # Build the dataset record payload.
            dataset_data = {
                "dataset_id": new_dataset_id,
                "name": new_dataset_id,
                "description": f"从边侧节点 {node_id} 收集的{scenario}样本数据集",
                "uploader": node_id,
                "dataset_type": "detection",
                "scenario": scenario_enum,
                "origin": OriginEnum.EDGE,
                "base_dataset_id": base_dataset.id if base_dataset else None,
                "folder_path": str(dataset_folder),
                "sample_count": total_samples,
                "file_count": total_samples * 2,  # one image + one label each
                "data_size": f"{sample_count * 0.5:.1f}MB",  # rough size estimate
                "upload_time": datetime.now().strftime('%Y-%m-%d'),
                "is_base": False,
                "is_incremental": True,
                "status": "processing"
            }
            
            # Apply service defaults and create the row.
            processed_data = self._apply_dataset_defaults(dataset_data)
            
            new_dataset = Dataset(**processed_data)
            db.add(new_dataset)
            db.flush()  # obtain the primary key before writing configs
            
            # Write the dataset.yaml training config.
            self._create_dataset_yaml(dataset_folder, new_dataset, base_dataset)
            
            # Write the profile.yaml metadata file.
            self._create_profile_yaml(dataset_folder, new_dataset, samples_info)
            
            # Record the config file locations on the row.
            new_dataset.config_path = str(dataset_folder / "dataset.yaml")
            new_dataset.profile_path = str(dataset_folder / "profile.yaml")
            
            db.commit()
            db.refresh(new_dataset)
            
            logger.info(f"成功创建边侧数据集: {new_dataset_id}")
            return new_dataset
            
        except Exception as e:
            logger.error(f"创建边侧数据集失败: {str(e)}")
            db.rollback()
            # Remove the partially created folder, if any.
            if 'dataset_folder' in locals() and dataset_folder.exists():
                shutil.rmtree(dataset_folder, ignore_errors=True)
            raise ValidationException(f"创建边侧数据集失败: {str(e)}")
    
    def _create_dataset_yaml(self, dataset_folder: Path, dataset: Dataset, base_dataset: Optional[Dataset] = None):
        """Create the dataset.yaml configuration file for a dataset.

        Builds a YOLO-style config (path/train/val/test/nc/names), fills the
        class names based on the dataset's scenario, optionally overrides
        nc/names with the base dataset's own config when it is readable, and
        writes the result to ``<dataset_folder>/dataset.yaml``.

        Args:
            dataset_folder: Target folder of the dataset.
            dataset: Dataset ORM object; its ``scenario`` drives class names.
            base_dataset: Optional base dataset whose config file, if present,
                takes precedence over the scenario-derived class definitions.

        Raises:
            Exception: Any error while building or writing the file is logged
                and re-raised.
        """
        try:
            # Base config; train/val/test all point at "images" for now
            config_data = {
                "path": str(dataset_folder),
                "train": "images",
                "val": "images",  # may need a proper train/val split later
                "test": "images",
                "nc": 10,  # default class count; refined below
                "names": []  # filled from the scenario mapping below
            }

            # Scenario -> class names; nc is derived from the list length.
            scenario_classes = {
                ScenarioEnum.DEVICE_INSPECTION_I: ["设备缺陷", "正常设备", "维修中", "异常状态"],
                ScenarioEnum.DEVICE_INSPECTION_S: ["序列异常", "正常序列", "预警状态"],
                ScenarioEnum.ENVIRONMENT_PATROL: ["环境隐患", "正常环境", "需要关注"],
                ScenarioEnum.PERSONNEL_SAFETY: ["安全违规", "正常作业", "危险行为", "防护不当"],
            }
            names = scenario_classes.get(dataset.scenario)
            if names is not None:
                config_data["names"] = names
                config_data["nc"] = len(names)
            else:
                # Unknown/unset scenario: keep nc=10 with generic class names
                config_data["names"] = [f"class_{i}" for i in range(10)]

            # Prefer the base dataset's class definitions when available
            if base_dataset and base_dataset.config_path and Path(base_dataset.config_path).exists():
                try:
                    with open(base_dataset.config_path, 'r', encoding='utf-8') as f:
                        base_config = yaml.safe_load(f)
                        if base_config:
                            config_data["nc"] = base_config.get("nc", config_data["nc"])
                            config_data["names"] = base_config.get("names", config_data["names"])
                except Exception as e:
                    # BUG FIX: was self.logger.warning — the class has no
                    # `logger` attribute; use the module-level logger instead.
                    logger.warning(f"读取基础数据集配置失败: {str(e)}")

            # Write the config file
            config_path = dataset_folder / "dataset.yaml"
            with open(config_path, 'w', encoding='utf-8') as f:
                yaml.dump(config_data, f, default_flow_style=False, allow_unicode=True)

            logger.info(f"创建dataset.yaml配置文件: {config_path}")

        except Exception as e:
            logger.error(f"创建dataset.yaml失败: {str(e)}")
            raise
    
    def _create_profile_yaml(self, dataset_folder: Path, dataset: Dataset, samples_info: Dict[str, Any]):
        """Write the profile.yaml metadata file for an edge-collected dataset.

        Serializes the dataset's descriptive attributes (camelCase keys for
        the frontend) plus a ``collection_info`` section derived from
        ``samples_info`` into ``<dataset_folder>/profile.yaml``.
        """
        try:
            # id and name both use dataset_id on purpose so they never diverge
            profile_data: Dict[str, Any] = {
                "id": dataset.dataset_id,
                "name": dataset.dataset_id,
                "uploader": dataset.uploader,
                "isLearned": dataset.is_learned,
                "dataSize": dataset.data_size,
                "uploadTime": dataset.upload_time or datetime.now().strftime('%Y-%m-%d'),
                "isAnnotated": dataset.is_annotated,
                "dataFormat": dataset.data_format,
                "sampleCount": dataset.sample_count,
                "timeRange": dataset.time_range or "边侧收集",
                "updateFrequency": dataset.update_frequency or "一次性",
                "isForgetting": dataset.is_forgetting,
                "isQuantization": dataset.is_quantization,
                "isIncremental": dataset.is_incremental,
                "isBase": dataset.is_base,
                "scenario": dataset.scenario.value if dataset.scenario else None,
                "origin": dataset.origin.value if dataset.origin else "edge",
            }
            # Provenance of the edge collection run
            profile_data["collection_info"] = {
                "source_node": samples_info.get("node_id", "unknown"),
                "collection_time": datetime.now().isoformat(),
                "aggregation_params": samples_info.get("request_params", {}),
                "selected_samples": samples_info.get("selected_samples", []),
            }

            profile_path = dataset_folder / "profile.yaml"
            with open(profile_path, 'w', encoding='utf-8') as f:
                yaml.dump(profile_data, f, default_flow_style=False, allow_unicode=True)

            logger.info(f"创建profile.yaml文件: {profile_path}")

        except Exception as e:
            logger.error(f"创建profile.yaml失败: {str(e)}")
            raise

    def update_dataset_processing_status(self, db: Session, dataset_id: str, status: str, **kwargs) -> Dataset:
        """Update a dataset's processing status plus optional extra fields.

        Args:
            db: Active SQLAlchemy session.
            dataset_id: Business id of the dataset to update.
            status: New status value.
            **kwargs: Additional Dataset attributes to set; only keys that
                exist on the model and carry non-None values are applied.

        Returns:
            The refreshed Dataset instance.

        Raises:
            NotFoundException: If no dataset matches dataset_id.
            ValidationException: If the update fails for any other reason.
        """
        try:
            dataset = self.get_dataset_by_id(db, dataset_id)
            if not dataset:
                raise NotFoundException(f"数据集不存在: {dataset_id}")

            dataset.status = status
            dataset.updated_at = datetime.utcnow()

            # Apply optional extra fields, ignoring unknown attributes / None
            for key, value in kwargs.items():
                if hasattr(dataset, key) and value is not None:
                    setattr(dataset, key, value)

            db.commit()
            db.refresh(dataset)

            logger.info(f"数据集状态更新: {dataset_id} -> {status}")
            return dataset

        except NotFoundException:
            # BUG FIX: NotFoundException used to be swallowed by the generic
            # handler and rewrapped as ValidationException; preserve it.
            db.rollback()
            raise
        except Exception as e:
            # BUG FIX: was self.logger.error — no such attribute on this class
            logger.error(f"更新数据集状态失败: {str(e)}")
            db.rollback()
            raise ValidationException(f"更新数据集状态失败: {str(e)}")

    def get_base_datasets_by_scenario(self, db: Session, scenario: str) -> List[Dataset]:
        """Return all active base datasets for the given scenario string.

        Args:
            db: Active SQLAlchemy session.
            scenario: Scenario name; converted via ``_get_scenario_enum``.

        Returns:
            Matching Dataset rows (is_base and status == "active").

        Raises:
            ValidationException: If the scenario is invalid or the query fails.
        """
        try:
            scenario_enum = self._get_scenario_enum(scenario)

            base_datasets = db.query(Dataset).filter(
                Dataset.scenario == scenario_enum,
                Dataset.is_base == True,
                Dataset.status == "active"
            ).all()

            return base_datasets

        except Exception as e:
            # BUG FIX: was self.logger.error — the class has no `logger`
            # attribute; use the module-level logger.
            logger.error(f"获取基础数据集失败: {str(e)}")
            raise ValidationException(f"获取基础数据集失败: {str(e)}")

    def extract_zip_file(self, zip_file_path: Path, extract_to: Optional[Path] = None) -> Path:
        """Extract a ZIP archive and return the extraction directory.

        Args:
            zip_file_path: Path to the .zip file.
            extract_to: Target directory; when None a unique folder is
                created under the configured temp directory.

        Returns:
            The directory the archive was extracted into.

        Raises:
            ValidationException: If the file is missing, is not a valid ZIP,
                or extraction fails for any other reason.
        """
        import zipfile

        try:
            if not zip_file_path.exists():
                raise ValidationException(f"压缩文件不存在: {zip_file_path}")

            # Default to a unique folder under the temp directory
            if extract_to is None:
                extract_to = self.config.paths.temp_dir / f"extracted_{uuid.uuid4().hex[:8]}"

            extract_to.mkdir(parents=True, exist_ok=True)

            with zipfile.ZipFile(zip_file_path, 'r') as zip_ref:
                zip_ref.extractall(extract_to)

            logger.info(f"成功解压文件: {zip_file_path} -> {extract_to}")
            return extract_to

        except zipfile.BadZipFile:
            # BUG FIX: remove the directory we just created for a bad archive
            # instead of leaking it.
            shutil.rmtree(extract_to, ignore_errors=True)
            raise ValidationException("不是有效的ZIP文件")
        except ValidationException:
            # BUG FIX: the "file not found" ValidationException used to be
            # re-caught below and double-wrapped; re-raise it unchanged.
            raise
        except Exception as e:
            logger.error(f"解压文件失败: {str(e)}")
            raise ValidationException(f"解压文件失败: {str(e)}")
    
    def get_base_datasets(self, db: Session) -> List[Dataset]:
        """Return every active dataset that is flagged as a base dataset."""
        try:
            query = db.query(Dataset).filter(
                Dataset.is_base == True,
                Dataset.status == "active",
            )
            return query.all()
        except Exception as e:
            logger.error(f"获取基础数据集失败: {str(e)}")
            raise ValidationException(f"获取基础数据集失败: {str(e)}")
    
    def create_dataset_from_uploaded_files(
        self,
        db: Session,
        request: 'DatasetCreationRequest'
    ) -> Dataset:
        """Create a new dataset from user-uploaded sample and label files.

        Validates the base dataset and source paths, copies images/labels
        into a freshly created dataset folder, computes statistics, persists
        the Dataset record and writes its dataset.yaml / profile.yaml files.

        Args:
            db: Active SQLAlchemy session.
            request: Creation request carrying base_dataset_id, samples_path,
                labels_path, uploader, dataset_type and optional
                scenario/description.

        Returns:
            The newly created Dataset.

        Raises:
            ValidationException: On invalid input or any creation failure;
                a partially created dataset folder is removed in that case.
        """
        dataset_folder: Optional[Path] = None  # sentinel for cleanup on failure
        try:
            # Validate the base dataset exists
            base_dataset = db.query(Dataset).filter(Dataset.dataset_id == request.base_dataset_id).first()
            if not base_dataset:
                raise ValidationException(f"基础数据集不存在: {request.base_dataset_id}")

            # Validate the source paths exist
            samples_path = Path(request.samples_path)
            labels_path = Path(request.labels_path)
            if not samples_path.exists():
                raise ValidationException(f"样本路径不存在: {request.samples_path}")
            if not labels_path.exists():
                raise ValidationException(f"标签路径不存在: {request.labels_path}")

            # Generate a unique dataset id and create the folder layout
            new_dataset_id = f"{base_dataset.name}_{uuid.uuid4().hex[:8]}"
            dataset_folder = self.config.paths.datasets_dir / new_dataset_id
            dataset_folder.mkdir(parents=True, exist_ok=True)
            images_dir = dataset_folder / "images"
            labels_dir = dataset_folder / "labels"
            images_dir.mkdir(exist_ok=True)
            labels_dir.mkdir(exist_ok=True)

            # Copy the files, then recompute exact on-disk statistics
            copy_stats = self._copy_files_to_dataset(samples_path, images_dir, labels_path, labels_dir)
            final_stats = self._calculate_dataset_stats(dataset_folder)

            sample_count = copy_stats['sample_count']
            label_count = copy_stats['label_count']
            total_files = final_stats['file_count']
            total_size = final_stats['total_size']
            data_size = self._format_data_size(total_size)

            # Assemble the dataset record
            dataset_data = {
                "dataset_id": new_dataset_id,
                "name": new_dataset_id,
                "description": request.description or f"基于{base_dataset.name}创建的数据集",
                "uploader": request.uploader,
                "dataset_type": request.dataset_type,
                "scenario": self._get_scenario_enum(request.scenario) if request.scenario else base_dataset.scenario,
                "origin": OriginEnum.CLOUD,
                "base_dataset_id": base_dataset.id,
                "folder_path": str(dataset_folder),
                "sample_count": sample_count,
                "file_count": total_files,  # actual on-disk file count
                "total_size": total_size,
                "data_size": data_size,
                "annotated_samples": label_count,  # number of label files copied
                "upload_time": datetime.now().strftime('%Y-%m-%d'),
                "is_base": False,
                "is_incremental": False,
                "is_annotated": label_count > 0,  # derived from actual label files
                "status": "active"
            }

            # Surface the statistics for debugging/auditing
            logger.info(f"数据集统计信息 - 样本数: {sample_count}, 标签数: {label_count}, 总文件数: {total_files}, 总大小: {data_size}")
            if sample_count == 0:
                logger.warning("警告：未找到有效的图像文件")
            if label_count == 0:
                logger.warning("警告：未找到标签文件，数据集将标记为未标注")

            # Apply defaults and persist the record
            processed_data = self._apply_dataset_defaults(dataset_data)
            new_dataset = Dataset(**processed_data)
            db.add(new_dataset)
            db.flush()  # obtain the primary key before writing config files

            # Write config/profile files and record their locations
            self._create_dataset_yaml(dataset_folder, new_dataset, base_dataset)
            self._create_profile_yaml_for_uploaded(dataset_folder, new_dataset, request)
            new_dataset.config_path = str(dataset_folder / "dataset.yaml")
            new_dataset.profile_path = str(dataset_folder / "profile.yaml")

            db.commit()
            db.refresh(new_dataset)

            logger.info(f"成功创建数据集: {new_dataset_id}")
            return new_dataset

        except ValidationException:
            # BUG FIX: validation errors used to be re-caught below and
            # double-wrapped ("创建数据集失败: 基础数据集不存在: ..."); re-raise
            # them unchanged while still rolling back and cleaning up.
            db.rollback()
            if dataset_folder is not None and dataset_folder.exists():
                shutil.rmtree(dataset_folder, ignore_errors=True)
            raise
        except Exception as e:
            logger.error(f"创建数据集失败: {str(e)}")
            db.rollback()
            if dataset_folder is not None and dataset_folder.exists():
                shutil.rmtree(dataset_folder, ignore_errors=True)
            raise ValidationException(f"创建数据集失败: {str(e)}")

    def _format_data_size(self, total_size: int) -> str:
        """Format a byte count as a human-readable string (B/KB/MB/GB, one decimal)."""
        if total_size > 1024 * 1024 * 1024:  # GB
            return f"{total_size / (1024 * 1024 * 1024):.1f}GB"
        if total_size > 1024 * 1024:  # MB
            return f"{total_size / (1024 * 1024):.1f}MB"
        if total_size > 1024:  # KB
            return f"{total_size / 1024:.1f}KB"
        return f"{total_size}B"
    
    def _copy_files_to_dataset(self, samples_path: Path, images_dir: Path, labels_path: Path, labels_dir: Path) -> Dict[str, int]:
        """Copy sample images and label files into the dataset directory.

        Source paths may each be a directory (searched recursively) or a
        single file. Files are flattened into the target directory — only the
        basename is kept — so files from different subdirectories that share
        a name overwrite each other; a warning is logged when that happens.

        Args:
            samples_path: Directory or single file containing images.
            images_dir: Destination directory for images.
            labels_path: Directory or single .txt file with YOLO labels.
            labels_dir: Destination directory for labels.

        Returns:
            Dict with 'sample_count', 'label_count' and 'total_files'.

        Raises:
            ValidationException: If the copy process fails as a whole
                (individual file failures are logged and skipped).
        """
        try:
            stats = {
                'sample_count': 0,
                'label_count': 0,
                'total_files': 0
            }

            # Supported image formats
            image_extensions = {'.jpg', '.jpeg', '.png', '.bmp', '.tiff', '.tif'}

            def copy_one(file_path: Path, target_dir: Path, count_key: str, kind: str) -> None:
                """Copy one file into target_dir (flattened) and update stats."""
                try:
                    target_path = target_dir / file_path.name
                    if target_path.exists():
                        # BUG FIX: flattening can collide silently and inflate
                        # the counters; at least warn that the later file wins.
                        logger.warning(f"文件名冲突，将被覆盖: {target_path}")
                    target_path.parent.mkdir(parents=True, exist_ok=True)
                    shutil.copy2(file_path, target_path)
                    stats[count_key] += 1
                    stats['total_files'] += 1
                    logger.debug(f"复制{kind}文件: {file_path} -> {target_path}")
                except Exception as e:
                    logger.warning(f"复制{kind}文件失败 {file_path}: {str(e)}")

            # Copy sample images (recursively for directories)
            if samples_path.is_dir():
                for file_path in samples_path.rglob('*'):
                    if file_path.is_file() and file_path.suffix.lower() in image_extensions:
                        copy_one(file_path, images_dir, 'sample_count', '图像')
            elif samples_path.is_file() and samples_path.suffix.lower() in image_extensions:
                copy_one(samples_path, images_dir, 'sample_count', '图像')

            # Copy YOLO label files (.txt)
            if labels_path.is_dir():
                for file_path in labels_path.rglob('*.txt'):
                    if file_path.is_file():
                        copy_one(file_path, labels_dir, 'label_count', '标签')
            elif labels_path.is_file() and labels_path.suffix.lower() == '.txt':
                copy_one(labels_path, labels_dir, 'label_count', '标签')

            logger.info(f"文件复制统计: 图像={stats['sample_count']}, 标签={stats['label_count']}, 总文件={stats['total_files']}")
            return stats

        except Exception as e:
            logger.error(f"复制文件失败: {str(e)}")
            raise ValidationException(f"复制文件失败: {str(e)}")
    
    def _calculate_folder_size(self, folder_path: Path) -> int:
        """计算文件夹大小（字节）"""
        try:
            total_size = 0
            for file_path in folder_path.rglob('*'):
                if file_path.is_file():
                    total_size += file_path.stat().st_size
            return total_size
        except Exception as e:
            logger.warning(f"计算文件夹大小失败: {str(e)}")
            return 0
    
    def _create_profile_yaml_for_uploaded(self, dataset_folder: Path, dataset: Dataset, request: 'DatasetCreationRequest'):
        """Write the profile.yaml metadata file for an uploaded dataset.

        Serializes the dataset's descriptive attributes (camelCase keys for
        the frontend) plus a ``creation_info`` section derived from the
        creation request into ``<dataset_folder>/profile.yaml``.
        """
        try:
            profile_data: Dict[str, Any] = {
                "id": dataset.dataset_id,
                "name": dataset.name,
                "uploader": dataset.uploader,
                "isLearned": dataset.is_learned,
                "dataSize": dataset.data_size,
                "uploadTime": dataset.upload_time or datetime.now().strftime('%Y-%m-%d'),
                "isAnnotated": dataset.is_annotated,
                "dataFormat": dataset.data_format,
                "sampleCount": dataset.sample_count,
                "timeRange": dataset.time_range or "上传时间",
                "updateFrequency": dataset.update_frequency or "一次性",
                "isForgetting": dataset.is_forgetting,
                "isQuantization": dataset.is_quantization,
                "isIncremental": dataset.is_incremental,
                "isBase": dataset.is_base,
                "scenario": dataset.scenario.value if dataset.scenario else None,
                "origin": dataset.origin.value if dataset.origin else "cloud",
            }
            # Provenance of the upload-based creation
            profile_data["creation_info"] = {
                "base_dataset_id": request.base_dataset_id,
                "samples_source": request.samples_path,
                "labels_source": request.labels_path,
                "creation_time": datetime.now().isoformat(),
                "creator": request.uploader,
            }

            profile_path = dataset_folder / "profile.yaml"
            with open(profile_path, 'w', encoding='utf-8') as f:
                yaml.dump(profile_data, f, default_flow_style=False, allow_unicode=True)

            logger.info(f"创建profile.yaml文件: {profile_path}")

        except Exception as e:
            logger.error(f"创建profile.yaml失败: {str(e)}")
            raise