"""
数据集类

提供PyTorch数据集实现
"""

import torch
from torch.utils.data import Dataset, DataLoader
import numpy as np
from pathlib import Path
from typing import List, Dict, Tuple, Optional, Union, Any
import json
import random
import cv2

from .loader import EchoVideoLoader
from .preprocessor import EchoPreprocessor
from .augmentation import EchoAugmentation, TemporalAugmentation, CardiacCycleAugmentation
from src.utils.logger import get_logger


class EchoVideoDataset(Dataset):
    """超声心动图视频数据集"""
    
    def __init__(self, 
                 data_dir: Union[str, Path],
                 split: str = 'train',
                 image_size: Tuple[int, int] = (1024, 1024),
                 frame_interval: int = 1,
                 max_frames: Optional[int] = None,
                 load_masks: bool = True,
                 augmentation: bool = True,
                 preprocessing_config: Optional[Dict[str, Any]] = None,
                 augmentation_config: Optional[Dict[str, Any]] = None):
        """
        Build the dataset: wire up the video loader, the preprocessor and
        (for the training split) the augmentation pipelines, then read the
        sample list for the requested split.

        Args:
            data_dir: root directory of the dataset
            split: which split to load ('train' / 'val' / 'test')
            image_size: target (height, width) for frames
            frame_interval: stride between sampled frames
            max_frames: cap on frames per sample (None = no cap)
            load_masks: whether to load segmentation masks alongside frames
            augmentation: enable data augmentation (effective on 'train' only)
            preprocessing_config: optional overrides for the preprocessor
            augmentation_config: optional overrides for the augmentations
        """
        self.data_dir = Path(data_dir)
        self.split = split
        self.image_size = image_size
        self.frame_interval = frame_interval
        self.max_frames = max_frames
        self.load_masks = load_masks
        # Augmentation is only ever active for the training split.
        self.augmentation = augmentation and split == 'train'
        self.logger = get_logger("EchoVideoDataset")

        # Video / image-sequence reader.
        self.loader = EchoVideoLoader(
            data_dir=data_dir,
            image_size=image_size,
            frame_interval=frame_interval,
            max_frames=max_frames,
        )

        # Frame preprocessing; normalization defaults follow ImageNet stats.
        pre_cfg = preprocessing_config or {}
        self.preprocessor = EchoPreprocessor(
            image_size=image_size,
            normalize=pre_cfg.get('normalize', True),
            mean=pre_cfg.get('mean', (0.485, 0.456, 0.406)),
            std=pre_cfg.get('std', (0.229, 0.224, 0.225)),
        )

        # Augmentation pipelines (constructed only when training).
        if self.augmentation:
            aug_cfg = augmentation_config or {}
            self.spatial_aug = EchoAugmentation(
                image_size=image_size,
                training=True,
                augmentation_config=aug_cfg,
            )
            self.temporal_aug = TemporalAugmentation(
                frame_drop_prob=aug_cfg.get('frame_drop_prob', 0.1),
                frame_repeat_prob=aug_cfg.get('frame_repeat_prob', 0.1),
            )
            self.cardiac_aug = CardiacCycleAugmentation(
                phase_shift_prob=aug_cfg.get('phase_shift_prob', 0.3),
                cycle_length_variation=aug_cfg.get('cycle_length_variation', 0.1),
            )

        # Read (or derive) the sample list for this split.
        self.samples = self._load_samples()

        self.logger.info(f"加载 {split} 数据集: {len(self.samples)} 个样本")
    
    def _load_samples(self) -> List[Dict[str, Any]]:
        """加载数据样本"""
        split_file = self.data_dir / f"{self.split}_split.json"
        
        if split_file.exists():
            with open(split_file, 'r', encoding='utf-8') as f:
                split_data = json.load(f)
            return split_data.get('samples', [])
        else:
            # 创建默认划分
            return self._create_default_split()
    
    def _create_default_split(self) -> List[Dict[str, Any]]:
        """创建默认数据划分"""
        samples = []
        
        # 添加视频样本
        for video_path in self.loader.list_videos():
            samples.append({
                'type': 'video',
                'path': str(video_path),
                'id': video_path.stem
            })
        
        # 添加图像序列样本
        for seq_info in self.loader.list_sequences():
            samples.append({
                'type': 'sequence',
                'path': str(seq_info['dir']),
                'files': [str(f) for f in seq_info['files']],
                'id': seq_info['dir'].name
            })
        
        # 随机打乱
        random.shuffle(samples)
        
        return samples
    
    def __len__(self) -> int:
        """Return the number of samples in this split."""
        return len(self.samples)
    
    def __getitem__(self, idx: int) -> Dict[str, Any]:
        """
        Load and assemble one sample.

        Args:
            idx: sample index

        Returns:
            Dict with:
              - 'frames': (T, C, H, W) float tensor
              - 'video_id': sample identifier
              - 'num_frames': number of frames kept
              - optionally 'masks' (T, H, W tensor), 'annotation_points',
                'is_annotated' (bool tensor) and 'annotation_frames'.
            On any failure an empty placeholder sample is returned instead
            of raising, so DataLoader workers never crash on a bad sample.
        """
        sample = self.samples[idx]

        try:
            # Samples come in several on-disk layouts; dispatch on their keys.
            sample_type = sample.get('type', 'unknown')

            # Frame numbers in the ORIGINAL video, used later to look up mask /
            # annotation files that are named after original frame indices.
            original_frame_numbers = []

            video_id = sample.get('id', 'unknown')
            if idx < 3:  # verbose debugging output for the first 3 samples only
                print(f"\n{'='*60}")
                print(f"样本 {idx}: video_id={video_id}, type={sample_type}")
                print(f"样本键: {list(sample.keys())}")

            if 'video_path' in sample:
                # EchoNet-Dynamic layout: prefer loading from the raw video.
                video_path = sample['video_path']
                video_path_obj = Path(video_path)

                if idx < 3:
                    print(f"原始video_path: {video_path}")
                    print(f"video_path_obj.exists(): {video_path_obj.exists()}")
                    print(f"video_path_obj.is_absolute(): {video_path_obj.is_absolute()}")

                # Relative or missing path: probe a few well-known locations.
                if not video_path_obj.is_absolute() or not video_path_obj.exists():
                    video_filename = video_path_obj.name
                    potential_paths = [
                        video_path_obj if video_path_obj.is_absolute() else None,  # original path first (if absolute)
                        Path(video_path).resolve() if not video_path_obj.is_absolute() else None,  # resolved relative path
                        Path('D:/Data/EchoNet-Dynamic/Videos') / video_filename,  # common location 1
                        Path('D:\\Data\\EchoNet-Dynamic\\Videos') / video_filename,  # common location 2 (Windows form)
                        self.data_dir.parent.parent / 'Data' / 'EchoNet-Dynamic' / 'Videos' / video_filename,  # relative to data_dir
                    ]
                    if idx < 3:
                        print(f"尝试查找视频文件: {video_filename}")
                        for i, pp in enumerate(potential_paths):
                            if pp is not None:
                                exists = pp.exists() and pp.is_file()
                                print(f"  路径{i}: {pp} -> exists={exists}")
                    for pp in potential_paths:
                        if pp is not None and pp.exists() and pp.is_file():
                            video_path_obj = pp
                            if idx < 3:
                                print(f"✓ 找到视频文件: {video_path_obj}")
                            break

                # Load every frame from the raw video file when available.
                if video_path_obj.exists() and video_path_obj.is_file():
                    try:
                        frames = self.loader.load_video(str(video_path_obj))
                        # Original frame numbers are simply 0..len-1 here.
                        original_frame_numbers = list(range(len(frames)))
                        if idx < 3:
                            print(f"✓ 成功加载视频: {len(frames)} 帧")
                    except Exception as e:
                        self.logger.warning(f"无法加载视频文件 {video_path_obj}: {e}")
                        if idx < 3:
                            print(f"✗ 加载视频失败: {e}")
                        frames = []
                        original_frame_numbers = []
                else:
                    if idx < 3:
                        print(f"✗ 视频文件不存在: {video_path_obj}")
                    self.logger.warning(f"视频文件不存在: {video_path_obj}")
                    # Fall back to the extracted key-frame directory.
                    frames_dir = sample.get('frames_dir')
                    if frames_dir and Path(frames_dir).exists():
                        frames = []
                        frames_dir_path = Path(frames_dir)
                        frame_files = sorted(frames_dir_path.glob('keyframe_*.png'))

                        # Map key-frame index -> original video frame number.
                        frame_mapping = self._get_frame_mapping(frames_dir_path)
                        original_frame_numbers = []

                        for frame_file in frame_files:
                            frame = cv2.imread(str(frame_file))
                            if frame is not None:
                                frames.append(frame)
                                try:
                                    frame_idx = int(frame_file.stem.split('_')[1])
                                    original_frame_numbers.append(frame_mapping.get(frame_idx, frame_idx))
                                except (ValueError, IndexError):
                                    # Unparsable filename: fall back to sequential numbering.
                                    original_frame_numbers.append(len(original_frame_numbers))
                    else:
                        # Nothing usable for this sample.
                        frames = []
                        original_frame_numbers = []

            elif 'frames_dir' in sample and sample_type == 'camus2019':
                # CAMUS2019 layout (pre-extracted frame PNGs).
                frames_dir = Path(sample['frames_dir'])
                if frames_dir.exists():
                    frames = []
                    for frame_file in sorted(frames_dir.glob('frame_*.png')):
                        frame = cv2.imread(str(frame_file))
                        if frame is not None:
                            frames.append(frame)
                    original_frame_numbers = list(range(len(frames)))
                else:
                    frames = []
                    original_frame_numbers = []

            elif 'image_path' in sample and sample_type in ['brats2023', 'cardiacnet']:
                # BraTS2023 / CardiacNet layout (single still image).
                image_path = sample['image_path']
                image = cv2.imread(image_path)
                if image is not None:
                    frames = [image]
                    original_frame_numbers = [0]
                else:
                    frames = []
                    original_frame_numbers = []

            elif 'path' in sample:
                # Standard layout: either a video file or an image sequence.
                if sample_type == 'video':
                    frames = self.loader.load_video(sample['path'])
                    original_frame_numbers = list(range(len(frames)))
                else:  # sequence
                    frames = self.loader.load_image_sequence({
                        'dir': Path(sample['path']),
                        'files': [Path(f) for f in sample.get('files', [])]
                    })
                    original_frame_numbers = list(range(len(frames)))
            else:
                raise ValueError(f"样本缺少路径字段: {sample.keys()}")

            # Keep frame-number bookkeeping consistent with the loaded frames.
            if len(original_frame_numbers) != len(frames):
                original_frame_numbers = list(range(len(frames)))

            if not frames:
                if idx < 3:
                    print(f"✗ 无法加载样本: {sample['id']} - 没有帧数据")
                raise ValueError(f"无法加载样本: {sample['id']}")

            # --- masks / annotation points, matched via original frame numbers ---
            masks = None
            annotation_points = None
            is_annotated = None
            annotation_frames = []

            if self.load_masks:
                video_id = sample.get('id', '')
                # Generated masks / annotations live under the processed data dir.
                masks_dir = self.data_dir / 'masks' / video_id
                annotations_dir = self.data_dir / 'annotations' / video_id

                if idx < 3:
                    print(f"\n掩码/标注检索:")
                    print(f"  masks_dir: {masks_dir}")
                    print(f"  masks_dir.exists(): {masks_dir.exists()}")
                    print(f"  annotations_dir: {annotations_dir}")
                    print(f"  annotations_dir.exists(): {annotations_dir.exists()}")

                # Second chance: an explicitly configured processed_path.
                if not masks_dir.exists():
                    processed_path = getattr(self, 'processed_path', None)
                    if processed_path:
                        masks_dir = Path(processed_path) / 'masks' / video_id
                        annotations_dir = Path(processed_path) / 'annotations' / video_id
                        if idx < 3:
                            print(f"  尝试processed_path: {processed_path}")
                            print(f"  新masks_dir: {masks_dir}, exists={masks_dir.exists()}")

                if masks_dir.exists() and annotations_dir.exists():
                    if idx < 3:
                        print(f"✓ 掩码和标注目录存在")
                    masks_list = []
                    points_list = []
                    is_annotated_list = []

                    if idx < 3:
                        print(f"  开始加载掩码和标注，总帧数: {len(original_frame_numbers)}")
                        print(f"  原始帧号范围: {min(original_frame_numbers) if original_frame_numbers else 'N/A'} - {max(original_frame_numbers) if original_frame_numbers else 'N/A'}")

                    loaded_mask_count = 0
                    loaded_annotation_count = 0
                    for i, original_frame_num in enumerate(original_frame_numbers):
                        # Mask/annotation files are named by ORIGINAL frame number.
                        mask_file = masks_dir / f"frame_{original_frame_num:03d}.png"
                        annotation_file = annotations_dir / f"frame_{original_frame_num:03d}.json"

                        if idx < 3 and i < 5:  # only log the first 5 frames
                            print(f"    帧{i} (原始帧号{original_frame_num}):")
                            print(f"      mask_file: {mask_file.name}, exists={mask_file.exists()}")
                            print(f"      annotation_file: {annotation_file.name}, exists={annotation_file.exists()}")

                        if mask_file.exists() and annotation_file.exists():
                            loaded_mask_count += 1
                            loaded_annotation_count += 1
                            mask = cv2.imread(str(mask_file), cv2.IMREAD_GRAYSCALE)
                            if mask is not None:
                                # Resize the mask to the working resolution.
                                if mask.shape[:2] != self.image_size:
                                    mask = cv2.resize(mask, (self.image_size[1], self.image_size[0]), 
                                                     interpolation=cv2.INTER_NEAREST)
                                masks_list.append(mask)

                                try:
                                    with open(annotation_file, 'r', encoding='utf-8') as f:
                                        ann_data = json.load(f)

                                    # Annotation points may be in the original image's
                                    # coordinate system; rescale to the working size.
                                    endo_points = None
                                    epi_points = None

                                    if ann_data.get('endo_points'):
                                        endo_points = np.array(ann_data['endo_points'], dtype=np.float32)
                                        ann_image_shape = ann_data.get('image_shape', [self.image_size[0], self.image_size[1]])
                                        if ann_image_shape != list(self.image_size):
                                            scale_h = self.image_size[0] / ann_image_shape[0]
                                            scale_w = self.image_size[1] / ann_image_shape[1]
                                            endo_points[:, 0] *= scale_w
                                            endo_points[:, 1] *= scale_h

                                    if ann_data.get('epi_points'):
                                        epi_points = np.array(ann_data['epi_points'], dtype=np.float32)
                                        # Same rescaling as the endo contour.
                                        ann_image_shape = ann_data.get('image_shape', [self.image_size[0], self.image_size[1]])
                                        if ann_image_shape != list(self.image_size):
                                            scale_h = self.image_size[0] / ann_image_shape[0]
                                            scale_w = self.image_size[1] / ann_image_shape[1]
                                            epi_points[:, 0] *= scale_w
                                            epi_points[:, 1] *= scale_h

                                    points_list.append({
                                        'endo': endo_points,
                                        'epi': epi_points,
                                        'frame_num': original_frame_num
                                    })

                                    is_annotated_list.append(True)
                                    annotation_frames.append(i)
                                except Exception as e:
                                    self.logger.warning(f"加载标注点失败 {annotation_file}: {e}")
                                    # FIX: the mask was already appended above; appending a
                                    # second (empty) mask here would desynchronize the
                                    # per-frame lists. Only record the missing annotation.
                                    points_list.append(None)
                                    is_annotated_list.append(False)
                            else:
                                masks_list.append(np.zeros(self.image_size, dtype=np.uint8))
                                points_list.append(None)
                                is_annotated_list.append(False)
                        else:
                            # No annotation for this frame: use an empty mask.
                            masks_list.append(np.zeros(self.image_size, dtype=np.uint8))
                            points_list.append(None)
                            is_annotated_list.append(False)

                    if masks_list:
                        masks = np.stack(masks_list, axis=0)
                        annotation_points = points_list
                        is_annotated = np.array(is_annotated_list, dtype=bool)
                        if idx < 3:
                            print(f"✓ 成功加载: {loaded_mask_count} 个掩码, {loaded_annotation_count} 个标注")
                            print(f"  掩码形状: {masks.shape}, 标注点数量: {len([p for p in points_list if p is not None])}")
                            print(f"  标注帧数: {is_annotated.sum()}/{len(is_annotated)}")
                    else:
                        if idx < 3:
                            print(f"✗ 未加载到任何掩码或标注")

                # FIX: legacy-format fallback is now fully guarded by `masks is None`.
                # Previously this block ran unconditionally, so a missing legacy .npy
                # file overwrote successfully loaded generated masks with zeros, and
                # the annotations fallback was attached to the wrong `if`.
                if masks is None:
                    if idx < 3:
                        print(f"  尝试从旧格式加载掩码...")
                    mask_path = self.data_dir / 'masks' / f"{sample['id']}.npy"
                    if mask_path.exists():
                        masks = np.load(mask_path)
                        if len(masks.shape) == 2:
                            masks = masks[None, :, :]  # add a time dimension
                    else:
                        # Try the loader's in-memory annotations.
                        annotations = self.loader.annotations if hasattr(self.loader, 'annotations') else {}
                        sample_id = sample.get('id', '')

                        # Annotation keys come in several historical formats.
                        mask_data = None
                        for key in [sample_id, f"{sample_id}.avi", sample_id.replace('0X', '').upper()]:
                            if key in annotations:
                                mask_data = annotations[key]
                                break

                        if mask_data is not None:
                            masks = self._load_masks_from_annotations(mask_data, len(frames))

                    if masks is None:
                        # Still nothing: empty masks, no annotated frames.
                        masks = np.zeros((len(frames), *self.image_size), dtype=np.float32)
                        is_annotated = np.zeros(len(frames), dtype=bool)

            # Final debug summary.
            if idx < 3:
                print(f"\n✓ 最终结果: {len(frames)} 帧, 掩码={'有' if masks is not None else '无'}, 标注点={'有' if annotation_points else '无'}")
                if masks is not None:
                    print(f"  掩码形状: {masks.shape}")
                if annotation_points:
                    print(f"  标注点数量: {len([p for p in annotation_points if p is not None])}")
                if is_annotated is not None:
                    print(f"  标注帧数: {is_annotated.sum()}/{len(is_annotated)}")
                print(f"{'='*60}\n")

            # Temporal / cardiac-cycle augmentation (training only).
            if self.augmentation:
                frames, masks = self.temporal_aug(frames, masks)
                frames, masks = self.cardiac_aug(frames, masks)

            # --- per-frame preprocessing / spatial augmentation ---
            processed_frames = []
            processed_masks = []

            for i, frame in enumerate(frames):
                # Safely decide whether masks exist (masks may be None, a list,
                # or a numpy array whose truthiness is ambiguous).
                has_masks = False
                if masks is not None:
                    try:
                        has_masks = len(masks) > 0
                    except (TypeError, ValueError):
                        has_masks = masks.size > 0 if hasattr(masks, 'size') else False

                mask = None
                if self.augmentation and has_masks:
                    # Fetch this frame's mask, tolerating short mask arrays.
                    try:
                        if isinstance(masks, (list, tuple)):
                            mask = masks[i] if i < len(masks) else None
                        else:
                            # numpy array
                            mask = masks[i] if i < masks.shape[0] else None
                    except (IndexError, TypeError):
                        mask = None

                if self.augmentation and mask is not None:
                    # Spatially augment the frame and mask together.
                    if not isinstance(mask, np.ndarray):
                        mask = np.array(mask)

                    # Force the mask down to 2D.
                    if len(mask.shape) == 3:
                        mask = mask.squeeze()
                    elif len(mask.shape) > 3:
                        mask = mask.reshape(mask.shape[-2:])

                    # Frame and mask must agree in size before augmentation.
                    frame_h, frame_w = frame.shape[:2]
                    mask_h, mask_w = mask.shape[:2]
                    if frame_h != mask_h or frame_w != mask_w:
                        if frame_h > 0 and frame_w > 0:
                            try:
                                mask = cv2.resize(mask, (frame_w, frame_h), interpolation=cv2.INTER_NEAREST)
                            except Exception:
                                # If resize fails, fall back to an empty mask.
                                mask = np.zeros((frame_h, frame_w), dtype=mask.dtype)
                        else:
                            mask = np.zeros((frame_h, frame_w), dtype=np.uint8)

                    aug_result = self.spatial_aug(frame, mask)
                    processed_frames.append(aug_result['image'])
                    processed_masks.append(aug_result['mask'])
                else:
                    # FIX: this else-branch was missing, so with augmentation off
                    # (val/test) no frame was ever appended and every eval sample
                    # collapsed to an empty sample. Plain preprocessing path:
                    processed_frame = self.preprocessor.preprocess_image(frame)
                    processed_frames.append(torch.from_numpy(processed_frame).permute(2, 0, 1))
                    # Carry the raw mask through unaugmented. (Previously this ran
                    # even after the augmented mask was appended, double-counting.)
                    if has_masks:
                        try:
                            if isinstance(masks, (list, tuple)):
                                if i < len(masks):
                                    processed_masks.append(torch.from_numpy(masks[i]).float())
                            else:
                                # numpy array
                                if i < masks.shape[0]:
                                    processed_masks.append(torch.from_numpy(masks[i]).float())
                        except (IndexError, TypeError):
                            pass

            # --- cap the number of frames (memory budget) ---
            if self.max_frames and len(processed_frames) > self.max_frames:
                # Smart sampling: keep annotated frames first.
                if annotation_frames and len(annotation_frames) > 0:
                    annotated_indices = np.array(annotation_frames)
                    annotated_indices = annotated_indices[annotated_indices < len(processed_frames)]  # drop stale indices

                    if len(annotated_indices) > 0:
                        # Fill remaining slots with evenly sampled other frames.
                        remaining_slots = self.max_frames - len(annotated_indices)
                        if remaining_slots > 0:
                            all_indices = np.arange(len(processed_frames))
                            non_annotated = np.setdiff1d(all_indices, annotated_indices)

                            if len(non_annotated) > 0:
                                # Uniformly sample from the non-annotated frames.
                                if len(non_annotated) <= remaining_slots:
                                    selected_non_annotated = non_annotated
                                else:
                                    step = len(non_annotated) / remaining_slots
                                    selected_indices = (np.arange(remaining_slots) * step).astype(int)
                                    selected_non_annotated = non_annotated[selected_indices]

                                # Merge and restore temporal order.
                                combined_indices = np.concatenate([annotated_indices, selected_non_annotated])
                                combined_indices = np.sort(combined_indices)
                                if len(combined_indices) > self.max_frames:
                                    combined_indices = combined_indices[:self.max_frames]
                            else:
                                combined_indices = annotated_indices[:self.max_frames]
                        else:
                            # More annotated frames than slots: keep the first ones.
                            combined_indices = annotated_indices[:self.max_frames]

                        indices = combined_indices
                    else:
                        # No valid annotated frames: plain uniform sampling.
                        indices = np.linspace(0, len(processed_frames) - 1, self.max_frames, dtype=int)
                else:
                    # No annotation info: sample near-contiguously to preserve
                    # temporal continuity (needed for cyclic learning).
                    if len(processed_frames) > self.max_frames:
                        step = len(processed_frames) / self.max_frames
                        indices = (np.arange(self.max_frames) * step).astype(int)
                        indices = np.clip(indices, 0, len(processed_frames) - 1)
                        # Deduplicate while keeping order.
                        indices = np.unique(indices)
                        if len(indices) < self.max_frames:
                            # Pad with trailing frames if rounding collapsed some.
                            remaining = self.max_frames - len(indices)
                            last_idx = indices[-1]
                            if last_idx < len(processed_frames) - 1:
                                additional = np.linspace(last_idx + 1, len(processed_frames) - 1, remaining, dtype=int)
                                indices = np.concatenate([indices, additional])
                    else:
                        indices = np.arange(len(processed_frames))

                processed_frames = [processed_frames[i] for i in indices]
                if processed_masks:
                    processed_masks = [processed_masks[i] if i < len(processed_masks) else processed_masks[-1] for i in indices]
                # Keep all per-frame bookkeeping aligned with the new index set.
                if len(original_frame_numbers) > self.max_frames:
                    original_frame_numbers = [original_frame_numbers[i] for i in indices]
                if annotation_points and len(annotation_points) > self.max_frames:
                    annotation_points = [annotation_points[i] if i < len(annotation_points) else None for i in indices]
                if is_annotated is not None and len(is_annotated) > self.max_frames:
                    is_annotated = is_annotated[indices]
                if annotation_frames and len(annotation_frames) > 0:
                    # Re-express annotated positions in the subsampled index space.
                    new_annotation_frames = []
                    for ann_frame in annotation_frames:
                        if ann_frame in indices:
                            new_annotation_frames.append(np.where(indices == ann_frame)[0][0])
                    annotation_frames = new_annotation_frames

            # Hard truncation as a last resort against memory blow-up.
            if self.max_frames and len(processed_frames) > self.max_frames:
                processed_frames = processed_frames[:self.max_frames]
                if processed_masks:
                    processed_masks = processed_masks[:self.max_frames]
                if len(original_frame_numbers) > self.max_frames:
                    original_frame_numbers = original_frame_numbers[:self.max_frames]
                if annotation_points and len(annotation_points) > self.max_frames:
                    annotation_points = annotation_points[:self.max_frames]
                if is_annotated is not None and len(is_annotated) > self.max_frames:
                    is_annotated = is_annotated[:self.max_frames]

            # --- assemble the output tensors ---
            if not processed_frames:
                # No frames survived: hand back an empty placeholder sample.
                return self._get_empty_sample(sample.get('id', 'unknown'))

            frames_tensor = torch.stack(processed_frames)

            result = {
                'frames': frames_tensor,
                'video_id': sample['id'],
                'num_frames': len(processed_frames)
            }

            if processed_masks:
                if len(processed_masks) != len(processed_frames):
                    # Pad (repeat last) or truncate masks to match the frames.
                    if len(processed_masks) < len(processed_frames):
                        last_mask = processed_masks[-1] if processed_masks else torch.zeros(*self.image_size)
                        while len(processed_masks) < len(processed_frames):
                            processed_masks.append(last_mask)
                    else:
                        processed_masks = processed_masks[:len(processed_frames)]
                masks_tensor = torch.stack(processed_masks)
                result['masks'] = masks_tensor

            # Attach annotation metadata when present.
            if annotation_points is not None:
                result['annotation_points'] = annotation_points
            if is_annotated is not None:
                result['is_annotated'] = torch.from_numpy(is_annotated).bool()
            if annotation_frames:
                result['annotation_frames'] = annotation_frames

            return result

        except Exception as e:
            import traceback
            error_msg = f"加载样本失败 {sample.get('id', 'unknown')}: {str(e)}"
            self.logger.error(error_msg)
            self.logger.debug(f"错误详情:\n{traceback.format_exc()}")
            # Never crash a DataLoader worker: return an empty sample instead.
            return self._get_empty_sample(sample.get('id', 'unknown'))
    
    def _load_masks_from_annotations(self, annotation_data: Dict[str, Any], num_frames: int) -> np.ndarray:
        """
        从annotations中加载mask
        
        Args:
            annotation_data: 标注数据字典
            num_frames: 帧数
            
        Returns:
            mask数组 (T, H, W)
        """
        masks = []
        
        # 尝试不同的mask数据格式
        if 'masks' in annotation_data:
            # 如果直接有masks数组
            mask_list = annotation_data['masks']
            for mask_info in mask_list[:num_frames]:
                if isinstance(mask_info, (list, np.ndarray)):
                    # 如果是数组，直接使用
                    mask = np.array(mask_info)
                    if len(mask.shape) == 2:
                        # 调整尺寸
                        mask = cv2.resize(mask, (self.image_size[1], self.image_size[0]), interpolation=cv2.INTER_NEAREST)
                    masks.append(mask.astype(np.float32))
                elif isinstance(mask_info, dict) and 'contour' in mask_info:
                    # 如果是轮廓点，转换为mask
                    mask = self._contour_to_mask(mask_info['contour'], self.image_size)
                    masks.append(mask)
        elif 'contours' in annotation_data:
            # 如果有轮廓列表
            contours = annotation_data['contours']
            for contour in contours[:num_frames]:
                mask = self._contour_to_mask(contour, self.image_size)
                masks.append(mask)
        elif 'labels' in annotation_data:
            # 如果有labels（可能是EchoNet格式）
            labels = annotation_data['labels']
            for label_info in labels[:num_frames]:
                if 'contour' in label_info:
                    mask = self._contour_to_mask(label_info['contour'], self.image_size)
                    masks.append(mask)
                elif 'mask' in label_info:
                    mask = np.array(label_info['mask'])
                    if len(mask.shape) == 2:
                        mask = cv2.resize(mask, (self.image_size[1], self.image_size[0]), interpolation=cv2.INTER_NEAREST)
                    masks.append(mask.astype(np.float32))
        
        # 如果成功加载了masks，确保数量匹配
        if masks:
            while len(masks) < num_frames:
                # 重复最后一帧
                masks.append(masks[-1] if masks else np.zeros(self.image_size, dtype=np.float32))
            masks = masks[:num_frames]
            return np.array(masks)
        else:
            # 如果没有找到mask数据，返回空mask
            return np.zeros((num_frames, *self.image_size), dtype=np.float32)
    
    def _contour_to_mask(self, contour_points: Union[List, np.ndarray], image_size: Tuple[int, int]) -> np.ndarray:
        """
        将轮廓点转换为mask
        
        Args:
            contour_points: 轮廓点坐标 [(x1,y1), (x2,y2), ...] 或 [[x1,y1], [x2,y2], ...]
            image_size: 图像尺寸 (height, width)
            
        Returns:
            mask数组 (H, W)
        """
        mask = np.zeros(image_size, dtype=np.float32)
        
        if contour_points is None or len(contour_points) == 0:
            return mask
        
        try:
            # 转换为numpy数组
            if isinstance(contour_points, list):
                points = np.array(contour_points, dtype=np.int32)
            else:
                points = np.array(contour_points, dtype=np.int32)
            
            # 确保points是2D数组
            if len(points.shape) == 1:
                # 如果是扁平化的数组，需要reshape
                if len(points) % 2 == 0:
                    points = points.reshape(-1, 2)
                else:
                    return mask
            
            # 调整坐标到目标尺寸
            if len(points) > 0:
                # 如果points是归一化的（0-1），需要转换为像素坐标
                if points.max() <= 1.0:
                    points = (points * np.array([image_size[1], image_size[0]])).astype(np.int32)
                
                # 填充轮廓
                cv2.fillPoly(mask, [points], 1.0)
        except Exception as e:
            self.logger.warning(f"转换轮廓到mask失败: {e}")
        
        return mask
    
    def _get_frame_mapping(self, frames_dir: Path) -> Dict[int, int]:
        """
        获取关键帧索引到原始帧号的映射
        
        Args:
            frames_dir: 关键帧目录
            
        Returns:
            字典 {keyframe_index: original_frame_num}
        """
        mapping = {}
        if not frames_dir.exists():
            return mapping
        
        metadata_file = frames_dir / "metadata.json"
        if metadata_file.exists():
            with open(metadata_file, 'r', encoding='utf-8') as f:
                metadata = json.load(f)
            
            # 从metadata中获取key_indices
            key_indices = metadata.get('key_indices', [])
            for i, original_frame_num in enumerate(key_indices):
                mapping[i] = original_frame_num
        else:
            # 如果没有metadata，尝试从文件名推断
            frame_files = sorted(frames_dir.glob("keyframe_*.png"))
            for i, frame_file in enumerate(frame_files):
                try:
                    frame_num = int(frame_file.stem.split('_')[1])
                    mapping[i] = frame_num
                except:
                    mapping[i] = i
        
        return mapping
    
    def get_annotated_frames(self, video_id: str) -> List[int]:
        """
        获取视频的标注帧索引列表
        
        Args:
            video_id: 视频ID
            
        Returns:
            标注帧索引列表
        """
        # 查找样本
        sample = None
        for s in self.samples:
            if s.get('id') == video_id:
                sample = s
                break
        
        if not sample:
            return []
        
        # 加载标注信息
        annotations_dir = self.data_dir / 'annotations' / video_id
        if not annotations_dir.exists():
            return []
        
        frames_dir_path = Path(sample.get('frames_dir', ''))
        frame_mapping = self._get_frame_mapping(frames_dir_path)
        
        # 查找所有标注帧
        annotation_frames = []
        for kf_idx, orig_frame in frame_mapping.items():
            annotation_file = annotations_dir / f"frame_{orig_frame:03d}.json"
            if annotation_file.exists():
                annotation_frames.append(kf_idx)
        
        return sorted(annotation_frames)
    
    def _get_empty_sample(self, video_id: str) -> Dict[str, Any]:
        """获取空样本"""
        # 确保空样本也有正确的帧数（与其他样本一致）
        num_frames = self.max_frames if self.max_frames else 1
        empty_frame = torch.zeros(3, *self.image_size)
        empty_mask = torch.zeros(*self.image_size)
        
        # 创建固定数量的帧
        frames_list = [empty_frame] * num_frames
        masks_list = [empty_mask] * num_frames
        
        return {
            'frames': torch.stack(frames_list),
            'masks': torch.stack(masks_list),
            'video_id': video_id,
            'num_frames': num_frames,
            'is_annotated': torch.zeros(num_frames, dtype=torch.bool)
        }


class EchoFrameDataset(Dataset):
    """Single-frame echocardiogram dataset.

    Loads individual PNG frames (and optional grayscale masks) from
    ``<data_dir>/frames/<split>`` and ``<data_dir>/masks/<split>``.
    """

    def __init__(self,
                 data_dir: Union[str, Path],
                 split: str = 'train',
                 image_size: Tuple[int, int] = (1024, 1024),
                 augmentation: bool = True,
                 preprocessing_config: Optional[Dict[str, Any]] = None,
                 augmentation_config: Optional[Dict[str, Any]] = None):
        """
        Initialize the frame dataset.

        Args:
            data_dir: Root data directory.
            split: Dataset split (train/val/test).
            image_size: Target image size (height, width).
            augmentation: Whether to augment (honored only for the train split).
            preprocessing_config: Optional preprocessing overrides
                (normalize/mean/std).
            augmentation_config: Optional augmentation overrides.
        """
        self.data_dir = Path(data_dir)
        self.split = split
        self.image_size = image_size
        # Augmentation is only ever applied during training.
        self.augmentation = augmentation and split == 'train'
        self.logger = get_logger("EchoFrameDataset")

        cfg = preprocessing_config or {}
        self.preprocessor = EchoPreprocessor(
            image_size=image_size,
            normalize=cfg.get('normalize', True),
            mean=cfg.get('mean', (0.485, 0.456, 0.406)),
            std=cfg.get('std', (0.229, 0.224, 0.225))
        )

        # NOTE: `self.augmentation` starts as a bool and is replaced by the
        # augmentation pipeline object when enabled; __getitem__ only tests
        # its truthiness.
        if self.augmentation:
            self.augmentation = EchoAugmentation(
                image_size=image_size,
                training=True,
                augmentation_config=augmentation_config or {}
            )

        self.samples = self._load_frame_samples()
        self.logger.info(f"加载 {split} 帧数据集: {len(self.samples)} 个样本")

    def _load_frame_samples(self) -> List[Dict[str, Any]]:
        """Collect (frame, mask) file pairs for this split."""
        frames_dir = self.data_dir / 'frames' / self.split
        masks_dir = self.data_dir / 'masks' / self.split

        if not frames_dir.exists():
            self.logger.warning(f"帧目录不存在: {frames_dir}")
            return []

        samples: List[Dict[str, Any]] = []
        for frame_file in frames_dir.glob('*.png'):
            mask_file = masks_dir / frame_file.name
            # mask_path stays None when no matching mask file exists.
            samples.append({
                'frame_path': str(frame_file),
                'mask_path': str(mask_file) if mask_file.exists() else None,
                'id': frame_file.stem,
            })
        return samples

    def __len__(self) -> int:
        """Number of frame samples."""
        return len(self.samples)

    def __getitem__(self, idx: int) -> Dict[str, Any]:
        """
        Load one frame sample.

        Returns:
            Dict with 'image' (C, H, W tensor), 'mask' (H, W tensor) and
            'image_id'. On any failure an all-zero sample is returned
            instead of raising, so DataLoader workers keep running.
        """
        sample = self.samples[idx]
        try:
            frame = cv2.imread(sample['frame_path'])
            if frame is None:
                raise ValueError(f"无法加载图像: {sample['frame_path']}")

            mask = None
            mask_path = sample['mask_path']
            if mask_path and Path(mask_path).exists():
                mask = cv2.imread(mask_path, cv2.IMREAD_GRAYSCALE)

            if self.augmentation and mask is not None:
                # Joint image/mask augmentation (training only).
                augmented = self.augmentation(frame, mask)
                frame_tensor = augmented['image']
                mask_tensor = augmented['mask']
            else:
                processed = self.preprocessor.preprocess_image(frame)
                frame_tensor = torch.from_numpy(processed).permute(2, 0, 1)
                if mask is None:
                    mask_tensor = torch.zeros(*self.image_size)
                else:
                    # Binary-mask convention: scale 0-255 grayscale to [0, 1].
                    mask_tensor = torch.from_numpy(mask).float() / 255.0

            return {
                'image': frame_tensor,
                'mask': mask_tensor,
                'image_id': sample['id'],
            }
        except Exception as e:
            self.logger.error(f"加载帧样本失败 {sample['id']}: {e}")
            return self._get_empty_frame_sample(sample['id'])

    def _get_empty_frame_sample(self, image_id: str) -> Dict[str, Any]:
        """All-zero fallback sample for a frame that failed to load."""
        return {
            'image': torch.zeros(3, *self.image_size),
            'mask': torch.zeros(*self.image_size),
            'image_id': image_id,
        }


def create_dataloader(data_dir: Union[str, Path],
                     dataset_type: str = 'video',
                     split: str = 'train',
                     batch_size: int = 4,
                     num_workers: int = 4,
                     shuffle: bool = True,
                     drop_last: Optional[bool] = None,
                     **kwargs) -> DataLoader:
    """
    Create a data loader for a video or frame dataset.

    Args:
        data_dir: Data directory.
        dataset_type: Dataset type ('video' or 'frame').
        split: Dataset split (train/val/test).
        batch_size: Batch size.
        num_workers: Number of worker processes.
        shuffle: Whether to shuffle (callers should pass False for eval).
        drop_last: Whether to drop the final incomplete batch. When None
            (default), it is True only for the train split so that
            validation/test splits keep every sample.
        **kwargs: Forwarded to the dataset constructor.

    Returns:
        The configured DataLoader.
    """
    if dataset_type == 'video':
        dataset = EchoVideoDataset(data_dir, split=split, **kwargs)
    else:
        dataset = EchoFrameDataset(data_dir, split=split, **kwargs)

    if drop_last is None:
        # BUGFIX: drop_last was unconditionally True, silently discarding the
        # tail batch of val/test sets; only training benefits from dropping.
        drop_last = (split == 'train')

    return DataLoader(
        dataset,
        batch_size=batch_size,
        shuffle=shuffle,
        num_workers=num_workers,
        pin_memory=True,
        drop_last=drop_last
    )


def test_dataset():
    """Smoke-test both dataset classes against the local data directory."""
    logger = get_logger("TestDataset")

    data_dir = Path("data/raw/echocardiogram")
    if not data_dir.exists():
        logger.warning(f"数据目录不存在: {data_dir}")
        return

    # Video dataset: construct, report size, and peek at the first sample.
    try:
        video_ds = EchoVideoDataset(data_dir, split='train')
        logger.info(f"视频数据集大小: {len(video_ds)}")
        if len(video_ds) > 0:
            first = video_ds[0]
            logger.info(f"样本键: {first.keys()}")
            logger.info(f"帧张量形状: {first['frames'].shape}")
    except Exception as e:
        logger.error(f"视频数据集测试失败: {e}")

    # Frame dataset: construct and report size only.
    try:
        frame_ds = EchoFrameDataset(data_dir, split='train')
        logger.info(f"帧数据集大小: {len(frame_ds)}")
    except Exception as e:
        logger.error(f"帧数据集测试失败: {e}")


# Allow running this module directly for a quick dataset smoke test.
if __name__ == "__main__":
    test_dataset()
