#!/usr/bin/env python3
"""
统一提示生成器训练脚本

训练多模态智能提示生成系统，支持：
- NLP提示模块
- 周期性提示模块
- 质量感知提示模块
- 记忆掩码提示模块
- 自提示机制模块
- 提示融合模块
"""

import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
from pathlib import Path
import yaml
import argparse
from tqdm import tqdm
import sys
from datetime import datetime
import numpy as np

# 添加项目路径
project_root = Path(__file__).parent.parent
sys.path.insert(0, str(project_root))
from src.sam.prompt.unified_prompt_generator import UnifiedPromptGenerator
from src.data.dataset import EchoVideoDataset, EchoFrameDataset
from src.utils.logger import get_logger
from src.utils.config import load_config
from src.utils.visualization import setup_plot_style, plot_training_curves
from src.utils.visualization.report_generator import ReportGenerator


def parse_args():
    """Parse the training script's command-line arguments.

    Returns:
        argparse.Namespace with ``config``, ``resume``, ``device`` and
        ``output_dir`` attributes (all strings, ``resume`` may be None).
    """
    # (flag, default, help) specs — all options are plain string arguments.
    arg_specs = [
        ('--config', 'configs/training.yaml', '配置文件路径'),
        ('--resume', None, '恢复训练的检查点路径'),
        ('--device', 'cuda', '设备 (cuda/cpu)'),
        ('--output_dir', 'results/training', '输出目录'),
    ]
    parser = argparse.ArgumentParser(description='训练统一提示生成器')
    for flag, default, help_text in arg_specs:
        parser.add_argument(flag, type=str, default=default, help=help_text)
    return parser.parse_args()


from torch.utils.data._utils.collate import default_collate

# Cap on the temporal length of a padded/truncated batch, to bound memory.
# NOTE(review): upstream comments say the periodic module needs >= 20 frames
# and that 40 covers more cardiac cycles — confirm with the model config.
# (The original "bump max_frames up to 20" branch was provably a no-op:
# min(raw, 40) < 20 implies raw < 20, so min(raw, 20) == min(raw, 40).)
_MAX_FRAMES_LIMIT = 40


def _select_frame_indices(num_frames, max_frames, annotation_frames):
    """Choose at most ``max_frames`` indices out of ``num_frames`` frames.

    Annotated frame indices are kept first; remaining slots are filled by
    uniformly sampling the non-annotated frames, and the result is sorted so
    temporal order is preserved. Without annotation info, a near-contiguous
    constant-stride sampling is used to preserve periodicity.

    Returns:
        torch.LongTensor of selected frame indices.
    """
    if annotation_frames:
        annotated = torch.tensor(annotation_frames, dtype=torch.long)
        annotated = annotated[annotated < num_frames]  # drop out-of-range indices
        if len(annotated) == 0:
            # Annotation info present but unusable: plain uniform sampling.
            return torch.linspace(0, num_frames - 1, max_frames, dtype=torch.long)
        remaining = max_frames - len(annotated)
        if remaining <= 0:
            # More annotated frames than slots: keep the earliest ones.
            return annotated[:max_frames]
        all_indices = torch.arange(num_frames, dtype=torch.long)
        others = all_indices[~torch.isin(all_indices, annotated)]
        if len(others) == 0:
            return annotated[:max_frames]
        if len(others) <= remaining:
            extra = others
        else:
            # Uniformly sample the non-annotated frames.
            step = len(others) / remaining
            picks = (torch.arange(remaining, dtype=torch.float) * step).long()
            extra = others[picks]
        combined = torch.sort(torch.cat([annotated, extra]))[0]
        return combined[:max_frames]
    # No annotation info: constant-stride sampling, deduplicated, topped up
    # from the tail if deduplication left us short of max_frames.
    step = num_frames / max_frames
    indices = (torch.arange(max_frames, dtype=torch.float) * step).long()
    indices = torch.clamp(indices, 0, num_frames - 1)
    indices = torch.unique_consecutive(indices)
    if len(indices) < max_frames:
        shortfall = max_frames - len(indices)
        last = indices[-1].item()
        if last < num_frames - 1:
            tail = torch.linspace(last + 1, num_frames - 1, shortfall, dtype=torch.long)
            indices = torch.cat([indices, tail])
    return indices


def _collate_frames(frames_list, annotation_frames_list):
    """Collate variable-shape video clips into one (B, T, C, H, W) tensor.

    Fast path: when all clips share a shape, default_collate simply stacks
    them (no frame cap is applied on that path — original behaviour kept).
    Otherwise clips are truncated (preferring annotated frames) and
    zero-padded to a common (T, C, H, W) shape.
    """
    try:
        return default_collate(frames_list)
    except Exception:
        pass  # shapes differ: pad/truncate below
    if not frames_list or not all(isinstance(f, torch.Tensor) for f in frames_list):
        raise ValueError(f"frames_list无效: 长度={len(frames_list) if frames_list else 0}, 类型={[type(f) for f in frames_list[:3]]}")

    shapes = [f.shape for f in frames_list]
    max_frames_raw = max(s[0] if len(s) > 0 else 1 for s in shapes)
    max_frames = min(max_frames_raw, _MAX_FRAMES_LIMIT)

    if len(shapes[0]) >= 3:
        # Assume (T, C, H, W); spatial sizes are usually equal, pad just in case.
        max_h = max(s[-2] if len(s) >= 2 else 1 for s in shapes)
        max_w = max(s[-1] if len(s) >= 2 else 1 for s in shapes)
    else:
        max_h = max_w = 1024  # fallback spatial size

    padded_frames = []
    for idx, clip in enumerate(frames_list):
        if len(clip.shape) == 4:  # (T, C, H, W)
            if clip.shape[0] > max_frames:
                # Smart truncation: keep annotated frames when we know them.
                ann = annotation_frames_list[idx] if idx < len(annotation_frames_list) else []
                clip = clip[_select_frame_indices(clip.shape[0], max_frames, ann)]
            T, C, H, W = clip.shape
            if T < max_frames or H != max_h or W != max_w:
                # Allocate on CPU to avoid GPU memory pressure in workers.
                padded = torch.zeros(max_frames, C, max_h, max_w, dtype=clip.dtype, device='cpu')
                padded[:T, :, :H, :W] = clip
                padded_frames.append(padded)
            else:
                padded_frames.append(clip)
        else:
            # Unexpected rank; pass through unchanged (should not happen).
            padded_frames.append(clip)
    return torch.stack(padded_frames)  # (B, T, C, H, W)


def _collate_masks(masks_list):
    """Collate (T, H, W) mask stacks into a (B, T, H, W) tensor.

    Falls back to zero-padding/truncation when shapes differ; returns the
    raw list unchanged when the entries are not all tensors.
    """
    try:
        return default_collate(masks_list)
    except Exception:
        pass  # shapes differ: pad/truncate below
    if not masks_list or not all(isinstance(m, torch.Tensor) for m in masks_list):
        return masks_list
    shapes = [m.shape for m in masks_list]
    max_frames = min(max(s[0] if len(s) > 0 else 1 for s in shapes), _MAX_FRAMES_LIMIT)
    max_h = max(s[-2] if len(s) >= 2 else 1 for s in shapes)
    max_w = max(s[-1] if len(s) >= 2 else 1 for s in shapes)
    padded_masks = []
    for mask in masks_list:
        if len(mask.shape) == 3:  # (T, H, W)
            if mask.shape[0] > max_frames:
                # Uniform temporal subsampling (masks carry no annotation info).
                keep = torch.linspace(0, mask.shape[0] - 1, max_frames, dtype=torch.long)
                mask = mask[keep]
            T, H, W = mask.shape
            if T < max_frames or H != max_h or W != max_w:
                padded = torch.zeros(max_frames, max_h, max_w, dtype=mask.dtype, device='cpu')
                padded[:T, :H, :W] = mask
                padded_masks.append(padded)
            else:
                padded_masks.append(mask)
        else:
            padded_masks.append(mask)
    return torch.stack(padded_masks)  # (B, T, H, W)


def _collate_is_annotated(flags_list):
    """Collate per-frame annotation flags into a (B, T) tensor."""
    try:
        return default_collate(flags_list)
    except Exception:
        pass  # lengths differ: pad/truncate below
    if flags_list and all(isinstance(a, torch.Tensor) for a in flags_list):
        max_frames = min(max(a.shape[0] if len(a.shape) > 0 else 1 for a in flags_list), _MAX_FRAMES_LIMIT)
        padded_flags = []
        for flags in flags_list:
            if len(flags.shape) == 1:  # (T,)
                if flags.shape[0] > max_frames:
                    flags = flags[:max_frames]
                if flags.shape[0] < max_frames:
                    padded = torch.zeros(max_frames, dtype=flags.dtype, device='cpu')
                    padded[:flags.shape[0]] = flags
                    flags = padded
            padded_flags.append(flags)
        return torch.stack(padded_flags)  # (B, T)
    # Not all tensors: try a best-effort conversion before giving up.
    try:
        return torch.stack([a if isinstance(a, torch.Tensor) else torch.tensor(a) for a in flags_list])
    except Exception:  # was a bare ``except`` upstream
        return flags_list


def custom_collate_fn(batch):
    """Custom collate that tolerates variable-length annotation fields.

    ``annotation_points`` and ``annotation_frames`` are kept as plain lists
    (``default_collate`` cannot batch them); ``frames`` / ``masks`` /
    ``is_annotated`` are padded or truncated to a shared temporal length when
    their shapes disagree; strings stay as lists and scalars become tensors.

    Args:
        batch: list of per-sample dicts produced by the dataset.

    Returns:
        dict: the collated batch.

    Raises:
        ValueError: if ``batch`` is empty or frames cannot be collated.
    """
    if not batch:
        raise ValueError("批次为空")

    tensor_fields = {}   # tensor-like fields, stacked per key
    string_fields = {}   # str/bytes fields, kept as lists
    scalar_fields = {}   # int/float/bool fields, turned into 1-D tensors
    annotation_points_list = [item.get('annotation_points') for item in batch]
    annotation_frames_list = [item.get('annotation_frames', []) for item in batch]

    for item in batch:
        for key, value in item.items():
            if key in ('annotation_points', 'annotation_frames'):
                continue  # handled above, stays a per-sample list
            if isinstance(value, torch.Tensor):
                tensor_fields.setdefault(key, []).append(value)
            elif isinstance(value, (str, bytes)):
                string_fields.setdefault(key, []).append(value)
            elif isinstance(value, (int, float, bool)):
                scalar_fields.setdefault(key, []).append(value)
            else:
                # Best effort: arrays/sequences become tensors; anything that
                # fails is stringified (mirrors the original fallback).
                try:
                    if isinstance(value, np.ndarray):
                        tensor_fields.setdefault(key, []).append(torch.from_numpy(value))
                    elif isinstance(value, (list, tuple)):
                        tensor_fields.setdefault(key, []).append(torch.tensor(value))
                    else:
                        tensor_fields.setdefault(key, []).append(value)
                except Exception:
                    string_fields.setdefault(key, []).append(str(value))

    result = {}
    # Fields with potentially inconsistent temporal shapes get dedicated
    # pad/truncate handling.
    if 'frames' in tensor_fields:
        result['frames'] = _collate_frames(tensor_fields.pop('frames'), annotation_frames_list)
    if 'masks' in tensor_fields:
        result['masks'] = _collate_masks(tensor_fields.pop('masks'))
    if 'is_annotated' in tensor_fields:
        result['is_annotated'] = _collate_is_annotated(tensor_fields.pop('is_annotated'))

    # Remaining tensor-like fields. default_collate expects a *list of
    # samples*; the original passed it a dict, which always raised and
    # silently fell back to torch.stack — collate each field's list instead.
    for key, values in tensor_fields.items():
        try:
            result[key] = default_collate(values)
        except Exception:
            try:
                result[key] = torch.stack(values)
            except Exception:
                result[key] = values  # give up, keep the raw list

    # String fields stay as per-sample lists.
    result.update(string_fields)

    # Scalar fields become 1-D tensors when possible.
    for key, values in scalar_fields.items():
        try:
            result[key] = torch.tensor(values)
        except Exception:
            result[key] = values

    # Kept as lists of per-sample entries; downstream code indexes them.
    result['annotation_points'] = annotation_points_list
    result['annotation_frames'] = annotation_frames_list

    return result


def train_epoch(model, dataloader, optimizer, criterion, device, logger, 
                loss_weights=None, train_only_on_annotated=False):
    """
    Train the model for one epoch.

    Adapts two batch layouts — video batches (key ``'frames'``, with optional
    ``'masks'`` / ``'is_annotated'`` / ``'annotation_points'``) and
    single-frame batches (key ``'image'`` / ``'mask'``) — generates prompts
    with the model, and optimizes a weighted sum of a segmentation loss and a
    point-prompt loss. Any batch that raises is logged and skipped.

    Args:
        model: prompt-generator model; must expose ``generate_initial_prompts``,
            ``forward`` and ``compute_loss`` (and may expose
            ``auto_point_generator.compute_point_loss``).
        dataloader: training data loader.
        optimizer: optimizer stepping ``model``'s parameters.
        criterion: loss function. NOTE(review): unused in this body — losses
            come from ``model.compute_loss`` / point-loss helpers.
        device: device tensors are moved to.
        logger: logger for progress and error reporting.
        loss_weights: loss-weight dict, e.g.
            {'segmentation_weight': 1.0, 'point_prompt_weight': 0.5}.
        train_only_on_annotated: when True, compute the loss only on the first
            annotated frame of each sample (batches with no annotated frames
            are skipped).

    Returns:
        float: average total loss over the processed batches (0.0 if none).
    """
    model.train()
    total_loss = 0.0
    total_seg_loss = 0.0
    total_point_loss = 0.0
    num_batches = 0
    num_annotated_batches = 0
    
    if loss_weights is None:
        loss_weights = {'segmentation_weight': 1.0, 'point_prompt_weight': 0.5}
    
    seg_weight = loss_weights.get('segmentation_weight', 1.0)
    point_weight = loss_weights.get('point_prompt_weight', 0.5)
    
    pbar = tqdm(dataloader, desc='Training')
    logger.info(f"开始训练循环，总批次数: {len(dataloader)}")
    
    for batch_idx, batch in enumerate(pbar):
        try:
            # Debug: log the layout of the first batch only
            if batch_idx == 0:
                logger.info(f"第一个batch的键: {batch.keys()}")
                if 'frames' in batch:
                    frames_val = batch['frames']
                    if isinstance(frames_val, torch.Tensor):
                        logger.info(f"frames形状: {frames_val.shape}, dtype={frames_val.dtype}")
                    else:
                        logger.info(f"frames类型: {type(frames_val)}, 是列表: {isinstance(frames_val, list)}")
                        if isinstance(frames_val, list) and len(frames_val) > 0:
                            logger.info(f"frames列表长度: {len(frames_val)}, 第一个元素类型: {type(frames_val[0])}")
                if 'masks' in batch:
                    logger.info(f"masks形状: {batch['masks'].shape if batch['masks'] is not None and isinstance(batch['masks'], torch.Tensor) else type(batch['masks'])}")
                if 'is_annotated' in batch:
                    logger.info(f"is_annotated形状: {batch['is_annotated'].shape if batch['is_annotated'] is not None and isinstance(batch['is_annotated'], torch.Tensor) else type(batch['is_annotated'])}")
            
            # Dataset-format adapter: EchoVideoDataset yields 'frames',
            # EchoFrameDataset yields 'image'
            if 'frames' in batch:
                # Make sure frames is a tensor (the collate may leave a list)
                frames = batch['frames']
                if isinstance(frames, list):
                    # Fall back to stacking; skip the batch if shapes disagree
                    try:
                        frames = torch.stack(frames)
                    except Exception as e:
                        logger.error(f"无法将frames列表转换为tensor: {e}")
                        logger.error(f"frames列表长度: {len(frames)}, 第一个元素形状: {frames[0].shape if hasattr(frames[0], 'shape') else 'N/A'}")
                        continue
                
                # Video batch: all frames available, annotated ones preferred
                frames = frames.to(device)  # (B, T, C, H, W)
                masks = batch.get('masks', None)
                is_annotated = batch.get('is_annotated', None)
                annotation_points = batch.get('annotation_points', None)
                
                # Coerce masks and is_annotated to tensors on the target device
                if masks is not None:
                    if isinstance(masks, list):
                        try:
                            masks = torch.stack(masks).to(device)
                        except Exception as e:
                            logger.warning(f"无法将masks列表转换为tensor: {e}")
                            masks = None
                    else:
                        masks = masks.to(device)
                
                if is_annotated is not None:
                    if isinstance(is_annotated, list):
                        try:
                            is_annotated = torch.stack([torch.tensor(a) if not isinstance(a, torch.Tensor) else a for a in is_annotated]).to(device)
                        except Exception as e:
                            logger.warning(f"无法将is_annotated列表转换为tensor: {e}")
                            is_annotated = None
                    else:
                        is_annotated = is_annotated.to(device)
                
                # Restrict training to annotated frames when requested
                if train_only_on_annotated and is_annotated is not None:
                    # Skip the whole batch if nothing in it is annotated
                    if not is_annotated.any():
                        continue
                    
                    # Pick the first annotated frame of every sample
                    batch_size = frames.shape[0]
                    selected_frames = []
                    selected_masks = []
                    selected_annotation_points = []
                    
                    for b in range(batch_size):
                        # Index of this sample's first annotated frame
                        annotated_indices = torch.where(is_annotated[b])[0]
                        if len(annotated_indices) > 0:
                            frame_idx = annotated_indices[0].item()
                            selected_frames.append(frames[b, frame_idx])
                            if masks is not None:
                                if len(masks.shape) == 4:  # (B, T, H, W)
                                    selected_masks.append(masks[b, frame_idx])
                                else:  # (B, T, 1, H, W)
                                    selected_masks.append(masks[b, frame_idx, 0])
                            else:
                                selected_masks.append(None)
                            
                            # Matching annotation points.
                            # annotation_points is a per-sample list; each
                            # sample holds one entry per frame
                            if annotation_points is not None and b < len(annotation_points):
                                ann_pts_batch = annotation_points[b]
                                if isinstance(ann_pts_batch, list) and frame_idx < len(ann_pts_batch):
                                    selected_annotation_points.append(ann_pts_batch[frame_idx])
                                else:
                                    selected_annotation_points.append(None)
                            else:
                                selected_annotation_points.append(None)
                        else:
                            # No annotated frame: fall back to frame 0 (this
                            # sample will not contribute to the loss)
                            selected_frames.append(frames[b, 0])
                            if masks is not None:
                                if len(masks.shape) == 4:
                                    selected_masks.append(masks[b, 0])
                                else:
                                    selected_masks.append(masks[b, 0, 0])
                            else:
                                selected_masks.append(None)
                            selected_annotation_points.append(None)
                    
                    images = torch.stack(selected_frames)  # (B, C, H, W)
                    if any(m is not None for m in selected_masks):
                        target_masks = torch.stack([m if m is not None else torch.zeros(frames.shape[3], frames.shape[4], device=device) 
                                                   for m in selected_masks])  # (B, H, W)
                    else:
                        target_masks = None
                    annotation_points = selected_annotation_points
                else:
                    # Train on the first frame of every clip
                    images = frames[:, 0]  # first frame (B, C, H, W)
                    if masks is not None:
                        masks = masks.to(device)
                        target_masks = masks[:, 0] if len(masks.shape) == 4 else masks[:, 0, 0]  # (B, H, W)
                    else:
                        target_masks = None
                    
                    # Annotation points of the first frame only
                    if annotation_points is not None:
                        annotation_points = [ann_pts[0] if ann_pts and len(ann_pts) > 0 else None 
                                            for ann_pts in annotation_points]
            elif 'image' in batch:
                # Single-frame dataset
                images = batch['image'].to(device)
                target_masks = batch.get('mask', None)
                if target_masks is not None:
                    target_masks = target_masks.to(device)
                is_annotated = None
            else:
                logger.warning(f"批次 {batch_idx} 缺少 'frames' 或 'image' 字段，跳过")
                logger.warning(f"批次键: {list(batch.keys())}")
                continue
            
            # Target dict (ground-truth masks drive the segmentation loss)
            targets = {}
            if target_masks is not None:
                if len(target_masks.shape) == 2:  # 2-D mask; NOTE(review): was labeled (B, H, W) upstream, but a 2-D tensor cannot carry a batch dim — confirm
                    target_masks = target_masks.unsqueeze(1)  # (B, 1, H, W)
                elif len(target_masks.shape) == 3 and target_masks.shape[1] != 1:  # (B, H, W): add a channel dim
                    target_masks = target_masks.unsqueeze(1)  # (B, 1, H, W)
                # Normalize mask values into [0, 1]
                if target_masks.max() > 1.0:
                    target_masks = target_masks.float() / 255.0
                targets['masks'] = target_masks
            
            # Forward pass
            optimizer.zero_grad()
            
            # Context handed to the prompt generator
            context = {}
            
            # Seed the context with a mask prompt when one is available
            if 'masks' in batch and masks is not None:
                # First-frame mask as the initial prompt
                prev_mask = masks[:, 0].unsqueeze(1) if len(masks.shape) == 5 else masks[:, 0:1]
                context['prev_masks'] = prev_mask.to(device)
            
            # Simplification: treat every batch as the first frame of a video
            is_first_frame = True  # NOTE(review): never set to False in this loop, so the forward() branch below is dead
            
            # Generate prompts
            try:
                if is_first_frame:
                    prompts = model.generate_initial_prompts(images, context)
                else:
                    prompts = model.forward(images, context, is_first_frame=False)
            except Exception as e:
                logger.error(f"生成提示时出错 (batch {batch_idx}): {e}")
                import traceback
                logger.error(traceback.format_exc())
                continue
            
            # Losses, seeded as grad-tracking zeros so backward() stays safe
            seg_loss = torch.tensor(0.0, device=device, requires_grad=True)
            point_loss = torch.tensor(0.0, device=device, requires_grad=True)
            
            # Segmentation loss against the target masks
            if targets and 'masks' in targets and target_masks is not None:
                # Only count samples whose mask is non-empty (non-zero)
                valid_mask = (target_masks.sum(dim=(1, 2, 3)) > 0) if len(target_masks.shape) == 4 else (target_masks.sum(dim=(1, 2)) > 0)
                
                if valid_mask.any():
                    seg_loss = model.compute_loss(prompts, targets)
                    # Scale down when only part of the batch has valid masks
                    if not valid_mask.all():
                        seg_loss = seg_loss * (valid_mask.float().sum() / len(valid_mask))
                else:
                    seg_loss = torch.tensor(0.0, device=device, requires_grad=True)
            
            # Point-prompt loss
            if annotation_points is not None and 'point_coords' in prompts:
                # Extract annotated point coordinates.
                # annotation_points entries are dicts {'endo': points, 'epi': points};
                # convert them into normalized coordinate tensors
                batch_size = images.shape[0]
                target_points_list = []
                
                for i in range(batch_size):
                    if i < len(annotation_points) and annotation_points[i] is not None:
                        ann_pts = annotation_points[i]
                        # Prefer endocardium points, fall back to epicardium
                        if ann_pts.get('endo') is not None:
                            points = torch.tensor(ann_pts['endo'], device=device, dtype=torch.float32)
                        elif ann_pts.get('epi') is not None:
                            points = torch.tensor(ann_pts['epi'], device=device, dtype=torch.float32)
                        else:
                            points = None
                        
                        if points is not None and len(points) > 0:
                            # Points are pixel coordinates; normalize to [0, 1]
                            H, W = images.shape[2], images.shape[3]
                            points_norm = points.clone()
                            points_norm[:, 0] = torch.clamp(points[:, 0] / W, 0, 1)
                            points_norm[:, 1] = torch.clamp(points[:, 1] / H, 0, 1)
                            target_points_list.append(points_norm)
                        else:
                            target_points_list.append(None)
                    else:
                        target_points_list.append(None)
                
                # Point loss over samples that actually carry annotations
                if any(p is not None for p in target_points_list):
                    # Indices of the annotated samples
                    valid_indices = [i for i, p in enumerate(target_points_list) if p is not None]
                    if valid_indices:
                        # Restrict predictions/targets to those samples
                        pred_points = prompts['point_coords'][valid_indices]
                        target_points = torch.stack([target_points_list[i] for i in valid_indices])
                        
                        # Pad both sides to a common point count when they differ
                        max_points = max(pred_points.shape[1], target_points.shape[1])
                        if pred_points.shape[1] < max_points:
                            # Zero-pad the predicted points
                            padding = torch.zeros(pred_points.shape[0], max_points - pred_points.shape[1], 2, 
                                                device=pred_points.device)
                            pred_points = torch.cat([pred_points, padding], dim=1)
                        if target_points.shape[1] < max_points:
                            # Zero-pad the target points
                            padding = torch.zeros(target_points.shape[0], max_points - target_points.shape[1], 2,
                                                device=target_points.device)
                            target_points = torch.cat([target_points, padding], dim=1)
                        
                        # Compare only the shared leading points (ignore zero padding)
                        num_valid_points = min(pred_points.shape[1], target_points.shape[1])
                        pred_points = pred_points[:, :num_valid_points, :]
                        target_points = target_points[:, :num_valid_points, :]
                        
                        # Delegate to the auto point-prompt generator when present
                        if hasattr(model, 'auto_point_generator') and model.auto_point_generator is not None:
                            point_loss = model.auto_point_generator.compute_point_loss(
                                pred_points, target_points
                            )
                        else:
                            # MSE fallback
                            point_loss = torch.nn.functional.mse_loss(pred_points, target_points)
            
            # Weighted combined loss
            total_loss_batch = seg_weight * seg_loss + point_weight * point_loss
            
            # Guard: keep the loss a tensor
            if not isinstance(total_loss_batch, torch.Tensor):
                total_loss_batch = torch.tensor(total_loss_batch, device=device, requires_grad=True)
            
            # Backward pass (skipped entirely for zero losses)
            if total_loss_batch.requires_grad and total_loss_batch.item() != 0.0:
                total_loss_batch.backward()
                torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)
                optimizer.step()
            
            total_loss += total_loss_batch.item()
            total_seg_loss += seg_loss.item() if isinstance(seg_loss, torch.Tensor) else 0.0
            total_point_loss += point_loss.item() if isinstance(point_loss, torch.Tensor) else 0.0
            num_batches += 1
            if is_annotated is not None and is_annotated.any():
                num_annotated_batches += 1
            
            # Progress-bar readout
            pbar.set_postfix({
                'loss': f'{total_loss_batch.item():.4f}',
                'seg': f'{seg_loss.item():.4f}' if isinstance(seg_loss, torch.Tensor) else '0.0',
                'point': f'{point_loss.item():.4f}' if isinstance(point_loss, torch.Tensor) else '0.0'
            })
        except Exception as e:
            logger.error(f"处理批次 {batch_idx} 时出错: {e}")
            import traceback
            logger.error(traceback.format_exc())
            continue
    
    avg_loss = total_loss / num_batches if num_batches > 0 else 0.0
    avg_seg_loss = total_seg_loss / num_batches if num_batches > 0 else 0.0
    avg_point_loss = total_point_loss / num_batches if num_batches > 0 else 0.0
    
    logger.info(f"训练损失: 总计={avg_loss:.4f}, 分割={avg_seg_loss:.4f}, 点提示={avg_point_loss:.4f}, "
                f"标注批次={num_annotated_batches}/{num_batches}")
    
    return avg_loss


def validate(model, dataloader, criterion, device, logger):
    """Run one no-grad validation pass and return the mean per-batch loss.

    Args:
        model: prompt generator exposing ``generate_initial_prompts`` and
            ``compute_loss`` (project type — interface inferred from usage).
        dataloader: yields batch dicts containing either a ``'frames'`` key
            (video clips; first frame is used) or an ``'image'`` key.
        criterion: unused here; kept for signature parity with ``train_epoch``.
        device: torch device that inputs are moved to.
        logger: unused here; kept for signature parity with ``train_epoch``.

    Returns:
        float: average loss over processed batches, or 0.0 if none matched.
    """
    model.eval()
    total_loss = 0.0
    num_batches = 0

    with torch.no_grad():
        pbar = tqdm(dataloader, desc='Validation')
        for batch in pbar:
            # BUGFIX: 'masks' was only bound on the 'frames' branch, so the
            # context block below could read an unbound name (NameError) or a
            # stale value from a previous iteration when a batch arrived via
            # the 'image' branch. Reset it on every iteration.
            masks = None

            # Adapt to either dataset layout.
            if 'frames' in batch:
                frames = batch['frames'].to(device)
                images = frames[:, 0]  # use only the first frame
                masks = batch.get('masks', None)
                if masks is not None:
                    masks = masks.to(device)
                    # 5-D masks are (B, T, C, H, W); take the first frame.
                    target_masks = masks[:, 0] if len(masks.shape) == 5 else masks
                else:
                    target_masks = None
            elif 'image' in batch:
                images = batch['image'].to(device)
                target_masks = batch.get('mask', None)
                if target_masks is not None:
                    target_masks = target_masks.to(device)
            else:
                continue  # unrecognized batch layout — skip

            # Build the supervision target dict.
            targets = {}
            if target_masks is not None:
                if len(target_masks.shape) == 3:
                    target_masks = target_masks.unsqueeze(1)  # add channel dim
                targets['masks'] = target_masks

            # Context passed to prompt generation (previous-frame masks).
            context = {}
            if 'masks' in batch and masks is not None:
                prev_mask = masks[:, 0].unsqueeze(1) if len(masks.shape) == 5 else masks[:, 0:1]
                context['prev_masks'] = prev_mask.to(device)

            # Validation treats every batch as a first frame, so the
            # non-first-frame path below is intentionally unreachable.
            is_first_frame = True

            if is_first_frame:
                prompts = model.generate_initial_prompts(images, context)
            else:
                prompts = model.forward(images, context, is_first_frame=False)

            # Supervised loss when targets exist; otherwise fall back to a
            # confidence-maximizing surrogate (or zero if unavailable).
            if targets:
                loss = model.compute_loss(prompts, targets)
            else:
                if 'confidence' in prompts:
                    loss = -prompts['confidence'].mean()
                else:
                    loss = torch.tensor(0.0, device=device)

            # Guard against models returning plain Python numbers.
            if not isinstance(loss, torch.Tensor):
                loss = torch.tensor(loss, device=device)

            total_loss += loss.item()
            num_batches += 1

            pbar.set_postfix({'loss': f'{loss.item():.4f}'})

    avg_loss = total_loss / num_batches if num_batches > 0 else 0.0
    return avg_loss


def _safe_float(value, default):
    """Coerce *value* to float, tolerating YAML strings like "1e-4"; fall back to *default*."""
    if value is None:
        return default
    if isinstance(value, (int, float)):
        return float(value)
    if isinstance(value, str):
        try:
            return float(value)
        except ValueError:
            return default
    return default


def _safe_int(value, default):
    """Coerce *value* to int (via float, so "1e2"-style strings work); fall back to *default*."""
    if value is None:
        return default
    if isinstance(value, (int, float)):
        return int(value)
    if isinstance(value, str):
        try:
            # Go through float first so scientific-notation strings parse.
            return int(float(value))
        except ValueError:
            return default
    return default


def _save_checkpoint(path, epoch, model, optimizer, scheduler,
                     best_val_loss, train_losses, val_losses, config):
    """Serialize the full training state to *path*, creating parent dirs as needed."""
    path.parent.mkdir(parents=True, exist_ok=True)
    torch.save({
        'epoch': epoch,
        'model_state_dict': model.state_dict(),
        'optimizer_state_dict': optimizer.state_dict(),
        'scheduler_state_dict': scheduler.state_dict(),
        'best_val_loss': best_val_loss,
        'train_losses': train_losses,
        'val_losses': val_losses,
        'config': config
    }, path)


def main():
    """Entry point: load config, build model/data/optimizer, run the training loop.

    Side effects: writes checkpoints, a training-curve figure, and an
    experiment report under ``--output_dir``; logs progress throughout.
    """
    args = parse_args()

    # Logging
    logger = get_logger("TrainUnifiedPrompt")
    logger.info(f"开始训练统一提示生成器")
    logger.info(f"配置文件: {args.config}")

    # Configuration
    config = load_config(args.config)

    # Output directory
    output_dir = Path(args.output_dir)
    output_dir.mkdir(parents=True, exist_ok=True)

    # Device: honor --device but fall back to CPU when CUDA is unavailable.
    device = torch.device(args.device if torch.cuda.is_available() else 'cpu')
    logger.info(f"使用设备: {device}")

    # Model
    model = UnifiedPromptGenerator(
        input_dim=config.get('input_dim', 256),
        prompt_dim=config.get('prompt_dim', 256),
        enable_nlp=config.get('enable_nlp', True),
        enable_periodic=config.get('enable_periodic', True),
        enable_quality=config.get('enable_quality', True),
        enable_memory=config.get('enable_memory', True),
        fusion_method=config.get('fusion_method', 'attention'),
        device=device
    ).to(device)

    logger.info(f"模型创建完成: {model.get_model_info()}")

    # Data paths
    data_config = config.get('data', {})
    dataset_path = data_config.get('dataset_path', 'data/processed/segmentation')
    processed_path = data_config.get('processed_path', 'data/processed/segmentation')

    # If the config declares multiple datasets, pick the first enabled one.
    datasets = data_config.get('datasets', {})
    selected_dataset = None
    for name, dataset_info in datasets.items():
        if dataset_info.get('enabled', False):
            selected_dataset = dataset_info.get('path', dataset_path)
            logger.info(f"使用数据集: {name} - {selected_dataset}")
            break

    # Fall back to the default path when no dataset was enabled.
    if selected_dataset is None:
        selected_dataset = dataset_path
        logger.info(f"使用默认数据路径: {selected_dataset}")

    # Datasets — prefer the preprocessed path when it already exists.
    # NOTE(review): if preprocessing has not been run yet this raises; we
    # report the remedy and exit instead of crashing.
    try:
        train_dataset = EchoVideoDataset(
            data_dir=processed_path if Path(processed_path).exists() else selected_dataset,
            split='train',
            **config.get('dataset', {})
        )
        val_dataset = EchoVideoDataset(
            data_dir=processed_path if Path(processed_path).exists() else selected_dataset,
            split='val',
            **config.get('dataset', {})
        )
    except Exception as e:
        logger.error(f"数据集创建失败: {e}")
        logger.info("提示：可能需要先运行数据预处理脚本")
        logger.info("运行: python scripts/run_preprocessing.py")
        return

    logger.info(f"训练集大小: {len(train_dataset)}, 验证集大小: {len(val_dataset)}")

    training_config = config.get('training', {})
    loader_config = data_config.get('loader', {})

    # DataLoader workers: multiprocessing is unreliable on Windows — force 0.
    num_workers = loader_config.get('num_workers', training_config.get('num_workers', 0))
    import platform
    if platform.system() == 'Windows' and num_workers > 0:
        logger.warning("Windows系统检测到，将num_workers设为0以避免多进程问题")
        num_workers = 0

    # Cap batch size at 2 to limit CUDA memory usage.
    batch_size = loader_config.get('batch_size', training_config.get('batch_size', 4))
    if batch_size > 2:
        logger.warning(f"检测到batch_size={batch_size}，为减少内存使用，将batch_size设为2")
        batch_size = 2

    # pin_memory disabled to avoid CUDA memory errors (especially on Windows).
    use_pin_memory = False

    # Custom collate handles the variable-length annotation fields.
    train_loader = DataLoader(
        train_dataset,
        batch_size=batch_size,
        shuffle=loader_config.get('shuffle', True),
        num_workers=num_workers,
        pin_memory=use_pin_memory,
        drop_last=loader_config.get('drop_last', True),
        collate_fn=custom_collate_fn,
        timeout=0  # must be 0 for single-process loading on Windows
    )
    val_loader = DataLoader(
        val_dataset,
        batch_size=batch_size,
        shuffle=False,
        num_workers=num_workers,
        pin_memory=use_pin_memory,
        drop_last=False,
        collate_fn=custom_collate_fn,
        timeout=0  # must be 0 for single-process loading on Windows
    )

    logger.info(f"DataLoader配置: batch_size={batch_size}, num_workers={num_workers}, pin_memory={use_pin_memory}")

    # Optimizer — config values may arrive as YAML strings such as "1e-4",
    # hence the _safe_float/_safe_int coercions throughout.
    optimizer_config = training_config.get('optimizer', {})
    lr = _safe_float(optimizer_config.get('lr', training_config.get('lr', 1e-4)), 1e-4)
    weight_decay = _safe_float(optimizer_config.get('weight_decay', 1e-4), 1e-4)
    eps = _safe_float(optimizer_config.get('eps', 1e-8), 1e-8)
    betas = optimizer_config.get('betas', [0.9, 0.999])
    if isinstance(betas, list) and len(betas) == 2:
        betas = [_safe_float(b, 0.9 if i == 0 else 0.999) for i, b in enumerate(betas)]
    else:
        betas = [0.9, 0.999]

    optimizer = optim.AdamW(
        model.parameters(),
        lr=lr,
        weight_decay=weight_decay,
        betas=betas,
        eps=eps
    )

    # Scheduler — single source of truth for the epoch count (previously
    # computed twice as num_epochs_int and num_epochs).
    scheduler_config = training_config.get('scheduler', {})
    scheduler_type = scheduler_config.get('type', 'cosine')
    num_epochs = _safe_int(training_config.get('epochs', 50), 50)
    min_lr = _safe_float(scheduler_config.get('min_lr', 1e-6), 1e-6)

    if scheduler_type == 'step':
        step_size = _safe_int(scheduler_config.get('step_size', 30), 30)
        gamma = _safe_float(scheduler_config.get('gamma', 0.1), 0.1)
        scheduler = optim.lr_scheduler.StepLR(
            optimizer,
            step_size=step_size,
            gamma=gamma
        )
    else:
        # 'cosine' and any unrecognized type both use cosine annealing,
        # matching the original fallback behavior.
        scheduler = optim.lr_scheduler.CosineAnnealingLR(
            optimizer,
            T_max=num_epochs,
            eta_min=min_lr
        )

    # Loss function (placeholder — adjust to the actual task).
    criterion = nn.BCEWithLogitsLoss()

    # Training state (possibly restored from a checkpoint below).
    start_epoch = 0
    best_val_loss = float('inf')
    train_losses = []
    val_losses = []

    if args.resume:
        checkpoint = torch.load(args.resume, map_location=device, weights_only=False)

        # Load weights leniently (strict=False) so that checkpoints from a
        # slightly different architecture still partially restore.
        model_state_dict = checkpoint['model_state_dict']
        current_model_state_dict = model.state_dict()

        # Keep only keys that exist in the current model with matching shapes.
        filtered_state_dict = {}
        missing_keys = []
        unexpected_keys = []

        for key, value in model_state_dict.items():
            if key in current_model_state_dict:
                if current_model_state_dict[key].shape == value.shape:
                    filtered_state_dict[key] = value
                else:
                    unexpected_keys.append(f"{key} (shape mismatch: {current_model_state_dict[key].shape} vs {value.shape})")
            else:
                unexpected_keys.append(key)

        for key in current_model_state_dict:
            if key not in model_state_dict:
                missing_keys.append(key)

        model.load_state_dict(filtered_state_dict, strict=False)

        if missing_keys:
            logger.warning(f"加载检查点时缺少以下键（将使用默认初始化）: {missing_keys[:10]}...")
        if unexpected_keys:
            logger.warning(f"检查点中有以下意外的键（将被忽略）: {unexpected_keys[:10]}...")

        # Optimizer/scheduler state may be incompatible — degrade gracefully.
        try:
            optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        except Exception as e:
            logger.warning(f"无法加载优化器状态: {e}，将使用新的优化器状态")

        try:
            scheduler.load_state_dict(checkpoint['scheduler_state_dict'])
        except Exception as e:
            logger.warning(f"无法加载调度器状态: {e}，将使用新的调度器状态")

        start_epoch = checkpoint['epoch'] + 1
        best_val_loss = checkpoint.get('best_val_loss', float('inf'))
        train_losses = checkpoint.get('train_losses', [])
        val_losses = checkpoint.get('val_losses', [])
        logger.info(f"恢复训练: epoch {start_epoch}, 最佳验证损失: {best_val_loss:.4f}")
        logger.info(f"成功加载 {len(filtered_state_dict)}/{len(model_state_dict)} 个模型参数")

    # Training loop
    save_config = training_config.get('save', {})
    save_interval = _safe_int(save_config.get('interval', 10), 10)

    for epoch in range(start_epoch, num_epochs):
        logger.info(f"Epoch {epoch+1}/{num_epochs}")

        # Loss weighting / annotation-only flags re-read each epoch so a
        # live-edited config takes effect (preserves original behavior).
        loss_weights = training_config.get('loss_weights', {})
        train_only_on_annotated = data_config.get('annotations', {}).get('train_only_on_annotated_frames', False)

        # Train
        train_loss = train_epoch(
            model, train_loader, optimizer, criterion, device, logger,
            loss_weights=loss_weights,
            train_only_on_annotated=train_only_on_annotated
        )
        train_losses.append(train_loss)

        # Validate
        val_loss = validate(model, val_loader, criterion, device, logger)
        val_losses.append(val_loss)

        # LR step once per epoch
        scheduler.step()

        logger.info(f"Epoch {epoch+1}: 训练损失={train_loss:.4f}, 验证损失={val_loss:.4f}")

        # Save the best model by validation loss.
        if val_loss < best_val_loss:
            best_val_loss = val_loss
            checkpoint_path = output_dir / 'best_model.pt'
            _save_checkpoint(checkpoint_path, epoch, model, optimizer, scheduler,
                             best_val_loss, train_losses, val_losses, config)
            logger.info(f"保存最佳模型: {checkpoint_path}")

        # Periodic checkpoint.
        if (epoch + 1) % save_interval == 0:
            checkpoint_path = output_dir / f'checkpoint_epoch_{epoch+1}.pt'
            _save_checkpoint(checkpoint_path, epoch, model, optimizer, scheduler,
                             best_val_loss, train_losses, val_losses, config)
            logger.info(f"保存检查点: {checkpoint_path}")

    # BUGFIX: the curve/report section indexes train_losses[0]/[-1] and
    # val_losses[0]/[-1]; when resuming at start_epoch >= num_epochs the loop
    # never runs and the lists can be empty, so guard before plotting.
    if train_losses and val_losses:
        setup_plot_style()
        plot_training_curves(
            train_loss=train_losses,
            val_loss=val_losses,
            output_path=str(output_dir / 'training_curves.png'),
            title='Training Curves'
        )

        # Experiment report
        report_generator = ReportGenerator(output_dir=str(output_dir))
        report_generator.generate_experiment_report(
            experiment_name='unified_prompt_training',
            method_description='多模态智能提示生成系统训练',
            processing_logic='使用统一提示生成器整合NLP、周期性、质量、记忆等模块',
            results={'final_train_loss': train_losses[-1], 'final_val_loss': val_losses[-1]},
            metrics={'best_val_loss': best_val_loss, 'total_epochs': num_epochs},
            interpretations=[
                f'训练损失从 {train_losses[0]:.4f} 降至 {train_losses[-1]:.4f}',
                f'验证损失从 {val_losses[0]:.4f} 降至 {val_losses[-1]:.4f}',
                f'最佳验证损失: {best_val_loss:.4f}'
            ],
            conclusions=[
                '多模态智能提示生成系统训练完成',
                '模型收敛良好，损失持续下降',
                '建议进行进一步评估和测试'
            ],
            figures=[str(output_dir / 'training_curves.png')],
            config=config
        )
    else:
        logger.warning("未记录任何epoch损失，跳过训练曲线与报告生成")

    logger.info("训练完成！")


# Script entry point: run training only when executed directly, not on import.
if __name__ == '__main__':
    main()
