#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
使用 SmolVLADataset 训练 SmolVLA 模型
增强版：添加了更多的错误处理和稳定性改进
"""

import os
import sys
import time
import traceback
import numpy as np
# Disable tokenizers parallelism to silence multiprocessing warnings
os.environ["TOKENIZERS_PARALLELISM"] = "false"

# Hugging Face cache directory.
# Can be customized via the HF_CACHE_DIR environment variable;
# defaults to /home/dm/.cache/my_huggingface.
hf_cache_dir = os.environ.get("HF_CACHE_DIR", "/home/dm/.cache/my_huggingface")

# Point every relevant Hugging Face env var at that cache
os.environ["HF_HOME"] = hf_cache_dir
os.environ["HUGGINGFACE_HUB_CACHE"] = hf_cache_dir  # newer hub versions prefer this one
os.environ["HF_HUB_CACHE"] = hf_cache_dir
os.environ["TRANSFORMERS_CACHE"] = os.path.join(hf_cache_dir, "transformers")
os.environ["HF_DATASETS_CACHE"] = os.path.join(hf_cache_dir, "datasets")

# Make sure the cache directories exist
os.makedirs(hf_cache_dir, exist_ok=True)
os.makedirs(os.path.join(hf_cache_dir, "transformers"), exist_ok=True)
os.makedirs(os.path.join(hf_cache_dir, "datasets"), exist_ok=True)
os.makedirs(os.path.join(hf_cache_dir, "hub"), exist_ok=True)

# Print the cache locations so the setting can be verified
print(f"Hugging Face 缓存目录设置为: {hf_cache_dir}")
print(f"HF_HOME: {os.environ.get('HF_HOME')}")
print(f"TRANSFORMERS_CACHE: {os.environ.get('TRANSFORMERS_CACHE')}")
import argparse
import torch
from torch.utils.data import DataLoader
import logging
from tqdm import tqdm

from lerobot.common.datasets.lerobot_dataset import LeRobotDataset
from lerobot.common.datasets.smolvla_dataset import SmolVLADataset, create_smolvla_dataloader, ACTION
from lerobot.common.policies.smolvla.configuration_smolvla import SmolVLAConfig
from lerobot.common.policies.smolvla.modeling_smolvla import SmolVLAPolicy
from lerobot.configs.types import PolicyFeature, FeatureType

# Configure logging: INFO level with a timestamped format
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)


def parse_args():
    """Parse command-line arguments for SmolVLA training.

    Returns:
        argparse.Namespace: the parsed training configuration.
    """
    parser = argparse.ArgumentParser(description="训练 SmolVLA 模型")
    parser.add_argument("--dataset_path", type=str, default="/home/dm/ydw/lerobot/dataset-fanguo-v2.2",
                        help="数据集路径")
    parser.add_argument("--policy_path", type=str, default="lerobot/smolvla_base",
                        help="策略模型路径")
    parser.add_argument("--batch_size", type=int, default=24,
                        help="批次大小")
    parser.add_argument("--steps", type=int, default=20000,
                        help="训练步数")
    parser.add_argument("--lr", type=float, default=1e-4,
                        help="学习率")
    parser.add_argument("--num_workers", type=int, default=0,
                        help="数据加载线程数（设为0避免多进程崩溃）")
    parser.add_argument("--device", type=str, default="cuda" if torch.cuda.is_available() else "cpu",
                        help="训练设备")
    parser.add_argument("--output_dir", type=str, default="./output",
                        help="输出目录")
    parser.add_argument("--save_steps", type=int, default=2000,
                        help="保存模型的步数间隔")
    # Bug fix: help text previously claimed this was enabled by default,
    # but the default is False.
    parser.add_argument("--use_empty_cameras", action="store_true", default=False,
                        help="是否使用空相机（默认关闭）")
    # Bug fix: the default used to be a hard-coded debug checkpoint path
    # ("./output/checkpoint_step_300.pt"), which silently resumed training
    # from a stale checkpoint whenever that file happened to exist.
    parser.add_argument("--resume_from_checkpoint", type=str, default=None,
                        help="从指定的检查点文件恢复训练")
    # NOTE(review): store_true combined with default=True means the two
    # flags below can never be disabled from the CLI; consider
    # argparse.BooleanOptionalAction if opting out should be possible.
    parser.add_argument("--use_cache", action="store_true", default=True,
                        help="是否使用缓存加速数据加载（默认启用）")
    parser.add_argument("--preprocess_only", action="store_true", default=False,
                        help="仅预处理数据并生成缓存，不进行训练")
    parser.add_argument("--cache_dir", type=str, default=None,
                        help="缓存目录路径，默认为数据集目录下的 .smolvla_cache")
    parser.add_argument("--use_stats_cache", action="store_true", default=True,
                        help="是否使用缓存的数据集统计信息（默认启用）")
    parser.add_argument("--recalculate_stats", action="store_true", default=False,
                        help="强制重新计算数据集统计信息，即使缓存存在")
    return parser.parse_args()


def save_checkpoint(model, optimizer, scheduler, global_step, args, filename):
    """Atomically save a training checkpoint to ``filename``.

    The checkpoint bundles model/optimizer/scheduler state dicts, the
    current global step, and the CLI arguments (for reproducibility).
    It is written to a temporary file first and then renamed, so a crash
    mid-write cannot corrupt an existing checkpoint.

    Raises:
        Exception: re-raised after logging diagnostics if the save fails.
    """
    try:
        # Make sure the destination directory exists (filename may be a
        # bare name with no directory component).
        dir_path = os.path.dirname(filename)
        if dir_path:
            os.makedirs(dir_path, exist_ok=True)
            logger.info(f"确保目录存在: {dir_path}")

        checkpoint = {
            'model_state_dict': model.state_dict(),
            'optimizer_state_dict': optimizer.state_dict(),
            'scheduler_state_dict': scheduler.state_dict(),
            'global_step': global_step,
            'args': vars(args),  # keep the training arguments for reproducibility
        }

        # Write to a temp file, then rename (atomic on the same filesystem).
        temp_filename = filename + '.tmp'
        torch.save(checkpoint, temp_filename)
        os.rename(temp_filename, filename)

        # Bug fix: these messages previously logged the literal text
        # "(unknown)" instead of the actual destination path.
        logger.info(f"检查点已保存到: {filename}")

    except Exception as e:
        logger.error(f"保存检查点失败: {e}")
        logger.error(f"尝试保存到: {filename}")
        logger.error(f"当前工作目录: {os.getcwd()}")
        logger.error(f"目录路径: {os.path.dirname(filename)}")
        logger.error(f"目录是否存在: {os.path.exists(os.path.dirname(filename))}")
        raise


def load_checkpoint(checkpoint_path, model, optimizer, scheduler, device):
    """Restore model/optimizer/scheduler state from a checkpoint file.

    Returns:
        int: the global step stored in the checkpoint.
    """
    logger.info(f"从检查点恢复训练: {checkpoint_path}")
    state = torch.load(checkpoint_path, map_location=device)

    # Restore each stateful component from its corresponding entry.
    restore_plan = (
        (model, 'model_state_dict'),
        (optimizer, 'optimizer_state_dict'),
        (scheduler, 'scheduler_state_dict'),
    )
    for target, key in restore_plan:
        target.load_state_dict(state[key])

    resumed_step = state['global_step']
    logger.info(f"已从步数 {resumed_step} 恢复训练")
    return resumed_step


def train(args):
    """Run the SmolVLA training loop.

    Builds the config, dataloader and model, optionally resumes from a
    checkpoint, computes (or loads cached) dataset statistics, then trains
    for ``args.steps`` optimizer steps.  The loop is defensive: bad batches
    (too small, NaN/Inf data, forward errors, NaN gradients) are logged,
    dumped to disk and skipped; OOM and DataLoader-worker crashes trigger
    an emergency checkpoint and a DataLoader rebuild.
    """
    # Create the output directory
    os.makedirs(args.output_dir, exist_ok=True)
    
    # Build the policy configuration (this re-import is redundant — the
    # same names are imported at the top of the file — but kept as-is)
    from lerobot.configs.types import PolicyFeature, FeatureType
    
    config = SmolVLAConfig(
        max_state_dim=32,
        max_action_dim=32,
        chunk_size=50,
        n_obs_steps=1,
        n_action_steps=50,
        resize_imgs_with_padding=(512, 512),
        empty_cameras=2 if args.use_empty_cameras else 0,
        prefix_length=400,  # fixed prefix length so sequence lengths stay consistent
    )
    
    # Manually register the input features
    if not hasattr(config, 'input_features'):
        config.input_features = {}
    
    # Real camera image keys
    for key in ["head_camera", "left_camera", "right_camera"]:
        config.input_features[key] = PolicyFeature(
            type=FeatureType.VISUAL,
            shape=(3, 480, 640),
        )
    # Proprioceptive state
    config.input_features["state"] = PolicyFeature(
        type=FeatureType.STATE,
        shape=(config.max_state_dim,),
    )
    # Language instruction
    config.input_features["task"] = PolicyFeature(
        type=FeatureType.INSTRUCTION,
        shape=(config.max_instruction_dim,),
    )
    # Register the output features
    if not hasattr(config, 'output_features'):
        config.output_features = {}
    
    # Action feature (robot joint control)
    config.output_features[ACTION] = PolicyFeature(
        type=FeatureType.ACTION,  # use the ACTION type, not CONTINUOUS
        shape=(config.max_action_dim,),  # action dimension taken from the config
    )
    
    # Log the caching configuration
    logger.info(f"缓存设置: use_cache={args.use_cache}, cache_dir={args.cache_dir or '默认'}, preprocess_only={args.preprocess_only}, use_stats_cache={args.use_stats_cache}")
    
    # Empty cameras are configured above via `empty_cameras`
    # config.validate_features()
    
    # Create the dataloader
    logger.info(f"加载数据集: {args.dataset_path}")
    # Tune the DataLoader to avoid multiprocess crashes.
    # NOTE(review): pin_memory is enabled only when num_workers == 0 —
    # usually it is enabled the other way around; confirm this is intended.
    pin_memory_enabled = args.num_workers == 0
    
    # Preprocessing-only runs force num_workers=0 to avoid multiprocess issues
    if args.preprocess_only:
        logger.info("预处理模式：生成数据集缓存文件...")
        preprocess_workers = 0
    else:
        preprocess_workers = args.num_workers
    
    dataloader = create_smolvla_dataloader(
        dataset_path=args.dataset_path,
        config=config,
        batch_size=args.batch_size,
        shuffle=True,
        num_workers=preprocess_workers,
        pin_memory=pin_memory_enabled,
        local_only=True,  # local dataset only, never fetch over the network
        use_cache=args.use_cache,  # cache to speed up loading
        cache_dir=args.cache_dir,  # custom cache directory
    )
    logger.info("注意：数据加载器未设置drop_last=True，将在训练循环中检测并跳过小批次")
    logger.info(f"DataLoader配置: batch_size={args.batch_size}, num_workers={args.num_workers}, pin_memory={pin_memory_enabled}")
    
    # Preprocess-only mode: iterate the dataset once to populate the cache, then exit
    if args.preprocess_only:
        logger.info("开始预处理数据集以生成缓存文件...")
        # Walking the whole dataset triggers cache generation
        with tqdm(total=len(dataloader.dataset), desc="预处理进度") as pbar:
            for i, _ in enumerate(dataloader):
                pbar.update(args.batch_size)
                if (i + 1) % 10 == 0:
                    logger.info(f"已处理 {min((i + 1) * args.batch_size, len(dataloader.dataset))}/{len(dataloader.dataset)} 个样本")
        
        # Compute and cache the dataset statistics.
        # NOTE(review): nothing is actually computed here — the statistics
        # code below is never reached because of the early return; the log
        # message over-promises. Confirm whether stats should be built here.
        logger.info("计算并缓存数据集统计信息...")
        
        logger.info("预处理完成，缓存文件和统计信息已生成！")
        return
    
    # Compute or load the dataset statistics
    stats_cache_dir = args.cache_dir if args.cache_dir else os.path.join(args.dataset_path, '.smolvla_cache')
    os.makedirs(stats_cache_dir, exist_ok=True)
    
    # Name of the statistics cache file
    dataset_name = os.path.basename(args.dataset_path)
    stats_cache_file = os.path.join(stats_cache_dir, f"{dataset_name}_stats.pt")
    
    # Use the cached statistics when allowed and present
    if args.use_stats_cache and os.path.exists(stats_cache_file) and not args.recalculate_stats:
        logger.info(f"加载缓存的数据集统计信息: {stats_cache_file}")
        try:
            dataset_stats = torch.load(stats_cache_file)
            logger.info(f"成功加载数据集统计信息缓存")
        except Exception as e:
            logger.warning(f"加载统计信息缓存失败: {e}，将重新计算统计信息")
            dataset_stats = None
    else:
        dataset_stats = None
    
    # No cache (or recalculation requested): compute the statistics now
    if dataset_stats is None:
        logger.info("计算数据集统计信息...")
        dataset_stats = {}
        
        # Collect per-batch action tensors to build the ACTION statistics
        actions = []
        for batch in tqdm(dataloader, desc="收集 action 数据"):
            if ACTION in batch:
                actions.append(batch[ACTION]) # shape: (batch_size, max_action_dim)
        if actions:
            actions_tensor = torch.cat(actions, dim=0)
            dataset_stats[ACTION] = {
                "mean": actions_tensor.mean(dim=0),
                "std": actions_tensor.std(dim=0) + 1e-8,  # small epsilon to avoid division by zero
            }
            # Persist the statistics to the cache
            logger.info(f"保存数据集统计信息到缓存: {stats_cache_file}")
            try:
                torch.save(dataset_stats, stats_cache_file)
                logger.info("统计信息缓存保存成功")
            except Exception as e:
                logger.warning(f"保存统计信息缓存失败: {e}")
        else:
            # No action data found: fall back to identity normalization
            action_dim = config.output_features[ACTION].shape[0]
            dataset_stats[ACTION] = {
                "mean": torch.zeros(action_dim),
                "std": torch.ones(action_dim),
            }
            logger.info(f"Using default ACTION stats with dim {action_dim}")
    
    # Create the model
    logger.info(f"加载模型: {args.policy_path}")
    
    # Decide whether we resume from a checkpoint
    resume_from_checkpoint = False
    if args.resume_from_checkpoint and os.path.exists(args.resume_from_checkpoint):
        resume_from_checkpoint = True
        
    if resume_from_checkpoint:
        # Resuming: instantiate the model now, the checkpoint is loaded later
        logger.info("将从检查点恢复模型权重，先创建模型实例")
        model = SmolVLAPolicy(config, dataset_stats=dataset_stats)
    else:
        # Not resuming: load the pretrained weights directly
        logger.info(f"从预训练模型加载权重: {args.policy_path}")
        
        # Try the local cache first, fall back to a network download
        try:
            logger.info("尝试从本地加载预训练模型...")
            model = SmolVLAPolicy.from_pretrained(
                args.policy_path,
                config=config,
                local_files_only=True,  # local files only
            )
            logger.info("成功从本地加载预训练模型")
        except Exception as e:
            logger.info(f"本地加载失败: {str(e)}，尝试从网络下载...")
            model = SmolVLAPolicy.from_pretrained(
                args.policy_path,
                config=config,
                local_files_only=False,  # allow downloading from the network
            )
            logger.info("成功从网络下载预训练模型")
        # Install the dataset statistics used for normalization.
        # NOTE(review): this assumes the lerobot normalization modules expose
        # containers named "buffer_<key>" holding mean/std/min/max entries —
        # confirm against the installed lerobot version.
        if hasattr(model, "normalize_targets") and dataset_stats is not None:
            # Update the buffers directly instead of calling a non-existent method
            for key, stats in dataset_stats.items():
                buffer_name = "buffer_" + key.replace(".", "_")
                if hasattr(model.normalize_targets, buffer_name):
                    buffer = getattr(model.normalize_targets, buffer_name)
                    if "mean" in stats and "mean" in buffer:
                        buffer["mean"].data = stats["mean"].clone().to(dtype=torch.float32)
                    if "std" in stats and "std" in buffer:
                        buffer["std"].data = stats["std"].clone().to(dtype=torch.float32)
                    if "min" in stats and "min" in buffer:
                        buffer["min"].data = stats["min"].clone().to(dtype=torch.float32)
                    if "max" in stats and "max" in buffer:
                        buffer["max"].data = stats["max"].clone().to(dtype=torch.float32)
                    
        if hasattr(model, "unnormalize_outputs") and dataset_stats is not None:
            # Same direct buffer update for the output unnormalizer
            for key, stats in dataset_stats.items():
                buffer_name = "buffer_" + key.replace(".", "_")
                if hasattr(model.unnormalize_outputs, buffer_name):
                    buffer = getattr(model.unnormalize_outputs, buffer_name)
                    if "mean" in stats and "mean" in buffer:
                        buffer["mean"].data = stats["mean"].clone().to(dtype=torch.float32)
                    if "std" in stats and "std" in buffer:
                        buffer["std"].data = stats["std"].clone().to(dtype=torch.float32)
                    if "min" in stats and "min" in buffer:
                        buffer["min"].data = stats["min"].clone().to(dtype=torch.float32)
                    if "max" in stats and "max" in buffer:
                        buffer["max"].data = stats["max"].clone().to(dtype=torch.float32)
            
    model.to(args.device)
    
    # Create the optimizer
    optimizer = torch.optim.AdamW(
        model.get_optim_params(),
        lr=args.lr,
        betas=config.optimizer_betas,
        eps=config.optimizer_eps,
        weight_decay=config.optimizer_weight_decay,
    )
    
    # Cosine learning-rate schedule over the full run
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
        optimizer,
        T_max=args.steps,
        eta_min=config.scheduler_decay_lr,
    )
    
    # Restore training state from the checkpoint, if any
    global_step = 0
    if args.resume_from_checkpoint:
        if os.path.exists(args.resume_from_checkpoint):
            logger.info(f"从检查点恢复训练: {args.resume_from_checkpoint}")
            global_step = load_checkpoint(
                args.resume_from_checkpoint, 
                model, 
                optimizer, 
                scheduler, 
                args.device
            )
        else:
            logger.warning(f"检查点文件不存在: {args.resume_from_checkpoint}，将从头开始训练")
    
    # Indices of batches that previously failed, so they can be skipped.
    # NOTE(review): with shuffle=True a batch index maps to different samples
    # every epoch, so this skip list is unreliable — confirm intent.
    processed_batch_indices_file = os.path.join(args.output_dir, "processed_batches.txt")
    processed_batch_indices = set()
    if os.path.exists(processed_batch_indices_file):
        try:
            with open(processed_batch_indices_file, "r") as f:
                for line in f:
                    if line.strip():
                        processed_batch_indices.add(int(line.strip()))
            logger.info(f"已加载 {len(processed_batch_indices)} 个已处理的批次索引")
        except Exception as e:
            logger.warning(f"加载已处理批次索引失败: {e}，将重新开始记录")
    
    # Training loop
    if global_step == 0:
        logger.info("开始新的训练...")
    else:
        logger.info(f"从步数 {global_step} 继续训练...")
    model.train()
    
    # Helper: log current/max allocated and reserved GPU memory (in GiB)
    def log_gpu_memory():
        if torch.cuda.is_available():
            allocated = torch.cuda.memory_allocated() / (1024 ** 3)
            max_allocated = torch.cuda.max_memory_allocated() / (1024 ** 3)
            reserved = torch.cuda.memory_reserved() / (1024 ** 3)
            max_reserved = torch.cuda.max_memory_reserved() / (1024 ** 3)
            logger.info(f"GPU内存: 当前分配 {allocated:.2f}GB, 最大分配 {max_allocated:.2f}GB, 当前保留 {reserved:.2f}GB, 最大保留 {max_reserved:.2f}GB")
    
    # Log the initial GPU memory usage
    log_gpu_memory()
    
    while global_step < args.steps:
        try:
            for batch_idx, batch in enumerate(tqdm(dataloader, desc=f"Epoch {global_step // len(dataloader) + 1}")):
                # Skip batches that previously failed
                if batch_idx in processed_batch_indices:
                    logger.info(f"跳过之前处理过的批次 {batch_idx}")
                    continue
                    
                try:
                    # Timing and bookkeeping for this batch
                    batch_start_time = time.time()
                    logger.info(f"开始处理批次 {batch_idx}, 当前步数: {global_step}")
                    
                    # Validate the batch contents
                    logger.info(f"批次键值: {list(batch.keys())}")
                    has_invalid_data = False
                    
                    # Determine the batch size from the first tensor value
                    first_tensor = next((v for v in batch.values() if isinstance(v, torch.Tensor)), None)
                    if first_tensor is not None:
                        batch_size = first_tensor.shape[0]
                        logger.info(f"批次 {batch_idx} 大小: {batch_size}")
                        
                        # Skip batches that are much smaller than requested
                        min_batch_size = args.batch_size // 2  # minimum acceptable batch size
                        if batch_size < min_batch_size:
                            logger.warning(f"批次 {batch_idx} 大小过小: {batch_size} < {min_batch_size}，跳过此批次")
                            # Dump the batch to disk for later inspection
                            problem_batch_file = os.path.join(args.output_dir, f"small_batch_{batch_idx}.pt")
                            torch.save(batch, problem_batch_file)
                            logger.info(f"已保存小批次到: {problem_batch_file}")
                            # Remember this batch index as handled
                            processed_batch_indices.add(batch_idx)
                            with open(processed_batch_indices_file, "a") as f:
                                f.write(f"{batch_idx}\n")
                            continue
                    
                    # Move tensors to the device (non-blocking to improve GPU utilization)
                    for key, value in batch.items():
                        if isinstance(value, torch.Tensor):
                            # Detect NaN / Inf values
                            if torch.isnan(value).any() or torch.isinf(value).any():
                                logger.warning(f"批次 {batch_idx} 中键 {key} 包含NaN或无穷大值!")
                                has_invalid_data = True
                                # Attempt to repair NaN/Inf entries by zeroing them
                                nan_mask = torch.isnan(value)
                                inf_mask = torch.isinf(value)
                                if nan_mask.any() or inf_mask.any():
                                    logger.warning(f"尝试修复键 {key} 中的NaN/Inf值")
                                    fixed_value = value.clone()
                                    if nan_mask.any():
                                        fixed_value[nan_mask] = 0.0
                                    if inf_mask.any():
                                        fixed_value[inf_mask] = 0.0
                                    value = fixed_value
                                    logger.info(f"已修复键 {key} 中的NaN/Inf值")
                            
                            # Log tensor metadata
                            logger.info(f"键 {key}: 形状 {value.shape}, 类型 {value.dtype}, 设备 {value.device}")
                            batch[key] = value.to(args.device, non_blocking=True)
                    
                    # Invalid data was found (and zero-repaired above); keep going
                    if has_invalid_data:
                        logger.warning(f"批次 {batch_idx} 包含无效数据，但尝试继续处理...")
                    
                    # Forward pass, guarding against runtime errors
                    try:
                        logger.info(f"执行前向传播...")
                        loss, loss_dict = model.forward(batch)
                        
                        # Reject non-finite losses
                        if torch.isnan(loss).any() or torch.isinf(loss).any():
                            logger.error(f"损失值无效: {loss.item()}，跳过此批次")
                            # Dump the batch to disk for later inspection
                            problem_batch_file = os.path.join(args.output_dir, f"problem_batch_{global_step}.pt")
                            torch.save(batch, problem_batch_file)
                            logger.info(f"已保存问题批次到: {problem_batch_file}")
                            # Remember this batch index as handled
                            processed_batch_indices.add(batch_idx)
                            with open(processed_batch_indices_file, "a") as f:
                                f.write(f"{batch_idx}\n")
                            continue
                            
                        logger.info(f"损失值: {loss.item()}, 损失字典: {loss_dict}")
                    except RuntimeError as e:
                        logger.error(f"前向传播出错: {e}")
                        logger.error(traceback.format_exc())
                        # Dump the failing batch
                        problem_batch_file = os.path.join(args.output_dir, f"problem_batch_{global_step}.pt")
                        torch.save(batch, problem_batch_file)
                        logger.info(f"已保存问题批次到: {problem_batch_file}")
                        # Remember this batch index as handled
                        processed_batch_indices.add(batch_idx)
                        with open(processed_batch_indices_file, "a") as f:
                            f.write(f"{batch_idx}\n")
                        continue
                    
                    # Backward pass
                    optimizer.zero_grad()
                    logger.info(f"执行反向传播...")
                    loss.backward()
                    
                    # Check every gradient for NaN / Inf
                    has_nan_grad = False
                    for name, param in model.named_parameters():
                        if param.grad is not None:
                            if torch.isnan(param.grad).any():
                                logger.error(f"参数 {name} 的梯度包含NaN!")
                                has_nan_grad = True
                            elif torch.isinf(param.grad).any():
                                logger.error(f"参数 {name} 的梯度包含Inf!")
                                has_nan_grad = True
                    
                    # Skip the optimizer step when gradients are corrupted
                    if has_nan_grad:
                        logger.warning(f"跳过包含NaN/Inf梯度的批次 {batch_idx}")
                        # Remember this batch index as handled
                        processed_batch_indices.add(batch_idx)
                        with open(processed_batch_indices_file, "a") as f:
                            f.write(f"{batch_idx}\n")
                        continue
                    
                    # Gradient clipping
                    logger.info(f"执行梯度裁剪...")
                    torch.nn.utils.clip_grad_norm_(model.parameters(), config.optimizer_grad_clip_norm)
                    
                    # Parameter and learning-rate updates
                    optimizer.step()
                    scheduler.step()
                    
                    # Periodic logging
                    if global_step % 10 == 0:
                        batch_time = time.time() - batch_start_time
                        logger.info(f"Step {global_step}/{args.steps}, Loss: {loss.item():.4f}, LR: {scheduler.get_last_lr()[0]:.6f}, 批次处理时间: {batch_time:.2f}秒")
                        # GPU memory usage every 10 steps
                        log_gpu_memory()
                    
                    # Periodic checkpointing
                    if global_step % args.save_steps == 0 and global_step > 0:
                        checkpoint_path = os.path.join(args.output_dir, f"checkpoint_step_{global_step}.pt")
                        save_checkpoint(model, optimizer, scheduler, global_step, args, checkpoint_path)
                        
                        # Also save the bare model weights (for compatibility)
                        model_path = os.path.join(args.output_dir, f"model_step_{global_step}.pt")
                        os.makedirs(os.path.dirname(model_path), exist_ok=True)  # make sure the directory exists
                        torch.save(model.state_dict(), model_path)
                        logger.info(f"模型权重已保存到: {model_path}")
                    
                    # Successfully processed batches are NOT added to
                    # processed_batch_indices; only failing batches are
                    # recorded so they are skipped on later passes
                    
                    global_step += 1
                    if global_step >= args.steps:
                        break
                        
                except Exception as e:
                    logger.error(f"处理批次数据时出错: {e}")
                    logger.error(traceback.format_exc())
                    logger.info("跳过当前批次，继续下一个...")
                    
                    # Remember this batch index as handled (failed)
                    processed_batch_indices.add(batch_idx)
                    with open(processed_batch_indices_file, "a") as f:
                        f.write(f"{batch_idx}\n")
                    continue
                    
        except RuntimeError as e:
            error_msg = str(e)
            logger.error(f"RuntimeError: {error_msg}")
            logger.error(traceback.format_exc())
            
            if "CUDA out of memory" in error_msg:
                logger.error("CUDA内存不足，尝试减小批次大小并继续训练...")
                # Save an emergency checkpoint first
                emergency_checkpoint_path = os.path.join(args.output_dir, f"emergency_checkpoint_step_{global_step}.pt")
                save_checkpoint(model, optimizer, scheduler, global_step, args, emergency_checkpoint_path)
                logger.info(f"已保存紧急检查点: {emergency_checkpoint_path}")
                
                # Halve the batch size (never below 1)
                args.batch_size = max(1, args.batch_size // 2)
                logger.info(f"减小批次大小至: {args.batch_size}")
                
                # Rebuild the DataLoader with the smaller batch size
                dataloader = create_smolvla_dataloader(
                    dataset_path=args.dataset_path,
                    config=config,
                    batch_size=args.batch_size,
                    shuffle=True,
                    num_workers=0,  # force single-process loading
                    pin_memory=False,  # disable pin_memory
                    local_only=True,
                )
                logger.info("已重新创建DataLoader，继续训练...")
                continue
                
            elif "DataLoader worker" in error_msg:
                logger.error(f"DataLoader worker 崩溃: {error_msg}")
                logger.info("尝试重新创建 DataLoader...")
                # Rebuild the DataLoader in single-process mode
                dataloader = create_smolvla_dataloader(
                    dataset_path=args.dataset_path,
                    config=config,
                    batch_size=args.batch_size,
                    shuffle=True,
                    num_workers=0,  # force single-process loading
                    pin_memory=False,  # disable pin_memory
                    local_only=True,
                )
                logger.info("DataLoader 重新创建成功，继续训练...")
                continue
            elif "非法指令" in error_msg or "Illegal instruction" in error_msg:
                logger.error("检测到非法指令错误，可能是硬件兼容性问题")
                # Try to save an emergency checkpoint
                emergency_checkpoint_path = os.path.join(args.output_dir, f"emergency_checkpoint_step_{global_step}.pt")
                try:
                    save_checkpoint(model, optimizer, scheduler, global_step, args, emergency_checkpoint_path)
                    logger.info(f"已保存紧急检查点: {emergency_checkpoint_path}")
                except Exception as save_err:
                    logger.error(f"保存紧急检查点失败: {save_err}")
                
                # Log system information for diagnosis
                logger.error("系统信息:")
                logger.error(f"Python版本: {sys.version}")
                logger.error(f"PyTorch版本: {torch.__version__}")
                if torch.cuda.is_available():
                    logger.error(f"CUDA版本: {torch.version.cuda}")
                    logger.error(f"GPU: {torch.cuda.get_device_name(0)}")
                
                # Suggest possible fixes to the user
                logger.error("建议解决方案:")
                logger.error("1. 检查CUDA和PyTorch版本兼容性")
                logger.error("2. 尝试降低批次大小")
                logger.error("3. 禁用某些优化选项")
                logger.error("4. 检查GPU驱动是否最新")
                
                # This error is usually fatal: re-raise and exit
                raise
            else:
                logger.error(f"训练过程中出现未知错误: {error_msg}")
                raise
        except Exception as e:
            logger.error(f"训练过程中出现异常: {e}")
            logger.error(traceback.format_exc())
            
            # Save an emergency checkpoint before propagating the error
            try:
                emergency_checkpoint_path = os.path.join(args.output_dir, f"emergency_checkpoint_step_{global_step}.pt")
                save_checkpoint(model, optimizer, scheduler, global_step, args, emergency_checkpoint_path)
                logger.info(f"已保存紧急检查点: {emergency_checkpoint_path}")
            except Exception as save_err:
                logger.error(f"保存紧急检查点失败: {save_err}")
                
            raise
    
    # Save the final checkpoint and model weights
    final_checkpoint_path = os.path.join(args.output_dir, "checkpoint_final.pt")
    save_checkpoint(model, optimizer, scheduler, global_step, args, final_checkpoint_path)
    
    model_path = os.path.join(args.output_dir, "model_final.pt")
    os.makedirs(os.path.dirname(model_path), exist_ok=True)  # make sure the directory exists
    torch.save(model.state_dict(), model_path)
    logger.info(f"最终模型已保存到: {model_path}")


if __name__ == "__main__":
    # One timestamped log file per run under ./logs
    log_file = os.path.join("./logs", f"train_{time.strftime('%Y%m%d_%H%M%S')}.log")
    os.makedirs(os.path.dirname(log_file), exist_ok=True)
    
    # Mirror log output to both the file and the console
    file_handler = logging.FileHandler(log_file)
    file_handler.setFormatter(logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
    logger.addHandler(file_handler)
    
    logger.info(f"开始训练，日志文件: {log_file}")
    
    try:
        # Log system information
        logger.info("系统信息:")
        logger.info(f"Python版本: {sys.version}")
        logger.info(f"PyTorch版本: {torch.__version__}")
        if torch.cuda.is_available():
            logger.info(f"CUDA版本: {torch.version.cuda}")
            logger.info(f"GPU: {torch.cuda.get_device_name(0)}")
            logger.info(f"可用GPU数量: {torch.cuda.device_count()}")
            logger.info(f"当前GPU: {torch.cuda.current_device()}")
        
        args = parse_args()
        logger.info(f"训练参数: {vars(args)}")
        
        # Enable automatic garbage collection to reduce memory leaks
        # (gc is on by default in CPython; this is a no-op safeguard)
        import gc
        gc.enable()
        
        train(args)
        
    except Exception as e:
        logger.error(f"训练失败: {e}")
        logger.error(traceback.format_exc())
        sys.exit(1)
