import os
import sys
import time
import torch
from contextlib import nullcontext
# NOTE(review): GradScaler is imported but never referenced in this module
# (the trainer builds its scaler via torch.cuda.amp.GradScaler directly) —
# confirm before removing.
from torch.cuda.amp import GradScaler

# Make the project root importable (three directory levels up from this file).
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))

from core.trainer import BaseTrainer

# Optional distributed-training backends: record availability instead of
# failing hard, so single-process training still works without them.
try:
    from accelerate import Accelerator
    ACCELERATE_AVAILABLE = True
except ImportError:
    ACCELERATE_AVAILABLE = False
    print("Accelerate is not available. Install it using 'pip install accelerate' for distributed training.")

try:
    import deepspeed
    DEEPSPEED_AVAILABLE = True
except ImportError:
    DEEPSPEED_AVAILABLE = False
    print("DeepSpeed is not available. Install it using 'pip install deepspeed' for advanced distributed training.")


class PretrainTrainer(BaseTrainer):
    """Pre-training trainer.

    Assembles pluggable components (model, dataset, optimizer, scheduler,
    loss function, checkpointer) and runs the pre-training loop.  Four
    launch modes are supported, selected via the config keys
    ``use_torchrun`` / ``use_deepspeed`` / ``use_accelerate``:
    plain single-process PyTorch, TorchRun (DDP), DeepSpeed, and
    HuggingFace Accelerate.
    """

    def _init_components(self):
        """Initialise the model, dataset, optimizer and related components."""
        self._log_if_rank0(f"[INFO] 开始初始化模型组件")
        # Model plugin.
        self._log_if_rank0(f"[INFO] 加载模型插件: {self.config.get('model_plugin', 'poxiao')}")
        model_plugin = self._load_plugin('models', self.config.get('model_plugin', 'poxiao'))
        self._log_if_rank0(f"[INFO] 创建模型实例")
        self.model = model_plugin.create_model(self.config)

        # Report the trainable parameter count (in millions).
        params_count = model_plugin.get_model_params_count(self.model)
        self._log_if_rank0(f'PoXiao-LLM可训练总参数量：{params_count / 1e6:.3f} 百万')

        # Print the model structure on rank 0 only.
        if not self.ddp or self._safe_get_rank() == 0:
            self._log_if_rank0("=== 模型结构 ===")
            self._log_if_rank0(str(self.model))

        # Launch-mode flags.
        use_accelerate = self.config.get('use_accelerate', False)
        use_torchrun = self.config.get('use_torchrun', False)
        use_deepspeed = self.config.get('use_deepspeed', False)

        # When no mode was requested, auto-enable distributed training
        # (via Accelerate) if more than one GPU is visible.
        if not use_accelerate and not use_torchrun and not use_deepspeed:
            if torch.cuda.is_available() and torch.cuda.device_count() > 1:
                self._log_if_rank0(f"[INFO] 检测到 {torch.cuda.device_count()} 个GPU，自动启用分布式训练")
                use_accelerate = True
                self.config['use_accelerate'] = True

        self._log_if_rank0(f"[INFO] 设备配置 - Accelerate: {use_accelerate}, TorchRun: {use_torchrun}, DeepSpeed: {use_deepspeed}")

        # Device placement / model wrapping, per launch mode.
        if use_torchrun:
            self._log_if_rank0(f"[INFO] 使用TorchRun启动插件")
            from plugins.torchrun_launcher import TorchRunLauncherPlugin
            self.torchrun_launcher = TorchRunLauncherPlugin(self.config)
            device = self.torchrun_launcher.setup_distributed()
            if device:
                self._log_if_rank0(f"[INFO] 将模型移动到分布式设备: {device}")
                self.model = self.model.to(device)
            self._log_if_rank0(f"[INFO] 包装模型以支持分布式训练")
            self.model = self.torchrun_launcher.wrap_model(self.model)
        elif use_deepspeed and DEEPSPEED_AVAILABLE:
            # DeepSpeed handles placement when the engine is created below.
            self._log_if_rank0(f"[INFO] 使用DeepSpeed启动插件")
            from plugins.deepspeed_launcher import DeepSpeedLauncherPlugin
            self.deepspeed_launcher = DeepSpeedLauncherPlugin(self.config)
        elif use_accelerate and ACCELERATE_AVAILABLE:
            self._log_if_rank0(f"[INFO] 使用Accelerate，模型设备将在Accelerator.prepare中处理")
        else:
            # Single-process mode.  This is also the fallback when a
            # requested backend is not installed; previously a config with
            # use_deepspeed=True but no DeepSpeed left the model on CPU.
            self._log_if_rank0(f"[INFO] 将模型移动到设备: {self.device}")
            self.model = self.model.to(self.device)

        # Dataset plugin: dataset + dataloader.
        self._log_if_rank0(f"[INFO] 加载数据集插件: {self.config.get('dataset_plugin', 'pretrain')}")
        dataset_plugin = self._load_plugin('datasets', self.config.get('dataset_plugin', 'pretrain'))
        self._log_if_rank0(f"[INFO] 创建训练数据集")
        self.train_dataset = dataset_plugin.create_dataset(self.config)
        self._log_if_rank0(f"[INFO] 创建训练数据加载器")
        self.train_loader = dataset_plugin.create_dataloader(self.train_dataset, self.config)

        # Optimizer plugin.
        self._log_if_rank0(f"[INFO] 加载优化器插件: {self.config.get('optimizer_plugin', 'adamw')}")
        optimizer_plugin = self._load_plugin('optimizers', self.config.get('optimizer_plugin', 'adamw'))
        self._log_if_rank0(f"[INFO] 创建优化器")
        self.optimizer = optimizer_plugin.create_optimizer(self.model, self.config)

        # DeepSpeed wraps both model and optimizer into its engine.
        if use_deepspeed and DEEPSPEED_AVAILABLE:
            self._log_if_rank0(f"[INFO] 使用DeepSpeed准备模型和优化器")
            self.model, self.optimizer = self.deepspeed_launcher.prepare_model_and_optimizer(self.model, self.optimizer)
            self._log_if_rank0(f"[INFO] DeepSpeed模型和优化器准备完成")

        # Scheduler, loss and checkpointer plugins.
        self._log_if_rank0(f"[INFO] 加载学习率调度器插件: {self.config.get('scheduler_plugin', 'cosine')}")
        self.scheduler_plugin = self._load_plugin('schedulers', self.config.get('scheduler_plugin', 'cosine'))

        self._log_if_rank0(f"[INFO] 加载损失函数插件: {self.config.get('loss_plugin', 'cross_entropy')}")
        self.loss_plugin = self._load_plugin('losses', self.config.get('loss_plugin', 'cross_entropy'))

        self._log_if_rank0(f"[INFO] 加载保存点插件: {self.config.get('checkpointer_plugin', 'early_stopping')}")
        self.checkpointer_plugin = self._load_plugin('checkpointers', self.config.get('checkpointer_plugin', 'early_stopping'))

        # Mixed precision setup (kept consistent with MiniMind).
        self._log_if_rank0(f"[INFO] 初始化混合精度训练")
        device_type = "cuda" if "cuda" in str(self.device) else "cpu"
        dtype = self.config.get('dtype', 'bfloat16')

        if use_accelerate and ACCELERATE_AVAILABLE:
            # Accelerate manages autocast internally; disable the external one.
            self.ctx = nullcontext()
        else:
            self.ctx = nullcontext() if device_type == "cpu" else torch.amp.autocast(device_type=device_type, dtype=getattr(torch, dtype, torch.bfloat16))

        self._log_if_rank0(f"[INFO] 设备类型: {device_type}, 数据类型: {dtype}")

        # GradScaler only for the plain PyTorch path; DeepSpeed and
        # Accelerate manage mixed precision themselves.
        if use_deepspeed and DEEPSPEED_AVAILABLE:
            self.scaler = None
        elif use_accelerate and ACCELERATE_AVAILABLE:
            self.scaler = None
        else:
            # NOTE(review): scaling is enabled for bfloat16 as well to stay
            # consistent with MiniMind, although bf16 does not strictly
            # require loss scaling.
            self.scaler = torch.cuda.amp.GradScaler(enabled=(dtype in ['float16', 'bfloat16']))

        # Accelerate engine creation and prepare().
        if use_accelerate and ACCELERATE_AVAILABLE:
            self._log_if_rank0(f"[INFO] 初始化Accelerate")
            from plugins.accelerate_launcher import AccelerateLauncherPlugin
            self.accelerate_launcher = AccelerateLauncherPlugin(self.config)
            self.accelerator = self.accelerate_launcher.prepare_accelerator()

            self._log_if_rank0(f"[INFO] 使用Accelerator准备模型、优化器和数据加载器")
            self.model, self.optimizer, self.train_loader = self.accelerator.prepare(
                self.model, self.optimizer, self.train_loader
            )

            self._log_if_rank0(f"[INFO] Accelerate初始化完成")
        self._log_if_rank0(f"[INFO] 模型组件初始化完成")

    def _load_plugin(self, plugin_type, plugin_name):
        """Resolve and instantiate a plugin by type and name.

        Checkpointer plugins receive ``self.config``; all other plugins
        are constructed without arguments.

        Raises:
            ValueError: for an unknown (plugin_type, plugin_name) pair.
        """
        self._log_if_rank0(f"[INFO] 加载插件 - 类型: {plugin_type}, 名称: {plugin_name}")
        # Simplified plugin resolution; imports are lazy so that only the
        # selected plugin's dependencies are loaded.
        if plugin_type == 'models' and plugin_name == 'poxiao':
            from plugins.models.poxiao_model import PoXiaoModelPlugin
            return PoXiaoModelPlugin()
        elif plugin_type == 'datasets' and plugin_name == 'pretrain':
            from plugins.datasets.pretrain_dataset import PretrainDatasetPlugin
            return PretrainDatasetPlugin()
        elif plugin_type == 'optimizers' and plugin_name == 'adamw':
            from plugins.optimizers.adamw_optimizer import AdamWOptimizerPlugin
            return AdamWOptimizerPlugin()
        elif plugin_type == 'losses' and plugin_name == 'cross_entropy':
            from plugins.losses.cross_entropy_loss import CrossEntropyLossPlugin
            return CrossEntropyLossPlugin()
        elif plugin_type == 'schedulers' and plugin_name == 'cosine':
            from plugins.schedulers.cosine_scheduler import CosineSchedulerPlugin
            return CosineSchedulerPlugin()
        elif plugin_type == 'schedulers' and plugin_name == 'linear':
            from plugins.schedulers.linear_scheduler import LinearSchedulerPlugin
            return LinearSchedulerPlugin()
        elif plugin_type == 'checkpointers' and plugin_name == 'early_stopping':
            # Absolute imports (matching every other plugin above) ensure the
            # checkpointer classes are the same objects as those imported in
            # _is_sample_printing_checkpointer; the previous relative imports
            # could load a second copy of the module under a different name,
            # silently breaking isinstance checks.
            from plugins.checkpointers.early_stopping_checkpointer import EarlyStoppingCheckpointerPlugin
            return EarlyStoppingCheckpointerPlugin(self.config)
        elif plugin_type == 'checkpointers' and plugin_name == 'noop':
            from plugins.checkpointers.noop_checkpointer import NoopCheckpointerPlugin
            return NoopCheckpointerPlugin(self.config)
        elif plugin_type == 'checkpointers' and plugin_name == 'sample_printing':
            from plugins.checkpointers.sample_printing_checkpointer import SamplePrintingCheckpointerPlugin
            return SamplePrintingCheckpointerPlugin(self.config)
        else:
            raise ValueError(f"Unknown plugin: {plugin_type}.{plugin_name}")

    def _grads_have_nan_or_inf(self):
        """Return True when any parameter gradient contains NaN or Inf."""
        for p in self.model.parameters():
            if p.grad is not None and (torch.isnan(p.grad).any() or torch.isinf(p.grad).any()):
                return True
        return False

    def _is_sample_printing_checkpointer(self):
        """Return True when the active checkpointer is the sample-printing plugin."""
        try:
            from plugins.checkpointers.sample_printing_checkpointer import SamplePrintingCheckpointerPlugin
        except ImportError:
            return False
        return isinstance(self.checkpointer_plugin, SamplePrintingCheckpointerPlugin)

    def _load_tokenizer(self):
        """Load the tokenizer for sample printing.

        Tries, in order: the configured ``tokenizer_path``, the project's
        local ``model`` directory, and finally the stock ``gpt2`` tokenizer.
        """
        from transformers import AutoTokenizer
        tokenizer_path = self.config.get('tokenizer_path', '../model/')
        if os.path.exists(tokenizer_path):
            return AutoTokenizer.from_pretrained(tokenizer_path)
        default_tokenizer_path = os.path.join(
            os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'model')
        if os.path.exists(default_tokenizer_path):
            return AutoTokenizer.from_pretrained(default_tokenizer_path)
        return AutoTokenizer.from_pretrained('gpt2')

    def _train_epoch(self, epoch):
        """Train a single epoch.

        Returns:
            bool: True when the checkpointer requested early stopping,
            otherwise False.
        """
        self._log_if_rank0(f"[INFO] 开始训练第 {epoch + 1} 轮")
        self.model.train()
        total_loss = 0
        start_time = time.time()
        iter_per_epoch = len(self.train_loader)
        self._log_if_rank0(f"[INFO] 本轮迭代次数: {iter_per_epoch}")

        # Launch-mode flags (mirrors _init_components).
        use_accelerate = self.config.get('use_accelerate', False)
        use_torchrun = self.config.get('use_torchrun', False)
        use_deepspeed = self.config.get('use_deepspeed', False)

        # Hoist loop-invariant config lookups.
        verbose_logging = self.config.get('verbose_logging', False)
        log_interval = self.config.get('log_interval', 100)
        accumulation_steps = self.config.get('accumulation_steps', 1)
        grad_clip = self.config.get('grad_clip', 1.0)

        # Module-level token counter shared across the framework.
        import core.trainer

        for step, batch in enumerate(self.train_loader):
            # Emit [DEBUG] messages only on log steps when verbose logging is on.
            log_debug = verbose_logging and step % log_interval == 0
            if log_debug:
                self._log_if_rank0(f"[DEBUG] 处理批次 {step + 1}/{iter_per_epoch}")

            if not (use_accelerate and ACCELERATE_AVAILABLE) and not use_torchrun and not use_deepspeed:
                # Single-process mode: move tensors to the device ourselves.
                if log_debug:
                    self._log_if_rank0(f"[DEBUG] 移动数据到设备: {self.device}")
                input_ids = batch['input_ids'].to(self.device)
                labels = batch['labels'].to(self.device)
                loss_mask = batch['loss_mask'].to(self.device)
            else:
                # The distributed framework handles data placement.
                if log_debug:
                    self._log_if_rank0(f"[DEBUG] 使用分布式训练框架处理数据")
                input_ids = batch['input_ids']
                labels = batch['labels']
                loss_mask = batch['loss_mask']

            # Count every token in the batch towards the global total.
            batch_tokens = input_ids.numel()
            if log_debug:
                self._log_if_rank0(f"[DEBUG] 当前批次token数量: {batch_tokens}")
            core.trainer.total_tokens_processed += batch_tokens

            # Custom learning-rate schedule; DeepSpeed schedules internally.
            if self.config.get('self_schedule', True):
                if log_debug:
                    self._log_if_rank0(f"[DEBUG] 应用自定义学习率调度")
                lr = self.scheduler_plugin.get_lr(
                    epoch * iter_per_epoch + step,
                    self.config.get('epochs', 1) * iter_per_epoch,
                    self.config.get('learning_rate', 5e-4)
                )
                if not (use_deepspeed and DEEPSPEED_AVAILABLE):
                    for param_group in self.optimizer.param_groups:
                        param_group['lr'] = lr
                if log_debug:
                    self._log_if_rank0(f"[DEBUG] 设置学习率: {lr}")

            # Forward pass under the configured autocast context.
            if log_debug:
                self._log_if_rank0(f"[DEBUG] 执行前向传播")
            with self.ctx:
                outputs = self.model(input_ids=input_ids, labels=labels)
                if hasattr(outputs, 'logits'):
                    if log_debug:
                        self._log_if_rank0(f"[DEBUG] 使用标准损失计算")
                    # Token-level cross entropy weighted by loss_mask
                    # (same formulation as MiniMind).
                    loss_fct = torch.nn.CrossEntropyLoss(reduction='none')
                    loss = loss_fct(
                        outputs.logits.view(-1, outputs.logits.size(-1)),
                        labels.view(-1)
                    ).view(labels.size())
                    loss = (loss * loss_mask).sum() / loss_mask.sum()
                else:
                    if log_debug:
                        self._log_if_rank0(f"[DEBUG] 使用模型默认损失")
                    loss = outputs.loss

                # Auxiliary loss (e.g. MoE load balancing), when present.
                if hasattr(outputs, 'aux_loss') and outputs.aux_loss > 0:
                    if log_debug:
                        self._log_if_rank0(f"[DEBUG] 添加辅助损失: {outputs.aux_loss}")
                    loss = loss + outputs.aux_loss

            # Non-finite loss: skip the step in every launch mode (this was
            # previously duplicated inside each backward branch).
            if torch.isnan(loss) or torch.isinf(loss):
                self._log_if_rank0(f"[WARNING] 检测到NaN或Inf损失，跳过此step: {step}，损失值: {loss.item()}")
                self.optimizer.zero_grad()
                continue

            # Track the raw (un-divided) loss on EVERY step.  Fixes two bugs:
            # loss_val/avg_loss could be read before assignment by the
            # checkpointer when an early step was skipped, and the running
            # average previously only accumulated on log steps while still
            # dividing by (step + 1).
            loss_val = loss.item()
            total_loss += loss_val
            avg_loss = total_loss / (step + 1)

            if log_debug:
                self._log_if_rank0(f"[DEBUG] 梯度累积步数: {accumulation_steps}")

            # Backward + optimizer step, per launch mode.
            if use_deepspeed and DEEPSPEED_AVAILABLE:
                if log_debug:
                    self._log_if_rank0(f"[DEBUG] 使用DeepSpeed进行反向传播")
                self.model.backward(loss)
                if (step + 1) % accumulation_steps == 0:
                    if log_debug:
                        self._log_if_rank0(f"[DEBUG] 执行梯度更新步骤")
                    if self._grads_have_nan_or_inf():
                        self._log_if_rank0(f"[WARNING] 检测到NaN或Inf梯度，跳过此step: {step}")
                        self.optimizer.zero_grad()
                        continue
                    # DeepSpeed applies gradient clipping internally.
                    self.model.step()
            elif use_accelerate and ACCELERATE_AVAILABLE:
                if log_debug:
                    self._log_if_rank0(f"[DEBUG] 使用Accelerate进行反向传播")
                # No manual loss scaling (consistent with MiniMind);
                # Accelerate handles mixed precision internally.
                self.accelerator.backward(loss)
                if (step + 1) % accumulation_steps == 0:
                    if log_debug:
                        self._log_if_rank0(f"[DEBUG] 执行梯度更新步骤")
                    if self._grads_have_nan_or_inf():
                        self._log_if_rank0(f"[WARNING] 检测到NaN或Inf梯度，跳过此step: {step}")
                        self.optimizer.zero_grad()
                        continue
                    if self.accelerator.sync_gradients:
                        if log_debug:
                            self._log_if_rank0(f"[DEBUG] 执行梯度裁剪")
                        self.accelerator.clip_grad_norm_(self.model.parameters(), grad_clip)
                    self.optimizer.step()
                    self.optimizer.zero_grad()
            else:
                if log_debug:
                    self._log_if_rank0(f"[DEBUG] 使用标准PyTorch进行反向传播")
                # Divide so accumulated gradients average rather than sum
                # (consistent with MiniMind).
                loss = loss / accumulation_steps
                if self.scaler is not None:
                    self.scaler.scale(loss).backward()
                else:
                    loss.backward()

                if (step + 1) % accumulation_steps == 0:
                    if self.scaler is not None:
                        self.scaler.unscale_(self.optimizer)

                    if self._grads_have_nan_or_inf():
                        self._log_if_rank0(f"[WARNING] 检测到NaN或Inf梯度，跳过此step: {step}")
                        if self.scaler is not None:
                            # GradScaler.step() skips the optimizer when
                            # unscale_ found inf/nan grads; update() then
                            # lowers the loss scale.
                            self.scaler.step(self.optimizer)
                            self.scaler.update()
                        # Fix: previously optimizer.step() was also called
                        # here in the no-scaler case, applying the NaN
                        # gradients to the weights instead of skipping.
                        self.optimizer.zero_grad(set_to_none=True)
                        continue

                    torch.nn.utils.clip_grad_norm_(self.model.parameters(), grad_clip)

                    if self.scaler is not None:
                        self.scaler.step(self.optimizer)
                        self.scaler.update()
                    else:
                        self.optimizer.step()

                    self.optimizer.zero_grad(set_to_none=True)

            # Periodic console / TensorBoard / WandB logging.
            if step % log_interval == 0:
                if verbose_logging:
                    self._log_if_rank0(f"[DEBUG] 记录训练日志")
                spend_time = time.time() - start_time
                # Estimated minutes remaining in this epoch (MiniMind formula).
                epoch_time = spend_time / (step + 1) * iter_per_epoch // 60 - spend_time // 60

                # Current learning rate; DeepSpeed exposes get_lr() on its engine.
                if use_deepspeed and DEEPSPEED_AVAILABLE and hasattr(self.optimizer, 'get_lr'):
                    current_lr = self.optimizer.get_lr()[0]
                else:
                    current_lr = self.optimizer.param_groups[-1]['lr']

                tokens_processed = core.trainer.total_tokens_processed

                if not self.ddp or self._safe_get_rank() == 0:
                    print(
                        'Epoch:[{}/{}]({}/{}) loss:{:.3f}  avg_loss:{:.3f}  lr:{:.12f} epoch_Time:{}min: tokens:{:,}'.format(
                            epoch + 1,
                            self.config.get('epochs', 1),
                            step,
                            iter_per_epoch,
                            loss_val,
                            avg_loss,
                            current_lr,
                            epoch_time,
                            tokens_processed
                        )
                    )

                if self.tensorboard_writer is not None and (not self.ddp or self._safe_get_rank() == 0):
                    if log_debug:
                        self._log_if_rank0(f"[DEBUG] 记录到TensorBoard")
                    self.tensorboard_writer.add_scalar('Loss/loss', loss_val, self.global_step)
                    self.tensorboard_writer.add_scalar('Loss/avg_loss', avg_loss, self.global_step)
                    self.tensorboard_writer.add_scalar('LearningRate/lr', current_lr, self.global_step)
                    self.tensorboard_writer.add_scalar('Time/epoch_Time', epoch_time, self.global_step)
                    self.tensorboard_writer.add_scalar('Tokens/processed', tokens_processed, self.global_step)

                if self.wandb_logger is not None and (not self.ddp or self._safe_get_rank() == 0):
                    if log_debug:
                        self._log_if_rank0(f"[DEBUG] 记录到WandB")
                    self.wandb_logger.log({
                        "loss": loss_val,
                        "lr": current_lr,
                        "epoch_Time": epoch_time,
                        "tokens_processed": tokens_processed
                    })

            # Checkpointing decided by the checkpointer plugin (e.g. early stopping).
            if self.checkpointer_plugin.should_save_checkpoint(loss_val, avg_loss, epoch, step) and (not self.ddp or self._safe_get_rank() == 0):
                self._save_checkpoint(epoch, step)

            # Fixed-interval checkpointing (cf. train_pretrain_accelerate.py).
            save_interval = self.config.get('save_interval', 1000)
            if (step + 1) % save_interval == 0 and (not self.ddp or self._safe_get_rank() == 0):
                self._save_checkpoint(epoch, step)

            # Early stopping requested by the checkpointer plugin.
            if hasattr(self.checkpointer_plugin, 'should_early_stop') and self.checkpointer_plugin.should_early_stop():
                self._log_if_rank0("[INFO] 触发早停，结束训练")
                return True

            # Sample-printing plugin: print a generation sample at a fixed interval.
            if (self._is_sample_printing_checkpointer() and
                    (self.global_step + 1) % self.config.get('sample_printing_interval', 100) == 0 and
                    (not self.ddp or self._safe_get_rank() == 0)):
                try:
                    tokenizer = self._load_tokenizer()
                    self.checkpointer_plugin.print_sample(self.model, tokenizer, self.device)
                    self._log_if_rank0("[INFO] 打印样本完成")
                except Exception as e:
                    # Best effort: sample printing must never abort training.
                    self._log_if_rank0(f"[WARNING] 打印样本时出错: {e}")

            self.global_step += 1

        self._log_if_rank0(f"[INFO] 完成第 {epoch + 1} 轮训练")
        return False

    def _save_checkpoint(self, epoch, step):
        """Save a checkpoint, or print a sample for the sample-printing plugin.

        Weights are saved in half precision; the file name encodes hidden
        size, MoE usage and launch mode.  DeepSpeed writes its own sharded
        checkpoint directory instead of a single .pth file.
        """
        self.model.eval()
        use_moe = self.config.get('use_moe', False)
        moe_path = '_moe' if use_moe else ''
        use_accelerate = self.config.get('use_accelerate', False)
        use_torchrun = self.config.get('use_torchrun', False)
        use_deepspeed = self.config.get('use_deepspeed', False)

        output_dir = self.config.get("output_dir", "../out")
        os.makedirs(output_dir, exist_ok=True)

        # The sample-printing checkpointer only prints a sample; no weights
        # are written.
        if self._is_sample_printing_checkpointer():
            try:
                tokenizer = self._load_tokenizer()
                self.checkpointer_plugin.print_sample(self.model, tokenizer, self.device)
                self._log_if_rank0("[INFO] 打印样本完成")
            except Exception as e:
                self._log_if_rank0(f"[WARNING] 打印样本时出错: {e}")
            self.model.train()
            return

        base = f'{output_dir}/poxiao_pretrain_{self.config.get("hidden_size", 512)}{moe_path}'
        if use_deepspeed and DEEPSPEED_AVAILABLE:
            # DeepSpeed handles (possibly sharded) model saving itself.
            self.model.save_checkpoint(f'{base}_deepspeed')
        elif use_accelerate and ACCELERATE_AVAILABLE:
            # Unwrap from the Accelerate wrapper, then save in half precision.
            unwrapped_model = self.accelerator.unwrap_model(self.model)
            state_dict = {k: v.half() for k, v in unwrapped_model.state_dict().items()}
            torch.save(state_dict, f'{base}_accelerate.pth')
        elif use_torchrun:
            torch.save(self._half_state_dict(), f'{base}_torchrun.pth')
        else:
            torch.save(self._half_state_dict(), f'{base}.pth')

        self.model.train()

    def _half_state_dict(self):
        """Return the model state dict (unwrapped from DDP) cast to fp16."""
        if isinstance(self.model, torch.nn.parallel.DistributedDataParallel):
            state_dict = self.model.module.state_dict()
        else:
            state_dict = self.model.state_dict()
        return {k: v.half() for k, v in state_dict.items()}
