
from transformers.trainer_callback import TrainerCallback
import os
import torch

class NanGradientCallback(TrainerCallback):
    """Trainer callback that watches for NaN gradients just before each
    optimizer step.

    When any parameter gradient contains NaN, the offending parameter names
    are appended to a log file, the model's ``state_dict`` is saved for
    post-mortem debugging, and training is stopped via the control flags.
    """

    def __init__(self, output_dir="nan_checkpoints"):
        # Directory that receives both the NaN log and debug checkpoints.
        self.output_dir = output_dir
        os.makedirs(self.output_dir, exist_ok=True)
        # Append-only text log of parameters that produced NaN gradients.
        self.nan_params_log = os.path.join(self.output_dir, "nan_params_log.txt")

    def on_pre_optimizer_step(self, args, state, control, **kwargs):
        """Scan every parameter gradient for NaN; log, checkpoint, and
        stop training if any are found.

        This hook runs after gradients exist but before the optimizer
        consumes them, so the saved state_dict still holds the weights
        that produced the bad gradients.
        """
        model = kwargs["model"]

        # Collect the names of all parameters whose gradient contains NaN.
        nan_params = [
            name
            for name, param in model.named_parameters()
            if param.grad is not None and torch.isnan(param.grad).any()
        ]

        # Nothing suspicious — let training proceed normally.
        if not nan_params:
            return

        for name in nan_params:
            print(f"NaN gradients detected in parameter: {name}")

        # Append the offending parameter names to the persistent log.
        with open(self.nan_params_log, "a") as f:
            f.write(
                f"Step {state.global_step}: NaN gradients found in parameters:\n"
            )
            f.write("\n".join(nan_params) + "\n\n")

        # Save the model weights for offline debugging.
        checkpoint_path = os.path.join(
            self.output_dir, f"nan_checkpoint_step_{state.global_step}"
        )
        torch.save(model.state_dict(), checkpoint_path)
        print(f"Saved model parameters at {checkpoint_path} due to NaN gradients")

        # Signal the Trainer to end training.
        control.should_training_stop = True
