import torch
from utils.overwatch import initialize_overwatch

logger = initialize_overwatch("vla_qat")


def add_gradient_hooks(model: torch.nn.Module):
    """Attach a backward hook to every trainable parameter of *model*.

    Each hook forwards the incoming gradient to ``_check_grad`` together
    with the parameter's dotted name, the parameter itself, and the model,
    so gradient anomalies can be inspected during backprop.

    Returns:
        A list of hook handles; callers may invoke ``handle.remove()``
        on each one to detach the hooks later.
    """
    handles = []
    trainable = (
        (param_name, param)
        for param_name, param in model.named_parameters()
        if param.requires_grad
    )
    for param_name, param in trainable:
        # Bind the loop variables as lambda defaults so each closure keeps
        # its own name/param instead of the last iteration's (late binding).
        handle = param.register_hook(
            lambda grad, name=param_name, model=model, param=param: _check_grad(
                grad, name, param, model
            )
        )
        handles.append(handle)
    return handles

@torch.no_grad()
def _check_grad(grad, name, param, model: torch.nn.Module):
    """Backward-hook callback that inspects one parameter's gradient.

    Logs and aborts the process when the gradient contains NaN or inf;
    otherwise logs the gradient's max value and returns the gradient
    unchanged, so the hook does not alter backprop.

    Args:
        grad: incoming gradient tensor for ``param``.
        name: dotted parameter name from ``model.named_parameters()``.
        param: the parameter the gradient belongs to (used for logging).
        model: the owning module (kept for interactive debugging; unused).

    Raises:
        SystemExit: with status 1 when the gradient contains NaN or inf.
    """
    if torch.isnan(grad).any():
        logger.error(f"梯度为 NaN 的参数: {name} abssum:{param.abs().sum()} {param}")
        # Exit with a NON-ZERO status: the original exit(0) reported success
        # to any wrapping script / job scheduler despite a fatal anomaly.
        raise SystemExit(1)

    if torch.isinf(grad).any():
        logger.error(f"梯度为 inf 的参数: {name} abssum:{param.abs().sum()} {param}")
        raise SystemExit(1)

    g_max = grad.max().cpu().item()
    # str.strip() removes any of the given *characters* from both ends and was
    # silently mangling parameter names; removeprefix() drops exactly the
    # leading path component (and leaves non-matching names untouched).
    # NOTE(review): the threshold checks are commented out upstream, so this
    # logs every gradient — noisy; confirm whether a condition should apply.
    logger.info(f"梯度异常 {name.removeprefix('language_model.model.layers.')} {g_max:.4e}")
    return grad  # return the gradient unchanged (hook does not modify it)
