# Build the model FIRST, then the optimizer — the original constructed the
# optimizer from `model.parameters()` before `model` existed, which raises
# NameError when the script runs top-to-bottom.
model = torch.nn.Sequential(
    torch.nn.Linear(input_dim, hidden_dim),
    torch.nn.BatchNorm1d(hidden_dim),
    torch.nn.ReLU(),
    # other layers
)

optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

# Automatic mixed-precision (AMP) training step.
# NOTE(review): torch.cuda.amp.GradScaler / autocast are deprecated in recent
# PyTorch in favor of torch.amp.GradScaler("cuda") / torch.amp.autocast("cuda");
# kept as-is to match the file's existing API usage.
scaler = torch.cuda.amp.GradScaler()

# Clear stale gradients before this step — the original omitted this, so
# gradients from any previous iteration would accumulate into the update.
optimizer.zero_grad()

with torch.cuda.amp.autocast():
    # `input`, `target`, and `loss_fn` are assumed to be defined elsewhere in
    # the file. NOTE(review): `input` shadows the builtin — consider renaming
    # it at its definition site.
    output = model(input)
    loss = loss_fn(output, target)

# Scale the loss so fp16 gradients don't underflow; scaler.step() unscales
# before applying the optimizer update, and update() adapts the scale factor.
scaler.scale(loss).backward()
scaler.step(optimizer)
scaler.update()

