# One optimization step: clear stale gradients, backpropagate the loss,
# clip the gradient norm, then apply the parameter update.
optimizer.zero_grad()
loss.backward()
# Rescale gradients in place so their global L2 norm is at most 1.0 —
# guards against exploding gradients destabilizing the update.
# Must run between backward() and step() to affect this update.
torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)
optimizer.step()


# Cosine-annealing schedule: decays the learning rate along a cosine
# curve over T_max scheduler steps (here, 10 epochs per half-period).
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=10)
for epoch in range(num_epochs):
    # training code (forward pass, loss.backward(), optimizer.step())
    # Since PyTorch 1.1.0, scheduler.step() must be called AFTER the
    # epoch's optimizer updates. Stepping at the top of the loop (as the
    # original did) skips the initial learning rate and shifts the whole
    # schedule forward by one epoch.
    scheduler.step()


# Adam optimizer with a small L2 penalty (weight_decay=1e-5) for regularization.
# NOTE(review): plain Adam folds weight decay into the adaptive gradient
# (L2-penalty semantics); if decoupled weight decay is intended, AdamW is
# the usual choice — confirm with the training recipe.
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate, weight_decay=1e-5)

