import torch
import torch.nn as nn
import torch.optim as optim

# Assume `model` is your (large) model — placeholder to be replaced
# with a real nn.Module instance before this script is run.
model = ...
optimizer = optim.Adam(model.parameters(), lr=0.001)

# Training step with gradient clipping
def train_step(inputs, targets, model=None, optimizer=None, max_norm=1.0):
    """Run one optimization step with gradient-norm clipping.

    Args:
        inputs: Batch of model inputs.
        targets: Class-index targets for cross-entropy loss.
        model: Model to train. Defaults to the module-level ``model``.
        optimizer: Optimizer to step. Defaults to the module-level ``optimizer``.
        max_norm: Maximum global gradient norm for clipping (default 1.0).

    Returns:
        float: The scalar loss value for this batch.
    """
    # Fall back to the module-level globals so the original
    # train_step(inputs, targets) call style keeps working.
    if model is None:
        model = globals()["model"]
    if optimizer is None:
        optimizer = globals()["optimizer"]

    # set_to_none=True releases grad tensors instead of zero-filling them,
    # saving memory and a kernel launch per parameter.
    optimizer.zero_grad(set_to_none=True)
    outputs = model(inputs)
    loss = nn.functional.cross_entropy(outputs, targets)
    loss.backward()

    # Clip the global gradient norm to stabilize training of large models.
    torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=max_norm)

    optimizer.step()
    return loss.item()

