import torch
import torch.optim as optim
from torch.optim.lr_scheduler import LambdaLR, StepLR, CosineAnnealingLR

# Define the model, loss function, and optimizer.
# NOTE(review): `model` is a placeholder (Ellipsis) — this script will not run
# until a real nn.Module is assigned here; confirm the intended model.
model = ...  # assume the model has already been defined
criterion = torch.nn.CrossEntropyLoss()
# SGD with momentum; base LR 0.01 is the value the warmup multiplier scales.
optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.9)

# Learning-rate warmup: linearly ramp the LR multiplier up to 1.0 over the
# first 5 epochs, then hold it constant.
def lr_lambda(epoch):
    """Return the learning-rate multiplier for a given (0-indexed) epoch.

    Bug fix: the original returned ``epoch / 5.0``, which is 0.0 at epoch 0,
    so the entire first epoch trained with a zero learning rate. Using
    ``(epoch + 1) / 5.0`` starts the ramp at 0.2 and reaches 1.0 at epoch 4.

    Args:
        epoch: Epoch index passed by ``LambdaLR`` (starts at 0).

    Returns:
        A float in (0, 1] that multiplies the optimizer's base LR.
    """
    if epoch < 5:  # warm up over the first 5 epochs
        return (epoch + 1) / 5.0
    return 1.0

# Wrap the optimizer: each epoch's LR = base_lr * lr_lambda(epoch).
scheduler = LambdaLR(optimizer, lr_lambda=lr_lambda)

# Training loop
# NOTE(review): `num_epochs` and `dataloader` are assumed to be defined
# elsewhere — confirm before running.
for epoch in range(num_epochs):
    model.train()
    for inputs, targets in dataloader:
        optimizer.zero_grad()  # clear gradients left over from the previous step
        outputs = model(inputs)
        loss = criterion(outputs, targets)
        loss.backward()
        optimizer.step()
    
    # Advance the scheduler once per epoch so lr_lambda sees the epoch index.
    # (Called after optimizer.step(), as PyTorch >= 1.1 requires.)
    scheduler.step()

    # Validation-set evaluation (optional)
    # ...

# Other learning-rate scheduling strategies (examples):
# scheduler = StepLR(optimizer, step_size=30, gamma=0.1)  # multiply LR by 0.1 every 30 epochs
# scheduler = CosineAnnealingLR(optimizer, T_max=num_epochs)  # cosine annealing

