# %load AI/code/models/optimizers.py
# Optimizers

import torch
from torch import optim
# Adam is the usual default; switch to SGD for fine-grained tuning.
# Adam: init_lr = 5e-4 (or 3e-4)
# Decay schedule: [5e-4 (or 3e-4), 1e-4, 1e-5, 1e-6]
# SGD with momentum and weight decay, hyperparameters taken from argparse:
# optimizer = torch.optim.SGD(model.parameters(), args.lr[0],
#                             momentum=args.momentum,
#                             weight_decay=args.weight_decay)

# Basic SGD and Adam:
# optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
# optimizer = optim.Adam([var1, var2], lr=0.0001)

# Per-parameter-group learning rates: the classifier gets lr=1e-3,
# everything else falls back to the default lr=1e-2:
# optimizer = optim.SGD([
#     {'params': model.base.parameters()},
#     {'params': model.classifier.parameters(), 'lr': 1e-3},
# ], lr=1e-2, momentum=0.9)

# Discriminative learning rates: a small lr for the pretrained backbone,
# a larger one for the (typically freshly initialized) head:
# optimizer = torch.optim.Adam([
#     {'params': model.backbone.parameters(), 'lr': 3e-5},
#     {'params': model.fc.parameters(), 'lr': 3e-4},
# ])
optimizer = optim.Adam(model.parameters(), lr=3e-4)
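
# A minimal sketch of the decay schedule noted above, stepping the learning
# rate through [3e-4, 1e-4, 1e-5, 1e-6]. The milestone epochs (30/60/90) and
# the helper name `adjust_learning_rate` are assumptions for illustration,
# not part of the original file; adjust them to the training length. When the
# decay factors are uniform, torch.optim.lr_scheduler.MultiStepLR(optimizer,
# milestones=..., gamma=...) is the stock alternative; here the steps are not
# a constant ratio, so the lr is set explicitly from a table.
_LR_STEPS = [3e-4, 1e-4, 1e-5, 1e-6]
_MILESTONES = [30, 60, 90]  # assumed epochs at which to drop the lr

def adjust_learning_rate(optimizer, epoch):
    """Set the lr of every param group from the step table above."""
    idx = sum(epoch >= m for m in _MILESTONES)  # count of milestones passed
    lr = _LR_STEPS[idx]
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
    return lr

# Typical use inside the training loop:
# for epoch in range(num_epochs):
#     adjust_learning_rate(optimizer, epoch)
#     train_one_epoch(...)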
