import torch

def get_optim_lr(cfg, model):
    """Build the optimizer and LR scheduler for a supported HOI model.

    Only parameters whose qualified name contains ``"interaction_head"``
    (and that still require gradients) are optimized; for UPT / UPT_POSE
    the pretrained detector is additionally frozen first.

    Args:
        cfg: config object providing ``cfg.MODEL.NAME``, ``cfg.TRAIN.LR``,
            ``cfg.TRAIN.WEIGHT_DECAY`` and ``cfg.TRAIN.LR_DROP``.
        model: the model; assumed to expose ``named_parameters()`` and,
            for UPT / UPT_POSE, a ``detector`` sub-module — TODO confirm
            against the model definitions.

    Returns:
        (optim, lr_scheduler): an ``AdamW`` optimizer over the
        interaction-head parameters and a ``StepLR`` scheduler stepping
        every ``cfg.TRAIN.LR_DROP`` epochs.

    Raises:
        TypeError: if ``cfg.MODEL.NAME`` is not a supported model.
    """
    model_name = cfg.MODEL.NAME.upper()
    if model_name not in ('UPT', 'UPT_POSE', 'UPT_FASTERRCNN'):
        # BUG FIX: the original built the TypeError without `raise`, so
        # unsupported names silently returned None and callers crashed
        # later when unpacking the (optim, scheduler) tuple.
        raise TypeError(f'{model_name} do not support now!')

    if model_name in ('UPT', 'UPT_POSE'):
        # Freeze the pretrained detector; only the interaction head trains.
        for p in model.detector.parameters():
            p.requires_grad = False

    # The two original branches built identical optimizer/scheduler pairs;
    # construction is shared here to remove the duplication.
    param_dicts = [{
        "params": [p for n, p in model.named_parameters()
                   if "interaction_head" in n and p.requires_grad]
    }]
    optim = torch.optim.AdamW(
        param_dicts, lr=cfg.TRAIN.LR,
        weight_decay=cfg.TRAIN.WEIGHT_DECAY
    )
    lr_scheduler = torch.optim.lr_scheduler.StepLR(optim, cfg.TRAIN.LR_DROP)
    return optim, lr_scheduler
