from paddlenlp.ops.optimizer import AdamWDL
from .text_generation import *
from .token_classification import *
from .base_model import BaseModel, FlatBaseModel, EnsembleModel


def get_optimizer(model, lr, args):
    """Build an ``AdamWDL`` optimizer for *model*.

    Weight decay is applied to every parameter except those whose structured
    name contains ``"bias"`` or ``"norm"`` (biases and normalization-layer
    weights), following the usual transformer fine-tuning convention.

    Args:
        model: A paddle model exposing ``named_parameters()`` and ``parameters()``.
        lr: Learning rate (float or scheduler) passed through to ``AdamWDL``.
        args: Config object; only ``args.weight_decay`` is read here.

    Returns:
        A configured ``AdamWDL`` optimizer instance.
    """
    # Single pass over named_parameters(): collect both the set of paddle
    # parameter names eligible for weight decay and the paddle-name ->
    # structured-name mapping that AdamWDL needs (presumably for layer-wise
    # decay lookup — see AdamWDL docs).
    decay_params = set()
    name_dict = {}
    for structured_name, param in model.named_parameters():
        name_dict[param.name] = structured_name
        if not any(marker in structured_name for marker in ("bias", "norm")):
            decay_params.add(param.name)

    optimizer = AdamWDL(
        learning_rate=lr,
        parameters=model.parameters(),
        weight_decay=args.weight_decay,
        name_dict=name_dict,
        # set membership: O(1) per parameter instead of scanning a list
        apply_decay_param_fun=lambda x: x in decay_params,
    )
    return optimizer
