import paddle.fluid.optimizer
from paddle.fluid.regularizer import L2Decay
from paddle.fluid.io import DataLoader

import helm.dynamic.engine.cls.mix
import helm.transforms
from helm.transforms import Compose
import helm.dynamic.lr_scheduler


def get_transform(cfg):
    """Build a composed transform pipeline from a config list.

    Each entry of *cfg* is a single-key mapping of transform class name to
    its keyword arguments (or ``None`` for no arguments); the named class is
    looked up on ``helm.transforms`` and instantiated.

    Returns:
        Compose: the instantiated transforms chained in config order.
    """
    built = []
    for entry in cfg:
        # Each entry is expected to hold exactly one {name: params} pair.
        name, params = list(entry.items())[0]
        transform_cls = getattr(helm.transforms, name)
        built.append(transform_cls(**({} if params is None else params)))
    return Compose(built)


def get_model(cfg, mod):
    """Instantiate the model class named by ``cfg.type`` from module *mod*.

    The config is copied before the ``type`` key is removed, so the caller's
    *cfg* is left untouched; all remaining entries are forwarded as keyword
    arguments to the model constructor.
    """
    params = {**cfg}
    model_cls = getattr(mod, params.pop("type"))
    return model_cls(**params)


def get_lr_scheduler(cfg, step_each_epoch, epochs):
    """Build the LR scheduler named by ``cfg.type`` from helm.dynamic.lr_scheduler.

    Args:
        cfg: dict-like scheduler config; ``type`` selects the class, the
            remaining entries are passed through as keyword arguments.
        step_each_epoch: number of optimizer steps per epoch.
        epochs: total number of training epochs.

    Returns:
        The instantiated scheduler.
    """
    # Copy before popping so the caller's cfg is not mutated — the original
    # popped "type" in place, which made a second call with the same config
    # raise KeyError. This also matches get_model's copy-then-pop style.
    kwargs = {**cfg}
    typ = kwargs.pop("type")
    return getattr(helm.dynamic.lr_scheduler, typ)(
        step_each_epoch=step_each_epoch, epochs=epochs, **kwargs)


def get_optimizer(cfg, learning_rate, parameter_list):
    """Build the optimizer named by ``cfg.type`` from paddle.fluid.optimizer.

    Args:
        cfg: dict-like optimizer config; ``type`` selects the class,
            ``weight_decay`` becomes an L2 regularizer, and the remaining
            entries are passed through as keyword arguments.
        learning_rate: learning rate value or scheduler to use.
        parameter_list: parameters the optimizer should update.

    Returns:
        The instantiated optimizer with L2 weight-decay regularization.
    """
    # Copy before popping so the caller's cfg is not mutated — the original
    # popped "type" and "weight_decay" in place, breaking config reuse.
    # This also matches get_model's copy-then-pop style.
    kwargs = {**cfg}
    typ = kwargs.pop("type")
    reg = L2Decay(kwargs.pop("weight_decay"))
    return getattr(paddle.fluid.optimizer, typ)(
        learning_rate=learning_rate, regularization=reg,
        parameter_list=parameter_list, **kwargs)


def get_data_loader(cfg, ds, places):
    """Wrap dataset *ds* in a fluid DataLoader configured from *cfg*.

    Only ``batch_size`` is required; ``shuffle`` (default False),
    ``num_workers`` (default 2) and ``use_shared_memory`` (default True)
    are read with fallbacks. Results are returned as lists.
    """
    return DataLoader(
        ds,
        batch_size=cfg.batch_size,
        shuffle=cfg.get("shuffle", False),
        num_workers=cfg.get("num_workers", 2),
        places=places,
        return_list=True,
        use_shared_memory=cfg.get("use_shared_memory", True),
    )


def get_mix(cfg, ds):
    """Build the mix-augmentation named by ``cfg.type``, or None if disabled.

    Args:
        cfg: dict-like mix config, or None when mixing is disabled; ``type``
            selects the class from ``helm.dynamic.engine.cls.mix`` and the
            remaining entries are passed through as keyword arguments.
        ds: dataset handed to the mix object as ``dataset=``.

    Returns:
        The instantiated mix object, or None when *cfg* is None.
    """
    if cfg is None:
        return None
    # Copy before popping so the caller's cfg is not mutated — the original
    # popped "type" in place, breaking config reuse. This also matches
    # get_model's copy-then-pop style.
    kwargs = {**cfg}
    mix_type = kwargs.pop("type")
    return getattr(helm.dynamic.engine.cls.mix, mix_type)(dataset=ds, **kwargs)
