import copy
import torch.optim.lr_scheduler as scheduler
import numpy as np


def build_lr(cfg, model_optimizer, num_iters, max_epochs=None):
    """
    Build a learning rate scheduler for ``model_optimizer`` according to the
    ``SCHEDULER`` field in the configuration.

    In configuration:
    SCHEDULER:
        iter_step: True
        milestones: [30,50]
        gamma: 0.1
    or
    SCHEDULER:
        iter_step: True
        milestones: [30,50]
        gamma: 0.1
        warmup_epochs: 5
        decay_type: "step" # "step" or "cosine"

    Args:
        cfg (dict): scheduler configuration.
        model_optimizer: an already-constructed torch optimizer.
        num_iters: number of iterations (batches) per epoch in the train loader.
        max_epochs: total number of training epochs (required for cosine decay).
    Returns:
        lr_scheduler (torch.optim.lr_scheduler.LRScheduler): torch LRScheduler.

    """

    cfg_copy = cfg.copy()

    iter_step = cfg_copy.pop('iter_step', False)  # remove so it is not forwarded
    if iter_step:
        # Per-iteration stepping: the warmup lambdas need iterations-per-epoch
        # to convert epoch counts into step counts.
        cfg_copy['num_sample_epoch'] = num_iters
    # Default to '' so a missing 'decay_type' key does not raise
    # "TypeError: argument of type 'NoneType' is not iterable".
    if 'cosine' in cfg_copy.get('decay_type', '') and max_epochs is not None:
        # The warmup lambda builders take this as `max_epoch` (singular);
        # a plural key would be silently swallowed by their **kwargs and
        # cosine decay would never activate.
        cfg_copy['max_epoch'] = max_epochs

    use_warmup = cfg_copy.get('warmup_epochs', 0)
    if use_warmup:
        if iter_step:
            lr_lambda = warmup_step_lambda(**cfg_copy)
        else:
            lr_lambda = warmup_epoch_lambda(**cfg_copy)

        lr_scheduler = scheduler.LambdaLR(model_optimizer, lr_lambda=lr_lambda)
    else:  # no warmup: plain MultiStepLR
        lr_scheduler = scheduler.MultiStepLR(model_optimizer,
                                             milestones=cfg_copy.get('milestones'),
                                             gamma=cfg_copy.get('gamma', 0.1))

    return lr_scheduler


def warmup_step_lambda(warmup_epochs, num_sample_epoch, milestones, gamma=0.1,
                       max_epoch=None, **kwargs):
    """
    Build a per-iteration LR multiplier with linear warmup.

    During the first ``warmup_epochs`` epochs the multiplier rises linearly
    from 0 to 1; afterwards it follows cosine decay when ``max_epoch`` is
    given, otherwise multi-step decay by ``gamma`` at each milestone epoch.

    Args:
        warmup_epochs (int): number of warmup epochs (must be > 0).
        num_sample_epoch (int): iterations (steps) per epoch.
        milestones (list[int]): epochs at which to decay by ``gamma``
            (used only for step decay).
        gamma (float): multiplicative decay factor for step decay.
        max_epoch (int, optional): total epochs; if given, use cosine decay.
        **kwargs: ignored extra config entries.

    Returns:
        Callable[[int], float]: maps a global step index to an LR multiplier,
        suitable for ``torch.optim.lr_scheduler.LambdaLR``.
    """
    warmup_steps = warmup_epochs * num_sample_epoch
    if max_epoch is not None:  # cosine decay after warmup
        max_steps = max_epoch * num_sample_epoch
        lr_lambda = lambda step: step / warmup_steps \
            if step < warmup_steps else \
            0.5 * (np.cos((step - warmup_steps) / (max_steps - warmup_steps) * np.pi) + 1)
    else:  # step decay after warmup: gamma ** (#milestones already passed)
        lr_lambda = lambda step: step / warmup_steps \
            if step < warmup_steps else \
            gamma ** np.sum(np.array(milestones) <= step // num_sample_epoch)

    return lr_lambda


def warmup_epoch_lambda(warmup_epochs, milestones, gamma=0.1,
                        max_epoch=None, **kwargs):
    """
    Build a per-epoch LR multiplier with linear warmup.

    Epochs up to ``warmup_epochs`` ramp the multiplier linearly from 0 to 1.
    After warmup, the multiplier follows cosine decay when ``max_epoch`` is
    given, otherwise multi-step decay by ``gamma`` at each milestone.

    Args:
        warmup_epochs (int): number of warmup epochs (must be > 0).
        milestones (list[int]): decay epochs (step decay only).
        gamma (float): multiplicative decay factor (step decay only).
        max_epoch (int, optional): total epochs; if given, use cosine decay.
        **kwargs: ignored extra config entries.

    Returns:
        Callable[[int], float]: epoch -> LR multiplier, for ``LambdaLR``.
    """
    boundaries = np.array(milestones)

    def lr_lambda(epoch):
        if epoch <= warmup_epochs:
            return epoch / warmup_epochs
        if max_epoch is not None:  # cosine decay
            progress = (epoch - warmup_epochs) / (max_epoch - warmup_epochs)
            return 0.5 * (np.cos(progress * np.pi) + 1)
        # step decay: gamma ** (#milestones already reached)
        return gamma ** np.sum(boundaries <= epoch)

    return lr_lambda
