# https://github.com/monniert/dti-clustering/blob/b57a77d4c248b16b4b15d6509b6ec493c53257ef/src/optimizer/__init__.py
from torch.optim import SGD, Adam, ASGD, Adamax, Adadelta, Adagrad, RMSprop
# https://github.com/monniert/dti-clustering/blob/b57a77d4c248b16b4b15d6509b6ec493c53257ef/src/optimizer/__init__.py
from torch.optim.lr_scheduler import CosineAnnealingLR, ExponentialLR, MultiStepLR, _LRScheduler
import math
import torch


def get_optimizer(config, model):
    """Build a torch optimizer over ``model.parameters()`` from ``config``.

    Args:
        config: namespace-like object; ``config.opt`` selects the optimizer
            and the remaining attributes supply that optimizer's
            hyper-parameters (e.g. ``lr``, ``weight_decay``, ...).
        model: ``torch.nn.Module`` whose parameters will be optimized.

    Returns:
        A ``torch.optim.Optimizer`` instance of the requested type.

    Raises:
        AssertionError: if ``config.opt`` is not a supported name.
        ValueError: defensive fallback when asserts are disabled (``-O``).
    """
    assert config.opt in ['Adadelta', 'Adagrad', 'Adam', 'AdamW', 'Adamax', 'ASGD', 'RMSprop', 'Rprop', 'SGD'], 'Unsupported optimizer!'

    if config.opt == 'Adadelta':
        return torch.optim.Adadelta(
            model.parameters(),
            lr = config.lr,
            rho = config.rho,
            eps = config.eps,
            weight_decay = config.weight_decay
        )
    elif config.opt == 'Adagrad':
        return torch.optim.Adagrad(
            model.parameters(),
            lr = config.lr,
            lr_decay = config.lr_decay,
            eps = config.eps,
            weight_decay = config.weight_decay
        )
    elif config.opt == 'Adam':
        return torch.optim.Adam(
            model.parameters(),
            lr = config.lr,
            betas = config.betas,
            eps = config.eps,
            weight_decay = config.weight_decay,
            amsgrad = config.amsgrad
        )
    elif config.opt == 'AdamW':
        return torch.optim.AdamW(
            model.parameters(),
            lr = config.lr,
            betas = config.betas,
            eps = config.eps,
            weight_decay = config.weight_decay,
            amsgrad = config.amsgrad
        )
    elif config.opt == 'Adamax':
        return torch.optim.Adamax(
            model.parameters(),
            lr = config.lr,
            betas = config.betas,
            eps = config.eps,
            weight_decay = config.weight_decay
        )
    elif config.opt == 'ASGD':
        return torch.optim.ASGD(
            model.parameters(),
            lr = config.lr,
            lambd = config.lambd,
            alpha  = config.alpha,
            t0 = config.t0,
            weight_decay = config.weight_decay
        )
    elif config.opt == 'RMSprop':
        return torch.optim.RMSprop(
            model.parameters(),
            lr = config.lr,
            momentum = config.momentum,
            alpha = config.alpha,
            eps = config.eps,
            centered = config.centered,
            weight_decay = config.weight_decay
        )
    elif config.opt == 'Rprop':
        return torch.optim.Rprop(
            model.parameters(),
            lr = config.lr,
            etas = config.etas,
            step_sizes = config.step_sizes,
        )
    elif config.opt == 'SGD':
        return torch.optim.SGD(
            model.parameters(),
            lr = config.lr,
            momentum = config.momentum,
            weight_decay = config.weight_decay,
            dampening = config.dampening,
            nesterov = config.nesterov
        )
    else:
        # Unreachable while the assert above holds, but asserts are stripped
        # under ``python -O``; fail loudly rather than silently falling back
        # to a hard-coded SGD configuration (the previous behavior).
        raise ValueError('Unsupported optimizer: {}'.format(config.opt))



class MultipleOptimizer:
    """Drive several optimizers in lockstep through a single interface.

    Useful when different parameter groups are handled by different
    optimizer instances but the training loop should treat them as one.
    """

    def __init__(self, *optimizers):
        # Keep the wrapped optimizers in the order they were given.
        self.optimizers = optimizers

    def zero_grad(self):
        """Clear the gradients held by every wrapped optimizer."""
        for optimizer in self.optimizers:
            optimizer.zero_grad()

    def step(self):
        """Apply a parameter update with every wrapped optimizer."""
        for optimizer in self.optimizers:
            optimizer.step()


def get_scheduler(name):
    """Map a scheduler name string to its scheduler *class* (not an instance).

    ``None`` is treated as ``'constant_lr'``. Raises ``KeyError`` for an
    unknown name.

    NOTE(review): this function is shadowed by a second, differently-shaped
    ``get_scheduler(config, optimizer)`` defined later in this module, so
    this version is unreachable through the module namespace — confirm which
    one callers actually expect before relying on it.
    """
    if name is None:
        name = 'constant_lr'
    return {
        "constant_lr": ConstantLR,
        "poly_lr": PolynomialLR,
        "multi_step": MultiStepLR,
        "cosine_annealing": CosineAnnealingLR,
        "exp_lr": ExponentialLR,
    }[name]


class ConstantLR(_LRScheduler):
    """Scheduler that keeps every parameter group at its initial LR."""

    def __init__(self, optimizer, last_epoch=-1):
        super(ConstantLR, self).__init__(optimizer, last_epoch)

    def get_lr(self):
        # The rate never changes: always report the base values.
        return list(self.base_lrs)

    def __str__(self):
        return '{}({})'.format(self.__class__.__name__, self.optimizer.__class__.__name__)


class PolynomialLR(_LRScheduler):
    """Polynomial learning-rate schedule.

    Scales each *base* learning rate by ``(1 - epoch/max_iter) ** gamma`` on
    the epochs where the decay condition in ``get_lr`` fires; otherwise the
    base learning rates are returned unchanged.
    """

    def __init__(self, optimizer, max_iter, decay_iter=1, gamma=0.9, last_epoch=-1):
        # decay_iter: interval (in epochs) between decay evaluations.
        # max_iter: horizon used in the polynomial factor.
        # gamma: exponent of the polynomial.
        self.decay_iter = decay_iter
        self.max_iter = max_iter
        self.gamma = gamma
        super(PolynomialLR, self).__init__(optimizer, last_epoch)

    def get_lr(self):
        # NOTE(review): this condition returns the *base* LR unless
        # ``last_epoch`` is divisible by BOTH ``decay_iter`` and ``max_iter``,
        # which looks inverted (``self.last_epoch > self.max_iter`` may have
        # been intended). Kept as-is to preserve upstream behavior — confirm
        # before changing.
        if self.last_epoch % self.decay_iter or self.last_epoch % self.max_iter:
            return [base_lr for base_lr in self.base_lrs]
        else:
            # Polynomial factor is applied to the base LR, not the current LR.
            factor = (1 - self.last_epoch / float(self.max_iter)) ** self.gamma
            return [base_lr * factor for base_lr in self.base_lrs]

    def __str__(self):
        params = [
            'optimizer: {}'.format(self.optimizer.__class__.__name__),
            'decay_iter: {}'.format(self.decay_iter),
            'max_iter: {}'.format(self.max_iter),
            'gamma: {}'.format(self.gamma),
        ]
        return '{}({})'.format(self.__class__.__name__, ','.join(params))


class LARS(torch.optim.Optimizer):
    """
    LARS optimizer, no rate scaling or weight decay for parameters <= 1D.

    Parameters with ``ndim <= 1`` (biases, normalization gamma/beta) are
    updated with plain SGD-with-momentum; higher-dimensional parameters
    additionally receive weight decay and the layer-wise trust-ratio
    scaling ``trust_coefficient * ||p|| / ||grad||``.
    """
    def __init__(self, params, lr=0, weight_decay=0, momentum=0.9, trust_coefficient=0.001):
        defaults = dict(lr=lr, weight_decay=weight_decay, momentum=momentum, trust_coefficient=trust_coefficient)
        super().__init__(params, defaults)

    @torch.no_grad()
    def step(self, closure=None):
        """Perform a single optimization step.

        Args:
            closure: optional callable that re-evaluates the model and
                returns the loss. Added for compatibility with the standard
                ``torch.optim.Optimizer.step`` contract; executed with
                gradients enabled.

        Returns:
            The loss returned by ``closure``, or ``None`` if no closure
            was given.
        """
        loss = None
        if closure is not None:
            # Gradients must be live while re-evaluating the loss.
            with torch.enable_grad():
                loss = closure()

        for g in self.param_groups:
            for p in g['params']:
                dp = p.grad

                if dp is None:
                    continue

                if p.ndim > 1: # if not normalization gamma/beta or bias
                    dp = dp.add(p, alpha=g['weight_decay'])
                    param_norm = torch.norm(p)
                    update_norm = torch.norm(dp)
                    one = torch.ones_like(param_norm)
                    # Trust ratio q = eta * ||p|| / ||dp||, guarded against
                    # zero norms (falls back to 1 so the update passes through).
                    q = torch.where(param_norm > 0.,
                                    torch.where(update_norm > 0,
                                    (g['trust_coefficient'] * param_norm / update_norm), one),
                                    one)
                    dp = dp.mul(q)

                # Heavy-ball momentum buffer, lazily created per parameter.
                param_state = self.state[p]
                if 'mu' not in param_state:
                    param_state['mu'] = torch.zeros_like(p)
                mu = param_state['mu']
                mu.mul_(g['momentum']).add_(dp)
                p.add_(mu, alpha=-g['lr'])

        return loss


def adjust_learning_rate(optimizer, base_lr, gamma):
    """Set every param group's learning rate to ``base_lr * gamma``.

    Returns the new learning rate that was applied.
    """
    new_lr = base_lr * gamma
    for group in optimizer.param_groups:
        group['lr'] = new_lr
    return new_lr


def save_on_master(*args, **kwargs):
    """Save a checkpoint via ``torch.save``.

    NOTE(review): despite the name, there is no distributed rank check here —
    every process that calls this will write the file. Confirm that callers
    guard this with a rank-0 check when running under DDP.
    """
    torch.save(*args, **kwargs)
    
    
    
    
def get_scheduler(config, optimizer):
    """Build a learning-rate scheduler for ``optimizer`` from ``config``.

    ``config.sch`` selects the scheduler type; the remaining attributes
    supply that scheduler's hyper-parameters. The two ``WP_*`` variants
    prepend a linear warm-up over ``config.warm_up_epochs`` epochs.

    Returns:
        A ``torch.optim.lr_scheduler`` scheduler instance.

    Raises:
        AssertionError: if ``config.sch`` is not a supported name.
        ValueError: defensive fallback when asserts are disabled (``-O``).
    """
    assert config.sch in ['StepLR', 'MultiStepLR', 'ExponentialLR', 'CosineAnnealingLR', 'ReduceLROnPlateau',
                        'CosineAnnealingWarmRestarts', 'WP_MultiStepLR', 'WP_CosineLR'], 'Unsupported scheduler!'
    if config.sch == 'StepLR':
        scheduler = torch.optim.lr_scheduler.StepLR(
            optimizer,
            step_size = config.step_size,
            gamma = config.gamma,
            last_epoch = config.last_epoch
        )
    elif config.sch == 'MultiStepLR':
        scheduler = torch.optim.lr_scheduler.MultiStepLR(
            optimizer,
            milestones = config.milestones,
            gamma = config.gamma,
            last_epoch = config.last_epoch
        )
    elif config.sch == 'ExponentialLR':
        scheduler = torch.optim.lr_scheduler.ExponentialLR(
            optimizer,
            gamma = config.gamma,
            last_epoch = config.last_epoch
        )
    elif config.sch == 'CosineAnnealingLR':
        scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
            optimizer,
            T_max = config.T_max,
            eta_min = config.eta_min,
            last_epoch = config.last_epoch
        )
    elif config.sch == 'ReduceLROnPlateau':
        scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
            optimizer, 
            mode = config.mode, 
            factor = config.factor, 
            patience = config.patience, 
            threshold = config.threshold, 
            threshold_mode = config.threshold_mode, 
            cooldown = config.cooldown, 
            min_lr = config.min_lr, 
            eps = config.eps
        )
    elif config.sch == 'CosineAnnealingWarmRestarts':
        scheduler = torch.optim.lr_scheduler.CosineAnnealingWarmRestarts(
            optimizer,
            T_0 = config.T_0,
            T_mult = config.T_mult,
            eta_min = config.eta_min,
            last_epoch = config.last_epoch
        )
    elif config.sch == 'WP_MultiStepLR':
        # Linear warm-up for the first warm_up_epochs, then multi-step decay
        # by gamma at each milestone already passed.
        lr_func = lambda epoch: epoch / config.warm_up_epochs if epoch <= config.warm_up_epochs else config.gamma**len(
                [m for m in config.milestones if m <= epoch])
        scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lr_func)
    elif config.sch == 'WP_CosineLR':
        # Linear warm-up for the first warm_up_epochs, then cosine decay to 0
        # over the remaining (config.epochs - warm_up_epochs) epochs.
        lr_func = lambda epoch: epoch / config.warm_up_epochs if epoch <= config.warm_up_epochs else 0.5 * (
                math.cos((epoch - config.warm_up_epochs) / (config.epochs - config.warm_up_epochs) * math.pi) + 1)
        scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lr_func)
    else:
        # Unreachable while the assert holds, but asserts are stripped under
        # ``python -O``; without this branch ``scheduler`` would be unbound
        # and the final return would raise a confusing NameError.
        raise ValueError('Unsupported scheduler: {}'.format(config.sch))

    return scheduler




# sch = 'CosineAnnealingLR'
# if sch == 'StepLR':
#     step_size = epochs // 5 # – Period of learning rate decay.
#     gamma = 0.5 # – Multiplicative factor of learning rate decay. Default: 0.1
#     last_epoch = -1 # – The index of last epoch. Default: -1.
# elif sch == 'MultiStepLR':
#     milestones = [60, 120, 150] # – List of epoch indices. Must be increasing.
#     gamma = 0.1 # – Multiplicative factor of learning rate decay. Default: 0.1.
#     last_epoch = -1 # – The index of last epoch. Default: -1.
# elif sch == 'ExponentialLR':
#     gamma = 0.99 #  – Multiplicative factor of learning rate decay.
#     last_epoch = -1 # – The index of last epoch. Default: -1.
# elif sch == 'CosineAnnealingLR':
#     T_max = 50 # – Maximum number of iterations. Cosine function period.
#     eta_min = 0.00001 # – Minimum learning rate. Default: 0.
#     last_epoch = -1 # – The index of last epoch. Default: -1.  
# elif sch == 'ReduceLROnPlateau':
#     mode = 'min' # – One of min, max. In min mode, lr will be reduced when the quantity monitored has stopped decreasing; in max mode it will be reduced when the quantity monitored has stopped increasing. Default: ‘min’.
#     factor = 0.1 # – Factor by which the learning rate will be reduced. new_lr = lr * factor. Default: 0.1.
#     patience = 10 # – Number of epochs with no improvement after which learning rate will be reduced. For example, if patience = 2, then we will ignore the first 2 epochs with no improvement, and will only decrease the LR after the 3rd epoch if the loss still hasn’t improved then. Default: 10.
#     threshold = 0.0001 # – Threshold for measuring the new optimum, to only focus on significant changes. Default: 1e-4.
#     threshold_mode = 'rel' # – One of rel, abs. In rel mode, dynamic_threshold = best * ( 1 + threshold ) in ‘max’ mode or best * ( 1 - threshold ) in min mode. In abs mode, dynamic_threshold = best + threshold in max mode or best - threshold in min mode. Default: ‘rel’.
#     cooldown = 0 # – Number of epochs to wait before resuming normal operation after lr has been reduced. Default: 0.
#     min_lr = 0 # – A scalar or a list of scalars. A lower bound on the learning rate of all param groups or each group respectively. Default: 0.
#     eps = 1e-08 # – Minimal decay applied to lr. If the difference between new and old lr is smaller than eps, the update is ignored. Default: 1e-8.
# elif sch == 'CosineAnnealingWarmRestarts':
#     T_0 = 50 # – Number of iterations for the first restart.
#     T_mult = 2 # – A factor increases T_{i} after a restart. Default: 1.
#     eta_min = 1e-6 # – Minimum learning rate. Default: 0.
#     last_epoch = -1 # – The index of last epoch. Default: -1. 
# elif sch == 'WP_MultiStepLR':
#     warm_up_epochs = 10
#     gamma = 0.1
#     milestones = [125, 225]
# elif sch == 'WP_CosineLR':
    # warm_up_epochs = 20
# NOTE: the triple-quoted block below is an unused module-level string literal
# kept as reference documentation for per-optimizer hyper-parameter defaults;
# it is never assigned or read and has no runtime effect.
'''


opt = 'AdamW'
assert opt in ['Adadelta', 'Adagrad', 'Adam', 'AdamW', 'Adamax', 'ASGD', 'RMSprop', 'Rprop', 'SGD'], 'Unsupported optimizer!'
if opt == 'Adadelta':
    lr = 0.01 # default: 1.0 – coefficient that scale delta before it is applied to the parameters
    rho = 0.9 # default: 0.9 – coefficient used for computing a running average of squared gradients
    eps = 1e-6 # default: 1e-6 – term added to the denominator to improve numerical stability 
    weight_decay = 0.05 # default: 0 – weight decay (L2 penalty) 
elif opt == 'Adagrad':
    lr = 0.01 # default: 0.01 – learning rate
    lr_decay = 0 # default: 0 – learning rate decay
    eps = 1e-10 # default: 1e-10 – term added to the denominator to improve numerical stability
    weight_decay = 0.05 # default: 0 – weight decay (L2 penalty)
elif opt == 'Adam':
    lr = 0.001 # default: 1e-3 – learning rate
    betas = (0.9, 0.999) # default: (0.9, 0.999) – coefficients used for computing running averages of gradient and its square
    eps = 1e-8 # default: 1e-8 – term added to the denominator to improve numerical stability 
    weight_decay = 0.0001 # default: 0 – weight decay (L2 penalty) 
    amsgrad = False # default: False – whether to use the AMSGrad variant of this algorithm from the paper On the Convergence of Adam and Beyond
elif opt == 'AdamW':
    lr = 0.001 # default: 1e-3 – learning rate
    betas = (0.9, 0.999) # default: (0.9, 0.999) – coefficients used for computing running averages of gradient and its square
    eps = 1e-8 # default: 1e-8 – term added to the denominator to improve numerical stability
    weight_decay = 1e-2 # default: 1e-2 – weight decay coefficient
    amsgrad = False # default: False – whether to use the AMSGrad variant of this algorithm from the paper On the Convergence of Adam and Beyond 
elif opt == 'Adamax':
    lr = 2e-3 # default: 2e-3 – learning rate
    betas = (0.9, 0.999) # default: (0.9, 0.999) – coefficients used for computing running averages of gradient and its square
    eps = 1e-8 # default: 1e-8 – term added to the denominator to improve numerical stability
    weight_decay = 0 # default: 0 – weight decay (L2 penalty) 
elif opt == 'ASGD':
    lr = 0.01 # default: 1e-2 – learning rate 
    lambd = 1e-4 # default: 1e-4 – decay term
    alpha = 0.75 # default: 0.75 – power for eta update
    t0 = 1e6 # default: 1e6 – point at which to start averaging
    weight_decay = 0 # default: 0 – weight decay
elif opt == 'RMSprop':
    lr = 1e-2 # default: 1e-2 – learning rate
    momentum = 0 # default: 0 – momentum factor
    alpha = 0.99 # default: 0.99 – smoothing constant
    eps = 1e-8 # default: 1e-8 – term added to the denominator to improve numerical stability
    centered = False # default: False – if True, compute the centered RMSProp, the gradient is normalized by an estimation of its variance
    weight_decay = 0 # default: 0 – weight decay (L2 penalty)
elif opt == 'Rprop':
    lr = 1e-2 # default: 1e-2 – learning rate
    etas = (0.5, 1.2) # default: (0.5, 1.2) – pair of (etaminus, etaplis), that are multiplicative increase and decrease factors
    step_sizes = (1e-6, 50) # default: (1e-6, 50) – a pair of minimal and maximal allowed step sizes 
elif opt == 'SGD':
    lr = 0.01 # – learning rate
    momentum = 0.9 # default: 0 – momentum factor 
    weight_decay = 0.05 # default: 0 – weight decay (L2 penalty) 
    dampening = 0 # default: 0 – dampening for momentum
    nesterov = False # default: False – enables Nesterov momentum 


'''