import torch
from basic.scheduler import *
from transformers.optimization import Adafactor
from torch.optim.lr_scheduler import StepLR

class Optimizer:
    """Adam optimizer bundled with a step-wise exponential LR decay schedule.

    Wraps ``torch.optim.Adam`` and a ``LambdaLR`` scheduler whose multiplier is
    ``decay ** (step // decay_steps)``, i.e. the learning rate is multiplied by
    ``decay`` once every ``decay_steps`` scheduler steps.
    """

    def __init__(self, parameter, config):
        # config must provide: learning_rate, beta_1, beta_2, epsilon, L2_REG,
        # decay, decay_steps.
        self.optim = torch.optim.Adam(parameter, lr=config.learning_rate, betas=(config.beta_1, config.beta_2),
                                      eps=config.epsilon, weight_decay=config.L2_REG, amsgrad=False)
        decay, decay_step = config.decay, config.decay_steps
        # NOTE: step() advances the scheduler once per optimizer update, so the
        # "epoch" argument here is really the global step count.
        l = lambda epoch: decay ** (epoch // decay_step)
        self.scheduler = torch.optim.lr_scheduler.LambdaLR(self.optim, lr_lambda=l)

    def step(self):
        """Apply accumulated gradients, advance the LR schedule, clear grads."""
        self.optim.step()
        self.schedule()
        self.optim.zero_grad()

    def schedule(self):
        """Advance the learning-rate scheduler by one step."""
        self.scheduler.step()

    def zero_grad(self):
        """Reset all parameter gradients to zero."""
        self.optim.zero_grad()

    @property
    def lr(self):
        """Current learning rate(s) as a list, one entry per param group.

        Uses ``get_last_lr()`` — the LR actually applied at the most recent
        step. The previous ``get_lr()`` is an internal scheduler hook: calling
        it outside ``scheduler.step()`` triggers a warning in modern PyTorch
        and can return a mis-scaled value for chained/closed-form schedulers.
        """
        return self.scheduler.get_last_lr()

class OptimizerWarmUp:
    """Adafactor optimizer with linear warm-up followed by step-wise LR decay.

    Uses HuggingFace's ``Adafactor`` in fixed-LR mode (``relative_step=False``,
    ``scale_parameter=False``) so the external schedulers control the learning
    rate: a ``GradualWarmupScheduler`` ramps the LR over ``config.total_step``
    steps, then hands off to a ``StepLR`` that multiplies by ``config.decay``
    every ``config.decay_steps`` steps.
    """

    def __init__(self, parameter, config):
        # config must provide: learning_rate, L2_REG, decay, decay_steps,
        # total_step.
        self.optim = Adafactor(
            parameter,
            lr=config.learning_rate,
            eps=(1e-30, 1e-3),
            clip_threshold=1.0,
            decay_rate=-0.8,
            beta1=None,
            weight_decay=config.L2_REG,
            # Disable Adafactor's internal LR logic; scheduling is done below.
            relative_step=False,
            scale_parameter=False,
            warmup_init=False,
        )

        decay, decay_step = config.decay, config.decay_steps

        total_step = config.total_step
        scheduler_steplr = StepLR(self.optim, step_size=decay_step, gamma=decay)
        # multiplier=1: warm up from 0 toward the base LR (no overshoot), then
        # defer to the StepLR decay schedule after total_step steps.
        scheduler_warmup = GradualWarmupScheduler(self.optim, multiplier=1, total_epoch=total_step, after_scheduler=scheduler_steplr)

        self.scheduler = scheduler_warmup

    def step(self):
        """Apply accumulated gradients, advance the LR schedule, clear grads."""
        self.optim.step()
        self.schedule()
        self.optim.zero_grad()

    def schedule(self):
        """Advance the warm-up/decay scheduler by one step."""
        self.scheduler.step()

    def zero_grad(self):
        """Reset all parameter gradients to zero."""
        self.optim.zero_grad()

    @property
    def lr(self):
        """Current learning rate(s) as a list, one entry per param group.

        ``get_last_lr()`` (inherited from the PyTorch scheduler base class)
        reports the LR actually applied at the last step; ``get_lr()`` is an
        internal hook that warns and may mis-scale when called directly.
        """
        return self.scheduler.get_last_lr()