from transformers import get_linear_schedule_with_warmup


class Scheduler:
    """Unified wrapper over three learning-rate schedules selected by
    ``args.scheduler``:

    * ``'noam'``   -- warmup + inverse-sqrt decay (Vaswani et al., 2017),
      applied manually by rewriting each param group's ``lr``.
    * ``'linear'`` -- Hugging Face ``get_linear_schedule_with_warmup``.
    * ``'None'``   -- the literal string, meaning no scheduling; the
      optimizer's lr is left untouched.

    Call :meth:`step` once per optimizer step.
    """

    def __init__(self, args, optimizer, n_embd):
        """Set up the selected schedule.

        Args:
            args: namespace providing ``scheduler``, ``num_warmup_steps`` and
                (for 'linear') ``num_training_steps``.
            optimizer: torch-style optimizer exposing ``param_groups``.
            n_embd: model embedding size; used as the noam ``model_size``.

        Raises:
            ValueError: if ``args.scheduler`` is not one of the three
                supported values.
        """
        self.args = args
        self.n_embd = n_embd
        # Noam step counter. _set_lr increments it *before* computing the
        # decay, so the first applied step is 1 -- step 0 would divide by
        # zero in _noam_decay (0 ** -0.5).
        self.curr_step = 0
        self.optimizer = optimizer

        # Remember each param group's base lr so the noam schedule always
        # scales from the original value instead of compounding on top of
        # previously rewritten lrs.
        self.init_lr = [group['lr'] for group in optimizer.param_groups]

        if args.scheduler == 'noam':
            # Apply the step-1 lr immediately so training never runs on the
            # raw base lr.
            self._set_lr(self.optimizer, self.args.num_warmup_steps, self.n_embd)
        elif args.scheduler == 'linear':
            self.scheduler = get_linear_schedule_with_warmup(
                self.optimizer,
                num_warmup_steps=self.args.num_warmup_steps,
                num_training_steps=self.args.num_training_steps)
        elif args.scheduler == 'None':
            # Deliberate no-op: keep the optimizer's lr constant.
            pass
        else:
            # Was `assert False`, which is stripped under `python -O`; fail
            # loudly with the offending value instead.
            raise ValueError(f"Unknown scheduler: {args.scheduler!r}")

    def step(self):
        """Advance the schedule by one step, updating the optimizer's lr
        in place (no-op for the 'None' schedule).

        Raises:
            ValueError: if ``self.args.scheduler`` is not a supported value.
        """
        if self.args.scheduler == 'noam':
            self._set_lr(self.optimizer, self.args.num_warmup_steps, self.n_embd)
        elif self.args.scheduler == 'linear':
            self.scheduler.step()
        elif self.args.scheduler == 'None':
            pass
        else:
            raise ValueError(f"Unknown scheduler: {self.args.scheduler!r}")

    @staticmethod
    def _noam_decay(step, warmup_steps, model_size):
        """Noam decay factor from "Attention Is All You Need"
        (https://arxiv.org/pdf/1706.03762.pdf): linear warmup for
        ``warmup_steps`` steps, then inverse-square-root decay, scaled by
        ``model_size ** -0.5``. ``step`` must be >= 1.
        """
        return model_size ** (-0.5) * min(step ** (-0.5), step * warmup_steps ** (-1.5))

    def _set_lr(self, optimizer, warmup_steps, n_embd):
        """Advance the noam step counter and write the resulting lr into
        every param group, scaled from that group's initial lr.

        NOTE(review): the 1e4 multiplier appears to compensate for very
        small base lrs passed in via the optimizer -- confirm against the
        training config before changing it.
        """
        self.curr_step += 1
        decay = self._noam_decay(self.curr_step, warmup_steps, n_embd)
        for base_lr, param_group in zip(self.init_lr, optimizer.param_groups):
            param_group['lr'] = base_lr * 1e4 * decay
