import itertools
import torch
from torch import optim


class Optimizer(object):
    """ The Optimizer class encapsulates torch.optim package and provides functionalities
    for learning rate scheduling and gradient norm clipping.

    Args:
        params (iterable): iterable of parameters (or parameter-group dicts) to
            optimize, as accepted by torch.optim optimizers.  Materialized to a
            list internally so a generator input is not exhausted by the
            optimizer constructor before gradient clipping can use it.
        opt_name (str): optimizer name (case-insensitive), e.g. 'sgd' or 'adam'
        lr (float): init learning rate
        max_grad_norm (float, optional): value used for gradient norm clipping,
            set 0 to disable (default 0)

    Raises:
        NotImplementedError: if opt_name is not a supported optimizer name.
    """

    _ARG_MAX_GRAD_NORM = 'max_grad_norm'

    # Supported optimizer names (lowercase) mapped to their torch.optim classes.
    _OPTIMIZERS = {'adam': optim.Adam, 'sgd': optim.SGD}

    def __init__(self, params, opt_name, lr, max_grad_norm=0):
        self._optimizer_name = opt_name
        self.init_lr = lr
        # Materialize once: if `params` is a generator, passing it directly to
        # the optimizer would exhaust it, leaving step()'s gradient clipping
        # with an empty iterable and silently disabling clipping.
        self._params = list(params)
        self.optimizer = self.get_optimizer(self._params)

        self.scheduler = None
        self.max_grad_norm = max_grad_norm

    def get_optimizer(self, params):
        """ Instantiate the torch.optim optimizer named by ``self._optimizer_name``.

        Args:
            params (iterable): parameters (or parameter groups) to optimize.

        Returns:
            torch.optim.Optimizer: the constructed optimizer with lr=self.init_lr.

        Raises:
            NotImplementedError: if the optimizer name is not supported.
        """
        name = self._optimizer_name.lower()
        if name not in self._OPTIMIZERS:
            raise NotImplementedError("Not Support Optimizer: {}".format(self._optimizer_name))
        return self._OPTIMIZERS[name](params=params, lr=self.init_lr)

    def set_scheduler(self, scheduler):
        """ Set the learning rate scheduler.

        Args:
            scheduler (torch.optim.lr_scheduler.*): object of learning rate scheduler,
               e.g. torch.optim.lr_scheduler.StepLR
        """
        self.scheduler = scheduler

    def step(self):
        """ Performs a single optimization step, including gradient norm clipping if necessary. """
        if self.max_grad_norm > 0:
            torch.nn.utils.clip_grad_norm_(self._params, self.max_grad_norm)
        self.optimizer.step()

    def update(self, loss):
        """ Update the learning rate if the criteria of the scheduler are met.

        Args:
            loss (float): The current loss.  It could be training loss or developing loss
                depending on the caller.  By default the supervised trainer uses developing
                loss.
        """
        if self.scheduler is None:
            return
        if isinstance(self.scheduler, torch.optim.lr_scheduler.ReduceLROnPlateau):
            # ReduceLROnPlateau is the only stock scheduler whose step()
            # consumes the monitored metric.
            self.scheduler.step(loss)
        else:
            self.scheduler.step()
