import torch
from torch.optim.optimizer import Optimizer
import math
import copy
from typing import Any, Callable, Dict, Iterable, Optional, Tuple, Union, List

from torch import Tensor


# --- Type aliases shared by the optimizers in this module ---

# Either a plain iterable of tensors or an iterable of param-group dicts,
# as accepted by ``torch.optim.Optimizer``.
Params = Union[Iterable[Tensor], Iterable[Dict[str, Any]]]

# A closure re-evaluates the model and returns the loss.
LossClosure = Callable[[], float]
OptLossClosure = Optional[LossClosure]
# (beta1, beta2) pair for Adam-style running averages.
Betas2 = Tuple[float, float]
# Per-parameter optimizer state dict.
State = Dict[str, Any]
OptFloat = Optional[float]
# (nu1, nu2) pair (not used by the optimizers visible in this chunk).
Nus2 = Tuple[float, float]


def _matrix_power(matrix: torch.Tensor, power: float) -> torch.Tensor:
    # use CPU for svd for speed up
    device = matrix.device
    matrix = matrix.cpu()
    u, s, v = torch.svd(matrix)
    return (u @ s.pow_(power).diag() @ v.t()).to(device)


class Shampoo(Optimizer):
    r"""Implements Shampoo Optimizer Algorithm.

    It has been proposed in `Shampoo: Preconditioned Stochastic Tensor
    Optimization`__.

    Arguments:
        params: iterable of parameters to optimize or dicts defining
            parameter groups
        lr: learning rate (default: 1e-1)
        momentum: momentum factor (default: 0)
        weight_decay: weight decay (L2 penalty) (default: 0)
        epsilon: epsilon added to each mat_gbar_j for numerical stability
            (default: 1e-4)
        update_freq: update frequency to compute inverse (default: 1)

    Example:
        >>> import torch_optimizer as optim
        >>> optimizer = optim.Shampoo(model.parameters(), lr=0.01)
        >>> optimizer.zero_grad()
        >>> loss_fn(model(input), target).backward()
        >>> optimizer.step()

    __ https://arxiv.org/abs/1802.09568

    Note:
        Reference code: https://github.com/moskomule/shampoo.pytorch
    """

    def __init__(
        self,
        params: Params,
        lr: float = 1e-1,
        momentum: float = 0.0,
        weight_decay: float = 0.0,
        epsilon: float = 1e-4,
        update_freq: int = 1,
    ):
        if lr <= 0.0:
            raise ValueError('Invalid learning rate: {}'.format(lr))
        if momentum < 0.0:
            raise ValueError('Invalid momentum value: {}'.format(momentum))
        if weight_decay < 0.0:
            raise ValueError(
                'Invalid weight_decay value: {}'.format(weight_decay)
            )
        # Fixed: the two checks below previously raised
        # 'Invalid momentum value' and formatted ``momentum`` instead of
        # the parameter that actually failed validation.
        if epsilon < 0.0:
            raise ValueError('Invalid epsilon value: {}'.format(epsilon))
        if update_freq < 1:
            raise ValueError(
                'Invalid update_freq value: {}'.format(update_freq)
            )

        defaults = dict(
            lr=lr,
            momentum=momentum,
            weight_decay=weight_decay,
            epsilon=epsilon,
            update_freq=update_freq,
        )
        super(Shampoo, self).__init__(params, defaults)

    def step(self, closure: OptLossClosure = None) -> OptFloat:
        """Performs a single optimization step.

        Arguments:
            closure: A closure that reevaluates the model and returns the loss.

        Returns:
            The loss returned by ``closure``, or ``None`` if no closure
            was given.
        """
        loss = None
        if closure is not None:
            loss = closure()

        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data
                order = grad.ndimension()
                original_size = grad.size()
                state = self.state[p]
                momentum = group['momentum']
                weight_decay = group['weight_decay']
                if len(state) == 0:
                    state['step'] = 0
                    if momentum > 0:
                        state['momentum_buffer'] = grad.clone()
                    for dim_id, dim in enumerate(grad.size()):
                        # One preconditioner (and its inverse root) per
                        # tensor dimension, initialized to epsilon * I.
                        state['precond_{}'.format(dim_id)] = group[
                            'epsilon'
                        ] * torch.eye(dim, out=grad.new(dim, dim))
                        state[
                            'inv_precond_{dim_id}'.format(dim_id=dim_id)
                        ] = grad.new(dim, dim).zero_()

                if momentum > 0:
                    # NOTE: mutates p.grad in place, matching the reference
                    # implementation.
                    grad.mul_(1 - momentum).add_(
                        state['momentum_buffer'], alpha=momentum
                    )

                if weight_decay > 0:
                    grad.add_(p.data, alpha=weight_decay)

                # See Algorithm 2 of the paper for details.
                for dim_id, dim in enumerate(grad.size()):
                    precond = state['precond_{}'.format(dim_id)]
                    inv_precond = state['inv_precond_{}'.format(dim_id)]

                    # mat_{dim_id}(grad): bring dim_id to the front and
                    # flatten the remaining dimensions.
                    grad = grad.transpose_(0, dim_id).contiguous()
                    transposed_size = grad.size()
                    grad = grad.view(dim, -1)

                    grad_t = grad.t()
                    precond.add_(grad @ grad_t)
                    # Recomputing the inverse root is expensive, so it is
                    # refreshed only every ``update_freq`` steps.
                    if state['step'] % group['update_freq'] == 0:
                        inv_precond.copy_(_matrix_power(precond, -1 / order))

                    if dim_id == order - 1:
                        # Last dimension: multiply on the right and restore
                        # the original shape.
                        grad = grad_t @ inv_precond
                        # grad: (-1, last_dim)
                        grad = grad.view(original_size)
                    else:
                        # Intermediate dimension: precondition on the left,
                        # keep the transposed layout for the next iteration.
                        grad = inv_precond @ grad
                        # grad: (dim, -1)
                        grad = grad.view(transposed_size)

                state['step'] += 1
                state['momentum_buffer'] = grad
                p.data.add_(grad, alpha=-group['lr'])

        return loss


Grads = Params

__all__ = ('Adahessian',)


class Adahessian(Optimizer):
    r"""Implements Adahessian Algorithm.

    It has been proposed in `ADAHESSIAN: An Adaptive Second Order Optimizer
    for Machine Learning`__.

    Arguments:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float, optional): learning rate (default: 0.15)
        betas (Tuple[float, float], optional): coefficients used for computing
            running averages of gradient and its square (default: (0.9, 0.999))
        eps (float, optional): term added to the denominator to improve
            numerical stability (default: 1e-4)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
        hessian_power (float, optional): Hessian power (default: 0.5)
        seed (int, optional): Random number generator seed (default: None)

    Example:
        >>> import torch_optimizer as optim
        >>> optimizer = optim.Adahessian(model.parameters(), lr = 1.0)
        >>> optimizer.zero_grad()
        >>> loss_fn(model(input), target).backward(create_graph=True)
        >>> optimizer.step()

    __ https://arxiv.org/abs/2006.00719

    Note:
        Reference code: https://github.com/amirgholami/adahessian
    """

    def __init__(
        self,
        params: Params,
        lr: float = 0.15,
        betas: Betas2 = (0.9, 0.999),
        eps: float = 1e-4,
        weight_decay: float = 0,
        hessian_power: float = 0.5,
        seed: Optional[int] = None,
    ) -> None:
        if lr <= 0.0:
            raise ValueError('Invalid learning rate: {}'.format(lr))
        if eps <= 0.0:
            raise ValueError('Invalid epsilon value: {}'.format(eps))
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError(
                'Invalid beta parameter at index 0: {}'.format(betas[0])
            )
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError(
                'Invalid beta parameter at index 1: {}'.format(betas[1])
            )
        if not 0.0 <= hessian_power <= 1.0:
            raise ValueError(
                'Invalid Hessian power value: {}'.format(hessian_power)
            )
        if seed is not None:
            # Hutchinson's estimator draws random sign vectors each step;
            # seeding makes the optimization trajectory reproducible.
            torch.manual_seed(seed)
        defaults = dict(
            lr=lr,
            betas=betas,
            eps=eps,
            weight_decay=weight_decay,
            hessian_power=hessian_power,
        )
        super(Adahessian, self).__init__(params, defaults)

    def get_trace(self, params: Params, grads: Grads) -> List[torch.Tensor]:
        """Get an estimate of Hessian Trace.

        This is done by computing the Hessian vector product with a random
        vector v at the current gradient point, to estimate Hessian trace by
        computing the gradient of <grads, v>.

        :param params: the parameters the gradients were computed for
        :param grads: a list of gradient tensors (must carry ``grad_fn``)
        :return: a list of torch tensors, one per parameter
        :raises RuntimeError: if a gradient has no graph attached, or a
            parameter has an unsupported dimensionality (3-D or 5-D+)
        """

        # Check backward was called with create_graph set to True
        for i, grad in enumerate(grads):
            if grad.grad_fn is None:
                msg = (
                    'Gradient tensor {:} does not have grad_fn. When '
                    'calling loss.backward(), make sure the option '
                    'create_graph is set to True.'
                )
                raise RuntimeError(msg.format(i))

        # Random vectors with entries in {-1, +1} (Rademacher).
        v = [
            2
            * torch.randint_like(
                p, high=2, memory_format=torch.preserve_format
            )
            - 1
            for p in params
        ]

        # this is for distributed setting with single node and multi-gpus,
        # for multi nodes setting, we have not support it yet.
        hvs = torch.autograd.grad(
            grads, params, grad_outputs=v, only_inputs=True, retain_graph=True
        )

        hutchinson_trace = []
        for hv in hvs:
            param_size = hv.size()
            if len(param_size) <= 2:  # for 0/1/2D tensor
                # Hessian diagonal block size is 1 here.
                # We use that torch.abs(hv * vi) = hv.abs()
                tmp_output = hv.abs()
            elif len(param_size) == 4:  # Conv kernel
                # Hessian diagonal block size is 9 here: torch.mean reduces
                # the dim 2/3.
                # We use that torch.abs(hv * vi) = hv.abs()
                tmp_output = torch.mean(hv.abs(), dim=[2, 3], keepdim=True)
            else:
                # Fixed: previously ``tmp_output`` was left unbound here,
                # crashing later with UnboundLocalError; fail with a clear
                # message instead.
                raise RuntimeError(
                    'Unsupported parameter dimensionality: {}'.format(
                        len(param_size)
                    )
                )
            hutchinson_trace.append(tmp_output)

        return hutchinson_trace

    def step(self, closure: OptLossClosure = None) -> OptFloat:
        """Perform a single optimization step.

        Arguments:
            closure: A closure that reevaluates the model and returns the loss.

        Returns:
            The loss returned by ``closure``, or ``None`` if no closure
            was given.
        """
        loss = None
        if closure is not None:
            loss = closure()

        params = []
        groups = []
        grads = []

        # Flatten groups into parallel lists, so that get_trace can be
        # called once over all parameters and grads.
        for group in self.param_groups:
            for p in group['params']:
                if p.grad is not None:
                    params.append(p)
                    groups.append(group)
                    grads.append(p.grad)

        # Hutchinson estimate of the Hessian diagonal.
        hut_traces = self.get_trace(params, grads)

        for (p, group, grad, hut_trace) in zip(
            params, groups, grads, hut_traces
        ):

            state = self.state[p]

            # State initialization
            if len(state) == 0:
                state['step'] = 0
                # Exponential moving average of gradient values
                state['exp_avg'] = torch.zeros_like(p.data)
                # Exponential moving average of Hessian diagonal square values
                state['exp_hessian_diag_sq'] = torch.zeros_like(p.data)

            exp_avg, exp_hessian_diag_sq = (
                state['exp_avg'],
                state['exp_hessian_diag_sq'],
            )

            beta1, beta2 = group['betas']

            state['step'] += 1

            # Decay the first and second moment running average coefficient.
            # NOTE: detach_() strips the autograd graph from p.grad in
            # place (reference implementation behaviour).
            exp_avg.mul_(beta1).add_(grad.detach_(), alpha=1 - beta1)
            exp_hessian_diag_sq.mul_(beta2).addcmul_(
                hut_trace, hut_trace, value=1 - beta2
            )

            bias_correction1 = 1 - beta1 ** state['step']
            bias_correction2 = 1 - beta2 ** state['step']

            # Bias-corrected root of the Hessian EMA raised to the Hessian
            # power k, plus eps for numerical stability.
            k = group['hessian_power']
            denom = (
                (exp_hessian_diag_sq.sqrt() ** k)
                / math.sqrt(bias_correction2) ** k
            ).add_(group['eps'])

            # make update (gradient step plus L2 weight decay term)
            p.data = p.data - group['lr'] * (
                exp_avg / bias_correction1 / denom
                + group['weight_decay'] * p.data
            )

        # Fixed: previously a bare ``return`` unconditionally returned
        # None, dropping the closure's loss.
        return loss

__all__ = ('A2GradUni', 'A2GradInc', 'A2GradExp')


class A2GradUni(Optimizer):
    r"""Implements A2GradUni Optimizer Algorithm.

    It has been proposed in `Optimal Adaptive and Accelerated Stochastic
    Gradient Descent`__.

    Arguments:
        params: iterable of parameters to optimize or dicts defining
            parameter groups
        lr: not used for this optimizer (default: None)
        beta:  (default: 10)
        lips: Lipschitz constant (default: 10)

    Example:
        >>> import torch_optimizer as optim
        >>> optimizer = optim.A2GradUni(model.parameters(), lips=10)
        >>> optimizer.zero_grad()
        >>> loss_fn(model(input), target).backward()
        >>> optimizer.step()

    __ https://arxiv.org/abs/1810.00553

    Note:
        Reference code: https://github.com/severilov/A2Grad_optimizer
    """

    def __init__(
        self,
        params: Params,
        lr: Optional[float] = None,
        beta: float = 10,
        lips: float = 10,
    ):
        # ``lr`` is not used by the algorithm; it is kept in the defaults
        # so that generic tests and LR schedulers do not fail.
        defaults = dict(beta=beta, lips=lips, lr=lr)
        if beta < 0.0:
            raise ValueError('Invalid beta value: {}'.format(beta))
        if lips < 0.0:
            raise ValueError('Invalid lips value: {}'.format(lips))

        super().__init__(params, defaults)

    def step(self, closure: OptLossClosure = None) -> OptFloat:
        r"""Performs a single optimization step.

        Arguments:
            closure: A closure that reevaluates the model and returns the loss.
        """
        loss = closure() if closure is not None else None

        for group in self.param_groups:
            for param in group['params']:
                if param.grad is None:
                    continue
                grad = param.grad.data
                state = self.state[param]

                if not state:
                    state['step'] = 0
                    state['alpha_k'] = 1
                    state['v_k'] = 0
                    state['avg_grad'] = copy.deepcopy(grad)
                    state['x_k'] = copy.deepcopy(param.data)

                step_count = state['step']
                gamma = 2 * group['lips'] / (step_count + 1)

                # Running average of all gradients seen so far.
                mean_grad = state['avg_grad']
                mean_grad.mul_(step_count).add_(grad).div_(step_count + 1)

                # Deviation of the current gradient from the running mean,
                # accumulated into v_k (uniform variant: plain sum).
                residual = torch.add(grad, mean_grad, alpha=-1)
                state['v_k'] += torch.sum(residual * residual).item()

                h = math.sqrt(state['v_k'])
                next_alpha = 2 / (step_count + 3)
                scale = 1 / (gamma + group['beta'] * h)

                # Proximal-point sequence x_k.
                proximal = state['x_k']
                proximal.add_(grad, alpha=-scale)

                # Convex combination of the iterate and x_k, plus a
                # momentum-like correction term.
                param.data.mul_(1 - next_alpha).add_(proximal, alpha=next_alpha)
                param.data.add_(
                    grad, alpha=-(1 - next_alpha) * state['alpha_k'] * scale
                )

                state['alpha_k'] = next_alpha
                state['step'] = step_count + 1

        return loss


class A2GradInc(Optimizer):
    r"""Implements A2GradInc Optimizer Algorithm.

    It has been proposed in `Optimal Adaptive and Accelerated Stochastic
    Gradient Descent`__.

    Arguments:
        params: iterable of parameters to optimize or dicts defining
            parameter groups
        lr: not used for this optimizer (default: None)
        beta:  (default: 10)
        lips: Lipschitz constant (default: 10)

    Example:
        >>> import torch_optimizer as optim
        >>> optimizer = optim.A2GradInc(model.parameters(), lips=10)
        >>> optimizer.zero_grad()
        >>> loss_fn(model(input), target).backward()
        >>> optimizer.step()

    __ https://arxiv.org/abs/1810.00553

    Note:
        Reference code: https://github.com/severilov/A2Grad_optimizer
    """

    def __init__(
        self,
        params: Params,
        lr: Optional[float] = None,
        beta: float = 10,
        lips: float = 10,
    ):
        if beta < 0.0:
            raise ValueError('Invalid beta value: {}'.format(beta))
        if lips < 0.0:
            # Fixed: the message previously said 'Invalid weight_decay
            # value' for a bad ``lips``.
            raise ValueError('Invalid lips value: {}'.format(lips))
        # ``lr`` is not used by the algorithm; kept for scheduler
        # compatibility.
        defaults = dict(beta=beta, lips=lips, lr=lr)
        super(A2GradInc, self).__init__(params, defaults)

    def step(self, closure: OptLossClosure = None) -> OptFloat:
        r"""Performs a single optimization step.

        Arguments:
            closure: A closure that reevaluates the model and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()

        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data
                state = self.state[p]

                if len(state) == 0:
                    state['step'] = 0
                    state['alpha_k'] = 1
                    state['v_k'] = 0
                    state['avg_grad'] = copy.deepcopy(grad)
                    state['x_k'] = copy.deepcopy(p.data)

                gamma_k = 2 * group['lips'] / (state['step'] + 1)

                # Running average of all gradients seen so far.
                avg_grad = state['avg_grad']
                avg_grad.mul_(state['step'])
                avg_grad.add_(grad)
                avg_grad.div_(state['step'] + 1)

                delta_k = torch.add(grad, avg_grad, alpha=-1)

                # Incremental variant: decay the accumulated deviation
                # before adding the new one.
                state['v_k'] *= (state['step'] / (state['step'] + 1)) ** 2
                state['v_k'] += torch.sum(delta_k * delta_k).item()

                h_k = math.sqrt(state['v_k'])
                alpha_k_1 = 2 / (state['step'] + 3)
                coef = 1 / (gamma_k + group['beta'] * h_k)
                x_k_1 = state['x_k']
                x_k_1.add_(grad, alpha=-coef)

                # Convex combination of the iterate and x_k, plus a
                # momentum-like correction term.
                p.data.mul_(1 - alpha_k_1)
                p.data.add_(x_k_1, alpha=alpha_k_1)
                p.data.add_(
                    grad, alpha=-(1 - alpha_k_1) * state['alpha_k'] * coef
                )

                state['alpha_k'] = alpha_k_1
                state['step'] += 1

        return loss


class A2GradExp(Optimizer):
    r"""Implements A2GradExp Optimizer Algorithm.

    It has been proposed in `Optimal Adaptive and Accelerated Stochastic
    Gradient Descent`__.

    Arguments:
        params: iterable of parameters to optimize or dicts defining
            parameter groups
        lr: not used for this optimizer (default: None)
        beta:  (default: 10)
        lips: Lipschitz constant (default: 10)
        rho: represents the degree of weighting decrease, a constant
            smoothing factor between 0 and 1 (default: 0.5)

    Example:
        >>> import torch_optimizer as optim
        >>> optimizer = optim.A2GradExp(model.parameters(), lips=10)
        >>> optimizer.zero_grad()
        >>> loss_fn(model(input), target).backward()
        >>> optimizer.step()

    __ https://arxiv.org/abs/1810.00553

    Note:
        Reference code: https://github.com/severilov/A2Grad_optimizer
    """

    def __init__(
        self,
        params: Params,
        lr: Optional[float] = None,
        beta: float = 10,
        lips: float = 10,
        rho: float = 0.5,
    ):
        # Fixed: validation previously ran AFTER super().__init__, so an
        # optimizer with invalid hyper-parameters was partially constructed
        # before raising (inconsistent with A2GradUni/A2GradInc).
        if beta < 0.0:
            raise ValueError('Invalid beta value: {}'.format(beta))
        if lips < 0.0:
            raise ValueError('Invalid lips value: {}'.format(lips))
        if rho < 0.0 or rho > 1.0:
            raise ValueError('Invalid rho value: {}'.format(rho))

        # ``lr`` is not used by the algorithm; kept for scheduler
        # compatibility.
        defaults = dict(beta=beta, lips=lips, rho=rho, lr=lr)
        super(A2GradExp, self).__init__(params, defaults)

    def step(self, closure: OptLossClosure = None) -> OptFloat:
        r"""Performs a single optimization step.

        Arguments:
            closure: A closure that reevaluates the model and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()

        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data
                state = self.state[p]

                if len(state) == 0:
                    state['step'] = 0
                    state['alpha_k'] = 1
                    state['v_k'] = 0
                    state['avg_grad'] = copy.deepcopy(grad)
                    state['x_k'] = copy.deepcopy(p.data)

                gamma_k = 2 * group['lips'] / (state['step'] + 1)

                # Running average of all gradients seen so far.
                avg_grad = state['avg_grad']
                avg_grad.mul_(state['step'])
                avg_grad.add_(grad)
                avg_grad.div_(state['step'] + 1)

                delta_k = torch.add(grad, avg_grad, alpha=-1)

                # Exponential variant: v_kk is an EMA of the squared
                # deviation with smoothing factor rho; v_k tracks its
                # running maximum.
                if state['step'] == 0:
                    state['v_kk'] = torch.sum(delta_k * delta_k).item()
                else:
                    state['v_kk'] *= group['rho']
                    state['v_kk'] += (1 - group['rho']) * torch.sum(
                        delta_k * delta_k
                    ).item()
                state['v_k'] = max([state['v_kk'], state['v_k']])

                h_k = math.sqrt((state['step'] + 1) * state['v_k'])

                alpha_k_1 = 2 / (state['step'] + 3)

                coef = -1 / (gamma_k + group['beta'] * h_k)
                x_k_1 = state['x_k']
                x_k_1.add_(grad, alpha=coef)

                # Convex combination of the iterate and x_k, plus a
                # momentum-like correction term.
                p.data.mul_(1 - alpha_k_1)
                p.data.add_(x_k_1, alpha=alpha_k_1)
                p.data.add_(
                    grad, alpha=(1 - alpha_k_1) * state['alpha_k'] * coef
                )

                state['alpha_k'] = alpha_k_1
                state['step'] += 1

        return loss