from collections import defaultdict
from copy import deepcopy
from math import sqrt
import torch
import typing

class BaseOptimizer:
    """Minimal optimizer interface: tracks parameters and a learning rate.

    Subclasses override ``step`` to apply a concrete update rule.
    """

    def __init__(self, params, lr=0.001):
        # Materialize the (possibly one-shot generator) iterable once so the
        # parameter list can be walked on every step.
        self.params = list(params)
        self.lr = lr

    def step(self):
        """Apply one parameter update; must be provided by subclasses."""
        raise NotImplementedError

    def zero_grad(self):
        """Detach and zero out the gradient of every tracked parameter."""
        for param in self.params:
            grad = param.grad
            if grad is None:
                continue
            grad.detach_()
            grad.zero_()


class GdOptimizer(BaseOptimizer):
    """Plain gradient descent: ``p <- p - lr * p.grad``."""

    def __init__(self, params, lr=0.001):
        super().__init__(params, lr)

    @torch.no_grad()
    def step(self, closure=None):
        """Take one gradient-descent step over all tracked parameters.

        Args:
            closure: optional callable that re-evaluates the model and
                returns the loss; it is executed with gradients enabled.

        Returns:
            The loss produced by ``closure``, or ``None`` if no closure
            was supplied.
        """
        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()

        for param in self.params:
            assert isinstance(param, torch.Tensor)
            grad = param.grad
            if grad is None:
                continue
            # In-place descent step along the negative gradient.
            param.add_(grad, alpha=-self.lr)
        return loss

class AdamOptimizer(BaseOptimizer):
    """Adam optimizer (Kingma & Ba, 2015).

    Uses the "efficient" bias-correction form: instead of correcting the
    moment estimates m and v directly, the step size is rescaled by
    ``sqrt(1 - beta_2^t) / (1 - beta_1^t)`` each step, which is
    algebraically equivalent up to the placement of ``eps``.
    """

    def __init__(self, params, alpha=0.001, beta_1=0.9, beta_2=0.999, eps=1e-8, maximize=False):
        """
        Args:
            params: iterable of ``torch.Tensor`` parameters to optimize.
            alpha: base step size (learning rate).
            beta_1: exponential decay rate for the first-moment estimate.
            beta_2: exponential decay rate for the second-moment estimate.
            eps: term added to the denominator for numerical stability.
            maximize: if True, ascend the gradient instead of descending.
        """
        # Delegate to the base class so self.params and self.lr are set
        # consistently with the other optimizers in this file (the original
        # code skipped this, leaving self.lr undefined).
        super().__init__(params, lr=alpha)
        self.alpha = alpha
        self.beta_1 = beta_1
        self.beta_2 = beta_2
        self.eps = eps
        self.maximize = maximize

        # Per-parameter state holding the first ('m') and second ('v')
        # moment buffers, created lazily on first use.
        self.state = defaultdict(dict)
        # Global step counter; all parameters are assumed to step together.
        self.t = 0

    @torch.no_grad()
    def step(self, closure=None):
        """Perform a single Adam update over all tracked parameters.

        Args:
            closure: optional callable that re-evaluates the model and
                returns the loss; it is executed with gradients enabled.

        Returns:
            The loss produced by ``closure``, or ``None`` if no closure
            was supplied.
        """
        self.t += 1
        t = self.t
        beta_1, beta_2 = self.beta_1, self.beta_2

        # Bias-corrected step size depends only on t, so compute it once
        # instead of once per parameter. Sign encodes descent vs. ascent:
        # flipping the update sign is equivalent to negating the gradient
        # before accumulation (m is linear in g, v is quadratic).
        step_size = self.alpha * sqrt(1 - beta_2 ** t) / (1 - beta_1 ** t)
        signed_step = step_size if self.maximize else -step_size

        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()

        for p in self.params:
            assert isinstance(p, torch.Tensor)
            grad = p.grad
            if grad is None:
                continue

            state = self.state[p]
            if not state:
                # First update for this parameter: allocate moment buffers.
                state['m'] = torch.zeros_like(p, memory_format=torch.preserve_format)
                state['v'] = torch.zeros_like(p, memory_format=torch.preserve_format)

            # m <- beta_1 * m + (1 - beta_1) * g
            # v <- beta_2 * v + (1 - beta_2) * g * conj(g)
            # (in-place; the ops mutate the stored buffers directly)
            state['m'].mul_(beta_1).add_(grad, alpha=1 - beta_1)
            state['v'].mul_(beta_2).addcmul_(grad, grad.conj(), value=1 - beta_2)

            # p <- p + signed_step * m / (sqrt(v) + eps)
            p.addcdiv_(state['m'], state['v'].sqrt().add_(self.eps), value=signed_step)

        return loss