import math
import torch
from torch import nn
from typing import Optional, Tuple, Callable


class SGD(torch.optim.Optimizer):
    """Stochastic gradient descent with a 1/sqrt(t + 1) learning-rate decay."""

    def __init__(self, params, lr: float = 1e-3):
        """
        Build the optimizer.

        Args:
            params:     Iterable of parameters (or param groups) to update.
            lr (float): Base learning rate; must be non-negative.

        Raises:
            ValueError: If ``lr`` is negative.
        """
        if lr < 0:
            raise ValueError(f"Invalid learning rate: {lr}")
        super().__init__(params, {"lr": lr})

    def step(self, closure: Optional[Callable] = None):
        """Perform a single parameter update and return the closure's loss, if any."""
        loss = closure() if closure is not None else None

        for group in self.param_groups:
            base_lr = group["lr"]

            param: nn.Parameter
            for param in group["params"]:
                if param.grad is None:
                    continue  # Nothing to do for parameters without gradients.

                state = self.state[param]
                iteration = state.get("t", 0)  # Per-parameter step counter, starts at 0.

                # Effective step size decays with the iteration count.
                scale = base_lr / math.sqrt(iteration + 1)
                param.data.add_(param.grad.data, alpha=-scale)

                state["t"] = iteration + 1

        return loss


class AdamW(torch.optim.Optimizer):
    """AdamW optimizer: Adam with decoupled weight decay.

    Follows Loshchilov & Hutter, "Decoupled Weight Decay Regularization":
    the weight-decay term is applied directly to the weights instead of
    being folded into the gradient.
    """

    def __init__(
        self,
        params,
        lr: float,
        weight_decay: float,
        betas: Tuple[float, float],
        eps: float = 1e-8,
    ) -> None:
        """
        Build the optimizer.

        Args:
            params:                      Iterable of parameters (or groups) to update.
            lr (float):                  Learning rate; must be non-negative.
            weight_decay (float):        Decoupled weight-decay coefficient; non-negative.
            betas (Tuple[float, float]): (beta1, beta2) decay rates for the first and
                                         second gradient moments; each in [0, 1).
            eps (float):                 Denominator term for numerical stability.

        Raises:
            ValueError: If any hyperparameter is out of range.
        """
        # Validate hyperparameters up front (consistent with SGD above) so a bad
        # configuration fails loudly instead of silently corrupting training.
        if lr < 0:
            raise ValueError(f"Invalid learning rate: {lr}")
        if weight_decay < 0:
            raise ValueError(f"Invalid weight_decay value: {weight_decay}")
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError(f"Invalid beta parameter at index 0: {betas[0]}")
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError(f"Invalid beta parameter at index 1: {betas[1]}")
        if eps < 0:
            raise ValueError(f"Invalid epsilon value: {eps}")

        defaults = {
            "lr": lr,
            "weight_decay": weight_decay,
            "betas": betas,
            "eps": eps,
        }
        super().__init__(params, defaults)

    def step(self, closure: Optional[Callable] = None):
        """Perform a single parameter update and return the closure's loss, if any."""
        loss = None if closure is None else closure()

        for group in self.param_groups:
            lr: float = group["lr"]
            weight_decay: float = group["weight_decay"]
            b1, b2 = group["betas"]
            eps: float = group["eps"]

            p: nn.Parameter
            for p in group["params"]:  # Visit every parameter tensor in the group.
                if p.grad is None:
                    continue

                state = self.state[p]
                if len(state) == 0:
                    # Lazy state init: t starts at 1 so the bias-correction
                    # terms (1 - beta^t) are well-defined on the first step.
                    state["t"] = 1
                    state["m"] = torch.zeros_like(p)
                    state["v"] = torch.zeros_like(p)

                grad = p.grad.data  # Gradient of the loss w.r.t. p.
                t = state["t"]  # Current step number.
                m = state["m"]  # First-moment (mean) estimate.
                v = state["v"]  # Second-moment (uncentered variance) estimate.

                # Exponential moving averages of the gradient and its square.
                m = b1 * m + (1 - b1) * grad
                state["m"] = m
                v = b2 * v + (1 - b2) * (grad**2)
                state["v"] = v

                # Bias-corrected step size for step t.
                lr_t = lr * math.sqrt(1 - (b2**t)) / (1 - (b1**t))

                # Adam update, then decoupled weight decay on the weights themselves.
                p.data -= lr_t * m / (torch.sqrt(v) + eps)
                p.data -= lr * weight_decay * p.data

                state["t"] = t + 1

        return loss


if __name__ == "__main__":
    # Small experiment: how does SGD's final loss behave across learning rates?
    # Each run must start from the SAME initial weights — the original reused one
    # Parameter across all runs, so every run after the first started from the
    # previous run's (possibly diverged) weights. Seed + clone fixes that.
    torch.manual_seed(0)
    initial = torch.randn(10, 10)

    for lr in [1, 10, 100, 1000]:
        weights = nn.Parameter(initial.clone())
        optimizer = SGD([weights], lr=lr)

        for _ in range(10):
            optimizer.zero_grad()
            loss = (weights**2).mean()
            loss.backward()
            optimizer.step()

        print(f"lr: {lr},  loss: {loss.cpu().item()}")
