import numpy as np
from typing import List, Dict, Any, Optional, Union

from ml_lib.optim import Optimizer

class Adam(Optimizer):
    """
    Adam optimizer.

    Implements "Adam: A Method for Stochastic Optimization"
    (Kingma & Ba), https://arxiv.org/abs/1412.6980
    """
    def __init__(self, params, lr=0.001, betas=(0.9, 0.999), eps=1e-8,
                 weight_decay=0):
        """
        Initialize the Adam optimizer.

        Args:
            params: parameters to optimize.
            lr: learning rate; must be >= 0.
            betas: coefficients for the exponential moving averages of the
                gradient and the squared gradient; each must lie in [0, 1).
            eps: small constant added to the denominator for numerical
                stability; must be >= 0.
            weight_decay: L2 weight-decay coefficient; must be >= 0.

        Raises:
            ValueError: if any hyperparameter is outside its valid range.
        """
        if lr < 0.0:
            raise ValueError(f"学习率必须大于等于0: {lr}")
        if eps < 0.0:
            raise ValueError(f"epsilon必须大于等于0: {eps}")
        if not (0.0 <= betas[0] < 1.0 and 0.0 <= betas[1] < 1.0):
            raise ValueError(f"beta参数必须在区间[0, 1)内: {betas}")
        if weight_decay < 0.0:
            raise ValueError(f"权重衰减系数必须大于等于0: {weight_decay}")

        defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)
        super().__init__(params, defaults)

    def step(self):
        """
        Perform a single optimization step over all parameter groups.

        Parameters without a gradient are skipped. Per-parameter optimizer
        state (step count and both moment estimates) is kept in
        ``self.state`` keyed by ``id(p)`` and persists across calls.
        """
        for group in self.param_groups:
            for p in group['params']:
                if not hasattr(p, 'grad') or p.grad is None:
                    continue

                grad = p.grad

                # Normalize grad to a numpy array.
                # NOTE: check isinstance first — np.ndarray itself exposes a
                # `.data` memoryview attribute, so a hasattr(.., 'data') test
                # alone would misroute plain ndarrays.
                if isinstance(grad, np.ndarray):
                    pass
                elif hasattr(grad, 'data'):  # Tensor-like wrapper
                    grad = np.array(grad.data)
                else:
                    grad = np.array(grad)

                # Normalize p.data to a numpy array we can update out of
                # place. BUG FIX: the original tested hasattr(p.data, 'data')
                # first, which is True for ndarrays too (their .data is a
                # memoryview), making the intended copy() branch unreachable
                # and round-tripping ndarrays through a memoryview (fails for
                # non-contiguous arrays). Test the concrete type first.
                if isinstance(p.data, np.ndarray):
                    p_data = p.data.copy()  # copy so the original is untouched
                elif hasattr(p.data, 'data'):  # Tensor-like wrapper
                    p_data = np.array(p.data.data)
                else:
                    p_data = np.array(p.data)

                # Fold the L2 penalty into the gradient.
                if group['weight_decay'] != 0:
                    grad = grad + group['weight_decay'] * p_data

                # Per-parameter state. BUG FIX: the original used
                # self.state.get(id(p), {}) and never stored the fresh dict
                # back, so step count and both moment estimates were reset on
                # every call; setdefault registers the dict so it persists.
                state = self.state.setdefault(id(p), {})
                if len(state) == 0:
                    state['step'] = 0
                    # Exponential moving average of the gradient.
                    state['exp_avg'] = np.zeros_like(p_data)
                    # Exponential moving average of the squared gradient.
                    state['exp_avg_sq'] = np.zeros_like(p_data)

                beta1, beta2 = group['betas']
                state['step'] += 1

                # Update the first and second moment estimates.
                exp_avg = beta1 * state['exp_avg'] + (1 - beta1) * grad
                exp_avg_sq = beta2 * state['exp_avg_sq'] + (1 - beta2) * grad * grad

                # Bias corrections for the zero-initialized moments.
                bias_correction1 = 1 - beta1 ** state['step']
                bias_correction2 = 1 - beta2 ** state['step']

                # Adaptive denominator with bias-corrected second moment.
                denom = np.sqrt(exp_avg_sq) / np.sqrt(bias_correction2) + group['eps']

                # Parameter update with bias-corrected step size.
                step_size = group['lr'] / bias_correction1
                p.data = p_data - step_size * exp_avg / denom

                # Persist the updated moment estimates.
                state['exp_avg'] = exp_avg
                state['exp_avg_sq'] = exp_avg_sq