import numpy as np
from ml_lib.optim import Optimizer

class SGD(Optimizer):
    """
    Stochastic gradient descent optimizer.

    Supports classical and Nesterov momentum, dampening, and
    weight decay (L2 penalty), operating on numpy arrays.
    """
    def __init__(self, params, lr=0.01, momentum=0, dampening=0,
                 weight_decay=0, nesterov=False):
        """
        Initialize the SGD optimizer.

        Args:
            params: iterable of parameters to optimize
            lr: learning rate (must be non-negative)
            momentum: momentum factor, default 0 (no momentum)
            dampening: dampening applied to the gradient term of the
                momentum update, default 0
            weight_decay: weight decay (L2 penalty) coefficient, default 0
            nesterov: whether to use Nesterov momentum

        Raises:
            ValueError: if lr, momentum, or weight_decay is negative, or
                if nesterov is requested with momentum <= 0 or
                dampening != 0.
        """
        if lr < 0.0:
            raise ValueError(f"学习率必须大于等于0: {lr}")
        if momentum < 0.0:
            raise ValueError(f"动量系数必须大于等于0: {momentum}")
        if weight_decay < 0.0:
            raise ValueError(f"权重衰减系数必须大于等于0: {weight_decay}")

        # Nesterov requires an active, undamped momentum term.
        if nesterov and (momentum <= 0 or dampening != 0):
            raise ValueError("Nesterov动量需要动量大于0且dampening为0")

        defaults = dict(lr=lr, momentum=momentum, dampening=dampening,
                        weight_decay=weight_decay, nesterov=nesterov)
        super().__init__(params, defaults)

    def step(self):
        """
        Perform a single optimization step over all parameter groups.

        For each parameter p with a gradient, computes
            g  = grad + weight_decay * p
            v  = momentum * v + (1 - dampening) * g   (if momentum != 0)
            p -= lr * (g + momentum * v)   if nesterov else   lr * v
        and writes the updated values back to the parameter.
        """
        for group in self.param_groups:
            weight_decay = group['weight_decay']
            momentum = group['momentum']
            dampening = group['dampening']
            nesterov = group['nesterov']
            lr = group['lr']

            for p in group['params']:
                # Skip parameters without a gradient this step.
                # NOTE(review): gradients are read from p.data.grad rather
                # than p.grad — confirm against the project's parameter API.
                if not hasattr(p.data, 'grad') or p.data.grad is None:
                    continue

                # Unwrap a possibly nested tensor-like container to the
                # raw value array. (The original's `grad = p.grad` fallback
                # was unreachable — the guard above already ensures
                # p.data.grad exists — so it has been removed.)
                if hasattr(p.data, 'data'):
                    p_data = p.data.data
                else:
                    p_data = p.data
                grad = p.data.grad

                # Normalize both operands to numpy arrays.
                if not isinstance(grad, np.ndarray):
                    grad = np.array(grad)
                if not isinstance(p_data, np.ndarray):
                    p_data = np.array(p_data)

                # L2 regularization: fold weight decay into the gradient.
                if weight_decay != 0:
                    grad = grad + weight_decay * p_data

                if momentum != 0:
                    # BUG FIX: the original used self.state.get(id(p), {}),
                    # which returns a fresh dict that was never stored back
                    # into self.state — the momentum buffer was therefore
                    # lost after every step. setdefault persists the state.
                    param_state = self.state.setdefault(id(p), {})
                    buf = param_state.get('momentum_buffer')
                    if buf is None:
                        buf = np.zeros_like(p_data)

                    # v = momentum * v + (1 - dampening) * g
                    buf = momentum * buf + (1 - dampening) * grad
                    param_state['momentum_buffer'] = buf

                    if nesterov:
                        # Look-ahead step along the velocity direction.
                        grad = grad + momentum * buf
                    else:
                        grad = buf

                # Compute the update into a new array (no in-place math),
                # then write back a copy to avoid aliasing local buffers.
                new_data = p_data - lr * grad
                if hasattr(p.data, 'data'):
                    p.data.data = new_data.copy()
                else:
                    p.data = new_data.copy()