import numpy as np
class AdaGrad:
    """AdaGrad learning-rate adapter.

    Accumulates the element-wise sum of squared gradients across calls to
    :meth:`update` and scales the (fixed) base learning rate by
    ``1 / (sqrt(sum) + eps)``.
    """

    def __init__(self, learning_rate=0.001):
        self.learning_rate = learning_rate  # base learning rate; never mutated by update()
        self.sum = 0  # running sum of squared gradients (scalar, or array after first array grad)

    def set_lr_Rate(self, learning_rate=0.001):
        """Reset the base learning rate (method name kept for caller compatibility)."""
        self.learning_rate = learning_rate

    def update(self, grads):
        """Accumulate squared gradients and return the adapted learning rate.

        Parameters
        ----------
        grads : sequence of scalars or numpy arrays
            Gradients from the current step.

        Returns
        -------
        float
            Adapted learning rate ``lr / (sqrt(sum) + 1e-7)``; when the
            accumulated sum is an array, the first element is returned
            (matching the original's ``[0]`` indexing).
        """
        for g in grads:  # iterate values directly instead of range(len(...))
            self.sum += g * g  # accumulate squared gradients across all calls
        # BUG FIX: the original assigned the adapted rate back to
        # self.learning_rate, so the base rate shrank compoundingly on every
        # call (the division was applied repeatedly). Keep the base rate
        # intact and compute the adapted rate fresh each update.
        adapted = self.learning_rate / (np.sqrt(self.sum) + 1e-7)  # 1e-7 avoids division by zero
        # Handle both array and scalar accumulators; the original crashed
        # with TypeError on scalar gradients (float is not subscriptable).
        return float(np.asarray(adapted).reshape(-1)[0])