# @Time : 2021/8/7 16:34
# @Author : Li Kunlun
# @Description : AdaGrad算法
import utils as d2l
import math
from mxnet import nd


# 1、对目标函数 f(x) = 0.1*(x_1)^2 + 2*(x_2)^2为例观察AdaGrad算法对自变量的迭代轨迹
def adagrad_2d(x1, x2, s1, s2):
    """Perform one AdaGrad step on f(x) = 0.1*x1^2 + 2*x2^2.

    (s1, s2) accumulate the squared per-coordinate gradients; each
    coordinate's step is the global learning rate ``eta`` scaled by
    1/sqrt(accumulated state). Returns the updated (x1, x2, s1, s2).
    """
    eps = 1e-6  # keeps the denominator strictly positive
    # Analytic gradients of the objective w.r.t. x1 and x2.
    grad1 = 0.2 * x1
    grad2 = 4 * x2
    s1 = s1 + grad1 ** 2
    s2 = s2 + grad2 ** 2
    x1 = x1 - eta / math.sqrt(s1 + eps) * grad1
    x2 = x2 - eta / math.sqrt(s2 + eps) * grad2
    return x1, x2, s1, s2


def f_2d(x1, x2):
    """Quadratic objective f(x1, x2) = 0.1*x1**2 + 2*x2**2 (an elongated bowl)."""
    c1, c2 = 0.1, 2
    return c1 * x1 ** 2 + c2 * x2 ** 2


# Learning rate for the 2-D demo; read as a module global by adagrad_2d.
eta = 0.4
# Run the 2-D optimizer and plot the trajectory of (x1, x2) over f_2d.
d2l.show_trace_2d(f_2d, d2l.train_2d(adagrad_2d))

# 2. Implementation from scratch.
# As with momentum, AdaGrad maintains a state variable of the same shape
# as each parameter (here used to accumulate squared gradients).
features, labels = d2l.get_data_ch7()  # training data loaded via the d2l utils helper


def init_adagrad_states():
    """Create zero-initialized AdaGrad accumulators for (weight, bias).

    The weight state is sized from the module-level ``features`` so it
    matches the linear model's weight shape (num_features, 1).
    """
    weight_state = nd.zeros((features.shape[1], 1))
    bias_state = nd.zeros(1)
    return weight_state, bias_state


def adagrad(params, states, hyperparams):
    """Apply one in-place AdaGrad update to every parameter.

    For each parameter ``p`` with accumulator ``s``:

        s <- s + grad(p) ** 2                  (elementwise)
        p <- p - lr * grad(p) / sqrt(s + eps)

    ``eps`` keeps the denominator positive before the square root.
    ``hyperparams['lr']`` supplies the learning rate. Updates happen
    through ``[:]`` slice assignment so the NDArrays are modified
    in place rather than rebound.
    """
    eps = 1e-6
    lr = hyperparams['lr']
    for param, state in zip(params, states):
        grad = param.grad
        # Accumulate the elementwise squared gradient into the state.
        state[:] += grad.square()
        # Per-element step: larger accumulated gradients shrink the step.
        param[:] -= lr * grad / (state + eps).sqrt()


# Train a model with the from-scratch AdaGrad implementation above.
d2l.train_ch7(adagrad, init_adagrad_states(), {'lr': 0.1}, features, labels)

print("------------------------简洁实现----------------------------------------------")
# Same experiment using the framework's built-in 'adagrad' trainer
# ("简洁实现" = concise implementation).
d2l.train_gluon_ch7('adagrad', {'learning_rate': 0.1}, features, labels)
