"""
    偏导数 -> 梯度下降法
    gradient descent method
    gradient ascent method
"""
import numpy as np

def numerical_gradient(f, x):
    """
    Compute the numerical gradient of f at point x.

    Each component is the central difference (f(x+h) - f(x-h)) / (2h).
    The original implementation had two duplicated loops handling only
    1-D and 2-D arrays; np.nditer with a multi-index walks the elements
    of an array of any rank, so this version also generalizes to rank >= 3.
    x is perturbed in place during evaluation but always restored, so the
    caller's array is unchanged on return.

    :param f:   function taking the whole array x and returning a scalar
    :param x:   point (numpy float array) at which to evaluate the gradient
    :return:    array of partial derivatives, same shape as x
    """
    h = 1e-4
    # Result array with the same shape as the input point.
    grad = np.zeros_like(x)

    it = np.nditer(x, flags=['multi_index'])
    while not it.finished:
        idx = it.multi_index
        # Remember the element so it can be restored afterwards.
        tmp_val = x[idx]

        # f(x+h)
        x[idx] = tmp_val + h
        fxh1 = f(x)

        # f(x-h)
        x[idx] = tmp_val - h
        fxh2 = f(x)

        grad[idx] = (fxh1 - fxh2) / (2 * h)
        # Restore the element at idx.
        x[idx] = tmp_val
        it.iternext()

    return grad

def gradient_descent(f, init_x, lr=0.01, step_num=100):
    """
    Minimize f by plain gradient descent.

    Each step moves against the numerical gradient: x <- x - lr * grad.
    The subtraction rebinds x to a fresh array, so init_x itself is not
    mutated by the updates.

    :param f:           scalar function of an array
    :param init_x:      starting point (numpy array)
    :param lr:          learning rate (step size)
    :param step_num:    number of update steps to take
    :return:            the point reached after step_num updates
    """
    x = init_x
    for _ in range(step_num):
        grad = numerical_gradient(f, x)
        x = x - lr * grad
    return x

def function_test(x):
    """Sample objective f(x) = x0^2 + x1^2, minimized at the origin."""
    x0, x1 = x[0], x[1]
    return x0 * x0 + x1 * x1

def numerical_gradient_test():
    """Print the numerical gradient of function_test at a few sample points."""
    sample_points = ([3.0, 4.0], [0.0, 2.0], [3.0, 0.0])
    for point in sample_points:
        print(numerical_gradient(function_test, np.array(point)))

def gradient_descent_test():
    """Run gradient descent from (3, 4) with a good, too-large, and too-small lr."""
    init_x = np.array([3.0, 4.0])
    for rate in (0.1, 10, 1e-10):
        print(gradient_descent(function_test, init_x, rate, 100))

if __name__ == '__main__':
    # numerical_gradient_test()  # enable to print raw gradients at sample points
    gradient_descent_test()


