import numpy as np


def numerical_diff(f, x):
    """Estimate f'(x) with a symmetric (central) difference quotient."""
    eps = 1e-4  # small step; central form cancels the O(h) error term
    upper = f(x + eps)
    lower = f(x - eps)
    return (upper - lower) / (2 * eps)


def function(x):
    """Demo scalar function: 0.01*x^2 + 0.1*x."""
    quadratic_term = 0.01 * x ** 2
    linear_term = 0.1 * x
    return quadratic_term + linear_term


def function2(x):
    """Demo function of an array: the sum of squared elements."""
    squared = x ** 2
    return np.sum(squared)


def line_function(x):
    """Build the tangent line to `function` at point x.

    Returns a callable t -> slope*t + intercept, where the slope is the
    numerical derivative of `function` at x.
    """
    slope = numerical_diff(function, x)
    intercept = function(x) - slope * x

    def tangent(t):
        return slope * t + intercept

    return tangent


# Gradient descent
def gradient_descent(func, init_x, lr=0.01, step_num=100):
    """Minimize func by plain (batch) gradient descent.

    :param func: scalar-valued function of the parameter array
    :param init_x: starting parameter array; it is NOT modified (a float
                   copy is taken)
    :param lr: learning rate
    :param step_num: number of update steps to run
    :return: the parameter array after step_num updates
    """
    # Work on a float copy: `x -= lr * grad` is an in-place numpy update,
    # so without the copy the caller's init_x would be silently mutated,
    # and an integer-dtype init_x would fail/truncate on the update.
    x = np.array(init_x, dtype=float)
    for _ in range(step_num):
        grad = numerical_gradient(func, x)
        x -= lr * grad
    return x


def numerical_gradient(f, x):
    '''
    Numerically estimate the gradient of f at x via central differences.

    :param f: scalar-valued function that takes the whole array x
    :param x: parameter array (typically a weight matrix); must be a float
              array, because its elements are perturbed in place (each
              element is restored before the function returns)
    :return: array of the same shape as x with the partial derivative of
             f for every element
    '''
    h = 1e-4
    # Force a float result buffer: np.zeros_like(x) alone inherits x's
    # dtype, and an integer dtype would truncate every quotient below to 0.
    grad = np.zeros_like(x, dtype=float)
    # np.nditer walks every element of x regardless of its shape;
    # op_flags='readwrite' allows perturbing x in place through the view.
    it = np.nditer(x, flags=['multi_index'], op_flags=['readwrite'])
    while not it.finished:
        idx = it.multi_index  # index of the current element
        tmp_val = x[idx]
        x[idx] = float(tmp_val) + h
        fxh1 = f(x)  # f(x+h)

        x[idx] = tmp_val - h
        fxh2 = f(x)  # f(x-h)
        grad[idx] = (fxh1 - fxh2) / (2 * h)

        x[idx] = tmp_val  # restore the original value
        it.iternext()
    return grad


if __name__ == '__main__':
    init_x = np.array([-3.0, 4.0])
    # Other demos to try:
    # init_x = np.random.randn(2, 3)
    # print(gradient_descent(function2, init_x=init_x, lr=0.1))

    # Demo: visit every element of the array and print its multi-index.
    for index, _value in np.ndenumerate(init_x):
        print(index)
