import numpy as np

def grad(f, x):
    """Numerically estimate the gradient of ``f`` at ``x``.

    Uses central differences, (f(x + h*e_i) - f(x - h*e_i)) / (2h), for each
    component i. The input array is never modified, and arrays of any shape
    (not just 1-D) and any numeric dtype are supported.

    f: callable taking an ndarray and returning a scalar
    x: array-like point at which to evaluate the gradient
    returns: float ndarray of the same shape as x
    """
    h = 1e-6
    # Work on a float copy: perturbing an integer array by h would truncate
    # to a no-op (and a zeros_like of an int array would truncate the
    # results); copying also avoids temporarily mutating the caller's array.
    xf = np.asarray(x, dtype=float).copy()
    g = np.zeros_like(xf)
    for idx in range(xf.size):  # flat iteration generalizes beyond 1-D input
        orig = xf.flat[idx]
        xf.flat[idx] = orig + h
        fxh1 = f(xf)

        xf.flat[idx] = orig - h
        fxh2 = f(xf)

        g.flat[idx] = (fxh1 - fxh2) / (2 * h)
        xf.flat[idx] = orig  # restore before perturbing the next component
    return g

def grad_descent(f, x, lr=0.01, step_num=100):
    """Minimize ``f`` by fixed-step gradient descent.

    f: callable taking an ndarray and returning a scalar
    x: array-like starting point (left unmodified)
    lr: learning rate (step size)
    step_num: number of descent steps to take
    returns: float ndarray holding the point after step_num updates
    """
    # Cast to float and rebind rather than `x -= ...`: the in-place update
    # mutated the caller's array and raises a numpy casting error when x has
    # an integer dtype and the gradient is float.
    x = np.asarray(x, dtype=float)
    for _ in range(step_num):
        x = x - lr * grad(f, x)
    return x


def line_search(f, x, n, nabla):
    # NOTE(review): delegates to goldSection, whose parameters are
    # (function, interval start, interval end, tolerance). Mapping
    # (x, n, nabla) onto (a, b, epsilon) looks suspicious -- `nabla`
    # (presumably a gradient) ends up used as the convergence tolerance.
    # Confirm the intended call signature against the callers.
    return goldSection(f, x, n, nabla)


def goldSection(f, a, b, epsilon):
    """Golden-section search for a minimum of ``f`` on [a, b].

    Assumes f is unimodal on the interval. Repeatedly shrinks [a, b] using
    the golden-ratio split points (0.382 / 0.618 approximations), reusing
    one interior evaluation per iteration, until the bracket is narrower
    than epsilon. At least one iteration is always performed.

    f: unimodal callable of one scalar
    a, b: bracket endpoints (a < b)
    epsilon: bracket-width stopping tolerance
    returns: midpoint of the final bracket
    """
    x1 = a + 0.382 * (b - a)
    x2 = a + 0.618 * (b - a)
    f1 = f(x1)
    f2 = f(x2)
    while True:
        if f1 < f2:
            # Minimum lies in [a, x2]; old x1 becomes the new upper probe.
            b = x2
            f2 = f1
            x2 = x1
            x1 = a + 0.382 * (b - a)
            f1 = f(x1)
        else:
            # Minimum lies in [x1, b]; old x2 becomes the new lower probe.
            a = x1
            f1 = f2
            x1 = x2
            x2 = a + 0.618 * (b - a)
            f2 = f(x2)

        if (b - a) < epsilon:
            break

    return (a + b) / 2.0

def normal_descent(f, x, epsilon):
    """Steepest-descent minimization with a golden-section line search.

    At each iteration, steps along the negative gradient by the step length
    that minimizes f along that direction (found via goldSection on
    [-100, 100]), stopping once every squared gradient component is at most
    epsilon.

    f: callable taking an ndarray and returning a scalar
    x: array-like starting point (left unmodified)
    epsilon: per-component squared-gradient stopping tolerance
    returns: float ndarray holding the approximate minimizer
    """
    # Float cast up front so the arithmetic below never truncates and the
    # caller's array is never touched; debug print of x removed.
    x = np.asarray(x, dtype=float)
    # One gradient evaluation per iteration (the original computed an extra,
    # unnegated gradient before the loop; only its square was ever used).
    d = -grad(f, x)
    while np.any(d ** 2 > epsilon):
        theta = lambda alpha: f(x + d * alpha)
        alphak = goldSection(theta, -100, 100, 0.001)
        x = x + alphak * d
        d = -grad(f, x)
    return x


def unconstrained_optimize(f, x, epsilon):
    """Minimize ``f`` from starting point ``x``; delegates to normal_descent."""
    result = normal_descent(f, x, epsilon)
    return result

def func_line_1(x):
    """1-D cubic test function: x^3 - 2*x^2 + 1."""
    x_sq = x * x
    return x_sq * x - 2 * x_sq + 1

def func2(x):
    """2-D quadratic test function: 2*x1^2 - 2*x1*x2 + x2^2 + 2*x1 - 2*x2."""
    first, second = x[0], x[1]
    return 2 * first ** 2 - 2 * first * second + second ** 2 + 2 * first - 2 * second

if __name__ == '__main__':
    # Demo: gradient descent on a simple quadratic bowl, starting at (-3, 4);
    # the minimizer is the origin.
    objective = lambda p: p[0] ** 2 + p[1] ** 2
    start = np.array([-3.0, 4.0])
    print(grad_descent(objective, start, 0.1))
