import numpy as np
from vector_util import l2_norm


def solve(f, g, h, x0, eps, max_iter):
    """
    Modified Newton optimizer with Wolfe line search,
    for comparison with trust region method.

    Whenever the Hessian's smallest eigenvalue falls below eps, a
    multiple of the identity is added so the Newton system is solved
    with a positive-definite matrix.

    :param f: objective function
    :param g: gradient
    :param h: hessian
    :param x0: starting point
    :param eps: precision
    :param max_iter: max iterations
    :return: (x, #iterations, #LS iterations, UNUSED, terminate reason)
    """
    identity = np.eye(x0.shape[0], dtype=np.float64)
    ls_total = 0
    xk = x0
    for k in range(max_iter):
        f_val = f(xk)
        grad = g(xk)
        # Converged once the gradient norm drops below the precision.
        if l2_norm(grad) < eps:
            return xk, k, ls_total, 0, "done"
        hess = h(xk)
        smallest = np.min(np.linalg.eigvalsh(hess))
        # Shift to make the modified Hessian positive definite.
        shift = max(0.0, eps - smallest)
        direction = np.linalg.solve(hess + shift * identity, -grad)
        # Guard: flip the direction if it is not a descent direction.
        if (grad @ direction) > 0:
            direction = -direction
        step, n_ls = wolfe_line_search(f, g, xk, direction, f_val, grad)
        ls_total += n_ls
        xk = xk + step * direction
    print("#WARNING: failed to converge")
    return xk, max_iter, ls_total, 0, "expired"


def wolfe_line_search(f, g, x0, d, f0, g0):
    """Run wolfe() with the default parameters used by the solver."""
    return wolfe(
        f, g, x0, d, f0, g0,
        0.25,      # c1: sufficient decrease factor
        0.75,      # c2: curvature factor
        1.0,       # initial step length
        10000.0,   # max step length
        100,       # max iterations
    )


def wolfe(f, g, x0, d, f0, g0, c1, c2, a_init, a_max, max_iter):
    """
    Perform Wolfe line search on specified function

    Works in two phases: first the trial step is doubled until either
    the sufficient decrease condition fails or a_max is reached (which
    brackets an acceptable step), then the bracket is bisected until
    both Wolfe conditions hold.

    :param f: objective function
    :param g: gradient of f
    :param x0: starting point
    :param d: search direction
    :param f0: f(x0)
    :param g0: g(x0)
    :param c1: sufficient decrease factor
    :param c2: curvature factor
    :param a_init: initial step length
    :param a_max: max step length
    :param max_iter: max iterations
    :return: (a, iterations)
    """

    def phi(a):
        # Objective restricted to the search ray.
        return f(x0 + a * d)

    def slope(a):
        # Directional derivative along d at step a.
        return g(x0 + a * d) @ d

    slope0 = g0 @ d
    if slope0 >= 0:
        print('###WARNING: not descent direction, g0^Td=%f' % slope0)
    if c1 > c2 or c1 < 0 or c2 > 1:
        print('###WARNING: bad argument, line search may not converge')
    armijo_slope = c1 * slope0
    curv_slope = c2 * slope0
    lo, hi = 0, a_init
    bracketed = False
    for it in range(max_iter):
        if not bracketed:
            # Expansion phase: grow the trial step until bracketed.
            trial = hi
            if phi(trial) > f0 + trial * armijo_slope:
                bracketed = True          # overshot: [lo, hi] brackets a step
            elif slope(trial) < curv_slope:
                hi = trial * 2            # too short: double and retry
                if hi >= a_max:
                    hi = a_max
                    bracketed = True
            else:
                return trial, it + 1      # both Wolfe conditions hold
        else:
            # Bisection phase on the bracket [lo, hi].
            trial = (lo + hi) * 0.5
            if phi(trial) > f0 + trial * armijo_slope:
                hi = trial                # sufficient decrease fails: shrink from above
            elif slope(trial) < curv_slope:
                lo = trial                # curvature fails: shrink from below
            else:
                return trial, it + 1
    print('###WARNING: line search failed to converge')
    return (lo + hi) * 0.5, max_iter
