import numpy as np
import optimizer_common
import scipy.sparse.linalg


def _gmres_solve(a_mat, rhs, d_init, eta, maxiter):
    """Solve a_mat @ d = rhs with GMRES, compatible with old and new SciPy.

    SciPy renamed the relative-tolerance keyword of ``gmres`` from ``tol``
    to ``rtol`` (deprecated in 1.12, removed in 1.14). Try the modern name
    first and fall back to ``tol`` for older installations.

    :param a_mat: system matrix (the Hessian at the current iterate)
    :param rhs: right-hand side vector
    :param d_init: initial guess for the solution
    :param eta: relative residual tolerance (the forcing term)
    :param maxiter: maximum number of GMRES iterations
    :return: (solution, info) as returned by ``scipy.sparse.linalg.gmres``;
             info == 0 means converged, info > 0 means the iteration limit
             was hit, info < 0 signals an illegal input or breakdown
    """
    try:
        return scipy.sparse.linalg.gmres(A=a_mat, b=rhs, x0=d_init, rtol=eta, atol=0.0, maxiter=maxiter)
    except TypeError:
        # Older SciPy (< 1.12) only accepts the ``tol`` keyword.
        return scipy.sparse.linalg.gmres(A=a_mat, b=rhs, x0=d_init, tol=eta, atol=0.0, maxiter=maxiter)


def inexact_newton(f, g, h, x0, max_iter, eps, eta_fun, eta_init):
    """
    Inexact Newton optimizer

    Each outer iteration solves the Newton system H(x) d = -g(x) only
    approximately with GMRES, to a relative residual eta supplied by the
    forcing sequence ``eta_fun``, then takes a line-search step along d
    via ``optimizer_common.step``.

    :param f: objective function
    :param g: gradient
    :param h: hessian
    :param x0: starting point
    :param max_iter: maximum number of iterations
    :param eps: precision
    :param eta_fun: forcing sequence selection strategy; called as
                    eta_fun(previous_gradient, current_gradient)
    :param eta_init: initial eta used for the very first GMRES solve
    :return: (x*, #iterations, #LS iterations, terminate reason)
    """
    n = x0.shape[0]
    # Zero initial guess for every inner GMRES solve.
    d_init = np.zeros(n, dtype=np.float64)
    it_ls_sum = 0
    # Track the best iterate seen so far, since a step is not guaranteed
    # to decrease f monotonically.
    x_opt = x = x0
    f_opt = fk = f(x)
    gk = g(x)
    eta = eta_init
    for it in range(max_iter):
        if optimizer_common.is_stopping(x, fk, gk, eps):
            return x_opt, it, it_ls_sum, 'done'
        # Approximately solve H(x) d = -g(x) to relative residual eta.
        d, solve_info = _gmres_solve(h(x), -gk, d_init, eta, 100)
        if solve_info != 0:
            print("###WARNING: failed to converge in solving linear equation")
        x_new, f_new, it_ls = optimizer_common.step(f, g, x, d, fk, gk)
        it_ls_sum += it_ls
        fk = f_new
        x = x_new
        last_g = gk
        gk = g(x)
        # Choose the forcing term for the next inner solve from the
        # gradient history.
        eta = eta_fun(last_g, gk)
        if fk < f_opt:
            f_opt = fk
            x_opt = x
    print('###WARNING: failed to converge')
    return x_opt, max_iter, it_ls_sum, 'expired'


def create_inexact_newton_fixed_eta(fixed_eta):
    """Build an inexact Newton solver whose forcing term is constant.

    :param fixed_eta: constant eta used for every inner GMRES solve
    :return: callable (f, g, h, x0, max_iter, eps) running inexact_newton
    """

    def constant_eta(*_):
        # The gradient history is irrelevant for a fixed forcing term.
        return fixed_eta

    def solver(f, g, h, x0, max_iter, eps):
        return inexact_newton(f, g, h, x0, max_iter, eps, constant_eta, fixed_eta)

    return solver


def create_inexact_newton_2(delta, alpha, eta_init):
    """Build an inexact Newton solver with an adaptive forcing term.

    The forcing term follows eta_k = delta * (||g_k|| / ||g_{k-1}||) ** alpha,
    computed from squared norms as delta * (g_k.g_k / g_{k-1}.g_{k-1}) ** (alpha / 2).

    :param delta: scaling factor of the forcing term
    :param alpha: exponent applied to the gradient-norm ratio
    :param eta_init: eta used for the very first inner solve
    :return: callable (f, g, h, x0, max_iter, eps) running inexact_newton
    """

    def adaptive_eta(g_last: np.ndarray, gk: np.ndarray):
        ratio = np.dot(gk, gk) / np.dot(g_last, g_last)
        return delta * ratio ** (alpha * 0.5)

    def solver(f, g, h, x0, max_iter, eps):
        return inexact_newton(f, g, h, x0, max_iter, eps, adaptive_eta, eta_init)

    return solver
