import numpy as np
import optimizer_common


def mcd(g: np.ndarray, eps):
    """
    Perform modified Cholesky decomposition (Gill-Murray) on given matrix

    Produces unit lower-triangular L and positive diagonal d such that
    L @ np.diag(d) @ L.T = G + np.diag(e), where e is a non-negative
    diagonal perturbation that is zero when G is safely positive definite.

    :param g: symmetric matrix G, shape (n, n)
    :param eps: precision (floor for the perturbation delta and beta^2)
    :return: (L, d)  s.t. L@np.diag(d)@L.transpose()=G+np.diag(e)
    """
    n = g.shape[0]
    lu = np.eye(n, dtype=np.float64)
    c = np.zeros(n, dtype=np.float64)
    d = np.zeros(n, dtype=np.float64)
    dg = np.diag(np.diag(g))
    max_diag = abs(dg).max()
    # BUGFIX: largest off-diagonal magnitude of G. Previously computed as
    # abs(d - dg).max() with d the zero vector, which broadcast against dg
    # and silently collapsed to max_diag.
    max_non_diag = abs(g - dg).max()
    delta = eps * max(1, max_diag + max_non_diag)
    # Gill-Murray bound beta^2 = max(eta, xi / sqrt(n^2 - 1), eps).
    # Guard n == 1: sqrt(n*n - 1) would be 0 and 0/0 gives NaN.
    beta2 = max(eps, max_non_diag / np.sqrt(max(n * n - 1, 1)), max_diag)
    for j in range(n):
        # d[j] = g[j,j] - sum of already-factored contributions.
        d[j] = g[j, j]
        d[j] -= np.sum(lu[j, :j] ** 2 * d[:j])
        c[:] = 0
        for i in range(j + 1, n):
            c[i] = g[i, j]
            c[i] -= np.sum(lu[j, :j] * lu[i, :j] * d[:j])
        theta = abs(c).max()
        # Clamp the pivot from below so the factorization stays positive
        # and the subdiagonal entries of L stay bounded by beta.
        d[j] = max(abs(d[j]), delta, theta ** 2 / beta2)
        lu[j + 1:n, j] = c[j + 1:n] * (1.0 / d[j])
    return lu, d


def optimize(f, g, h, x0, max_iter, eps, ls):
    """
    Stable Newton optimizer based on modified Cholesky decomposition

    :param f: objective function
    :param g: gradient of f
    :param h: hessian of f
    :param x0: starting point
    :param max_iter: max iterations
    :param eps: precision
    :param ls: line search
    :return: x, Newton iterations, line search iterations, terminate reason
    """
    total_ls_iters = 0
    fk = f(x0)
    f_opt, x_opt = fk, x0
    x = x0
    for it in range(max_iter):
        print("#Iteration %d" % it)
        grad = g(x)
        hess = h(x)
        lo, dvec = mcd(hess, eps)
        # Positive-definite surrogate Hessian from the factorization.
        h_mod = lo @ np.diag(dvec) @ lo.transpose()
        if np.linalg.norm(grad) < eps:
            # Gradient is tiny: decide between convergence and a
            # negative-curvature descent (NCD) direction.
            psi = dvec - np.diag(h_mod - hess)
            t = psi.argmin()
            if psi[t] >= 0:
                return x_opt, it + 1, total_ls_iters, "done"
            print("#NOTICE: small gradient, use NCD")
            unit = np.zeros(grad.shape[0], dtype=np.float64)
            unit[t] = 1.0
            direction = np.linalg.solve(lo.transpose(), unit)
        else:
            # Standard modified-Newton step on the surrogate Hessian.
            direction = np.linalg.solve(h_mod, -1.0 * grad)
        stop, x_new, f_new, n_ls = optimizer_common.step(f, g, x, direction, fk, grad, eps, ls)
        total_ls_iters += n_ls
        if stop:
            return x_opt, it + 1, total_ls_iters, "no further improvement"
        x, fk = x_new, f_new
        if fk < f_opt:
            f_opt, x_opt = fk, x
    print('###WARNING: failed to converge')
    return x_opt, max_iter, total_ls_iters, "expired"
