import numpy as np
import scipy.linalg
import optimizer_common


def modify_d(d, eps):
    """
    Build the modified generalized inverse ~D+ of the block-diagonal matrix D
    from an LDL^T factorization, plus a negative-curvature coefficient vector.

    D consists of 1x1 and 2x2 diagonal blocks (Bunch-Kaufman style).  Every
    eigenvalue ``lam`` of a block is classified against the tolerance:

    * ``lam >= eps``   -> contributes ``(1/lam) * v @ v.T`` to ~D+
    * ``lam <= -eps``  -> its eigenvector components are stored in ``a``
    * ``|lam| < eps``  -> counted as a (near-)zero eigenvalue

    :param d: (n, n) block-diagonal matrix D produced by ``scipy.linalg.ldl``
    :param eps: tolerance used to classify eigenvalues as pos/neg/zero
    :return: ``(has_neg, has_zero, d_plus, a)`` — flags for the presence of
        negative / near-zero eigenvalues, the modified generalized inverse
        ~D+, and the eigenvector coefficients of the negative eigenvalues
    """
    n = d.shape[0]
    d_out = np.zeros((n, n), dtype=np.float64)
    a_out = np.zeros(n, dtype=np.float64)
    has_neg = False
    has_zero = False
    i = 0
    while i < n:
        if i + 1 < n and abs(d[i, i + 1]) >= eps:
            # 2x2 diagonal block: classify both eigenvalues.
            w, v = np.linalg.eigh(d[i:i + 2, i:i + 2])
            cur_has_pos = False
            cur_has_neg = False
            for t in range(2):
                if w[t] >= eps:
                    cur_has_pos = True
                    # Accumulate: if both eigenvalues are positive, both must
                    # contribute to ~D+ (the original overwrote the block,
                    # keeping only the last positive eigenvalue's term).
                    vt = v[:, t:t + 1]
                    d_out[i:i + 2, i:i + 2] += (1.0 / w[t]) * (vt @ vt.transpose())
                elif w[t] <= -eps:
                    cur_has_neg = True
                    a_out[i] = v[0, t]
                    a_out[i + 1] = v[1, t]
                else:
                    # Near-zero eigenvalue inside a 2x2 block was previously
                    # ignored; report it, consistent with the 1x1 case below.
                    has_zero = True
            if not cur_has_pos:
                print("###WARNING: bad 2x2 block (no positive eigenvalue)")
            if cur_has_neg:
                has_neg = True
            else:
                print("###WARNING: bad 2x2 block (no negative eigenvalue)")
            i += 2
        else:
            # 1x1 diagonal block: a plain scalar on the diagonal.
            if abs(d[i, i]) < eps:
                has_zero = True
            elif d[i, i] < 0:
                has_neg = True
                a_out[i] = 1.0
            else:
                d_out[i, i] = 1.0 / d[i, i]
            i += 1
    return has_neg, has_zero, d_out, a_out


def optimize(f, g, h, x0, max_iter, eps, ls):
    """
    Fletcher-Freeman optimizer

    :param f: objective function
    :param g: gradient of f
    :param h: hessian of f
    :param x0: starting point
    :param max_iter: max iterations
    :param eps: precision
    :param ls: line search
    :return: x, Newton iterations, line search iterations, terminate reason
    """
    total_ls_iters = 0
    f_best = f_cur = f(x0)
    x_best = x_cur = x0
    prev_was_newton = True
    for k in range(max_iter):
        print("#Iteration %d" % k)
        grad = g(x_cur)
        hess = h(x_cur)
        # Factor the Hessian as L D L^T and build the modified generalized
        # inverse ~D+ plus negative-curvature coefficients from D.
        l_mat, d_mat, _perm = scipy.linalg.ldl(hess)
        neg, zero, d_plus, ncd_coeffs = modify_d(d_mat, eps)
        l_inv = np.linalg.inv(l_mat)
        if np.sqrt(grad @ grad) < eps:
            prev_was_newton = False
            if not neg:
                # Stationary point with no negative curvature: converged.
                return x_best, k + 1, total_ls_iters, "done"
            # NOTE(review): the sign of this NCD is not checked against the
            # gradient (g.d may be positive) — presumably the line search in
            # optimizer_common.step copes with either sign; confirm.
            print("#NOTICE: small gradient, use NCD")
            direction = l_inv.transpose() @ ncd_coeffs
        else:
            # Modified Newton direction: -(L^-T ~D+ L^-1) g.
            direction = -(l_inv.transpose() @ (d_plus @ (l_inv @ grad)))
            using_newton = True
            # Alternate between the Newton direction and a negative/zero
            # curvature direction on consecutive iterations.
            if neg:
                if prev_was_newton or direction @ direction < eps:
                    print("#NOTICE: use NCD")
                    direction = l_inv.transpose() @ ncd_coeffs
                    using_newton = False
            elif zero:
                null_basis = scipy.linalg.null_space(hess, rcond=eps)
                zcd = None
                for col in range(null_basis.shape[1]):
                    candidate = null_basis[:, col]
                    # NOTE(review): this picks a null-space vector with
                    # g.d >= eps (ascent-looking); verify the intended sign
                    # convention against optimizer_common.step.
                    if (grad @ candidate) >= eps:
                        zcd = candidate
                        break
                if zcd is None:
                    print("#NOTICE: singular hessian but no valid ZCD")
                elif prev_was_newton or direction @ direction < eps:
                    print("#NOTICE: use ZCD")
                    direction = zcd
                    using_newton = False
            prev_was_newton = using_newton
        stop, x_next, f_next, ls_iters = optimizer_common.step(
            f, g, x_cur, direction, f_cur, grad, eps, ls)
        total_ls_iters += ls_iters
        if stop:
            return x_best, k + 1, total_ls_iters, "no further improvement"
        f_cur = f_next
        x_cur = x_next
        # Track the best iterate seen so far; the step need not be monotone.
        if f_cur < f_best:
            f_best = f_cur
            x_best = x_cur
    print('###WARNING: failed to converge')
    return x_best, max_iter, total_ls_iters, "expired"
