import numpy as np
from vector_util import l2_norm
from scipy.linalg import cholesky, solve_triangular


def solve(f, g, h, x0, eps, max_iter, init_radius, max_radius, sub_solve):
    """
    Trust region optimizer

    :param f: objective function
    :param g: gradient
    :param h: hessian
    :param x0: starting point
    :param eps: precision
    :param max_iter: max iterations
    :param init_radius: initial trust region radius
    :param max_radius: maximum trust region radius
    :param sub_solve: trust region sub-problem solver
    :return: (x, #iterations, #sub iterations, #hard cases, terminate reason)
    """
    x, fx = x0, f(x0)
    radius = init_radius
    n_hard = 0
    n_sub = 0
    for k in range(max_iter):
        grad = g(x)
        # first-order optimality: gradient small enough
        if l2_norm(grad) < eps:
            return x, k, n_sub, n_hard, 'done'
        hess = h(x)
        step, on_boundary, is_hard, sub_iters = sub_solve(grad, hess, radius, eps)
        n_sub += sub_iters
        if is_hard:
            n_hard += 1
        candidate = x + step
        f_candidate = f(candidate)
        actual = fx - f_candidate
        # predicted reduction from the quadratic model (abs guards sign)
        predicted = abs((grad @ step) + 0.5 * (step @ hess @ step))
        ratio = actual / predicted
        # adjust the trust region based on how well the model agreed
        if ratio <= 0.25:
            radius *= 0.25
        elif ratio >= 0.75 and on_boundary:
            radius = min(radius * 2.0, max_radius)
        # accept the step only when the objective actually decreased
        if ratio > 0:
            x, fx = candidate, f_candidate
    print("###WARNING: failed to converge")
    return x, max_iter, n_sub, n_hard, 'expired'


def safeguard(min_eig, eps):
    """
    Create a safeguard for specific sub-problem

    Note: scalar `u` is considered safe iff `B+uI` is positive definite

    :param min_eig: smallest (most negative) eigenvalue of matrix B
    :param eps: precision
    :return: (minimal safe u, hard case possible?, safeguard subroutine, range update subroutine)
    """
    safe_floor = -min_eig + eps
    lo, hi = safe_floor, np.inf
    # a strictly positive floor means B alone is not PD -> hard case possible
    may_hard = safe_floor > 0.0
    u_init = safe_floor if may_hard else 0.0

    def update(u, l2_d_minus_radius):
        # tighten the bracket [lo, hi] around the optimal shift:
        # positive residual => shift too small, negative => too large
        nonlocal lo, hi
        if l2_d_minus_radius > 0 and u > lo:
            lo = u
        if l2_d_minus_radius < 0 and u < hi:
            hi = u

    def guard(u):
        assert lo >= safe_floor
        if lo <= u <= hi:
            return u
        # out of bracket: reflect past lo while unbounded above, else bisect
        return lo + (lo - u) if np.isinf(hi) else 0.5 * (lo + hi)

    return u_init, may_hard, guard, update


def hebden(g, h, radius, eps, max_sub_iter=100):
    """
    Trust region sub-problem solver, using Hebden's method.

    :param g: gradient at current point
    :param h: hessian at current point
    :param radius: trust region radius
    :param eps: precision
    :param max_sub_iter: max iterations
    :return: (d, constraint active?, hard case?, #iterations)
    """
    eye = np.eye(g.shape[0], dtype=np.float64)
    smallest_eig = np.min(np.linalg.eigvalsh(h))
    u, may_hard, guard, update = safeguard(smallest_eig, eps)
    inv = np.linalg.inv(h + u * eye)
    d = inv @ -g
    norm_d = l2_norm(d)
    if norm_d < radius:
        # interior solution: constraint inactive, nothing to iterate
        return d, False, may_hard, 1
    for it in range(max_sub_iter):
        # phi(u) = ||d(u)|| - radius
        if abs(norm_d - radius) < eps:
            return d, True, False, it + 1
        update(u, norm_d - radius)
        # d'(u) = -(h + u I)^{-1} d(u)
        d_prime = -inv @ d
        phi_prime = (d @ d_prime) / norm_d
        # fit the rational model ||d(u)|| ~= beta / (u + lam) to the current
        # value and slope, then pick the u where the model hits the radius
        lam = -u - norm_d / phi_prime
        beta = norm_d * (lam + u)
        u = guard(beta / radius - lam)
        inv = np.linalg.inv(h + u * eye)
        d = inv @ -g
        norm_d = l2_norm(d)
    print("###WARNING: Hebden iterations failed to converge")
    return d, True, False, max_sub_iter


def more_sorensen(g, h, radius, eps, max_sub_iter=100):
    """
    Trust region sub-problem solver, using More-Sorensen's method.

    :param g: gradient at current point
    :param h: hessian at current point
    :param radius: trust region radius
    :param eps: precision
    :param max_sub_iter: max iterations
    :return: (d, constraint active?, hard case?, #iterations)
    """
    eye = np.eye(g.shape[0], dtype=np.float64)
    smallest_eig = np.min(np.linalg.eigvalsh(h))
    u, may_hard, guard, update = safeguard(smallest_eig, eps)

    def factor_solve():
        # Cholesky-factor the shifted hessian and solve (h + u I) p = -g
        # with two triangular solves; also return the residual vs the radius
        chol = cholesky(h + u * eye, lower=True)
        y = solve_triangular(chol, -g, lower=True, unit_diagonal=False)
        p = solve_triangular(chol, y, trans='T', lower=True, unit_diagonal=False)
        norm_p = l2_norm(p)
        return p, norm_p, norm_p - radius, chol

    d, norm_d, gap, chol = factor_solve()
    if gap < 0:
        # interior solution: constraint inactive
        return d, False, may_hard, 1
    for it in range(max_sub_iter):
        if abs(gap) < eps:
            return d, True, False, it + 1
        update(u, gap)
        # More-Sorensen correction: Newton step on the secular equation,
        # using q = L^{-1} d from the existing factorization
        q = solve_triangular(chol, d, lower=True, unit_diagonal=False)
        u = guard(u + (norm_d / l2_norm(q)) ** 2 * (gap / radius))
        d, norm_d, gap, chol = factor_solve()
    print("###WARNING: More-Sorensen iterations failed to converge")
    return d, True, False, max_sub_iter


def subspace(g, h, radius, eps):
    """
    Trust region sub-problem solver, using two-dimensional subspace minimization method.

    Minimizes the quadratic model over span{g, (h + a*I)^-1 g} intersected
    with the trust region, falling back to the cauchy point when that
    subspace degenerates.

    :param g: gradient at current point
    :param h: hessian at current point
    :param radius: trust region radius
    :param eps: precision
    :return: (d, constraint active?, hard case?, UNUSED)
    """
    ii_h = np.eye(g.shape[0], dtype=np.float64)
    min_eig_h = np.min(np.linalg.eigvalsh(h))
    if abs(min_eig_h) < eps:
        # h is (near-)singular; the shifted solve below would be unreliable
        print("#NOTICE: positive semi-definite, use cauchy point instead")
        return cauchy(g, h, radius, eps)
    # shift h enough (1.5x the negative eigenvalue) to make it positive
    # definite before solving for the second basis direction
    alpha_h = max(0.0, -min_eig_h * 1.5)
    d2 = np.linalg.solve(h + alpha_h * ii_h, g)
    # derive an orthonormal basis of desired subspace
    # to simplify further calculations
    # so we have \bar{B}=I
    d1 = g / l2_norm(g)
    d2 = d2 / l2_norm(d2)
    d2 = d2 - (d1 @ d2) * d1  # Gram-Schmidt: remove the d1 component
    l2_d2 = l2_norm(d2)
    if l2_d2 < eps:  # oops, our subspace is 1d
        print("#NOTICE: 1d subspace, use cauchy point instead")
        return cauchy(g, h, radius, eps)
    d2 = d2 / l2_d2
    ii = np.eye(2, dtype=np.float64)
    vv = np.column_stack([d1, d2])
    # project the gradient and hessian onto the 2d subspace
    gk = vv.transpose() @ g
    bb = vv.transpose() @ h @ vv
    min_eig = np.min(np.linalg.eigvalsh(bb))
    alpha, hard_case, _unused_1, _unused_2 = safeguard(min_eig, eps)
    # try the (safeguarded) unconstrained minimizer of the projected model
    u0 = np.linalg.solve(bb + alpha * ii, -gk)
    if l2_norm(u0) < radius:
        return vv @ u0, False, hard_case, 0
    # transform our problem into a quartic equation
    # boundary solution: find x with (bb + x*I) u = -gk and ||u|| = radius,
    # i.e. ||adj(bb + x*I) @ gk||^2 = radius^2 * det(bb + x*I)^2, quartic in x
    p, q, r = bb[0, 0], bb[0, 1], bb[1, 1]
    # det(bb + x*I) = x^2 + k1*x + k0
    k0, k1 = (p * r - q * q), p + r
    a4 = radius * radius
    # right-hand side radius^2 * (x^2 + k1*x + k0)^2, expanded by degree
    a0 = a4 * k0 * k0
    a1 = a4 * 2 * k0 * k1
    a2 = a4 * (k1 * k1 + 2 * k0)
    a3 = a4 * 2 * k1
    # adjugate of bb; note adj(bb + x*I) = bb2 + x*I
    bb2 = np.array([[r, -q], [-q, p]], dtype=np.float64)
    # left-hand side ||(bb2 + x*I) gk||^2, expanded by degree of x
    left_a0 = gk @ bb2 @ bb2 @ gk
    left_a1 = 2 * (gk @ bb2 @ gk)
    left_a2 = gk @ gk
    # largest real root is the multiplier giving the boundary solution
    x = get_real_root([a4, a3, a2 - left_a2, a1 - left_a1, a0 - left_a0], eps)
    u1 = np.linalg.solve(bb + x * ii, -gk)
    return vv @ u1, True, False, 0


def get_real_root(c, eps):
    """
    Return the largest real root of a polynomial.

    :param c: polynomial coefficients, highest degree first
    :param eps: tolerance on the imaginary part for a root to count as real
    :return: largest root whose imaginary part is negligible
    :raises ValueError: if the polynomial has no (near-)real root
    """
    real_roots = [np.real(r) for r in np.roots(c) if abs(np.imag(r)) < eps]
    if not real_roots:
        raise ValueError("no solution, this should never happen")
    return max(real_roots)


def cauchy(g, h, radius, eps):
    """
    Trust region sub-problem solver, using cauchy point method,
    as a fallback in case of 2d subspace unavailable.

    :param g: gradient at current point
    :param h: hessian at current point
    :param radius: trust region radius
    :param eps: precision
    :return: (d, constraint active?, hard case?, UNUSED)
    """
    l2_g = l2_norm(g)
    # start from the full step to the boundary along steepest descent
    d = -radius * (g / l2_g)
    ghg = g @ h @ g
    if ghg >= eps:
        # positive curvature along g: scale back to the 1d model minimizer
        # when it lies inside the region (tau = ||g||^3 / (radius * gHg))
        d *= min(1.0, (l2_g / radius) * (l2_g ** 2 / ghg))
    # BUG FIX: ||d|| <= radius always holds, so the original signed test
    # `(l2_norm(d) - radius) < eps` was True even for interior points,
    # wrongly reporting the constraint as active; compare with abs() so an
    # interior cauchy point returns False.
    return d, abs(l2_norm(d) - radius) < eps, False, 0
