import numpy as np
import optimizer_common
from collections import deque


def l_bfgs(f, g, _unused_h, x0, max_iter, eps, m):
    """
    L-BFGS optimizer using the standard two-loop recursion.

    :param f: objective function
    :param g: gradient function
    :param _unused_h: hessian (unused; kept for a uniform optimizer signature)
    :param x0: starting point
    :param max_iter: maximum number of iterations
    :param eps: precision passed to the stopping test
    :param m: memory size (number of $(s_k, y_k)$ correction pairs kept)
    :return: (x*, #iterations, #LS iterations, terminate reason)
    """
    it_ls_sum = 0
    x_opt = x = x0
    f_opt = fk = f(x0)
    gk = g(x0)
    d = -gk  # first step: plain steepest descent (no curvature info yet)
    # Bounded memory of correction pairs; maxlen=m evicts the oldest pair
    # automatically, replacing the manual popleft bookkeeping.
    rho_list = deque(maxlen=m)
    y_list = deque(maxlen=m)
    s_list = deque(maxlen=m)
    for it in range(max_iter):
        # step
        if optimizer_common.is_stopping(x, fk, gk, eps):
            return x_opt, it, it_ls_sum, 'done'
        x_new, f_new, it_ls = optimizer_common.step(f, g, x, d, fk, gk)
        it_ls_sum += it_ls
        g_new = g(x_new)
        sk = x_new - x
        yk = g_new - gk
        x, fk, gk = x_new, f_new, g_new
        if fk < f_opt:
            f_opt = fk
            x_opt = x
        # Curvature safeguard: store the pair only when $y_k^T s_k$ is
        # sufficiently positive. Otherwise $\rho_k = 1/(y_k^T s_k)$ divides
        # by zero (e.g. a stalled step with $s_k = 0$) or the implicit
        # Hessian approximation loses positive-definiteness; the standard
        # remedy (Nocedal & Wright, ch. 7) is to skip the update.
        ys = np.dot(yk, sk)
        if ys > 1e-10 * np.dot(yk, yk):
            rho_list.append(1.0 / ys)
            y_list.append(yk)
            s_list.append(sk)
        # two-loop recursion: calculate $d_k = -H_k g_k$
        # backward pass, newest pair first
        q = gk
        a_list = []
        for rho, s, y in zip(reversed(rho_list), reversed(s_list), reversed(y_list)):
            a = rho * np.dot(s, q)
            q = q - a * y
            a_list.append(a)
        # Initial Hessian scaling $\gamma_k = s^T y / y^T y$ from the most
        # recently ACCEPTED pair; identity scaling when memory is empty.
        if y_list:
            y_last, s_last = y_list[-1], s_list[-1]
            gamma = np.dot(s_last, y_last) / np.dot(y_last, y_last)
        else:
            gamma = 1.0
        z = gamma * q
        # forward pass, oldest pair first; alphas consumed in reverse
        for rho, s, y, a in zip(rho_list, s_list, y_list, reversed(a_list)):
            b = rho * np.dot(y, z)
            z = z + (a - b) * s
        d = -z
    print('###WARNING: failed to converge')
    return x_opt, max_iter, it_ls_sum, 'expired'


def create(m):
    """Build an optimizer entry point with the L-BFGS memory size bound in.

    :param m: memory size forwarded to :func:`l_bfgs`
    :return: callable with the common ``(f, g, h, x0, max_iter, eps)`` signature
    """
    return lambda f, g, h, x0, max_iter, eps: l_bfgs(f, g, h, x0, max_iter, eps, m)
