# Port for M1QN3 quasi-Newton optimization using Optim.jl backend

module QuasiNewton

using LinearAlgebra

export m1qn3!, QNOptions, QNResult, lbfgs_optimize

"""
    QNOptions

Options for quasi-Newton optimization.

Bundles the tunable parameters of the solvers in this module.
NOTE(review): none of the optimizers in this file currently consume a
`QNOptions` instance — they take the same settings as keyword arguments
instead. Kept for API completeness; confirm external callers before removing.
"""
struct QNOptions
    maxiter::Int         # Maximum iterations
    tol::Float64        # Convergence tolerance
    f_tol::Float64      # Function tolerance
    g_tol::Float64      # Gradient tolerance
    memory_size::Int    # L-BFGS memory size
    verbose::Bool       # Verbose output
    linesearch::Symbol  # Line search method
end

"""
    QNResult

Result structure for quasi-Newton optimization.

Returned by `m1qn3!`, `lbfgs_optimize`, and `bfgs_optimize`. The two history
vectors include the initial point, so they hold `iterations + 1` entries.
"""
struct QNResult
    x::Vector{Float64}           # Final solution
    success::Bool                # Convergence flag (gradient or function tolerance met)
    iterations::Int              # Number of iterations actually performed
    final_cost::Float64          # Final cost value
    final_gradient_norm::Float64 # Final gradient norm
    cost_history::Vector{Float64}      # Cost history (includes initial cost)
    gradient_norm_history::Vector{Float64} # Gradient norm history (includes initial norm)
end

"""
    m1qn3!(cost_func, grad_func, x0::Vector{Float64}; options...) -> QNResult

M1QN3-style quasi-Newton optimization using L-BFGS.

Thin compatibility wrapper that forwards to `lbfgs_optimize`.

# Arguments
- `cost_func`: Function that computes cost given state vector
- `grad_func`: Function that computes gradient given state vector
- `x0`: Initial state vector

# Keyword Arguments
- `maxiter=200`: Maximum iterations
- `tol=1e-5`: Gradient tolerance
- `f_tol=1e-8`: Function tolerance
- `memory_size=10`: L-BFGS memory size
- `verbose=true`: Print iteration information
- `linesearch=:backtrack`: Line search method (accepted for API compatibility;
  the L-BFGS backend always uses its built-in backtracking search)

# Returns
- QNResult with optimization results
"""
function m1qn3!(cost_func, grad_func, x0::Vector{Float64};
               maxiter::Int=200, tol::Float64=1e-5, f_tol::Float64=1e-8,
               memory_size::Int=10, verbose::Bool=true, linesearch::Symbol=:backtrack)
    # Delegate to the in-house L-BFGS routine; `linesearch` is intentionally
    # not forwarded (the backend has a single line-search strategy).
    return lbfgs_optimize(cost_func, grad_func, x0;
                          maxiter=maxiter,
                          tol=tol,
                          f_tol=f_tol,
                          memory_size=memory_size,
                          verbose=verbose)
end

"""
    lbfgs_optimize(cost_func, grad_func, x0::Vector{Float64}; options...) -> QNResult

L-BFGS optimization algorithm implementation.

Limited-memory BFGS: keeps the most recent `(s, y)` curvature pairs in ring
buffers and applies the implicit inverse-Hessian approximation to the gradient
via the two-loop recursion in `lbfgs_direction`.

# Arguments
- `cost_func`: Cost function; called as `cost_func(x)`, expected to return a scalar
- `grad_func`: Gradient function; called as `grad_func(x)`, expected to return a vector
- `x0`: Initial point

# Keyword Arguments
- `maxiter=200`: Maximum iterations
- `tol=1e-5`: Gradient-norm convergence tolerance
- `f_tol=1e-8`: Absolute function-change convergence tolerance
- `memory_size=10`: Number of curvature pairs kept
- `verbose=true`: Emit `@info` progress messages

# Returns
- QNResult with optimization results
"""
function lbfgs_optimize(cost_func, grad_func, x0::Vector{Float64};
                       maxiter::Int=200, tol::Float64=1e-5, f_tol::Float64=1e-8,
                       memory_size::Int=10, verbose::Bool=true)

    n = length(x0)
    x = copy(x0)

    # L-BFGS memory: ring buffers, one (s, y, rho) entry per column/slot.
    # The k-th ACCEPTED update (1-based) lives in slot ((k-1) % memory_size) + 1;
    # `memory_count` counts accepted updates and may exceed `memory_size` once
    # the buffers wrap. This convention must stay in sync with `lbfgs_direction`.
    s_history = Matrix{Float64}(undef, n, memory_size)  # Position differences
    y_history = Matrix{Float64}(undef, n, memory_size)  # Gradient differences
    rho_history = zeros(Float64, memory_size)           # 1 / (y^T s)
    memory_count = 0

    # Initialize
    f = cost_func(x)
    g = grad_func(x)

    cost_history = [f]
    gradient_norm_history = [norm(g)]

    if verbose
        @info "L-BFGS optimization started"
        @info "Initial cost: $f, Initial |grad|: $(norm(g))"
    end

    # Main optimization loop
    converged = false
    for iter in 1:maxiter
        g_norm = norm(g)

        # Check convergence (gradient test runs BEFORE taking a step, so a
        # converged iterate leaves the histories untouched this iteration)
        if g_norm < tol
            converged = true
            if verbose
                @info "Converged: gradient norm below tolerance"
            end
            break
        end

        # Compute search direction using L-BFGS two-loop recursion
        p = lbfgs_direction(g, s_history, y_history, rho_history, memory_count, memory_size)

        # Line search
        alpha = wolfe_line_search(cost_func, grad_func, x, p, f, g)

        # Update
        x_new = x + alpha * p
        f_new = cost_func(x_new)
        g_new = grad_func(x_new)

        # Store for L-BFGS update
        s = alpha * p
        y = g_new - g

        # Check for valid L-BFGS update: only store pairs with positive
        # curvature, otherwise the implicit Hessian could lose definiteness.
        # A rejected pair leaves `memory_count` (and the buffers) unchanged.
        sy = dot(s, y)
        if sy > 1e-10  # Curvature condition
            # Update L-BFGS memory
            idx = (memory_count % memory_size) + 1
            s_history[:, idx] = s
            y_history[:, idx] = y
            rho_history[idx] = 1.0 / sy
            memory_count += 1
        end

        # Function value convergence check (note: only sets the flag; the
        # iterate/history bookkeeping below still runs before the break)
        if abs(f_new - f) < f_tol
            converged = true
            if verbose
                @info "Converged: function change below tolerance"
            end
        end

        # Update for next iteration
        x = x_new
        f = f_new
        g = g_new

        push!(cost_history, f)
        push!(gradient_norm_history, norm(g))

        # Log the first few iterations, then every tenth
        if verbose && (iter % 10 == 0 || iter <= 5)
            @info "Iter $iter: cost = $f, |grad| = $(norm(g)), step = $alpha"
        end

        if converged
            break
        end
    end

    if verbose
        status = converged ? "converged" : "did not converge"
        @info "L-BFGS $status after $(length(cost_history)-1) iterations"
        @info "Final cost: $f, Final |grad|: $(norm(g))"
    end

    return QNResult(x, converged, length(cost_history)-1, f, norm(g),
                   cost_history, gradient_norm_history)
end

"""
    lbfgs_direction(g, s_history, y_history, rho_history, memory_count, memory_size)

Compute the L-BFGS search direction `-H*g` via the standard two-loop recursion.

History columns form a ring buffer: the k-th stored update (1-based) lives in
column `mod(k - 1, memory_size) + 1`, so the pair `i` steps back from the most
recent one is in column `mod(memory_count - i, memory_size) + 1`.

# Arguments
- `g`: Current gradient.
- `s_history`, `y_history`: Ring buffers of position / gradient differences (one pair per column).
- `rho_history`: Ring buffer of `1 / (yᵀs)` values.
- `memory_count`: Total number of updates stored so far (may exceed `memory_size`).
- `memory_size`: Capacity of the ring buffers.

# Returns
- Search direction vector; `-g` (steepest descent) when there is no history.
"""
function lbfgs_direction(g::Vector{Float64}, s_history::Matrix{Float64},
                        y_history::Matrix{Float64}, rho_history::Vector{Float64},
                        memory_count::Int, memory_size::Int)

    q = copy(g)
    m = min(memory_count, memory_size)

    if m == 0
        # No curvature information yet: fall back to steepest descent
        return -q
    end

    alpha = zeros(Float64, m)

    # First loop (backward, newest pair first).
    # BUGFIX: the slot must be mod(memory_count - i, memory_size) + 1.
    # The previous `((memory_count - i - 1) % memory_size) + 1` was off by one
    # relative to the store convention, and for i == memory_count (buffer not
    # yet full) Julia's `%` (rem) yields -1, producing index 0 → BoundsError.
    for i in 1:m
        idx = mod(memory_count - i, memory_size) + 1
        alpha[i] = rho_history[idx] * dot(view(s_history, :, idx), q)
        q .-= alpha[i] .* view(y_history, :, idx)
    end

    # Initial Hessian approximation H_0 = γI, with γ from the most recent pair
    # (m > 0 is guaranteed here by the early return above)
    latest_idx = mod(memory_count - 1, memory_size) + 1
    s_latest = view(s_history, :, latest_idx)
    y_latest = view(y_history, :, latest_idx)
    gamma = dot(s_latest, y_latest) / dot(y_latest, y_latest)
    q .*= gamma

    # Second loop (forward, oldest pair first)
    for i in m:-1:1
        idx = mod(memory_count - i, memory_size) + 1
        beta = rho_history[idx] * dot(view(y_history, :, idx), q)
        q .+= (alpha[i] - beta) .* view(s_history, :, idx)
    end

    return -q  # Search direction
end

"""
    wolfe_line_search(cost_func, grad_func, x, p, f0, g0)

Backtracking line search that tries to satisfy the strong Wolfe conditions.

Starts from a unit step and halves it. A step is accepted as soon as both the
Armijo (sufficient decrease) and strong curvature conditions hold. If the
curvature condition is never met, the largest step that satisfied Armijo is
returned as a fallback; only when no step achieved sufficient decrease does
the routine fall back to `max(alpha, 1e-6)`.

# Arguments
- `cost_func`, `grad_func`: Objective and gradient callables.
- `x`: Current iterate.
- `p`: Search direction (must be a descent direction; otherwise a tiny step is returned).
- `f0`: Objective value at `x`.
- `g0`: Gradient at `x`.

# Returns
- Step length `alpha > 0`.
"""
function wolfe_line_search(cost_func, grad_func, x::Vector{Float64}, p::Vector{Float64},
                          f0::Float64, g0::Vector{Float64})

    # Wolfe conditions parameters
    c1 = 1e-4  # Armijo parameter
    c2 = 0.9   # Curvature parameter

    alpha = 1.0

    # Guard: for a non-descent direction the Armijo test is meaningless
    derphi0 = dot(g0, p)
    if derphi0 >= 0
        @warn "Not a descent direction"
        return 1e-6
    end

    # Largest step seen that satisfied Armijo; 0.0 means "none yet".
    # (Previously such steps were discarded when curvature failed, and the
    # routine could return a step with no sufficient-decrease guarantee.)
    alpha_armijo = 0.0

    for i in 1:20
        x_new = x + alpha * p
        f_new = cost_func(x_new)

        # Armijo (sufficient decrease) condition
        if f_new <= f0 + c1 * alpha * derphi0
            alpha_armijo = max(alpha_armijo, alpha)

            # Strong Wolfe curvature condition
            g_new = grad_func(x_new)
            derphi = dot(g_new, p)
            if abs(derphi) <= -c2 * derphi0
                return alpha  # Strong Wolfe conditions satisfied
            end
        end

        alpha *= 0.5

        if alpha < 1e-10
            break
        end
    end

    # Prefer a step with guaranteed sufficient decrease over an arbitrary floor
    return alpha_armijo > 0.0 ? alpha_armijo : max(alpha, 1e-6)
end

"""
    bfgs_optimize(cost_func, grad_func, x0::Vector{Float64}; options...) -> QNResult

Full BFGS optimization (stores full Hessian approximation).

Maintains a dense approximation of the inverse Hessian, seeded with the
identity and refreshed with the standard BFGS inverse-update formula whenever
the curvature condition `sᵀy > 0` holds.

# Arguments
- `cost_func`: Cost function; called as `cost_func(x)`
- `grad_func`: Gradient function; called as `grad_func(x)`
- `x0`: Initial point

# Keyword Arguments
- `maxiter=200`: Maximum iterations
- `tol=1e-5`: Gradient-norm convergence tolerance
- `verbose=true`: Emit `@info` progress messages

# Returns
- QNResult with the final iterate and convergence history
"""
function bfgs_optimize(cost_func, grad_func, x0::Vector{Float64};
                      maxiter::Int=200, tol::Float64=1e-5, verbose::Bool=true)

    dim = length(x0)
    xk = copy(x0)

    # Dense inverse-Hessian approximation, seeded with the identity
    Hinv = Matrix{Float64}(I, dim, dim)

    fk = cost_func(xk)
    gk = grad_func(xk)

    cost_history = [fk]
    gradient_norm_history = [norm(gk)]

    if verbose
        @info "BFGS optimization started"
        @info "Initial cost: $fk, Initial |grad|: $(norm(gk))"
    end

    converged = false
    for iter in 1:maxiter
        # Stop as soon as the gradient is small enough
        if norm(gk) < tol
            converged = true
            verbose && @info "Converged: gradient norm below tolerance"
            break
        end

        # Quasi-Newton direction and step length
        direction = -Hinv * gk
        step = wolfe_line_search(cost_func, grad_func, xk, direction, fk, gk)

        # Trial point and secant pair
        s = step * direction
        x_next = xk + s
        f_next = cost_func(x_next)
        g_next = grad_func(x_next)
        y = g_next - gk

        # BFGS inverse-Hessian update; skipped unless curvature is positive,
        # which keeps Hinv positive definite
        sy = dot(s, y)
        if sy > 1e-10
            Hy = Hinv * y
            Hinv = Hinv - (s * Hy' + Hy * s') / sy +
                   (1 + dot(y, Hy) / sy) * (s * s') / sy
        end

        xk = x_next
        fk = f_next
        gk = g_next

        push!(cost_history, fk)
        push!(gradient_norm_history, norm(gk))

        # Log the first few iterations, then every tenth
        if verbose && (iter % 10 == 0 || iter <= 5)
            @info "Iter $iter: cost = $fk, |grad| = $(norm(gk))"
        end
    end

    if verbose
        status = converged ? "converged" : "did not converge"
        @info "BFGS $status after $(length(cost_history)-1) iterations"
    end

    return QNResult(xk, converged, length(cost_history)-1, fk, norm(gk),
                   cost_history, gradient_norm_history)
end

end # module QuasiNewton

