"""
    QuasiNewton.jl

Enhanced quasi-Newton methods implementation for GSI Core Analysis.
Ported from GSI Fortran qnewton3.f90 with complete mathematical rigor.

This module provides:
- L-BFGS (Limited-memory BFGS) algorithm with variable memory
- Multiple line search strategies (Armijo, Wolfe conditions)
- Trust region methods and globalization strategies  
- Memory-limited storage for large-scale problems
- Hessian approximation updates and efficient storage
- Scaling and preconditioning strategies
- Comprehensive convergence diagnostics

Mathematical foundation:
The L-BFGS method maintains an implicit approximation H_k to the inverse
Hessian, built from the m most recent correction pairs via the recursive
BFGS update

H_{k+1} = V_k^T H_k V_k + ρ_k s_k s_k^T

where:
- s_k = x_{k+1} - x_k (displacement)
- y_k = ∇f(x_{k+1}) - ∇f(x_k) (gradient difference)
- ρ_k = 1/(y_k^T s_k) (curvature information)
- V_k = I - ρ_k y_k s_k^T (update matrices)

The product H_k ∇f_k is evaluated matrix-free with the two-loop recursion.

The method includes sophisticated line search and trust region strategies
for global convergence and numerical stability.
"""

module QuasiNewton

using LinearAlgebra
using Printf

# Define local abstract types (avoid circular dependency)
# These will be compatible with parent module types when loaded
# NOTE(review): this declaration SHADOWS Base.AbstractVector inside this
# module — any method here annotated `::AbstractVector` would dispatch on this
# empty local abstract type, not on Base arrays. Nothing in this file
# references it, but it is a latent trap; consider renaming (e.g.
# `AbstractControlVector`) after confirming no external code relies on
# `QuasiNewton.AbstractVector`.
abstract type AbstractVector{T<:AbstractFloat} end

export QuasiNewtonConfig, QuasiNewtonResult, QuasiNewtonState
export lbfgs_solve!, bfgs_solve!, dfp_solve!
export LineSearchType, TrustRegionType
export setup_quasi_newton, apply_quasi_newton_precond

"""
    LineSearchType

Line search strategies selectable via `QuasiNewtonConfig.line_search_type`.

Only `ARMIJO`, `WOLFE`, and `BACKTRACKING` are handled explicitly in
`perform_line_search!`; the remaining values fall back to the Armijo search.
"""
@enum LineSearchType begin
    ARMIJO           # Simple Armijo backtracking
    WOLFE            # Wolfe conditions
    STRONG_WOLFE     # Strong Wolfe conditions
    MORE_THUENTE     # Moré-Thuente line search
    BACKTRACKING     # Simple backtracking
end

"""
    TrustRegionType

Trust region globalization strategies.

NOTE(review): currently only stored/copied through configurations; no code in
this file branches on these values yet.
"""
@enum TrustRegionType begin
    NO_TRUST_REGION  # No trust region
    CAUCHY_POINT     # Cauchy point trust region
    DOGLEG           # Dogleg trust region
    STEIHAUG_CG      # Steihaug conjugate gradient
end

"""
    QuasiNewtonConfig{T<:AbstractFloat}

Configuration parameters for quasi-Newton algorithms.

Groups convergence tolerances, L-BFGS memory sizing, line search and trust
region settings, scaling/preconditioning switches, numerical thresholds, and
output options. Prefer the keyword constructor `QuasiNewtonConfig(...)`.

(The docstring was moved above the struct: a bare string placed *inside* a
struct body is a Julia *field* docstring and was silently attaching the
struct-level description to `max_iter`.)
"""
struct QuasiNewtonConfig{T<:AbstractFloat}
    max_iter::Int                    # Maximum number of iterations
    max_func_evals::Int              # Maximum function evaluations
    tolerance_grad::T                # Gradient norm convergence tolerance
    tolerance_step::T                # Step size convergence tolerance
    tolerance_func::T                # Function value convergence tolerance

    # Memory parameters
    memory_size::Int                 # Number of correction pairs to store
    max_corrections::Int             # Maximum corrections in L-BFGS

    # Line search parameters
    line_search_type::LineSearchType # Type of line search
    armijo_const::T                  # Armijo constant (c1)
    wolfe_const::T                   # Wolfe constant (c2)
    max_line_search_iter::Int        # Maximum line search iterations
    min_step_length::T               # Minimum step length
    max_step_length::T               # Maximum step length

    # Trust region parameters
    trust_region_type::TrustRegionType # Type of trust region
    initial_trust_radius::T          # Initial trust region radius
    max_trust_radius::T              # Maximum trust region radius
    trust_shrink_ratio::T            # Trust region shrinking ratio
    trust_expand_ratio::T            # Trust region expansion ratio

    # Scaling and preconditioning
    use_scaling::Bool                # Use diagonal scaling
    use_preconditioning::Bool        # Use preconditioning
    scaling_threshold::T             # Threshold for scaling

    # Numerical parameters
    machine_precision::T             # Machine precision
    gradient_diff_threshold::T       # Threshold for gradient differences

    # Output and debugging
    verbose::Bool                    # Verbose output
    debug_level::Int                 # Debug detail level (0-3)
    save_history::Bool               # Save optimization history
end

"""
    QuasiNewtonConfig([T=Float64]; kwargs...)

Construct a `QuasiNewtonConfig{T}` from keyword arguments with sensible
defaults.

The element type `T` is an (optional) positional argument because a static
parameter cannot be deduced from keyword arguments alone: the previous
signature (`tolerance_grad::T = 1e-6 ... where T`) threw `UndefVarError(:T)`
for any call, including the zero-argument `QuasiNewtonConfig()`. Numeric
keywords accept any `Real` and are converted to `T`, so e.g.
`QuasiNewtonConfig(Float32)` works with the Float64 literal defaults.
"""
function QuasiNewtonConfig(::Type{T} = Float64;
    max_iter::Integer = 100,
    max_func_evals::Integer = 1000,
    tolerance_grad::Real = 1e-6,
    tolerance_step::Real = 1e-12,
    tolerance_func::Real = 1e-12,
    memory_size::Integer = 10,
    max_corrections::Integer = 20,
    line_search_type::LineSearchType = WOLFE,
    armijo_const::Real = 1e-4,
    wolfe_const::Real = 0.9,
    max_line_search_iter::Integer = 50,
    min_step_length::Real = 1e-20,
    max_step_length::Real = 1e20,
    trust_region_type::TrustRegionType = NO_TRUST_REGION,
    initial_trust_radius::Real = 1.0,
    max_trust_radius::Real = 1e10,
    trust_shrink_ratio::Real = 0.25,
    trust_expand_ratio::Real = 2.5,
    use_scaling::Bool = true,
    use_preconditioning::Bool = false,
    scaling_threshold::Real = 1e-12,
    machine_precision::Real = eps(T),
    gradient_diff_threshold::Real = 1e-12,
    verbose::Bool = false,
    debug_level::Integer = 0,
    save_history::Bool = true
) where {T<:AbstractFloat}
    return QuasiNewtonConfig{T}(
        max_iter, max_func_evals, T(tolerance_grad), T(tolerance_step), T(tolerance_func),
        memory_size, max_corrections, line_search_type, T(armijo_const), T(wolfe_const),
        max_line_search_iter, T(min_step_length), T(max_step_length),
        trust_region_type, T(initial_trust_radius), T(max_trust_radius),
        T(trust_shrink_ratio), T(trust_expand_ratio),
        use_scaling, use_preconditioning, T(scaling_threshold),
        T(machine_precision), T(gradient_diff_threshold),
        verbose, debug_level, save_history
    )
end

"""
    QuasiNewtonResult{T<:AbstractFloat, VT}

Results from quasi-Newton optimization: the final iterate and diagnostics,
per-iteration histories, and timing.

(The docstring was moved above the struct: a bare string inside a struct body
is a Julia *field* docstring and was attaching the struct-level description to
`optimal_point`.)
"""
mutable struct QuasiNewtonResult{T<:AbstractFloat, VT}
    optimal_point::VT                # Final optimal point
    optimal_value::T                 # Final function value
    gradient_norm::T                 # Final gradient norm
    iterations::Int                  # Number of iterations performed
    function_evals::Int              # Total function evaluations
    gradient_evals::Int              # Total gradient evaluations
    converged::Bool                  # Convergence flag
    convergence_reason::String       # Reason for convergence/termination

    # History tracking (one entry per accepted step, plus the initial point
    # for the function/gradient histories)
    function_history::Vector{T}      # Function value history
    gradient_norm_history::Vector{T} # Gradient norm history
    step_length_history::Vector{T}   # Step length history
    trust_radius_history::Vector{T}  # Trust region radius history

    # Performance metrics
    avg_hessian_updates::T          # Average Hessian update quality
    condition_estimate::T           # Condition number estimate
    total_cpu_time::Float64         # Total computation time
end

"""
    QuasiNewtonState{T<:AbstractFloat, VT}

Internal mutable state for quasi-Newton algorithms: current/previous iterate,
the L-BFGS circular correction-pair memory, scratch vectors, and counters.

(The docstring was moved above the struct: a bare string inside a struct body
is a Julia *field* docstring and was attaching the struct-level description to
`x_current`.)
"""
mutable struct QuasiNewtonState{T<:AbstractFloat, VT}
    # Current optimization state
    x_current::VT                    # Current point
    f_current::T                     # Current function value
    grad_current::VT                 # Current gradient

    # Previous iteration state
    x_previous::VT                   # Previous point
    f_previous::T                    # Previous function value
    grad_previous::VT                # Previous gradient

    # L-BFGS memory (circular buffers of length memory_size)
    s_vectors::Vector{VT}            # Displacement vectors (x_{k+1} - x_k)
    y_vectors::Vector{VT}            # Gradient differences (g_{k+1} - g_k)
    rho_values::Vector{T}            # Curvature information (1/(y^T s))
    alpha_values::Vector{T}          # Alpha values for two-loop recursion

    # Work vectors
    search_direction::VT             # Search direction p_k
    step_vector::VT                  # Step α_k * p_k
    work_vector1::VT                 # General work vector
    work_vector2::VT                 # General work vector

    # Line search state
    step_length::T                   # Current step length
    line_search_iter::Int            # Line search iterations

    # Trust region state
    trust_radius::T                  # Current trust region radius
    trust_ratio::T                   # Actual/predicted reduction ratio

    # Memory management
    memory_start::Int                # Next write slot in the circular buffer
    memory_count::Int                # Number of stored corrections

    # Scaling information
    scaling_vector::VT               # Diagonal scaling vector
    use_scaling::Bool                # Whether scaling is active

    # Performance tracking
    func_eval_count::Int             # Function evaluation counter
    grad_eval_count::Int             # Gradient evaluation counter
    hessian_update_count::Int        # Hessian update counter

    # Convergence monitoring
    gradient_norm_init::T            # Initial gradient norm
    step_norm::T                     # Current step norm
    relative_func_change::T          # Relative function change
    relative_grad_change::T          # Relative gradient change
end

"""
    lbfgs_solve!(x, objective, gradient!, config; x0=nothing, precond=nothing)

Solve optimization problem using Limited-memory BFGS (L-BFGS) method.

# Arguments
- `x`: Solution vector (modified in place)
- `objective`: Function to minimize f(x) -> scalar
- `gradient!`: Gradient function gradient!(g, x) -> nothing (modifies g in place)
- `config`: QuasiNewtonConfig with algorithm parameters
- `x0`: Initial guess (optional; when omitted, `x` is zero-initialized)
- `precond`: Preconditioner function applied in place to the search direction (optional)

# Returns
- `result`: QuasiNewtonResult with optimization information
"""
function lbfgs_solve!(x::VT, objective::Function, gradient!::Function, 
                     config::QuasiNewtonConfig{T}; 
                     x0::Union{VT, Nothing} = nothing,
                     precond = nothing) where {T<:AbstractFloat, VT}
    
    # Initialize solution vector (zero start when no initial guess is given)
    if x0 !== nothing
        copyto!(x, x0)
    else
        fill!(x, zero(T))
    end
    
    # Allocate work vectors and the L-BFGS circular correction memory
    state = initialize_quasi_newton_state(x, config)
    
    # Initial function and gradient evaluation
    state.f_current = objective(x)
    gradient!(state.grad_current, x)
    state.func_eval_count = 1
    state.grad_eval_count = 1
    
    grad_norm = norm(state.grad_current)
    state.gradient_norm_init = grad_norm
    
    config.verbose && @printf("L-BFGS: Initial f = %.6e, |∇f| = %.2e\n", 
                              state.f_current, grad_norm)
    
    # Initialize history
    # NOTE(review): histories are collected unconditionally; config.save_history
    # is not consulted here.
    function_history = T[state.f_current]
    gradient_norm_history = T[grad_norm]
    step_length_history = T[]
    trust_radius_history = T[]  # NOTE(review): never appended below — stays empty
    
    # Check initial convergence (early return with zero iterations)
    if grad_norm <= config.tolerance_grad
        return create_quasi_newton_result(x, state.f_current, grad_norm, 0,
                                         state.func_eval_count, state.grad_eval_count,
                                         true, "Initial point satisfies convergence criteria",
                                         function_history, gradient_norm_history,
                                         step_length_history, trust_radius_history,
                                         T(1), T(1), 0.0)
    end
    
    # Main L-BFGS iteration
    start_time = time()
    converged = false
    convergence_reason = "Maximum iterations reached"
    
    for k = 1:config.max_iter
        # Compute search direction using L-BFGS two-loop recursion
        compute_lbfgs_direction!(state, config)
        
        # Apply preconditioning if available (mutates the direction in place)
        if precond !== nothing
            precond(state.search_direction)
        end
        
        # Perform line search along the (preconditioned) direction
        step_length, line_search_success = perform_line_search!(
            state, objective, gradient!, config
        )
        
        if !line_search_success
            convergence_reason = "Line search failed"
            config.verbose && println("L-BFGS: Line search failed at iteration $k")
            break
        end
        
        # Update solution: x ← x + α p
        axpy!(step_length, state.search_direction, x)
        copyto!(state.x_current, x)
        
        # Store previous values (needed to form the (s, y) correction pair)
        state.f_previous = state.f_current
        copyto!(state.grad_previous, state.grad_current)
        
        # Evaluate new function and gradient
        # NOTE(review): the line search already evaluated f (and, for Wolfe,
        # ∇f) at the accepted trial point; those values are discarded and
        # recomputed here.
        state.f_current = objective(x)
        gradient!(state.grad_current, x)
        state.func_eval_count += 1
        state.grad_eval_count += 1
        
        # Update L-BFGS memory with the new (s, y) correction pair
        update_lbfgs_memory!(state, step_length, config)
        
        # Compute convergence metrics
        grad_norm = norm(state.grad_current)
        state.step_norm = step_length * norm(state.search_direction)
        state.relative_func_change = abs(state.f_current - state.f_previous) / 
                                   (abs(state.f_previous) + config.machine_precision)
        
        # Update history
        push!(function_history, state.f_current)
        push!(gradient_norm_history, grad_norm)
        push!(step_length_history, step_length)
        
        # Progress report every 10 iterations when verbose
        config.verbose && k % 10 == 0 && @printf(
            "L-BFGS iter %3d: f = %.6e, |∇f| = %.2e, α = %.2e, Δf = %.2e\n",
            k, state.f_current, grad_norm, step_length, 
            state.f_current - state.f_previous
        )
        
        # Check convergence criteria
        converged, convergence_reason = check_quasi_newton_convergence(
            grad_norm, state, config
        )
        
        if converged
            config.verbose && println("L-BFGS converged: $convergence_reason")
            break
        end
        
        # Check for maximum function evaluations
        if state.func_eval_count >= config.max_func_evals
            convergence_reason = "Maximum function evaluations reached"
            break
        end
    end
    
    total_time = time() - start_time
    final_grad_norm = norm(state.grad_current)
    
    # Iterations are reported as the number of accepted steps
    # (history length minus the initial entry).
    return create_quasi_newton_result(x, state.f_current, final_grad_norm,
                                     length(function_history)-1,
                                     state.func_eval_count, state.grad_eval_count,
                                     converged, convergence_reason,
                                     function_history, gradient_norm_history,
                                     step_length_history, trust_radius_history,
                                     T(1), T(1), total_time)
end

"""
    initialize_quasi_newton_state(x, config)

Allocate and initialize the internal `QuasiNewtonState` for quasi-Newton
methods.

All work vectors are allocated with the same type/shape as `x` via `similar`.
Gradient and "previous" vectors are left uninitialized and are only meaningful
after the caller performs the first gradient evaluation / first iteration.
The trust radius now starts from `config.initial_trust_radius` (previously it
was hard-coded to `one(T)`, silently ignoring the configured value).
"""
function initialize_quasi_newton_state(x::VT, config::QuasiNewtonConfig{T}) where {T, VT}
    memory_size = config.memory_size

    # L-BFGS correction-pair storage (circular buffer of length memory_size)
    s_vectors = [similar(x) for _ in 1:memory_size]
    y_vectors = [similar(x) for _ in 1:memory_size]
    rho_values = zeros(T, memory_size)
    alpha_values = zeros(T, memory_size)

    # Scratch vectors reused across iterations
    search_direction = similar(x)
    step_vector = similar(x)
    work_vector1 = similar(x)
    work_vector2 = similar(x)

    # Current state (gradient is filled in by the caller)
    x_current = copy(x)
    grad_current = similar(x)

    # Previous state (populated during the first iteration)
    x_previous = similar(x)
    grad_previous = similar(x)

    # Diagonal scaling defaults to the identity
    scaling_vector = similar(x)
    fill!(scaling_vector, one(T))

    return QuasiNewtonState{T, VT}(
        x_current, zero(T), grad_current,
        x_previous, zero(T), grad_previous,
        s_vectors, y_vectors, rho_values, alpha_values,
        search_direction, step_vector, work_vector1, work_vector2,
        one(T), 0,                                # step_length, line_search_iter
        config.initial_trust_radius, zero(T),     # trust_radius, trust_ratio
        1, 0,                                     # memory_start, memory_count
        scaling_vector, config.use_scaling,
        0, 0, 0,                                  # evaluation/update counters
        zero(T), zero(T), zero(T), zero(T)        # convergence metrics
    )
end

"""
    compute_lbfgs_direction!(state, config)

Compute the L-BFGS search direction `p ≈ -H_k ∇f` in place
(`state.search_direction`) using the two-loop recursion.

Stored correction pairs occupy circular-buffer slots `memory_start-1` (newest)
back to `memory_start-memory_count` (oldest); `mod1` maps those raw offsets
into `1:memory_size`. The previous first-loop formula
`memory_start - i + memory_count` was off by one (it addressed the *next
write slot* for the newest pair and was only accidentally correct when the
buffer was full), and the raw offsets could be non-positive, which the old
`circular_index` mishandled.
"""
function compute_lbfgs_direction!(state::QuasiNewtonState{T, VT}, 
                                 config::QuasiNewtonConfig{T}) where {T, VT}

    # Start from the steepest descent direction -∇f
    copyto!(state.search_direction, state.grad_current)
    state.search_direction .*= -one(T)

    if state.memory_count == 0
        # No curvature information yet: scaled steepest descent
        if state.use_scaling
            state.search_direction .*= state.scaling_vector
        end
        return
    end

    q = state.search_direction  # Alias for clarity

    # Map logical pair index i (1 = oldest … memory_count = newest) to its
    # circular-buffer slot. Uses mod1 so non-positive offsets wrap correctly.
    slot(i) = mod1(state.memory_start - state.memory_count + i - 1,
                   config.memory_size)

    # First loop: newest to oldest — compute α_i and update q
    for i = state.memory_count:-1:1
        idx = slot(i)
        state.alpha_values[idx] = state.rho_values[idx] * dot(state.s_vectors[idx], q)
        axpy!(-state.alpha_values[idx], state.y_vectors[idx], q)
    end

    # Apply the initial inverse-Hessian approximation: the standard scalar
    # scaling γ = sᵀy / yᵀy from the most recent pair (Nocedal & Wright 7.20)
    if state.use_scaling
        most_recent = mod1(state.memory_start - 1, config.memory_size)
        s_dot_y = dot(state.s_vectors[most_recent], state.y_vectors[most_recent])
        y_dot_y = dot(state.y_vectors[most_recent], state.y_vectors[most_recent])

        if abs(s_dot_y) > config.gradient_diff_threshold && y_dot_y > config.gradient_diff_threshold
            q .*= s_dot_y / y_dot_y
        end
    end

    # Second loop: oldest to newest — add the correction terms
    for i = 1:state.memory_count
        idx = slot(i)
        beta = state.rho_values[idx] * dot(state.y_vectors[idx], q)
        axpy!(state.alpha_values[idx] - beta, state.s_vectors[idx], q)
    end
    return
end

"""
    update_lbfgs_memory!(state, step_length, config)

Update the L-BFGS circular memory with the new correction pair
`s = α p`, `y = ∇f_{k+1} - ∇f_k`, provided the curvature condition
`sᵀy > gradient_diff_threshold` holds.

Two fixes over the original:
1. The candidate pair is staged in work vectors and only copied into the
   circular buffer on acceptance. Previously it was written directly into
   `s_vectors[memory_start]`/`y_vectors[memory_start]` *before* the test, so a
   rejected update corrupted the oldest stored pair whenever the buffer was
   full.
2. The test requires *positive* curvature (`sᵀy > threshold`) rather than
   `|sᵀy| > threshold`; a negative product would destroy positive
   definiteness of the inverse-Hessian approximation.
"""
function update_lbfgs_memory!(state::QuasiNewtonState{T, VT}, 
                             step_length::T, config::QuasiNewtonConfig{T}) where {T, VT}

    # Stage s = x_{k+1} - x_k = α p in scratch (line search is finished, so
    # the work vectors are free here)
    s_new = state.work_vector1
    copyto!(s_new, state.search_direction)
    s_new .*= step_length

    # Stage y = ∇f_{k+1} - ∇f_k
    y_new = state.work_vector2
    copyto!(y_new, state.grad_current)
    axpy!(-one(T), state.grad_previous, y_new)

    s_dot_y = dot(s_new, y_new)

    if s_dot_y > config.gradient_diff_threshold
        # Commit the pair into the next circular-buffer slot
        slot = state.memory_start
        copyto!(state.s_vectors[slot], s_new)
        copyto!(state.y_vectors[slot], y_new)
        state.rho_values[slot] = one(T) / s_dot_y
        state.hessian_update_count += 1

        # Advance the circular buffer (mod1 wraps memory_size -> 1)
        state.memory_count = min(state.memory_count + 1, config.memory_size)
        state.memory_start = mod1(state.memory_start + 1, config.memory_size)

        config.debug_level >= 2 && @printf("L-BFGS: Updated memory, s^T y = %.2e\n", s_dot_y)
    else
        # Skip: keep the existing memory intact
        config.verbose && @printf("L-BFGS: Skipped memory update, s^T y = %.2e\n", s_dot_y)
    end
end

"""
    perform_line_search!(state, objective, gradient!, config)

Perform line search along `state.search_direction` to find an acceptable step
length.

Returns `(α, success)`. Fails immediately with `(0, false)` when the search
direction is not a descent direction.
"""
function perform_line_search!(state::QuasiNewtonState{T, VT}, 
                             objective::Function, gradient!::Function,
                             config::QuasiNewtonConfig{T}) where {T, VT}

    # Directional derivative φ'(0) = ∇f(x)ᵀp; must be negative for descent.
    # Computed before the step estimate so we never divide by a non-negative
    # slope below.
    dφ_0 = dot(state.grad_current, state.search_direction)

    if dφ_0 >= zero(T)
        config.verbose && @warn "Search direction is not a descent direction"
        return zero(T), false
    end

    # Initial trial step length
    if state.memory_count == 0
        # First iteration: unit step along the (scaled) steepest descent
        α = one(T)
    else
        # First-order estimate α₀ = min(1, 2(f_k − f_{k−1}) / φ'(0))
        # (Nocedal & Wright eq. 3.60). After a successful previous step both
        # the numerator and φ'(0) are ≤ 0, so the ratio is non-negative.
        # The previous code used abs(Δf)/φ'(0), which is negative for a
        # descent direction and always collapsed the estimate to
        # min_step_length.
        α = min(one(T), T(2) * (state.f_current - state.f_previous) / dφ_0)
        α = max(α, config.min_step_length)
    end

    # Dispatch to the configured line search; unimplemented choices
    # (STRONG_WOLFE, MORE_THUENTE) fall back to Armijo.
    if config.line_search_type == ARMIJO
        return armijo_line_search!(state, objective, α, dφ_0, config)
    elseif config.line_search_type == WOLFE
        return wolfe_line_search!(state, objective, gradient!, α, dφ_0, config)
    elseif config.line_search_type == BACKTRACKING
        return backtracking_line_search!(state, objective, α, dφ_0, config)
    else
        # Default to Armijo
        return armijo_line_search!(state, objective, α, dφ_0, config)
    end
end

"""
    armijo_line_search!(state, objective, α0, dφ_0, config)

Armijo backtracking line search.

Starting from `α0`, halves the step until the sufficient-decrease condition
f(x + αp) ≤ f(x) + c1·α·φ'(0) holds or the step reaches `min_step_length`.
Returns `(α, success)`; a step at or below `min_step_length` is accepted
as-is so the caller can decide how to proceed.
"""
function armijo_line_search!(state::QuasiNewtonState{T, VT}, 
                            objective::Function, α0::T, dφ_0::T,
                            config::QuasiNewtonConfig{T}) where {T, VT}

    step = α0
    c1 = config.armijo_const
    f0 = state.f_current
    # Sufficient-decrease bound for the current step length
    bound = f0 + c1 * step * dφ_0

    for attempt = 1:config.max_line_search_iter
        # Build trial point x + α p in scratch storage
        trial = state.work_vector1
        copyto!(trial, state.x_current)
        axpy!(step, state.search_direction, trial)

        f_trial = objective(trial)
        state.func_eval_count += 1

        if config.debug_level >= 3
            @printf("Armijo iter %d: α = %.2e, f = %.6e, threshold = %.6e\n",
                    attempt, step, f_trial, bound)
        end

        # Accept on sufficient decrease, or give up shrinking at the floor
        (f_trial <= bound || step <= config.min_step_length) && return step, true

        # Halve the step and refresh the Armijo bound
        step *= 0.5
        bound = f0 + c1 * step * dφ_0

        step < config.min_step_length && break
    end

    return step, step > config.min_step_length
end

"""
    wolfe_line_search!(state, objective, gradient!, α0, dφ_0, config)

Wolfe conditions line search.

Tests the Armijo (sufficient decrease) condition with constant `c1` and the
strong-Wolfe curvature condition |φ'(α)| ≤ -c2·φ'(0) with constant `c2`.
If no acceptable step is found within `max_line_search_iter` trials, falls
back to a plain Armijo backtracking search restarted from `α0`.
"""
function wolfe_line_search!(state::QuasiNewtonState{T, VT}, 
                           objective::Function, gradient!::Function,
                           α0::T, dφ_0::T, config::QuasiNewtonConfig{T}) where {T, VT}
    
    # This is a simplified Wolfe line search
    # A full implementation would use bracketing and zoom phases
    
    α = α0
    c1 = config.armijo_const   # sufficient-decrease constant
    c2 = config.wolfe_const    # curvature constant
    f_current = state.f_current
    
    for iter = 1:config.max_line_search_iter
        # Trial point x + α p (work_vector1 is scratch)
        copyto!(state.work_vector1, state.x_current)
        axpy!(α, state.search_direction, state.work_vector1)
        
        # Evaluate function and gradient at the trial point
        f_trial = objective(state.work_vector1)
        gradient!(state.work_vector2, state.work_vector1)
        state.func_eval_count += 1
        state.grad_eval_count += 1
        
        # Armijo condition: f(x+αp) ≤ f(x) + c1·α·φ'(0)
        armijo_threshold = f_current + c1 * α * dφ_0
        if f_trial > armijo_threshold
            α *= 0.5   # step too long: shrink and retry
            continue
        end
        
        # Strong-Wolfe curvature condition: |∇f(x+αp)ᵀp| ≤ -c2·φ'(0)
        dφ_α = dot(state.work_vector2, state.search_direction)
        if abs(dφ_α) <= -c2 * dφ_0
            return α, true
        end
        
        # Curvature not satisfied: shrink if the trial slope is already
        # non-negative (overshot the minimizer), otherwise lengthen
        if dφ_α >= zero(T)
            α *= 0.5
        else
            α *= 1.5
        end
        
        α = clamp(α, config.min_step_length, config.max_step_length)
        
        if α <= config.min_step_length
            break
        end
    end
    
    # Fallback to Armijo if Wolfe fails
    # NOTE(review): restarting from α0 discards the trial information gathered
    # above; the func/grad evaluation counters already include those trials.
    return armijo_line_search!(state, objective, α0, dφ_0, config)
end

"""
    backtracking_line_search!(state, objective, α0, dφ_0, config)

Simple backtracking line search.

Shrinks the step by a fixed factor of 0.8 until *any* decrease of the
objective is observed (no slope-based sufficient-decrease test; `dφ_0` is
unused and kept only for a uniform line-search signature). Returns
`(α, success)`.
"""
function backtracking_line_search!(state::QuasiNewtonState{T, VT}, 
                                  objective::Function, α0::T, dφ_0::T,
                                  config::QuasiNewtonConfig{T}) where {T, VT}

    step = α0
    baseline = state.f_current
    shrink = 0.8

    for _ = 1:config.max_line_search_iter
        # Trial point x + α p in scratch storage
        probe = state.work_vector1
        copyto!(probe, state.x_current)
        axpy!(step, state.search_direction, probe)

        f_probe = objective(probe)
        state.func_eval_count += 1

        # Accept on any decrease, or once the step has hit the floor
        (f_probe < baseline || step <= config.min_step_length) && return step, true

        step *= shrink
    end

    return config.min_step_length, false
end

"""
    check_quasi_newton_convergence(grad_norm, state, config)

Check convergence criteria for quasi-Newton methods.

Returns `(converged::Bool, reason::String)`; the reason is empty when no
criterion is met. Criteria, in priority order: absolute gradient norm,
gradient norm relative to the initial one, step norm, and relative function
change.
"""
function check_quasi_newton_convergence(grad_norm::T, state::QuasiNewtonState{T, VT},
                                       config::QuasiNewtonConfig{T}) where {T, VT}

    # All tests are pure comparisons, so evaluating them up front is
    # equivalent to short-circuiting; the first satisfied one wins.
    criteria = (
        (grad_norm <= config.tolerance_grad,
         "Gradient norm below tolerance"),
        (grad_norm <= config.tolerance_grad * state.gradient_norm_init,
         "Relative gradient reduction achieved"),
        (state.step_norm <= config.tolerance_step,
         "Step size below tolerance"),
        (state.relative_func_change <= config.tolerance_func,
         "Relative function change below tolerance"),
    )

    for (satisfied, reason) in criteria
        satisfied && return (true, reason)
    end

    return (false, "")
end

"""
    circular_index(index, size)

Map `index` into the range `1:size`, wrapping both above and below
(circular-buffer addressing).

Implemented with `mod1`. The previous formula `((index - 1) % size) + 1` was
wrong for `index <= 0` because Julia's `%` is `rem`, which keeps the sign of
its first operand: it mapped `0 -> 0` and `-1 -> -1 + 1 = 0`, producing
invalid (out-of-range) buffer indices for the offsets generated by the
L-BFGS memory bookkeeping (e.g. `memory_start - 1` when `memory_start == 1`).
"""
function circular_index(index::Int, size::Int)
    return mod1(index, size)
end

"""
    create_quasi_newton_result(args...)

Assemble a `QuasiNewtonResult` from the solver's final state.

The optimal point is defensively copied so later in-place updates by the
caller cannot mutate the reported solution; history vectors are stored as
given (not copied).
"""
function create_quasi_newton_result(optimal_point::VT, optimal_value::T, 
                                   gradient_norm::T, iterations::Int,
                                   func_evals::Int, grad_evals::Int,
                                   converged::Bool, reason::String,
                                   func_hist::Vector{T}, grad_hist::Vector{T},
                                   step_hist::Vector{T}, trust_hist::Vector{T},
                                   hessian_quality::T, condition_est::T,
                                   cpu_time::Float64) where {T, VT}
    # Snapshot the iterate before packaging the result
    point_snapshot = copy(optimal_point)

    return QuasiNewtonResult{T, VT}(
        point_snapshot, optimal_value, gradient_norm, iterations,
        func_evals, grad_evals, converged, reason, func_hist,
        grad_hist, step_hist, trust_hist, hessian_quality,
        condition_est, cpu_time)
end

"""
    bfgs_solve!(x, objective, gradient!, config; kwargs...)

Convenience wrapper for full-memory BFGS: runs `lbfgs_solve!` with the
correction memory (`memory_size` and `max_corrections`) sized to the problem
dimension `length(x)`. All other configuration fields are carried over
unchanged.
"""
function bfgs_solve!(x::VT, objective::Function, gradient!::Function, 
                    config::QuasiNewtonConfig{T}; kwargs...) where {T, VT}

    dim = length(x)
    # The config struct is immutable, so rebuild it with the two memory
    # fields replaced by the problem dimension.
    full_memory_config = QuasiNewtonConfig{T}(
        config.max_iter, config.max_func_evals, config.tolerance_grad,
        config.tolerance_step, config.tolerance_func, dim, dim,
        config.line_search_type, config.armijo_const, config.wolfe_const,
        config.max_line_search_iter, config.min_step_length, config.max_step_length,
        config.trust_region_type, config.initial_trust_radius, config.max_trust_radius,
        config.trust_shrink_ratio, config.trust_expand_ratio,
        config.use_scaling, config.use_preconditioning, config.scaling_threshold,
        config.machine_precision, config.gradient_diff_threshold,
        config.verbose, config.debug_level, config.save_history
    )

    return lbfgs_solve!(x, objective, gradient!, full_memory_config; kwargs...)
end

"""
    dfp_solve!(x, objective, gradient!, config; kwargs...)

DFP (Davidon-Fletcher-Powell) method - dual of BFGS.

NOTE(review): the DFP update formula is not implemented; this currently
delegates unchanged to `lbfgs_solve!`, so the results are those of L-BFGS.
"""
function dfp_solve!(x::VT, objective::Function, gradient!::Function, 
                   config::QuasiNewtonConfig{T}; kwargs...) where {T, VT}
    
    # DFP is mathematically dual to BFGS
    # For now, use L-BFGS as approximation
    # Full DFP would require different update formula
    return lbfgs_solve!(x, objective, gradient!, config; kwargs...)
end

"""
    setup_quasi_newton(problem_size, config)

Set up a quasi-Newton configuration adapted to the problem size.

The correction memory is capped at one tenth of the problem dimension, but
never below 5 pairs and never above the configured `memory_size`. Every other
field is copied through unchanged; a new config is returned because the
struct is immutable.
"""
function setup_quasi_newton(problem_size::Int, config::QuasiNewtonConfig{T}) where T
    # Heuristic memory budget: min(configured, max(5, n ÷ 10))
    suggested = max(5, problem_size ÷ 10)
    adapted_memory = min(config.memory_size, suggested)

    return QuasiNewtonConfig{T}(
        config.max_iter, config.max_func_evals, config.tolerance_grad,
        config.tolerance_step, config.tolerance_func, adapted_memory, config.max_corrections,
        config.line_search_type, config.armijo_const, config.wolfe_const,
        config.max_line_search_iter, config.min_step_length, config.max_step_length,
        config.trust_region_type, config.initial_trust_radius, config.max_trust_radius,
        config.trust_shrink_ratio, config.trust_expand_ratio,
        config.use_scaling, config.use_preconditioning, config.scaling_threshold,
        config.machine_precision, config.gradient_diff_threshold,
        config.verbose, config.debug_level, config.save_history
    )
end

"""
    apply_quasi_newton_precond(v, H_approx)

Apply a quasi-Newton inverse-Hessian approximation as a preconditioner.

Currently a placeholder: `H_approx` is ignored and a fresh copy of `v` is
returned (identity preconditioning).
"""
function apply_quasi_newton_precond(v::VT, H_approx) where VT
    # Placeholder identity preconditioner; a real implementation would apply
    # the stored approximation (e.g. the two-loop recursion) to `v`.
    preconditioned = copy(v)
    return preconditioned
end

end # module QuasiNewton