# Port of congrad.f90 - Conjugate gradient/Lanczos optimization

module ConjugateGradient

using LinearAlgebra

export congrad!, CGOptions, CGResult

"""
    CGOptions

Options bundle for conjugate gradient optimization.

Constructible positionally (as before) or by keyword; the keyword defaults
mirror the keyword-argument defaults of `congrad!`, e.g.
`CGOptions(maxiter=200, tol=1e-8)`.
"""
Base.@kwdef struct CGOptions
    maxiter::Int = 100          # Maximum number of iterations
    tol::Float64 = 1e-5         # Convergence tolerance on gradient norm
    reqrd::Float64 = 0.01       # Required reduction in gradient norm
    verbose::Bool = true        # Print iteration information
    save_vectors::Bool = false  # Save Lanczos vectors for eigenvalue analysis
end

"""
    CGResult

Result structure returned by `congrad!` and `preconditioned_congrad!`.

The histories include the value at the starting point, so their length is
`iterations + 1` when every iteration records exactly one entry.
"""
struct CGResult
    x::Vector{Float64}           # Final solution (the input x0 is copied, not mutated)
    success::Bool                # Convergence flag
    iterations::Int              # Number of iterations performed
    final_gradient_norm::Float64 # Final gradient norm
    gradient_reduction::Float64  # Reduction in gradient norm (|g0| / |g_final|)
    cost_history::Vector{Float64}      # Cost function history (starts with initial cost)
    gradient_norm_history::Vector{Float64} # Gradient norm history (starts with |g0|)
    eigenvalues::Vector{Float64}       # Hessian eigenvalues — currently always empty;
                                       # Lanczos eigenvalue extraction is not yet ported
end

"""
    congrad!(cost_func, grad_func, x0::Vector{Float64}; options...) -> CGResult

Fletcher-Reeves nonlinear conjugate gradient optimization with a
backtracking (Armijo) line search.

Note: despite the `!` suffix (kept for parity with the Fortran original),
`x0` is copied and NOT mutated; the solution is returned in `CGResult.x`.

# Arguments
- `cost_func`: Function that computes cost given state vector
- `grad_func`: Function that computes gradient given state vector
- `x0`: Initial state vector

# Keyword Arguments
- `maxiter=100`: Maximum iterations
- `tol=1e-5`: Convergence tolerance on gradient norm
- `reqrd=0.01`: Required relative reduction in gradient norm.
  NOTE(review): iteration stops as soon as `1 - |g|/|g0| >= reqrd`, i.e.
  after only a 1% drop with the default — confirm against the Fortran source.
- `verbose=true`: Print iteration information
- `save_vectors=false`: Save Lanczos vectors for eigenvalue analysis.
  Currently only the normalized initial gradient is stored and no
  eigenvalues are computed (the full Lanczos recurrence is not yet ported).

# Returns
- `CGResult` with optimization results
"""
function congrad!(cost_func, grad_func, x0::Vector{Float64};
                 maxiter::Int=100, tol::Float64=1e-5, reqrd::Float64=0.01,
                 verbose::Bool=true, save_vectors::Bool=false)

    n = length(x0)
    x = copy(x0)  # work on a copy; x0 is left untouched

    # Histories start with the values at the initial point.
    cost_history = Float64[]
    gradient_norm_history = Float64[]

    # Initial cost and gradient
    cost0 = cost_func(x)
    grad = grad_func(x)

    push!(cost_history, cost0)
    initial_grad_norm = norm(grad)
    push!(gradient_norm_history, initial_grad_norm)

    if verbose
        @info "CG Optimization started"
        @info "Initial cost: $cost0, Initial |grad|: $initial_grad_norm"
    end

    # Already converged at the starting point
    if initial_grad_norm < tol
        return CGResult(x, true, 0, initial_grad_norm, 0.0,
                       cost_history, gradient_norm_history, Float64[])
    end

    # Lanczos vector storage. Only the first (normalized initial gradient)
    # column is filled; the remaining recurrence is not ported yet.
    lanczos_vectors = save_vectors ? Matrix{Float64}(undef, n, maxiter+1) : nothing
    if save_vectors
        lanczos_vectors[:, 1] = grad / initial_grad_norm
    end

    # Initial search direction: steepest descent
    p = -grad

    iter = 0
    converged = false

    # `outer` is required so the post-loop code sees the final iteration
    # count; without it the loop variable shadows `iter` and it stays 0.
    for outer iter in 1:maxiter
        # Step size along p via backtracking line search
        alpha = line_search(cost_func, grad_func, x, p)

        # Update state in place
        x .+= alpha .* p

        # New gradient
        grad_new = grad_func(x)
        grad_norm = norm(grad_new)

        # Keep ||g_old||^2 for the Fletcher-Reeves coefficient, then promote
        # the new gradient immediately so the reported final gradient is
        # never stale when we break out on convergence below.
        gg_old = dot(grad, grad)
        grad = grad_new

        # Record progress
        cost_new = cost_func(x)
        push!(cost_history, cost_new)
        push!(gradient_norm_history, grad_norm)

        # Guard against division by zero when the gradient vanishes exactly
        gradient_reduction = grad_norm > 0 ? initial_grad_norm / grad_norm : Inf
        relative_reduction = 1.0 - grad_norm / initial_grad_norm

        if verbose && (iter % 10 == 0 || iter <= 5)
            @info "Iter $iter: cost = $cost_new, |grad| = $grad_norm, reduction = $gradient_reduction"
        end

        # Convergence check 1: absolute gradient norm
        if grad_norm < tol
            converged = true
            if verbose
                @info "Converged: gradient norm below tolerance"
            end
            break
        end

        # Convergence check 2: relative gradient reduction
        if relative_reduction >= reqrd
            converged = true
            if verbose
                @info "Converged: required gradient reduction achieved"
            end
            break
        end

        # Fletcher-Reeves update: beta = ||g_new||^2 / ||g_old||^2
        beta_fr = dot(grad, grad) / gg_old

        # New conjugate search direction
        p = -grad + beta_fr * p
    end

    final_reduction = initial_grad_norm / norm(grad)

    if verbose
        if converged
            @info "CG optimization converged after $iter iterations"
        else
            @info "CG optimization did not converge after $iter iterations"
        end
        @info "Final gradient norm: $(norm(grad)), Total reduction: $final_reduction"
    end

    # Eigenvalue extraction from the Lanczos vectors is not implemented yet;
    # the field is always returned empty.
    eigenvalues = Float64[]

    return CGResult(x, converged, iter, norm(grad), final_reduction,
                   cost_history, gradient_norm_history, eigenvalues)
end

"""
    line_search(cost_func, grad_func, x::Vector{Float64}, p::Vector{Float64}) -> Float64

Backtracking (Armijo) line search along direction `p` starting from `x`.

The returned step size is always for the direction `p` the caller passed in,
since callers apply it as `x .+= alpha .* p`. (The previous behavior of
silently switching to steepest descent computed a step for a direction the
caller never used.) If `p` is not a descent direction, the Armijo slope term
is clamped to zero so the search only accepts non-increasing steps along `p`.

# Arguments
- `cost_func`: Cost function
- `grad_func`: Gradient function
- `x`: Current point
- `p`: Search direction

# Returns
- Step size `alpha`; falls back to `1e-6` if no acceptable step is found
"""
function line_search(cost_func, grad_func, x::Vector{Float64}, p::Vector{Float64})
    # Backtracking parameters
    alpha = 1.0
    rho = 0.5      # contraction factor per backtrack
    c1 = 1e-4      # Armijo sufficient-decrease parameter
    max_backtracks = 20

    # Cost and slope at the current point
    f0 = cost_func(x)
    g0 = grad_func(x)
    directional_derivative = dot(g0, p)

    # Not a descent direction: the caller will still step along `p`, so we
    # must not size the step for a different direction. Clamping the slope
    # to zero degenerates the Armijo test to simple non-increase along `p`.
    if directional_derivative >= 0
        @warn "Not a descent direction; accepting only non-increasing steps"
        directional_derivative = 0.0
    end

    # Backtracking loop: shrink alpha until sufficient decrease holds
    for _ in 1:max_backtracks
        x_new = x + alpha * p
        f_new = cost_func(x_new)

        # Armijo condition: f(x + a*p) <= f(x) + c1 * a * g'p
        if f_new <= f0 + c1 * alpha * directional_derivative
            return alpha
        end

        alpha *= rho
    end

    # No acceptable step found within the backtracking budget
    @warn "Line search failed, using small step size"
    return 1e-6
end

"""
    preconditioned_congrad!(cost_func, grad_func, preconditioner, x0::Vector{Float64}; options...) -> CGResult

Preconditioned nonlinear conjugate gradient optimization.

Note: despite the `!` suffix (kept for parity with the Fortran original),
`x0` is copied and NOT mutated; the solution is returned in `CGResult.x`.

# Arguments
- `cost_func`: Cost function
- `grad_func`: Gradient function
- `preconditioner`: Function that applies preconditioning M^{-1} * v
- `x0`: Initial state vector

# Keyword Arguments
- `maxiter=100`, `tol=1e-5`, `reqrd=0.01`, `verbose=true`: as in `congrad!`

# Returns
- `CGResult` with optimization results (`eigenvalues` is always empty here)
"""
function preconditioned_congrad!(cost_func, grad_func, preconditioner, x0::Vector{Float64};
                                maxiter::Int=100, tol::Float64=1e-5, reqrd::Float64=0.01,
                                verbose::Bool=true)

    x = copy(x0)  # work on a copy; x0 is left untouched

    # Histories start with the values at the initial point.
    cost_history = Float64[]
    gradient_norm_history = Float64[]

    # Initial cost and gradient
    cost0 = cost_func(x)
    grad = grad_func(x)

    push!(cost_history, cost0)
    initial_grad_norm = norm(grad)
    push!(gradient_norm_history, initial_grad_norm)

    if verbose
        @info "Preconditioned CG started"
        @info "Initial cost: $cost0, Initial |grad|: $initial_grad_norm"
    end

    # Preconditioned steepest-descent start: p = -M^{-1} g
    z = preconditioner(grad)
    p = -z

    iter = 0
    converged = false

    # `outer` is required so the post-loop code sees the final iteration
    # count; without it the loop variable shadows `iter` and it stays 0.
    for outer iter in 1:maxiter
        # Step size along the preconditioned direction
        alpha = line_search(cost_func, grad_func, x, p)

        # Update state in place
        x .+= alpha .* p

        # New gradient
        grad_new = grad_func(x)
        grad_norm = norm(grad_new)

        # Keep g'z for the beta coefficient, then promote the new gradient
        # and preconditioned gradient immediately so the reported final
        # gradient is never stale when we break on convergence below.
        gz_old = dot(grad, z)
        grad = grad_new
        z = preconditioner(grad)

        # Record progress
        cost_new = cost_func(x)
        push!(cost_history, cost_new)
        push!(gradient_norm_history, grad_norm)

        # Guard against division by zero when the gradient vanishes exactly
        gradient_reduction = grad_norm > 0 ? initial_grad_norm / grad_norm : Inf

        if verbose && (iter % 10 == 0 || iter <= 5)
            @info "Iter $iter: cost = $cost_new, |grad| = $grad_norm, reduction = $gradient_reduction"
        end

        # Converge on absolute gradient norm or relative reduction
        if grad_norm < tol || (1.0 - grad_norm / initial_grad_norm) >= reqrd
            converged = true
            break
        end

        # Preconditioned CG update: beta = g_new' z_new / g_old' z_old
        beta_pr = dot(grad, z) / gz_old

        # New conjugate search direction
        p = -z + beta_pr * p
    end

    final_reduction = initial_grad_norm / norm(grad)

    if verbose
        status = converged ? "converged" : "did not converge"
        @info "Preconditioned CG $status after $iter iterations"
        @info "Final |grad|: $(norm(grad)), reduction: $final_reduction"
    end

    return CGResult(x, converged, iter, norm(grad), final_reduction,
                   cost_history, gradient_norm_history, Float64[])
end

end # module ConjugateGradient

