"""
    FourDVar

Four-dimensional variational data assimilation with DRP-4DVar (Dimensionality Reduction Projection) implementation.

This module implements the DRP-4DVar algorithm which addresses traditional 4D-Var computational challenges by
projecting the optimization problem into a reduced-dimension ensemble subspace, eliminating the need for
expensive tangent linear and adjoint model development.

# Key Components

- `DRP4DVar`: Main DRP-4DVar data assimilation method
- `EnsembleProjection`: Ensemble perturbation generation and projection matrices
- `ReducedSpaceCostFunction`: Cost function evaluation in ensemble subspace
- `DRP4DVariationalSolver`: Optimization algorithms for reduced-space problems

# Mathematical Framework

The DRP-4DVar method transforms the traditional 4D-Var cost function:
```
J(x) = ½(x-x_b)ᵀB⁻¹(x-x_b) + Σᵢ ½(Hᵢ(xᵢ)-yᵢ)ᵀRᵢ⁻¹(Hᵢ(xᵢ)-yᵢ)
```

into a reduced-dimension control space:
```
J(α) = ½αᵀα + Σᵢ ½(P_y,ᵢα - dᵢ)ᵀRᵢ⁻¹(P_y,ᵢα - dᵢ)
```

where α represents the control variable in the ensemble subspace, and P_y,i = H_i * M_i * P_x
represents the projected observation operator.

# References

- Wang, X., et al. (2007). A hybrid ETKF-3DVAR data assimilation scheme
- Liu, C., et al. (2008). An ensemble-based four-dimensional variational data assimilation scheme
- Lorenc, A. C., et al. (2015). Comparison of hybrid-4DEnVar and hybrid-4DVar data assimilation methods
"""
module FourDVar

using LinearAlgebra
using Statistics
using Random
using Printf
using SparseArrays

using ..BackgroundError

const CovLocalization = BackgroundError.CovarianceLocalization

# Load GSI integration module early so downstream routines can reference it.
include("GSIIntegration.jl")
using .GSIIntegration

# Import parent module types (commented out for now to avoid dependency issues)
# using ..GSICoreAnalysis: AbstractAnalysisConfig, AnalysisConfig
# using ..GSICoreAnalysis: AbstractControlVector, ControlVector
# using ..GSICoreAnalysis: AbstractStateVector, StateVector

# Export main types and functions
export DRP4DVar, EnsembleProjection, ReducedSpaceCostFunction
export run_drp4dvar, ensemble_perturbations, project_to_observation_space
export reduced_cost_function, reduced_gradient, optimize_reduced_space
export DRP4DVariationalSolver

# =============================================================================
# Core DRP-4DVar Types and Structures
# =============================================================================

"""
    DRP4DVar

Dimensionality Reduction Projection Four-Dimensional Variational data assimilation method.

This structure contains all parameters and state information needed for DRP-4DVar analysis,
including ensemble configuration, optimization settings, and convergence criteria.

# Fields
- `ensemble_size::Int`: Number of ensemble members for dimensionality reduction
- `max_outer_loops::Int`: Maximum outer loop iterations for nonlinear problems
- `max_inner_loops::Int`: Maximum inner loop iterations for optimization
- `convergence_tolerance::Float64`: Convergence tolerance for cost function gradient
- `time_window::Int`: Length of 4D-Var assimilation window
- `background_error_variance::Float64`: Background error variance scaling
- `ensemble_inflation::Float64`: Ensemble inflation factor
- `localization_radius::Float64`: Spatial localization radius (km)
- `optimizer::String`: Optimization algorithm ("lbfgs", "gauss_newton", "conjugate_gradient")
- `use_localization::Bool`: Enable spatial localization
- `adaptive_inflation::Bool`: Use adaptive ensemble inflation

# Constructor
```julia
drp4dvar = DRP4DVar(
    ensemble_size = 40,
    max_outer_loops = 3,
    max_inner_loops = 100,
    convergence_tolerance = 1e-6,
    time_window = 6,
    optimizer = "lbfgs"
)
```
"""
struct DRP4DVar
    # Core algorithm parameters
    ensemble_size::Int
    max_outer_loops::Int
    max_inner_loops::Int
    convergence_tolerance::Float64
    time_window::Int
    
    # Background error parameters
    background_error_variance::Float64
    ensemble_inflation::Float64
    localization_radius::Float64
    
    # Algorithm options
    optimizer::String
    use_localization::Bool
    adaptive_inflation::Bool
    
    # Internal state (mutable for statistics)
    statistics::Dict{String, Any}
    
    # Constructor with defaults
    function DRP4DVar(;
        ensemble_size::Int = 40,
        max_outer_loops::Int = 3,
        max_inner_loops::Int = 100,
        convergence_tolerance::Float64 = 1e-6,
        time_window::Int = 6,
        background_error_variance::Float64 = 1.0,
        ensemble_inflation::Float64 = 1.02,
        localization_radius::Float64 = 1000.0,
        optimizer::String = "lbfgs",
        use_localization::Bool = true,
        adaptive_inflation::Bool = false
    )
        statistics = Dict{String, Any}(
            "total_iterations" => 0,
            "convergence_history" => Float64[],
            "cost_function_history" => Float64[],
            "timing_statistics" => Dict{String, Float64}(),
            "analysis_increments" => Dict{Int, Vector{Float64}}()
        )
        
        new(ensemble_size, max_outer_loops, max_inner_loops, convergence_tolerance, time_window,
            background_error_variance, ensemble_inflation, localization_radius,
            optimizer, use_localization, adaptive_inflation, statistics)
    end
end

"""
    EnsembleProjection

Container for ensemble-based projection matrices used in DRP-4DVar.

This structure holds the projection matrices that map between full state space
and the reduced ensemble subspace, as well as the corresponding observation
space projections.

# Fields
- `P_x::Matrix{Float64}`: State space projection matrix (n × k)
- `P_y::Dict{Int, Matrix{Float64}}`: Observation space projection matrices by time
- `ensemble_perturbations::Matrix{Float64}`: Centered ensemble perturbations
- `ensemble_mean::Vector{Float64}`: Ensemble mean state
- `eigenvalues::Vector{Float64}`: Eigenvalues of background error covariance
- `explained_variance::Float64`: Fraction of variance captured by ensemble
"""
struct EnsembleProjection
    P_x::Matrix{Float64}                    # n × k projection matrix
    P_y::Dict{Int, Matrix{Float64}}         # Observation space projections
    ensemble_perturbations::Matrix{Float64} # n × k centered perturbations
    ensemble_mean::Vector{Float64}          # Mean of ensemble
    eigenvalues::Vector{Float64}            # Background error eigenvalues
    explained_variance::Float64             # Fraction of variance explained
end

"""
    ReducedSpaceCostFunction

Cost function evaluation in the reduced ensemble subspace.

This structure contains all data needed to evaluate the DRP-4DVar cost function
J(α) and its gradient ∇J(α) in the control variable space α.

# Fields
- `projection::EnsembleProjection`: Projection matrices and ensemble data
- `innovations::Dict{Int, Vector{Float64}}`: Innovation vectors by observation time
- `obs_error_inv::Dict{Int, Matrix{Float64}}`: Inverse observation error covariance
- `current_cost::Float64`: Current cost function value
- `current_gradient::Vector{Float64}`: Current gradient in control space
"""
mutable struct ReducedSpaceCostFunction
    projection::EnsembleProjection
    innovations::Dict{Int, Vector{Float64}}
    obs_error_inv::Dict{Int, Matrix{Float64}}
    current_cost::Float64
    current_gradient::Vector{Float64}
    
    # Constructor
    function ReducedSpaceCostFunction(projection::EnsembleProjection,
                                    innovations::Dict{Int, Vector{Float64}},
                                    obs_error_inv::Dict{Int, Matrix{Float64}})
        k = size(projection.P_x, 2)
        new(projection, innovations, obs_error_inv, Inf, zeros(k))
    end
end

"""
    DRP4DVariationalSolver

Optimization solver for DRP-4DVar reduced-space problems.

This structure manages the optimization process in the ensemble subspace,
including different optimization algorithms and convergence monitoring.

# Fields
- `algorithm::String`: Optimization algorithm name
- `max_iterations::Int`: Maximum iterations allowed
- `tolerance::Float64`: Convergence tolerance
- `line_search::Bool`: Enable line search
- `step_history::Vector{Vector{Float64}}`: Step history for L-BFGS
- `gradient_history::Vector{Vector{Float64}}`: Gradient history for L-BFGS
- `convergence_history::Vector{Float64}`: Convergence metric history
"""
mutable struct DRP4DVariationalSolver
    algorithm::String
    max_iterations::Int
    tolerance::Float64
    line_search::Bool
    step_history::Vector{Vector{Float64}}
    gradient_history::Vector{Vector{Float64}}
    convergence_history::Vector{Float64}
    
    # Constructor
    function DRP4DVariationalSolver(algorithm::String = "lbfgs";
                                   max_iterations::Int = 100,
                                   tolerance::Float64 = 1e-6,
                                   line_search::Bool = true)
        new(algorithm, max_iterations, tolerance, line_search,
            Vector{Vector{Float64}}(), Vector{Vector{Float64}}(), Float64[])
    end
end

# =============================================================================
# Ensemble Perturbation Generation
# =============================================================================

"""
    ensemble_perturbations(background_state, background_error_cov, ensemble_size;
                          inflation=1.0, localization_radius=Inf,
                          coordinates=nothing, random_seed=nothing)

Generate ensemble perturbations from either an explicit covariance matrix or an
implicit background-error operator.

# Arguments
- `background_state::Vector{Float64}`: Background state vector
- `background_error_cov`: Background error representation (`Matrix` or
  `GSIIntegration.GSIBackgroundErrorInterface`)
- `ensemble_size::Int`: Number of ensemble members to generate

# Keyword Arguments
- `inflation::Float64=1.0`: Ensemble inflation factor
- `localization_radius::Float64=Inf`: Spatial localization radius (km)
- `coordinates::Union{Nothing,Matrix{Float64}}=nothing`: Grid coordinates for localization
- `random_seed::Union{Nothing,Int}=nothing`: Random seed for reproducibility

# Returns
- `EnsembleProjection`: Projection matrices and ensemble data
"""
function ensemble_perturbations(background_state::Vector{Float64},
                               background_error_cov,
                               ensemble_size::Int;
                               inflation::Float64 = 1.0,
                               localization_radius::Float64 = Inf,
                               coordinates::Union{Nothing,Matrix{Float64}} = nothing,
                               random_seed::Union{Nothing,Int} = nothing)

    if random_seed !== nothing
        Random.seed!(random_seed)
    end

    if background_error_cov isa AbstractMatrix{<:Real}
        return _ensemble_from_matrix(background_state, background_error_cov, ensemble_size;
                                     inflation = inflation,
                                     localization_radius = localization_radius,
                                     coordinates = coordinates)
    elseif background_error_cov isa GSIIntegration.GSIBackgroundErrorInterface
        return _ensemble_from_operator(background_state, background_error_cov, ensemble_size;
                                       inflation = inflation,
                                       localization_radius = localization_radius)
    else
        throw(ArgumentError("Unsupported background error representation: $(typeof(background_error_cov))"))
    end
end

"""
    _ensemble_from_matrix(background_state, background_error_cov, ensemble_size;
                          inflation, localization_radius, coordinates)

Generate an `EnsembleProjection` by sampling an explicit background error
covariance matrix.

The covariance is (optionally localized, then) eigendecomposed; perturbations
are drawn as `L*ξ` with `L = V√Λ` restricted to the leading positive modes and
`ξ ~ N(0, I)`, so the sample covariance of the members approximates the
truncated B.

# Arguments / Keywords
- `background_state`: Background state vector (used only for its length `n`)
- `background_error_cov`: Explicit n × n background error covariance
- `ensemble_size`: Requested number of members `k` (must be ≥ 2)
- `inflation`: Multiplicative inflation applied to the raw perturbations
- `localization_radius`: Gaspari-Cohn radius in km (`Inf` disables)
- `coordinates`: Grid coordinates required for localization (or `nothing`)

# Returns
- `EnsembleProjection` with projection matrix `P_x = X′/√(k_eff - 1)`

# Throws
- `ArgumentError` when fewer than 2 members are requested, or when B supplies
  fewer than 2 positive eigenvalues — either case would make the
  `1/√(k_eff - 1)` scaling divide by zero
"""
function _ensemble_from_matrix(background_state::Vector{Float64},
                               background_error_cov::AbstractMatrix{<:Real},
                               ensemble_size::Int;
                               inflation::Float64,
                               localization_radius::Float64,
                               coordinates)
    n = length(background_state)
    k = ensemble_size

    # Mirror the implicit-operator path: k < 2 would make the 1/√(k-1)
    # projection scaling divide by zero and fill P_x with Inf/NaN.
    if k < 2
        throw(ArgumentError("ensemble_size must be at least 2"))
    end

    @printf("Generating %d ensemble perturbations for state dimension %d (explicit B)\n", k, n)

    B_work = copy(background_error_cov)
    if localization_radius < Inf && coordinates !== nothing
        B_work = apply_gaspari_cohn_localization(Matrix{Float64}(B_work), coordinates, localization_radius)
        println("Applied Gaspari-Cohn localization with radius $(localization_radius) km")
    end

    println("Computing eigendecomposition of background error covariance...")
    start_time = time()
    eigen_result = eigen(Symmetric(Matrix{Float64}(B_work)))
    eigenvals = eigen_result.values
    eigenvecs = eigen_result.vectors

    # Discard numerically zero/negative modes so sqrt() below is safe.
    positive_indices = eigenvals .> 1e-12
    eigenvals = eigenvals[positive_indices]
    eigenvecs = eigenvecs[:, positive_indices]

    # Order modes by decreasing variance before truncation.
    sort_indices = sortperm(eigenvals, rev=true)
    eigenvals = eigenvals[sort_indices]
    eigenvecs = eigenvecs[:, sort_indices]

    decomp_time = time() - start_time
    @printf("Eigendecomposition completed in %.3f seconds\n", decomp_time)
    @printf("Retained %d positive eigenvalues out of %d\n", length(eigenvals), n)

    # Truncate to the leading k modes (or fewer when B is rank-deficient).
    k_effective = min(k, length(eigenvals))
    if k_effective < 2
        # A rank-<2 covariance cannot support the ensemble scaling either.
        throw(ArgumentError("background error covariance must provide at least 2 positive eigenvalues"))
    end
    eigenvals_trunc = eigenvals[1:k_effective]
    eigenvecs_trunc = eigenvecs[:, 1:k_effective]

    total_variance = sum(eigenvals)
    explained_variance = total_variance > 0 ? sum(eigenvals_trunc) / total_variance : 0.0
    @printf("Ensemble captures %.2f%% of total error variance\n", explained_variance * 100)

    # Square-root factor L = V√Λ of the truncated covariance.
    sqrt_eigenvals = sqrt.(eigenvals_trunc)
    L = eigenvecs_trunc * Diagonal(sqrt_eigenvals)

    # Draw each member in the k_effective-dimensional mode space.
    perturbations = zeros(Float64, n, k_effective)
    for i in 1:k_effective
        ξ = randn(k_effective)
        perturbations[:, i] = L * ξ
    end

    if inflation != 1.0
        perturbations .*= inflation
        @printf("Applied ensemble inflation factor %.3f\n", inflation)
    end

    # Center the sample so the perturbation columns have zero mean.
    ensemble_mean = vec(mean(perturbations, dims=2))
    perturbations .-= ensemble_mean

    # Standard ensemble square-root scaling: P_x P_xᵀ ≈ sample covariance.
    P_x = perturbations / sqrt(k_effective - 1)
    P_y = Dict{Int, Matrix{Float64}}()

    @printf("Generated ensemble projection matrix of size %d×%d\n", size(P_x)...)

    return EnsembleProjection(
        P_x,
        P_y,
        perturbations,
        ensemble_mean,
        eigenvals_trunc,
        explained_variance
    )
end

"""
    _ensemble_from_operator(background_state, bg_interface, ensemble_size;
                            inflation, localization_radius)

Generate an `EnsembleProjection` by drawing samples through an implicit
background-error operator instead of an explicit covariance matrix.

Each member is produced by feeding a standard-normal vector of length `n`
through `GSIIntegration.sample_background_covariance` (presumably an
application of B^(1/2) — TODO confirm against GSIIntegration), so the sample
covariance of the members approximates B without ever forming it.

# Arguments / Keywords
- `background_state`: Background state vector (used only for its length `n`)
- `bg_interface`: GSI background-error interface providing the sampling operator
- `ensemble_size`: Number of members `k` (must be ≥ 2)
- `inflation`: Multiplicative inflation applied to the raw perturbations
- `localization_radius`: Spatial localization radius in km (`Inf` disables)

# Returns
- `EnsembleProjection`; note `explained_variance` is reported as 1.0 whenever
  the perturbations are nonzero, since no truncation happens in this path and
  the ensemble trivially spans its own subspace

# Throws
- `ArgumentError` when `ensemble_size < 2` (the `1/√(k-1)` scaling would be
  undefined)
"""
function _ensemble_from_operator(background_state::Vector{Float64},
                                 bg_interface::GSIIntegration.GSIBackgroundErrorInterface,
                                 ensemble_size::Int;
                                 inflation::Float64,
                                 localization_radius::Float64)
    n = length(background_state)
    k = ensemble_size

    # Need at least two members so the 1/√(k-1) projection scaling is finite.
    if k < 2
        throw(ArgumentError("ensemble_size must be at least 2"))
    end

    @printf("Generating %d ensemble perturbations for state dimension %d (implicit B)\n", k, n)

    # Draw each member by pushing white noise through the implicit operator.
    perturbations = zeros(Float64, n, k)
    for i in 1:k
        ξ = randn(n)
        perturbations[:, i] = GSIIntegration.sample_background_covariance(bg_interface, ξ)
    end

    if inflation != 1.0
        perturbations .*= inflation
        @printf("Applied ensemble inflation factor %.3f\n", inflation)
    end

    # Center the members so perturbation columns have zero mean.
    ensemble_mean = vec(mean(perturbations, dims=2))
    perturbations .-= ensemble_mean

    if localization_radius < Inf
        _apply_operator_localization!(perturbations, bg_interface, localization_radius)
        # Recenter after localization to avoid numerical drift
        ensemble_mean = vec(mean(perturbations, dims=2))
        perturbations .-= ensemble_mean
    end

    # Standard ensemble square-root scaling: P_x P_xᵀ ≈ sample covariance.
    P_x = perturbations / sqrt(k - 1)
    P_y = Dict{Int, Matrix{Float64}}()

    # Estimate eigenvalues from the k × k sample covariance in ensemble space
    # (cheap compared to the full n × n state-space eigenproblem).
    cov_subspace = Symmetric((perturbations' * perturbations) / (k - 1))
    eigenvals_subspace = eigen(cov_subspace).values
    sort!(eigenvals_subspace, rev=true)
    # No truncation here, so the ensemble explains all of the variance it
    # represents; 0.0 is only reported for an all-zero ensemble.
    explained_variance = !iszero(sum(eigenvals_subspace)) ? 1.0 : 0.0

    @printf("Generated ensemble projection matrix of size %d×%d using implicit operator\n",
            size(P_x, 1), size(P_x, 2))

    return EnsembleProjection(
        P_x,
        P_y,
        perturbations,
        ensemble_mean,
        eigenvals_subspace,
        explained_variance
    )
end

"""
    _apply_operator_localization!(perturbations, bg_interface, localization_radius_km)

Localize ensemble perturbation columns in-place using the Gaspari-Cohn taper
from the shared `CovarianceLocalization` machinery, with per-element state
coordinates derived from the analysis grid.

Returns the mutated `perturbations` matrix.
"""
function _apply_operator_localization!(perturbations::Matrix{Float64},
                                       bg_interface::GSIIntegration.GSIBackgroundErrorInterface,
                                       localization_radius_km::Float64)
    F = eltype(perturbations)
    radius = F(localization_radius_km)

    # Pseudo-coordinates (km) for every element of the state vector.
    state_coords = _build_state_localization_coordinates(bg_interface, F)

    taper = CovLocalization.GaspariCohnFunction{F}(radius)
    config = CovLocalization.LocalizationConfiguration{F}(base_radius = radius)
    operator = CovLocalization.LocalizationOperator{F}(taper, config, state_coords)

    # apply_localization allocates its result; copy it back into the input.
    perturbations .= CovLocalization.apply_localization(operator, perturbations)
    return perturbations
end

"""
    _build_state_localization_coordinates(bg_interface, T)

Construct pseudo-coordinates (in km) for every element of the analysis state
vector, used to evaluate distance-based localization between state components.

The state is assumed to be laid out as four 3-D fields of size `nx*ny*nz`
followed by one 2-D surface field of size `nx*ny` (total
`4*nx*ny*nz + nx*ny` elements) — NOTE(review): this layout must match the
control-vector ordering used by the GSI integration; confirm if that ordering
ever changes.

# Arguments
- `bg_interface`: GSI background-error interface carrying the grid description
- `T`: Floating-point element type for the coordinate matrix

# Returns
- `Matrix{T}` of size `n_state × 3` with columns (x, y, z) in km
"""
function _build_state_localization_coordinates(bg_interface::GSIIntegration.GSIBackgroundErrorInterface,
                                               ::Type{T}) where {T<:AbstractFloat}
    analysis_config = bg_interface.analysis_config
    nx, ny, nz = analysis_config.grid_size
    grid = bg_interface.grid

    # Grid spacings default to 1000 (raw units presumably meters — the /1000
    # below converts to km; TODO confirm units against the grid provider).
    dx_raw = float(_grid_value(grid, :dx, 1000.0))
    dy_raw = float(_grid_value(grid, :dy, 1000.0))
    dx = T(dx_raw) / T(1000.0)
    dy = T(dy_raw) / T(1000.0)
    dz_raw_default = max(dx_raw, dy_raw) / 5
    dz_raw = float(_grid_value(grid, :dz, dz_raw_default))
    dz = T(dz_raw) / T(1000.0)
    # Guard against a degenerate zero vertical spacing.
    dz = dz == T(0) ? T(0.1) : dz

    n_three_d = nx * ny * nz
    n_two_d = nx * ny
    n_state = 4 * n_three_d + n_two_d

    coords = zeros(T, n_state, 3)
    idx = 1
    # Precompute level heights; fall back to a single zero level when nz == 0.
    z_levels = nz > 0 ? zeros(T, nz) : T[0]
    for k in 1:nz
        z_levels[k] = (T(k) - T(1)) * dz
    end

    # Four 3-D variables: each gets a small variable-dependent vertical offset
    # so different variables at the same grid point are not exactly collocated
    # (slightly weakens cross-variable localization — presumably intentional;
    # confirm).
    for var in 1:4
        var_offset = T(var - 1) * dz * T(0.1)
        for k in 1:nz, j in 1:ny, i in 1:nx
            coords[idx, 1] = (T(i) - T(1)) * dx
            coords[idx, 2] = (T(j) - T(1)) * dy
            coords[idx, 3] = z_levels[k] + var_offset
            idx += 1
        end
    end

    # The 2-D surface field sits one level above the model top, with its own
    # variable offset continuing the pattern above.
    surface_offset = T(4) * dz * T(0.1)
    surface_level = nz > 0 ? (z_levels[end] + dz) : T(0)
    for j in 1:ny, i in 1:nx
        coords[idx, 1] = (T(i) - T(1)) * dx
        coords[idx, 2] = (T(j) - T(1)) * dy
        coords[idx, 3] = surface_level + surface_offset
        idx += 1
    end

    return coords
end

"""
    _grid_value(grid, key, default)

Fetch a named grid parameter from whatever container `grid` happens to be
(`nothing`, a dictionary, or a struct-like object with properties), falling
back to `default` when the value is unavailable.
"""
function _grid_value(grid, key::Symbol, default)
    grid === nothing && return default
    grid isa AbstractDict && return get(grid, key, default)
    return hasproperty(grid, key) ? getproperty(grid, key) : default
end

"""
    apply_gaspari_cohn_localization(B, coordinates, radius)

Apply Gaspari-Cohn localization function to background error covariance.

# Arguments
- `B::Matrix{Float64}`: Background error covariance matrix
- `coordinates::Matrix{Float64}`: Spatial coordinates (n×ndim)
- `radius::Float64`: Localization radius in km

# Returns
- `Matrix{Float64}`: Localized covariance matrix B ∘ ρ (Schur product)
"""
function apply_gaspari_cohn_localization(B::Matrix{Float64},
                                        coordinates::Matrix{Float64},
                                        radius::Float64)
    
    n = size(B, 1)
    localization_matrix = zeros(n, n)
    
    # Compute pairwise distances and apply localization function
    for i in 1:n
        for j in 1:n
            if i == j
                localization_matrix[i, j] = 1.0
            else
                # Compute great circle distance (simplified)
                dist = norm(coordinates[i, :] - coordinates[j, :])
                localization_matrix[i, j] = gaspari_cohn_function(dist, radius)
            end
        end
    end
    
    # Apply Schur product (element-wise multiplication)
    return B .* localization_matrix
end

"""
    gaspari_cohn_function(distance, radius)

Evaluate the Gaspari-Cohn localization function.

This function provides smooth, compactly supported correlation that goes to
zero at 2×radius, commonly used in ensemble data assimilation.

# Arguments
- `distance::Float64`: Distance between points
- `radius::Float64`: Localization radius

# Returns
- `Float64`: Localization correlation value ∈ [0,1]
"""
function gaspari_cohn_function(distance::Float64, radius::Float64)
    r = abs(distance) / radius
    
    if r >= 2.0
        return 0.0
    elseif r >= 1.0
        return -0.25 * r^5 + 0.5 * r^4 + 0.625 * r^3 - 5/3 * r^2 + 5*r + 4 - 2/3 * 1/r
    else
        return -0.25 * r^5 + 0.5 * r^4 + 0.625 * r^3 - 5/3 * r^2 + 1.0
    end
end

# =============================================================================
# Observation Space Projection
# =============================================================================

"""
    project_to_observation_space!(projection::EnsembleProjection, 
                                  observation_operators, model_operators, 
                                  time_steps, background_state)

Project ensemble perturbations to observation space for each time in the 4D-Var window.

This function computes P_y,i = H_i * M_{i,0} * P_x for each observation time,
where M_{i,0} represents the tangent linear model from initial time to time i.

# Arguments
- `projection::EnsembleProjection`: Ensemble projection structure (modified in-place)
- `observation_operators`: Collection of observation operators H_i
- `model_operators`: Collection of tangent linear model operators M_i
- `time_steps::Vector{Int}`: Time indices for observations
- `background_state::Vector{Float64}`: Background state at initial time

# Returns
- `Nothing`: Modifies projection.P_y in-place

# Mathematical Details

For each observation time t_i, we compute:
```
P_y,i = H_i * M_{i,i-1} * M_{i-1,i-2} * ... * M_{1,0} * P_x
```

This represents how ensemble perturbations at initial time map to observation space
at time t_i through the sequence of model integrations.
"""
function project_to_observation_space!(projection::EnsembleProjection,
                                      observation_operators,
                                      model_operators,
                                      time_steps::Vector{Int},
                                      background_state::Vector{Float64})
    
    println("Projecting ensemble to observation space for $(length(time_steps)) time steps...")
    start_time = time()
    
    propagated_ensemble = copy(projection.P_x)
    k = size(propagated_ensemble, 2)
    
    for (i, t) in enumerate(time_steps)
        @printf("  Processing time step %d (%d/%d)\n", t, i, length(time_steps))
        
        # Apply model operator from previous time to current time
        if t > 0 && haskey(model_operators, t)
            M_t = model_operators[t]
            if M_t isa AbstractMatrix
                propagated_ensemble = M_t * propagated_ensemble
            elseif M_t isa Function
                for j in 1:k
                    propagated_ensemble[:, j] = M_t(propagated_ensemble[:, j])
                end
            else
                @warn "Unsupported model operator type $(typeof(M_t)); skipping propagation"
            end
        elseif t > 0
            @warn "No model operator available for time step $t, using identity"
        end
        
        # Apply observation operator at current time
        if haskey(observation_operators, t)
            H_t = observation_operators[t]
            
            # Compute projected observation operator: P_y,t = H_t * M_{t,0} * P_x  
            # Apply observation operator to each column of M_{t,0} * P_x
            if H_t isa AbstractMatrix
                P_y_t = H_t * propagated_ensemble
            else
                test_obs = H_t(propagated_ensemble[:, 1])
                m_obs = length(test_obs)
                P_y_t = zeros(m_obs, k)
                P_y_t[:, 1] = test_obs
                for j in 2:k
                    P_y_t[:, j] = H_t(propagated_ensemble[:, j])
                end
            end
            
            # Store in projection structure
            projection.P_y[t] = P_y_t
            
            @printf("    P_y[%d] size: %d×%d\n", t, size(P_y_t)...)
        else
            @warn "No observation operator available for time step $t"
        end
    end
    
    projection_time = time() - start_time
    @printf("Observation space projection completed in %.3f seconds\n", projection_time)
    
    return nothing
end

# =============================================================================
# Reduced-Space Cost Function and Gradient
# =============================================================================

"""
    reduced_cost_function(α::Vector{Float64}, cost_func::ReducedSpaceCostFunction)

Evaluate the DRP-4DVar cost function in the reduced ensemble subspace.

The reduced-space cost function is:
```
J(α) = ½αᵀα + Σᵢ ½(P_y,ᵢα - dᵢ)ᵀRᵢ⁻¹(P_y,ᵢα - dᵢ)
```

# Arguments
- `α::Vector{Float64}`: Control variable in ensemble subspace
- `cost_func::ReducedSpaceCostFunction`: Cost function structure

# Returns
- `Float64`: Cost function value J(α)
"""
function reduced_cost_function(α::Vector{Float64}, cost_func::ReducedSpaceCostFunction)
    
    # Background term: J_b = ½αᵀα (simplified due to preconditioning)
    J_b = 0.5 * dot(α, α)
    
    # Observation term: J_o = Σᵢ ½(P_y,ᵢα - dᵢ)ᵀRᵢ⁻¹(P_y,ᵢα - dᵢ)
    J_o = 0.0
    
    for (t, P_y_t) in cost_func.projection.P_y
        if haskey(cost_func.innovations, t) && haskey(cost_func.obs_error_inv, t)
            # Projected increment in observation space
            h_α = P_y_t * α
            
            # Innovation vector
            d_t = cost_func.innovations[t]
            
            # Observation error inverse
            R_inv_t = cost_func.obs_error_inv[t]
            
            # Cost contribution: ½(h_α - d_t)ᵀ R⁻¹ (h_α - d_t)
            residual = h_α - d_t
            J_o += 0.5 * dot(residual, R_inv_t * residual)
        end
    end
    
    total_cost = J_b + J_o
    
    # Store current cost (for gradient computation)
    cost_func.current_cost = total_cost
    
    return total_cost
end

"""
    reduced_gradient(α::Vector{Float64}, cost_func::ReducedSpaceCostFunction)

Compute the gradient of the DRP-4DVar cost function in the reduced ensemble subspace.

The reduced-space gradient is:
```
∇J(α) = α + Σᵢ P_y,ᵢᵀRᵢ⁻¹(P_y,ᵢα - dᵢ)
```

# Arguments
- `α::Vector{Float64}`: Control variable in ensemble subspace
- `cost_func::ReducedSpaceCostFunction`: Cost function structure

# Returns
- `Vector{Float64}`: Gradient vector ∇J(α)
"""
function reduced_gradient(α::Vector{Float64}, cost_func::ReducedSpaceCostFunction)
    
    k = length(α)
    
    # Background term gradient: ∇J_b = α
    grad_b = copy(α)
    
    # Observation term gradient: ∇J_o = Σᵢ P_y,ᵢᵀRᵢ⁻¹(P_y,ᵢα - dᵢ)
    grad_o = zeros(k)
    
    for (t, P_y_t) in cost_func.projection.P_y
        if haskey(cost_func.innovations, t) && haskey(cost_func.obs_error_inv, t)
            # Projected increment in observation space
            h_α = P_y_t * α
            
            # Innovation vector
            d_t = cost_func.innovations[t]
            
            # Observation error inverse
            R_inv_t = cost_func.obs_error_inv[t]
            
            # Gradient contribution: P_y,ᵢᵀRᵢ⁻¹(P_y,ᵢα - dᵢ)
            residual = h_α - d_t
            grad_o += P_y_t' * (R_inv_t * residual)
        end
    end
    
    total_gradient = grad_b + grad_o
    
    # Store current gradient
    cost_func.current_gradient = total_gradient
    
    return total_gradient
end

# =============================================================================
# Optimization Algorithms
# =============================================================================

"""
    optimize_reduced_space(cost_func::ReducedSpaceCostFunction, 
                          solver::DRP4DVariationalSolver,
                          initial_guess::Vector{Float64})

Optimize the DRP-4DVar cost function in the reduced ensemble subspace.

# Arguments
- `cost_func::ReducedSpaceCostFunction`: Cost function to minimize
- `solver::DRP4DVariationalSolver`: Optimization solver configuration
- `initial_guess::Vector{Float64}`: Initial guess for control variable α

# Returns
- `Vector{Float64}`: Optimal control variable α*
- `Dict{String,Any}`: Optimization statistics and convergence information
"""
function optimize_reduced_space(cost_func::ReducedSpaceCostFunction,
                               solver::DRP4DVariationalSolver,
                               initial_guess::Vector{Float64})
    
    println("Starting reduced-space optimization with $(solver.algorithm) algorithm...")
    start_time = time()
    
    α = copy(initial_guess)
    k = length(α)
    
    # Clear solver history
    empty!(solver.step_history)
    empty!(solver.gradient_history)
    empty!(solver.convergence_history)
    
    converged = false
    final_cost = Inf
    final_gradient_norm = Inf
    
    for iter in 1:solver.max_iterations
        # Evaluate cost function and gradient
        cost = reduced_cost_function(α, cost_func)
        grad = reduced_gradient(α, cost_func)
        grad_norm = norm(grad)
        
        # Store convergence history
        push!(solver.convergence_history, grad_norm)
        
        @printf("  Iter %3d: Cost = %12.6e, |∇J| = %12.6e\n", iter, cost, grad_norm)
        
        # Check convergence
        if grad_norm < solver.tolerance * k
            converged = true
            final_cost = cost
            final_gradient_norm = grad_norm
            @printf("  Converged after %d iterations!\n", iter)
            break
        end
        
        # Compute optimization step based on algorithm
        if solver.algorithm == "lbfgs"
            step = lbfgs_step(α, grad, solver)
        elseif solver.algorithm == "gauss_newton"
            step = gauss_newton_step(α, grad, cost_func)
        elseif solver.algorithm == "conjugate_gradient"
            step = conjugate_gradient_step(α, grad, solver, iter)
        else
            # Fallback to steepest descent
            step = -grad
        end
        
        # Apply line search if enabled
        if solver.line_search
            step_size = armijo_line_search(α, step, cost_func)
            step *= step_size
        end
        
        # Update control variable
        α += step
        
        # Store step and gradient for L-BFGS
        if solver.algorithm == "lbfgs"
            push!(solver.step_history, copy(step))
            push!(solver.gradient_history, copy(grad))
            
            # Keep only recent history (memory limit)
            max_memory = min(10, k)
            if length(solver.step_history) > max_memory
                popfirst!(solver.step_history)
                popfirst!(solver.gradient_history)
            end
        end
    end
    
    optimization_time = time() - start_time
    
    if !converged
        @warn "Optimization did not converge within $(solver.max_iterations) iterations"
        final_cost = reduced_cost_function(α, cost_func)
        final_gradient_norm = norm(reduced_gradient(α, cost_func))
    end
    
    # Compilation optimization statistics
    stats = Dict{String,Any}(
        "converged" => converged,
        "final_cost" => final_cost,
        "final_gradient_norm" => final_gradient_norm,
        "iterations" => length(solver.convergence_history),
        "optimization_time" => optimization_time,
        "convergence_history" => copy(solver.convergence_history),
        "algorithm" => solver.algorithm
    )
    
    @printf("Optimization completed in %.3f seconds\n", optimization_time)
    
    return α, stats
end

# =============================================================================
# Optimization Step Functions
# =============================================================================

"""
    lbfgs_step(α, grad, solver)

Compute L-BFGS step direction using two-loop recursion.
"""
function lbfgs_step(α::Vector{Float64}, grad::Vector{Float64}, 
                   solver::DRP4DVariationalSolver)
    
    if length(solver.gradient_history) == 0
        # First iteration: steepest descent
        return -grad
    end
    
    # L-BFGS two-loop recursion
    q = copy(grad)
    α_hist = Float64[]
    ρ_hist = Float64[]
    
    m = length(solver.step_history)
    
    # First loop (backward)
    for i in m:-1:1
        s_i = solver.step_history[i]
        y_i = solver.gradient_history[i]
        
        ρ_i = 1.0 / (dot(y_i, s_i) + 1e-16)
        α_i = ρ_i * dot(s_i, q)
        q -= α_i * y_i
        
        pushfirst!(α_hist, α_i)
        pushfirst!(ρ_hist, ρ_i)
    end
    
    # Initial Hessian scaling
    if m > 0
        s_recent = solver.step_history[end]
        y_recent = solver.gradient_history[end]
        γ = dot(s_recent, y_recent) / (dot(y_recent, y_recent) + 1e-16)
        r = γ * q
    else
        r = q
    end
    
    # Second loop (forward)
    for i in 1:m
        s_i = solver.step_history[i]
        y_i = solver.gradient_history[i]
        β = ρ_hist[i] * dot(y_i, r)
        r += (α_hist[i] - β) * s_i
    end
    
    return -r
end

"""
    gauss_newton_step(α, grad, cost_func)

Compute Gauss-Newton step for the reduced-space problem.
"""
function gauss_newton_step(α::Vector{Float64}, grad::Vector{Float64},
                          cost_func::ReducedSpaceCostFunction)
    
    k = length(α)
    
    # Approximate Hessian: H ≈ I + Σᵢ P_y,ᵢᵀRᵢ⁻¹P_y,ᵢ
    H = Matrix{Float64}(I, k, k)  # Background term contributes identity
    
    for (t, P_y_t) in cost_func.projection.P_y
        if haskey(cost_func.obs_error_inv, t)
            R_inv_t = cost_func.obs_error_inv[t]
            H += P_y_t' * (R_inv_t * P_y_t)
        end
    end
    
    # Solve H * step = -grad
    try
        return -H \ grad
    catch
        # Fallback to regularized version
        @warn "Gauss-Newton step failed, using regularized version"
        H_reg = H + 0.1 * norm(H) * I
        return -H_reg \ grad
    end
end

"""
    conjugate_gradient_step(α, grad, solver, iteration)

Compute conjugate gradient step direction.
"""
function conjugate_gradient_step(α::Vector{Float64}, grad::Vector{Float64},
                                solver::DRP4DVariationalSolver, iteration::Int)
    
    if iteration == 1 || isempty(solver.gradient_history)
        # First iteration: steepest descent
        return -grad
    end
    
    # Fletcher-Reeves conjugate gradient
    grad_old = solver.gradient_history[end]
    β = dot(grad, grad) / (dot(grad_old, grad_old) + 1e-16)
    
    if isempty(solver.step_history)
        return -grad
    end
    
    # Update search direction
    d_old = solver.step_history[end]
    return -grad + β * d_old
end

"""
    armijo_line_search(α, step, cost_func; c1=1e-4, max_iter=20)

Perform Armijo backtracking line search.
"""
function armijo_line_search(α::Vector{Float64}, step::Vector{Float64},
                           cost_func::ReducedSpaceCostFunction;
                           c1::Float64 = 1e-4, max_iter::Int = 20)
    
    # Initial function value and directional derivative
    f0 = reduced_cost_function(α, cost_func)
    grad0 = reduced_gradient(α, cost_func)
    directional_deriv = dot(grad0, step)
    
    # If step is ascent direction, return small step
    if directional_deriv >= 0
        return 0.1
    end
    
    step_size = 1.0
    
    for i in 1:max_iter
        # Test point
        α_test = α + step_size * step
        f_test = reduced_cost_function(α_test, cost_func)
        
        # Armijo condition
        if f_test <= f0 + c1 * step_size * directional_deriv
            return step_size
        end
        
        # Backtrack
        step_size *= 0.5
        
        if step_size < 1e-8
            break
        end
    end
    
    return max(step_size, 1e-6)  # Minimum step size
end

# =============================================================================
# Main DRP-4DVar Algorithm
# =============================================================================

"""
    run_drp4dvar(method::DRP4DVar, background_state, background_error_cov,
                observations, observation_operators, model_operators)

Execute the complete DRP-4DVar algorithm.

# Arguments
- `method::DRP4DVar`: DRP-4DVar configuration
- `background_state::Vector{Float64}`: Initial background state
- `background_error_cov`: Background error representation (dense matrix or
  GSI background operator)
- `observations::Dict{Int,Vector{Float64}}`: Observations by time index
- `observation_operators::Dict{Int,Any}`: Observation operators by time
- `model_operators::Dict{Int,Any}`: Tangent linear model operators by time

# Returns
- `Vector{Float64}`: Analysis state
- `Dict{String,Any}`: Complete algorithm statistics and diagnostics
"""
function run_drp4dvar(method::DRP4DVar,
                     background_state::Vector{Float64},
                     background_error_cov,
                     observations::Dict{Int,Vector{Float64}},
                     observation_operators::Dict{Int,Any},
                     model_operators::Dict{Int,Any})
    
    println("=" ^ 60)
    println("DRP-4DVar Algorithm Execution")
    println("=" ^ 60)
    
    start_time = time()
    n_state = length(background_state)
    
    @printf("State dimension: %d\n", n_state)
    @printf("Ensemble size: %d\n", method.ensemble_size)
    @printf("Time window: %d\n", method.time_window)
    @printf("Optimization algorithm: %s\n", method.optimizer)
    println("Background error backend: $(typeof(background_error_cov))")
    
    analysis_state = copy(background_state)
    overall_converged = false
    last_opt_stats = Dict{String,Any}()
    
    # Outer loop for nonlinear problems
    for outer_iter in 1:method.max_outer_loops
        @printf("\nOuter loop %d/%d\n", outer_iter, method.max_outer_loops)
        println("-" ^ 30)
        
        # Step 1: Generate ensemble perturbations and projection matrices
        println("Step 1: Generating ensemble perturbations...")
        projection = ensemble_perturbations(
            analysis_state, 
            background_error_cov, 
            method.ensemble_size,
            inflation = method.ensemble_inflation,
            localization_radius = method.use_localization ? method.localization_radius : Inf
        )
        
        # Step 2: Project to observation space
        println("Step 2: Projecting to observation space...")
        time_steps = collect(keys(observations))
        sort!(time_steps)
        
        project_to_observation_space!(
            projection,
            observation_operators,
            model_operators, 
            time_steps,
            analysis_state
        )
        
        # Step 3: Compute innovations
        println("Step 3: Computing innovation vectors...")
        innovations = Dict{Int, Vector{Float64}}()
        obs_error_inv = Dict{Int, Matrix{Float64}}()
        
        for t in time_steps
            if haskey(observations, t) && haskey(observation_operators, t)
                # Innovation: d = y - H(x_b)
                y_obs = observations[t]
                h_background = observation_operators[t](analysis_state)  # Nonlinear observation operator
                innovations[t] = y_obs - h_background
                
                # Compute observation error inverse (simplified - assume diagonal)
                m_obs = length(y_obs)
                obs_error_inv[t] = Matrix{Float64}(I, m_obs, m_obs)  # R^{-1} = I (unit variance)
                
                @printf("  Time %d: %d observations, innovation norm = %.3f\n", 
                       t, m_obs, norm(innovations[t]))
            end
        end
        
        # Step 4: Set up reduced-space cost function
        println("Step 4: Setting up reduced-space cost function...")
        cost_func = ReducedSpaceCostFunction(projection, innovations, obs_error_inv)
        
        # Step 5: Optimize in reduced space
        println("Step 5: Optimizing in ensemble subspace...")
        solver = DRP4DVariationalSolver(
            method.optimizer,
            max_iterations = method.max_inner_loops,
            tolerance = method.convergence_tolerance
        )
        
        # Initial guess: α = 0 (no correction to background)
        k = size(projection.P_x, 2)
        initial_alpha = zeros(k)
        
        optimal_alpha, opt_stats = optimize_reduced_space(cost_func, solver, initial_alpha)
        
        # Step 6: Transform back to model space
        println("Step 6: Computing analysis increment...")
        analysis_increment = projection.P_x * optimal_alpha
        analysis_state += analysis_increment
        
        # Store statistics for this outer loop
        overall_converged |= get(opt_stats, "converged", false)
        last_opt_stats = opt_stats

        method.statistics["outer_loop_$(outer_iter)"] = Dict(
            "optimization_stats" => opt_stats,
            "analysis_increment_norm" => norm(analysis_increment),
            "final_cost" => opt_stats["final_cost"],
            "explained_variance" => projection.explained_variance
        )
        
        @printf("Analysis increment norm: %.6e\n", norm(analysis_increment))
        @printf("Final cost function: %.6e\n", opt_stats["final_cost"])
        
        # Check outer loop convergence
        if norm(analysis_increment) < method.convergence_tolerance * n_state
            @printf("Outer loop converged after %d iterations\n", outer_iter)
            break
        end
    end
    
    total_time = time() - start_time
    
    # Final statistics
    final_stats = Dict{String,Any}(
        "total_execution_time" => total_time,
        "state_dimension" => n_state,
        "ensemble_size" => method.ensemble_size,
        "final_analysis_state" => copy(analysis_state),
        "background_state" => copy(background_state),
        "analysis_increment" => analysis_state - background_state,
        "total_increment_norm" => norm(analysis_state - background_state),
        "method_parameters" => Dict(
            "ensemble_size" => method.ensemble_size,
            "time_window" => method.time_window,
            "optimizer" => method.optimizer,
            "convergence_tolerance" => method.convergence_tolerance
        )
    )
    final_stats["converged"] = overall_converged
    if !isempty(last_opt_stats)
        final_stats["final_cost"] = get(last_opt_stats, "final_cost", NaN)
        final_stats["final_gradient_norm"] = get(last_opt_stats, "final_gradient_norm", NaN)
    end
    
    # Merge with method statistics
    merge!(final_stats, method.statistics)
    
    println("=" ^ 60)
    @printf("DRP-4DVar completed in %.3f seconds\n", total_time)
    @printf("Total analysis increment norm: %.6e\n", norm(analysis_state - background_state))
    println("=" ^ 60)
    
    return analysis_state, final_stats
end

# Export GSI integration functionality
export GSIObservationInterface, GSIBackgroundErrorInterface, GSIModelInterface
export AtmosphericTestCase, OperationalDRP4DVar
export create_realistic_observations, create_gsi_background_error
export create_atmospheric_test_case, run_operational_drp4dvar

end # module FourDVar
