# Bayesian inversion core (port of retrieval.f90 and related)

module Inversion

using LinearAlgebra
using LinearAlgebra.LAPACK
using Statistics
using Dates
using Base: Set
using DelimitedFiles
using ..Settings: Config, Files
using ..CoreTypes: Domain
using ..ObservationsCore: Observations
using ..StateVector: InversionState, update_state!, add_to_optimization_history!
using ..Transformations
using Printf: @sprintf
using ..PriorCovariance: CovarianceMatrix, locate_fortran_file as locate_covariance_artifact
using ..ForwardModel: forward_model, PriorFluxes, resample_footprint_to_domain
import ..MOLAR_MASS_AIR
import ..NUMERICAL_SCALE
using ..ConjugateGradient: congrad!, CGResult
using ..QuasiNewton: m1qn3!, QNResult
using ..Transformations: transform_gradient, transform_state_increment
using ..MCMCIntegration: run_mcmc_inversion
using ..TransportInit: initialize_dates, DateInfo
using ..Tools: grid_area
using ..NetCDFIO: read_lsm!

export invert!, InversionResult, cost_function, gradient_function

"""
    InversionResult

Result structure for the complete inversion.
"""
struct InversionResult
    success::Bool                    # Overall success flag
    method::String                   # Optimization method used
    final_cost::Float64             # Final cost function value
    iterations::Int                 # Number of iterations
    state::InversionState          # Final state
    cost_history::Vector{Float64}   # Cost function history
    convergence_info::String        # Convergence information
end

"""
    invert!(state::InversionState, prior_fluxes::PriorFluxes, obs::Observations,
           covariance::CovarianceMatrix, cfg::Config, files::Files, domain::Domain) -> InversionResult

Run the Bayesian inversion using the selected optimization method.

# Arguments
- `state`: Inversion state (modified in-place)
- `prior_fluxes`: Prior flux fields
- `obs`: Observations
- `covariance`: Prior covariance matrix
- `cfg`: Configuration settings
- `files`: File paths
- `domain`: Domain information

# Returns
- InversionResult with optimization results
"""
function invert!(state::InversionState, prior_fluxes::PriorFluxes, obs::Observations,
                covariance::CovarianceMatrix, cfg::Config, files::Files, domain::Domain,
                footprints)

    @info "Starting Bayesian inversion"
    @info "Method: $(cfg.method)"
    @info "Observations: $(length(obs.concentrations))"
    @info "Control variables: $(state.n_control)"

    # Create cost and gradient functions
    cost_func = x -> cost_function(x, state, prior_fluxes, obs, covariance, cfg, domain, footprints, files)
    grad_func = x -> gradient_function(x, state, prior_fluxes, obs, covariance, cfg, domain, footprints, files)

    # Initial control vector
    x0 = copy(state.phi)

    # Select optimization method
    method = lowercase(cfg.method)
    result = if method == "congrad" || method == "cg"
        run_conjugate_gradient(cost_func, grad_func, x0, cfg)
    elseif method == "m1qn3" || method == "lbfgs" || method == "bfgs"
        run_quasi_newton(cost_func, grad_func, x0, cfg)
    elseif method == "analytic"
        run_analytic_inversion(state, prior_fluxes, obs, covariance, cfg, domain, footprints, files)
    elseif method == "mcmc"
        return run_mcmc_inversion_wrapper(state, prior_fluxes, obs, covariance, cfg, files, domain, footprints)
    else
        @warn "Unknown optimization method: $(cfg.method), using L-BFGS"
        run_quasi_newton(cost_func, grad_func, x0, cfg)
    end

    # Update final state
    if result.success && hasfield(typeof(result), :x)
        update_state!(state, result.x - x0)  # Apply increment
    end

    # Create inversion result
    # For analytic method, use the already computed cost from the analytic inversion
    # to avoid duplicate computation and potential inconsistencies
    if method == "analytic" && hasfield(typeof(result), :final_cost)
        final_cost = result.final_cost
    else
        final_cost = cost_func(state.phi)
    end
    convergence_info = result.success ? "Converged" : "Did not converge"

    @info "Inversion completed: $convergence_info"
    @info "Final cost: $final_cost"

    return InversionResult(
        result.success,
        cfg.method,
        final_cost,
        hasfield(typeof(result), :iterations) ? result.iterations : 0,
        state,
        hasfield(typeof(result), :cost_history) ? result.cost_history : [final_cost],
        convergence_info
    )
end

"""
    cost_function(phi::Vector{Float64}, state::InversionState, prior_fluxes::PriorFluxes,
                 obs::Observations, covariance::CovarianceMatrix, cfg::Config, domain::Domain) -> Float64

Compute the cost function J = J_obs + J_prior.

# Arguments
- `phi`: Control variables
- `state`: Current inversion state
- `prior_fluxes`: Prior flux fields
- `obs`: Observations
- `covariance`: Prior covariance matrix
- `cfg`: Configuration
- `domain`: Domain information

# Returns
- Cost function value
"""
function cost_function(phi::Vector{Float64}, state::InversionState, prior_fluxes::PriorFluxes,
                      obs::Observations, covariance::CovarianceMatrix, cfg::Config, domain::Domain,
                      footprints, files)
    obs_cost, prior_cost = compute_cost_terms(phi, state, prior_fluxes, obs, covariance, cfg, domain,
                                              footprints, files)
    return obs_cost + prior_cost
end

"""
    get_observation_errors(obs::Observations, cfg::Config) -> Vector

Build the per-observation error (σ) vector. Prefers `obs.total_errors` when
it matches the observation count and contains any non-zero entry, otherwise
falls back to `obs.measurement_errors`. Non-finite or non-positive entries
are replaced by `cfg.measerr` (or 1.0 when that is itself non-positive).
"""
function get_observation_errors(obs::Observations, cfg::Config)
    n_obs = length(obs.concentrations)
    use_total = length(obs.total_errors) == n_obs && any(!iszero, obs.total_errors)
    source = use_total ? obs.total_errors : obs.measurement_errors

    default_err = cfg.measerr > 0 ? cfg.measerr : 1.0
    # Preserve the element type of the source vector via similar().
    out = similar(source)
    for k in eachindex(source)
        v = source[k]
        out[k] = (isfinite(v) && v > 0) ? v : default_err
    end
    return out
end

"""
    compute_cost_terms(phi, state, prior_fluxes, obs, covariance, cfg, domain,
                       footprints, files[, H]) -> (obs_cost, prior_cost)

Evaluate the two components of the Bayesian cost function for the candidate
control vector `phi`:

- `obs_cost`  = 0.5 Σ ((y_mod - y)/σ)², the observation misfit with a diagonal
  error covariance from `get_observation_errors`.
- `prior_cost` = 0.5 χᵀ B⁻¹ χ, the prior penalty on the physical increment
  χ = `phi2chi(phi)`.

`state.phi` is temporarily overwritten with `phi` so the forward model sees
the candidate coefficients; it is restored in the `finally` block even when
the forward model throws. An optional precomputed observation operator `H`
is forwarded to `forward_model`.
"""
function compute_cost_terms(phi::Vector{Float64}, state::InversionState, prior_fluxes::PriorFluxes,
                            obs::Observations, covariance::CovarianceMatrix, cfg::Config, domain::Domain,
                            footprints, files, H::Union{Nothing,AbstractMatrix}=nothing)

    # Temporarily update state for forward model so downstream helpers see the
    # candidate coefficients while we build cost terms.
    original_phi = copy(state.phi)
    state.phi .= phi

    try
        # Always build the physical increment; this mirrors the Fortran
        # analytic pathway where both the forward-model RHS and the prior term
        # operate on the same χ representation.
        state_increment = Transformations.phi2chi(phi, covariance)

        y_mod = forward_model(cfg, domain, prior_fluxes, obs;
                              footprints=footprints, files=files,
                              chi_increment=state_increment, H=H,
                              chi_prior=state.chi_prior)

        # Observation cost: J_obs = 0.5 * (y - y_mod)^T * R^{-1} * (y - y_mod)
        modeled_conc = y_mod
        residual = modeled_conc .- obs.concentrations
        obs_errors = get_observation_errors(obs, cfg)
        obs_cost = 0.5 * sum(residual.^2 ./ obs_errors.^2)

        # Prior cost in physical space: J_prior = 0.5 χᵀB^{-1}χ
        binv_state = Transformations.apply_B_inv(state_increment, covariance)
        prior_cost = 0.5 * dot(state_increment, binv_state)
        return obs_cost, prior_cost

    finally
        # Restore original state
        state.phi .= original_phi
    end
end

"""
    gradient_function(phi::Vector{Float64}, state::InversionState, prior_fluxes::PriorFluxes,
                     obs::Observations, covariance::CovarianceMatrix, cfg::Config, domain::Domain) -> Vector{Float64}

Compute the gradient of the cost function.

# Arguments
- `phi`: Control variables
- `state`: Current inversion state
- `prior_fluxes`: Prior flux fields
- `obs`: Observations
- `covariance`: Prior covariance matrix
- `cfg`: Configuration
- `domain`: Domain information

# Returns
- Gradient vector in control space
"""
function gradient_function(phi::Vector{Float64}, state::InversionState, prior_fluxes::PriorFluxes,
                          obs::Observations, covariance::CovarianceMatrix, cfg::Config, domain::Domain,
                          footprints, files)

    # Temporarily update state for forward model
    original_phi = copy(state.phi)
    state.phi .= phi

    try
        # Forward model: compute y_mod
        y_mod = forward_model(cfg, domain, prior_fluxes, obs;
                              footprints=footprints, files=files)

        # Observation space gradient: g_obs = H^T * R^{-1} * (H*x - y)
        residual = y_mod .- obs.concentrations
        obs_gradient_physical = adjoint_operator(residual, obs, cfg, domain, footprints)

        # Transform to control space
        if length(obs_gradient_physical) != state.n_physical
            factor = div(state.n_physical, length(obs_gradient_physical))
            if factor * length(obs_gradient_physical) != state.n_physical
                error("Observation gradient length $(length(obs_gradient_physical)) does not tile to physical dimension $(state.n_physical)")
            end
            obs_gradient_physical = repeat(obs_gradient_physical, factor)
        end

        obs_gradient_control = transform_gradient(obs_gradient_physical, covariance)

        # Prior gradient: account for physical scaling in prior cost
        if covariance.has_physical_scaling
            # For prior cost J_prior = 0.5 * ||phi2chi(phi)||²
            # The gradient is: g_prior = (dJ_prior/dphi) = (dchi/dphi)^T * chi
            physical_increment = Transformations.phi2chi(phi, covariance)
            # Transform back through the physical scaling chain
            prior_gradient_control = Transformations.chi2phi(physical_increment, covariance)
        else
            # Prior gradient: g_prior = phi (since prior cost is 0.5 * phi^T * phi)
            prior_gradient_control = phi
        end

        # Total gradient
        total_gradient = obs_gradient_control + prior_gradient_control

        return total_gradient

    finally
        # Restore original state
        state.phi .= original_phi
    end
end

"""
    adjoint_operator(residual::Vector{Float64}, obs::Observations, cfg::Config, domain::Domain) -> Vector{Float64}

Apply adjoint of observation operator H^T.

This is a simplified implementation. In practice, this would involve:
1. Mapping residuals back through footprints
2. Aggregating to grid cells
3. Proper temporal and spatial adjoint operations

# Arguments
- `residual`: Observation space residual (y_mod - y)
- `obs`: Observations
- `cfg`: Configuration
- `domain`: Domain information

# Returns
- Gradient in physical space
"""
function adjoint_operator(residual::Vector{Float64}, obs::Observations, cfg::Config, domain::Domain,
                          footprints)
    n_obs = length(residual)
    nx = domain.nxregrid
    ny = domain.nyregrid
    gradient_physical = zeros(Float64, domain.nbox)

    lon_edges = collect(cfg.w_edge_lon:cfg.xres:cfg.e_edge_lon)
    lat_edges = collect(cfg.s_edge_lat:cfg.yres:cfg.n_edge_lat)

    for i in 1:n_obs
        fp = footprints[i]
        fp === nothing && continue
        grid = fp.grid
        if size(grid, 1) != nx || size(grid, 2) != ny
            grid = resample_footprint_to_domain(grid, fp.lon, fp.lat, lon_edges, lat_edges)
        end
        spatial = dropdims(sum(grid, dims=3); dims=3)
        for ix in 1:nx, iy in 1:ny
            idx = domain.nbox_xy[ix, iy]
            idx <= 0 && continue
            gradient_physical[idx] += residual[i] * spatial[ix, iy]
        end
    end

    return gradient_physical
end

"""
    read_fortran_matrix(path::AbstractString) -> Matrix{Float64}

Read a whitespace-delimited numeric text file written by Fortran into a dense
matrix. Fortran-style `D`/`d` exponents (e.g. `1.5D-3`) are accepted. Blank
lines are skipped, rows shorter than the widest row are zero-padded, and an
empty file yields a 0×0 matrix.
"""
function read_fortran_matrix(path::AbstractString)
    parsed_rows = open(path, "r") do io
        [
            # Normalize Fortran exponent markers before parsing each token.
            [parse(Float64, replace(tok, 'D' => 'E', 'd' => 'E')) for tok in split(stripped)]
            for stripped in Iterators.map(strip, eachline(io)) if !isempty(stripped)
        ]
    end

    isempty(parsed_rows) && return zeros(Float64, 0, 0)

    width = maximum(length, parsed_rows)
    out = zeros(Float64, length(parsed_rows), width)
    for (r, vals) in enumerate(parsed_rows)
        out[r, 1:length(vals)] .= vals
    end
    return out
end

"""
    maybe_load_fortran_hmat(cfg::Config, n_obs::Int, n_columns::Int)
        -> Union{Matrix{Float64}, Nothing}

Optionally load a Fortran-produced observation operator (`hmat.txt`) to use
in place of the Julia-built one. Active only when the environment variable
`FLEXINVERT_USE_FORTRAN_H` equals `"1"` and `cfg.fortran_output_path` is
non-empty. Returns `nothing` when disabled, when the file cannot be located
or read, or when its dimensions do not match `(n_obs, n_columns)`.
"""
function maybe_load_fortran_hmat(cfg::Config, n_obs::Int, n_columns::Int)
    get(ENV, "FLEXINVERT_USE_FORTRAN_H", "") == "1" || return nothing

    root = strip(cfg.fortran_output_path)
    isempty(root) && return nothing

    hmat_path = locate_covariance_artifact(root, "hmat.txt")
    hmat_path === nothing && return nothing

    loaded = try
        read_fortran_matrix(hmat_path)
    catch err
        @warn "Failed to read Fortran hmat override" path=hmat_path error=err
        return nothing
    end

    if size(loaded) != (n_obs, n_columns)
        @warn "Ignoring Fortran hmat override with mismatched dimensions" path=hmat_path rows=size(loaded, 1) cols=size(loaded, 2) expected_rows=n_obs expected_cols=n_columns
        return nothing
    end

    @info "Using Fortran observation operator override" path=hmat_path
    return loaded
end

"""
    build_observation_operator(footprints, cfg::Config, domain::Domain,
                               obs::Observations, date_info::DateInfo) -> Matrix{Float64}

Assemble the dense observation operator `H` of size
`(n_obs, nbox * ndt * ntstate)` by aggregating each observation's footprint
onto the inversion boxes, split by state-time slot (`jt`, via
`find_state_index`) and diurnal bin (`nt`, from local solar time using the
per-box offset `domain.hloc`).

Footprint grids whose horizontal shape does not match the inversion grid are
resampled first. Sensitivities are scaled by
`cfg.coeff * MOLAR_MASS_AIR / molarmass` (molar mass defaults to 44 when not
configured).

When a Fortran-produced `hmat.txt` override with matching dimensions is
available (`maybe_load_fortran_hmat`), it is returned directly and the
assembly is skipped entirely.
"""
function build_observation_operator(footprints, cfg::Config, domain::Domain,
                                    obs::Observations, date_info::DateInfo)
    n_obs = length(footprints)
    nx = domain.nxregrid
    ny = domain.nyregrid
    n_boxes = domain.nbox
    ndt = max(date_info.ndt, 1)
    ntstate = max(date_info.ntstate, 1)
    nt_total = ndt * ntstate

    # Consult the Fortran override up front so we do not spend time building
    # a matrix that would be discarded (previously the override was checked
    # only after the full assembly loop).
    H_override = maybe_load_fortran_hmat(cfg, n_obs, n_boxes * nt_total)
    H_override !== nothing && return H_override

    H = zeros(Float64, n_obs, n_boxes * nt_total)

    # Grid edges used only when a footprint needs resampling.
    lon_edges = collect(cfg.w_edge_lon:cfg.xres:cfg.e_edge_lon)
    lat_edges = collect(cfg.s_edge_lat:cfg.yres:cfg.n_edge_lat)
    statres = cfg.statres > 0 ? cfg.statres : 1.0
    state_times = [date_info.juldatei + statres * (k - 1) for k in 1:ntstate]
    hloc = isempty(domain.hloc) ? zeros(Float64, n_boxes) : domain.hloc
    bin_width = ndt == 1 ? 24.0 : 24.0 / ndt

    # Unit conversion from flux sensitivity to mixing-ratio contribution.
    molar_mass = cfg.molarmass > 0 ? cfg.molarmass : 44.0
    mass_conversion = cfg.coeff * MOLAR_MASS_AIR / molar_mass

    for i in 1:n_obs
        fp = footprints[i]
        fp === nothing && continue

        grid = fp.grid
        if size(grid, 1) != nx || size(grid, 2) != ny
            grid = resample_footprint_to_domain(grid, fp.lon, fp.lat, lon_edges, lat_edges)
        end

        grid_scaled = grid .* mass_conversion

        gtime = fp.gtime
        ngrid = size(grid, 3)
        ngrid == 0 && continue

        for n in 1:ngrid
            # State-time slot for this footprint time step; 0 means the step
            # falls outside the inversion window and is skipped.
            jt = find_state_index(gtime[n], state_times, statres)
            jt == 0 && continue

            frac_day = gtime[n] - floor(gtime[n])
            utc_hour = frac_day * 24.0

            for ix in 1:nx, iy in 1:ny
                nb = domain.nbox_xy[ix, iy]
                nb <= 0 && continue

                # Diurnal bin from local solar time (UTC + per-box offset).
                local_hour = utc_hour + (nb <= length(hloc) ? hloc[nb] : 0.0)
                local_hour = mod(local_hour, 24.0)
                nt = ndt == 1 ? 1 : clamp(Int(floor(local_hour / bin_width)) + 1, 1, ndt)

                # Column layout: boxes fastest, then diurnal bin, then state time.
                block_offset = ((jt - 1) * ndt + (nt - 1)) * n_boxes
                H[i, block_offset + nb] += grid_scaled[ix, iy, n]
            end
        end
    end

    return H
end

"""
    find_state_index(gtime::Float64, state_times::Vector{Float64}, statres::Float64) -> Int

Locate the state-time interval containing `gtime`. Interval `k` spans
`[state_times[k], state_times[k] + statres)` with a small floating-point
tolerance (`eps`) on the upper bound. Returns 0 when `state_times` is empty
or `gtime` lies beyond the final interval; times before the first interval
map to index 1.
"""
function find_state_index(gtime::Float64, state_times::Vector{Float64}, statres::Float64)
    isempty(state_times) && return 0

    # Reject times past the end of the last interval.
    upper = state_times[end] + statres
    gtime >= upper + eps(upper) && return 0

    hit = findfirst(state_times) do t0
        bound = t0 + statres
        gtime < bound + eps(bound)
    end
    return hit === nothing ? length(state_times) : hit
end

"""
    assign_state_indices(obs::Observations, cfg::Config, date_info::DateInfo, ntstate::Int)
        -> Vector{Int}

Map each observation date to a state-time slot in `1:ntstate` based on its
offset from the inversion start date divided by the state resolution
(`cfg.statres`, floored at 1e-6 to avoid division by zero). With a single
slot, every observation maps to 1.
"""
function assign_state_indices(obs::Observations, cfg::Config, date_info::DateInfo, ntstate::Int)
    n_obs = length(obs.jdates)
    slots = ones(Int, n_obs)
    ntstate == 1 && return slots

    t0 = date_info.juldatei
    resolution = max(cfg.statres, 1e-6)

    for k in 1:n_obs
        jd = k <= length(obs.jdates) ? obs.jdates[k] : t0
        slots[k] = clamp(Int(floor((jd - t0) / resolution)) + 1, 1, ntstate)
    end

    return slots
end

"""
    assign_time_bins(obs::Observations, ndt::Int) -> Vector{Int}

Assign each observation to one of `ndt` equal diurnal bins from its UTC time
of day. Entries that are not `DateTime`s fall into bin 1, as does every
observation when `ndt == 1`.
"""
function assign_time_bins(obs::Observations, ndt::Int)
    n_obs = length(obs.datetimes)
    bins = ones(Int, n_obs)
    ndt == 1 && return bins

    width = 24.0 / ndt  # hours per diurnal bin

    for k in 1:n_obs
        stamp = k <= length(obs.datetimes) ? obs.datetimes[k] : nothing
        stamp isa DateTime || continue  # non-DateTime entries stay in bin 1
        hod = Dates.hour(stamp) + Dates.minute(stamp) / 60.0 + Dates.second(stamp) / 3600.0
        bins[k] = clamp(Int(floor(hod / width)) + 1, 1, ndt)
    end

    return bins
end

"""
    run_conjugate_gradient(cost_func, grad_func, x0::Vector{Float64}, cfg::Config) -> CGResult

Run conjugate gradient optimization.
"""
function run_conjugate_gradient(cost_func, grad_func, x0::Vector{Float64}, cfg::Config)
    maxiter = hasfield(typeof(cfg), :maxiter) ? cfg.maxiter : 100
    tol = hasfield(typeof(cfg), :tol) ? cfg.tol : 1e-5
    reqrd = hasfield(typeof(cfg), :reqrd) ? cfg.reqrd : 0.01

    @info "Running conjugate gradient optimization"
    @info "Max iterations: $maxiter, tolerance: $tol, required reduction: $reqrd"

    return congrad!(cost_func, grad_func, x0;
                   maxiter=maxiter, tol=tol, reqrd=reqrd, verbose=true)
end

"""
    run_quasi_newton(cost_func, grad_func, x0::Vector{Float64}, cfg::Config) -> QNResult

Run quasi-Newton (L-BFGS) optimization.
"""
function run_quasi_newton(cost_func, grad_func, x0::Vector{Float64}, cfg::Config)
    maxiter = hasfield(typeof(cfg), :maxiter) ? cfg.maxiter : 200
    tol = hasfield(typeof(cfg), :tol) ? cfg.tol : 1e-5

    @info "Running L-BFGS optimization"
    @info "Max iterations: $maxiter, tolerance: $tol"

    return m1qn3!(cost_func, grad_func, x0;
                 maxiter=maxiter, tol=tol, verbose=true)
end

"""
    run_analytic_inversion(state::InversionState, prior_fluxes::PriorFluxes, obs::Observations,
                          covariance::CovarianceMatrix, cfg::Config, domain::Domain) -> NamedTuple

Run analytic inversion (direct matrix solution).

This computes the exact Bayesian solution for linear problems.
"""
function run_analytic_inversion(state::InversionState, prior_fluxes::PriorFluxes, obs::Observations,
                                covariance::CovarianceMatrix, cfg::Config, domain::Domain,
                                footprints, files)
    @info "Running analytic inversion"

    try
        n_obs = length(obs.concentrations)
        n_state = domain.nbox
        date_info = initialize_dates(cfg)

        debug_enabled = get(ENV, "FLEXINVERT_DEBUG_OUTPUT", "0") in ("1", "true", "TRUE")
        if debug_enabled && !isempty(files.path_output)
            ENV["FLEXINVERT_OUTPUT_DIR"] = files.path_output
        end

        if covariance.has_physical_scaling
            scale = covariance.physical_scaling
            max_dev = maximum(abs.(scale .- 1.0))
            avg_scale = sum(scale) / length(scale)
            @info "Physical scaling active" minimum=minimum(scale) maximum=maximum(scale) mean=avg_scale max_dev=max_dev
            if debug_enabled && !isempty(files.path_output)
                scaling_path = joinpath(files.path_output, "physical_scaling_vector.txt")
                write_matrix(scaling_path, reshape(scale, 1, :))
            end
        else
            @info "Physical scaling disabled; using unity scaling"
        end

        H = build_observation_operator(footprints, cfg, domain, obs, date_info)
        if debug_enabled && !isempty(files.path_output)
            write_matrix(joinpath(files.path_output, "H_matrix.txt"), H)
        end
        @info "Observation operator size: $(size(H))"

        H_for_hbht = H
        if debug_enabled && covariance.has_hbht_row_scaling && !isempty(files.path_output)
            write_matrix(joinpath(files.path_output, "H_matrix_hbht_scaled.txt"),
                         Transformations.apply_hbht_column_scaling(H, covariance))
        end

        # Map to control space using the Fortran-style B^{1/2} helper.
        H_reduced, B_half, chol_to_eig = Transformations.build_H_reduced_with_basis(H, covariance)
        if debug_enabled && !isempty(files.path_output)
            write_matrix(joinpath(files.path_output, "h_reduced_julia.txt"), H_reduced)
            write_matrix(joinpath(files.path_output, "b_half_julia.txt"), B_half)
        end

        # Build the prior rhs contribution y - H x_b in observation space
        chi_baseline = if length(state.chi) == length(state.chi_prior)
            state.chi .- state.chi_prior
        else
            zeros(Float64, covariance.nbox * max(covariance.nt, 1))
        end

        forward_model(cfg, domain, prior_fluxes, obs;
                      footprints=footprints, files=files,
                      chi_increment=chi_baseline, H=H,
                      chi_prior=state.chi_prior)

        # Reconstruct the Fortran-style observation residual (obs%delta):
        # δ = Hx_prior + fixed_fluxes - y + background + cinipos
        # where the fixed fluxes comprise NEE, fossil fuel, and ocean terms.
        delta = obs.posterior_values .+ obs.nee_contrib .+ obs.fff_contrib .+
                obs.ocean_contrib .- obs.concentrations .+ obs.background .+
                obs.cinipos

        if debug_enabled && !isempty(files.path_output)
            delta_path = joinpath(files.path_output, "obs_residual_components.txt")
            open(delta_path, "w") do io
                println(io, "# obs_index posterior nee fff ocean background cinipos observed delta")
                @inbounds for i in eachindex(delta)
                    println(io, @sprintf("%d %.12e %.12e %.12e %.12e %.12e %.12e %.12e %.12e",
                                         i,
                                         obs.posterior_values[i],
                                         obs.nee_contrib[i],
                                         obs.fff_contrib[i],
                                         obs.ocean_contrib[i],
                                         obs.background[i],
                                         obs.cinipos[i],
                                         obs.concentrations[i],
                                         delta[i]))
                end
            end
        end

        obs_delta = copy(delta)
        if debug_enabled && !isempty(files.path_output)
            write_matrix(joinpath(files.path_output, "delta_julia.txt"), reshape(obs_delta, 1, :))
        end

        obs_errors = get_observation_errors(obs, cfg)
        if debug_enabled && !isempty(files.path_output)
            rhs_breakdown_path = joinpath(files.path_output, "obs_rhs_breakdown_julia.txt")
            open(rhs_breakdown_path, "w") do io
                println(io, "# obs_index posterior nee fff ocn background cinipos conc delta weighted_posterior weighted_nee weighted_fff weighted_ocn weighted_background weighted_cini weighted_conc")
                @inbounds for i in eachindex(obs_errors)
                    sigma = obs_errors[i]
                    inv_sigma = sigma == 0.0 ? 0.0 : 1.0 / sigma
                    println(io, @sprintf("%d %.12e %.12e %.12e %.12e %.12e %.12e %.12e %.12e %.12e %.12e %.12e %.12e %.12e %.12e %.12e",
                                         i,
                                         obs.posterior_values[i],
                                         obs.nee_contrib[i],
                                         obs.fff_contrib[i],
                                         obs.ocean_contrib[i],
                                         obs.background[i],
                                         obs.cinipos[i],
                                         obs.concentrations[i],
                                         delta[i],
                                         obs.posterior_values[i] * inv_sigma,
                                         obs.nee_contrib[i] * inv_sigma,
                                         obs.fff_contrib[i] * inv_sigma,
                                         obs.ocean_contrib[i] * inv_sigma,
                                         obs.background[i] * inv_sigma,
                                         obs.cinipos[i] * inv_sigma,
                                         obs.concentrations[i] * inv_sigma))
                end
            end
        end
        meas_var = obs_errors .^ 2

        # Construct BH^T ("zwork") following mod_analytic.F
        zwork = Transformations.build_zwork(H_for_hbht, covariance)

        # In-memory parity check against Fortran reference (Option A.2)
        if debug_enabled && !isempty(cfg.fortran_output_path)
            fortran_zwork_path = locate_covariance_artifact(cfg.fortran_output_path, "zwork.txt")
            if fortran_zwork_path !== nothing && isfile(fortran_zwork_path)
                try
                    zwork_fortran = read_fortran_matrix(fortran_zwork_path)

                    if size(zwork) == size(zwork_fortran)
                        diff = zwork .- zwork_fortran
                        rel_err = norm(diff) / norm(zwork_fortran)
                        max_abs_diff = maximum(abs.(diff))

                        @info "Zwork in-memory parity check" rel_error=rel_err max_abs_diff=max_abs_diff

                        if rel_err < 1e-12
                            @info "✅ Zwork achieves machine precision parity (rel_err < 1e-12)"
                        elseif rel_err < 1e-6
                            @warn "⚠️ Zwork has acceptable parity (1e-12 < rel_err < 1e-6)" rel_error=rel_err
                            @info "Sample comparison" fortran_sample=zwork_fortran[1:min(3,end), 1] julia_sample=zwork[1:min(3,end), 1]
                        else
                            @warn "❌ Zwork parity FAILED (rel_err >= 1e-6)" rel_error=rel_err
                            @info "Sample comparison" fortran_sample=zwork_fortran[1:min(3,end), 1] julia_sample=zwork[1:min(3,end), 1]

                            # Detailed diagnostics
                            n_nonzero_f = count(x -> abs(x) > 1e-10, zwork_fortran)
                            n_nonzero_j = count(x -> abs(x) > 1e-10, zwork)
                            @info "Non-zero counts" fortran=n_nonzero_f julia=n_nonzero_j
                            @info "Magnitude ranges" fortran_max=maximum(abs.(zwork_fortran)) julia_max=maximum(abs.(zwork))
                        end
                    else
                        @warn "Zwork size mismatch" julia_size=size(zwork) fortran_size=size(zwork_fortran)
                    end
                catch e
                    @warn "Failed to load Fortran zwork for parity check" path=fortran_zwork_path exception=e
                end
            else
                @info "No Fortran zwork reference found, skipping in-memory parity check" searched_path=fortran_zwork_path
            end
        end

        if debug_enabled && !isempty(files.path_output)
            zwork_path = joinpath(files.path_output, "zwork_julia.txt")
            @info "About to write zwork" path=zwork_path size=size(zwork) nonzero_count=count(x -> abs(x) > 1e-10, zwork) max_abs=maximum(abs.(zwork))
            write_matrix(zwork_path, zwork)
            @info "Finished writing zwork to" path=zwork_path
            # Verify write immediately
            zwork_readback = readdlm(zwork_path)
            @info "Read back zwork" size_readback=size(zwork_readback) nonzero_readback=count(x -> abs(x) > 1e-10, zwork_readback) max_abs_readback=maximum(abs.(zwork_readback))
        end

        # Build HBH^T + R
        hbht = Matrix{Float64}(undef, n_obs, n_obs)
        mul!(hbht, H_for_hbht, zwork)
        @inbounds for i in 1:n_obs
            hbht[i, i] += meas_var[i]
        end
        hbht .= 0.5 .* (hbht .+ hbht')

        if debug_enabled && !isempty(files.path_output)
            write_matrix(joinpath(files.path_output, "hbht_julia.txt"), hbht)
            write_matrix(joinpath(files.path_output, "meas_var_julia.txt"), reshape(meas_var, 1, :))
        end

        hbht_factor, potrf_info = LAPACK.potrf!('L', copy(hbht))
        ridge = 0.0
        if potrf_info != 0
            diag_mean = sum(abs(hbht[i, i]) for i in 1:n_obs) / max(n_obs, 1)
            ridge = max(diag_mean * 1e-12, 1e-12)
            hbht_reg = copy(hbht)
            @inbounds for i in 1:n_obs
                hbht_reg[i, i] += ridge
            end
            hbht_factor, potrf_info = LAPACK.potrf!('L', copy(hbht_reg))
            if potrf_info != 0
                if debug_enabled && !isempty(files.path_output)
                    write_matrix(joinpath(files.path_output, "hbht_failure_matrix.txt"), hbht_reg)
                end
                error("Cholesky factorisation failed for HBHᵀ + R even after ridge (info=$(potrf_info))")
            end
            hbht = hbht_reg
            if debug_enabled
                @info "Applied HBHᵀ ridge regularisation" ridge=ridge diag_mean=diag_mean
            end
        end

        # Solve (HBHᵀ + R) x = δ without forming the dense inverse
        delta_rhs = reshape(copy(obs_delta), :, 1)
        solved_rhs = LAPACK.potrs!('L', hbht_factor, delta_rhs)
        hbht_solution = vec(solved_rhs)

        hbht_inv = nothing
        if debug_enabled && !isempty(files.path_output)
            hbht_inv_factor = copy(hbht_factor)
            potri_info = LAPACK.potri!('L', hbht_inv_factor)
            if potri_info == 0
                @inbounds for j in 1:n_obs
                    for i in 1:j-1
                        hbht_inv_factor[i, j] = hbht_inv_factor[j, i]
                    end
                end
                hbht_inv = hbht_inv_factor
            else
                @warn "Cholesky inverse failed for HBHᵀ + R; skipping inverse dump" potri_info
            end
        end

        chi_increment = -(zwork * hbht_solution)

        if debug_enabled && !isempty(files.path_output)
            write_matrix(joinpath(files.path_output, "mwork_julia.txt"), hbht)
            if hbht_inv !== nothing
                write_matrix(joinpath(files.path_output, "imwork_julia.txt"), hbht_inv)
                write_matrix(joinpath(files.path_output, "gain_julia.txt"), zwork * hbht_inv)
            end
        end
        phi_solution = Transformations.chi2phi(chi_increment, covariance)

        state_increment = chi_increment
        state.chi .= state.chi_prior .+ state_increment
        state.phi .= phi_solution

        inv_meas = 1.0 ./ meas_var
        rhs_control = -H_reduced' * (delta .* inv_meas)
        sqrt_inv_meas = sqrt.(inv_meas)
        weighted_H = H_reduced .* sqrt_inv_meas
        normal_matrix = weighted_H' * weighted_H + Matrix{Float64}(I, covariance.n_modes, covariance.n_modes)

        obs_cost, prior_cost = compute_cost_terms(phi_solution, state, prior_fluxes, obs, covariance,
                                                  cfg, domain, footprints, files, H)
        cost_final = obs_cost + prior_cost

        save_analytic_diagnostics(state, domain, files, H, phi_solution, covariance, obs_cost, prior_cost,
                                  cfg, prior_fluxes;
                                  H_reduced=H_reduced, rhs=rhs_control, normal_matrix=normal_matrix,
                                  basis_transform=chol_to_eig,
                                  debug_enabled=debug_enabled)

        # Generate monitor.txt for residual parity comparison
        save_monitor_diagnostics(state, domain, files, phi_solution, prior_fluxes, obs, covariance, cfg, footprints)

        @info "Analytic inversion completed successfully"
        @info "Final cost components: J_obs=$(obs_cost), J_prior=$(prior_cost), J=$(cost_final)"

        return (
            x = phi_solution,
            success = true,
            iterations = 1,
            final_cost = cost_final,
            cost_history = [cost_final]
        )

    catch err
        @warn "Analytic inversion failed; returning prior state" err
        x_final = copy(state.phi)
        H_local = @isdefined(H) ? H : nothing
        obs_cost, prior_cost = compute_cost_terms(x_final, state, prior_fluxes, obs, covariance,
                                                  cfg, domain, footprints, files, H_local)
        cost_final = obs_cost + prior_cost
        return (
            x = x_final,
            success = false,
            iterations = 0,
            final_cost = cost_final,
            cost_history = [cost_final]
        )
    end
end

# Minimum land-sea-mask value for a grid cell to count as land (compared with
# `>=` below; presumably a land fraction in [0, 1] — TODO confirm mask units).
const LAND_THRESHOLD = 0.5

"""
    ensure_lsm!(domain::Domain, files::Files)

Load the land-sea mask into `domain` on first use, provided a mask file and
variable name are configured. A read failure is downgraded to a warning,
leaving `domain.lsm` empty (downstream code then treats box cells as land).
"""
function ensure_lsm!(domain::Domain, files::Files)
    mask_missing = isempty(domain.lsm)
    mask_configured = !isempty(files.file_lsm) && !isempty(files.varname_lsm)
    if mask_missing && mask_configured
        try
            read_lsm!(domain, files.file_lsm, files.varname_lsm,
                      files.lonname_lsm, files.latname_lsm)
        catch err
            @warn "Failed to load land-sea mask; assuming all boxes are land" err
        end
    end
end

"""
    total_area_per_box(domain::Domain) -> Vector{Float64}

Sum the grid-cell areas assigned to each aggregation box via `domain.nbox_xy`.
Returns an all-zero vector when the box map is empty.
"""
function total_area_per_box(domain::Domain)
    areas = zeros(Float64, domain.nbox)
    isempty(domain.nbox_xy) && return areas
    nx_map, ny_map = size(domain.nbox_xy)
    @inbounds for jy in 1:ny_map
        # Cell area depends on latitude only, so compute it once per row.
        south_edge = domain.rlly + (jy - 1) * domain.rdy
        row_area = grid_area(south_edge + 0.5 * domain.rdy, domain.rdy, domain.rdx)
        for jx in 1:nx_map
            box_id = domain.nbox_xy[jx, jy]
            if box_id > 0
                areas[box_id] += row_area
            end
        end
    end
    return areas
end

"""
    land_area_per_box(domain::Domain, files::Files) -> Vector{Float64}

Sum the grid-cell areas classified as land (mask value >= `LAND_THRESHOLD`)
within each aggregation box. Loads the mask lazily via `ensure_lsm!`; when no
mask is available, every assigned cell counts as land (total areas returned).
The stored mask may be laid out `(nx, ny)` or transposed `(ny, nx)`; any other
shape falls back to clamped direct indexing.
"""
function land_area_per_box(domain::Domain, files::Files)
    ensure_lsm!(domain, files)
    areas = zeros(Float64, domain.nbox)
    isempty(domain.nbox_xy) && return areas
    # No mask: every box cell is treated as land.
    isempty(domain.lsm) && return total_area_per_box(domain)
    nx_map, ny_map = size(domain.nbox_xy)
    shp = size(domain.lsm)
    layout = if shp == (domain.nxregrid, domain.nyregrid)
        :xy
    elseif shp == (domain.nyregrid, domain.nxregrid)
        :yx
    else
        :other
    end
    @inbounds for jy in 1:ny_map
        south_edge = domain.rlly + (jy - 1) * domain.rdy
        row_area = grid_area(south_edge + 0.5 * domain.rdy, domain.rdy, domain.rdx)
        for jx in 1:nx_map
            box_id = domain.nbox_xy[jx, jy]
            box_id > 0 || continue
            mask_val = if layout == :xy
                domain.lsm[jx, jy]
            elseif layout == :yx
                domain.lsm[jy, jx]
            else
                domain.lsm[clamp(jx, 1, shp[1]), clamp(jy, 1, shp[2])]
            end
            if mask_val >= LAND_THRESHOLD
                areas[box_id] += row_area
            end
        end
    end
    return areas
end

"""
    compute_flux_increment(state_increment, prior_state, covariance, domain, files, cfg;
                           debug_enabled=false) -> (flux_box, flux_grid, stats)

Convert a flattened state-space increment into per-box and gridded flux
increments, redistributing each box's area-integrated flux uniformly over
that box's land cells.

The state vectors are interpreted as `nbox × ndt × ntstate` cubes. The `ndt`
sub-steps are averaged and the `NUMERICAL_SCALE` factor removed; box means are
painted onto the `nxregrid × nyregrid` grid and then re-spread over land area
only (ocean/unassigned cells receive zero flux).

# Arguments
- `state_increment`, `prior_state`: flattened vectors of length `nbox*ndt*ntstate`
- `covariance`: supplies `ndt` and `ntstate`
- `domain`, `files`: grid geometry and the land-sea-mask source
- `cfg`: configuration (not read inside this function at present)
- `debug_enabled`: accepted for call-site symmetry; currently unused here

# Returns
- `flux_box`: `nbox × ntstate` posterior-minus-prior box fluxes
- `flux_grid`: `ny × nx × ntstate` gridded increment (y-first ordering
  produced by the final `permutedims`)
- `stats`: named tuple of area-integrated totals before/after redistribution
  (each divided by `ntstate`), used by callers for mass-conservation checks
"""
function compute_flux_increment(state_increment::Vector{Float64}, prior_state::Vector{Float64},
                                covariance::CovarianceMatrix, domain::Domain, files::Files,
                                cfg::Config; debug_enabled::Bool=false)
    nbox = domain.nbox
    ndt = max(covariance.ndt, 1)          # guard against a zero sub-step count
    ntstate = max(covariance.ntstate, 1)  # guard against a zero state-period count
    nx = domain.nxregrid
    ny = domain.nyregrid

    # Fail fast on mismatched vector lengths before any reshape.
    expected = nbox * ndt * ntstate
    length(state_increment) == expected || error("state increment length $(length(state_increment)) does not match expected $expected")
    length(prior_state) == expected || error("prior state length $(length(prior_state)) does not match expected $expected")

    ensure_lsm!(domain, files)  # lazily load the land-sea mask if configured

    # reshape shares memory with the inputs; posterior = prior + increment.
    inc_cube = reshape(state_increment, nbox, ndt, ntstate)
    prior_cube = reshape(prior_state, nbox, ndt, ntstate)
    post_cube = inc_cube .+ prior_cube

    # Average over the ndt sub-steps and undo the numerical scaling.
    avg_prior = zeros(Float64, nbox, ntstate)
    avg_post = zeros(Float64, nbox, ntstate)
    scale_factor = 1.0 / (ndt * NUMERICAL_SCALE)
    @inbounds for n in 1:ntstate
        for box in 1:nbox
            avg_prior[box, n] = sum(@view prior_cube[box, :, n]) * scale_factor
            avg_post[box, n] = sum(@view post_cube[box, :, n]) * scale_factor
        end
    end

    # Paint the box means onto the regular grid (zero where no box is assigned).
    fpri = zeros(Float64, nx, ny, ntstate)
    fpos = zeros(Float64, nx, ny, ntstate)

    @inbounds for n in 1:ntstate
        for jy in 1:ny, ix in 1:nx
            box = domain.nbox_xy[ix, jy]
            if box <= 0
                fpri[ix, jy, n] = 0.0
                fpos[ix, jy, n] = 0.0
            else
                fpri[ix, jy, n] = avg_prior[box, n]
                fpos[ix, jy, n] = avg_post[box, n]
            end
        end
    end

    # Prepare grid cell areas and land mask
    cell_area = zeros(Float64, nx, ny)
    if isempty(domain.reg_lat)
        # Cache the latitude axis on the domain if it was never populated.
        domain.reg_lat = [domain.rlly + (j - 1) * domain.rdy for j in 1:domain.nyregrid]
    end
    @inbounds for jy in 1:ny
        lat = domain.reg_lat[jy]
        # Area is evaluated at the cell-midpoint latitude; constant along a row.
        area = grid_area(lat + 0.5 * domain.rdy, domain.rdy, domain.rdx)
        for ix in 1:nx
            cell_area[ix, jy] = area
        end
    end

    # Land classification per cell. The stored mask may be laid out (nx, ny)
    # or transposed (ny, nx); anything else falls back to clamped indexing.
    land_mask = falses(nx, ny)
    use_lsm = !isempty(domain.lsm)
    lsm_shape = size(domain.lsm)
    orientation = use_lsm ? (lsm_shape == (nx, ny) ? :xy : lsm_shape == (ny, nx) ? :yx : :other) : :none
    @inbounds for jy in 1:ny, ix in 1:nx
        if domain.nbox_xy[ix, jy] <= 0
            land_mask[ix, jy] = false
            continue
        end
        if use_lsm
            val = orientation == :xy ? domain.lsm[ix, jy] :
                  orientation == :yx ? domain.lsm[jy, ix] :
                  domain.lsm[clamp(ix, 1, size(domain.lsm, 1)), clamp(jy, 1, size(domain.lsm, 2))]
            land_mask[ix, jy] = val >= LAND_THRESHOLD
        elseif !isempty(domain.lsm_box)
            land_mask[ix, jy] = domain.lsm_box[ix, jy] >= 1
        else
            land_mask[ix, jy] = true  # no mask: treat every assigned cell as land
        end
    end

    # Area-integrated totals before redistribution (conservation reference).
    prior_before = zeros(Float64, ntstate)
    posterior_before = zeros(Float64, ntstate)
    @inbounds for jy in 1:ny, ix in 1:nx
        box = domain.nbox_xy[ix, jy]
        box <= 0 && continue
        area = cell_area[ix, jy]
        prior_before .+= @view(fpri[ix, jy, :]) .* area
        posterior_before .+= @view(fpos[ix, jy, :]) .* area
    end

    prior_after = zeros(Float64, ntstate)
    posterior_after = zeros(Float64, ntstate)
    flux_box = zeros(Float64, nbox, ntstate)

    # Redistribute each box's area-integrated flux uniformly over its land
    # cells; a box with no land area ends up with zero flux everywhere.
    @inbounds for box in 1:nbox
        sumpri = zeros(Float64, ntstate)
        sumpos = zeros(Float64, ntstate)
        landarea = 0.0

        for jy in 1:ny, ix in 1:nx
            domain.nbox_xy[ix, jy] == box || continue
            area = cell_area[ix, jy]
            sumpri .+= @view(fpri[ix, jy, :]) .* area
            sumpos .+= @view(fpos[ix, jy, :]) .* area
            if land_mask[ix, jy]
                landarea += area
            end
        end

        if landarea > 0
            sumpri ./= landarea  # per-unit-land-area flux for this box
            sumpos ./= landarea
        else
            fill!(sumpri, 0.0)
            fill!(sumpos, 0.0)
        end

        prior_after .+= sumpri .* landarea
        posterior_after .+= sumpos .* landarea
        flux_box[box, :] .= sumpos .- sumpri

        # Write the redistributed values back onto the grid.
        for jy in 1:ny, ix in 1:nx
            domain.nbox_xy[ix, jy] == box || continue
            if land_mask[ix, jy]
                fpri[ix, jy, :] .= sumpri
                fpos[ix, jy, :] .= sumpos
            else
                fpri[ix, jy, :] .= 0.0
                fpos[ix, jy, :] .= 0.0
            end
        end
    end

    # Normalize totals by ntstate to mirror Fortran's logging
    prior_before ./= ntstate
    prior_after ./= ntstate
    posterior_before ./= ntstate
    posterior_after ./= ntstate

    # Swap to (ny, nx, ntstate) ordering for the gridded output.
    flux_grid = permutedims(fpos .- fpri, (2, 1, 3))

    stats = (
        prior_before = prior_before,
        prior_after = prior_after,
        posterior_before = posterior_before,
        posterior_after = posterior_after
    )

    return flux_box, flux_grid, stats
end

"""
    save_analytic_diagnostics(state, domain, files, H, phi, covariance,
                              obs_cost, prior_cost, cfg, prior_fluxes;
                              H_reduced=nothing, rhs=nothing, normal_matrix=nothing,
                              basis_transform=nothing, debug_enabled=false)

Write the diagnostic artifacts of an analytic inversion (Jacobians, solution
vectors, cost decomposition, flux increments) to `files.path_output`.

All output is best-effort: any failure is downgraded to a warning so the
inversion result itself is never lost. Does nothing when no output path is
configured. When `basis_transform` is given, the RHS and normal matrix are
additionally emitted transformed out of the Cholesky basis (the caller names
the transform `chol_to_eig`; presumably this reproduces the basis the Fortran
reference writes — TODO confirm against retrieval.f90 outputs).
"""
function save_analytic_diagnostics(state::InversionState, domain::Domain, files::Files,
                                   H::Union{Nothing,AbstractMatrix}, phi::Vector{Float64},
                                   covariance::CovarianceMatrix, obs_cost::Float64, prior_cost::Float64,
                                   cfg::Config, prior_fluxes::PriorFluxes;
                                   H_reduced::Union{Nothing,AbstractMatrix}=nothing,
                                   rhs::Union{Nothing,AbstractVector}=nothing,
                                   normal_matrix::Union{Nothing,AbstractMatrix}=nothing,
                                   basis_transform::Union{Nothing,AbstractMatrix}=nothing,
                                   debug_enabled::Bool=false)
    isempty(files.path_output) && return
    try
        # Optional Jacobians (full and mode-reduced).
        if H !== nothing
            write_matrix(joinpath(files.path_output, "hmat_julia.txt"), H)
        end
        if H_reduced !== nothing
            write_matrix(joinpath(files.path_output, "h_reduced_julia.txt"), H_reduced)
        end

        write_matrix(joinpath(files.path_output, "phi_solution.txt"), reshape(phi, 1, :))
        if rhs !== nothing
            write_matrix(joinpath(files.path_output, "rhs_control_cholesky.txt"), reshape(rhs, 1, :))
            rhs_fortran = rhs
            if basis_transform !== nothing
                # B' \ rhs maps the Cholesky-basis RHS through the supplied
                # basis change (inverse-adjoint applied on the left).
                rhs_fortran = basis_transform' \ rhs
            end
            write_matrix(joinpath(files.path_output, "rhs_control.txt"), reshape(rhs_fortran, 1, :))
        end
        if normal_matrix !== nothing
            write_matrix(joinpath(files.path_output, "normal_matrix_cholesky.txt"), normal_matrix)
            normal_fortran = normal_matrix
            if basis_transform !== nothing
                # Congruence transform: B'^{-1} * N * B^{-1}.
                tmp = basis_transform' \ normal_matrix
                normal_fortran = tmp / basis_transform
            end
            write_matrix(joinpath(files.path_output, "normal_matrix.txt"), normal_fortran)
        end
        # NOTE(review): chi_increment.txt holds phi (control space) while
        # state_increment.txt holds phi2chi(phi); the naming looks swapped
        # relative to the variables — confirm against the Fortran convention.
        control_increment = reshape(phi, 1, :)
        state_increment = Transformations.phi2chi(phi, covariance)
        write_matrix(joinpath(files.path_output, "chi_increment.txt"), control_increment)
        write_matrix(joinpath(files.path_output, "state_increment.txt"), reshape(state_increment, 1, :))
        # Cost decomposition (J and the Fortran-style doubled terms).
        cost_terms_path = joinpath(files.path_output, "cost_terms.txt")
        open(cost_terms_path, "w") do io
            println(io, "# Analytic cost decomposition")
            println(io, @sprintf("J_obs %.10e", obs_cost))
            println(io, @sprintf("J_prior %.10e", prior_cost))
            println(io, @sprintf("J_total %.10e", obs_cost + prior_cost))
            println(io, @sprintf("2Jo %.10e", 2 * obs_cost))
            println(io, @sprintf("2Jp %.10e", 2 * prior_cost))
        end

        # Flux increments per box and on the regular grid.
        flux_box, flux_grid, flux_stats = compute_flux_increment(state_increment, state.chi_prior,
                                                                 covariance, domain, files, cfg;
                                                                 debug_enabled=debug_enabled)
        write_matrix(joinpath(files.path_output, "flux_increment.txt"), flux_box)

        if !isempty(domain.nbox_xy)
            # flux_grid is (ny, nx, ntstate); one text slab per time step.
            ny, nx, n_steps = size(flux_grid)
            grid_path = joinpath(files.path_output, "flux_increment_grid.txt")
            open(grid_path, "w") do io
                for t in 1:n_steps
                    println(io, "# flux increment grid slice $t (kg/m^2/s)")
                    for j in 1:ny
                        for i in 1:nx
                            i > 1 && write(io, ' ')
                            print(io, @sprintf("%.8e", flux_grid[j, i, t]))
                        end
                        write(io, '\n')
                    end
                    t < n_steps && write(io, '\n')
                end
            end
        end

        # Debug-only: basis transform dump and mass-conservation diagnostics
        # of the land redistribution performed by compute_flux_increment.
        if debug_enabled && !isempty(files.path_output)
            if basis_transform !== nothing
                write_matrix(joinpath(files.path_output, "basis_transform.txt"), basis_transform)
            end
            # Max relative change of the area-integrated totals (1e-20 floor
            # avoids division by zero when a total is exactly zero).
            rel_prior = maximum(abs.(flux_stats.prior_after .- flux_stats.prior_before) ./
                                max.(abs.(flux_stats.prior_before), 1e-20))
            rel_post = maximum(abs.(flux_stats.posterior_after .- flux_stats.posterior_before) ./
                               max.(abs.(flux_stats.posterior_before), 1e-20))
            rel_diff = max(rel_prior, rel_post)
            stats_path = joinpath(files.path_output, "flux_redistribution_stats.txt")
            open(stats_path, "w") do io
                println(io, "# Flux redistribution diagnostics")
                println(io, @sprintf("max_rel_diff %.12e", rel_diff))
                fmt = v -> [@sprintf("%.12e", x) for x in v]
                println(io, "prior_before " * join(fmt(flux_stats.prior_before), " "))
                println(io, "prior_after " * join(fmt(flux_stats.prior_after), " "))
                println(io, "posterior_before " * join(fmt(flux_stats.posterior_before), " "))
                println(io, "posterior_after " * join(fmt(flux_stats.posterior_after), " "))
            end
            if rel_diff > 1e-10
                @warn "Flux redistribution mass conservation diff=$(rel_diff)"
            end
        end
    catch err
        @warn "Failed to save analytic diagnostics" err
    end
end

"""
    write_matrix(path::AbstractString, M::AbstractArray) -> Bool

Write `M` to `path` as whitespace-separated `%.8e` values, one matrix row per
line. Vectors are written as a single row. Arrays with more than two
dimensions are collapsed column-major into a `size(M, 1) × prod(rest)` matrix
so no elements are lost (the previous implementation silently wrote only the
first 2-D slab of a higher-rank array). Returns `true`; I/O errors propagate.
"""
function write_matrix(path::AbstractString, M::AbstractArray)
    # Normalize to 2-D without copying: reshape shares the underlying data.
    mat = ndims(M) == 1 ? reshape(M, 1, :) :
          ndims(M) == 2 ? M : reshape(M, size(M, 1), :)
    open(path, "w") do io
        for i in 1:size(mat, 1)
            for j in 1:size(mat, 2)
                j > 1 && write(io, ' ')
                print(io, @sprintf("%.8e", mat[i, j]))
            end
            write(io, '\n')
        end
    end
    # The do-block form closes (and flushes) the stream before returning.
    return true
end

"""
    save_optimization_logs(result::InversionResult, files::Files)

Write optimization diagnostics to `files.path_output` in a Fortran-compatible
plain-text format: the cost history always, and the gradient-norm history when
the state type tracks one. Does nothing when no output path is configured;
I/O failures are logged as warnings rather than thrown.

# Arguments
- `result`: Inversion result
- `files`: File configuration
"""
function save_optimization_logs(result::InversionResult, files::Files)
    isempty(files.path_output) && return

    try
        # Cost function value per iteration.
        open(joinpath(files.path_output, "cost.txt"), "w") do io
            println(io, "# Iteration Cost")
            for (it, value) in enumerate(result.cost_history)
                println(io, "$it $value")
            end
        end

        # Gradient norms are optional: only some state types record them.
        has_grad_history = hasfield(typeof(result.state), :gradient_norm_history) &&
                           !isempty(result.state.gradient_norm_history)
        if has_grad_history
            open(joinpath(files.path_output, "grad_norm.txt"), "w") do io
                println(io, "# Iteration GradientNorm")
                for (it, gnorm) in enumerate(result.state.gradient_norm_history)
                    println(io, "$it $gnorm")
                end
            end
        end

        @info "Optimization logs saved to $(files.path_output)"

    catch err
        @warn "Failed to save optimization logs" err
    end
end

"""
    run_mcmc_inversion_wrapper(state::InversionState, prior_fluxes::PriorFluxes,
                              obs::Observations, covariance::CovarianceMatrix,
                              cfg::Config, files::Files, domain::Domain,
                              footprints) -> InversionResult

Wrapper to run MCMC inversion and convert result to InversionResult format.

Updates `state.phi` in-place with the posterior mean over the kept samples
(control space). The final cost is the negative log-posterior of the last
kept sample (`Inf` when no samples survive). Convergence is judged by the
R-hat diagnostic when available, otherwise by a minimal acceptance-rate check.
"""
function run_mcmc_inversion_wrapper(
    state::InversionState,
    prior_fluxes::PriorFluxes,
    obs::Observations,
    covariance::CovarianceMatrix,
    cfg::Config,
    files::Files,
    domain::Domain,
    footprints
)
    @info "Running MCMC inversion"

    # Run MCMC sampling using the integration module
    mcmc_result = run_mcmc_inversion(files, cfg, domain, obs, prior_fluxes, covariance;
                                     footprints=footprints)

    # Adopt the posterior mean (control space) as the point estimate.
    # (The previous version also computed a mean over x_phys that was never
    # used; that dead work has been removed.)
    if !isempty(mcmc_result.samples)
        state.phi .= mean([s.x_chi for s in mcmc_result.samples])
    end

    # Negative log-posterior plays the role of the cost function.
    final_cost = isempty(mcmc_result.samples) ? Inf :
                 -mcmc_result.samples[end].log_posterior

    # Cost history capped at the first 100 samples to keep logs small.
    n_hist = min(100, length(mcmc_result.samples))
    cost_history = [-s.log_posterior for s in mcmc_result.samples[1:n_hist]]

    # Determine convergence based on R-hat diagnostic
    success = if !isempty(mcmc_result.diagnostics.rhat)
        maximum(mcmc_result.diagnostics.rhat) < 1.1
    else
        mcmc_result.diagnostics.acceptance_rate > 0.1  # Basic acceptance rate check
    end

    convergence_info = if success
        "MCMC converged ($(mcmc_result.n_samples_kept) samples, acceptance=$(round(mcmc_result.diagnostics.acceptance_rate, digits=3)))"
    else
        "MCMC may not have converged"
    end

    @info "MCMC inversion completed"
    @info "Convergence: $convergence_info"
    @info "Final negative log-posterior: $final_cost"

    return InversionResult(
        success,
        "mcmc",
        final_cost,
        mcmc_result.n_samples_total,  # Use total samples as "iterations"
        state,
        cost_history,
        convergence_info
    )
end

"""
    save_monitor_diagnostics(state::InversionState, domain::Domain, files::Files,
                            phi::Vector{Float64}, prior_fluxes::PriorFluxes, obs::Observations,
                            covariance::CovarianceMatrix, cfg::Config, footprints)

Generate monitor.txt file matching Fortran format for residual parity comparison.

Each observation becomes one fixed-width line; per-record arrays that are
shorter than the observation count fall back to neutral defaults (0.0,
`cfg.measerr`, "AAA", `now()`) so a partially populated `Observations` still
yields a complete file. Failures are logged as warnings, never thrown.

# Arguments
- `state`: Inversion state with final solution (not read directly here)
- `domain`: Domain information (not read directly here)
- `files`: File configuration for output path
- `phi`: Control space solution (not read directly here)
- `prior_fluxes`: Prior flux fields (not read directly here)
- `obs`: Observations with concentrations and metadata
- `covariance`: Prior covariance matrix (not read directly here)
- `cfg`: Configuration settings (supplies the fallback observation error)
- `footprints`: Footprint data (not read directly here)
"""
function save_monitor_diagnostics(state::InversionState, domain::Domain, files::Files,
                                 phi::Vector{Float64}, prior_fluxes::PriorFluxes, obs::Observations,
                                 covariance::CovarianceMatrix, cfg::Config, footprints)
    isempty(files.path_output) && return

    monitor_path = joinpath(files.path_output, "monitor.txt")
    try
        open(monitor_path, "w") do io
            println(io, "rec yyyymmdd hhmmss juldate conc cini cinipos bkg nee fff ocn prior post diff error")

            n_obs = length(obs.concentrations)
            for i in 1:n_obs
                rec = i <= length(obs.stations) ? obs.stations[i] : "AAA"
                dt = i <= length(obs.datetimes) ? obs.datetimes[i] : now()
                yyyymmdd = Dates.format(dt, "yyyymmdd")
                hhmmss = Dates.format(dt, "HHMMSS")
                juldate = i <= length(obs.jdates) ? obs.jdates[i] : 0.0

                conc = obs.concentrations[i]
                cinipos = i <= length(obs.cinipos) ? obs.cinipos[i] : 0.0
                cini = cinipos  # fallback until observation basis vectors are archived
                bkg = i <= length(obs.background) ? obs.background[i] : 0.0

                # Sector contributions (biosphere, fossil fuel, ocean).
                nee = i <= length(obs.nee_contrib) ? obs.nee_contrib[i] : 0.0
                fff = i <= length(obs.fff_contrib) ? obs.fff_contrib[i] : 0.0
                ocn = i <= length(obs.ocean_contrib) ? obs.ocean_contrib[i] : 0.0

                prior_model = i <= length(obs.prior_values) ? obs.prior_values[i] : 0.0
                post_model = i <= length(obs.posterior_values) ? obs.posterior_values[i] : prior_model

                # Model-minus-observation residual including background and
                # initial-condition terms (sign convention mirrors Fortran).
                delta = post_model + nee + fff + ocn - conc + bkg + cinipos
                # Renamed from `error` to avoid shadowing Base.error.
                obs_error = i <= length(obs.total_errors) ? obs.total_errors[i] : cfg.measerr

                line = @sprintf("%s %s %s %9.4f %9.4f %9.4f %9.4f %9.4f",
                                 rec, yyyymmdd, hhmmss, juldate, conc, cini, cinipos, bkg)

                # Sector fields use the asterisk-on-overflow formatting.
                line *= format_monitor_field(nee)
                line *= format_monitor_field(fff)
                line *= format_monitor_field(ocn)

                line *= @sprintf(" %10.4f %10.4f %10.4f %10.4f",
                                 prior_model, post_model, delta, obs_error)

                println(io, line)
            end
        end
        @info "Monitor diagnostics saved to $(files.path_output)"
    catch err
        @warn "Failed to save monitor diagnostics" err
    end
end

"""
    format_monitor_field(value::Float64) -> String

Render `value` as an 11-character monitor column (`" %10.4f"`). Non-finite
values and magnitudes of at least `9.9995e9` (which would overflow the fixed
width) are rendered as a Fortran-style run of asterisks.
"""
function format_monitor_field(value::Float64)
    overflow = " **********"
    # Guard clause: anything unprintable in the fixed width gets asterisks.
    if !isfinite(value) || abs(value) >= 9.9995e9
        return overflow
    end
    return @sprintf(" %10.4f", value)
end

end # module Inversion
