# Forward model implementation for FLEXINVERT.jl
# Ports calc_conc.f90 and simulate.f90 functionality

module ForwardModel

using ..CoreTypes: Domain
using ..Settings: Config, Files
using ..ObservationsCore: Observations
using ..Footprints: FootprintData, load_observation_footprints
using ..Tools
using ..InitialConditions: compute_initial_contributions!
import ..MOLAR_MASS_AIR
using LinearAlgebra
using Statistics
using Dates
using NCDatasets
using Printf

export FluxField, PriorFluxes, forward_model, calculate_model_concentrations, resample_footprint_to_domain

"""
    FluxField

Represents a single flux field (NEE, fossil fuel, ocean) with spatial and temporal data.

`data` is indexed `(lon, lat, time)`. `scaling_factor` is not pre-applied to
`data`; it is multiplied in when the field is sampled (see `sample_flux_slice`).
"""
struct FluxField
    name::String
    data::Array{Float64, 3}  # (lon, lat, time)
    lon::Vector{Float64}     # grid longitudes; samplers use binary search, so ascending order is assumed
    lat::Vector{Float64}     # grid latitudes; same ascending-order assumption
    time::Vector{Float64}    # time coordinate (Julian-day-like values after convert_time_values)
    units::String
    scaling_factor::Float64  # multiplicative factor applied at sampling time
    time_units::Union{String, Nothing}  # raw "units" attribute from the source file, if any
end

"""
    PriorFluxes

Container for all flux fields used in the forward model.

Regional fields (`nee`, `fossil_fuel`, `ocean`) are convolved with the domain
footprints; the `*_global` counterparts feed the background term (and, for
ocean, serve as a regional fallback). Any field may be `nothing` when unused.
"""
struct PriorFluxes
    nee::Union{FluxField, Nothing}           # regional net ecosystem exchange
    fossil_fuel::Union{FluxField, Nothing}   # regional fossil-fuel emissions
    ocean::Union{FluxField, Nothing}         # regional ocean flux
    background::Array{Float64, 1}  # Background concentrations: one per observation, or a single shared value
    nee_global::Union{FluxField, Nothing}          # global NEE for the background contribution
    fossil_fuel_global::Union{FluxField, Nothing}  # global fossil-fuel flux for the background contribution
    ocean_global::Union{FluxField, Nothing}        # global ocean flux (background, or fallback when `ocean` is nothing)
end

"""
    forward_model(cfg::Config, domain::Domain, fluxes::PriorFluxes, obs::Observations;
                  files=nothing, footprints=nothing, chi_increment=nothing,
                  H=nothing, chi_prior=nothing) -> Vector{Float64}

Compute modeled concentrations (y_mod) from flux fields and footprints.

# Arguments
- `cfg`: Configuration settings
- `domain`: Domain information with grid details
- `fluxes`: Prior flux fields (NEE, fossil fuel, ocean, background)
- `obs`: Observations structure; its diagnostic vectors are resized and overwritten in place

# Keywords
- `files`: Optional `Files` record; required to load footprints and initial-condition
  contributions when they are not supplied directly
- `footprints`: Optional pre-computed footprints; if `nothing`, they are loaded via `files`
- `chi_increment`: Optional state increment; `H * chi_increment` becomes the posterior
  model increment
- `H`: Optional observation operator matrix
- `chi_prior`: Optional prior state vector; `H * chi_prior` becomes the prior model increment

# Returns
- Vector of total modeled concentrations with same length as observations (also stored
  in `obs.model_values`)
"""
function forward_model(cfg::Config, domain::Domain, fluxes::PriorFluxes, obs::Observations;
                      files::Union{Files,Nothing}=nothing,
                      footprints::Union{Nothing,Vector{Union{Nothing,FootprintData}}}=nothing,
                      chi_increment::Union{Nothing,Vector{Float64}}=nothing,
                      H::Union{Nothing,AbstractMatrix}=nothing,
                      chi_prior::Union{Nothing,Vector{Float64}}=nothing)
    n_obs = length(obs.concentrations)
    y_mod = zeros(Float64, n_obs)

    # Optional per-observation error-breakdown dump, controlled by an
    # environment variable and requiring an output path from `files`.
    debug_enabled = get(ENV, "FLEXINVERT_DEBUG_OUTPUT", "0") in ("1", "true", "TRUE")
    path_output = (files !== nothing) ? files.path_output : ""
    record_error_components = debug_enabled && !isempty(path_output)
    error_components = record_error_components ? zeros(Float64, n_obs, 11) : Array{Float64}(undef, 0, 0)
    # Pre-fill with empty traces: observations skipped in the loop below
    # (missing or empty footprint) previously left undefined entries that
    # raised an UndefRefError when the traces were dumped at the end.
    contrib_records = record_error_components ? [Float64[] for _ in 1:n_obs] : Vector{Vector{Float64}}()

    fp_cache = footprints
    if fp_cache === nothing
        files === nothing && error("forward_model: either provide footprints or files to load them")
        fp_cache = load_observation_footprints(files, cfg, domain, obs)
    end

    # Ensure observation arrays sized appropriately
    resize_observation_diagnostics!(obs, n_obs)

    # Conversion from flux units to mixing-ratio units; molar mass falls back
    # to 44.0 (CO2) when not configured.
    molar_mass = cfg.molarmass > 0 ? cfg.molarmass : 44.0
    mass_conversion = cfg.coeff * MOLAR_MASS_AIR / molar_mass

    y_mod .= 0.0
    obs.background .= 0.0
    obs.nee_contrib .= 0.0
    obs.fff_contrib .= 0.0
    obs.ocean_contrib .= 0.0
    obs.total_errors .= 0.0
    obs.prior_values .= 0.0
    obs.posterior_values .= 0.0

    meas_floor = cfg.measerr > 0 ? cfg.measerr : 0.0

    for i in 1:n_obs
        fp = fp_cache[i]

        # Per-observation measurement error with configured floor and a final
        # small positive fallback so the error is never zero.
        base_err = i <= length(obs.measurement_errors) ? obs.measurement_errors[i] : 0.0
        meas_err = base_err > 0 ? base_err : meas_floor
        meas_err = meas_err > 0 ? meas_err : 1e-3

        # Background from file: per-observation vector or a single shared value.
        bg_from_file = 0.0
        if length(fluxes.background) == n_obs
            bg_from_file = fluxes.background[i]
        elseif length(fluxes.background) == 1
            bg_from_file = fluxes.background[1]
        end

        if fp === nothing
            @warn "Observation $i lacks footprint data; using prior background only"
            obs.background[i] = bg_from_file
            obs.total_errors[i] = meas_err
            y_mod[i] = bg_from_file
            continue
        end

        footprint_grid = fp.grid .* mass_conversion
        gtime = fp.gtime
        ngrid = size(footprint_grid, 3)
        if ngrid == 0
            obs.background[i] = bg_from_file
            obs.total_errors[i] = meas_err
            y_mod[i] = bg_from_file
            continue
        end

        background_val = bg_from_file
        bkgerr = 0.0

        # Background contribution from the global footprint: sum over time,
        # zero out the inversion domain, then convolve with the global fluxes.
        if fp.global_grid !== nothing
            global_scaled = fp.global_grid .* mass_conversion
            hbkg = dropdims(sum(global_scaled, dims=3); dims=3)
            if !isempty(fp.global_lon) && !isempty(fp.global_lat)
                lon_mask = find_interval_indices(fp.global_lon, domain.rllx, domain.rurx)
                lat_mask = find_interval_indices(fp.global_lat, domain.rlly, domain.rury)
                if !isempty(lon_mask) && !isempty(lat_mask)
                    hbkg[lon_mask, lat_mask] .= 0.0
                end
            end
            if fluxes.nee_global !== nothing || fluxes.fossil_fuel_global !== nothing || fluxes.ocean_global !== nothing
                background_time = gtime[1]
                bg_matrix = zeros(size(hbkg))
                if fluxes.nee_global !== nothing
                    idx = nearest_time_index(fluxes.nee_global.time, background_time)
                    bg_matrix .+= sample_flux_slice(fluxes.nee_global, fp.global_lon, fp.global_lat, idx)
                end
                if fluxes.fossil_fuel_global !== nothing
                    idx = nearest_time_index(fluxes.fossil_fuel_global.time, background_time)
                    bg_matrix .+= sample_flux_slice(fluxes.fossil_fuel_global, fp.global_lon, fp.global_lat, idx)
                end
                if fluxes.ocean_global !== nothing
                    idx = nearest_time_index(fluxes.ocean_global.time, background_time)
                    bg_matrix .+= sample_flux_slice(fluxes.ocean_global, fp.global_lon, fp.global_lat, idx)
                end
                background_val += sum(hbkg .* bg_matrix)
                # Background error: configured relative error applied to the
                # absolute background contribution, squared for the quadrature sum.
                bkgerr = (sum(hbkg .* abs.(bg_matrix)) * cfg.flxerr)^2
            end
        end

        nee_val = 0.0
        fff_val = 0.0
        ocn_val = 0.0
        ffferr = 0.0
        fff_sum_sq = 0.0
        fff_count = 0
        fff_max_abs = 0.0
        fff_trace = record_error_components ? Float64[] : nothing

        lon_targets = fp.lon
        lat_targets = fp.lat

        # Convolve each footprint time slab with the flux slice nearest in time.
        for n in 1:ngrid
            t = n <= length(gtime) ? gtime[n] : gtime[end]

            if fluxes.nee !== nothing
                idx = nearest_time_index(fluxes.nee.time, t)
                slice = sample_flux_slice(fluxes.nee, lon_targets, lat_targets, idx)
                nee_val += sum(footprint_grid[:, :, n] .* slice)
            end
            if fluxes.fossil_fuel !== nothing
                idx = nearest_time_index(fluxes.fossil_fuel.time, t)
                slice = sample_flux_slice(fluxes.fossil_fuel, lon_targets, lat_targets, idx)
                contrib = sum(footprint_grid[:, :, n] .* slice)
                fff_val += contrib
                if cfg.ffferr > 0
                    ffferr += (contrib * cfg.ffferr)^2
                end
                fff_sum_sq += contrib^2
                fff_count += 1
                if abs(contrib) > fff_max_abs
                    fff_max_abs = abs(contrib)
                end
                if fff_trace !== nothing
                    push!(fff_trace, contrib)
                end
            end
            # Regional ocean flux takes precedence; fall back to the global one.
            if fluxes.ocean !== nothing
                idx = nearest_time_index(fluxes.ocean.time, t)
                slice = sample_flux_slice(fluxes.ocean, lon_targets, lat_targets, idx)
                ocn_val += sum(footprint_grid[:, :, n] .* slice)
            elseif fluxes.ocean_global !== nothing
                idx = nearest_time_index(fluxes.ocean_global.time, t)
                slice = sample_flux_slice(fluxes.ocean_global, lon_targets, lat_targets, idx)
                ocn_val += sum(footprint_grid[:, :, n] .* slice)
            end
        end

        obs.nee_contrib[i] = nee_val
        obs.fff_contrib[i] = fff_val
        obs.ocean_contrib[i] = ocn_val
        obs.background[i] = background_val

        # Quadrature sum of measurement, background, and fossil-fuel errors.
        total_err = sqrt(meas_err^2 + bkgerr + ffferr)
        obs.total_errors[i] = total_err

        y_mod[i] = background_val + nee_val + fff_val + ocn_val

        if record_error_components
            error_components[i, 1] = meas_err
            error_components[i, 2] = bkgerr
            error_components[i, 3] = ffferr
            error_components[i, 4] = total_err
            error_components[i, 5] = background_val
            error_components[i, 6] = nee_val
            error_components[i, 7] = fff_val
            error_components[i, 8] = ocn_val
            error_components[i, 9] = fff_sum_sq
            error_components[i, 10] = fff_count
            error_components[i, 11] = fff_max_abs
            contrib_records[i] = fff_trace === nothing ? Float64[] : fff_trace
        end
    end

    # INITCONC-derived background is diagnostic-only here; it requires file
    # paths, so skip it when no Files record was given (the previous code called
    # compute_background_from_initconc(nothing, ...) and raised a MethodError).
    if files !== nothing
        background_init = compute_background_from_initconc(files, cfg, domain, obs)
        if background_init !== nothing
            @debug "INITCONC background ignored in forward model" maximum(background_init)
        end
    end

    initial_contrib = zeros(Float64, n_obs)
    if files !== nothing
        try
            initial_contrib = compute_initial_contributions!(obs, files, cfg, domain)
        catch err
            @warn "Failed to compute initial mixing ratio contributions" err
            obs.cinipos .= 0.0
            initial_contrib .= 0.0
        end
    else
        obs.cinipos .= 0.0
    end

    effective_conc = obs.concentrations .- initial_contrib

    # Model increments from the state vector mapped through the observation operator.
    model_increment_prior = zeros(Float64, n_obs)
    model_increment_posterior = zeros(Float64, n_obs)

    if H !== nothing
        if chi_prior !== nothing && !isempty(chi_prior)
            model_increment_prior .= H * chi_prior
        end
        if chi_increment !== nothing && !isempty(chi_increment)
            model_increment_posterior .= H * chi_increment
        end
    end

    obs.prior_values .= model_increment_prior
    obs.posterior_values .= model_increment_prior .+ model_increment_posterior

    # Include initial mixing ratio contribution
    obs.cinipos .= initial_contrib

    # y_mod now holds only the state-vector increment; the per-loop values above
    # are superseded here, and the total re-adds the flux/background/initial terms
    # stored on `obs`.
    y_mod .= obs.posterior_values
    total = y_mod .+ obs.nee_contrib .+ obs.fff_contrib .+ obs.ocean_contrib .+ obs.background .+ obs.cinipos
    obs.model_values .= total
    obs.residuals .= total .- obs.concentrations

    if record_error_components
        error_path = joinpath(path_output, "obs_error_breakdown_julia.txt")
        open(error_path, "w") do io
            println(io, "# obs_index meas_err bkgerr ffferr total_err background nee fff ocean fff_sumsq fff_count fff_max_abs")
            @inbounds for i in 1:n_obs
                println(io, @sprintf("%d %.12e %.12e %.12e %.12e %.12e %.12e %.12e %.12e %.12e %d %.12e",
                                     i,
                                     error_components[i, 1],
                                     error_components[i, 2],
                                     error_components[i, 3],
                                     error_components[i, 4],
                                     error_components[i, 5],
                                     error_components[i, 6],
                                     error_components[i, 7],
                                     error_components[i, 8],
                                     error_components[i, 9],
                                     Int(error_components[i, 10]),
                                     error_components[i, 11]))
            end
        end
        # Dump fossil-fuel contribution traces for diagnostics
        for i in 1:n_obs
            contrib_path = joinpath(path_output, @sprintf("fff_contrib_obs%03d.txt", i))
            open(contrib_path, "w") do io
                println(io, "# index contrib_value")
                trace = contrib_records[i]
                for (idx, value) in enumerate(trace)
                    println(io, @sprintf("%d %.12e", idx, value))
                end
            end
        end
    end

    return total
end

# Candidate variable/dimension names used to auto-detect coordinates in INITCONC
# NetCDF files. Matching is case-insensitive (see find_coordinate_name and
# sample_background_value).
const LON_VAR_CANDIDATES = ("longitude", "lon", "lonc", "x")
const LAT_VAR_CANDIDATES = ("latitude", "lat", "latc", "y")
const TIME_VAR_CANDIDATES = ("time", "t", "time_counter")
const LEVEL_VAR_CANDIDATES = ("pressure", "level", "lev", "layer", "plev")

"""
    compute_background_from_initconc(files, cfg, domain, obs)

Sample a background concentration for each observation from INITCONC NetCDF
files. Returns one value per observation, or `nothing` when the INITCONC
configuration is incomplete, a station's coordinates are missing, or loading
fails (a warning is logged in the failure cases).
"""
function compute_background_from_initconc(files::Files, cfg::Config, domain::Domain, obs::Observations)
    # All three INITCONC settings must be present to proceed.
    if isempty(files.path_initconc) || isempty(files.file_initconc) || isempty(files.varname_init)
        return nothing
    end

    station_coords = load_station_coordinates(files.file_recept)
    isempty(station_coords) && return nothing

    # Cache loaded datasets by resolved path so each file is read once.
    dataset_cache = Dict{String,Any}()

    try
        n = length(obs.concentrations)
        result = Vector{Float64}(undef, n)
        default_dt = Dates.now()
        for i in 1:n
            station = obs.stations[i]
            coord = get(station_coords, station, nothing)
            if coord === nothing
                @warn "Missing station coordinates for $station in $(files.file_recept); skipping INITCONC background"
                return nothing
            end

            dt = (i <= length(obs.datetimes) && obs.datetimes[i] isa DateTime) ? obs.datetimes[i] : default_dt
            dataset_path = joinpath(files.path_initconc, resolve_initconc_path(files.file_initconc, dt))

            entry = get!(dataset_cache, dataset_path) do
                load_initconc_dataset(dataset_path, files.varname_init)
            end

            result[i] = sample_background_value(entry, coord[1], coord[2], dt)
        end
        return result
    catch err
        @warn "Failed to load INITCONC background" err
        return nothing
    end
end

"""
    load_station_coordinates(path) -> Dict{String, Tuple{Float64, Float64}}

Parse a receptor list file into a map from station name to `(lat, lon)`.

Each non-empty, non-`#` line must contain at least three whitespace-separated
fields: station name, latitude, longitude. Lines whose numeric fields do not
parse are skipped. Returns an empty dictionary when `path` is empty or not a
file.
"""
function load_station_coordinates(path::AbstractString)
    coords = Dict{String, Tuple{Float64, Float64}}()
    (isempty(path) || !isfile(path)) && return coords

    open(path, "r") do io
        for line in eachline(io)
            s = strip(line)
            (isempty(s) || startswith(s, "#")) && continue
            parts = split(s)
            length(parts) < 3 && continue
            # tryparse instead of try/catch: no exceptions for control flow.
            lat = tryparse(Float64, parts[2])
            lon = tryparse(Float64, parts[3])
            (lat === nothing || lon === nothing) && continue
            coords[parts[1]] = (lat, lon)
        end
    end
    return coords
end

"""
    resolve_initconc_path(template, dt) -> String

Expand date placeholders in an INITCONC filename template using `dt`.

Supported placeholders are `YYYYMMDD`, `YYYYMM`, `YYYY`, `MM`, `DD`. Longer
placeholders are substituted first so that e.g. `YYYYMM` is not consumed by the
shorter `YYYY`/`MM` rules.
"""
function resolve_initconc_path(template::AbstractString, dt::DateTime)
    replacements = (
        "YYYYMMDD" => Dates.format(dt, "yyyymmdd"),
        "YYYYMM" => Dates.format(dt, "yyyymm"),
        "YYYY" => Dates.format(dt, "yyyy"),
        "MM" => Dates.format(dt, "mm"),
        "DD" => Dates.format(dt, "dd"),
    )
    # `replace` is a no-op when the pattern is absent, so no occursin pre-check
    # is needed.
    return foldl((s, kv) -> replace(s, kv), replacements; init=String(template))
end

"""
    load_initconc_dataset(path, varname) -> NamedTuple

Load an INITCONC NetCDF file and return `(data, dims, lon, lat, time)`.

`data` is the full in-memory array for `varname`, `dims` its dimension names,
and `time` the time coordinate passed through `convert_time_values` (or
`nothing` when no time variable is found). Throws when `path` does not exist.
"""
function load_initconc_dataset(path::AbstractString, varname::AbstractString)
    if !isfile(path)
        error("INITCONC background file not found: $path")
    end

    # The do-block form closes the dataset and returns the block's value.
    Dataset(path) do ds
        # Coordinate variables are located by case-insensitive name matching.
        lon_name = find_coordinate_name(ds, LON_VAR_CANDIDATES)
        lat_name = find_coordinate_name(ds, LAT_VAR_CANDIDATES)
        lon = collect(ds[lon_name][:])
        lat = collect(ds[lat_name][:])

        var_key = find_variable_name(ds, varname)
        var = ds[var_key]
        data = Array(var)  # materialize the whole variable in memory
        dims = Tuple(dimnames(var))

        # Time is optional; decode its units when present.
        time_name = find_coordinate_name(ds, TIME_VAR_CANDIDATES; allow_missing=true)
        time_vals = nothing
        if time_name !== nothing
            time_var = ds[time_name]
            time_units = haskey(time_var.attrib, "units") ? String(time_var.attrib["units"]) : nothing
            raw_time = Float64.(collect(time_var[:]))
            time_vals = convert_time_values(raw_time, time_units)
        end

        return (
            data=data,
            dims=dims,
            lon=lon,
            lat=lat,
            time=time_vals,
        )
    end
end

"""
    find_coordinate_name(ds, candidates; allow_missing=false)

Return the first variable name in `ds` that case-insensitively matches one of
`candidates`. When nothing matches, return `nothing` if `allow_missing` is set,
otherwise raise an error.
"""
function find_coordinate_name(ds::Dataset, candidates; allow_missing=false)
    wanted = Set(lowercase(c) for c in candidates)
    for name in keys(ds)
        lowercase(String(name)) in wanted && return name
    end
    allow_missing && return nothing
    error("Could not locate coordinate variable matching $(candidates)")
end

"""
    find_variable_name(ds, target)

Return the key in `ds` matching `target`, preferring an exact match and falling
back to a case-insensitive comparison on the stripped name. Errors when no
variable matches.
"""
function find_variable_name(ds::Dataset, target::AbstractString)
    haskey(ds, target) && return target
    wanted = lowercase(strip(target))
    for name in keys(ds)
        lowercase(String(name)) == wanted && return name
    end
    error("Variable $target not found in $(ds.path)")
end

"""
    convert_time_values(values, units) -> Vector{Float64}

Convert a raw NetCDF time coordinate to Julian-day values.

When `units` is a CF-style string (`"<unit> since <origin>"`), the values are
rescaled via `time_unit_factor` and offset from the origin date; otherwise they
are passed straight to `interpret_calendar_times`, which decodes packed
YYYYMMDD[HH[MM]]-style stamps.
"""
function convert_time_values(values::Vector{Float64}, units::Union{String, Nothing})
    if units === nothing
        return interpret_calendar_times(values)
    end

    str = lowercase(strip(units))
    if !occursin("since", str)
        return interpret_calendar_times(values)
    end

    parts = split(str, "since")
    unit_str = strip(parts[1])
    origin_str = strip(parts[2])
    base_dt = parse_time_origin(origin_str)
    base_j = Dates.datetime2julian(base_dt)

    factor = time_unit_factor(unit_str)
    converted = base_j .+ values .* factor
    return interpret_calendar_times(converted)
end

"""
    parse_time_origin(origin_str) -> DateTime

Parse a CF time origin such as `"2000-01-01"`, `"2000-01-01T00:00:00"` or the
very common `"2000-01-01 00:00:00"` (space separator). The previous code only
accepted strict ISO forms and silently fell back to 1900-01-01 for
space-separated origins, shifting every decoded time. Falls back to
`DateTime(1900, 1, 1)` only when nothing parses.
"""
function parse_time_origin(origin_str::AbstractString)
    # ISO form, allowing the common space separator in place of 'T'.
    dt = tryparse(DateTime, replace(origin_str, ' ' => 'T'))
    dt !== nothing && return dt
    # Date-only origin, or a date token followed by extra fields (e.g. a zone).
    tokens = split(origin_str)
    if !isempty(tokens)
        d = tryparse(Date, String(first(tokens)))
        d !== nothing && return DateTime(d)
    end
    return DateTime(1900, 1, 1)
end

"""
    interpret_calendar_times(values) -> Vector{Float64}

Run `decode_calendar_like_value` over every element. If any element was
recognized as a calendar-like stamp, return the decoded vector; otherwise
return the input vector itself (no copy).
"""
function interpret_calendar_times(values::Vector{Float64})
    out = similar(values)
    any_decoded = false
    for i in eachindex(values)
        out[i], was_decoded = decode_calendar_like_value(values[i])
        any_decoded |= was_decoded
    end
    return any_decoded ? out : values
end

"""
    decode_calendar_like_value(val) -> (Float64, Bool)

Interpret `val` as a packed calendar stamp — `YYYYMMDD` (8 digits),
`YYYYMMDDHH` (10) or `YYYYMMDDHHMM` (12) — and return `(julian_day, true)` when
it decodes to a valid date. Otherwise return `(val, false)` unchanged. Values
below 1e6 or longer than 12 digits are never treated as stamps.
"""
function decode_calendar_like_value(val::Float64)
    # Reject non-finite values and anything beyond the 12-digit stamp range;
    # the upper bound also prevents an InexactError in the integer conversion
    # below (the previous code threw on e.g. 1e20).
    if !isfinite(val) || abs(val) >= 1.0e13
        return val, false
    end

    rounded = round(Int64, val)
    absval = abs(rounded)

    # Plausible stamps have at least 7 digits; below that, pass through.
    if absval < 1_000_000
        return val, false
    end

    digits_str = string(absval)
    nd = length(digits_str)
    nd in (8, 10, 12) || return val, false

    # Fixed-width digit slices of a positive integer cannot fail to parse.
    year = parse(Int, digits_str[1:4])
    month = parse(Int, digits_str[5:6])
    day = parse(Int, digits_str[7:8])
    hour = nd >= 10 ? parse(Int, digits_str[9:10]) : 0
    minute = nd == 12 ? parse(Int, digits_str[11:12]) : 0

    if !(1 <= month <= 12 && 1 <= day <= 31 && 0 <= hour <= 23 && 0 <= minute <= 59)
        return val, false
    end

    # DateTime still rejects impossible dates such as Feb 30.
    dt = try
        DateTime(year, month, day, hour, minute, 0)
    catch
        return val, false
    end

    return Dates.datetime2julian(dt), true
end

"""
    time_unit_factor(unit) -> Float64

Factor converting one `unit` (day/hour/minute/second, any suffix and case) into
days. Unrecognized units default to 1.0 (days).
"""
function time_unit_factor(unit::AbstractString)
    u = lowercase(strip(unit))
    for (prefix, factor) in (("day", 1.0),
                             ("hour", 1.0 / 24.0),
                             ("minute", 1.0 / 1440.0),
                             ("second", 1.0 / 86400.0))
        startswith(u, prefix) && return factor
    end
    return 1.0
end

"""
    sample_background_value(entry, lat, lon, dt) -> Float64

Extract a background concentration from a loaded INITCONC dataset `entry` (the
NamedTuple produced by `load_initconc_dataset`) at location `(lat, lon)` and
time `dt`, via bilinear interpolation of a 2-D lat/lon slice.

Time and level dimensions (when present among `entry.dims`) are collapsed:
time to the index nearest `dt` (or index 1 when no time coordinate was
decoded), level to the first index.
"""
function sample_background_value(entry, lat::Float64, lon::Float64, dt::DateTime)
    data = entry.data
    dims = entry.dims

    # Start with every dimension selected in full; pin time/level below.
    indices = ntuple(_ -> Colon(), length(dims))
    dim_labels = collect(dims)

    time_dim = findfirst(d -> lowercase(String(d)) in lowercase.(TIME_VAR_CANDIDATES), dim_labels)
    if time_dim !== nothing && entry.time !== nothing
        jd = Dates.datetime2julian(dt)
        idx = nearest_index(entry.time, jd)
        indices = Base.setindex(indices, idx, time_dim)
    elseif time_dim !== nothing
        # A time dimension exists but no usable coordinate; take the first slice.
        indices = Base.setindex(indices, 1, time_dim)
    end

    level_dim = findfirst(d -> lowercase(String(d)) in lowercase.(LEVEL_VAR_CANDIDATES), dim_labels)
    if level_dim !== nothing
        indices = Base.setindex(indices, 1, level_dim)
    end

    slice = data[indices...]
    remaining_dims = [dim_labels[i] for i in 1:length(dim_labels) if indices[i] isa Colon]

    # NOTE(review): the reductions below assume the remainder is effectively
    # 2-D lat/lon. A 3-D remainder drops its FIRST dimension unconditionally —
    # confirm this matches the INITCONC variable layout.
    if ndims(slice) == 3
        slice = dropdims(slice, dims=1)
    end
    if ndims(slice) == 1
        slice = reshape(slice, :, 1)
    end

    lat_first = !isempty(remaining_dims) && lowercase(String(remaining_dims[1])) in lowercase.(LAT_VAR_CANDIDATES)
    lon_first = !isempty(remaining_dims) && lowercase(String(remaining_dims[1])) in lowercase.(LON_VAR_CANDIDATES)
    field = slice
    if lon_first
        field = permutedims(field, (2,1))
    elseif !lat_first
        # Ensure field rows correspond to latitude and columns to longitude
        # NOTE(review): when the leading dimension is neither lat nor lon this
        # still transposes, i.e. (lon, lat) ordering is presumed the default —
        # verify against the files actually used.
        field = permutedims(field, (2,1))
    end

    return bilinear_interpolate(entry.lat, entry.lon, field, lat, lon)
end

"""
    nearest_index(array, value) -> Int

Index of the element of ascending-sorted `array` closest to `value` (binary
search; ties resolve to the lower index). Returns 1 for an empty array.
"""
function nearest_index(array::AbstractVector{T}, value::T) where {T<:Real}
    isempty(array) && return 1
    pos = searchsortedfirst(array, value)
    pos <= 1 && return 1
    pos > length(array) && return length(array)
    below, above = array[pos-1], array[pos]
    # `<=` keeps the lower index on ties.
    return abs(value - below) <= abs(above - value) ? pos - 1 : pos
end

"""
    bilinear_interpolate(latitudes, longitudes, field, lat, lon) -> Float64

Bilinearly interpolate `field` (rows = latitude, columns = longitude) at
`(lat, lon)`. Axes given in descending order are reversed first; `lon` is
wrapped into the grid's longitude span and `lat` clamped to its range.
Single-point axes are handled by nearest-cell lookup along that axis (the
previous code indexed out of bounds for them).
"""
function bilinear_interpolate(latitudes::Vector{Float64}, longitudes::Vector{Float64}, field::Array{Float64,2}, lat::Float64, lon::Float64)
    lat_arr = copy(latitudes)
    lon_arr = copy(longitudes)
    grid = copy(field)

    # Normalize both axes to ascending order so searchsorted is valid.
    if lat_arr[1] > lat_arr[end]
        lat_arr = reverse(lat_arr)
        grid = reverse(grid, dims=1)
    end
    if lon_arr[1] > lon_arr[end]
        lon_arr = reverse(lon_arr)
        grid = reverse(grid, dims=2)
    end

    lon_mod = wrap_longitude(lon, lon_arr[1], lon_arr[end])
    lat_mod = clamp(lat, minimum(lat_arr), maximum(lat_arr))

    i_lon_low, i_lon_hi = _bracketing_indices(lon_arr, lon_mod)
    i_lat_low, i_lat_hi = _bracketing_indices(lat_arr, lat_mod)

    lon0, lon1 = lon_arr[i_lon_low], lon_arr[i_lon_hi]
    lat0, lat1 = lat_arr[i_lat_low], lat_arr[i_lat_hi]
    # Zero weight on a degenerate (coincident) axis pair avoids division by ~0.
    w_lon = lon1 ≈ lon0 ? 0.0 : (lon_mod - lon0) / (lon1 - lon0)
    w_lat = lat1 ≈ lat0 ? 0.0 : (lat_mod - lat0) / (lat1 - lat0)

    v00 = grid[i_lat_low, i_lon_low]
    v01 = grid[i_lat_low, i_lon_hi]
    v10 = grid[i_lat_hi, i_lon_low]
    v11 = grid[i_lat_hi, i_lon_hi]

    return (1 - w_lon) * ((1 - w_lat) * v00 + w_lat * v10) + w_lon * ((1 - w_lat) * v01 + w_lat * v11)
end

"""
    _bracketing_indices(axis, value) -> (low, high)

Indices of the two entries of ascending-sorted `axis` bracketing `value`,
clamped to the array. A single-element axis returns `(1, 1)`.
"""
function _bracketing_indices(axis::Vector{Float64}, value::Float64)
    n = length(axis)
    n <= 1 && return 1, 1
    hi = searchsortedfirst(axis, value)
    if hi <= 1
        return 1, 2
    elseif hi > n
        return n - 1, n
    else
        return hi - 1, hi
    end
end

"""
    wrap_longitude(lon, min_lon, max_lon) -> Float64

Shift `lon` by multiples of the span `max_lon - min_lon` until it falls within
`[min_lon, max_lon]`, then clamp for floating-point safety. Returns `lon`
unchanged when the span is non-positive.
"""
function wrap_longitude(lon::Float64, min_lon::Float64, max_lon::Float64)
    span = max_lon - min_lon
    span <= 0 && return lon
    lon_mod = lon
    while lon_mod < min_lon
        lon_mod += span
    end
    while lon_mod > max_lon
        lon_mod -= span
    end
    return clamp(lon_mod, min_lon, max_lon)
end

"""
    resize_observation_diagnostics!(obs, n_obs)

Resize the per-observation diagnostic vectors on `obs` to `n_obs` so the
full-length broadcast assignments in `forward_model` cannot raise a
`DimensionMismatch`. Newly created slots are uninitialized; `forward_model`
overwrites them before use.
"""
function resize_observation_diagnostics!(obs::Observations, n_obs::Int)
    resize!(obs.model_values, n_obs)
    resize!(obs.nee_contrib, n_obs)
    resize!(obs.fff_contrib, n_obs)
    resize!(obs.ocean_contrib, n_obs)
    resize!(obs.background, n_obs)
    resize!(obs.cinipos, n_obs)
    resize!(obs.total_errors, n_obs)
    # forward_model also broadcasts full-length vectors into these three fields;
    # size them here as well so a shorter pre-existing vector cannot fail.
    resize!(obs.prior_values, n_obs)
    resize!(obs.posterior_values, n_obs)
    resize!(obs.residuals, n_obs)
end

"""
    resample_footprint_to_domain(grid, lon_src, lat_src, lon_edges, lat_edges)

Average a footprint `grid` (lon, lat, time) onto the cells defined by
`lon_edges`/`lat_edges`. Source points are binned by `edge[i] <= x < edge[i+1]`;
an empty bin falls back to the single source point nearest the cell centre.
"""
function resample_footprint_to_domain(grid::Array{Float64,3}, lon_src::Vector{Float64}, lat_src::Vector{Float64},
                                      lon_edges::Vector{Float64}, lat_edges::Vector{Float64})
    ncell_x = length(lon_edges) - 1
    ncell_y = length(lat_edges) - 1
    nsteps = size(grid, 3)
    out = zeros(Float64, ncell_x, ncell_y, nsteps)

    # Source indices falling in [lo, hi); nearest point when the bin is empty.
    bin_indices(src, lo, hi) = begin
        hits = findall(v -> lo <= v < hi, src)
        isempty(hits) ? [nearest_index(src, (lo + hi) / 2)] : hits
    end

    for cx in 1:ncell_x
        cols = bin_indices(lon_src, lon_edges[cx], lon_edges[cx+1])
        for cy in 1:ncell_y
            rows = bin_indices(lat_src, lat_edges[cy], lat_edges[cy+1])
            for ts in 1:nsteps
                out[cx, cy, ts] = mean(grid[cols, rows, ts])
            end
        end
    end

    return out
end

"""
    find_interval_indices(values, low, high; atol=1e-6) -> Vector{Int}

Indices of `values` lying within `[min(low,high) - atol, max(low,high) + atol]`.
Returns an empty vector for empty input.
"""
function find_interval_indices(values::Vector{Float64}, low::Float64, high::Float64; atol::Float64=1e-6)
    if isempty(values)
        return Int[]
    end
    lower = min(low, high) - atol
    upper = max(low, high) + atol
    return [i for (i, v) in pairs(values) if lower <= v <= upper]
end

"""
    sample_flux_slice(flux, lon_targets, lat_targets, time_idx) -> Matrix{Float64}

Bilinearly sample one time slice of `flux` at every
`(lon_targets[i], lat_targets[j])` point, returning an `(nx, ny)` matrix
multiplied by `flux.scaling_factor`. Target longitudes are wrapped into the
source span and latitudes clamped to its range; `time_idx` is clamped to the
available slices. Returns zeros when the flux has empty coordinates or no
time slices.
"""
function sample_flux_slice(flux::FluxField, lon_targets::Vector{Float64}, lat_targets::Vector{Float64}, time_idx::Int)
    nx = length(lon_targets)
    ny = length(lat_targets)
    if isempty(flux.lon) || isempty(flux.lat) || size(flux.data, 3) == 0
        return zeros(Float64, nx, ny)
    end

    slice = flux.data[:, :, clamp(time_idx, 1, size(flux.data, 3))]
    lon_src = flux.lon
    lat_src = flux.lat

    # Bracketing indices and interpolation weight along one ascending axis;
    # out-of-range targets collapse to the edge cell with zero weight.
    bracket(src, val) = begin
        pos = searchsortedfirst(src, val)
        lo = pos > 1 ? pos - 1 : 1
        hi = pos < length(src) ? pos : length(src)
        w = hi == lo ? 0.0 : (val - src[lo]) / (src[hi] - src[lo])
        lo, hi, w
    end

    lon_lo_bound, lon_hi_bound = extrema(lon_src)
    lat_lo_bound, lat_hi_bound = extrema(lat_src)

    out = Array{Float64}(undef, nx, ny)
    for ix in 1:nx
        lon_val = wrap_longitude(lon_targets[ix], lon_lo_bound, lon_hi_bound)
        lon_lo, lon_hi, w_lon = bracket(lon_src, lon_val)
        for iy in 1:ny
            lat_val = clamp(lat_targets[iy], lat_lo_bound, lat_hi_bound)
            lat_lo, lat_hi, w_lat = bracket(lat_src, lat_val)

            low_row = (1 - w_lon) * slice[lon_lo, lat_lo] + w_lon * slice[lon_hi, lat_lo]
            high_row = (1 - w_lon) * slice[lon_lo, lat_hi] + w_lon * slice[lon_hi, lat_hi]
            out[ix, iy] = (1 - w_lat) * low_row + w_lat * high_row
        end
    end

    return out .* flux.scaling_factor
end

"""
    compute_flux_observation_contribution(flux, footprint, gtime, lon_edges, lat_edges) -> Float64

Convolve `footprint` (domain grid × time) with `flux` aggregated onto the same
domain cells, using the flux time slice nearest each footprint time `gtime[it]`.
The total is scaled by `flux.scaling_factor`.
"""
function compute_flux_observation_contribution(flux::FluxField, footprint::Array{Float64,3}, gtime::Vector{Float64},
                                               lon_edges::Vector{Float64}, lat_edges::Vector{Float64})
    total = 0.0
    flux_times = flux.time
    for it in 1:size(footprint, 3)
        # Pick the flux slice closest in time, clamped to the available range.
        time_idx = nearest_time_index(flux_times, gtime[it])
        flux_slice = flux.data[:, :, min(time_idx, size(flux.data, 3))]
        flux_on_domain = aggregate_flux_to_domain(flux_slice, flux.lon, flux.lat, lon_edges, lat_edges)
        total += sum(footprint[:, :, it] .* flux_on_domain)
    end

    return total * flux.scaling_factor
end

"""
    aggregate_flux_to_domain(flux_slice, lon, lat, lon_edges, lat_edges) -> Matrix{Float64}

Average `flux_slice` (lon × lat) onto the cells defined by `lon_edges` and
`lat_edges`. Points are binned by `edge[i] <= x < edge[i+1]`; an empty bin uses
the single source point nearest the cell centre.
"""
function aggregate_flux_to_domain(flux_slice::Array{Float64,2}, lon::Vector{Float64}, lat::Vector{Float64},
                                  lon_edges::Vector{Float64}, lat_edges::Vector{Float64})
    ncx = length(lon_edges) - 1
    ncy = length(lat_edges) - 1
    out = zeros(Float64, ncx, ncy)

    # Source indices inside [lo, hi); nearest point when the bin is empty.
    cell_members(src, lo, hi) = begin
        members = findall(v -> lo <= v < hi, src)
        isempty(members) ? [nearest_index(src, (lo + hi) / 2)] : members
    end

    for cx in 1:ncx
        cols = cell_members(lon, lon_edges[cx], lon_edges[cx+1])
        for cy in 1:ncy
            rows = cell_members(lat, lat_edges[cy], lat_edges[cy+1])
            out[cx, cy] = mean(flux_slice[cols, rows])
        end
    end

    return out
end

"""
    nearest_time_index(times, target) -> Int

Index of the entry in ascending-sorted `times` closest to `target` (binary
search; ties resolve to the earlier index). Returns 1 for an empty vector.
"""
function nearest_time_index(times::Vector{Float64}, target::Float64)
    isempty(times) && return 1

    pos = searchsortedfirst(times, target)
    pos <= 1 && return 1
    pos > length(times) && return length(times)

    # Strict '<' keeps the earlier index on ties.
    return abs(times[pos] - target) < abs(times[pos-1] - target) ? pos : pos - 1
end

"""
    nearest_index(values, target) -> Int

Index of the element of `values` closest to `target` via a linear scan (works
for unsorted vectors, unlike the sorted `AbstractVector` method). Returns 1
for an empty vector instead of erroring, matching the other method's behavior.
"""
function nearest_index(values::Vector{Float64}, target::Float64)
    isempty(values) && return 1  # findmin on an empty collection would throw
    _, idx = findmin(abs.(values .- target))
    return idx
end

"""
    calculate_model_concentrations(cfg::Config, domain::Domain, obs::Observations,
                                 state_vector::Vector{Float64}) -> Vector{Float64}

Calculate model concentrations given current state vector (used in optimization).

# Arguments
- `cfg`: Configuration settings
- `domain`: Domain information
- `obs`: Observations structure
- `state_vector`: Current state vector values

# Returns
- Vector of modeled concentrations
"""
function calculate_model_concentrations(cfg::Config, domain::Domain, obs::Observations,
                                      state_vector::Vector{Float64})
    # Placeholder for the full observation-operator application (H * x).
    # A complete implementation would map the state vector to flux increments,
    # apply the transport operator (footprints), and add the background/prior.
    n_obs = length(obs.concentrations)
    result = zeros(Float64, n_obs)

    @info "Calculating model concentrations from state vector (simplified)"

    # Simplified stand-in: a uniform perturbation proportional to the mean of
    # the state vector.
    if !isempty(state_vector)
        fill!(result, (sum(state_vector) / length(state_vector)) * 0.01)
    end

    return result
end

end # module ForwardModel
