#!/usr/bin/env julia

"""
Benchmark the pybufrkit-backed PrepBUFR decoding and a compact variational
assimilation workflow. This script focuses on environments where the Julia
implementation relies on the pybufrkit decoder and quantifies:

1. PrepBUFR decode throughput (observations per second).
2. End-to-end assimilation throughput on a reduced grid (suitable for CI).
3. Relative throughput against the published Fortran GSI tutorial benchmark
   (63,580 observations assimilated in ~2.8 seconds).

Usage:
    julia benchmarks/benchmark_pybufrkit_assimilation.jl [--prepbufr=<file>] \
        [--max-records=<N>] [--samples=<N>] [--grid=nx,ny,nz]

Results are printed to stdout and written to
`results/pybufrkit_benchmark_summary.toml` for downstream reporting.
"""

using Dates
using LinearAlgebra
using Printf
using Random
using Statistics
using TOML

push!(LOAD_PATH, dirname(dirname(@__FILE__)))

using GSICoreAnalysis
using GSICoreAnalysis.DataIO: read_prepbufr, PrepBUFRConfig, pybufrkit_available
using GSICoreAnalysis.CostFunctions
using GSICoreAnalysis.ControlVectors
using GSICoreAnalysis.Minimization

# -----------------------------------------------------------------------------
# Argument parsing helpers
# -----------------------------------------------------------------------------

"""
    parse_arg(name, default)

Scan `ARGS` for an option of the form `--<name>=<value>` and return the
whitespace-stripped `<value>` of the first match; return `default` when the
option is absent.
"""
function parse_arg(name::AbstractString, default::AbstractString)
    key = string("--", name, "=")
    for candidate in ARGS
        startswith(candidate, key) || continue
        return String(strip(candidate[(length(key) + 1):end]))
    end
    return default
end

"""
    parse_grid(default)

Read the `--grid=nx,ny,nz` command-line option and return it as an
`(nx, ny, nz)` tuple of `Int`s. Returns `default` when the option is not
supplied; errors when the specification does not have exactly three fields.
"""
function parse_grid(default::Tuple{Int,Int,Int})
    spec = parse_arg("grid", "")
    isempty(spec) && return default
    fields = split(spec, ",")
    if length(fields) != 3
        error("Grid specification must be nx,ny,nz")
    end
    nx, ny, nz = parse.(Int, fields)
    return (nx, ny, nz)
end

# -----------------------------------------------------------------------------
# Utility helpers
# -----------------------------------------------------------------------------

"""
    median_elapsed(f; samples=5)

Time `samples` calls of the zero-argument function `f` and return the median
wall-clock time in seconds. A full garbage collection is forced before each
sample to reduce allocation noise between runs.
"""
function median_elapsed(f::Function; samples::Int=5)
    elapsed = Vector{Float64}(undef, samples)
    for i in 1:samples
        GC.gc()  # level the playing field between samples
        elapsed[i] = @elapsed f()
    end
    return median(elapsed)
end

"""
    repeat_to_length(vec, target_len) -> Vector

Return a `Vector` of exactly `target_len` elements built by cycling through
`vec` (truncating when `vec` is longer than `target_len`).

Throws an error for an empty `vec` and an `ArgumentError` for a negative
`target_len`.
"""
function repeat_to_length(vec::AbstractVector{T}, target_len::Int) where T
    if isempty(vec)
        error("Cannot repeat empty vector to target length")
    end
    target_len >= 0 ||
        throw(ArgumentError("target_len must be non-negative, got $target_len"))
    # Single-pass construction: the original sliced-and-copied (two
    # allocations) or materialized ceil(target/len) full repetitions before
    # truncating; cycling lazily allocates exactly the output once.
    return collect(T, Iterators.take(Iterators.cycle(vec), target_len))
end

"""
    assimilation_kernel(obs_data; grid_size=(10, 10, 10), seed=314159)

Run one variational minimization on a synthetic background using the decoded
observations in `obs_data`, and return the solver result. Observations are
cycled/truncated to match the cost function's expected observation count.
The RNG is seeded so repeated timing samples do identical work.
"""
function assimilation_kernel(obs_data; grid_size=(10, 10, 10), seed=314159)
    Random.seed!(seed)

    cfg = AnalysisConfig(
        grid_size=grid_size,
        use_hybrid=false,
        max_iterations=15,
        convergence_tol=1e-6
    )

    cost = CostFunction(cfg)
    state_len = length(cost.background_state)
    # Capture the expected observation count before overwriting the field.
    obs_slots = length(cost.observations)

    # Synthetic background with a diagonal B matrix keeps the kernel cheap.
    cost.background_state = randn(cfg.precision, state_len)
    cost.background_covariance = Diagonal(fill(cfg.precision(4.0), state_len))

    if isempty(obs_data.obs_values)
        error("pybufrkit decoder returned zero observations; cannot benchmark")
    end

    values = repeat_to_length(obs_data.obs_values, obs_slots)
    errors = repeat_to_length(obs_data.obs_errors, obs_slots)

    cost.observations = values
    cost.observation_errors = errors
    cost.observation_covariance = Diagonal(errors .^ 2)

    total_state = length(cost.background_state)
    indices = create_observation_indices(total_state, obs_slots, :uniform)
    forward, adjoint = create_sampling_operator(indices, total_state)

    cost.observation_indices = indices
    cost.observation_operator = forward
    cost.observation_operator_adjoint = adjoint

    ctrl = ControlVector(cfg)
    pcg = PCGSolver(cfg, max_iterations=cfg.max_iterations, tolerance=cfg.convergence_tol)

    return minimize_cost_function(cost, ctrl, pcg)
end

# -----------------------------------------------------------------------------
# Main execution
# -----------------------------------------------------------------------------

"""
    main()

Drive the full benchmark: verify the pybufrkit backend is available, decode a
PrepBUFR file (timed), run the reduced-grid assimilation kernel (timed),
compare throughput against the published Fortran GSI reference, and write a
TOML summary under `results/`.
"""
function main()
    # Hard requirement: this benchmark specifically measures the pybufrkit path.
    pybufrkit_available() || error("pybufrkit is not available; install via pip and rebuild PyCall.")

    repo_root = dirname(dirname(@__FILE__))
    # Prefer the full tutorial case file when present (e.g. inside the comgsi
    # Docker image); otherwise fall back to the small in-repo sample subset.
    default_real = joinpath("/home/docker/comgsi/tutorial/case_data/2018081212/obs", "rap.t12z.prepbufr.tm00")
    default_sample = joinpath(repo_root, "test", "test_data", "sample_prepbufr_subset.bufr")
    default_prepbufr = isfile(default_real) ? default_real : default_sample

    prepbufr_file = parse_arg("prepbufr", default_prepbufr)
    isfile(prepbufr_file) || error("PrepBUFR file not found: $prepbufr_file")

    # Benchmark knobs, all overridable from the command line.
    max_records = parse(Int, parse_arg("max-records", "4000"))
    samples = parse(Int, parse_arg("samples", "5"))
    grid_size = parse_grid((10, 10, 10))

    println("="^78)
    println("PYBUFRKIT ASSIMILATION BENCHMARK")
    println("="^78)
    println(@sprintf("PrepBUFR file    : %s", prepbufr_file))
    println(@sprintf("Max records      : %d", max_records))
    println(@sprintf("Benchmark samples: %d", samples))
    println(@sprintf("Grid (nx,ny,nz)  : (%d,%d,%d)", grid_size...))
    println(@sprintf("Start time       : %s", Dates.format(Dates.now(), "yyyy-mm-dd HH:MM:SS")))
    println()

    cfg = PrepBUFRConfig(max_records=max_records, verbose=false)

    # Stage 1: median decode time over `samples` repetitions.
    decode_time = median_elapsed(() -> read_prepbufr(prepbufr_file; config=cfg), samples=samples)

    # One extra (untimed) decode to obtain the observation payload/metadata.
    obs_data = read_prepbufr(prepbufr_file; config=cfg)
    obs_source = get(obs_data.metadata, "format", "unknown")
    n_obs = length(obs_data.obs_values)

    println("[1/2] PrepBUFR Decoding (pybufrkit)")
    println("-"^78)
    println(@sprintf("Median decode time   : %.4f s", decode_time))
    println(@sprintf("Observations decoded : %d", n_obs))
    println(@sprintf("Decoder backend      : %s", obs_source))
    println(@sprintf("Decode throughput    : %.1f obs/s", n_obs / decode_time))
    println()

    # Stage 2: median assimilation time, then one extra run to report
    # convergence diagnostics (gradient norm, iteration count).
    assimilation_time = median_elapsed(() -> assimilation_kernel(obs_data; grid_size=grid_size), samples=samples)
    assimilation_result = assimilation_kernel(obs_data; grid_size=grid_size)

    # Published Fortran GSI tutorial reference: 63,580 obs in ~2.8 s.
    fortran_obs = 63580.0
    fortran_time = 2.8
    fortran_throughput = fortran_obs / fortran_time
    assimilation_throughput = n_obs / assimilation_time
    relative_speed = assimilation_throughput / fortran_throughput

    println("[2/2] Variational Assimilation Workflow")
    println("-"^78)
    println(@sprintf("Median assimilation time : %.4f s", assimilation_time))
    println(@sprintf("Gradient norm (final)    : %.3e", assimilation_result.final_gradient_norm))
    println(@sprintf("Iterations executed      : %d", assimilation_result.iterations))
    println(@sprintf("Assimilation throughput  : %.1f obs/s", assimilation_throughput))
    println(@sprintf("Fortran reference rate   : %.1f obs/s", fortran_throughput))
    println(@sprintf("Relative throughput      : %.2f×", relative_speed))
    println()

    # Persist a machine-readable summary for downstream reporting.
    results_path = joinpath(dirname(dirname(@__FILE__)), "results", "pybufrkit_benchmark_summary.toml")
    mkpath(dirname(results_path))

    results = Dict(
        "timestamp" => Dates.format(Dates.now(), "yyyy-mm-ddTHH:MM:SS"),
        "prepbufr_file" => prepbufr_file,
        "max_records" => max_records,
        "samples" => samples,
        "grid_size" => collect(grid_size),
        "decode_seconds" => decode_time,
        "decode_observations" => n_obs,
        "decode_throughput" => n_obs / decode_time,
        "assimilation_seconds" => assimilation_time,
        "assimilation_iterations" => assimilation_result.iterations,
        "assimilation_gradient_norm" => assimilation_result.final_gradient_norm,
        "assimilation_throughput" => assimilation_throughput,
        "fortran_reference_obs" => fortran_obs,
        "fortran_reference_time" => fortran_time,
        "relative_throughput" => relative_speed,
        "decoder_backend" => obs_source
    )

    open(results_path, "w") do io
        TOML.print(io, results)
    end

    println(@sprintf("Results saved to %s", results_path))
end

# Only run the benchmark when this file is executed as a script, so that
# `include`-ing it (e.g. from tests or the REPL) does not trigger a full run.
if abspath(PROGRAM_FILE) == @__FILE__
    main()
end
