"""
    Comprehensive Benchmark Suite for GSICoreAnalysis.jl

This benchmark suite provides comprehensive performance testing for all major components
of the GSICoreAnalysis.jl package, focusing on:
- Core data structures and operations
- Observation processing pipelines
- Parallel processing capabilities
- Memory management efficiency
- I/O operations
- Algorithmic performance across different scales

# Usage

```julia
# Run all benchmarks
julia benchmarks/benchmark_suite.jl

# Run specific benchmark categories
julia benchmarks/benchmark_suite.jl --category observation_processing
julia benchmarks/benchmark_suite.jl --category parallel_processing

# Compare with baseline
julia benchmarks/benchmark_suite.jl --compare baseline_results.json
```
"""

# Standard libraries
using Dates
using LinearAlgebra
using Printf
using Random
using SparseArrays  # provides sprandn, used by benchmark_matrix_operations
using Statistics

# Third-party packages
using ArgParse
using BenchmarkTools
using JSON3

# Add package to path
push!(LOAD_PATH, dirname(@__DIR__))
using GSICoreAnalysis
using GSICoreAnalysis.Performance.PerformanceOptimization
using GSICoreAnalysis.Performance.ParallelProcessing
using GSICoreAnalysis.Performance.MemoryManagement
using GSICoreAnalysis.Performance.Profiling

"""
    BenchmarkConfiguration

Immutable settings bundle controlling benchmark execution: the data sizes
to sweep, the parallel and memory parameter sets, the directory results
are written to, and an optional baseline file for regression comparison.

Constructed exclusively through the keyword constructor, which provides a
sensible default for every field.
"""
struct BenchmarkConfiguration
    data_sizes::Vector{Int}
    parallel_configs::Vector{Dict{String, Any}}
    memory_configs::Vector{Dict{String, Any}}
    output_dir::String
    enable_comparison::Bool
    baseline_file::Union{Nothing, String}

    # Keyword-only inner constructor; defaults cover a single-threaded and a
    # fully-threaded parallel configuration plus two memory-pool settings.
    function BenchmarkConfiguration(;
        data_sizes = [100, 1000, 10000, 50000],
        parallel_configs = [
            Dict("threads" => 1, "enable_simd" => false),
            Dict("threads" => Threads.nthreads(), "enable_simd" => true),
        ],
        memory_configs = [
            Dict("pool_size" => 1000, "gc_threshold" => 0.8),
            Dict("pool_size" => 10000, "gc_threshold" => 0.9),
        ],
        output_dir = "benchmark_results",
        enable_comparison = false,
        baseline_file = nothing,
    )
        return new(
            data_sizes,
            parallel_configs,
            memory_configs,
            output_dir,
            enable_comparison,
            baseline_file,
        )
    end
end

"""
    create_mock_observations(n::Int)

Create `n` mock observations (NamedTuples) for benchmarking.

Each observation carries an id, a noisy value/error, a random lat/lon
location, a quality score in [0.3, 1.0), a random type/instrument, a
timestamp within the last 24 hours, and a small metadata Dict.

Deterministic across calls (fixed seed) except for the `time` field,
which is derived from the wall clock.
"""
function create_mock_observations(n::Int)
    # FIX: use a local RNG instead of Random.seed! so the benchmark data is
    # reproducible without clobbering the task-global RNG state.
    rng = MersenneTwister(12345)

    # FIX: a comprehension gives the result a concrete NamedTuple eltype.
    # The previous `observations = []` produced a Vector{Any}, which
    # penalized every downstream benchmark iterating the data.
    return [
        (
            id = i,
            value = 285.0 + 10 * randn(rng),
            error = 0.5 + 0.3 * rand(rng),
            location = (lat = 40.0 + 20 * randn(rng), lon = -75.0 + 30 * randn(rng)),
            quality = 0.3 + 0.7 * rand(rng),
            observation_type = rand(rng, [:temperature, :pressure, :humidity, :wind]),
            instrument = rand(rng, [:radiosonde, :surface, :aircraft, :satellite]),
            time = now() - Minute(rand(rng, 1:1440)),
            metadata = Dict(
                "station_id" => "$(rand(rng, 1000:9999))",
                "elevation" => rand(rng, 0:3000),
                "qc_flags" => rand(rng, 0:7),
            ),
        )
        for i in 1:n
    ]
end

"""
    benchmark_data_structures()

Benchmark core data structure operations.

Builds a `BenchmarkGroup` covering vector creation/filter/map/sort and
dictionary lookup/iteration over mock observations of 1 000, 10 000, and
100 000 elements. The suite is only *defined* here; timings are collected
when the caller `run`s it.
"""
function benchmark_data_structures()
    @info "Benchmarking data structures..."

    suite = BenchmarkGroup()

    for n in [1000, 10000, 100000]
        observations = create_mock_observations(n)

        group = BenchmarkGroup()

        # Vector operations
        group["vector_creation"] = @benchmarkable create_mock_observations($n)
        group["vector_filtering"] = @benchmarkable filter(obs -> obs.quality > 0.7, $observations)
        group["vector_mapping"] = @benchmarkable map(obs -> obs.value, $observations)
        group["vector_sorting"] = @benchmarkable sort($observations, by=obs -> obs.quality)

        # Dictionary operations over a capped subset (at most 1000 entries,
        # 100 sampled keys) so dict benchmarks stay comparable across sizes.
        obs_dict = Dict(obs.id => obs for obs in observations[1:min(1000, n)])
        keys_sample = collect(keys(obs_dict))[1:min(100, length(obs_dict))]

        # BUG FIX: `obs_dict` is a local variable and MUST be interpolated
        # into @benchmarkable — benchmark bodies are eval'd in module scope,
        # so the un-interpolated reference threw UndefVarError at run time.
        group["dict_lookup"] = @benchmarkable [$obs_dict[k] for k in $keys_sample]
        group["dict_iteration"] = @benchmarkable [obs for obs in values($obs_dict)]

        suite["size_$n"] = group
    end

    return suite
end

"""
    benchmark_observation_processing()

Benchmark observation processing operations.

Covers quality-control filtering, planar-distance spatial filtering,
simulated bias correction, and temporal (hour-of-day) grouping over mock
observation sets of 1 000, 5 000, and 20 000 elements. The suite is only
defined here; timings are collected when the caller `run`s it.
"""
function benchmark_observation_processing()
    @info "Benchmarking observation processing..."

    suite = BenchmarkGroup()

    for n in [1000, 5000, 20000]
        observations = create_mock_observations(n)

        group = BenchmarkGroup()

        # Quality control. Thresholds are literals inside the measured
        # expression so no dictionary lookups pollute the timing.
        # (FIX: removed the unused `qc_thresholds` Dict that duplicated
        # these values without ever being read.)
        group["quality_control"] = @benchmarkable begin
            passed = filter(obs -> obs.quality >= 0.5 && 200.0 <= obs.value <= 350.0, $observations)
            length(passed)
        end

        # Spatial processing: radius filter around a fixed point using plain
        # Euclidean distance in degrees (adequate for a relative benchmark).
        target_location = (lat=40.0, lon=-75.0)
        group["spatial_filtering"] = @benchmarkable begin
            nearby = filter(obs -> begin
                lat_diff = obs.location.lat - $target_location.lat
                lon_diff = obs.location.lon - $target_location.lon
                sqrt(lat_diff^2 + lon_diff^2) < 5.0
            end, $observations)
            length(nearby)
        end

        # Bias correction simulation: per-observation additive offset applied
        # via NamedTuple `merge` (allocates a new tuple per element).
        bias_coeffs = randn(n)
        group["bias_correction"] = @benchmarkable begin
            corrected = map(i -> begin
                obs = $observations[i]
                correction = $bias_coeffs[i]
                merge(obs, (value = obs.value + correction,))
            end, 1:length($observations))
            length(corrected)
        end

        # Temporal aggregation: bucket observations by hour of day.
        group["temporal_grouping"] = @benchmarkable begin
            by_hour = Dict{Int, Vector{eltype($observations)}}()
            for obs in $observations
                hour = Dates.hour(obs.time)
                if !haskey(by_hour, hour)
                    by_hour[hour] = []
                end
                push!(by_hour[hour], obs)
            end
            length(by_hour)
        end

        suite["size_$n"] = group
    end

    return suite
end

"""
    benchmark_parallel_processing()

Benchmark parallel processing capabilities.

Compares a sequential `map`, a `Threads.@threads` elementwise loop, and a
manually chunked threaded loop over mock observations of several sizes.
The threaded variants are only defined when more than one thread is
available.
"""
function benchmark_parallel_processing()
    @info "Benchmarking parallel processing..."

    suite = BenchmarkGroup()

    if Threads.nthreads() == 1
        @warn "Running on single thread - parallel benchmarks may not show expected speedup"
    end

    for n in [5000, 20000, 50000]
        observations = create_mock_observations(n)

        group = BenchmarkGroup()

        # Simulated per-observation workload: cheap, but involves enough
        # floating-point work that it is not optimized away.
        process_func = obs -> begin
            result = obs.value^2 + sin(obs.value)
            sqrt(abs(result))
        end

        group["sequential"] = @benchmarkable map($process_func, $observations)

        if Threads.nthreads() > 1
            # One iteration per element; the @threads scheduler does the chunking.
            group["threaded"] = @benchmarkable begin
                results = Vector{Float64}(undef, length($observations))
                Threads.@threads for i in eachindex($observations)
                    results[i] = $process_func($observations[i])
                end
                results
            end

            # Manual chunking: one contiguous slice per thread id.
            group["chunked_parallel"] = @benchmarkable begin
                # BUG FIX: the original computed chunk_size with `÷`, which
                # left up to nthreads()-1 trailing elements unprocessed (and
                # therefore undef in `results`) whenever the length was not a
                # multiple of the thread count. `cld` rounds up so the chunks
                # cover every index; surplus tasks simply get an empty range.
                chunk_size = max(1, cld(length($observations), Threads.nthreads()))
                results = Vector{Float64}(undef, length($observations))

                Threads.@threads for tid in 1:Threads.nthreads()
                    start_idx = (tid - 1) * chunk_size + 1
                    end_idx = min(tid * chunk_size, length($observations))
                    for i in start_idx:end_idx
                        results[i] = $process_func($observations[i])
                    end
                end
                results
            end
        end

        suite["size_$n"] = group
    end

    return suite
end

"""
    benchmark_memory_operations()

Benchmark memory management operations.

Measures raw allocation (`undef`, `zeros`, `copy`), a buffer-reuse
pattern, and a GC-pressure pattern for sizes 1 000, 10 000, and 50 000.
Note that `memory_reuse` and `gc_pressure` use fixed internal sizes, so
only the allocation benchmarks actually scale with `n`.
"""
function benchmark_memory_operations()
    @info "Benchmarking memory operations..."

    suite = BenchmarkGroup()

    for n in [1000, 10000, 50000]
        group = BenchmarkGroup()

        # Array allocation and copying. `$(randn(n))` is evaluated once at
        # definition time, so every sample copies the same source array.
        group["array_allocation"] = @benchmarkable Vector{Float64}(undef, $n)
        group["array_zeros"] = @benchmarkable zeros($n)
        group["array_copy"] = @benchmarkable copy($(randn(n)))

        # Buffer-reuse pattern: allocate a handful of small buffers, fill,
        # and reduce. (FIX: removed the unused `test_data = randn(n)` local
        # that allocated on every suite construction without being read.)
        group["memory_reuse"] = @benchmarkable begin
            temp_arrays = [Vector{Float64}(undef, 100) for _ in 1:10]
            for arr in temp_arrays
                arr .= rand(100)
            end
            sum(sum(arr) for arr in temp_arrays)
        end

        # GC pressure: create many short-lived arrays and drop the references.
        group["gc_pressure"] = @benchmarkable begin
            temp_objects = [randn(100) for _ in 1:100]
            result = sum(sum(obj) for obj in temp_objects)
            temp_objects = nothing  # release references so they become collectable
            result
        end

        suite["size_$n"] = group
    end

    return suite
end

"""
    benchmark_spatial_operations()

Benchmark spatial indexing and queries.

Compares a brute-force linear radius search against a simulated
uniform-grid spatial hash over observation sets of 1 000, 5 000, and
20 000 points. The suite is only defined here; timings are collected when
the caller `run`s it.
"""
function benchmark_spatial_operations()
    @info "Benchmarking spatial operations..."

    suite = BenchmarkGroup()

    for n in [1000, 5000, 20000]
        observations = create_mock_observations(n)

        group = BenchmarkGroup()

        # Linear search (baseline): O(n) scan computing planar distance in
        # lat/lon degrees (not great-circle — fine for a relative benchmark).
        query_point = (lat=40.0, lon=-75.0)
        radius = 5.0

        group["linear_search"] = @benchmarkable begin
            results = []
            for (i, obs) in enumerate($observations)
                lat_diff = obs.location.lat - $query_point.lat
                lon_diff = obs.location.lon - $query_point.lon
                distance = sqrt(lat_diff^2 + lon_diff^2)
                if distance <= $radius
                    push!(results, i)
                end
            end
            length(results)
        end

        # Spatial hash (simulated): build a coarse uniform grid, then return
        # every candidate in the 3x3 block of cells around the query point.
        # The build cost is part of the measured time, so this benchmarks a
        # one-shot build+query rather than amortized repeated queries.
        #
        # NOTE(review): with cell_size = 2.0 the 3x3 neighborhood spans less
        # than `radius` (5.0), and no distance filter is applied, so the
        # candidate set is NOT equivalent to linear_search's result — this is
        # acceptable for a timing comparison but not a correctness check.
        group["spatial_hash"] = @benchmarkable begin
            # Simulate spatial hash by grouping into grid cells
            cell_size = 2.0
            grid = Dict{Tuple{Int,Int}, Vector{Int}}()

            for (i, obs) in enumerate($observations)
                grid_x = floor(Int, obs.location.lat / cell_size)
                grid_y = floor(Int, obs.location.lon / cell_size)
                key = (grid_x, grid_y)

                if !haskey(grid, key)
                    grid[key] = Int[]
                end
                push!(grid[key], i)
            end

            # Query the 3x3 block of cells centered on the query point
            query_grid_x = floor(Int, $query_point.lat / cell_size)
            query_grid_y = floor(Int, $query_point.lon / cell_size)

            results = []
            for dx in -1:1, dy in -1:1
                key = (query_grid_x + dx, query_grid_y + dy)
                if haskey(grid, key)
                    append!(results, grid[key])
                end
            end

            length(results)
        end

        suite["size_$n"] = group
    end

    return suite
end

"""
    benchmark_matrix_operations()

Benchmark matrix operations relevant to data assimilation.

Covers dense multiply/matrix-vector/transpose/inverse, sparse
matrix-vector products (for n ≤ 200), and symmetric matrix-vector
products for sizes 50-500. Requires the `SparseArrays` stdlib to be
loaded at file scope (it provides `sprandn`).
"""
function benchmark_matrix_operations()
    @info "Benchmarking matrix operations..."

    suite = BenchmarkGroup()

    for n in [50, 100, 200, 500]
        group = BenchmarkGroup()

        # Dense operands, generated once per size so every sample sees the
        # same data.
        A = randn(n, n)
        B = randn(n, n)
        x = randn(n)

        group["matrix_multiply"] = @benchmarkable $A * $B
        group["matrix_vector"] = @benchmarkable $A * $x
        group["matrix_transpose"] = @benchmarkable transpose($A)
        # The `+ I` shift keeps the matrix comfortably invertible; note the
        # addition is measured together with the inversion.
        group["matrix_inverse"] = @benchmarkable inv($A + I)

        # Sparse matrices (relevant for large-scale DA). BUG FIX: `sprandn`
        # lives in the SparseArrays stdlib, which was never imported at the
        # top of this file, making this line an UndefVarError; the import has
        # been added to the file's dependency block.
        if n <= 200  # keep sparse benchmarks small for reasonable run times
            sparsity = 0.1
            S = sprandn(n, n, sparsity)

            group["sparse_multiply"] = @benchmarkable $S * $x
            group["sparse_transpose"] = @benchmarkable transpose($S)
        end

        # Symmetric wrapper (common in covariance operations) dispatches to
        # specialized symmetric kernels.
        Sym = Symmetric(A)
        group["symmetric_multiply"] = @benchmarkable $Sym * $x

        suite["size_$n"] = group
    end

    return suite
end

"""
    benchmark_io_operations()

Benchmark I/O operations.

Measures JSON write/read (via JSON3) and raw binary write/read of
`Float64` vectors for 1 000, 10 000, and 50 000 observations. Fixture
files live in a temporary directory that is removed at process exit —
NOT when this function returns, because the returned suite is lazy: the
benchmarks reference these files when the caller later `run`s the suite.
"""
function benchmark_io_operations()
    @info "Benchmarking I/O operations..."

    suite = BenchmarkGroup()

    # BUG FIX: the original wrapped this in try/finally and deleted temp_dir
    # before returning — but @benchmarkable definitions are lazy, so every
    # I/O benchmark then failed at run time because its files were gone.
    # Defer cleanup to process exit instead.
    temp_dir = mktempdir()
    atexit(() -> rm(temp_dir; recursive=true, force=true))

    for n in [1000, 10000, 50000]
        observations = create_mock_observations(n)

        group = BenchmarkGroup()

        # JSON serialization/deserialization
        json_file = joinpath(temp_dir, "test_$n.json")
        group["json_write"] = @benchmarkable begin
            open($json_file, "w") do io
                JSON3.write(io, $observations)
            end
        end

        # Materialize the file eagerly so json_read has valid input
        # regardless of benchmark execution order.
        open(json_file, "w") do io
            JSON3.write(io, observations)
        end

        group["json_read"] = @benchmarkable begin
            open($json_file, "r") do io
                JSON3.read(io)
            end
        end

        # Binary I/O on the raw observation values
        binary_file = joinpath(temp_dir, "test_$n.dat")
        values = [obs.value for obs in observations]

        group["binary_write"] = @benchmarkable begin
            open($binary_file, "w") do io
                write(io, $values)
            end
        end

        # BUG FIX: materialize the binary file eagerly too — the original
        # relied on the write benchmark having run first, which is not
        # guaranteed.
        open(binary_file, "w") do io
            write(io, values)
        end

        # BUG FIX: `read(io, Vector{Float64})` is not a Base method and threw
        # a MethodError; read into a preallocated buffer with `read!`.
        group["binary_read"] = @benchmarkable begin
            open($binary_file, "r") do io
                read!(io, Vector{Float64}(undef, $n))
            end
        end

        suite["size_$n"] = group
    end

    return suite
end

"""
    run_benchmark_suite(config::BenchmarkConfiguration)

Run the complete benchmark suite.

Executes every benchmark category, serializes the results to a
timestamped JSON file under `config.output_dir`, writes a human-readable
summary report, and optionally compares against a baseline file. A
category that throws is recorded as an `"error"` entry instead of
aborting the whole suite. Returns the results `Dict`.
"""
function run_benchmark_suite(config::BenchmarkConfiguration)
    @info "Starting comprehensive benchmark suite..."

    # Ensure output directory exists
    mkpath(config.output_dir)

    results = Dict{String, Any}()

    # Record the environment so results can be interpreted later.
    results["system_info"] = Dict(
        "julia_version" => string(VERSION),
        "cpu_threads" => Sys.CPU_THREADS,
        "available_threads" => Threads.nthreads(),
        "total_memory_gb" => round(Sys.total_memory() / 1024^3, digits=2),
        "timestamp" => now()
    )

    # Category name => suite-builder function. Keep in sync with the
    # dispatch table in main().
    benchmark_categories = [
        ("data_structures", benchmark_data_structures),
        ("observation_processing", benchmark_observation_processing),
        ("parallel_processing", benchmark_parallel_processing),
        ("memory_operations", benchmark_memory_operations),
        ("spatial_operations", benchmark_spatial_operations),
        ("matrix_operations", benchmark_matrix_operations),
        ("io_operations", benchmark_io_operations)
    ]

    for (category_name, benchmark_func) in benchmark_categories
        @info "Running $category_name benchmarks..."

        try
            suite = benchmark_func()
            benchmark_results = run(suite)

            # Convert results to serializable format
            results[category_name] = serialize_benchmark_results(benchmark_results)

            @info "Completed $category_name benchmarks"
        catch e
            # FIX: include the backtrace in the log record — a bare
            # `exception=e` drops the stack trace and makes failures
            # undiagnosable.
            @error "Failed to run $category_name benchmarks" exception=(e, catch_backtrace())
            results[category_name] = Dict("error" => string(e))
        end
    end

    # Persist the raw results.
    timestamp = Dates.format(now(), "yyyy-mm-dd_HH-MM-SS")
    results_file = joinpath(config.output_dir, "benchmark_results_$timestamp.json")

    open(results_file, "w") do io
        JSON3.pretty(io, results)
    end

    @info "Benchmark results saved" file=results_file

    # Generate summary report
    generate_summary_report(results, config.output_dir, timestamp)

    # Compare with baseline if requested
    if config.enable_comparison && config.baseline_file !== nothing
        compare_with_baseline(results, config.baseline_file, config.output_dir, timestamp)
    end

    return results
end

"""
    serialize_benchmark_results(benchmark_results)

Convert benchmark results to a JSON-serializable nested `Dict`.

`BenchmarkGroup`s become `Dict{String, Any}` (keys stringified), each
`BenchmarkTools.Trial` becomes a flat Dict of summary statistics, and any
other value is returned unchanged.
"""
function serialize_benchmark_results(benchmark_results)
    # Flatten a single trial into summary statistics (times in ns).
    function serialize_trial(trial::BenchmarkTools.Trial)
        return Dict(
            "median_time_ns" => median(trial.times),
            "mean_time_ns" => mean(trial.times),
            "min_time_ns" => minimum(trial.times),
            "max_time_ns" => maximum(trial.times),
            "std_time_ns" => std(trial.times),
            # NOTE(review): Trial.memory and Trial.allocs appear to be
            # per-evaluation scalars in current BenchmarkTools, in which case
            # `median` is effectively the identity here — confirm against the
            # pinned BenchmarkTools version.
            "median_memory_bytes" => median(trial.memory),
            "allocs" => trial.allocs,
            "samples" => length(trial.times)
        )
    end

    # Recursively serialize a group; non-Trial, non-group values are dropped.
    function serialize_group(group::BenchmarkGroup)
        result = Dict{String, Any}()
        for (key, value) in group
            # FIX: BenchmarkGroup keys are not restricted to String (tuples
            # and symbols are common); stringify so assignment into a
            # Dict{String, Any} cannot throw a conversion MethodError.
            if value isa BenchmarkTools.Trial
                result[string(key)] = serialize_trial(value)
            elseif value isa BenchmarkGroup
                result[string(key)] = serialize_group(value)
            end
        end
        return result
    end

    if benchmark_results isa BenchmarkGroup
        return serialize_group(benchmark_results)
    elseif benchmark_results isa BenchmarkTools.Trial
        return serialize_trial(benchmark_results)
    else
        return benchmark_results
    end
end

"""
    generate_summary_report(results::Dict{String, Any}, output_dir::String, timestamp::String)

Write a human-readable summary of `results` to
`output_dir/benchmark_summary_<timestamp>.txt`.

The report starts with the recorded system information, then one section
per benchmark category; categories that failed are printed as a single
ERROR line instead of a timing table.
"""
function generate_summary_report(results::Dict{String, Any}, output_dir::String, timestamp::String)
    report_file = joinpath(output_dir, "benchmark_summary_$timestamp.txt")

    open(report_file, "w") do io
        sysinfo = results["system_info"]
        println(io, "GSICoreAnalysis.jl Benchmark Summary")
        println(io, "====================================")
        println(io, "Generated: $(sysinfo["timestamp"])")
        println(io, "Julia Version: $(sysinfo["julia_version"])")
        println(io, "CPU Threads: $(sysinfo["cpu_threads"])")
        println(io, "Available Threads: $(sysinfo["available_threads"])")
        println(io, "Total Memory: $(sysinfo["total_memory_gb"]) GB")
        println(io)

        for (category, data) in results
            # The system block was already printed above.
            category == "system_info" && continue

            header = "Category: $category"
            if haskey(data, "error")
                println(io, "$header - ERROR: $(data["error"])")
            else
                println(io, header)
                println(io, repeat("-", length(header)))
                print_benchmark_summary(io, data, 1)
                println(io)
            end
        end
    end

    @info "Summary report saved" file=report_file
end

"""
    print_benchmark_summary(io::IO, data::Dict, indent::Int)

Recursively print benchmark summary data to `io`.

A nested Dict containing `"median_time_ns"` is treated as a trial result
and rendered as `name: <ms> ms, <MB> MB`; any other Dict is treated as a
nested group and recursed into with one extra indent level. Non-Dict
values are ignored.
"""
function print_benchmark_summary(io::IO, data::Dict, indent::Int)
    pad = "  " ^ indent

    for (name, entry) in data
        entry isa Dict || continue

        if haskey(entry, "median_time_ns")
            # Leaf trial: convert ns -> ms and bytes -> MB for readability.
            ms = round(entry["median_time_ns"] / 1e6, digits=2)
            mb = round(entry["median_memory_bytes"] / 1024^2, digits=2)
            println(io, "$(pad)$name: $ms ms, $mb MB")
        else
            # Nested group: print its header, then recurse one level deeper.
            println(io, "$(pad)$name:")
            print_benchmark_summary(io, entry, indent + 1)
        end
    end
end

"""
    compare_with_baseline(current_results::Dict, baseline_file::String,
                         output_dir::String, timestamp::String)

Compare current results with baseline results.

Loads the baseline JSON file, and for every category present in both
result sets writes a per-benchmark speedup/regression report to
`output_dir/benchmark_comparison_<timestamp>.txt`. Warns and returns
early when the baseline file does not exist.
"""
function compare_with_baseline(current_results::Dict, baseline_file::String, 
                              output_dir::String, timestamp::String)
    if !isfile(baseline_file)
        @warn "Baseline file not found" file=baseline_file
        return
    end
    
    @info "Comparing with baseline..." file=baseline_file
    
    # BUG FIX: JSON3.read(::AbstractString) parses the *string itself* as
    # JSON source text. Passing the file path made it try to parse the path
    # and fail; read the file contents first.
    baseline_results = JSON3.read(read(baseline_file, String), Dict)
    comparison_file = joinpath(output_dir, "benchmark_comparison_$timestamp.txt")
    
    open(comparison_file, "w") do io
        println(io, "Benchmark Comparison Report")
        println(io, "===========================")
        println(io, "Baseline: $baseline_file")
        println(io, "Current: $timestamp")
        println(io)
        
        for category in keys(current_results)
            # Only compare categories present in both result sets.
            if category == "system_info" || !haskey(baseline_results, category)
                continue
            end
            
            println(io, "Category: $category")
            println(io, repeat("-", length("Category: $category")))
            
            compare_category_results(io, current_results[category], baseline_results[category])
            println(io)
        end
    end
    
    @info "Comparison report saved" file=comparison_file
end

"""
    compare_category_results(io::IO, current::Dict, baseline::Dict)

Compare benchmark results for a specific category.

For every key present in both Dicts: if both values are trial summaries
(contain `"median_time_ns"`), print current-vs-baseline times with a
speedup factor, percent change, and an IMPROVED/REGRESSED/UNCHANGED
verdict (5% tolerance band); if both are plain Dicts, recurse.
"""
function compare_category_results(io::IO, current::Dict, baseline::Dict)
    # A "trial" entry is a Dict carrying the median-time statistic.
    istrial(x) = x isa Dict && haskey(x, "median_time_ns")

    for (name, cur_val) in current
        haskey(baseline, name) || continue
        base_val = baseline[name]

        if istrial(cur_val) && istrial(base_val)
            cur_ms = cur_val["median_time_ns"] / 1e6   # ns -> ms
            base_ms = base_val["median_time_ns"] / 1e6
            ratio = base_ms / cur_ms                   # >1 means faster now
            delta = (cur_ms - base_ms) / base_ms * 100

            verdict = ratio > 1.05 ? "IMPROVED" :
                      ratio < 0.95 ? "REGRESSED" : "UNCHANGED"

            println(io, "  $name: $(round(cur_ms, digits=2)) ms vs $(round(base_ms, digits=2)) ms " *
                       "($(round(ratio, digits=2))x, $(round(delta, digits=1))%) [$verdict]")
        elseif cur_val isa Dict && base_val isa Dict
            # Nested group: recurse one level deeper.
            println(io, "  $name:")
            compare_category_results(io, cur_val, base_val)
        end
    end
end

"""
    parse_command_line()

Parse command line arguments for the benchmark driver.

Recognized flags:
- `--category`: benchmark category to run, or `"all"` (default).
- `--compare`: path to a baseline JSON results file; presence enables comparison.
- `--output-dir`: directory for result files (default `"benchmark_results"`).
- `--sizes`: comma-separated data sizes (default `"1000,10000,50000"`).

Returns the `Dict` produced by `ArgParse.parse_args` (flags absent on the
command line and without a default, e.g. `--compare`, map to `nothing`).

NOTE(review): `--sizes` is stored into `BenchmarkConfiguration.data_sizes`
by `main`, but the benchmark functions currently hard-code their own size
lists — verify whether the flag is meant to be wired through.
"""
function parse_command_line()
    s = ArgParseSettings()
    
    @add_arg_table! s begin
        "--category"
            help = "Run specific benchmark category"
            arg_type = String
            default = "all"
        "--compare"
            help = "Baseline file for comparison"
            arg_type = String
        "--output-dir"
            help = "Output directory for results"
            arg_type = String
            default = "benchmark_results"
        "--sizes"
            help = "Comma-separated list of data sizes to test"
            arg_type = String
            default = "1000,10000,50000"
    end
    
    return parse_args(s)
end

"""
    main()

Entry point for the benchmark driver.

Parses command-line flags, builds a `BenchmarkConfiguration`, and either
runs the full suite (`--category all`) or a single category, writing that
category's serialized results to a timestamped JSON file.
"""
function main()
    args = parse_command_line()

    # Parse the comma-separated --sizes flag into integers.
    sizes = parse.(Int, split(args["sizes"], ","))

    config = BenchmarkConfiguration(
        data_sizes = sizes,
        output_dir = args["output-dir"],
        enable_comparison = args["compare"] !== nothing,
        baseline_file = args["compare"]
    )

    category = args["category"]

    if category == "all"
        run_benchmark_suite(config)
    else
        # Dispatch table mapping category names to suite builders.
        runners = Dict(
            "data_structures" => benchmark_data_structures,
            "observation_processing" => benchmark_observation_processing,
            "parallel_processing" => benchmark_parallel_processing,
            "memory_operations" => benchmark_memory_operations,
            "spatial_operations" => benchmark_spatial_operations,
            "matrix_operations" => benchmark_matrix_operations,
            "io_operations" => benchmark_io_operations
        )
        haskey(runners, category) || error("Unknown benchmark category: $category")

        @info "Running $category benchmarks only..."

        results = run(runners[category]())

        # Persist this category's serialized results.
        mkpath(config.output_dir)
        timestamp = Dates.format(now(), "yyyy-mm-dd_HH-MM-SS")
        results_file = joinpath(config.output_dir, "benchmark_$(category)_$timestamp.json")

        open(results_file, "w") do io
            JSON3.pretty(io, serialize_benchmark_results(results))
        end

        @info "Category benchmark results saved" file=results_file
    end

    @info "Benchmark execution completed"
end

# Run main() only when this file is executed as a script (julia benchmarks/
# benchmark_suite.jl), not when it is `include`d or loaded by another program.
if abspath(PROGRAM_FILE) == @__FILE__
    main()
end