"""
# Comprehensive Performance Benchmarking Module

Advanced benchmarking utilities for NSEMSolver.jl performance analysis and regression detection.
"""

using Statistics
using Printf
using Random
using LinearAlgebra
using Dates

# Availability flags for optional benchmarking dependencies.
# They are `Ref`s so `check_benchmarking_deps` can flip them at module load.
const HAS_BENCHMARK_TOOLS = Ref(false)
const HAS_CSV = Ref(false)
const HAS_DATAFRAMES = Ref(false)
const HAS_JSON3 = Ref(false)

"""
    check_benchmarking_deps()

Probe for optional benchmarking dependencies (BenchmarkTools, CSV,
DataFrames, JSON3) and set the corresponding `HAS_*` flags. Each missing
package only produces a warning; the module stays usable with reduced
functionality.
"""
function check_benchmarking_deps()
    # Try to load `pkg` into the module; set `flag` on success, warn on failure.
    function try_load!(pkg::Symbol, flag::Ref{Bool}, missing_msg::String)
        try
            @eval using $pkg
            flag[] = true
            return true
        catch
            @warn missing_msg
            return false
        end
    end

    if try_load!(:BenchmarkTools, HAS_BENCHMARK_TOOLS,
                 "BenchmarkTools.jl not available - using basic timing")
        @info "BenchmarkTools.jl available for micro-benchmarking"
    end
    try_load!(:CSV, HAS_CSV, "CSV.jl not available - CSV export disabled")
    try_load!(:DataFrames, HAS_DATAFRAMES, "DataFrames.jl not available - structured data disabled")
    try_load!(:JSON3, HAS_JSON3, "JSON3.jl not available - JSON export disabled")
    return nothing
end

"""
    PerformanceMetrics

Aggregated performance statistics for one benchmarked solver configuration.
All fields default to zero/empty so partially-populated metrics are valid.
"""
Base.@kwdef struct PerformanceMetrics
    # Wall-clock timing statistics (seconds)
    mean_time::Float64 = 0.0
    median_time::Float64 = 0.0
    std_time::Float64 = 0.0
    min_time::Float64 = 0.0
    max_time::Float64 = 0.0

    # GC allocation statistics
    total_allocations::Int64 = 0
    total_memory::Int64 = 0
    allocations_per_second::Float64 = 0.0
    memory_per_second::Float64 = 0.0
    peak_memory::Int64 = 0

    # Solver convergence statistics
    mean_iterations::Float64 = 0.0
    mean_residual::Float64 = 0.0
    convergence_rate::Float64 = 0.0

    # Throughput
    dofs_per_second::Float64 = 0.0
    timesteps_per_second::Float64 = 0.0

    # Trial success accounting
    success_rate::Float64 = 0.0
    total_trials::Int = 0
    successful_trials::Int = 0

    # Human-readable problem description
    dofs::Int = 0
    problem_size::String = ""
    configuration::String = ""
end

"""
    BenchmarkConfig

Settings controlling how benchmarks are run, recorded, and compared.
"""
Base.@kwdef struct BenchmarkConfig
    # How many timed trials / samples to collect
    n_trials::Int = 3
    n_samples::Int = 10
    min_time_ns::Int64 = 1_000_000_000  # 1 second minimum
    max_time_ns::Int64 = 30_000_000_000  # 30 seconds maximum

    # Memory-profiling behavior
    track_allocations::Bool = true
    gc_between_trials::Bool = true
    warmup_trials::Int = 1

    # Output / export behavior
    verbose::Bool = true
    save_raw_data::Bool = true
    export_csv::Bool = false
    export_json::Bool = false

    # Regression detection against a stored baseline
    baseline_file::Union{String, Nothing} = nothing
    regression_threshold::Float64 = 1.2  # flag if >20% slower than baseline

    # Whether to capture SystemInfo alongside results
    record_system_info::Bool = true
end

"""
    SystemInfo

Captures comprehensive system information for performance benchmarking.
Defaults are evaluated at construction time, so a fresh `SystemInfo()`
snapshots the current machine and Julia session.
"""
Base.@kwdef struct SystemInfo
    # Host / runtime identification
    julia_version::String = string(VERSION)
    os::String = string(Sys.KERNEL)
    arch::String = string(Sys.ARCH)
    cpu_name::String = Sys.cpu_info()[1].model
    cpu_cores::Int = Sys.CPU_THREADS
    total_memory::UInt64 = Sys.total_memory()

    # Julia threading / BLAS configuration
    num_threads::Int = Threads.nthreads()
    num_blas_threads::Int = LinearAlgebra.BLAS.get_num_threads()
    blas_vendor::String = string(LinearAlgebra.BLAS.vendor())

    # NSEMSolver backend availability (flags defined at package level)
    has_petsc::Bool = HAS_PETSC
    has_gcr::Bool = HAS_GCR
    has_mpi::Bool = HAS_MPI

    # When this snapshot was taken
    timestamp::String = string(now())
end

"""
    BenchmarkResult

Comprehensive benchmark result structure with all performance data.

Bundles identification metadata, the aggregated `PerformanceMetrics`,
optional per-trial raw samples, and any errors/warnings collected while
benchmarking.
"""
Base.@kwdef struct BenchmarkResult
    # Identification / provenance
    name::String = ""
    description::String = ""
    system_info::SystemInfo = SystemInfo()
    config::BenchmarkConfig = BenchmarkConfig()

    # Aggregated statistics
    metrics::PerformanceMetrics = PerformanceMetrics()

    # Per-trial raw samples (may be left empty)
    raw_times::Vector{Float64} = Float64[]
    raw_allocations::Vector{Int64} = Int64[]
    raw_memory::Vector{Int64} = Int64[]
    convergence_history::Vector{Float64} = Float64[]

    # Diagnostics accumulated during the run
    errors::Vector{String} = String[]
    warnings::Vector{String} = String[]

    # Baseline/scaling comparison data, filled in by analysis passes
    baseline_comparison::Union{Dict{String, Float64}, Nothing} = nothing
    regression_detected::Bool = false
end

"""
    benchmark_solver_performance(solver_configs::Vector; config::BenchmarkConfig=BenchmarkConfig()) -> Vector{BenchmarkResult}

Benchmark solver performance across different configurations.

Each entry of `solver_configs` is run through
`benchmark_single_configuration` and the result is relabelled `"Config_i"`.
"""
function benchmark_solver_performance(solver_configs::Vector; config::BenchmarkConfig=BenchmarkConfig())
    # `@kwdef` does not generate a copy-with-changes constructor, so
    # `BenchmarkResult(result; name=...)` would raise a MethodError.
    # Rebuild the immutable result field-by-field instead.
    function relabel(r::BenchmarkResult; kw...)
        vals = Dict{Symbol,Any}(f => getfield(r, f) for f in fieldnames(BenchmarkResult))
        for (k, v) in kw
            vals[k] = v
        end
        return BenchmarkResult(; vals...)
    end

    results = BenchmarkResult[]

    for (i, solver_config) in enumerate(solver_configs)
        if config.verbose
            println("🔍 Benchmarking configuration $i of $(length(solver_configs))")
            println("   Configuration: $(solver_config)")
        end

        raw = benchmark_single_configuration(solver_config, config)
        push!(results, relabel(raw; name="Config_$i", description=string(solver_config)))
    end

    return results
end

"""
    benchmark_scaling_analysis(N_values::Vector, n_block_values::Vector; config::BenchmarkConfig=BenchmarkConfig()) -> Vector{BenchmarkResult}

Analyze performance scaling with problem size.

Benchmarks every combination of polynomial order `N` and block count
`n_block`, then annotates consecutive results with empirical scaling
exponents via `analyze_scaling_relationships!`.
"""
function benchmark_scaling_analysis(N_values::Vector, n_block_values::Vector; config::BenchmarkConfig=BenchmarkConfig())
    # `@kwdef` does not generate a copy-with-changes constructor, so rebuild
    # the immutable result field-by-field when relabelling.
    function relabel(r::BenchmarkResult; kw...)
        vals = Dict{Symbol,Any}(f => getfield(r, f) for f in fieldnames(BenchmarkResult))
        for (k, v) in kw
            vals[k] = v
        end
        return BenchmarkResult(; vals...)
    end

    scaling_results = BenchmarkResult[]

    if config.verbose
        println("📈 Starting scaling analysis")
        println("   Polynomial orders: $(N_values)")
        println("   Block counts: $(n_block_values)")
    end

    # Test all (N, n_block) combinations.
    for N in N_values, n_block in n_block_values
        solver_config = NSOptions(N=N, n_block=n_block, verbose=false)

        if config.verbose
            println("   Testing N=$N, n_block=$n_block")
        end

        raw = benchmark_single_configuration(solver_config, config)
        push!(scaling_results, relabel(raw;
                                       name="Scaling_N$(N)_B$(n_block)",
                                       description="N=$N, n_block=$n_block"))
    end

    # Attach empirical scaling exponents to consecutive (DOF-sorted) results.
    analyze_scaling_relationships!(scaling_results)

    return scaling_results
end

"""
    benchmark_backend_comparison(backends::Vector{Symbol}; config::BenchmarkConfig=BenchmarkConfig()) -> Vector{BenchmarkResult}

Compare performance across different solver backends.

Backends whose support flags (`HAS_PETSC`, `HAS_GCR`) are not set are
skipped (with a warning when `config.verbose`). All backends solve the
same base problem so timings are comparable.
"""
function benchmark_backend_comparison(backends::Vector{Symbol}; config::BenchmarkConfig=BenchmarkConfig())
    # `@kwdef` does not generate a copy-with-changes constructor, so rebuild
    # the immutable result field-by-field when relabelling.
    function relabel(r::BenchmarkResult; kw...)
        vals = Dict{Symbol,Any}(f => getfield(r, f) for f in fieldnames(BenchmarkResult))
        for (k, v) in kw
            vals[k] = v
        end
        return BenchmarkResult(; vals...)
    end

    backend_results = BenchmarkResult[]

    # Keep only backends whose support was detected at package load.
    available_backends = Symbol[]
    for backend in backends
        if backend == :julia
            push!(available_backends, backend)
        elseif backend == :petsc && HAS_PETSC
            push!(available_backends, backend)
        elseif backend == :gcr && HAS_GCR
            push!(available_backends, backend)
        elseif config.verbose
            @warn "Backend $backend not available, skipping"
        end
    end

    if config.verbose
        println("⚡ Comparing solver backends: $(available_backends)")
    end

    # Common problem definition for a fair comparison across backends.
    base_config = NSOptions(N=4, n_block=3, nu=0.01, tfinal=0.5,
                            cfl=0.4, tol=1e-6, verbose=false)

    for backend in available_backends
        if config.verbose
            println("   Benchmarking $backend backend")
        end

        # NOTE(review): assumes NSOptions provides a copy-with-changes
        # constructor `NSOptions(opts; kw...)` — confirm in the package.
        solver_config = NSOptions(base_config; solver=backend)
        raw = benchmark_single_configuration(solver_config, config)
        push!(backend_results, relabel(raw;
                                       name="Backend_$(backend)",
                                       description="Solver backend: $backend"))
    end

    return backend_results
end

"""
    benchmark_memory_usage(problem_sizes::Vector; config::BenchmarkConfig=BenchmarkConfig()) -> Vector{BenchmarkResult}

Analyze memory usage patterns for different problem sizes.

Each problem size is benchmarked with allocation tracking and inter-trial
GC forced on, regardless of the settings in `config`.
"""
function benchmark_memory_usage(problem_sizes::Vector; config::BenchmarkConfig=BenchmarkConfig())
    # `@kwdef` does not generate a copy-with-changes constructor, so rebuild
    # the immutable result field-by-field when relabelling.
    function relabel(r::BenchmarkResult; kw...)
        vals = Dict{Symbol,Any}(f => getfield(r, f) for f in fieldnames(BenchmarkResult))
        for (k, v) in kw
            vals[k] = v
        end
        return BenchmarkResult(; vals...)
    end

    memory_results = BenchmarkResult[]

    if config.verbose
        println("🧠 Analyzing memory usage patterns")
    end

    # Derive a config with detailed memory tracking forced on. Rebuilt
    # field-by-field (no `BenchmarkConfig(config; kw...)` method exists),
    # and hoisted out of the loop since it does not depend on the size.
    cfg_fields = Dict{Symbol,Any}(f => getfield(config, f) for f in fieldnames(BenchmarkConfig))
    cfg_fields[:track_allocations] = true
    cfg_fields[:gc_between_trials] = true
    memory_config = BenchmarkConfig(; cfg_fields...)

    for (i, size_config) in enumerate(problem_sizes)
        if config.verbose
            println("   Problem size $i: $(size_config)")
        end

        raw = benchmark_single_configuration(size_config, memory_config)
        push!(memory_results, relabel(raw;
                                      name="Memory_Size_$i",
                                      description="Memory usage for $(size_config)"))
    end

    return memory_results
end

"""
    benchmark_convergence_rates(tolerance_values::Vector; config::BenchmarkConfig=BenchmarkConfig()) -> Vector{BenchmarkResult}

Analyze convergence behavior for different tolerance values.

Runs the same base problem once per tolerance in `tolerance_values`,
recording convergence histories (`save_history=true`).
"""
function benchmark_convergence_rates(tolerance_values::Vector; config::BenchmarkConfig=BenchmarkConfig())
    # `@kwdef` does not generate a copy-with-changes constructor, so rebuild
    # the immutable result field-by-field when relabelling.
    function relabel(r::BenchmarkResult; kw...)
        vals = Dict{Symbol,Any}(f => getfield(r, f) for f in fieldnames(BenchmarkResult))
        for (k, v) in kw
            vals[k] = v
        end
        return BenchmarkResult(; vals...)
    end

    convergence_results = BenchmarkResult[]

    if config.verbose
        println("🎯 Analyzing convergence rates")
        println("   Tolerance values: $(tolerance_values)")
    end

    base_config = NSOptions(N=4, n_block=3, nu=0.01, tfinal=1.0,
                            cfl=0.3, verbose=false, save_history=true)

    for tol in tolerance_values
        if config.verbose
            println("   Testing tolerance: $tol")
        end

        # NOTE(review): assumes NSOptions provides a copy-with-changes
        # constructor `NSOptions(opts; kw...)` — confirm in the package.
        solver_config = NSOptions(base_config; tol=tol)
        raw = benchmark_single_configuration(solver_config, config)
        push!(convergence_results, relabel(raw;
                                           name="Tolerance_$(tol)",
                                           description="Tolerance: $tol"))
    end

    return convergence_results
end

"""
    benchmark_single_configuration(solver_config::NSOptions, config::BenchmarkConfig) -> BenchmarkResult

Benchmark a single solver configuration with comprehensive metrics collection.

Runs `config.warmup_trials` untimed warmups, then `config.n_trials` timed
solves, collecting wall time, optional GC allocation counts/bytes, and
convergence data. Failed trials are recorded in the result's `errors`
vector; metrics are computed as long as at least one trial succeeded.
"""
function benchmark_single_configuration(solver_config::NSOptions, config::BenchmarkConfig)
    # Raw per-trial samples
    times = Float64[]
    allocations = Int64[]
    memory_usage = Int64[]
    iterations_data = Int[]
    residuals = Float64[]
    convergence_histories = Vector{Float64}[]
    errors = String[]
    warnings = String[]

    # Warmup runs: absorb JIT compilation cost; results are discarded.
    for _ in 1:config.warmup_trials
        try
            solve_navier_stokes_2d(solver_config)
        catch e
            push!(warnings, "Warmup failed: $e")
        end
        config.gc_between_trials && GC.gc()
    end

    # Main benchmark trials.
    successful_trials = 0
    for trial in 1:config.n_trials
        config.gc_between_trials && GC.gc()

        try
            local start_mem, start_gc
            if config.track_allocations
                start_mem = Base.gc_bytes()
                start_gc = Base.gc_num()
            end

            # Time the solve.
            start_time = time_ns()
            ns_result = solve_navier_stokes_2d(solver_config)
            end_time = time_ns()

            if config.track_allocations
                # Base.gc_alloc_count has no zero-argument method; it takes
                # a GC_Diff (the same pattern Base.@timed uses).
                gc_diff = Base.GC_Diff(Base.gc_num(), start_gc)
                push!(memory_usage, Base.gc_bytes() - start_mem)
                push!(allocations, Base.gc_alloc_count(gc_diff))
            end

            push!(times, (end_time - start_time) / 1e9)  # ns -> seconds
            push!(iterations_data, ns_result.iterations)
            push!(residuals, ns_result.residual_norm)

            if !isempty(ns_result.convergence_history)
                push!(convergence_histories, copy(ns_result.convergence_history))
            end

            successful_trials += 1
        catch e
            push!(errors, "Trial $trial failed: $e")
        end
    end

    if successful_trials == 0
        push!(errors, "All trials failed")
        return BenchmarkResult(config=config, errors=errors, warnings=warnings)
    end

    # Hoist values reused by several metrics below.
    t_mean = mean(times)
    ndofs = estimate_degrees_of_freedom(solver_config.N, solver_config.n_block, solver_config.dim)

    metrics = PerformanceMetrics(
        # Timing
        mean_time = t_mean,
        median_time = median(times),
        std_time = length(times) > 1 ? std(times) : 0.0,  # std of one sample is NaN
        min_time = minimum(times),
        max_time = maximum(times),

        # Memory: the Int64 fields must not receive a fractional Float64
        # mean (that would raise InexactError on conversion), so round.
        total_allocations = isempty(allocations) ? 0 : round(Int64, mean(allocations)),
        total_memory = isempty(memory_usage) ? 0 : round(Int64, mean(memory_usage)),
        allocations_per_second = (!isempty(allocations) && t_mean > 0) ? mean(allocations) / t_mean : 0.0,
        memory_per_second = (!isempty(memory_usage) && t_mean > 0) ? mean(memory_usage) / t_mean : 0.0,
        peak_memory = isempty(memory_usage) ? 0 : maximum(memory_usage),

        # Convergence
        mean_iterations = mean(iterations_data),
        mean_residual = mean(residuals),
        convergence_rate = calculate_convergence_rate(convergence_histories),

        # Throughput
        dofs = ndofs,
        dofs_per_second = t_mean > 0 ? ndofs / t_mean : 0.0,
        timesteps_per_second = (t_mean > 0 && mean(iterations_data) > 0) ? mean(iterations_data) / t_mean : 0.0,

        # Success accounting
        success_rate = successful_trials / config.n_trials,
        total_trials = config.n_trials,
        successful_trials = successful_trials,

        # Problem description
        problem_size = "N=$(solver_config.N), n_block=$(solver_config.n_block), dim=$(solver_config.dim)",
        configuration = string(solver_config),
    )

    # Construct the result once with all fields. The original pattern
    # `BenchmarkResult(result; kw...)` has no method for a @kwdef struct.
    return BenchmarkResult(
        config = config,
        metrics = metrics,
        raw_times = times,
        raw_allocations = allocations,
        raw_memory = memory_usage,
        convergence_history = isempty(convergence_histories) ? Float64[] : convergence_histories[1],
        errors = errors,
        warnings = warnings,
    )
end

"""
    calculate_convergence_rate(convergence_histories::Vector{Vector{Float64}}) -> Float64

Estimate the average exponential residual-decay rate across all recorded
convergence histories.

For each usable history (at least two entries, strictly positive first and
last residuals) the per-iteration decay rate
`-log(r_end / r_start) / (n - 1)` is computed; the mean over histories is
returned, or `0.0` when no history is usable.
"""
function calculate_convergence_rate(convergence_histories::Vector{Vector{Float64}})
    usable = filter(h -> length(h) >= 2 && h[1] > 0 && h[end] > 0,
                    convergence_histories)
    isempty(usable) && return 0.0

    decay_rates = [-log(h[end] / h[1]) / (length(h) - 1) for h in usable]
    return mean(decay_rates)
end

"""
    analyze_scaling_relationships!(scaling_results::Vector{BenchmarkResult})

Analyze and annotate scaling relationships in benchmark results.

Sorts `scaling_results` by DOF count, then for each consecutive pair with
positive DOFs/times and a DOF ratio > 1 stores `dof_ratio`, `time_ratio`,
and the empirical `scaling_exponent` (`log(time ratio) / log(DOF ratio)`)
in the result's `baseline_comparison` dictionary. Entries that lacked a
comparison dictionary are replaced in the vector with updated copies.
"""
function analyze_scaling_relationships!(scaling_results::Vector{BenchmarkResult})
    # Sort by problem size (DOFs) so consecutive pairs are comparable.
    sort!(scaling_results, by = r -> r.metrics.dofs)

    for i in 2:length(scaling_results)
        prev_m = scaling_results[i-1].metrics
        curr_m = scaling_results[i].metrics

        # Skip pairs with missing or degenerate data.
        (prev_m.dofs > 0 && curr_m.dofs > 0 &&
         prev_m.mean_time > 0 && curr_m.mean_time > 0) || continue

        dof_ratio = curr_m.dofs / prev_m.dofs
        time_ratio = curr_m.mean_time / prev_m.mean_time
        dof_ratio > 1 || continue

        comp = scaling_results[i].baseline_comparison
        if comp === nothing
            # The struct is immutable and @kwdef has no copy-with-changes
            # constructor, so rebuild the result with a fresh dict and
            # store it back (rebinding a loop variable would lose it).
            comp = Dict{String, Float64}()
            vals = Dict{Symbol,Any}(f => getfield(scaling_results[i], f)
                                    for f in fieldnames(BenchmarkResult))
            vals[:baseline_comparison] = comp
            scaling_results[i] = BenchmarkResult(; vals...)
        end

        comp["scaling_exponent"] = log(time_ratio) / log(dof_ratio)
        comp["dof_ratio"] = dof_ratio
        comp["time_ratio"] = time_ratio
    end

    return scaling_results
end

"""
    compare_with_baseline(results::Vector{BenchmarkResult}, baseline_file::String) -> Vector{BenchmarkResult}

Compare benchmark results with stored baseline and detect regressions.

Entries of `results` that have a matching baseline are replaced in place
with copies carrying `baseline_comparison` (and `regression_detected` when
the time regression exceeds `config.regression_threshold`). Requires
JSON3.jl; returns `results` unchanged when it, or the file, is missing.
"""
function compare_with_baseline(results::Vector{BenchmarkResult}, baseline_file::String)
    if !HAS_JSON3[]
        @warn "JSON3.jl not available - baseline comparison disabled"
        return results
    end

    if !isfile(baseline_file)
        @warn "Baseline file $baseline_file not found - skipping comparison"
        return results
    end

    # `@kwdef` does not generate a copy-with-changes constructor, so rebuild
    # the immutable result field-by-field.
    function rebuild(r::BenchmarkResult; kw...)
        vals = Dict{Symbol,Any}(f => getfield(r, f) for f in fieldnames(BenchmarkResult))
        for (k, v) in kw
            vals[k] = v
        end
        return BenchmarkResult(; vals...)
    end

    try
        baseline_data = JSON3.read(baseline_file)

        # Index-based loop so updated copies are written back into `results`
        # (rebinding a `for result in results` variable would discard them).
        for i in eachindex(results)
            result = results[i]
            # NOTE(review): JSON3 objects are keyed by Symbol; this relies on
            # JSON3's string-key lookup support — confirm against JSON3 docs.
            haskey(baseline_data, result.name) || continue

            comparison = compare_metrics(result.metrics, baseline_data[result.name])
            updated = rebuild(result; baseline_comparison = comparison)

            # Flag regressions that exceed the configured threshold.
            if haskey(comparison, "time_regression") &&
               comparison["time_regression"] > result.config.regression_threshold
                updated = rebuild(updated; regression_detected = true)
                push!(updated.warnings, "Performance regression detected: $(comparison["time_regression"])x slower")
            end

            results[i] = updated
        end
    catch e
        @warn "Failed to load baseline data: $e"
    end

    return results
end

"""
    compare_metrics(current::PerformanceMetrics, baseline::AbstractDict) -> Dict{String, Float64}

Compare current metrics with baseline metrics.

Returns ratios: `"time_regression"` and `"memory_regression"` (> 1 means
worse than baseline) and `"throughput_improvement"` (> 1 means better).
Accepts any `AbstractDict` so that `JSON3.Object` baselines (which are not
`Dict`s) from `compare_with_baseline` dispatch here as well.
"""
function compare_metrics(current::PerformanceMetrics, baseline::AbstractDict)
    comparison = Dict{String, Float64}()

    # Time comparison (ratio of mean wall times)
    if haskey(baseline, "mean_time") && baseline["mean_time"] > 0
        comparison["time_regression"] = current.mean_time / baseline["mean_time"]
    end

    # Memory comparison
    if haskey(baseline, "total_memory") && baseline["total_memory"] > 0
        comparison["memory_regression"] = current.total_memory / baseline["total_memory"]
    end

    # Throughput comparison
    if haskey(baseline, "dofs_per_second") && baseline["dofs_per_second"] > 0
        comparison["throughput_improvement"] = current.dofs_per_second / baseline["dofs_per_second"]
    end

    return comparison
end

"""
    save_benchmark_results(results::Vector{BenchmarkResult}, output_dir::String)

Save benchmark results in multiple formats.

Writes a JSON dump (when JSON3 is available), a CSV summary (when CSV and
DataFrames are available), and a markdown report, all sharing one
timestamp in their filenames.
"""
function save_benchmark_results(results::Vector{BenchmarkResult}, output_dir::String)
    mkpath(output_dir)

    # One timestamp for all artifacts, formatted without ':' so the names
    # are valid on Windows (`string(now())` contains colons).
    stamp = Dates.format(now(), "yyyy-mm-dd_HHMMSS")

    # JSON: full structured dump.
    if HAS_JSON3[]
        json_file = joinpath(output_dir, "benchmark_results_$(stamp).json")
        try
            JSON3.write(json_file, results)
            @info "Benchmark results saved to $json_file"
        catch e
            @warn "Failed to save JSON results: $e"
        end
    end

    # CSV: tabular summary.
    if HAS_CSV[] && HAS_DATAFRAMES[]
        csv_file = joinpath(output_dir, "benchmark_summary_$(stamp).csv")
        try
            CSV.write(csv_file, results_to_dataframe(results))
            @info "Benchmark summary saved to $csv_file"
        catch e
            @warn "Failed to save CSV results: $e"
        end
    end

    # Markdown: human-readable report.
    report_file = joinpath(output_dir, "benchmark_report_$(stamp).md")
    try
        generate_markdown_report(results, report_file)
        @info "Benchmark report saved to $report_file"
    catch e
        @warn "Failed to save markdown report: $e"
    end

    return nothing
end

"""
    results_to_dataframe(results::Vector{BenchmarkResult}) -> DataFrame

Convert benchmark results to a DataFrame for analysis.

Requires DataFrames.jl (detected by `check_benchmarking_deps`); throws
otherwise.
"""
function results_to_dataframe(results::Vector{BenchmarkResult})
    HAS_DATAFRAMES[] || error("DataFrames.jl required for DataFrame conversion")

    @eval using DataFrames

    # Extract one column per accessor.
    column(f) = [f(r) for r in results]

    return DataFrame(
        name = column(r -> r.name),
        description = column(r -> r.description),
        mean_time = column(r -> r.metrics.mean_time),
        median_time = column(r -> r.metrics.median_time),
        std_time = column(r -> r.metrics.std_time),
        total_memory = column(r -> r.metrics.total_memory),
        dofs = column(r -> r.metrics.dofs),
        dofs_per_second = column(r -> r.metrics.dofs_per_second),
        mean_iterations = column(r -> r.metrics.mean_iterations),
        success_rate = column(r -> r.metrics.success_rate),
        regression_detected = column(r -> r.regression_detected),
    )
end

"""
    generate_markdown_report(results::Vector{BenchmarkResult}, output_file::String)

Generate a comprehensive markdown report from benchmark results.

Sections are written in order: header, system info, performance summary,
detailed results, scaling analysis, regression analysis, recommendations.
"""
function generate_markdown_report(results::Vector{BenchmarkResult}, output_file::String)
    sections = (
        write_markdown_header,
        write_system_info,
        write_performance_summary,
        write_detailed_results,
        write_scaling_analysis,
        write_regression_analysis,
        write_recommendations,
    )

    open(output_file, "w") do io
        for section in sections
            section(io, results)
        end
    end
end

# Emit the report title plus total/successful benchmark counts.
function write_markdown_header(io::IO, results::Vector{BenchmarkResult})
    n_total = length(results)
    n_ok = count(r -> r.metrics.success_rate > 0, results)

    println(io, "# NSEMSolver.jl Performance Benchmark Report")
    println(io, "")
    println(io, "**Generated:** $(now())")
    println(io, "**Total Benchmarks:** $n_total")
    println(io, "**Successful Benchmarks:** $n_ok / $n_total")
    println(io, "")
end

# Emit a table of host/runtime properties from the first result's snapshot.
function write_system_info(io::IO, results::Vector{BenchmarkResult})
    isempty(results) && return
    info = results[1].system_info

    rows = [
        ("Julia Version", "$(info.julia_version)"),
        ("Operating System", "$(info.os)"),
        ("Architecture", "$(info.arch)"),
        ("CPU", "$(info.cpu_name)"),
        ("CPU Cores", "$(info.cpu_cores)"),
        ("Total Memory", "$(round(info.total_memory / 1024^3, digits=2)) GB"),
        ("Julia Threads", "$(info.num_threads)"),
        ("BLAS Threads", "$(info.num_blas_threads)"),
        ("BLAS Vendor", "$(info.blas_vendor)"),
    ]

    println(io, "## System Information")
    println(io, "")
    println(io, "| Property | Value |")
    println(io, "|----------|-------|")
    for (prop, value) in rows
        println(io, "| $prop | $value |")
    end
    println(io, "")

    mark(flag) = flag ? "✅" : "❌"
    println(io, "### Available Backends")
    println(io, "")
    println(io, "- PETSc: $(mark(info.has_petsc))")
    println(io, "- GCR: $(mark(info.has_gcr))")
    println(io, "- MPI: $(mark(info.has_mpi))")
    println(io, "")
end

# Emit a one-row-per-benchmark summary table; failed runs get a stub row.
function write_performance_summary(io::IO, results::Vector{BenchmarkResult})
    println(io, "## Performance Summary")
    println(io, "")
    println(io, "| Benchmark | Mean Time (s) | DOFs/sec | Memory (MB) | Success Rate |")
    println(io, "|-----------|---------------|----------|-------------|--------------|")

    for r in results
        m = r.metrics
        if m.success_rate > 0
            mem_mb = m.total_memory / 1024^2
            println(io, "| $(r.name) | $(@sprintf("%.3f", m.mean_time)) | $(@sprintf("%.0f", m.dofs_per_second)) | $(@sprintf("%.1f", mem_mb)) | $(@sprintf("%.0f", m.success_rate * 100))% |")
        else
            println(io, "| $(r.name) | Failed | - | - | 0% |")
        end
    end
    println(io, "")
end

# Emit a per-benchmark section: description, metric table (on success,
# plus any warnings) or failure status with collected errors.
function write_detailed_results(io::IO, results::Vector{BenchmarkResult})
    println(io, "## Detailed Results")
    println(io, "")

    for r in results
        m = r.metrics

        println(io, "### $(r.name)")
        println(io, "")
        println(io, "**Description:** $(r.description)")
        println(io, "")

        if m.success_rate > 0
            println(io, "| Metric | Value |")
            println(io, "|--------|-------|")
            println(io, "| Mean Time | $(@sprintf("%.3f", m.mean_time)) s |")
            println(io, "| Median Time | $(@sprintf("%.3f", m.median_time)) s |")
            println(io, "| Time Std Dev | $(@sprintf("%.3f", m.std_time)) s |")
            println(io, "| DOFs | $(m.dofs) |")
            println(io, "| DOFs/second | $(@sprintf("%.0f", m.dofs_per_second)) |")
            println(io, "| Memory Usage | $(@sprintf("%.1f", m.total_memory / 1024^2)) MB |")
            println(io, "| Mean Iterations | $(@sprintf("%.1f", m.mean_iterations)) |")
            println(io, "| Convergence Rate | $(@sprintf("%.3f", m.convergence_rate)) |")
            println(io, "")

            if !isempty(r.warnings)
                println(io, "**Warnings:**")
                foreach(w -> println(io, "- $w"), r.warnings)
                println(io, "")
            end
        else
            println(io, "**Status:** Failed to complete")
            if !isempty(r.errors)
                println(io, "")
                println(io, "**Errors:**")
                foreach(msg -> println(io, "- $msg"), r.errors)
            end
            println(io, "")
        end
    end
end

# Emit the scaling table for results annotated by analyze_scaling_relationships!.
# Silently skipped when no result carries a scaling exponent.
function write_scaling_analysis(io::IO, results::Vector{BenchmarkResult})
    has_scaling(r) = r.baseline_comparison !== nothing &&
                     haskey(r.baseline_comparison, "scaling_exponent")
    tagged = filter(has_scaling, results)
    isempty(tagged) && return

    println(io, "## Scaling Analysis")
    println(io, "")
    println(io, "| Benchmark | DOF Ratio | Time Ratio | Scaling Exponent |")
    println(io, "|-----------|-----------|------------|------------------|")
    for r in tagged
        c = r.baseline_comparison
        println(io, "| $(r.name) | $(@sprintf("%.2f", c["dof_ratio"])) | $(@sprintf("%.2f", c["time_ratio"])) | $(@sprintf("%.2f", c["scaling_exponent"])) |")
    end
    println(io, "")

    # Context on expected complexity for spectral element methods.
    println(io, "### Scaling Analysis")
    println(io, "")
    println(io, "For spectral element methods:")
    println(io, "- **Ideal scaling:** O(N³) for assembly, O(N^(3d)) for direct solve")
    println(io, "- **Iterative scaling:** O(N^(3d)) per iteration with optimal preconditioner")
    println(io, "- **Observed scaling:** See table above")
    println(io, "")
end

# Emit either a regression section (when any result is flagged) or, failing
# that, an improvements section for results faster than their baseline.
function write_regression_analysis(io::IO, results::Vector{BenchmarkResult})
    flagged = [r for r in results if r.regression_detected]

    if !isempty(flagged)
        println(io, "## ⚠️ Performance Regressions Detected")
        println(io, "")
        for r in flagged
            comp = r.baseline_comparison
            if comp !== nothing && haskey(comp, "time_regression")
                println(io, "- **$(r.name):** $(@sprintf("%.1fx", comp["time_regression"])) slower than baseline")
            end
        end
        println(io, "")
        return
    end

    faster(r) = r.baseline_comparison !== nothing &&
                haskey(r.baseline_comparison, "time_regression") &&
                r.baseline_comparison["time_regression"] < 1.0
    improved = filter(faster, results)
    isempty(improved) && return

    println(io, "## 🚀 Performance Improvements")
    println(io, "")
    for r in improved
        speedup = 1.0 / r.baseline_comparison["time_regression"]
        println(io, "- **$(r.name):** $(@sprintf("%.1fx", speedup)) faster than baseline")
    end
    println(io, "")
end

# Emit recommendations: the fastest configuration, the most memory-efficient
# one (when different), and general guidance derived from the results.
function write_recommendations(io::IO, results::Vector{BenchmarkResult})
    println(io, "## Recommendations")
    println(io, "")

    successful = filter(r -> r.metrics.success_rate > 0, results)
    isempty(successful) && return

    # argmin(f, collection) returns the minimizing ELEMENT (Julia ≥ 1.7),
    # not an index — the original indexed the vector with the element,
    # which is a MethodError. Use the element directly.
    best = argmin(r -> r.metrics.mean_time, successful)

    println(io, "### Optimal Configuration")
    println(io, "")
    println(io, "Based on the benchmark results:")
    println(io, "- **Best performing:** $(best.name)")
    println(io, "- **Time:** $(@sprintf("%.3f", best.metrics.mean_time)) seconds")
    println(io, "- **Throughput:** $(@sprintf("%.0f", best.metrics.dofs_per_second)) DOFs/second")
    println(io, "")

    # Memory efficiency (only worth a section if it differs from `best`).
    memory_best = argmin(r -> r.metrics.total_memory, successful)
    if memory_best !== best
        println(io, "### Memory Efficiency")
        println(io, "")
        println(io, "- **Most memory efficient:** $(memory_best.name)")
        println(io, "- **Memory usage:** $(@sprintf("%.1f", memory_best.metrics.total_memory / 1024^2)) MB")
        println(io, "")
    end

    println(io, "### General Recommendations")
    println(io, "")

    avg_time = mean(r.metrics.mean_time for r in successful)
    fast = filter(r -> r.metrics.mean_time < avg_time, successful)
    if !isempty(fast)
        println(io, "- Consider using configurations: $(join([r.name for r in fast], ", "))")
    end

    heavy = filter(r -> r.metrics.total_memory > 1024^3, successful)  # > 1 GB
    if !isempty(heavy)
        println(io, "- Monitor memory usage for: $(join([r.name for r in heavy], ", "))")
    end

    println(io, "- Regular performance monitoring recommended")
    println(io, "- Consider upgrading hardware if performance is insufficient")
    println(io, "")
end

# Public API: benchmark drivers, configuration/result types, and reporting.
export benchmark_solver_performance, benchmark_scaling_analysis, benchmark_backend_comparison
export benchmark_memory_usage, benchmark_convergence_rates, compare_with_baseline
export BenchmarkConfig, BenchmarkResult, PerformanceMetrics, SystemInfo
export save_benchmark_results, generate_markdown_report
export check_benchmarking_deps

# Probe optional dependencies (BenchmarkTools/CSV/DataFrames/JSON3) once at
# module load; this sets the HAS_* flags that gate optional features above.
check_benchmarking_deps()