#!/usr/bin/env julia
"""
Comprehensive Performance Benchmark Suite for NSEMSolver.jl

A production-quality benchmarking framework that provides:
- Comprehensive performance analysis across multiple dimensions
- Memory profiling and allocation tracking
- Scaling analysis for different problem sizes
- Backend comparison (Julia, PETSc, GCR)
- Automated report generation with regression detection
- Export capabilities for CSV, JSON, and Markdown formats
- CI/CD integration support

Usage:
    julia performance_benchmark.jl [--quick] [--export-dir DIR] [--baseline FILE]

Options:
    --quick         Run quick benchmark suite (fewer trials)
    --ci            Run lightweight CI benchmarks
    --export-dir    Directory for exporting results (default: ./benchmark)
    --baseline      Baseline file for regression detection
    --verbose       Enable verbose output
    --help          Show this help message

Examples:
    julia performance_benchmark.jl                    # Full benchmark suite
    julia performance_benchmark.jl --quick            # Quick benchmarks
    julia performance_benchmark.jl --ci               # CI-friendly benchmarks
    julia performance_benchmark.jl --export-dir ./results --baseline baseline.json
"""

using NSEMSolver
using Printf
using Statistics
using Dates

"""
    parse_args(argv::AbstractVector{<:AbstractString}=ARGS) -> Dict{String, Any}

Parse command line arguments into an options dictionary.

Accepts an explicit argument vector (defaulting to the global `ARGS`) so the
parser can be exercised in isolation. Flags missing their required value and
unknown flags are reported with `@warn` instead of being silently ignored.
"""
function parse_args(argv::AbstractVector{<:AbstractString}=ARGS)
    args = Dict{String, Any}(
        "quick" => false,
        "ci" => false,
        "export_dir" => "./benchmark",
        "baseline" => nothing,
        "verbose" => false,
        "help" => false
    )

    i = 1
    while i <= length(argv)
        arg = argv[i]
        if arg == "--quick"
            args["quick"] = true
        elseif arg == "--ci"
            args["ci"] = true
        elseif arg == "--export-dir"
            i += 1
            if i <= length(argv)
                args["export_dir"] = argv[i]
            else
                @warn "--export-dir requires a value; keeping default $(args["export_dir"])"
            end
        elseif arg == "--baseline"
            i += 1
            if i <= length(argv)
                args["baseline"] = argv[i]
            else
                @warn "--baseline requires a value; ignoring flag"
            end
        elseif arg == "--verbose"
            args["verbose"] = true
        elseif arg == "--help"
            args["help"] = true
        else
            @warn "Unknown argument: $arg"
        end
        i += 1
    end

    return args
end

"""
    show_help()

Print the command-line usage message for the benchmark suite to stdout.
"""
function show_help()
    # Keep the help text as a flat list of lines; printing in one loop keeps
    # the output identical to printing each line individually.
    help_lines = [
        "Comprehensive Performance Benchmark Suite for NSEMSolver.jl",
        "",
        "Usage: julia performance_benchmark.jl [options]",
        "",
        "Options:",
        "  --quick         Run quick benchmark suite (fewer trials)",
        "  --ci            Run lightweight CI benchmarks",
        "  --export-dir    Directory for exporting results (default: ./benchmark)",
        "  --baseline      Baseline file for regression detection",
        "  --verbose       Enable verbose output",
        "  --help          Show this help message",
        "",
        "Examples:",
        "  julia performance_benchmark.jl                    # Full benchmark suite",
        "  julia performance_benchmark.jl --quick            # Quick benchmarks",
        "  julia performance_benchmark.jl --ci               # CI-friendly benchmarks",
        "  julia performance_benchmark.jl --export-dir ./results --baseline baseline.json",
    ]
    for line in help_lines
        println(line)
    end
end

"""
    create_benchmark_config(args::Dict) -> BenchmarkConfig

Create benchmark configuration from command line arguments.

Selects one of three presets, checked in priority order: lightweight CI,
quick, then full comprehensive.
"""
function create_benchmark_config(args::Dict)
    want_verbose = args["verbose"]
    baseline = args["baseline"]

    if args["ci"]
        # Lightweight CI configuration
        return BenchmarkConfig(
            n_trials = 2,
            n_samples = 5,
            min_time_ns = 500_000_000,     # 0.5 seconds
            max_time_ns = 10_000_000_000,  # 10 seconds
            verbose = want_verbose,
            export_csv = true,
            export_json = true,
            baseline_file = baseline
        )
    end

    if args["quick"]
        # Quick benchmark configuration
        return BenchmarkConfig(
            n_trials = 3,
            n_samples = 8,
            min_time_ns = 1_000_000_000,   # 1 second
            max_time_ns = 15_000_000_000,  # 15 seconds
            verbose = want_verbose,
            export_csv = true,
            export_json = true,
            baseline_file = baseline
        )
    end

    # Full comprehensive benchmark configuration (also keeps raw samples)
    return BenchmarkConfig(
        n_trials = 5,
        n_samples = 10,
        min_time_ns = 2_000_000_000,   # 2 seconds
        max_time_ns = 30_000_000_000,  # 30 seconds
        verbose = want_verbose,
        export_csv = true,
        export_json = true,
        save_raw_data = true,
        baseline_file = baseline
    )
end

"""
    comprehensive_scaling_analysis(config::BenchmarkConfig) -> Vector{BenchmarkResult}

Perform comprehensive scaling analysis across polynomial orders and domain sizes.

The problem-size ranges are inferred from `config.n_trials` (≤2 → CI mode,
≤3 → quick mode, otherwise full mode).
"""
function comprehensive_scaling_analysis(config::BenchmarkConfig)
    if config.verbose
        println("🔍 Starting Comprehensive Scaling Analysis")
        # String repetition in Julia uses `^`; `"=" * 50` throws a MethodError.
        println("="^50)
    end

    # Define problem size ranges based on configuration
    if config.n_trials <= 2  # CI mode
        N_values = [3, 4]
        n_block_values = [2, 3]
    elseif config.n_trials <= 3  # Quick mode
        N_values = [3, 4, 5]
        n_block_values = [2, 3, 4]
    else  # Full mode
        N_values = [2, 3, 4, 5, 6]
        n_block_values = [2, 3, 4, 5]
    end

    if config.verbose
        println("Polynomial orders: $(N_values)")
        println("Block counts: $(n_block_values)")
    end

    return benchmark_scaling_analysis(N_values, n_block_values, config=config)
end

"""
    comprehensive_backend_comparison(config::BenchmarkConfig) -> Vector{BenchmarkResult}

Compare performance across all available solver backends.

The `:julia` backend is always tested; `:petsc` and `:gcr` are added when the
corresponding `HAS_PETSC` / `HAS_GCR` feature flags are set.
"""
function comprehensive_backend_comparison(config::BenchmarkConfig)
    if config.verbose
        println("⚡ Comprehensive Backend Comparison")
        # String repetition in Julia uses `^`; `"=" * 40` throws a MethodError.
        println("="^40)
    end

    # Determine available backends (Julia is always available)
    backends = [:julia]

    if HAS_PETSC
        push!(backends, :petsc)
    end
    if config.verbose
        println(HAS_PETSC ? "✅ PETSc backend available" : "❌ PETSc backend not available")
    end

    if HAS_GCR
        push!(backends, :gcr)
    end
    if config.verbose
        println(HAS_GCR ? "✅ GCR backend available" : "❌ GCR backend not available")
    end

    if config.verbose
        println("Testing backends: $(backends)")
    end

    return benchmark_backend_comparison(backends, config=config)
end

"""
    comprehensive_memory_analysis(config::BenchmarkConfig) -> Vector{BenchmarkResult}

Analyze memory usage patterns across different problem sizes.

The set of problem configurations is inferred from `config.n_trials`
(≤2 → CI mode, ≤3 → quick mode, otherwise full mode).
"""
function comprehensive_memory_analysis(config::BenchmarkConfig)
    if config.verbose
        println("🧠 Comprehensive Memory Analysis")
        # String repetition in Julia uses `^`; `"=" * 35` throws a MethodError.
        println("="^35)
    end

    # Typed vector (the original `[]` was an untyped Vector{Any})
    problem_sizes = NSOptions[]

    if config.n_trials <= 2  # CI mode
        push!(problem_sizes, NSOptions(N=3, n_block=2, verbose=false))
        push!(problem_sizes, NSOptions(N=4, n_block=3, verbose=false))
    elseif config.n_trials <= 3  # Quick mode
        push!(problem_sizes, NSOptions(N=3, n_block=2, verbose=false))
        push!(problem_sizes, NSOptions(N=4, n_block=3, verbose=false))
        push!(problem_sizes, NSOptions(N=5, n_block=3, verbose=false))
    else  # Full mode
        push!(problem_sizes, NSOptions(N=3, n_block=2, verbose=false))
        push!(problem_sizes, NSOptions(N=4, n_block=3, verbose=false))
        push!(problem_sizes, NSOptions(N=5, n_block=3, verbose=false))
        push!(problem_sizes, NSOptions(N=4, n_block=4, verbose=false))
        push!(problem_sizes, NSOptions(N=6, n_block=3, verbose=false))
    end

    if config.verbose
        println("Problem sizes: $(length(problem_sizes)) configurations")
    end

    return benchmark_memory_usage(problem_sizes, config=config)
end

"""
    comprehensive_convergence_analysis(config::BenchmarkConfig) -> Vector{BenchmarkResult}

Analyze convergence behavior for different tolerance values.

The tolerance sweep is inferred from `config.n_trials`
(≤2 → CI mode, ≤3 → quick mode, otherwise full mode).
"""
function comprehensive_convergence_analysis(config::BenchmarkConfig)
    if config.verbose
        println("🎯 Comprehensive Convergence Analysis")
        # String repetition in Julia uses `^`; `"=" * 40` throws a MethodError.
        println("="^40)
    end

    # Define tolerance ranges based on configuration
    if config.n_trials <= 2  # CI mode
        tolerance_values = [1e-4, 1e-5, 1e-6]
    elseif config.n_trials <= 3  # Quick mode
        tolerance_values = [1e-4, 1e-5, 1e-6, 1e-7]
    else  # Full mode
        tolerance_values = [1e-3, 1e-4, 1e-5, 1e-6, 1e-7, 1e-8]
    end

    if config.verbose
        println("Tolerance values: $(tolerance_values)")
    end

    return benchmark_convergence_rates(tolerance_values, config=config)
end

"""
    micro_benchmarks(config::BenchmarkConfig) -> Vector{BenchmarkResult}

Run micro-benchmarks for individual components.

Currently runs a single end-to-end baseline configuration; true
micro-benchmarks would require exposing individual SEM operations.
"""
function micro_benchmarks(config::BenchmarkConfig)
    if config.verbose
        println("🔬 Micro-Benchmarks")
        # String repetition in Julia uses `^`; `"=" * 20` throws a MethodError.
        println("="^20)
    end

    micro_results = BenchmarkResult[]

    # Basic configuration micro-benchmark (short final time keeps it cheap)
    basic_config = NSOptions(N=3, n_block=2, tfinal=0.1, verbose=false)
    result = benchmark_single_configuration(basic_config, config)
    result = BenchmarkResult(result, name="Micro_Basic", description="Basic micro-benchmark")
    push!(micro_results, result)

    return micro_results
end

"""
    run_comprehensive_benchmarks(args::Dict) -> Dict{String, Vector{BenchmarkResult}}

Run the complete benchmark suite based on command line arguments.

Each phase runs inside its own `try`/`catch` so a failure in one analysis does
not abort the remaining phases; a failed phase yields an empty result vector
under its key.
"""
function run_comprehensive_benchmarks(args::Dict)
    config = create_benchmark_config(args)

    if config.verbose
        println("🚀 NSEMSolver.jl Comprehensive Performance Benchmark Suite")
        # String repetition in Julia uses `^`; `"=" * 60` throws a MethodError.
        println("="^60)
        println("Configuration: $(config.n_trials) trials, $(config.n_samples) samples per trial")
        println("Export directory: $(args["export_dir"])")
        if args["baseline"] !== nothing
            println("Baseline file: $(args["baseline"])")
        end
        println()
    end

    # Initialize results collection
    all_results = Dict{String, Vector{BenchmarkResult}}()

    # 1. Scaling Analysis
    try
        if config.verbose
            println("Phase 1/5: Scaling Analysis")
        end
        all_results["scaling"] = comprehensive_scaling_analysis(config)
    catch e
        @warn "Scaling analysis failed: $e"
        all_results["scaling"] = BenchmarkResult[]
    end

    # 2. Backend Comparison ("\n" fixed from the literal "\\n" escape)
    try
        if config.verbose
            println("\nPhase 2/5: Backend Comparison")
        end
        all_results["backends"] = comprehensive_backend_comparison(config)
    catch e
        @warn "Backend comparison failed: $e"
        all_results["backends"] = BenchmarkResult[]
    end

    # 3. Memory Analysis
    try
        if config.verbose
            println("\nPhase 3/5: Memory Analysis")
        end
        all_results["memory"] = comprehensive_memory_analysis(config)
    catch e
        @warn "Memory analysis failed: $e"
        all_results["memory"] = BenchmarkResult[]
    end

    # 4. Convergence Analysis
    try
        if config.verbose
            println("\nPhase 4/5: Convergence Analysis")
        end
        all_results["convergence"] = comprehensive_convergence_analysis(config)
    catch e
        @warn "Convergence analysis failed: $e"
        all_results["convergence"] = BenchmarkResult[]
    end

    # 5. Micro-benchmarks
    try
        if config.verbose
            println("\nPhase 5/5: Micro-benchmarks")
        end
        all_results["micro"] = micro_benchmarks(config)
    catch e
        @warn "Micro-benchmarks failed: $e"
        all_results["micro"] = BenchmarkResult[]
    end

    return all_results
end

"""
    process_and_export_results(all_results::Dict, args::Dict) -> Vector{BenchmarkResult}

Process benchmark results and export in multiple formats.

Flattens the per-category result dictionary into a single vector (prefixing
each result name with its category), optionally annotates results against a
baseline file, and saves everything to `args["export_dir"]`. Baseline and
export failures are downgraded to warnings so results are still returned.
"""
function process_and_export_results(all_results::Dict, args::Dict)
    # Flatten all results into a single vector
    flat_results = BenchmarkResult[]
    for (category, results) in all_results
        for result in results
            # Add category prefix to result name
            new_name = "$(category)_$(result.name)"
            result_with_category = BenchmarkResult(result, name=new_name)
            push!(flat_results, result_with_category)
        end
    end

    if args["verbose"]
        # "\n" fixed from the literal "\\n" escape in the original
        println("\n📊 Processing $(length(flat_results)) benchmark results")
    end

    # Apply baseline comparison if requested
    if args["baseline"] !== nothing
        try
            flat_results = compare_with_baseline(flat_results, args["baseline"])
            if args["verbose"]
                regressions = count(r -> r.regression_detected, flat_results)
                println("Regression analysis: $regressions regressions detected")
            end
        catch e
            @warn "Baseline comparison failed: $e"
        end
    end

    # Export results
    try
        save_benchmark_results(flat_results, args["export_dir"])
        if args["verbose"]
            println("✅ Results exported to $(args["export_dir"])")
        end
    catch e
        @warn "Export failed: $e"
    end

    return flat_results
end

"""
    print_summary_report(all_results::Dict, flat_results::Vector{BenchmarkResult}, args::Dict)

Print a comprehensive summary report to console.

Covers overall statistics, system information, per-category summaries,
performance insights (fastest configuration, memory efficiency, estimated
scaling exponent), regression alerts, and recommendations.
"""
function print_summary_report(all_results::Dict, flat_results::Vector{BenchmarkResult}, args::Dict)
    # All `"="^n` and leading "\n" below fix the original `"=" * n`
    # (MethodError) and literal "\\n" escapes.
    println("\n" * "="^60)
    println("📋 COMPREHENSIVE BENCHMARK SUMMARY REPORT")
    println("="^60)

    # Overall statistics
    total_benchmarks = length(flat_results)
    successful_benchmarks = count(r -> r.metrics.success_rate > 0, flat_results)
    # Guard against division by zero when no benchmarks ran at all
    success_pct = total_benchmarks > 0 ? successful_benchmarks / total_benchmarks * 100 : 0.0

    println("📈 Overall Statistics:")
    println("   Total benchmarks: $total_benchmarks")
    println("   Successful benchmarks: $successful_benchmarks")
    println("   Success rate: $(@sprintf("%.1f", success_pct))%")

    # System information (from first result)
    if !isempty(flat_results)
        sys_info = flat_results[1].system_info
        println("\n🖥️  System Information:")
        println("   Julia version: $(sys_info.julia_version)")
        println("   OS: $(sys_info.os)")
        println("   CPU: $(sys_info.cpu_name)")
        println("   Cores: $(sys_info.cpu_cores)")
        println("   Memory: $(@sprintf("%.1f", sys_info.total_memory / 1024^3)) GB")
        println("   Julia threads: $(sys_info.num_threads)")
        println("   Available backends: Julia$(sys_info.has_petsc ? ", PETSc" : "")$(sys_info.has_gcr ? ", GCR" : "")")
    end

    # Category-wise summary
    println("\n📊 Category Summary:")
    for (category, results) in all_results
        if !isempty(results)
            successful = count(r -> r.metrics.success_rate > 0, results)
            avg_time = successful > 0 ? mean([r.metrics.mean_time for r in results if r.metrics.success_rate > 0]) : 0.0

            println("   $(titlecase(category)):")
            println("     Tests: $(length(results)) ($(successful) successful)")
            if successful > 0
                println("     Avg time: $(@sprintf("%.3f", avg_time)) s")

                # Find best performer in this category
                if successful > 1
                    ok_results = [r for r in results if r.metrics.success_rate > 0]
                    best = ok_results[argmin([r.metrics.mean_time for r in ok_results])]
                    println("     Best: $(best.name) ($(@sprintf("%.3f", best.metrics.mean_time)) s)")
                end
            end
        end
    end

    # Performance insights
    println("\n🎯 Key Performance Insights:")
    successful_results = [r for r in flat_results if r.metrics.success_rate > 0]

    if !isempty(successful_results)
        # Fastest overall
        fastest_idx = argmin([r.metrics.mean_time for r in successful_results])
        fastest = successful_results[fastest_idx]
        println("   ⚡ Fastest configuration: $(fastest.name)")
        println("      Time: $(@sprintf("%.3f", fastest.metrics.mean_time)) s")
        println("      Throughput: $(@sprintf("%.0f", fastest.metrics.dofs_per_second)) DOFs/s")

        # Most memory efficient
        memory_efficient_results = [r for r in successful_results if r.metrics.total_memory > 0]
        if !isempty(memory_efficient_results)
            memory_idx = argmin([r.metrics.total_memory for r in memory_efficient_results])
            memory_best = memory_efficient_results[memory_idx]
            println("   🧠 Most memory efficient: $(memory_best.name)")
            println("      Memory: $(@sprintf("%.1f", memory_best.metrics.total_memory / 1024^2)) MB")
        end

        # Scaling insights
        scaling_results = [r for r in successful_results if contains(r.name, "scaling")]
        if length(scaling_results) >= 2
            dofs = [r.metrics.dofs for r in scaling_results]
            times = [r.metrics.mean_time for r in scaling_results]
            if length(unique(dofs)) >= 2
                # Least-squares fit of log(time) vs log(dofs): the slope is the
                # estimated scaling exponent (Time ~ DOFs^slope)
                log_dofs = log.(dofs)
                log_times = log.(times)

                n = length(log_dofs)
                sum_x = sum(log_dofs)
                sum_y = sum(log_times)
                sum_xy = sum(log_dofs .* log_times)
                sum_x2 = sum(log_dofs .^2)

                slope = (n * sum_xy - sum_x * sum_y) / (n * sum_x2 - sum_x^2)

                println("   📈 Estimated scaling: Time ~ DOFs^$(@sprintf("%.2f", slope))")
                if slope < 1.5
                    println("      Excellent scaling (better than O(N^1.5))")
                elseif slope < 2.0
                    println("      Good scaling (better than O(N^2))")
                elseif slope < 3.0
                    println("      Acceptable scaling (better than O(N^3))")
                else
                    println("      Poor scaling (worse than O(N^3))")
                end
            end
        end
    end

    # Regression alerts
    regressions = [r for r in flat_results if r.regression_detected]
    if !isempty(regressions)
        println("\n⚠️  Performance Regressions Detected:")
        for reg in regressions
            println("   $(reg.name): Performance degraded")
            if haskey(reg.baseline_comparison, "time_regression")
                factor = reg.baseline_comparison["time_regression"]
                println("      $(@sprintf("%.1fx", factor)) slower than baseline")
            end
        end
    end

    # Recommendations
    println("\n💡 Recommendations:")
    if !isempty(successful_results)
        # Top 3 fastest configurations
        sorted_results = sort(successful_results, by = r -> r.metrics.mean_time)
        top_3 = sorted_results[1:min(3, length(sorted_results))]

        println("   Recommended configurations:")
        for (i, result) in enumerate(top_3)
            println("   $i. $(result.name): $(@sprintf("%.3f", result.metrics.mean_time)) s")
        end

        # Memory recommendations (flag configurations using more than 1 GB)
        high_memory = [r for r in successful_results if r.metrics.total_memory > 1024^3]
        if !isempty(high_memory)
            println("   Memory warnings:")
            println("     $(length(high_memory)) configurations use >1GB memory")
            println("     Consider memory optimization for large-scale runs")
        end

        # Convergence recommendations (flag slow-converging configurations)
        convergence_results = [r for r in successful_results if contains(r.name, "convergence")]
        if !isempty(convergence_results)
            slow_convergence = [r for r in convergence_results if r.metrics.mean_iterations > 100]
            if !isempty(slow_convergence)
                println("   Convergence warnings:")
                println("     $(length(slow_convergence)) configurations show slow convergence")
                println("     Consider tighter tolerances or preconditioner improvements")
            end
        end
    end

    println("\n✅ Comprehensive benchmark completed successfully!")
    println("📁 Detailed results available in: $(args["export_dir"])")
end

"""
    main() -> Int

Main entry point for the comprehensive benchmark suite.

Returns a process exit code: `0` on success (or after printing help),
`1` on failure.
"""
function main()
    args = parse_args()

    # Show help if requested
    if args["help"]
        show_help()
        return 0
    end

    if args["verbose"]
        println("🚀 Starting NSEMSolver.jl Comprehensive Performance Benchmark")
        println("Timestamp: $(now())")
        println()
    end

    try
        # Run comprehensive benchmarks
        all_results = run_comprehensive_benchmarks(args)

        # Process and export results
        flat_results = process_and_export_results(all_results, args)

        # Print summary report
        print_summary_report(all_results, flat_results, args)

        return 0
    catch e
        @error "Benchmark suite failed with error: $e"
        if args["verbose"]
            println("Stack trace:")
            # `Base.catch_stack` was deprecated in Julia 1.7 in favor of the
            # exported `current_exceptions`
            for (exc, bt) in current_exceptions()
                showerror(stdout, exc, bt)
                println()
            end
        end
        return 1
    end
end

# Legacy function wrappers for backward compatibility

"""
    benchmark_polynomial_orders() -> Dict{Int, Any}

Legacy wrapper: benchmark a sweep of polynomial orders and return results
keyed by polynomial order `N`, with per-order metrics as NamedTuples.
"""
function benchmark_polynomial_orders()
    config = BenchmarkConfig(verbose=true, n_trials=3)
    N_values = [2, 3, 4, 5, 6]
    solver_configs = [NSOptions(N=N, n_block=3, nu=0.01, tfinal=0.5, cfl=0.4, tol=1e-6, solver=:julia, verbose=false) for N in N_values]
    results = benchmark_solver_performance(solver_configs, config=config)

    # Convert to legacy format for compatibility
    legacy_results = Dict{Int, Any}()
    for (i, result) in enumerate(results)
        N = N_values[i]
        if result.metrics.success_rate > 0
            legacy_results[N] = (
                mean_time = result.metrics.mean_time,
                std_time = result.metrics.std_time,
                mean_iterations = result.metrics.mean_iterations,
                mean_residual = result.metrics.mean_residual,
                success_rate = result.metrics.success_rate,
                dofs = result.metrics.dofs
            )
        else
            # Trailing comma is required: without it `(success_rate = 0.0)` is
            # just the scalar 0.0, not a one-field NamedTuple.
            legacy_results[N] = (success_rate = 0.0,)
        end
    end

    return legacy_results
end

"""
    benchmark_domain_sizes() -> Dict{Int, Any}

Legacy wrapper: benchmark a sweep of domain sizes (block counts) and return
results keyed by `n_block`, with per-size metrics as NamedTuples.
"""
function benchmark_domain_sizes()
    config = BenchmarkConfig(verbose=true, n_trials=1)
    n_block_values = [2, 3, 4, 5]
    solver_configs = [NSOptions(N=4, n_block=n_block, nu=0.01, tfinal=0.3, cfl=0.3, tol=1e-5, solver=:julia, verbose=false) for n_block in n_block_values]
    results = benchmark_solver_performance(solver_configs, config=config)

    # Convert to legacy format
    legacy_results = Dict{Int, Any}()
    for (i, result) in enumerate(results)
        n_block = n_block_values[i]
        if result.metrics.success_rate > 0
            legacy_results[n_block] = (
                time = result.metrics.mean_time,
                iterations = result.metrics.mean_iterations,
                residual = result.metrics.mean_residual,
                dofs = result.metrics.dofs,
                converged = true
            )
        else
            # Trailing comma is required: without it `(converged = false)` is
            # just the scalar false, not a one-field NamedTuple.
            legacy_results[n_block] = (converged = false,)
        end
    end

    return legacy_results
end

"""
    benchmark_solver_backends() -> Dict{Symbol, Any}

Legacy wrapper: benchmark the available solver backends and return results
keyed by backend symbol (`:julia`, plus `:petsc` / `:gcr` when available).
"""
function benchmark_solver_backends()
    config = BenchmarkConfig(verbose=true, n_trials=3)
    backends = [:julia]
    if HAS_PETSC
        push!(backends, :petsc)
    end
    if HAS_GCR
        push!(backends, :gcr)
    end

    results = benchmark_backend_comparison(backends, config=config)

    # Convert to legacy format
    legacy_results = Dict{Symbol, Any}()
    for (i, result) in enumerate(results)
        backend = backends[i]
        if result.metrics.success_rate > 0
            legacy_results[backend] = (
                mean_time = result.metrics.mean_time,
                std_time = result.metrics.std_time,
                mean_iterations = result.metrics.mean_iterations,
                success_rate = result.metrics.success_rate
            )
        else
            # Trailing comma is required: without it `(success_rate = 0.0)` is
            # just the scalar 0.0, not a one-field NamedTuple.
            legacy_results[backend] = (success_rate = 0.0,)
        end
    end

    return legacy_results
end

"""
    benchmark_adaptive_refinement() -> Dict{String, Any}

Legacy wrapper: compare uniform against adaptive refinement configurations
and return results keyed by case name as NamedTuples.
"""
function benchmark_adaptive_refinement()
    # Simplified adaptive refinement benchmark for compatibility
    config = BenchmarkConfig(verbose=true, n_trials=1)

    cases = [
        ("Uniform", NSOptions(N=3, n_block=4, adaptive_refinement=false, verbose=false)),
        ("Adaptive-2", NSOptions(N=3, n_block=4, adaptive_refinement=true, refinement_levels=2, verbose=false)),
        ("Adaptive-3", NSOptions(N=3, n_block=4, adaptive_refinement=true, refinement_levels=3, verbose=false))
    ]

    legacy_results = Dict{String, Any}()

    for (name, solver_config) in cases
        result = benchmark_single_configuration(solver_config, config)
        if result.metrics.success_rate > 0
            legacy_results[name] = (
                time = result.metrics.mean_time,
                iterations = result.metrics.mean_iterations,
                residual = result.metrics.mean_residual,
                converged = true
            )
        else
            # Trailing comma is required: without it `(converged = false)` is
            # just the scalar false, not a one-field NamedTuple.
            legacy_results[name] = (converged = false,)
        end
    end

    return legacy_results
end

"""
    estimate_degrees_of_freedom(N::Int, n_block::Int, dim::Int) -> Int

Roughly estimate the total degrees of freedom for a spectral element
discretization with polynomial order `N`, `n_block` blocks per dimension,
and spatial dimension `dim`.
"""
function estimate_degrees_of_freedom(N::Int, n_block::Int, dim::Int)
    # n_block^dim elements, each carrying (N + 1)^dim nodal points
    return n_block^dim * (N + 1)^dim
end

"""
    generate_performance_report(poly_results, domain_results, solver_results, adaptive_results)

Print a minimal legacy-format summary of the four benchmark result
collections (only their test counts). Kept for backward compatibility;
prefer the comprehensive reporting pipeline.
"""
function generate_performance_report(poly_results, domain_results, solver_results, adaptive_results)
    # "="^60 / "\n" fix the original `"=" * 60` (MethodError) and the
    # literal "\\n" escape
    println("\n" * "="^60)
    println("📋 PERFORMANCE BENCHMARK REPORT (LEGACY)")
    println("="^60)
    println("Note: This is a legacy report format. Use the comprehensive benchmarking")
    println("      for more detailed analysis and modern reporting features.")
    println()

    # Basic legacy report functionality
    println("Polynomial order results: $(length(poly_results)) tests")
    println("Domain size results: $(length(domain_results)) tests")
    println("Solver backend results: $(length(solver_results)) tests")
    println("Adaptive refinement results: $(length(adaptive_results)) tests")
end

# Run the main function only when this script is executed directly (not when
# it is `include`d); the process exit status is main()'s return value
# (0 = success, 1 = failure).
if abspath(PROGRAM_FILE) == @__FILE__
    exit(main())
end