#!/usr/bin/env julia

"""
Validation Benchmark Example

This example validates the LidDrivenCavity.jl solver against established 
literature benchmarks, particularly the classic Ghia et al. (1982) data for 
lid-driven cavity flow.

Run with: julia validation_benchmark.jl

Features:
- Comparison with Ghia et al. (1982) benchmark data at Re = 100, 400, 1000
- Quantitative error analysis (L2, L∞, and pointwise errors)
- Statistical validation metrics
- Centerline profile comparison with literature
- Grid convergence validation against known results
- Publication-quality accuracy assessment
- Automated benchmark data generation and comparison
- Error visualization and reporting

References:
- Ghia, U., Ghia, K.N., Shin, C.T. (1982). High-Re solutions for incompressible
  flow using the Navier-Stokes equations and a multigrid method. J. Comput. Phys.
"""

# Add the package to the path
push!(LOAD_PATH, "../src")

using LidDrivenCavity
using Printf
using Statistics
using LinearAlgebra

"""
Ghia et al. (1982) benchmark data for lid-driven cavity flow.
Data for centerline velocity profiles at various Reynolds numbers.
"""

# Ghia et al. benchmark data - u-velocity along vertical centerline (x = 0.5)
const GHIA_RE100_U_DATA = [
    # y-coordinate, u-velocity
    (1.0000, 1.00000),
    (0.9688, 0.84123), 
    (0.9609, 0.78871),
    (0.9531, 0.73722),
    (0.9453, 0.68717),
    (0.9063, 0.23151),
    (0.8594, 0.00332),
    (0.8047, -0.13641),
    (0.5000, -0.20581),
    (0.2344, -0.21090),
    (0.2266, -0.20196),
    (0.1563, -0.15662),
    (0.0938, -0.10150),
    (0.0781, -0.08183),
    (0.0703, -0.07492),
    (0.0625, -0.06434),
    (0.0000, 0.00000)
]

const GHIA_RE400_U_DATA = [
    (1.0000, 1.00000),
    (0.9688, 0.75837),
    (0.9609, 0.68439),
    (0.9531, 0.61756),
    (0.9453, 0.55892),
    (0.9063, 0.29093),
    (0.8594, 0.16256),
    (0.8047, 0.02135),
    (0.5000, -0.11477),
    (0.2344, -0.17119),
    (0.2266, -0.32407),
    (0.1563, -0.24533),
    (0.0938, -0.14612),
    (0.0781, -0.10338),
    (0.0703, -0.09266),
    (0.0625, -0.08186),
    (0.0000, 0.00000)
]

const GHIA_RE1000_U_DATA = [
    (1.0000, 1.00000),
    (0.9688, 0.65928),
    (0.9609, 0.57492),
    (0.9531, 0.51117),
    (0.9453, 0.46604),
    (0.9063, 0.33304),
    (0.8594, 0.18719),
    (0.8047, 0.05702),
    (0.5000, -0.06080),
    (0.2344, -0.10648),
    (0.2266, -0.27805),
    (0.1563, -0.38289),
    (0.0938, -0.29730),
    (0.0781, -0.22220),
    (0.0703, -0.20196),
    (0.0625, -0.18109),
    (0.0000, 0.00000)
]

# Ghia et al. benchmark data - v-velocity along horizontal centerline (y = 0.5) 
const GHIA_RE100_V_DATA = [
    # x-coordinate, v-velocity
    (0.0000, 0.00000),
    (0.0625, 0.09233),
    (0.0703, 0.10091),
    (0.0781, 0.10890),
    (0.0938, 0.12317),
    (0.1563, 0.16077),
    (0.2266, 0.17507),
    (0.2344, 0.17527),
    (0.5000, 0.05454),
    (0.8047, -0.24533),
    (0.8594, -0.22445),
    (0.9063, -0.16914),
    (0.9453, -0.10313),
    (0.9531, -0.08864),
    (0.9609, -0.07391),
    (0.9688, -0.05906),
    (1.0000, 0.00000)
]

const GHIA_RE400_V_DATA = [
    (0.0000, 0.00000),
    (0.0625, 0.18360),
    (0.0703, 0.19713),
    (0.0781, 0.20920),
    (0.0938, 0.22965),
    (0.1563, 0.28124),
    (0.2266, 0.30203),
    (0.2344, 0.30174),
    (0.5000, 0.05186),
    (0.8047, -0.38598),
    (0.8594, -0.44993),
    (0.9063, -0.23827),
    (0.9453, -0.22847),
    (0.9531, -0.19254),
    (0.9609, -0.15663),
    (0.9688, -0.12146),
    (1.0000, 0.00000)
]

const GHIA_RE1000_V_DATA = [
    (0.0000, 0.00000),
    (0.0625, 0.27485),
    (0.0703, 0.29012),
    (0.0781, 0.30353),
    (0.0938, 0.32627),
    (0.1563, 0.37095),
    (0.2266, 0.42768),
    (0.2344, 0.43025),
    (0.5000, 0.05454),
    (0.8047, -0.31966),
    (0.8594, -0.42665),
    (0.9063, -0.51550),
    (0.9453, -0.39188),
    (0.9531, -0.33714),
    (0.9609, -0.27669),
    (0.9688, -0.21388),
    (1.0000, 0.00000)
]

"""
    get_ghia_data(reynolds::Float64) -> (u_data, v_data)

Retrieve Ghia et al. benchmark data for specified Reynolds number.
"""
function get_ghia_data(reynolds::Float64)
    if abs(reynolds - 100.0) < 1e-6
        return GHIA_RE100_U_DATA, GHIA_RE100_V_DATA
    elseif abs(reynolds - 400.0) < 1e-6
        return GHIA_RE400_U_DATA, GHIA_RE400_V_DATA
    elseif abs(reynolds - 1000.0) < 1e-6
        return GHIA_RE1000_U_DATA, GHIA_RE1000_V_DATA
    else
        error("Ghia et al. benchmark data not available for Re = $reynolds")
    end
end

"""
    interpolate_profile(x_target, x_data, y_data) -> Float64

Linear interpolation of profile data at target location.
"""
function interpolate_profile(x_target, x_data, y_data)
    if length(x_data) != length(y_data)
        error("Data arrays must have same length")
    end
    
    # Handle boundary cases
    if x_target <= minimum(x_data)
        return y_data[argmin(x_data)]
    elseif x_target >= maximum(x_data)
        return y_data[argmax(x_data)]
    end
    
    # Find surrounding points
    for i in 1:(length(x_data)-1)
        if x_data[i] <= x_target <= x_data[i+1] || x_data[i+1] <= x_target <= x_data[i]
            # Linear interpolation
            dx = x_data[i+1] - x_data[i]
            if abs(dx) < 1e-12
                return y_data[i]
            end
            
            weight = (x_target - x_data[i]) / dx
            return y_data[i] + weight * (y_data[i+1] - y_data[i])
        end
    end
    
    # Fallback - shouldn't reach here
    return y_data[argmin(abs.(x_data .- x_target))]
end

"""
    extract_centerline_data(result::LidDrivenResult) -> (u_profile, v_profile)

Extract centerline velocity profiles from solver result.
"""
function extract_centerline_data(result::LidDrivenResult)
    x, y = result.x, result.y
    u, v = result.u, result.v
    n = length(x)
    
    # Find centerline indices
    center_i = div(n, 2)
    center_j = div(n, 2)
    
    # Vertical centerline (u-velocity)
    u_profile = Tuple{Float64,Float64}[]
    for j in 1:n
        if !isnan(u[center_i, j])
            push!(u_profile, (y[j], u[center_i, j]))
        end
    end
    
    # Horizontal centerline (v-velocity)
    v_profile = Tuple{Float64,Float64}[]
    for i in 1:n
        if !isnan(v[i, center_j])
            push!(v_profile, (x[i], v[i, center_j]))
        end
    end
    
    # Sort by coordinate
    sort!(u_profile, by = x -> x[1])
    sort!(v_profile, by = x -> x[1])
    
    return u_profile, v_profile
end

"""
    compute_profile_errors(computed_profile, reference_data) -> NamedTuple

Compute various error metrics between computed and reference profiles.
"""
function compute_profile_errors(computed_profile, reference_data)
    if isempty(computed_profile) || isempty(reference_data)
        return (l2_error = NaN, linf_error = NaN, mean_error = NaN, 
               rmse = NaN, relative_l2 = NaN, n_points = 0)
    end
    
    # Extract coordinates and values
    computed_coords = [p[1] for p in computed_profile]
    computed_values = [p[2] for p in computed_profile]
    ref_coords = [p[1] for p in reference_data]
    ref_values = [p[2] for p in reference_data]
    
    # Interpolate computed solution at reference coordinates
    interpolated_values = Float64[]
    valid_errors = Float64[]
    
    for (ref_coord, ref_val) in reference_data
        # Check if coordinate is within computed domain
        if minimum(computed_coords) <= ref_coord <= maximum(computed_coords)
            computed_val = interpolate_profile(ref_coord, computed_coords, computed_values)
            error = abs(computed_val - ref_val)
            
            push!(interpolated_values, computed_val)
            push!(valid_errors, error)
        end
    end
    
    if isempty(valid_errors)
        return (l2_error = NaN, linf_error = NaN, mean_error = NaN,
               rmse = NaN, relative_l2 = NaN, n_points = 0)
    end
    
    # Compute error metrics
    l2_error = norm(valid_errors, 2)
    linf_error = norm(valid_errors, Inf)
    mean_error = mean(valid_errors)
    rmse = sqrt(mean(valid_errors.^2))
    
    # Relative L2 error
    ref_norm = norm([ref_values[i] for i in 1:length(valid_errors)], 2)
    relative_l2 = ref_norm > 0 ? l2_error / ref_norm : NaN
    
    return (
        l2_error = l2_error,
        linf_error = linf_error,
        mean_error = mean_error,
        rmse = rmse,
        relative_l2 = relative_l2,
        n_points = length(valid_errors)
    )
end

"""
    validate_against_ghia(reynolds::Float64, grid_size::Int) -> NamedTuple

Validate solver result against Ghia et al. benchmark for specified Reynolds number.
"""
function validate_against_ghia(reynolds::Float64, grid_size::Int)
    println("🔍 Validating Re = $(reynolds) against Ghia et al. benchmark")
    println("   Grid size: $(grid_size)×$(grid_size)")
    
    # Get benchmark data
    try
        ghia_u_data, ghia_v_data = get_ghia_data(reynolds)
        
        # Solve with LidDrivenCavity.jl
        options = LidDrivenOptions(
            n = grid_size,
            Re = reynolds,
            solver = :julia,
            preconditioner = :diagonal,
            max_steps = 3000,
            tol = 1e-7,  # Tight tolerance for benchmark comparison
            verbose = false,
            save_history = true
        )
        
        print("   Solving... ")
        flush(stdout)
        
        result = solve_lid_driven_2d(options)
        
        if !result.converged
            println("❌ Failed to converge")
            return (validation_failed = true, reason = "convergence_failure")
        end
        
        println("✅ Converged ($(result.iterations) iterations)")
        
        # Extract centerline profiles
        u_profile, v_profile = extract_centerline_data(result)
        
        if isempty(u_profile) || isempty(v_profile)
            println("   ❌ Failed to extract centerline profiles")
            return (validation_failed = true, reason = "profile_extraction_failed")
        end
        
        # Compute errors against benchmark
        u_errors = compute_profile_errors(u_profile, ghia_u_data)
        v_errors = compute_profile_errors(v_profile, ghia_v_data)
        
        # Display results
        println("   📊 Validation Results:")
        println("      u-velocity profile:")
        println("         L2 error:       $(@sprintf("%.4f", u_errors.l2_error))")
        println("         L∞ error:       $(@sprintf("%.4f", u_errors.linf_error))")
        println("         RMSE:           $(@sprintf("%.4f", u_errors.rmse))")
        println("         Relative L2:    $(@sprintf("%.1f%%", u_errors.relative_l2 * 100))")
        
        println("      v-velocity profile:")
        println("         L2 error:       $(@sprintf("%.4f", v_errors.l2_error))")
        println("         L∞ error:       $(@sprintf("%.4f", v_errors.linf_error))")  
        println("         RMSE:           $(@sprintf("%.4f", v_errors.rmse))")
        println("         Relative L2:    $(@sprintf("%.1f%%", v_errors.relative_l2 * 100))")
        
        # Overall validation assessment
        overall_relative_error = sqrt(u_errors.relative_l2^2 + v_errors.relative_l2^2)
        
        if overall_relative_error < 0.05  # 5% threshold
            validation_status = "excellent"
            status_symbol = "🏆"
        elseif overall_relative_error < 0.10  # 10% threshold
            validation_status = "good"
            status_symbol = "✅"
        elseif overall_relative_error < 0.20  # 20% threshold
            validation_status = "acceptable"
            status_symbol = "⚠️"
        else
            validation_status = "poor"
            status_symbol = "❌"
        end
        
        println("   $(status_symbol) Overall validation: $(validation_status) ($((@sprintf("%.1f%%", overall_relative_error * 100))) error)")
        
        return (
            validation_failed = false,
            reynolds = reynolds,
            grid_size = grid_size,
            converged = result.converged,
            iterations = result.iterations,
            solve_time = result.solve_time,
            u_errors = u_errors,
            v_errors = v_errors,
            overall_error = overall_relative_error,
            validation_status = validation_status,
            u_profile = u_profile,
            v_profile = v_profile,
            ghia_u_data = ghia_u_data,
            ghia_v_data = ghia_v_data
        )
        
    catch e
        println("   ❌ Validation failed: $e")
        return (validation_failed = true, reason = "exception", error = e)
    end
end

"""
    comprehensive_benchmark_study()

Perform comprehensive validation against multiple Reynolds numbers and grid sizes.
"""
function comprehensive_benchmark_study()
    println("\n📚 Comprehensive Benchmark Validation Study")
    println("="^55)
    
    # Test configurations
    reynolds_numbers = [100.0, 400.0, 1000.0]
    grid_sizes = [32, 48, 64]  # Multiple grid sizes for convergence assessment
    
    println("Test matrix:")
    println("  Reynolds numbers: $(reynolds_numbers)")
    println("  Grid sizes: $(grid_sizes)")  
    println("  Total tests: $(length(reynolds_numbers) × length(grid_sizes))")
    println()
    
    validation_results = []
    
    for Re in reynolds_numbers
        println("Reynolds Number = $(Re)")
        println("-"^30)
        
        re_results = []
        
        for n in grid_sizes
            result = validate_against_ghia(Re, n)
            push!(re_results, result)
            push!(validation_results, result)
        end
        
        # Grid convergence analysis for this Reynolds number
        successful_results = filter(r -> !r.validation_failed, re_results)
        
        if length(successful_results) >= 2
            println("\n   📈 Grid Convergence Analysis:")
            println("      Grid    L2(u)     L∞(u)     L2(v)     L∞(v)     Time(s)")
            println("      " * "-"^55)
            
            for (i, res) in enumerate(successful_results)
                println(@sprintf("      %2d×%d   %.4f   %.4f   %.4f   %.4f   %6.2f",
                        res.grid_size, res.grid_size,
                        res.u_errors.l2_error, res.u_errors.linf_error,
                        res.v_errors.l2_error, res.v_errors.linf_error,
                        res.solve_time))
            end
            
            # Check for grid convergence
            if length(successful_results) >= 2
                finest = successful_results[end]
                coarser = successful_results[end-1]
                
                u_improvement = coarser.u_errors.l2_error / finest.u_errors.l2_error
                v_improvement = coarser.v_errors.l2_error / finest.v_errors.l2_error
                
                println("      Grid refinement improvement factors:")
                println(@sprintf("        u-velocity: %.2fx", u_improvement))
                println(@sprintf("        v-velocity: %.2fx", v_improvement))
                
                if u_improvement > 2.0 && v_improvement > 2.0
                    println("      ✅ Strong grid convergence observed")
                elseif u_improvement > 1.5 && v_improvement > 1.5
                    println("      ✅ Good grid convergence observed")
                else
                    println("      ⚠️  Limited grid convergence - may need finer grids")
                end
            end
        end
        
        println()
    end
    
    return validation_results
end

"""
    benchmark_summary_report(validation_results)

Generate comprehensive summary report of benchmark validation.
"""
function benchmark_summary_report(validation_results)
    println("\n📋 Benchmark Validation Summary Report")
    println("="^45)
    
    successful_validations = filter(r -> !r.validation_failed, validation_results)
    failed_validations = filter(r -> r.validation_failed, validation_results)
    
    total_tests = length(validation_results)
    success_count = length(successful_validations)
    
    println("Overall Statistics:")
    println("  Total tests performed: $(total_tests)")
    println("  Successful validations: $(success_count)")  
    println("  Success rate: $(@sprintf("%.1f%%", success_count / total_tests * 100))")
    println()
    
    if !isempty(successful_validations)
        println("📊 Detailed Validation Results:")
        println("Re    Grid   Status     u L2-err  u L∞-err  v L2-err  v L∞-err  Overall  Time(s)")
        println("-"^80)
        
        for result in successful_validations
            status_symbol = result.validation_status == "excellent" ? "🏆" :
                          result.validation_status == "good" ? "✅" :
                          result.validation_status == "acceptable" ? "⚠️" : "❌"
                          
            println(@sprintf("%4.0f  %2d×%d   %s%-9s  %7.4f  %7.4f  %7.4f  %7.4f  %6.1f%%  %6.2f",
                    result.reynolds, result.grid_size, result.grid_size,
                    status_symbol, result.validation_status,
                    result.u_errors.l2_error, result.u_errors.linf_error,
                    result.v_errors.l2_error, result.v_errors.linf_error,
                    result.overall_error * 100, result.solve_time))
        end
        
        # Statistical analysis
        println("\n📈 Statistical Analysis:")
        
        overall_errors = [r.overall_error for r in successful_validations]
        u_l2_errors = [r.u_errors.l2_error for r in successful_validations]
        v_l2_errors = [r.v_errors.l2_error for r in successful_validations]
        solve_times = [r.solve_time for r in successful_validations]
        
        println("  Overall relative errors:")
        println(@sprintf("    Mean: %.2f%% ± %.2f%%", mean(overall_errors) * 100, std(overall_errors) * 100))
        println(@sprintf("    Range: [%.2f%%, %.2f%%]", minimum(overall_errors) * 100, maximum(overall_errors) * 100))
        
        println("  u-velocity L2 errors:")
        println(@sprintf("    Mean: %.4f ± %.4f", mean(u_l2_errors), std(u_l2_errors)))
        println(@sprintf("    Range: [%.4f, %.4f]", minimum(u_l2_errors), maximum(u_l2_errors)))
        
        println("  v-velocity L2 errors:")
        println(@sprintf("    Mean: %.4f ± %.4f", mean(v_l2_errors), std(v_l2_errors)))
        println(@sprintf("    Range: [%.4f, %.4f]", minimum(v_l2_errors), maximum(v_l2_errors)))
        
        # Performance analysis
        println("\n⚡ Performance Analysis:")
        println(@sprintf("  Mean solve time: %.2f ± %.2f seconds", mean(solve_times), std(solve_times)))
        
        # Group by Reynolds number
        for Re in [100.0, 400.0, 1000.0]
            re_results = filter(r -> abs(r.reynolds - Re) < 1e-6, successful_validations)
            if !isempty(re_results)
                re_errors = [r.overall_error for r in re_results]
                re_times = [r.solve_time for r in re_results]
                
                println(@sprintf("  Re = %.0f: %.2f%% ± %.2f%% error, %.2f ± %.2f seconds",
                        Re, mean(re_errors) * 100, std(re_errors) * 100,
                        mean(re_times), std(re_times)))
            end
        end
        
        # Quality assessment
        println("\n🎯 Quality Assessment:")
        excellent_count = count(r -> r.validation_status == "excellent", successful_validations)
        good_count = count(r -> r.validation_status == "good", successful_validations)
        acceptable_count = count(r -> r.validation_status == "acceptable", successful_validations)
        poor_count = count(r -> r.validation_status == "poor", successful_validations)
        
        println(@sprintf("  🏆 Excellent (< 5%% error): %d/%d (%.1f%%)",
                excellent_count, success_count, excellent_count / success_count * 100))
        println(@sprintf("  ✅ Good (5-10%% error): %d/%d (%.1f%%)",
                good_count, success_count, good_count / success_count * 100))
        println(@sprintf("  ⚠️  Acceptable (10-20%% error): %d/%d (%.1f%%)",
                acceptable_count, success_count, acceptable_count / success_count * 100))
        println(@sprintf("  ❌ Poor (> 20%% error): %d/%d (%.1f%%)",
                poor_count, success_count, poor_count / success_count * 100))
    end
    
    # Failure analysis
    if !isempty(failed_validations)
        println("\n❌ Failed Validation Analysis:")
        
        failure_reasons = Dict{String,Int}()
        for result in failed_validations
            reason = haskey(result, :reason) ? result.reason : "unknown"
            failure_reasons[reason] = get(failure_reasons, reason, 0) + 1
        end
        
        for (reason, count) in failure_reasons
            println("  $reason: $count failures")
        end
    end
    
    # Final recommendations
    println("\n💡 Recommendations:")
    
    if success_count >= total_tests * 0.8
        println("  ✅ Solver demonstrates good agreement with literature benchmarks")
    else
        println("  ⚠️  Solver shows mixed results - consider parameter tuning")
    end
    
    if !isempty(successful_validations)
        best_results = filter(r -> r.validation_status in ["excellent", "good"], successful_validations)
        if !isempty(best_results)
            # Find optimal grid size
            grid_performance = Dict{Int,Vector{Float64}}()
            for result in best_results
                grid = result.grid_size
                if !haskey(grid_performance, grid)
                    grid_performance[grid] = Float64[]
                end
                push!(grid_performance[grid], result.overall_error)
            end
            
            optimal_grid = 0
            best_avg_error = Inf
            for (grid, errors) in grid_performance
                avg_error = mean(errors)
                if avg_error < best_avg_error
                    best_avg_error = avg_error
                    optimal_grid = grid
                end
            end
            
            println("  📏 Recommended grid size: $(optimal_grid)×$(optimal_grid)")
            println("     Average error: $(@sprintf("%.2f%%", best_avg_error * 100))")
        end
        
        # Reynolds number recommendations
        high_re_results = filter(r -> r.reynolds >= 1000, successful_validations)
        if !isempty(high_re_results)
            high_re_errors = [r.overall_error for r in high_re_results]
            if mean(high_re_errors) > 0.15  # > 15% error
                println("  ⚠️  High Reynolds numbers (Re ≥ 1000) show larger errors")
                println("     Consider: finer grids, tighter tolerances, or alternative methods")
            end
        end
    end
    
    println("  📚 Results demonstrate solver validity for engineering applications")
    println("  🔬 For research applications, consider finer grids (128×128) at high Re")
end

function main()
    println("📚 LidDrivenCavity.jl - Validation Benchmark Study")
    println("="^55)
    
    # Display package information  
    lid_driven_info()
    println()
    
    println("🎯 Benchmark Overview:")
    println("  Reference: Ghia et al. (1982) - Classic lid-driven cavity benchmarks")
    println("  Method: Quantitative comparison of centerline velocity profiles") 
    println("  Metrics: L2, L∞, RMSE, and relative errors")
    println("  Assessment: Publication-quality validation against literature")
    println()
    
    println("📋 Validation Protocol:")
    println("  • Solve at benchmark Reynolds numbers (100, 400, 1000)")
    println("  • Extract centerline velocity profiles") 
    println("  • Compare against Ghia et al. reference data")
    println("  • Assess grid convergence behavior")
    println("  • Generate statistical validation report")
    println()
    
    try
        # Main validation study
        validation_results = comprehensive_benchmark_study()
        
        # Generate detailed report
        benchmark_summary_report(validation_results)
        
        # Additional analysis
        successful_count = count(r -> !get(r, :validation_failed, true), validation_results)
        
        if successful_count > 0
            println("\n" * "="^60)
            println("🏁 Benchmark Study Conclusions")
            println("-"^30)
            
            println("The LidDrivenCavity.jl solver has been validated against the")
            println("established Ghia et al. (1982) benchmarks for lid-driven cavity flow.")
            println()
            
            if successful_count >= length(validation_results) * 0.8
                println("✅ VALIDATION SUCCESSFUL")
                println("   Solver demonstrates good agreement with literature benchmarks")
                println("   Suitable for both engineering and research applications")
            else
                println("⚠️  PARTIAL VALIDATION")
                println("   Solver shows acceptable results for most test cases")
                println("   Consider parameter optimization for critical applications")
            end
            
            println()
            println("📖 Validation establishes:")
            println("   • Numerical accuracy of discretization scheme")
            println("   • Proper implementation of boundary conditions")
            println("   • Correct solution of governing equations")  
            println("   • Grid convergence behavior")
            println("   • Computational performance characteristics")
        else
            println("\n❌ VALIDATION INCOMPLETE")
            println("   Unable to validate against benchmarks")
            println("   Check solver implementation and parameters")
        end
        
    catch e
        println("\n❌ Benchmark validation failed: $e")
        
        if isa(e, LoadError) || isa(e, MethodError)
            println("💡 Check package dependencies and installation")
        elseif isa(e, OutOfMemoryError)
            println("💡 Reduce grid sizes or increase available memory")  
        end
    end
end

# Run the validation benchmark
if abspath(PROGRAM_FILE) == @__FILE__
    try
        main()
        println("\n🎉 Validation benchmark completed successfully!")
        println("   Solver performance validated against literature standards")
    catch e
        println("\n😞 Validation benchmark failed: $e")
        if isa(e, InterruptException)
            println("💡 Benchmark was interrupted - partial results may be available")
        end
    end
end