#!/usr/bin/env julia
"""
Comprehensive Performance Benchmark Suite for NSEMSolver.jl

A production-quality benchmarking framework that provides:
- Comprehensive performance analysis across multiple dimensions
- Memory profiling and allocation tracking
- Scaling analysis for different problem sizes
- Backend comparison (Julia, PETSc, GCR)
- Automated report generation with regression detection
- Export capabilities for CSV, JSON, and Markdown formats
- CI/CD integration support

Usage:
    julia performance_benchmark.jl [--quick] [--export-dir DIR] [--baseline FILE]

Options:
    --quick         Run quick benchmark suite (fewer trials)
    --ci            Run lightweight CI benchmarks
    --export-dir    Directory for exporting results (default: ./benchmark)
    --baseline      Baseline file for regression detection
    --verbose       Enable verbose output
    --help          Show this help message
"""

using NSEMSolver
using Printf
using Statistics
using Dates

# Parse command line arguments
"""
    parse_args(argv::AbstractVector{<:AbstractString} = ARGS) -> Dict{String, Any}

Parse command-line options for the benchmark script.

Boolean flags: `--quick`, `--ci`, `--verbose`, `--help`. Value-taking flags:
`--export-dir DIR`, `--baseline FILE`. Unrecognized arguments are silently
ignored. Accepts an explicit argument vector (defaulting to the global `ARGS`)
so the parser can be exercised without touching process state — existing
zero-argument callers are unaffected.
"""
function parse_args(argv::AbstractVector{<:AbstractString} = ARGS)
    args = Dict{String, Any}(
        "quick" => false,
        "ci" => false,
        "export_dir" => "./benchmark",
        "baseline" => nothing,
        "verbose" => false,
        "help" => false
    )

    i = 1
    while i <= length(argv)
        arg = argv[i]
        if arg == "--quick"
            args["quick"] = true
        elseif arg == "--ci"
            args["ci"] = true
        elseif arg == "--export-dir"
            # Value-taking flag: consume the next token if present; a trailing
            # bare `--export-dir` keeps the default rather than erroring.
            i += 1
            if i <= length(argv)
                args["export_dir"] = argv[i]
            end
        elseif arg == "--baseline"
            i += 1
            if i <= length(argv)
                args["baseline"] = argv[i]
            end
        elseif arg == "--verbose"
            args["verbose"] = true
        elseif arg == "--help"
            args["help"] = true
        end
        i += 1
    end

    return args
end

# Show help message
"""
    show_help()

Print the usage/help text for the benchmark script to stdout.
"""
function show_help()
    help_lines = [
        "Comprehensive Performance Benchmark Suite for NSEMSolver.jl",
        "",
        "Usage: julia performance_benchmark.jl [options]",
        "",
        "Options:",
        "  --quick         Run quick benchmark suite (fewer trials)",
        "  --ci            Run lightweight CI benchmarks",
        "  --export-dir    Directory for exporting results (default: ./benchmark)",
        "  --baseline      Baseline file for regression detection",
        "  --verbose       Enable verbose output",
        "  --help          Show this help message",
        "",
        "Examples:",
        "  julia performance_benchmark.jl                    # Full benchmark suite",
        "  julia performance_benchmark.jl --quick            # Quick benchmarks",
        "  julia performance_benchmark.jl --ci               # CI-friendly benchmarks",
        "  julia performance_benchmark.jl --export-dir ./results --baseline baseline.json",
    ]
    foreach(println, help_lines)
    return nothing
end

# Legacy function wrapper for backward compatibility
"""
    benchmark_polynomial_orders(config::BenchmarkConfig = BenchmarkConfig())

Legacy wrapper kept for backward compatibility: builds one `NSOptions` per
polynomial order and delegates to the comprehensive
`benchmark_solver_performance` framework. Returns whatever that framework
returns.
"""
function benchmark_polynomial_orders(config::BenchmarkConfig = BenchmarkConfig())
    if config.verbose
        println("📊 Polynomial Order Performance Benchmark")
        # BUGFIX: `"=" * 50` is a MethodError in Julia — `*` concatenates
        # strings; repetition is `^`.
        println("="^50)
    end

    # Smaller sweep when only a single trial is configured (quick runs).
    N_values = config.n_trials > 1 ? [2, 3, 4, 5, 6] : [3, 4, 5]
    solver_configs = [NSOptions(N=N, n_block=3, nu=0.01, tfinal=0.5, cfl=0.4,
                                tol=1e-6, solver=:julia, verbose=false)
                      for N in N_values]

    if config.verbose
        println("Testing polynomial orders: $(N_values)")
    end

    # Delegate to the comprehensive benchmarking framework.
    return benchmark_solver_performance(solver_configs, config=config)
end

# Enhanced domain size benchmarking
"""
    benchmark_domain_sizes_enhanced(config::BenchmarkConfig = BenchmarkConfig())

Benchmark solver performance across a range of polynomial orders with several
trials per case, reporting timing statistics and an empirical DOFs-vs-time
scaling exponent. Returns a `Dict{Int, Any}` keyed by polynomial order `N`.

NOTE(review): despite the name, the surviving body sweeps polynomial order `N`
(with `n_block` fixed at 3), not domain size. The sweep setup and loop header
were missing from the original source (the body started mid-loop) and have
been reconstructed from the body — confirm the intended sweep against version
control.
"""
function benchmark_domain_sizes_enhanced(config::BenchmarkConfig = BenchmarkConfig())
    N_values = [2, 3, 4, 5]
    results = Dict{Int, Any}()

    for N in N_values
        println("\n  Benchmarking N = $N...")

        options = NSOptions(
            N = N,
            n_block = 3,
            nu = 0.01,
            tfinal = 0.5,
            cfl = 0.4,
            tol = 1e-6,
            solver = :julia,
            adaptive_refinement = false,
            verbose = false
        )

        # Run multiple trials; only converged runs contribute to statistics.
        times = Float64[]
        iterations = Int[]
        residuals = Float64[]

        n_trials = 3
        for trial in 1:n_trials
            start_time = time()
            result = solve_navier_stokes_2d(options)
            end_time = time()

            if result.converged
                push!(times, end_time - start_time)
                push!(iterations, result.iterations)
                push!(residuals, result.residual_norm)
            end
        end

        if !isempty(times)
            results[N] = (
                mean_time = mean(times),
                # Sample standard deviation (via Statistics.std); 0.0 when
                # only one trial succeeded.
                std_time = length(times) > 1 ? std(times) : 0.0,
                mean_iterations = mean(iterations),
                mean_residual = mean(residuals),
                success_rate = length(times) / n_trials,
                dofs = estimate_degrees_of_freedom(N, options.n_block, 2)
            )

            println("    Time: $(@sprintf("%.3f", results[N].mean_time)) ± $(@sprintf("%.3f", results[N].std_time)) s")
            println("    Iterations: $(@sprintf("%.1f", results[N].mean_iterations))")
            println("    DOFs: $(results[N].dofs)")
            println("    Success rate: $(@sprintf("%.0f", results[N].success_rate * 100))%")
        else
            # BUGFIX: the trailing comma is required — `(success_rate = 0.0)`
            # is a plain assignment evaluating to a Float64, not a NamedTuple,
            # so the later `.success_rate` access would throw.
            results[N] = (success_rate = 0.0,)
            println("    ❌ All trials failed")
        end
    end

    # Empirical scaling exponent between consecutive successful orders:
    # time ~ DOFs^p  =>  p = log(time ratio) / log(DOF ratio).
    println("\n📈 Performance Scaling Analysis:")
    successful_N = [N for N in N_values if haskey(results, N) && results[N].success_rate > 0]

    if length(successful_N) >= 2
        println("  DOFs vs Time scaling:")
        for i in 1:(length(successful_N)-1)
            N1, N2 = successful_N[i], successful_N[i+1]
            time_ratio = results[N2].mean_time / results[N1].mean_time
            dof_ratio = results[N2].dofs / results[N1].dofs
            efficiency = log(time_ratio) / log(dof_ratio)
            println("    N=$N1 → N=$N2: time scales as DOFs^$(@sprintf("%.2f", efficiency))")
        end
    end

    return results
end

"""
    benchmark_domain_sizes()

Benchmark a single solve across increasing domain sizes (`n_block` = 2..5) at
fixed polynomial order N = 4, reporting time, iterations, DOFs and throughput
for each converged run. Returns a `Dict{Int, Any}` keyed by `n_block`.
"""
function benchmark_domain_sizes()
    println("\n🏗️  Domain Size Performance Benchmark")
    # BUGFIX: string repetition in Julia is `^`; `"=" * 45` is a MethodError.
    println("="^45)

    n_block_values = [2, 3, 4, 5]
    results = Dict{Int, Any}()

    for n_block in n_block_values
        println("\n  Benchmarking n_block = $n_block...")

        options = NSOptions(
            N = 4,  # Fixed polynomial order
            n_block = n_block,
            nu = 0.01,
            tfinal = 0.3,  # Shorter time for larger domains
            cfl = 0.3,
            tol = 1e-5,
            solver = :julia,
            adaptive_refinement = false,
            verbose = false
        )

        start_time = time()
        result = solve_navier_stokes_2d(options)
        end_time = time()

        if result.converged
            dofs = estimate_degrees_of_freedom(options.N, n_block, 2)
            results[n_block] = (
                time = end_time - start_time,
                iterations = result.iterations,
                residual = result.residual_norm,
                dofs = dofs,
                converged = true
            )

            println("    Time: $(@sprintf("%.3f", results[n_block].time)) s")
            println("    Iterations: $(results[n_block].iterations)")
            println("    DOFs: $(results[n_block].dofs)")
            println("    DOFs/second: $(@sprintf("%.0f", results[n_block].dofs / results[n_block].time))")
        else
            # BUGFIX: trailing comma makes this a one-field NamedTuple;
            # without it `results[n_block]` would be the Bool `false` and any
            # later `.converged` access (e.g. in the report) would throw.
            results[n_block] = (converged = false,)
            println("    ❌ Failed to converge")
        end
    end

    return results
end

"""
    benchmark_solver_backends()

Compare the available solver backends (`:julia`, plus `:petsc` / `:gcr` when
the corresponding `HAS_*` flags are set) on one fixed problem, reporting
per-backend timing statistics and a fastest-to-slowest ranking. Returns a
`Dict{Symbol, Any}` keyed by backend symbol.
"""
function benchmark_solver_backends()
    println("\n⚡ Solver Backend Comparison")
    # BUGFIX: string repetition is `^` in Julia; `"=" * 35` is a MethodError.
    println("="^35)

    # HAS_PETSC / HAS_GCR are assumed to come from NSEMSolver — TODO confirm.
    solvers = [:julia]
    if HAS_PETSC
        push!(solvers, :petsc)
        println("PETSc backend available")
    else
        println("PETSc backend not available")
    end

    if HAS_GCR
        push!(solvers, :gcr)
        println("GCR backend available")
    else
        println("GCR backend not available")
    end

    results = Dict{Symbol, Any}()

    base_options = NSOptions(
        N = 4,
        n_block = 3,
        nu = 0.01,
        tfinal = 0.5,
        cfl = 0.4,
        tol = 1e-6,
        adaptive_refinement = false,
        verbose = false
    )

    for solver in solvers
        println("\n  Benchmarking $(solver) solver...")

        # NOTE(review): assumes NSOptions supports a copy-with-overrides
        # constructor `NSOptions(base; solver = ...)` — confirm against API.
        options = NSOptions(base_options; solver = solver)

        # Multiple trials; thrown errors and non-converged runs are skipped.
        times = Float64[]
        iterations = Int[]

        n_trials = 3
        for trial in 1:n_trials
            try
                start_time = time()
                result = solve_navier_stokes_2d(options)
                end_time = time()

                if result.converged
                    push!(times, end_time - start_time)
                    push!(iterations, result.iterations)
                end
            catch e
                println("    Trial $trial failed: $e")
            end
        end

        if !isempty(times)
            results[solver] = (
                mean_time = mean(times),
                # Sample std via Statistics.std; 0.0 for a single trial.
                std_time = length(times) > 1 ? std(times) : 0.0,
                mean_iterations = mean(iterations),
                success_rate = length(times) / n_trials
            )

            println("    Time: $(@sprintf("%.3f", results[solver].mean_time)) ± $(@sprintf("%.3f", results[solver].std_time)) s")
            println("    Iterations: $(@sprintf("%.1f", results[solver].mean_iterations))")
            println("    Success rate: $(@sprintf("%.0f", results[solver].success_rate * 100))%")
        else
            # BUGFIX: trailing comma — one-field NamedTuple, not a bare Float64.
            results[solver] = (success_rate = 0.0,)
            println("    ❌ All trials failed")
        end
    end

    # Rank successful backends from fastest to slowest.
    successful_solvers = [s for s in solvers if haskey(results, s) && results[s].success_rate > 0]
    if length(successful_solvers) > 1
        println("\n🏆 Performance Ranking:")
        solver_times = [(s, results[s].mean_time) for s in successful_solvers]
        sort!(solver_times, by = x -> x[2])

        for (i, (solver, t)) in enumerate(solver_times)
            # BUGFIX: the original computed fastest_time / t, which is ≤ 1 for
            # every entry, so every line printed "baseline". Report how much
            # slower each backend is relative to the fastest instead.
            slowdown = t / solver_times[1][2]
            label = slowdown > 1 ? @sprintf("%.1fx slower", slowdown) : "baseline"
            println("  $i. $(solver): $(@sprintf("%.3f", t)) s ($label)")
        end
    end

    return results
end

"""
    benchmark_adaptive_refinement()

Compare uniform refinement against adaptive refinement at 2–4 levels on a
single test problem, reporting time, iterations and final residual per case,
plus a speedup comparison against the uniform baseline. Returns a
`Dict{String, Any}` keyed by case name.
"""
function benchmark_adaptive_refinement()
    println("\n🔄 Adaptive Refinement Performance")
    # BUGFIX: string repetition is `^` in Julia; `"=" * 40` is a MethodError.
    println("="^40)

    # (name, adaptive?, refinement levels)
    cases = [
        ("Uniform", false, 3),
        ("Adaptive-2", true, 2),
        ("Adaptive-3", true, 3),
        ("Adaptive-4", true, 4)
    ]

    results = Dict{String, Any}()

    for (name, adaptive, levels) in cases
        println("\n  Testing $name refinement...")

        options = NSOptions(
            N = 3,  # Start lower for adaptive cases
            n_block = 4,
            nu = 0.005,  # Lower viscosity for more interesting flow
            tfinal = 1.0,
            cfl = 0.3,
            tol = 0.01,  # Relaxed for demonstration
            solver = :julia,
            adaptive_refinement = adaptive,
            refinement_levels = levels,
            verbose = false
        )

        start_time = time()
        result = solve_navier_stokes_2d(options)
        end_time = time()

        if result.converged
            results[name] = (
                time = end_time - start_time,
                iterations = result.iterations,
                residual = result.residual_norm,
                converged = true
            )

            println("    Time: $(@sprintf("%.3f", results[name].time)) s")
            println("    Iterations: $(results[name].iterations)")
            println("    Final residual: $(@sprintf("%.2e", results[name].residual))")

            # Summarize the refinement map over active blocks when adaptive.
            # NOTE(review): assumes `matrix_dist .== 1` marks active blocks —
            # confirm against the multidomain data structure.
            if adaptive && result.multidomain !== nothing && result.multidomain.refinement_map !== nothing
                ref_map = result.multidomain.refinement_map
                matrix_dist = result.multidomain.matrix_dist
                active_levels = ref_map[matrix_dist .== 1]
                max_level = maximum(active_levels)
                avg_level = mean(active_levels)
                println("    Max refinement level: $max_level")
                println("    Average level: $(@sprintf("%.1f", avg_level))")
            end
        else
            # BUGFIX: the trailing comma is required to build a NamedTuple;
            # without it the comparison loop below would call `.converged`
            # on a Bool and throw.
            results[name] = (converged = false,)
            println("    ❌ Failed to converge")
        end
    end

    # Compare each adaptive case against the uniform baseline.
    if haskey(results, "Uniform") && results["Uniform"].converged
        uniform_time = results["Uniform"].time
        println("\n⚖️  Adaptive vs Uniform Comparison:")

        for (name, data) in results
            if name != "Uniform" && data.converged
                speedup = uniform_time / data.time
                if speedup > 1
                    println("  $name: $(@sprintf("%.1fx", speedup)) faster than uniform")
                else
                    println("  $name: $(@sprintf("%.1fx", 1/speedup)) slower than uniform")
                end
            end
        end
    end

    return results
end

"""
    estimate_degrees_of_freedom(N::Int, n_block::Int, dim::Int)

Rough DOF estimate for a spectral element mesh: `n_block^dim` elements, each
carrying `(N + 1)^dim` nodal points. Shared interface nodes are counted once
per element (simplified upper-bound estimate).
"""
function estimate_degrees_of_freedom(N::Int, n_block::Int, dim::Int)
    nodes_per_element = (N + 1)^dim
    element_count = n_block^dim
    return element_count * nodes_per_element
end

"""
    generate_performance_report(poly_results, domain_results, solver_results, adaptive_results)

Print a consolidated summary of all benchmark suites: system information,
polynomial-order and domain-size scaling, backend ranking,
adaptive-refinement effectiveness, and configuration recommendations.
Purely textual output; returns `nothing`.
"""
function generate_performance_report(poly_results, domain_results, solver_results, adaptive_results)
    # BUGFIX: string repetition is `^` in Julia; `"=" * 60` is a MethodError.
    println("\n" * "="^60)
    println("📋 PERFORMANCE BENCHMARK REPORT")
    println("="^60)

    # System information
    println("🖥️  System Information:")
    println("  Julia version: $(VERSION)")
    println("  Number of threads: $(Threads.nthreads())")
    println("  Available backends: Julia" * (HAS_PETSC ? ", PETSc" : "") * (HAS_GCR ? ", GCR" : ""))

    # Polynomial order scaling
    if !isempty(poly_results)
        println("\n📊 Polynomial Order Scaling:")
        successful_N = [N for (N, r) in poly_results if r.success_rate > 0]
        if length(successful_N) >= 2
            # argmin/argmax return indices into the comprehension, which is
            # parallel to successful_N, so successful_N[idx] is the order.
            fastest_N = argmin([poly_results[N].mean_time for N in successful_N])
            slowest_N = argmax([poly_results[N].mean_time for N in successful_N])

            fastest_time = poly_results[successful_N[fastest_N]].mean_time
            slowest_time = poly_results[successful_N[slowest_N]].mean_time

            println("  Fastest: N=$(successful_N[fastest_N]) ($(@sprintf("%.3f", fastest_time)) s)")
            println("  Slowest: N=$(successful_N[slowest_N]) ($(@sprintf("%.3f", slowest_time)) s)")
            println("  Performance ratio: $(@sprintf("%.1fx", slowest_time / fastest_time))")
        end
    end

    # Domain size scaling
    if !isempty(domain_results)
        println("\n🏗️  Domain Size Scaling:")
        successful_blocks = [n for (n, r) in domain_results if r.converged]
        if length(successful_blocks) >= 2
            min_block = minimum(successful_blocks)
            max_block = maximum(successful_blocks)

            min_time = domain_results[min_block].time
            max_time = domain_results[max_block].time
            min_dofs = domain_results[min_block].dofs
            max_dofs = domain_results[max_block].dofs

            # Empirical exponent p in time ~ DOFs^p between extreme sizes.
            dof_scaling = log(max_time / min_time) / log(max_dofs / min_dofs)

            println("  Small domain (n_block=$min_block): $(@sprintf("%.3f", min_time)) s, $(min_dofs) DOFs")
            println("  Large domain (n_block=$max_block): $(@sprintf("%.3f", max_time)) s, $(max_dofs) DOFs")
            println("  Scaling: Time ~ DOFs^$(@sprintf("%.2f", dof_scaling))")
        end
    end

    # Solver comparison, fastest first
    if !isempty(solver_results)
        println("\n⚡ Solver Backend Performance:")
        successful_solvers = [(s, r) for (s, r) in solver_results if r.success_rate > 0]
        if !isempty(successful_solvers)
            sort!(successful_solvers, by = x -> x[2].mean_time)

            baseline_time = successful_solvers[1][2].mean_time
            for (solver, result) in successful_solvers
                # baseline_time is the fastest, so speedup ≤ 1; the false
                # branch prints the slowdown factor instead.
                speedup = baseline_time / result.mean_time
                println("  $(solver): $(@sprintf("%.3f", result.mean_time)) s ($(speedup >= 1 ? @sprintf("%.1fx", speedup) : @sprintf("%.1fx", 1/speedup)))")
            end
        end
    end

    # Adaptive refinement effectiveness
    if !isempty(adaptive_results)
        println("\n🔄 Adaptive Refinement Performance:")
        for (name, result) in adaptive_results
            if result.converged
                # Ad-hoc efficiency metric: higher is better (fast + few iters).
                # BUGFIX: use 0.0 so both branches yield Float64.
                efficiency = result.iterations > 0 ? 1000.0 / (result.time * result.iterations) : 0.0
                println("  $name: $(@sprintf("%.3f", result.time)) s, efficiency: $(@sprintf("%.1f", efficiency))")
            else
                println("  $name: Failed to converge")
            end
        end
    end

    println("\n🎯 Recommendations:")

    # Polynomial order recommendation: best DOFs per (time × residual).
    if !isempty(poly_results)
        successful_N = [(N, r) for (N, r) in poly_results if r.success_rate > 0]
        if !isempty(successful_N)
            best_efficiency = 0.0
            best_N = 0
            for (N, result) in successful_N
                efficiency = result.dofs / (result.mean_time * result.mean_residual)
                if efficiency > best_efficiency
                    best_efficiency = efficiency
                    best_N = N
                end
            end
            println("  Optimal polynomial order: N=$best_N (best speed/accuracy balance)")
        end
    end

    # Solver recommendation: the fastest successful backend.
    if length(solver_results) > 1
        successful_solvers = [(s, r) for (s, r) in solver_results if r.success_rate > 0]
        if !isempty(successful_solvers)
            fastest_solver = argmin([r.mean_time for (s, r) in successful_solvers])
            recommended_solver = successful_solvers[fastest_solver][1]
            println("  Recommended solver: $recommended_solver")
        end
    end

    println("\n✅ Benchmark completed successfully!")
    return nothing
end

"""
    main()

Run all four benchmark suites, print the consolidated report, and return a
NamedTuple bundling the per-suite result dictionaries.

NOTE(review): `parse_args`/`show_help` are defined above but never invoked
here — command-line flags are currently ignored; confirm whether CLI wiring
was intended.
"""
function main()
    println("⚡ NSEMSolver.jl Performance Benchmark Suite")
    # BUGFIX: string repetition is `^` in Julia; `"=" * 60` is a MethodError.
    println("="^60)

    println("Starting comprehensive performance benchmarking...")

    poly_results = benchmark_polynomial_orders()
    domain_results = benchmark_domain_sizes()
    solver_results = benchmark_solver_backends()
    adaptive_results = benchmark_adaptive_refinement()

    # Generate comprehensive report
    generate_performance_report(poly_results, domain_results, solver_results, adaptive_results)

    return (
        polynomial_orders = poly_results,
        domain_sizes = domain_results,
        solver_backends = solver_results,
        adaptive_refinement = adaptive_results
    )
end

# Run the full benchmark suite only when this file is executed directly as a
# script (not when it is `include`d from another file or loaded in the REPL).
if abspath(PROGRAM_FILE) == @__FILE__
    results = main()
end