#!/usr/bin/env julia

"""
Performance Comparison Example

This example compares different solver backends (Julia vs PETSc) and preconditioners
for the 2D lid-driven cavity problem. It performs timing and accuracy analysis to help
users choose optimal solver configurations.

Run with: julia performance_comparison.jl

Features:
- Comparison of Julia vs PETSc backends
- Multiple preconditioner types (:none, :diagonal, :ilu)
- Timing analysis with multiple runs for statistical significance
- Memory usage tracking
- Convergence rate analysis
- Accuracy comparison between different solvers
- Performance scaling with grid size

"""

# Add the package to the path
push!(LOAD_PATH, "../src")

using LidDrivenCavity
using Printf
using Statistics
using LinearAlgebra

"""
    run_performance_benchmark(options::LidDrivenOptions; n_runs::Int=3) -> NamedTuple

Run multiple solver runs to get statistically meaningful performance data.
"""
function run_performance_benchmark(options::LidDrivenOptions; n_runs::Int=3)
    times = Float64[]
    iterations = Int[]
    residuals = Float64[]
    converged_runs = 0
    
    println("  Running $(n_runs) trials for statistical analysis...")
    
    for i in 1:n_runs
        if options.verbose && i == 1
            print("    Trial $i (with output): ")
            flush(stdout)
        elseif !options.verbose
            print("    Trial $i: ")
            flush(stdout)
        end
        
        # Suppress output for consistency in timing
        quiet_options = LidDrivenOptions(
            options.n, options.Re, options.dt, options.max_steps, options.tol,
            options.α_ψ, options.α_ω, options.solver, options.preconditioner,
            false, # verbose = false for timing consistency
            options.save_history, options.U_lid
        )
        
        try
            result = solve_lid_driven_2d(quiet_options)
            
            push!(times, result.solve_time)
            push!(iterations, result.iterations)
            push!(residuals, result.residual_norm)
            
            if result.converged
                converged_runs += 1
                println("✓ ($(result.iterations) iter, $(@sprintf("%.3f", result.solve_time))s)")
            else
                println("✗ (failed to converge)")
            end
            
        catch e
            println("✗ (error: $e)")
            push!(times, NaN)
            push!(iterations, 0)
            push!(residuals, NaN)
        end
    end
    
    return (
        times = times,
        iterations = iterations,
        residuals = residuals,
        converged_runs = converged_runs,
        success_rate = converged_runs / n_runs,
        mean_time = mean(filter(!isnan, times)),
        std_time = std(filter(!isnan, times)),
        mean_iterations = mean(filter(>(0), iterations)),
        mean_residual = mean(filter(!isnan, residuals))
    )
end

"""
    compare_solver_backends(n_grid::Int, reynolds::Float64; n_runs::Int=3)

Compare Julia vs PETSc backends with different preconditioners.
"""
function compare_solver_backends(n_grid::Int, reynolds::Float64; n_runs::Int=3)
    
    println("\n🔧 Solver Backend Comparison")
    println("="^60)
    println("Grid size: $(n_grid)×$(n_grid)")
    println("Reynolds number: $(reynolds)")
    println("Runs per configuration: $(n_runs)")
    println()
    
    # Define test configurations
    configs = [
        (:julia, :none, "Julia + No Preconditioner"),
        (:julia, :diagonal, "Julia + Diagonal Preconditioner"),
        (:petsc, :none, "PETSc + No Preconditioner"),
        (:petsc, :diagonal, "PETSc + Diagonal Preconditioner"),
        (:petsc, :ilu, "PETSc + ILU Preconditioner")
    ]
    
    results = []
    
    for (solver, precond, description) in configs
        println("📊 Testing: $(description)")
        
        options = LidDrivenOptions(
            n = n_grid,
            Re = reynolds,
            solver = solver,
            preconditioner = precond,
            max_steps = 2000,
            tol = 1e-6,
            verbose = false,
            save_history = true
        )
        
        try
            benchmark_result = run_performance_benchmark(options; n_runs=n_runs)
            
            push!(results, (
                solver = solver,
                preconditioner = precond,
                description = description,
                benchmark = benchmark_result
            ))
            
            if benchmark_result.success_rate > 0
                println("  ✅ Success rate: $(@sprintf("%.0f", benchmark_result.success_rate * 100))%")
                println("  ⏱️  Mean time: $(@sprintf("%.3f", benchmark_result.mean_time)) ± $(@sprintf("%.3f", benchmark_result.std_time)) seconds")
                println("  🔢 Mean iterations: $(@sprintf("%.1f", benchmark_result.mean_iterations))")
                println("  📉 Final residual: $(@sprintf("%.2e", benchmark_result.mean_residual))")
            else
                println("  ❌ All runs failed to converge")
            end
            
        catch e
            println("  ❌ Configuration failed: $e")
            push!(results, (
                solver = solver,
                preconditioner = precond,
                description = description,
                benchmark = nothing
            ))
        end
        
        println()
    end
    
    return results
end

"""
    analyze_performance_results(results)

Generate comprehensive performance analysis from comparison results.
"""
function analyze_performance_results(results)
    println("\n📈 Performance Analysis Summary")
    println("="^60)
    
    successful_results = filter(r -> r.benchmark !== nothing && r.benchmark.success_rate > 0, results)
    
    if isempty(successful_results)
        println("❌ No successful configurations to analyze")
        return
    end
    
    # Find fastest configuration
    fastest = minimum(r -> r.benchmark.mean_time, successful_results)
    fastest_config = findfirst(r -> r.benchmark.mean_time ≈ fastest, successful_results)
    
    println("🏆 Fastest Configuration:")
    println("   $(successful_results[fastest_config].description)")
    println("   Time: $(@sprintf("%.3f", fastest)) seconds")
    println()
    
    # Find most reliable configuration (highest success rate, then lowest iterations)
    most_reliable = maximum(r -> (r.benchmark.success_rate, -r.benchmark.mean_iterations), successful_results)
    reliable_config = findfirst(r -> (r.benchmark.success_rate, -r.benchmark.mean_iterations) == most_reliable, successful_results)
    
    println("🎯 Most Reliable Configuration:")
    println("   $(successful_results[reliable_config].description)")
    println("   Success rate: $(@sprintf("%.0f", successful_results[reliable_config].benchmark.success_rate * 100))%")
    println("   Mean iterations: $(@sprintf("%.1f", successful_results[reliable_config].benchmark.mean_iterations))")
    println()
    
    # Detailed comparison table
    println("📊 Detailed Comparison:")
    println("Configuration                          Time(s)   Iter   Success   Residual")
    println("-"^75)
    
    for result in successful_results
        b = result.benchmark
        println(@sprintf("%-38s %6.3f   %5.0f   %5.0f%%   %.2e", 
                result.description, 
                b.mean_time, 
                b.mean_iterations,
                b.success_rate * 100,
                b.mean_residual))
    end
    
    # Performance recommendations
    println("\n💡 Recommendations:")
    if length(successful_results) >= 2
        julia_results = filter(r -> r.solver == :julia, successful_results)
        petsc_results = filter(r -> r.solver == :petsc, successful_results)
        
        if !isempty(julia_results) && !isempty(petsc_results)
            julia_time = minimum(r -> r.benchmark.mean_time, julia_results)
            petsc_time = minimum(r -> r.benchmark.mean_time, petsc_results)
            
            if julia_time < petsc_time
                speedup = petsc_time / julia_time
                println("   • Julia backend is faster by $(@sprintf("%.1f", speedup))x for this problem size")
                println("   • Consider Julia backend for moderate-sized problems")
            else
                speedup = julia_time / petsc_time
                println("   • PETSc backend is faster by $(@sprintf("%.1f", speedup))x for this problem size")
                println("   • Consider PETSc backend for better performance and larger problems")
            end
        end
        
        # Preconditioner analysis
        diagonal_results = filter(r -> r.preconditioner == :diagonal, successful_results)
        no_precond_results = filter(r -> r.preconditioner == :none, successful_results)
        
        if !isempty(diagonal_results) && !isempty(no_precond_results)
            diag_iter = minimum(r -> r.benchmark.mean_iterations, diagonal_results)
            no_precond_iter = minimum(r -> r.benchmark.mean_iterations, no_precond_results)
            
            if diag_iter < no_precond_iter
                iter_reduction = (no_precond_iter - diag_iter) / no_precond_iter * 100
                println("   • Diagonal preconditioning reduces iterations by $(@sprintf("%.1f", iter_reduction))%")
            end
        end
    end
    
    println("   • For larger problems (n ≥ 64), PETSc backend typically provides better scalability")
    println("   • ILU preconditioning may be beneficial for higher Reynolds numbers")
    
end

"""
    grid_scaling_study(reynolds::Float64; grid_sizes=[8, 16, 32, 48])

Study how performance scales with grid size.
"""
function grid_scaling_study(reynolds::Float64; grid_sizes=[8, 16, 32, 48])
    println("\n📏 Grid Scaling Study")
    println("="^50)
    println("Reynolds number: $(reynolds)")
    println("Grid sizes: $(grid_sizes)")
    println()
    
    # Test both Julia and PETSc (if available) with diagonal preconditioning
    solver_configs = [
        (:julia, :diagonal, "Julia + Diagonal"),
        (:petsc, :diagonal, "PETSc + Diagonal")
    ]
    
    scaling_results = []
    
    for (solver, precond, description) in solver_configs
        println("🔍 Testing $(description) scaling:")
        solver_times = Float64[]
        solver_iterations = Int[]
        problem_sizes = Int[]
        
        for n in grid_sizes
            options = LidDrivenOptions(
                n = n,
                Re = reynolds,
                solver = solver,
                preconditioner = precond,
                max_steps = 2000,
                tol = 1e-6,
                verbose = false
            )
            
            print("  n=$(n): ")
            flush(stdout)
            
            try
                result = solve_lid_driven_2d(options)
                
                if result.converged
                    push!(solver_times, result.solve_time)
                    push!(solver_iterations, result.iterations)
                    push!(problem_sizes, n^2)  # Total DOF
                    println("$(@sprintf("%.3f", result.solve_time))s ($(result.iterations) iter)")
                else
                    println("failed to converge")
                end
            catch e
                println("error: $e")
            end
        end
        
        if length(solver_times) >= 2
            push!(scaling_results, (
                solver = description,
                sizes = problem_sizes,
                times = solver_times,
                iterations = solver_iterations
            ))
        end
        
        println()
    end
    
    # Analyze scaling behavior
    if !isempty(scaling_results)
        println("⚖️  Scaling Analysis:")
        println("Problem Size (DOF)   Julia Time   PETSc Time   Julia Iter   PETSc Iter")
        println("-"^70)
        
        max_len = maximum(length(r.times) for r in scaling_results)
        
        for i in 1:max_len
            if i <= length(scaling_results) && i <= length(scaling_results[1].sizes)
                dof = scaling_results[1].sizes[i]
                julia_time = i <= length(scaling_results[1].times) ? @sprintf("%.3f s", scaling_results[1].times[i]) : "N/A"
                petsc_time = length(scaling_results) > 1 && i <= length(scaling_results[2].times) ? @sprintf("%.3f s", scaling_results[2].times[i]) : "N/A"
                julia_iter = i <= length(scaling_results[1].iterations) ? @sprintf("%d", scaling_results[1].iterations[i]) : "N/A"
                petsc_iter = length(scaling_results) > 1 && i <= length(scaling_results[2].iterations) ? @sprintf("%d", scaling_results[2].iterations[i]) : "N/A"
                
                println(@sprintf("%-17d %-12s %-12s %-12s %s", dof, julia_time, petsc_time, julia_iter, petsc_iter))
            end
        end
    end
end

"""
    memory_usage_analysis(n_grid::Int, reynolds::Float64)

Analyze memory usage for different solver configurations.
"""
function memory_usage_analysis(n_grid::Int, reynolds::Float64)
    println("\n💾 Memory Usage Analysis")
    println("="^40)
    
    # Estimate memory requirements
    dof = n_grid^2
    sparse_matrix_memory = 5 * dof * 8 / 1024^2  # ~5 nonzeros per row, 8 bytes per double
    solution_vectors_memory = 4 * dof * 8 / 1024^2  # 4 vectors (ψ, ω, u, v)
    
    println("Grid size: $(n_grid)×$(n_grid) ($(dof) DOF)")
    println("Estimated sparse matrix memory: $(@sprintf("%.1f", sparse_matrix_memory)) MB")  
    println("Estimated solution vectors memory: $(@sprintf("%.1f", solution_vectors_memory)) MB")
    println("Total estimated memory: $(@sprintf("%.1f", sparse_matrix_memory + solution_vectors_memory)) MB")
    
    # Recommendations
    if dof > 10000
        println("\n⚠️  Large problem detected:")
        println("   • Consider PETSc backend for better memory management")
        println("   • Monitor system memory usage during execution")
        println("   • For very large problems, consider distributed computing")
    end
end

"""
    main()

Drive the full performance study: backend comparison, grid scaling, memory
estimates, and a higher-Reynolds re-run, printing all results to stdout.
"""
function main()
    println("⚡ LidDrivenCavity.jl - Performance Comparison Study")
    println("="^60)

    # Package banner
    lid_driven_info()
    println()

    # Study parameters
    grid_sizes = [16, 32]  # Start with moderate sizes
    reynolds = 100.0
    trials = 3

    println("📋 Test Configuration:")
    println("  Reynolds number: $(reynolds)")
    println("  Grid sizes for detailed comparison: $(grid_sizes)")
    println("  Statistical runs per configuration: $(trials)")
    println()

    # Phase 1: detailed backend/preconditioner comparison on the smaller grid
    println("Phase 1: Backend and Preconditioner Comparison")
    phase1_results = compare_solver_backends(grid_sizes[1], reynolds; n_runs=trials)
    analyze_performance_results(phase1_results)

    # Phase 2: how runtime grows with grid size
    println("\n" * "="^60)
    println("Phase 2: Grid Scaling Analysis")
    grid_scaling_study(reynolds; grid_sizes=[8, 16, 24, 32])

    # Phase 3: memory estimates for progressively larger grids
    println("\n" * "="^60)
    println("Phase 3: Memory Usage Analysis")
    for n in [32, 64, 128]
        memory_usage_analysis(n, reynolds)
        println()
    end

    # Phase 4: repeat the comparison at a higher Reynolds number
    if reynolds == 100.0
        println("="^60)
        println("Phase 4: Higher Reynolds Number Test (Re = 400)")
        try
            phase4_results = compare_solver_backends(16, 400.0; n_runs=2)
            analyze_performance_results(phase4_results)
        catch e
            println("❌ High Reynolds number test failed: $e")
        end
    end

    # Closing guidance
    println("\n" * "="^60)
    println("🎯 Final Recommendations")
    println("-"^25)
    println("• For small problems (n ≤ 32): Julia backend with diagonal preconditioning")
    println("• For larger problems (n ≥ 64): PETSc backend with ILU preconditioning")
    println("• For high Reynolds numbers (Re ≥ 1000): PETSc backend, smaller time steps")
    println("• For production runs: Use PETSc with parallel computing capabilities")
    println("• Always monitor convergence and adjust tolerances as needed")
    println()
    println("💡 Use this data to optimize solver selection for your specific use case!")
end

# Run the performance comparison only when executed directly as a script
# (skipped when this file is `include`d from elsewhere).
if abspath(PROGRAM_FILE) == @__FILE__
    try
        main()
        println("\n🎉 Performance comparison completed successfully!")
    catch e
        println("\n😞 Performance comparison failed: $e")
        # Hint at the most common cause: missing/misconfigured dependencies.
        (e isa LoadError || e isa MethodError) &&
            println("\n💡 Make sure all dependencies are available and properly configured")
    end
end