"""
    benchmark_advanced_solvers.jl

Performance benchmarking suite for GSI Advanced Solvers.
Compares performance against reference implementations and analyzes scaling behavior.

This benchmark suite measures:
1. Computational performance (FLOPS, memory usage)
2. Scaling behavior with problem size
3. Convergence rates and iteration counts
4. Memory allocation patterns
5. Comparison with Julia standard library solvers
6. Performance on GSI-representative problems

Run with: julia benchmark_advanced_solvers.jl
"""

using BenchmarkTools
using LinearAlgebra
using SparseArrays
using Random
using Printf
using Statistics

# Add the GSI package to path and import
# NOTE(review): assumes this script sits next to a `src/` directory containing
# the GSICoreAnalysis package — confirm repository layout.
push!(LOAD_PATH, joinpath(@__DIR__, "src"))
using GSICoreAnalysis
using GSICoreAnalysis.AdvancedSolvers

# Set random seed for reproducible benchmarks
# (fixed seed => identical test matrices and start vectors across runs)
Random.seed!(42)

# Utilities for generating benchmark problems
"""
    create_benchmark_matrix(n, problem_type, condition_number=1e3)

Create an `n × n` dense benchmark matrix.

`problem_type` selects the structure:
- `:symmetric_pd` — symmetric positive definite with condition number equal to
  `condition_number` (up to floating-point error)
- `:nonsymmetric` — non-symmetric but well-conditioned (`randn(n, n) + I`)
- `:sparse_symmetric` — symmetrized sparse random matrix plus `I`
  (NOTE(review): positive definiteness is *not* guaranteed by this construction)
- `:gsi_like` — GSI cost-function Hessian structure `B⁻¹ + Hᵀ R⁻¹ H`

Throws an `ErrorException` for an unknown `problem_type`.
Always returns a dense `Matrix`.
"""
function create_benchmark_matrix(n::Int, problem_type::Symbol, condition_number::Real=1e3)
    if problem_type == :symmetric_pd
        G = randn(n, n)
        A = G * G'  # symmetric positive semi-definite (Wishart-like)
        ev = eigvals(Symmetric(A))
        lo, hi = extrema(ev)
        # BUG FIX: the previous shift (κ / (hi/lo) * lo = κ·lo²/hi) did not
        # achieve the requested condition number — it is negligible when lo is
        # tiny. Solving (hi + s)/(lo + s) = κ for the spectral shift s makes
        # cond(A + s*I) = κ exactly, and lo + s = (hi - lo)/(κ - 1) > 0 keeps
        # the matrix positive definite.
        if condition_number > 1 && hi > lo
            s = (hi - condition_number * lo) / (condition_number - 1)
            A = A + s * I
        else
            # Degenerate case (n == 1 or κ ≤ 1): just ensure positive definiteness
            A = A + I
        end
    elseif problem_type == :nonsymmetric
        # Non-symmetric but well-conditioned
        A = randn(n, n) + I
    elseif problem_type == :sparse_symmetric
        # Symmetrized sparse random matrix; `+ I` helps conditioning but does
        # not guarantee positive definiteness
        A = sprandn(n, n, 0.1)
        A = A + A' + I
    elseif problem_type == :gsi_like
        # GSI-like structure: B^{-1} + H^T R^{-1} H
        B_inv = create_benchmark_matrix(n, :symmetric_pd, condition_number/10)
        H = 0.3 * randn(n, n)
        R_inv = create_benchmark_matrix(n, :symmetric_pd, condition_number/5)
        A = B_inv + H' * R_inv * H
    else
        error("Unknown problem type: $problem_type")
    end

    return Matrix(A)  # Ensure dense for benchmarks
end

"""
    benchmark_solver_performance()

Run the complete benchmark suite (Lanczos, BiCG-Lanczos, quasi-Newton, a
stdlib comparison, and a memory-allocation analysis), print a summary, and
return a `Dict` keyed by suite name with the per-suite results.
"""
function benchmark_solver_performance()
    println("=== GSI Advanced Solvers Performance Benchmark ===")
    println("Benchmarking Lanczos, BiCG-Lanczos, and Quasi-Newton methods")
    println("Julia version: $(VERSION)")
    println("BLAS threads: $(BLAS.get_num_threads())")
    println()

    # Problem sizes exercised by every sub-benchmark
    sizes = [10, 25, 50, 100, 200]
    results = Dict()

    @info "Starting benchmarks..."

    # Each suite: (results key, section header, runner function).
    # Runs in this exact order so the printed report matches previous runs.
    suites = [
        (:lanczos,      "=== Lanczos Solver Benchmarks ===",               benchmark_lanczos_solvers),
        (:bicg,         "\n=== BiCG-Lanczos Solver Benchmarks ===",        benchmark_bicg_solvers),
        (:quasi_newton, "\n=== Quasi-Newton Solver Benchmarks ===",        benchmark_quasi_newton_solvers),
        (:comparison,   "\n=== Comparison with Julia Standard Library ===", benchmark_vs_stdlib),
        (:memory,       "\n=== Memory Allocation Analysis ===",            benchmark_memory_usage),
    ]
    for (key, header, runner) in suites
        println(header)
        results[key] = runner(sizes)
    end

    # Aggregate scaling / memory / convergence report
    generate_performance_summary(results)

    return results
end

"""
    benchmark_lanczos_solvers(problem_sizes::Vector{Int})

Benchmark the Lanczos solver across problem sizes, configurations
(default / preconditioned / high precision), and matrix types
(symmetric PD and GSI-like).

Returns `Dict`: size => Dict("config_matrixtype" => NamedTuple with fields
`time` (ms), `memory`, `allocs`, `iterations`, `converged`).
"""
function benchmark_lanczos_solvers(problem_sizes::Vector{Int})
    results = Dict()

    for n in problem_sizes
        println("Benchmarking Lanczos solver (size $n)...")

        # Create test problems
        A_pd = create_benchmark_matrix(n, :symmetric_pd, 1e3)
        A_gsi = create_benchmark_matrix(n, :gsi_like, 1e2)
        b = randn(n)

        # Different configurations
        configs = [
            ("default", LanczosConfig(max_iter=min(50, n), tolerance=1e-8, precondition=false)),
            ("preconditioned", LanczosConfig(max_iter=min(50, n), tolerance=1e-8, precondition=true)),
            ("high_precision", LanczosConfig(max_iter=min(100, 2*n), tolerance=1e-12, precondition=true))
        ]

        size_results = Dict()

        for (config_name, config) in configs
            for (matrix_type, A) in [("symmetric_pd", A_pd), ("gsi_like", A_gsi)]
                x = zeros(n)

                # Warmup run (compiles; also yields iteration/convergence info,
                # avoiding the redundant extra solve the old code performed)
                result = lanczos_solve!(copy(x), A, b, config)

                # BUG FIX: variables created in `setup` must NOT be
                # `$`-interpolated — `$x_copy` is resolved in the enclosing
                # scope where no `x_copy` exists. `evals=1` makes each
                # evaluation start from a fresh zero vector, since
                # lanczos_solve! mutates its first argument.
                bench_result = @benchmark lanczos_solve!(x_copy, $A, $b, $config) setup=(x_copy = copy($x)) evals=1

                key = "$(config_name)_$(matrix_type)"
                size_results[key] = (
                    time = median(bench_result.times) / 1e6,  # ns -> ms
                    memory = bench_result.memory,
                    allocs = bench_result.allocs,
                    iterations = result.iterations,
                    converged = result.converged
                )

                @printf("  %s-%s: %.2f ms, %d iters, %.1f MB\n", 
                       config_name, matrix_type, size_results[key].time, 
                       size_results[key].iterations, size_results[key].memory / 1e6)
            end
        end

        results[n] = size_results
    end

    return results
end

"""
    benchmark_bicg_solvers(problem_sizes::Vector{Int})

Benchmark the BiCG-Lanczos solver variants (BiCGStab, standard BiCG, QMR)
on non-symmetric test systems. Variants that error during warmup are skipped.

Returns `Dict`: size => Dict(variant name => NamedTuple with fields
`time` (ms), `memory`, `allocs`, `iterations`, `converged`).
"""
function benchmark_bicg_solvers(problem_sizes::Vector{Int})
    results = Dict()

    for n in problem_sizes
        println("Benchmarking BiCG-Lanczos solver (size $n)...")

        # Create non-symmetric test problems
        A_ns = create_benchmark_matrix(n, :nonsymmetric, 1e2)
        b = randn(n)

        # Different variants
        variants = [
            ("BiCGStab", BiCG_STAB),
            ("BiCG", BiCG_STANDARD),
            ("QMR", QMR)
        ]

        size_results = Dict()

        for (variant_name, variant) in variants
            config = BiCGLanczosConfig(
                max_iter = min(100, 2*n),
                variant = variant,
                tolerance = 1e-8,
                precondition = false,
                verbose = false
            )

            x = zeros(n)

            # Warmup run (also yields convergence info); skip this variant
            # entirely if the solver fails
            result = try
                bicg_lanczos_solve!(copy(x), A_ns, b, config)
            catch
                continue
            end

            # BUG FIX: setup-created variables must not be `$`-interpolated
            # (they do not exist in the enclosing scope). `evals=1` gives every
            # evaluation a fresh start vector, since the solver mutates it.
            bench_result = @benchmark bicg_lanczos_solve!(x_copy, $A_ns, $b, $config) setup=(x_copy = copy($x)) evals=1

            size_results[variant_name] = (
                time = median(bench_result.times) / 1e6,  # ns -> ms
                memory = bench_result.memory,
                allocs = bench_result.allocs,
                iterations = result.iterations,
                converged = result.converged
            )

            @printf("  %s: %.2f ms, %d iters, %.1f MB, %s\n", 
                   variant_name, size_results[variant_name].time,
                   size_results[variant_name].iterations,
                   size_results[variant_name].memory / 1e6,
                   result.converged ? "converged" : "failed")
        end

        results[n] = size_results
    end

    return results
end

"""
    benchmark_quasi_newton_solvers(problem_sizes::Vector{Int})

Benchmark L-BFGS (memory 5 and 10) and full-memory BFGS on a convex
quadratic `f(x) = 0.5 xᵀAx − bᵀx` (whose minimizer solves `Ax = b`).
Only sizes ≤ 100 are used; full BFGS is skipped for n > 50.

Returns `Dict`: size => Dict(config name => NamedTuple with fields
`time` (ms), `memory`, `allocs`, `iterations`, `function_evals`, `converged`).
"""
function benchmark_quasi_newton_solvers(problem_sizes::Vector{Int})
    results = Dict()

    # Use smaller sizes for nonlinear optimization
    qn_sizes = filter(n -> n <= 100, problem_sizes)

    for n in qn_sizes
        println("Benchmarking Quasi-Newton solver (size $n)...")

        # Create quadratic test problem: f(x) = 0.5 x^T A x - b^T x
        A = create_benchmark_matrix(n, :symmetric_pd, 1e2)
        b = randn(n)

        objective = x -> 0.5 * dot(x, A * x) - dot(b, x)
        function gradient!(g, x)
            g .= A * x - b
            return nothing
        end

        # Different configurations
        configs = [
            ("L-BFGS-5", QuasiNewtonConfig(max_iter=50, memory_size=5, tolerance_grad=1e-6)),
            ("L-BFGS-10", QuasiNewtonConfig(max_iter=50, memory_size=10, tolerance_grad=1e-6)),
            ("BFGS", QuasiNewtonConfig(max_iter=50, memory_size=n, tolerance_grad=1e-6))
        ]

        size_results = Dict()

        for (config_name, config) in configs
            # Skip full BFGS for large problems (O(n^2) history storage)
            if config_name == "BFGS" && n > 50
                continue
            end

            x_init = randn(n)

            # Warmup run (also yields convergence info, avoiding the
            # redundant extra solve the old code performed)
            result = lbfgs_solve!(copy(x_init), objective, gradient!, config)

            # BUG FIX: setup-created variables must not be `$`-interpolated
            # (they do not exist in the enclosing scope). `evals=1` restarts
            # each evaluation from x_init, since the solver mutates its
            # first argument.
            bench_result = @benchmark lbfgs_solve!(x_copy, $objective, $gradient!, $config) setup=(x_copy = copy($x_init)) evals=1

            size_results[config_name] = (
                time = median(bench_result.times) / 1e6,  # ns -> ms
                memory = bench_result.memory,
                allocs = bench_result.allocs,
                iterations = result.iterations,
                function_evals = result.function_evals,
                converged = result.converged
            )

            @printf("  %s: %.2f ms, %d iters (%d f-evals), %.1f MB, %s\n", 
                   config_name, size_results[config_name].time,
                   size_results[config_name].iterations,
                   size_results[config_name].function_evals,
                   size_results[config_name].memory / 1e6,
                   result.converged ? "converged" : "failed")
        end

        results[n] = size_results
    end

    return results
end

"""
    benchmark_vs_stdlib(problem_sizes::Vector{Int})

Compare the package's iterative solvers against Julia's LAPACK-backed direct
solve (`A \\ b`) on symmetric and non-symmetric systems. Only sizes ≤ 100
are timed.

Returns `Dict`: size => Dict(solver name => NamedTuple with `time` (ms)
and `memory`).
"""
function benchmark_vs_stdlib(problem_sizes::Vector{Int})
    results = Dict()

    stdlib_sizes = filter(n -> n <= 100, problem_sizes)  # Keep reasonable

    for n in stdlib_sizes
        println("Comparing with stdlib (size $n)...")

        A_sym = create_benchmark_matrix(n, :symmetric_pd, 1e2)
        A_ns = create_benchmark_matrix(n, :nonsymmetric, 1e2)
        b = randn(n)

        size_results = Dict()

        # Direct solve (reference)
        bench_direct_sym = @benchmark $A_sym \ $b
        bench_direct_ns = @benchmark $A_ns \ $b

        size_results["direct_symmetric"] = (
            time = median(bench_direct_sym.times) / 1e6,  # ns -> ms
            memory = bench_direct_sym.memory
        )

        size_results["direct_nonsymmetric"] = (
            time = median(bench_direct_ns.times) / 1e6,
            memory = bench_direct_ns.memory
        )

        # Our Lanczos solver.
        # BUG FIX (here and below): setup-created variables must not be
        # `$`-interpolated (they do not exist in the enclosing scope), and
        # `evals=1` restarts each evaluation from zero since the solvers
        # mutate their first argument.
        config_lanczos = LanczosConfig(max_iter=min(50, n), tolerance=1e-8)
        x = zeros(n)
        bench_lanczos = @benchmark lanczos_solve!(x_copy, $A_sym, $b, $config_lanczos) setup=(x_copy = copy($x)) evals=1

        size_results["lanczos"] = (
            time = median(bench_lanczos.times) / 1e6,
            memory = bench_lanczos.memory
        )

        # Our BiCGStab solver.
        # NOTE(review): this calls `bicgstab_solve!` while other benchmarks use
        # `bicg_lanczos_solve!` with variant=BiCG_STAB — confirm both entry
        # points exist in AdvancedSolvers.
        config_bicg = BiCGLanczosConfig(max_iter=min(100, 2*n), variant=BiCG_STAB, tolerance=1e-8)
        bench_bicg = @benchmark bicgstab_solve!(x_copy, $A_ns, $b, $config_bicg) setup=(x_copy = copy($x)) evals=1

        size_results["bicgstab"] = (
            time = median(bench_bicg.times) / 1e6,
            memory = bench_bicg.memory
        )

        # Print comparison
        @printf("  Direct (symmetric): %.2f ms, %.1f MB\n", 
               size_results["direct_symmetric"].time, 
               size_results["direct_symmetric"].memory / 1e6)
        @printf("  Lanczos (symmetric): %.2f ms, %.1f MB (%.1fx slower)\n", 
               size_results["lanczos"].time, 
               size_results["lanczos"].memory / 1e6,
               size_results["lanczos"].time / size_results["direct_symmetric"].time)

        @printf("  Direct (non-symmetric): %.2f ms, %.1f MB\n", 
               size_results["direct_nonsymmetric"].time, 
               size_results["direct_nonsymmetric"].memory / 1e6)
        @printf("  BiCGStab (non-symmetric): %.2f ms, %.1f MB (%.1fx slower)\n", 
               size_results["bicgstab"].time, 
               size_results["bicgstab"].memory / 1e6,
               size_results["bicgstab"].time / size_results["direct_nonsymmetric"].time)

        results[n] = size_results
    end

    return results
end

"""
    benchmark_memory_usage(problem_sizes::Vector{Int})

Measure total allocation (via `Base.gc_bytes` before/after one solve) for
each solver on a GSI-like system, and compare against the memory the solver
is estimated to need for its stored vectors. Only sizes ≤ 50 are analyzed;
the L-BFGS case only for n ≤ 30.

Returns `Dict`: size => Dict(solver => NamedTuple with `total_alloc`,
`iterations`, `vectors_stored`, `estimated_mem`).
"""
function benchmark_memory_usage(problem_sizes::Vector{Int})
    results = Dict()

    mem_sizes = filter(n -> n <= 50, problem_sizes)  # Keep reasonable for detailed analysis

    for n in mem_sizes
        println("Memory analysis (size $n)...")

        A = create_benchmark_matrix(n, :gsi_like, 1e2)
        b = randn(n)

        size_results = Dict()

        # --- Lanczos memory usage ---
        config_lanczos = LanczosConfig(max_iter=min(30, n), tolerance=1e-8, precondition=true)
        # BUG FIX: warm up first so compile-time allocations are not counted
        # in the measurement window below
        lanczos_solve!(zeros(n), A, b, config_lanczos)
        x = zeros(n)

        alloc_before = Base.gc_bytes()
        result = lanczos_solve!(x, A, b, config_lanczos)
        alloc_after = Base.gc_bytes()

        size_results["lanczos"] = (
            total_alloc = alloc_after - alloc_before,
            iterations = result.iterations,
            # NOTE(review): assumes LanczosConfig exposes a `max_precond_vecs`
            # field even though it is not passed to the constructor — confirm
            vectors_stored = min(result.iterations + 1, config_lanczos.max_precond_vecs),
            estimated_mem = n * sizeof(Float64) * min(result.iterations + 1, config_lanczos.max_precond_vecs)
        )

        # --- BiCGStab memory usage ---
        config_bicg = BiCGLanczosConfig(max_iter=min(50, 2*n), variant=BiCG_STAB, tolerance=1e-8)
        bicgstab_solve!(zeros(n), A, b, config_bicg)  # warmup (exclude compilation)
        x = zeros(n)

        alloc_before = Base.gc_bytes()
        result = bicgstab_solve!(x, A, b, config_bicg)
        alloc_after = Base.gc_bytes()

        size_results["bicgstab"] = (
            total_alloc = alloc_after - alloc_before,
            iterations = result.iterations,
            vectors_stored = 6,  # Typical for BiCGStab
            estimated_mem = n * sizeof(Float64) * 6
        )

        # --- L-BFGS memory usage (nonlinear; keep very small) ---
        if n <= 30
            objective = x -> 0.5 * dot(x, A * x) - dot(b, x)
            gradient! = (g, x) -> (g .= A * x - b; nothing)
            config_lbfgs = QuasiNewtonConfig(max_iter=30, memory_size=5, tolerance_grad=1e-6)
            x = randn(n)
            lbfgs_solve!(copy(x), objective, gradient!, config_lbfgs)  # warmup

            alloc_before = Base.gc_bytes()
            result = lbfgs_solve!(x, objective, gradient!, config_lbfgs)
            alloc_after = Base.gc_bytes()

            size_results["lbfgs"] = (
                total_alloc = alloc_after - alloc_before,
                iterations = result.iterations,
                vectors_stored = config_lbfgs.memory_size * 2,  # s and y vectors
                estimated_mem = n * sizeof(Float64) * config_lbfgs.memory_size * 2
            )
        end

        # Print memory analysis (sorted keys => deterministic report order)
        for solver in sort(collect(keys(size_results)))
            data = size_results[solver]
            @printf("  %s: %.1f KB total, %d vectors (%.1f KB estimated)\n", 
                   solver, data.total_alloc / 1024, data.vectors_stored,
                   data.estimated_mem / 1024)
        end

        results[n] = size_results
    end

    return results
end

"""
    generate_performance_summary(results::Dict)

Print a comprehensive summary — scaling analysis, memory efficiency,
convergence rates, and usage recommendations — of the benchmark `results`
produced by `benchmark_solver_performance`. Returns `nothing`.
"""
function generate_performance_summary(results::Dict)
    println("\n" * "="^60)
    println("PERFORMANCE SUMMARY")
    println("="^60)

    # --- Scaling analysis: compare first vs last problem size ---
    println("\n--- Scaling Analysis ---")

    if haskey(results, :lanczos)
        lanczos_data = results[:lanczos]
        sizes = sort(collect(keys(lanczos_data)))

        if length(sizes) >= 2
            println("Lanczos Solver Scaling:")
            config_key = "default_symmetric_pd"
            if haskey(lanczos_data[sizes[1]], config_key)
                times = [lanczos_data[s][config_key].time for s in sizes if haskey(lanczos_data[s], config_key)]
                if length(times) >= 2
                    scaling_factor = times[end] / times[1]
                    size_factor = sizes[end] / sizes[1]
                    @printf("  Time scaling: %.1fx for %.1fx size increase\n", scaling_factor, size_factor)
                    # Fit t ~ n^p from the two endpoints: p = log(t ratio)/log(n ratio)
                    @printf("  Scaling exponent: %.2f (ideal: 2.0 for direct, 1.5 for iterative)\n", 
                           log(scaling_factor) / log(size_factor))
                end
            end
        end
    end

    # --- Memory efficiency: estimated working set / measured allocation ---
    println("\n--- Memory Efficiency ---")

    if haskey(results, :memory)
        mem_data = results[:memory]
        # Iterate in sorted order so the report is deterministic
        # (Dict iteration order is unspecified)
        for psize in sort(collect(keys(mem_data)))
            data = mem_data[psize]
            println("Problem size $psize:")
            for solver in sort(collect(keys(data)))
                info = data[solver]
                # BUG FIX: guard against division by zero when no allocations
                # were recorded (previously printed "Inf%")
                efficiency = info.total_alloc > 0 ? info.estimated_mem / info.total_alloc : 0.0
                @printf("  %s: %.1f%% memory efficiency\n", solver, efficiency * 100)
            end
        end
    end

    # --- Convergence analysis across all iterative-solver suites ---
    println("\n--- Convergence Performance ---")

    for (method, method_data) in results
        if method in [:lanczos, :bicg, :quasi_newton]
            converged_count = 0
            total_count = 0
            avg_iterations = Float64[]

            for (psize, size_data) in method_data
                for (config, info) in size_data
                    if haskey(info, :converged)
                        total_count += 1
                        if info.converged
                            converged_count += 1
                            if haskey(info, :iterations)
                                push!(avg_iterations, info.iterations)
                            end
                        end
                    end
                end
            end

            if total_count > 0
                convergence_rate = converged_count / total_count
                avg_iters = isempty(avg_iterations) ? 0 : mean(avg_iterations)
                @printf("%s: %.1f%% convergence rate, %.1f avg iterations\n", 
                       method, convergence_rate * 100, avg_iters)
            end
        end
    end

    # --- Performance recommendations ---
    println("\n--- Performance Recommendations ---")
    println("1. Lanczos solver is most efficient for symmetric positive definite systems")
    println("2. BiCGStab is recommended for non-symmetric systems")
    println("3. L-BFGS with memory_size=10 provides good balance for nonlinear optimization")
    println("4. Use preconditioning for ill-conditioned problems (condition number > 1e4)")
    println("5. For large problems (n > 1000), consider sparse matrix implementations")

    println("\n" * "="^60)
end

# Entry point: run the full suite only when this file is executed as a
# script (`julia benchmark_advanced_solvers.jl`), not when it is include()d.
if abspath(PROGRAM_FILE) == @__FILE__
    println("Starting comprehensive performance benchmark...")
    println("This may take several minutes to complete...")
    println()

    all_results = benchmark_solver_performance()

    println("\n✅ Benchmark complete!")
    println("Results have been analyzed and summarized above.")

    # To persist the raw results for later analysis:
    #   using Serialization
    #   serialize("benchmark_results.jls", all_results)
    #   println("Results saved to benchmark_results.jls")
end