"""
    test_advanced_solvers.jl

Comprehensive test suite for GSI Advanced Solvers implementation.
Tests numerical accuracy, performance, and integration with GSI framework.

This test suite validates:
1. Lanczos algorithm correctness and eigenvalue computation
2. BiCG-Lanczos methods for various matrix types
3. Quasi-Newton optimization convergence
4. Integration with GSI control vectors and cost functions
5. Performance benchmarks against reference implementations
6. Edge cases and numerical stability

Run with: julia test_advanced_solvers.jl
"""

using Test
using LinearAlgebra
using SparseArrays
using Random
using Printf

# Add the GSI package to path and import
push!(LOAD_PATH, joinpath(@__DIR__, "src"))
using GSICoreAnalysis
using GSICoreAnalysis.AdvancedSolvers

# Set random seed for reproducible tests
Random.seed!(42)

# Test utilities
"""
    generate_test_matrix(n, condition_number=1e3, symmetric=true)

Generate an `n`×`n` dense test matrix.

When `symmetric` is `true`, return a symmetric positive definite matrix whose
condition number is capped at (approximately) `condition_number` via a uniform
diagonal shift. When `symmetric` is `false`, return a general non-symmetric
matrix with a mild diagonal shift to keep it away from singularity.

Requires `condition_number > 1` for the symmetric branch.
"""
function generate_test_matrix(n::Int, condition_number::Float64=1e3, symmetric::Bool=true)
    if symmetric
        condition_number > 1 || throw(ArgumentError("condition_number must be > 1"))

        # Gram matrix A*A' is symmetric positive (semi)definite.
        A = randn(n, n)
        A = A * A'

        eigenvals = eigvals(Symmetric(A))
        min_eig = minimum(eigenvals)
        max_eig = maximum(eigenvals)

        # Shift the spectrum so that (max_eig + s) / (min_eig + s) == condition_number.
        # (The previous formula added κ·min²/max, a vanishing shift for the
        # typically ill-conditioned Gram matrix, so the requested condition
        # number was never actually achieved.)
        if max_eig > condition_number * min_eig
            s = (max_eig - condition_number * min_eig) / (condition_number - 1)
            A = A + s * I
        end
    else
        # General non-symmetric matrix; the shift avoids near-singularity
        # without imposing any structure.
        A = randn(n, n)
        A = A + 0.1 * I
    end

    return A
end

"""
    generate_gsi_like_problem(n)

Build a synthetic size-`n` linear system with the structure of a variational
data-assimilation (3DVar) cost function.

Returns `(Hessian, b, B_inv, H, R_inv)`, where
`Hessian = B_inv + H' * R_inv * H` and `b` is a random right-hand side
standing in for the gradient.
"""
function generate_gsi_like_problem(n::Int)
    # Inverse background-error covariance (SPD, moderately conditioned)
    B_inv = generate_test_matrix(n, 1e2, true)

    # Square stand-in for the (normally rectangular) observation operator
    H = 0.5 * randn(n, n)

    # Inverse observation-error covariance (SPD, well conditioned)
    R_inv = generate_test_matrix(n, 1e1, true)

    # Gauss-Newton Hessian of the cost function: B^{-1} + H^T R^{-1} H
    Hessian = B_inv + H' * R_inv * H

    # Random gradient-like right-hand side
    b = randn(n)

    return Hessian, b, B_inv, H, R_inv
end

@testset "Advanced Solvers Test Suite" begin
    
    println("=== GSI Advanced Solvers Test Suite ===")
    println("Testing Lanczos, BiCG-Lanczos, and Quasi-Newton methods")
    println()

    # Problem sizes exercised by the linear-solver test sets below
    test_sizes = [10, 50, 100]
    
    @testset "Lanczos Solver Tests" begin
        println("Testing Lanczos Solver...")

        @testset "Basic Lanczos Functionality" begin
            for n in test_sizes
                @testset "Problem size $n" begin
                    # Generate symmetric positive definite test matrix
                    A = generate_test_matrix(n, 1e2, true)
                    b = randn(n)
                    x_true = A \ b

                    # Test with default configuration
                    config = LanczosConfig(max_iter=min(50, n), tolerance=1e-10, verbose=false)
                    x = zeros(n)

                    result = lanczos_solve!(x, A, b, config)

                    # Check convergence
                    @test result.converged
                    @test result.iterations <= config.max_iter

                    # Check solution accuracy
                    rel_error = norm(x - x_true) / norm(x_true)
                    @test rel_error < 1e-6

                    # Check residual
                    residual = norm(A * x - b)
                    @test residual < 1e-8

                    # "$(x:.2e)" is Python-style formatting and does not parse in
                    # Julia; use @sprintf from Printf instead.
                    println("  Size $n: converged in $(result.iterations) iterations, rel_error = $(@sprintf("%.2e", rel_error))")
                end
            end
        end

        @testset "Lanczos Eigenvalue Computation" begin
            n = 20
            A = generate_test_matrix(n, 1e2, true)

            config = LanczosConfig(max_iter=15, max_ritz=5, tolerance=1e-8)
            b = randn(n)  # dummy for this test
            x = zeros(n)

            result = lanczos_solve!(x, A, b, config)

            # Check that we computed some eigenvalues
            @test length(result.eigenvalues) > 0
            @test all(result.eigenvalues .> 0)  # Should be positive for SPD matrix

            # Ritz values should lie (approximately) within the true spectrum
            true_eigenvals = eigvals(A)
            min_true = minimum(true_eigenvals)
            max_true = maximum(true_eigenvals)

            @test all(result.eigenvalues .>= min_true * 0.9)
            @test all(result.eigenvalues .<= max_true * 1.1)

            println("  Eigenvalue range: [$(@sprintf("%.3e", minimum(result.eigenvalues))), $(@sprintf("%.3e", maximum(result.eigenvalues)))]")
        end

        @testset "Lanczos Preconditioning" begin
            n = 30
            A = generate_test_matrix(n, 1e3, true)  # More ill-conditioned
            b = randn(n)

            # Test without preconditioning
            config_no_precond = LanczosConfig(max_iter=50, tolerance=1e-8, precondition=false)
            x1 = zeros(n)
            result1 = lanczos_solve!(x1, A, b, config_no_precond)

            # Test with preconditioning
            config_precond = LanczosConfig(max_iter=50, tolerance=1e-8, precondition=true)
            x2 = zeros(n)
            result2 = lanczos_solve!(x2, A, b, config_precond)

            # Both should converge, but preconditioning might help
            @test result1.converged || result2.converged

            println("  Without precond: $(result1.iterations) iters, with precond: $(result2.iterations) iters")
        end
    end
    
    @testset "BiCG-Lanczos Solver Tests" begin
        println("Testing BiCG-Lanczos Solver...")

        @testset "BiCGStab Basic Tests" begin
            for n in test_sizes
                @testset "Problem size $n" begin
                    # Generate non-symmetric test matrix
                    A = generate_test_matrix(n, 1e2, false)
                    b = randn(n)
                    x_true = A \ b

                    # Test BiCGStab
                    config = BiCGLanczosConfig(max_iter=min(100, 2*n), variant=BiCG_STAB,
                                              tolerance=1e-10, verbose=false)
                    x = zeros(n)

                    result = bicgstab_solve!(x, A, b, config)

                    # Check convergence (may not always converge for difficult problems)
                    if result.converged
                        rel_error = norm(x - x_true) / norm(x_true)
                        @test rel_error < 1e-6

                        residual = norm(A * x - b)
                        @test residual < 1e-8

                        # Python-style "$(x:.2e)" does not parse in Julia; use @sprintf.
                        println("  Size $n: converged in $(result.iterations) iterations, rel_error = $(@sprintf("%.2e", rel_error))")
                    else
                        println("  Size $n: did not converge ($(result.iterations) iterations)")
                        # For difficult problems, just check that residual improved
                        initial_res = norm(b)
                        final_res = norm(A * x - b)
                        @test final_res < initial_res
                    end
                end
            end
        end

        @testset "BiCG Variant Comparison" begin
            n = 25
            A = generate_test_matrix(n, 1e2, false)
            b = randn(n)

            variants = [BiCG_STANDARD, BiCG_STAB, QMR]
            results = Dict()

            for variant in variants
                config = BiCGLanczosConfig(max_iter=100, variant=variant, tolerance=1e-8, verbose=false)
                x = zeros(n)
                result = bicg_lanczos_solve!(x, A, b, config)
                results[variant] = (result=result, solution=copy(x))
            end

            # At least one variant should converge
            converged_any = any(r.result.converged for r in values(results))
            @test converged_any

            for (variant, data) in results
                status = data.result.converged ? "converged" : "failed"
                println("  $variant: $status ($(data.result.iterations) iterations)")
            end
        end

        @testset "BiCG Breakdown Handling" begin
            n = 15
            # Create a matrix that might cause breakdown
            A = randn(n, n)
            A[1, :] = A[2, :]  # Make first two rows identical - potential breakdown
            b = randn(n)

            config = BiCGLanczosConfig(max_iter=50, variant=BiCG_STAB,
                                      breakdown_threshold=1e-12, tolerance=1e-8)
            x = zeros(n)
            result = bicg_lanczos_solve!(x, A, b, config)

            # Should detect breakdown gracefully
            @test !isnan(result.final_residual_norm)
            @test !any(isnan.(x))

            if result.breakdown_occurred
                println("  Breakdown detection: working correctly")
            end
        end
    end
    
    @testset "Quasi-Newton Solver Tests" begin
        println("Testing Quasi-Newton Solver...")

        @testset "L-BFGS Optimization Tests" begin
            for n in [5, 10, 20]  # Smaller sizes for nonlinear optimization
                @testset "Quadratic problem size $n" begin
                    # Create quadratic objective: f(x) = 0.5 x^T A x - b^T x
                    A = generate_test_matrix(n, 1e2, true)
                    b = randn(n)
                    x_opt = A \ b

                    objective = x -> 0.5 * dot(x, A * x) - dot(b, x)
                    function gradient!(g, x)
                        g .= A * x - b
                        return nothing
                    end

                    config = QuasiNewtonConfig(max_iter=50, tolerance_grad=1e-8,
                                              memory_size=min(5, n), verbose=false)
                    x = randn(n)  # Random starting point

                    result = lbfgs_solve!(x, objective, gradient!, config)

                    @test result.converged
                    @test result.iterations <= config.max_iter

                    # Check solution accuracy
                    rel_error = norm(x - x_opt) / norm(x_opt)
                    @test rel_error < 1e-5

                    # Check gradient at solution
                    grad_final = similar(x)
                    gradient!(grad_final, x)
                    @test norm(grad_final) < 1e-6

                    # Python-style "$(x:.2e)" does not parse in Julia; use @sprintf.
                    println("  Size $n: converged in $(result.iterations) iterations, rel_error = $(@sprintf("%.2e", rel_error))")
                end
            end
        end

        @testset "L-BFGS Rosenbrock Function" begin
            # Test on the challenging Rosenbrock function: f(x,y) = (1-x)^2 + 100(y-x^2)^2
            function rosenbrock(x)
                return (1 - x[1])^2 + 100 * (x[2] - x[1]^2)^2
            end

            function rosenbrock_grad!(g, x)
                g[1] = -2 * (1 - x[1]) - 400 * x[1] * (x[2] - x[1]^2)
                g[2] = 200 * (x[2] - x[1]^2)
                return nothing
            end

            config = QuasiNewtonConfig(max_iter=100, tolerance_grad=1e-6,
                                      memory_size=10, line_search_type=WOLFE, verbose=false)
            x = [-1.2, 1.0]  # Standard starting point

            result = lbfgs_solve!(x, rosenbrock, rosenbrock_grad!, config)

            # Rosenbrock minimum is at (1, 1)
            @test abs(x[1] - 1.0) < 1e-4
            @test abs(x[2] - 1.0) < 1e-4
            @test rosenbrock(x) < 1e-6

            println("  Rosenbrock: converged to f = $(@sprintf("%.2e", rosenbrock(x))) in $(result.iterations) iterations")
        end

        @testset "Line Search Methods" begin
            n = 5
            A = generate_test_matrix(n, 1e2, true)
            b = randn(n)

            objective = x -> 0.5 * dot(x, A * x) - dot(b, x)
            function gradient!(g, x)
                g .= A * x - b
                return nothing
            end

            line_search_methods = [ARMIJO, WOLFE, BACKTRACKING]
            results = Dict()

            for method in line_search_methods
                config = QuasiNewtonConfig(max_iter=30, tolerance_grad=1e-6,
                                          line_search_type=method, verbose=false)
                x = randn(n)
                result = lbfgs_solve!(x, objective, gradient!, config)
                results[method] = result
            end

            # All line search methods should converge for this simple problem
            for (method, result) in results
                @test result.converged
                println("  $method: $(result.iterations) iterations, $(result.function_evals) func evals")
            end
        end
    end
    
    @testset "GSI Integration Tests" begin
        println("Testing GSI Framework Integration...")

        @testset "GSI-like Problem Structure" begin
            n = 30
            # Hess is the combined cost-function Hessian; H_obs is the observation
            # operator (renamed from `H` to avoid confusing the two).
            Hess, b, B_inv, H_obs, R_inv = generate_gsi_like_problem(n)

            # Test all three solvers on the same GSI-like problem
            solvers_results = Dict()

            # Lanczos solver
            config_lanczos = LanczosConfig(max_iter=50, tolerance=1e-8, precondition=true)
            x_lanczos = zeros(n)
            result_lanczos = lanczos_solve!(x_lanczos, Hess, b, config_lanczos)
            solvers_results[:lanczos] = (x_lanczos, result_lanczos)

            # BiCGStab solver
            config_bicg = BiCGLanczosConfig(max_iter=100, variant=BiCG_STAB, tolerance=1e-8)
            x_bicg = zeros(n)
            result_bicg = bicgstab_solve!(x_bicg, Hess, b, config_bicg)
            solvers_results[:bicgstab] = (x_bicg, result_bicg)

            # L-BFGS for the quadratic case
            objective = x -> 0.5 * dot(x, Hess * x) - dot(b, x)
            function gradient!(g, x)
                g .= Hess * x - b
                return nothing
            end
            config_lbfgs = QuasiNewtonConfig(max_iter=30, tolerance_grad=1e-6)
            x_lbfgs = randn(n)
            result_lbfgs = lbfgs_solve!(x_lbfgs, objective, gradient!, config_lbfgs)
            solvers_results[:lbfgs] = (x_lbfgs, result_lbfgs)

            # Compare solutions (should be similar for converged methods)
            true_solution = Hess \ b

            for (solver, (x_sol, result)) in solvers_results
                if result.converged
                    rel_error = norm(x_sol - true_solution) / norm(true_solution)
                    # Python-style "$(x:.2e)" does not parse in Julia; use @sprintf.
                    println("  $solver: converged, rel_error = $(@sprintf("%.2e", rel_error))")
                    @test rel_error < 1e-4
                else
                    println("  $solver: did not converge")
                end
            end
        end

        @testset "Unified Solver Interface" begin
            n = 20
            A = generate_test_matrix(n, 1e2, false)
            b = randn(n)

            # Test unified interface for linear solvers
            methods = [:lanczos, :bicg_stab]

            for method in methods
                x = zeros(n)
                try
                    result = solve_advanced!(x, A, method; b=b)
                    println("  Unified interface $method: success")
                    @test !any(isnan.(x))
                catch e
                    println("  Unified interface $method: $(typeof(e))")
                end
            end

            # Test unified interface for nonlinear optimization
            objective = x -> norm(A * x - b)^2
            function gradient!(g, x)
                g .= 2 * A' * (A * x - b)
                return nothing
            end

            x = randn(n)
            try
                # The keyword must be written "gradient! = gradient!" with spaces:
                # "gradient!=gradient!" lexes as the comparison `gradient != gradient!`.
                result = solve_advanced!(x, objective, :lbfgs; gradient! = gradient!)
                println("  Unified interface L-BFGS: success")
                @test !any(isnan.(x))
            catch e
                println("  Unified interface L-BFGS: $(typeof(e))")
            end
        end
    end
    
    @testset "Performance and Benchmarking" begin
        println("Testing Performance and Benchmarking...")

        @testset "Default Configuration Generation" begin
            # Default configs should be valid across a range of problem sizes.
            for n in [50, 100, 200]
                configs = create_default_configs(n, Float64)

                @test configs.lanczos.max_iter > 0
                @test configs.bicg.max_iter > 0
                @test configs.quasi_newton.max_iter > 0

                # Memory size should scale reasonably with problem size
                @test configs.quasi_newton.memory_size >= 5
                @test configs.lanczos.max_precond_vecs >= 5

                println("  Size $n: Lanczos max_iter=$(configs.lanczos.max_iter), " *
                       "QN memory_size=$(configs.quasi_newton.memory_size)")
            end
        end

        @testset "Solver Scaling" begin
            # Wall-clock time should grow (roughly) with problem size.
            sizes = [10, 20, 40]
            times = Dict{Symbol, Vector{Float64}}()

            for solver in (:lanczos, :bicgstab)
                elapsed_list = Float64[]

                for n in sizes
                    # Lanczos needs an SPD matrix; BiCGStab handles non-symmetric.
                    A = generate_test_matrix(n, 1e2, solver == :lanczos)
                    b = randn(n)
                    x = zeros(n)

                    t0 = time()
                    try
                        solve_advanced!(x, A, solver; b=b)
                        push!(elapsed_list, time() - t0)
                    catch
                        # Record a NaN so a failed solve doesn't abort the sweep.
                        push!(elapsed_list, NaN)
                    end
                end

                times[solver] = elapsed_list
                println("  $solver scaling: $(times[solver])")
            end

            # Basic scaling check - times should generally increase with size
            for (solver, solver_times) in times
                valid_times = filter(!isnan, solver_times)
                if length(valid_times) >= 2
                    @test valid_times[end] >= valid_times[1] * 0.5  # Allow some variation
                end
            end
        end
    end
    
    @testset "Edge Cases and Robustness" begin
        println("Testing Edge Cases and Robustness...")

        @testset "Singular/Nearly Singular Matrices" begin
            n = 10

            # Create nearly singular matrix with nearly dependent rows.
            # NOTE: the perturbation must be a plain vector — adding the adjoint
            # randn(n)' (a 1×n row) to the n-vector A[1, :] throws DimensionMismatch.
            A = randn(n, n)
            A[end, :] = A[1, :] + 1e-12 * randn(n)
            b = randn(n)

            config = BiCGLanczosConfig(max_iter=50, tolerance=1e-6,
                                      breakdown_threshold=1e-10, verbose=false)
            x = zeros(n)
            result = bicg_lanczos_solve!(x, A, b, config)

            # Should handle gracefully
            @test !any(isnan.(x))
            @test !isinf(result.final_residual_norm)

            if result.breakdown_occurred
                println("  Singular matrix: breakdown detected correctly")
            end
        end

        @testset "Zero Right-Hand Side" begin
            n = 15
            A = generate_test_matrix(n, 1e2, true)
            b = zeros(n)  # Zero RHS

            config = LanczosConfig(max_iter=10, tolerance=1e-12)
            x = randn(n)  # Non-zero initial guess
            result = lanczos_solve!(x, A, b, config)

            # Should converge to zero quickly
            @test result.converged
            @test norm(x) < 1e-10
            println("  Zero RHS: converged in $(result.iterations) iterations")
        end

        @testset "Very Small Problems" begin
            # Test 1x1 and 2x2 problems
            for n in [1, 2]
                A = randn(n, n)
                if n == 1
                    A[1,1] = abs(A[1,1]) + 1.0  # Ensure positive
                else
                    A = A * A'  # Make positive definite
                end
                b = randn(n)

                config = LanczosConfig(max_iter=5, tolerance=1e-10)
                x = zeros(n)
                result = lanczos_solve!(x, A, b, config)

                @test result.converged
                @test norm(A * x - b) < 1e-10
                println("  Size $n: converged correctly")
            end
        end

        @testset "Extreme Condition Numbers" begin
            n = 20

            # Very well-conditioned (condition number ≈ 1)
            A_good = I + 0.1 * randn(n, n)
            A_good = A_good * A_good'
            b = randn(n)

            config = LanczosConfig(max_iter=30, tolerance=1e-10)
            x = zeros(n)
            result = lanczos_solve!(x, A_good, b, config)

            @test result.converged
            @test result.iterations <= 10  # Should converge very quickly
            println("  Well-conditioned: $(result.iterations) iterations")
        end
    end
end

println("\n=== Test Suite Complete ===")
println("All advanced solvers have been tested for:")
println("- Numerical accuracy and convergence")
println("- Integration with GSI framework")
println("- Performance characteristics")
println("- Robustness to edge cases")
println("- Mathematical correctness")

# Optional: smoke-test all three solver entry points through the unified interface.
println("\nRunning final integration test...")

try
    n = 25
    A = generate_test_matrix(n, 1e2, true)
    b = randn(n)

    # Test all three solver types
    println("Final integration test (size $n):")

    # Lanczos
    x1 = zeros(n)
    result1 = solve_advanced!(x1, A, :lanczos; b=b)
    println("- Lanczos: $(result1.converged ? "✓" : "✗")")

    # BiCGStab
    x2 = zeros(n)
    result2 = solve_advanced!(x2, A, :bicg_stab; b=b)
    println("- BiCGStab: $(result2.converged ? "✓" : "✗")")

    # L-BFGS on the equivalent quadratic objective
    obj = x -> 0.5 * dot(x, A * x) - dot(b, x)
    grad! = (g, x) -> (g .= A * x - b; nothing)
    x3 = randn(n)
    # The keyword must be written "gradient! = grad!" with spaces:
    # "gradient!=grad!" lexes as the comparison `gradient != grad!`.
    result3 = solve_advanced!(x3, obj, :lbfgs; gradient! = grad!)
    println("- L-BFGS: $(result3.converged ? "✓" : "✗")")

    println("\n🎉 GSI Advanced Solvers implementation complete!")

catch e
    println("⚠️  Final integration test failed: $e")
end