"""
Tests for NSEMSolver.jl benchmarking framework
"""

using Test
using NSEMSolver

@testset "Benchmarking Framework Tests" begin
    
    @testset "BenchmarkConfig Creation" begin
        # Defaults should match the documented baseline configuration.
        default_cfg = BenchmarkConfig()
        @test default_cfg.n_trials == 3
        @test default_cfg.verbose == true
        @test default_cfg.track_allocations == true

        # Keyword overrides must be reflected in the constructed config.
        overridden = BenchmarkConfig(
            n_trials = 5,
            verbose = false,
            export_csv = true
        )
        @test overridden.n_trials == 5
        @test overridden.verbose == false
        @test overridden.export_csv == true
    end
    
    @testset "SystemInfo Collection" begin
        # SystemInfo should snapshot the current runtime environment.
        info = SystemInfo()
        @test info.julia_version == string(VERSION)
        @test info.num_threads == Threads.nthreads()
        @test info.cpu_cores > 0
        @test info.total_memory > 0
        @test !isempty(info.timestamp)
    end
    
    @testset "Performance Metrics Structure" begin
        # Keyword-constructed metrics should round-trip their field values.
        m = PerformanceMetrics(
            mean_time = 1.5,
            dofs = 1000,
            success_rate = 1.0
        )
        @test m.mean_time == 1.5
        @test m.dofs == 1000
        @test m.success_rate == 1.0
    end
    
    @testset "Single Configuration Benchmark" begin
        # Keep the problem tiny so the benchmark finishes quickly in CI.
        bench_cfg = BenchmarkConfig(n_trials = 1, verbose = false)
        opts = NSOptions(
            N = 2,
            n_block = 2,
            nu = 0.01,
            tfinal = 0.01,  # very short simulated time span
            cfl = 0.5,
            tol = 1e-3,     # loose tolerance keeps the run cheap
            solver = :julia,
            verbose = false
        )

        outcome = benchmark_single_configuration(opts, bench_cfg)

        @test outcome isa BenchmarkResult
        @test outcome.config.n_trials == 1
        @test outcome.metrics isa PerformanceMetrics

        # Timing/DOF metrics are only meaningful when at least one trial succeeded.
        if outcome.metrics.success_rate > 0
            @test outcome.metrics.mean_time > 0
            @test outcome.metrics.dofs > 0
        end
    end
    
    @testset "Solver Performance Benchmark" begin
        # Two tiny configurations keep the sweep fast.
        bench_cfg = BenchmarkConfig(n_trials = 1, verbose = false)
        sweep = [
            NSOptions(N = 2, n_block = 2, tfinal = 0.01, tol = 1e-3, verbose = false),
            NSOptions(N = 3, n_block = 2, tfinal = 0.01, tol = 1e-3, verbose = false)
        ]

        outcomes = benchmark_solver_performance(sweep, config = bench_cfg)

        @test length(outcomes) == 2
        @test all(r -> r isa BenchmarkResult, outcomes)

        # Every result should carry identifying metadata.
        for r in outcomes
            @test !isempty(r.name)
            @test !isempty(r.description)
        end
    end
    
    @testset "Backend Comparison" begin
        # The pure-Julia backend is always available, so restrict the run to it.
        bench_cfg = BenchmarkConfig(n_trials = 1, verbose = false)

        outcomes = benchmark_backend_comparison([:julia], config = bench_cfg)

        @test length(outcomes) >= 1  # at least the Julia backend must produce a result
        @test outcomes[1].name == "Backend_julia"
    end
    
    @testset "Memory Usage Benchmark" begin
        # Allocation tracking must be enabled for memory metrics to be collected.
        bench_cfg = BenchmarkConfig(n_trials = 1, verbose = false, track_allocations = true)
        sizes = [
            NSOptions(N = 2, n_block = 2, tfinal = 0.01, tol = 1e-3, verbose = false)
        ]

        outcomes = benchmark_memory_usage(sizes, config = bench_cfg)

        @test length(outcomes) == 1
        first_outcome = outcomes[1]
        @test first_outcome.name == "Memory_Size_1"

        # Memory counters are only meaningful for successful trials.
        if first_outcome.metrics.success_rate > 0
            @test first_outcome.metrics.total_memory >= 0
            @test first_outcome.metrics.total_allocations >= 0
        end
    end
    
    @testset "Convergence Rate Analysis" begin
        # Loose tolerances keep each underlying solve cheap.
        bench_cfg = BenchmarkConfig(n_trials = 1, verbose = false)
        tols = [1e-3, 1e-4]

        outcomes = benchmark_convergence_rates(tols, config = bench_cfg)

        @test length(outcomes) == 2
        # Result names encode the tolerance value used for each run.
        @test outcomes[1].name == "Tolerance_0.001"
        @test outcomes[2].name == "Tolerance_0.0001"
    end
    
    @testset "Scaling Analysis" begin
        # A 2×1 parameter grid keeps the scaling sweep minimal.
        bench_cfg = BenchmarkConfig(n_trials = 1, verbose = false)
        orders = [2, 3]
        blocks = [2]

        outcomes = benchmark_scaling_analysis(orders, blocks, config = bench_cfg)

        @test length(outcomes) == 2  # 2 N values × 1 n_block value

        # Each description should record the (N, n_block) pair it ran with.
        for r in outcomes
            @test contains(r.description, "N=")
            @test contains(r.description, "n_block=")
        end
    end
    
    @testset "DOF Estimation" begin
        # Degrees-of-freedom estimation: estimate_degrees_of_freedom(N, n_block, dim).
        # The expected formula is n_block^dim blocks × (N+1)^dim points per block.
        dofs_2d = estimate_degrees_of_freedom(3, 2, 2)
        @test dofs_2d == 2^2 * (3+1)^2  # 2^2 blocks × 4^2 points per block
        
        dofs_3d = estimate_degrees_of_freedom(3, 2, 3)
        @test dofs_3d == 2^3 * (3+1)^3  # 2^3 blocks × 4^3 points per block
    end
    
    @testset "Convergence Rate Calculation" begin
        # Two synthetic residual histories: fast (×0.1 per step) and slow (×0.5 per step).
        fast = [1.0, 0.1, 0.01, 0.001]
        slow = [1.0, 0.5, 0.25, 0.125]

        rate = calculate_convergence_rate([fast, slow])
        @test rate > 0  # decaying residuals imply a positive rate

        # With no histories at all, the rate should degrade gracefully to zero.
        @test calculate_convergence_rate(Vector{Float64}[]) == 0.0
    end
    
    @testset "Legacy Function Compatibility" begin
        # The pre-refactor entry points must still return Dict-based results.
        @test isa(benchmark_polynomial_orders(), Dict)
        @test isa(benchmark_domain_sizes(), Dict)

        backend_dict = benchmark_solver_backends()
        @test isa(backend_dict, Dict)
        @test haskey(backend_dict, :julia)

        @test isa(benchmark_adaptive_refinement(), Dict)
    end
    
    @testset "Error Handling" begin
        # A deliberately invalid solver setup should not crash the benchmark driver.
        bench_cfg = BenchmarkConfig(n_trials = 1, verbose = false)
        bad_opts = NSOptions(
            N = -1,  # negative polynomial order is invalid
            n_block = 0,  # zero blocks is invalid
            verbose = false
        )

        outcome = benchmark_single_configuration(bad_opts, bench_cfg)

        # Failure must be reported through the result structure: either via
        # recorded errors or a zero success rate.
        @test outcome isa BenchmarkResult
        @test !isempty(outcome.errors) || outcome.metrics.success_rate == 0.0
    end
end

@testset "Integration Tests" begin
    @testset "End-to-End Benchmark" begin
        # Mirror the minimal benchmark run performed in CI.
        println("Running integration test - minimal benchmark suite")

        ci_opts = NSOptions(
            N = 2,
            n_block = 2,
            nu = 0.01,
            tfinal = 0.01,
            cfl = 0.5,
            tol = 1e-3,
            solver = :julia,
            verbose = false
        )

        t0 = time()
        result = solve_navier_stokes_2d(ci_opts)
        test_time = time() - t0

        @test result.converged
        @test test_time < 30.0  # generous wall-clock budget for slow CI machines

        println("Integration test completed in $(round(test_time, digits=2)) seconds")
    end
end