# Parallel Testing Framework for NSEMSolver.jl
# Comprehensive test suite for parallel computing components

using Test
using NSEMSolver
using LinearAlgebra
using SparseArrays
using Statistics

# Test configuration
# NOTE(review): TEST_TIMEOUT is not referenced anywhere in this file —
# presumably consumed by an external test harness; verify before removing.
const TEST_TIMEOUT = 300  # 5 minutes timeout for parallel tests

"""
    run_parallel_tests()

Main entry point for parallel testing framework.
"""
function run_parallel_tests()
    println("🧪 NSEMSolver.jl Parallel Testing Framework")
    println("="^50)
    
    # Initialize MPI for testing
    mpi_ctx = initialize_mpi()
    
    try
        if is_root(mpi_ctx)
            println("Running parallel tests on $(mpi_ctx.size) processes...")
            println()
        end
        
        @testset "NSEMSolver Parallel Tests" begin
            
            # Basic MPI functionality tests
            @testset "MPI Wrapper Tests" begin
                test_mpi_wrapper_functionality(mpi_ctx)
            end
            
            # Domain decomposition tests
            @testset "Domain Decomposition Tests" begin
                test_domain_decomposition_correctness(mpi_ctx)
            end
            
            # Communication tests
            @testset "Inter-Process Communication Tests" begin
                test_communication_correctness(mpi_ctx)
            end
            
            # Parallel solver tests
            @testset "Parallel Linear Solver Tests" begin
                test_parallel_solver_convergence(mpi_ctx)
            end
            
            # Time integration tests
            @testset "Parallel Time Integration Tests" begin
                test_parallel_time_integration(mpi_ctx)
            end
            
            # I/O tests
            @testset "Parallel I/O Tests" begin
                test_parallel_io_functionality(mpi_ctx)
            end
            
            # Performance tests
            @testset "Scaling and Performance Tests" begin
                test_parallel_scaling_behavior(mpi_ctx)
            end
            
            # Integration tests
            @testset "Parallel Integration Tests" begin
                test_parallel_navier_stokes_integration(mpi_ctx)
            end
            
        end
        
        if is_root(mpi_ctx)
            println("\n✅ All parallel tests completed successfully!")
        end
        
    catch e
        if is_root(mpi_ctx)
            println("\n❌ Parallel tests failed with error: $e")
            rethrow()
        end
    finally
        finalize_mpi(mpi_ctx)
    end
end

"""
    test_mpi_wrapper_functionality(mpi_ctx::MPIContext)

Test MPI wrapper and abstraction layer functionality.
"""
function test_mpi_wrapper_functionality(mpi_ctx::MPIContext)
    if is_root(mpi_ctx)
        @info "Testing MPI wrapper functionality..."
    end
    
    # Test basic MPI context
    @test mpi_ctx.rank >= 0
    @test mpi_ctx.size >= 1
    @test mpi_ctx.rank < mpi_ctx.size
    
    if mpi_ctx.is_parallel
        @test mpi_ctx.size > 1
        @test HAS_MPI == true
    else
        @test mpi_ctx.size == 1
        @test mpi_ctx.rank == 0
    end
    
    # Test collective operations
    test_data = Float64[mpi_ctx.rank + 1.0]
    all_reduce!(test_data, +, mpi_ctx)
    expected_sum = sum(1:mpi_ctx.size)
    @test test_data[1] ≈ expected_sum
    
    # Test scalar reduction
    local_value = Float64(mpi_ctx.rank)
    global_sum = all_reduce_scalar(local_value, +, mpi_ctx)
    expected_scalar_sum = sum(0:mpi_ctx.size-1)
    @test global_sum ≈ expected_scalar_sum
    
    # Test broadcast
    if mpi_ctx.is_parallel
        broadcast_data = [42.0, 3.14]
        broadcast!(broadcast_data, 0, mpi_ctx)
        @test broadcast_data[1] ≈ 42.0
        @test broadcast_data[2] ≈ 3.14
    end
    
    # Test timing functionality
    start_time = wtime()
    sleep(0.01)  # Short sleep
    end_time = wtime()
    elapsed = end_time - start_time
    @test elapsed >= 0.01
    @test elapsed < 1.0  # Should be much less than 1 second
    
    # Test parallel timer
    timer = create_parallel_timer(mpi_ctx)
    start_timer!(timer, "test_operation")
    sleep(0.01)
    stop_timer!(timer, "test_operation")
    
    summary = get_timing_summary(timer)
    @test haskey(summary, "test_operation")
    
    if mpi_ctx.is_parallel
        @test haskey(summary["test_operation"], "global_mean")
        @test summary["test_operation"]["global_mean"] >= 0.01
    end
    
    barrier(mpi_ctx)  # Synchronize all processes
    
    if is_root(mpi_ctx)
        @info "✓ MPI wrapper tests passed"
    end
end

"""
    test_domain_decomposition_correctness(mpi_ctx::MPIContext)

Test domain decomposition algorithms and load balancing.
"""
function test_domain_decomposition_correctness(mpi_ctx::MPIContext)
    if is_root(mpi_ctx)
        @info "Testing domain decomposition..."
    end
    
    # Create test domain
    n_block = 8
    polynomial_order = 4
    test_domain = create_multidomain(n_block, polynomial_order, 1.0, 2)
    
    # Test block decomposition
    block_strategy = BlockDecomposition(
        compute_optimal_proc_grid(mpi_ctx.size, 2), 1
    )
    
    pdomain = decompose_domain(test_domain, block_strategy, mpi_ctx)
    
    # Verify decomposition properties
    @test pdomain.rank == mpi_ctx.rank
    @test pdomain.num_procs == mpi_ctx.size
    @test pdomain.decomposition_strategy == :block
    
    # Check processor grid validity
    @test prod(pdomain.proc_grid) == mpi_ctx.size
    @test all(x -> x > 0, pdomain.proc_grid)
    
    # Verify work distribution
    @test length(pdomain.global_work_distribution) == mpi_ctx.size
    @test pdomain.global_work_distribution[mpi_ctx.rank + 1] > 0
    
    # Check load balance quality
    @test pdomain.load_balance_quality >= 0.0
    @test pdomain.load_balance_quality <= 1.0
    
    # Verify bounding box
    @test length(pdomain.local_bbox) == 2  # 2D domain
    for (min_val, max_val) in pdomain.local_bbox
        @test min_val <= max_val
        @test min_val >= 0.0
        @test max_val <= test_domain.length
    end
    
    # Test neighbor connectivity
    if mpi_ctx.size > 1
        # Should have some neighbors for multi-process runs
        max_neighbors = 4  # Maximum neighbors in 2D
        @test length(pdomain.neighbors) <= max_neighbors
        
        # All neighbor ranks should be valid
        for neighbor in pdomain.neighbors
            @test neighbor >= 0
            @test neighbor < mpi_ctx.size
            @test neighbor != mpi_ctx.rank
        end
    else
        @test length(pdomain.neighbors) == 0
    end
    
    # Test decomposition statistics
    stats = get_decomposition_stats(pdomain)
    @test haskey(stats, "strategy")
    @test haskey(stats, "num_processes")
    @test haskey(stats, "load_imbalance")
    @test stats["num_processes"] == mpi_ctx.size
    
    barrier(mpi_ctx)
    
    if is_root(mpi_ctx)
        @info "✓ Domain decomposition tests passed"
    end
end

"""
    test_communication_correctness(mpi_ctx::MPIContext)

Test inter-process communication patterns and correctness.
"""
function test_communication_correctness(mpi_ctx::MPIContext)
    if is_root(mpi_ctx)
        @info "Testing inter-process communication..."
    end
    
    if mpi_ctx.size < 2
        if is_root(mpi_ctx)
            @info "Skipping communication tests (single process)"
        end
        return
    end
    
    # Create mock parallel domain for communication tests
    test_domain = create_multidomain(4, 3, 1.0, 2)
    
    # Setup simple neighbor connectivity for testing
    neighbors = if mpi_ctx.rank == 0
        [1]
    elseif mpi_ctx.rank == mpi_ctx.size - 1
        [mpi_ctx.rank - 1]
    else
        [mpi_ctx.rank - 1, mpi_ctx.rank + 1]
    end
    
    # Create mock buffers
    send_buffers = Dict{Int,Vector{Float64}}()
    recv_buffers = Dict{Int,Vector{Float64}}()
    
    for neighbor in neighbors
        send_buffers[neighbor] = randn(100)
        recv_buffers[neighbor] = zeros(100)
    end
    
    # Create mock parallel domain
    pdomain = ParallelDomain{2}(
        test_domain, test_domain, mpi_ctx, mpi_ctx.rank, mpi_ctx.size,
        (2, 2), (0, 0), neighbors, Dict{Int,GhostRegion{2}}(),
        1000.0, [1000.0], ((0.0, 1.0), (0.0, 1.0)), Tuple{Int,Symbol}[],
        send_buffers, recv_buffers, :block, 0.1
    )
    
    # Test communication pattern creation
    comm_pattern = create_communication_pattern(pdomain)
    @test length(comm_pattern.send_ranks) == length(send_buffers)
    @test length(comm_pattern.recv_ranks) == length(recv_buffers)
    
    # Test distributed vector operations
    n_local = 50
    local_data = randn(n_local)
    dist_vec = create_distributed_vector(local_data, pdomain)
    
    @test length(dist_vec.local_values) == n_local
    @test dist_vec.n_local == n_local
    @test dist_vec.n_global == n_local * mpi_ctx.size
    
    # Test distributed norms
    local_norm = norm(local_data)
    global_norm_parallel = distributed_norm(dist_vec.local_values, 2, mpi_ctx)
    
    # Compute expected global norm
    all_norms_squared = [local_norm^2]
    all_reduce!(all_norms_squared, +, mpi_ctx)
    expected_global_norm = sqrt(all_norms_squared[1])
    
    @test global_norm_parallel ≈ expected_global_norm rtol=1e-12
    
    # Test distributed dot product
    local_data2 = randn(n_local)
    dist_vec2 = create_distributed_vector(local_data2, pdomain)
    
    parallel_dot = distributed_dot_product(dist_vec.local_values, dist_vec2.local_values, mpi_ctx)
    
    # Compute expected dot product
    local_dot = dot(local_data, local_data2)
    expected_dot = all_reduce_scalar(local_dot, +, mpi_ctx)
    
    @test parallel_dot ≈ expected_dot rtol=1e-12
    
    # Test communication profiling
    profiler = CommunicationProfiler()
    
    # Profile some mock communication
    profile_communication!(profiler, pdomain, :start)
    sleep(0.001)  # Simulate communication time
    profile_communication!(profiler, pdomain, :send_complete)
    profile_communication!(profiler, pdomain, :recv_complete)
    
    comm_stats = get_communication_statistics(profiler, mpi_ctx)
    @test haskey(comm_stats, "local_messages")
    
    barrier(mpi_ctx)
    
    if is_root(mpi_ctx)
        @info "✓ Communication tests passed"
    end
end

"""
    test_parallel_solver_convergence(mpi_ctx::MPIContext)

Test parallel linear solver convergence and correctness.
"""
function test_parallel_solver_convergence(mpi_ctx::MPIContext)
    if is_root(mpi_ctx)
        @info "Testing parallel linear solvers..."
    end
    
    # Create test problem
    n_local = 100
    test_domain = create_multidomain(4, 3, 1.0, 2)
    
    pdomain = ParallelDomain{2}(
        test_domain, test_domain, mpi_ctx, mpi_ctx.rank, mpi_ctx.size,
        (2, 2), (0, 0), Int[], Dict{Int,GhostRegion{2}}(),
        1000.0, [1000.0], ((0.0, 1.0), (0.0, 1.0)), Tuple{Int,Symbol}[],
        Dict{Int,Vector{Float64}}(), Dict{Int,Vector{Float64}}(),
        :block, 0.1
    )
    
    # Create test distributed vectors
    x = create_distributed_vector(zeros(Float64, n_local), pdomain)
    b = create_distributed_vector(randn(n_local), pdomain)
    
    # Create simple diagonal matrix for testing
    local_matrix = sparse(I, n_local, n_local) * 2.0  # Well-conditioned
    interface_matrices = Dict{Int,SparseMatrixCSC{Float64,Int}}()
    
    A = create_distributed_matrix(local_matrix, interface_matrices, pdomain, 1)
    
    # Test GMRES solver
    gmres_solver = create_parallel_gmres(10, 50, 1e-8)
    
    # Solve system
    converged = solve!(x, gmres_solver, A, b)
    @test converged == true
    
    # Verify solution: A*x should equal b (approximately)
    Ax = create_distributed_vector(zeros(Float64, n_local), pdomain)
    distributed_matvec!(Ax, A, x)
    
    # Check residual
    residual_norm = 0.0
    for i in 1:n_local
        residual_norm += (Ax.local_values[i] - b.local_values[i])^2
    end
    global_residual = all_reduce_scalar(residual_norm, +, mpi_ctx)
    global_residual = sqrt(global_residual)
    
    @test global_residual < 1e-6
    
    # Test CG solver (for symmetric matrix)
    symmetric_matrix = sparse(I, n_local, n_local) + 
                      sparse(ones(n_local-1), 1:n_local-1, 2:n_local, n_local, n_local) +
                      sparse(ones(n_local-1), 2:n_local, 1:n_local-1, n_local, n_local)
    
    A_symmetric = create_distributed_matrix(symmetric_matrix, interface_matrices, pdomain, 1)
    cg_solver = ParallelCG(100, 1e-8, nothing)
    
    x_cg = create_distributed_vector(zeros(Float64, n_local), pdomain)
    converged_cg = solve!(x_cg, cg_solver, A_symmetric, b)
    @test converged_cg == true
    
    # Test block Jacobi preconditioner
    preconditioner = create_block_jacobi_preconditioner(A)
    @test preconditioner isa BlockJacobiPreconditioner
    
    # Apply preconditioner
    y = create_distributed_vector(zeros(Float64, n_local), pdomain)
    apply_preconditioner!(y, preconditioner, b)
    
    # Should not be zero after preconditioning
    precond_norm = distributed_norm(y.local_values, 2, mpi_ctx)
    @test precond_norm > 1e-12
    
    barrier(mpi_ctx)
    
    if is_root(mpi_ctx)
        @info "✓ Parallel solver tests passed"
    end
end

"""
    test_parallel_time_integration(mpi_ctx::MPIContext)

Test parallel time integration schemes.
"""
function test_parallel_time_integration(mpi_ctx::MPIContext)
    if is_root(mpi_ctx)
        @info "Testing parallel time integration..."
    end
    
    # Create test domain and vectors
    n_local = 50
    test_domain = create_multidomain(4, 3, 1.0, 2)
    
    pdomain = ParallelDomain{2}(
        test_domain, test_domain, mpi_ctx, mpi_ctx.rank, mpi_ctx.size,
        (2, 2), (0, 0), Int[], Dict{Int,GhostRegion{2}}(),
        1000.0, [1000.0], ((0.0, 1.0), (0.0, 1.0)), Tuple{Int,Symbol}[],
        Dict{Int,Vector{Float64}}(), Dict{Int,Vector{Float64}}(),
        :block, 0.1
    )
    
    # Test parallel RK4 integrator
    integrator = create_parallel_rk4(pdomain, n_local)
    @test integrator.order == 4
    @test integrator.num_stages == 4
    @test length(integrator.stage_vectors) == 4
    
    # Create test solution vector
    u = create_distributed_vector(ones(Float64, n_local), pdomain)
    
    # Define simple test RHS function: du/dt = -u (exponential decay)
    function test_rhs(du, u_vals, t)
        du .= -u_vals
    end
    
    # Test single time step
    dt = 0.01
    t = 0.0
    dt_actual = parallel_timestep!(integrator, u, test_rhs, dt, t)
    
    @test dt_actual ≈ dt  # Should use the requested timestep
    
    # For exponential decay, analytical solution is u(t) = u0 * exp(-t)
    # After one time step: u ≈ u0 * exp(-dt)
    expected_value = exp(-dt)  # Since u0 = 1
    
    # Check if solution is approximately correct (within RK4 error tolerance)
    local_error = abs(u.local_values[1] - expected_value)
    max_error = all_reduce_scalar(local_error, max, mpi_ctx)
    @test max_error < 1e-6  # RK4 should be quite accurate
    
    # Test adaptive time stepper
    adaptive_stepper = create_parallel_adaptive_stepper(pdomain, n_local, tolerance=1e-4)
    
    u_adaptive = create_distributed_vector(ones(Float64, n_local), pdomain)
    dt_new, accepted = adaptive_parallel_step!(adaptive_stepper, u_adaptive, test_rhs, dt, t)
    
    @test accepted == true
    @test dt_new > 0
    
    # Check adaptive statistics
    stats = get_adaptive_stepping_stats(adaptive_stepper)
    @test haskey(stats, "accepted_steps")
    @test haskey(stats, "rejected_steps")
    @test stats["accepted_steps"] >= 1
    
    barrier(mpi_ctx)
    
    if is_root(mpi_ctx)
        @info "✓ Parallel time integration tests passed"
    end
end

"""
    test_parallel_io_functionality(mpi_ctx::MPIContext)

Test parallel I/O and checkpointing functionality.
"""
function test_parallel_io_functionality(mpi_ctx::MPIContext)
    if is_root(mpi_ctx)
        @info "Testing parallel I/O..."
    end
    
    # Create test output directory
    test_output_dir = "test_parallel_output"
    io_config = create_parallel_io_config(test_output_dir)
    
    @test io_config.output_directory == test_output_dir
    @test io_config.file_prefix == "nsem"  # Default prefix
    
    # Create mock result and domain for I/O testing
    n = 10
    mock_result = NSResult(
        u = randn(n, n),
        v = randn(n, n),
        w = nothing,
        p = randn(n, n),
        x = collect(range(0, 1, length=n)),
        y = collect(range(0, 1, length=n)),
        z = nothing,
        converged = true,
        iterations = 10,
        residual_norm = 1e-8,
        solve_time = 1.5,
        convergence_history = [1.0, 0.1, 0.01, 1e-8],
        multidomain = nothing,
        options = NSOptions()
    )
    
    test_domain = create_multidomain(4, 3, 1.0, 2)
    pdomain = ParallelDomain{2}(
        test_domain, test_domain, mpi_ctx, mpi_ctx.rank, mpi_ctx.size,
        (2, 2), (0, 0), Int[], Dict{Int,GhostRegion{2}}(),
        1000.0, [1000.0], ((0.0, 1.0), (0.0, 1.0)), Tuple{Int,Symbol}[],
        Dict{Int,Vector{Float64}}(), Dict{Int,Vector{Float64}}(),
        :block, 0.1
    )
    
    # Test checkpointer creation
    checkpointer = ParallelCheckpointer(io_config, pdomain)
    @test checkpointer.config == io_config
    @test checkpointer.pdomain === pdomain
    
    # Test solution collection
    if mpi_ctx.size <= 4  # Only test for small numbers of processes
        collected_result = collect_solution_on_root(mock_result, pdomain)
        
        if is_root(mpi_ctx)
            @test collected_result !== nothing
            @test size(collected_result.u) == size(mock_result.u)
        else
            @test collected_result === nothing
        end
    end
    
    # Cleanup test directory
    if is_root(mpi_ctx)
        try
            rm(test_output_dir, recursive=true, force=true)
        catch
            # Ignore cleanup errors
        end
    end
    
    barrier(mpi_ctx)
    
    if is_root(mpi_ctx)
        @info "✓ Parallel I/O tests passed"
    end
end

"""
    test_parallel_scaling_behavior(mpi_ctx::MPIContext)

Test scaling behavior and performance characteristics.
"""
function test_parallel_scaling_behavior(mpi_ctx::MPIContext)
    if is_root(mpi_ctx)
        @info "Testing parallel scaling behavior..."
    end
    
    # Create scaling study
    study = ScalingStudy("Test Scaling Study", "Unit test scaling analysis")
    
    # Test performance metrics creation
    test_metrics = ParallelPerformanceMetrics(
        total_time=1.0,
        compute_time=0.8,
        communication_time=0.15,
        io_time=0.05,
        num_processes=mpi_ctx.size,
        local_dofs=1000,
        global_dofs=1000 * mpi_ctx.size
    )
    
    @test test_metrics.total_time ≈ 1.0
    @test test_metrics.num_processes == mpi_ctx.size
    @test test_metrics.global_dofs == 1000 * mpi_ctx.size
    
    # Add results to scaling study
    add_strong_scaling_result!(study, mpi_ctx.size, test_metrics)
    @test haskey(study.strong_scaling_data, mpi_ctx.size)
    @test mpi_ctx.size in study.processor_counts
    
    # Test load imbalance computation
    work_distribution = ones(Float64, mpi_ctx.size)
    if mpi_ctx.size > 1
        work_distribution[1] = 2.0  # Create some imbalance
    end
    
    imbalance = compute_load_imbalance(work_distribution)
    expected_imbalance = mpi_ctx.size > 1 ? 0.5 : 0.0
    @test imbalance ≈ expected_imbalance rtol=0.1
    
    # Test optimal processor count prediction
    if length(study.strong_scaling_data) > 0
        optimal_count = predict_optimal_processor_count(study)
        @test optimal_count > 0
        @test optimal_count <= maximum(keys(study.strong_scaling_data))
    end
    
    barrier(mpi_ctx)
    
    if is_root(mpi_ctx)
        @info "✓ Parallel scaling tests passed"
    end
end

"""
    test_parallel_navier_stokes_integration(mpi_ctx::MPIContext)

Integration test for complete parallel Navier-Stokes solve.
"""
function test_parallel_navier_stokes_integration(mpi_ctx::MPIContext)
    if is_root(mpi_ctx)
        @info "Testing parallel Navier-Stokes integration..."
    end
    
    # Create small test problem
    options = NSOptions(
        N=3,                    # Small polynomial order for testing
        n_block=2,              # Small domain  
        max_steps=5,            # Few time steps
        tol=1e-4,              # Relaxed tolerance
        solver=:julia,          # Use Julia solver
        tfinal=0.01,           # Short time
        verbose=false
    )
    
    if is_root(mpi_ctx)
        @info "Running small parallel NS test problem..."
    end
    
    # Create test domain
    test_domain = create_multidomain(
        options.n_block,
        options.N,
        options.tfinal,
        options.dim
    )
    
    # Test that domain can be created without errors
    @test test_domain.n == options.N
    @test test_domain.n_block == options.n_block
    @test test_domain.dim == options.dim
    
    # Create parallel domain decomposition
    block_strategy = BlockDecomposition(
        compute_optimal_proc_grid(mpi_ctx.size, options.dim), 1
    )
    
    pdomain = decompose_domain(test_domain, block_strategy, mpi_ctx)
    
    # Verify parallel domain properties
    @test pdomain.rank == mpi_ctx.rank
    @test pdomain.num_procs == mpi_ctx.size
    
    # Test that we can create distributed vectors
    n_local = pdomain.local_domain.n_block^2 * pdomain.local_domain.n^2
    u_dist = create_distributed_vector(zeros(Float64, n_local), pdomain)
    v_dist = create_distributed_vector(zeros(Float64, n_local), pdomain)
    p_dist = create_distributed_vector(zeros(Float64, n_local), pdomain)
    
    @test length(u_dist.local_values) == n_local
    @test u_dist.n_global == n_local * mpi_ctx.size
    
    # Test basic operations on distributed vectors
    fill!(u_dist.local_values, 1.0)
    global_sum = all_reduce_scalar(sum(u_dist.local_values), +, mpi_ctx)
    expected_sum = Float64(n_local * mpi_ctx.size)
    @test global_sum ≈ expected_sum
    
    # Test communication patterns
    comm_pattern = create_communication_pattern(pdomain)
    @test comm_pattern isa CommunicationPattern
    
    # Test that ghost exchange doesn't crash
    if length(pdomain.neighbors) > 0
        exchange_ghost_values!(u_dist.local_values, v_dist.local_values, 
                             p_dist.local_values, pdomain, comm_pattern)
    end
    
    # Verify solution values haven't become NaN or infinite
    @test all(isfinite, u_dist.local_values)
    @test all(isfinite, v_dist.local_values)
    @test all(isfinite, p_dist.local_values)
    
    barrier(mpi_ctx)
    
    if is_root(mpi_ctx)
        @info "✓ Parallel Navier-Stokes integration tests passed"
    end
end

"""
    run_parallel_correctness_checks(mpi_ctx::MPIContext)

Additional correctness checks for parallel implementation.
"""
function run_parallel_correctness_checks(mpi_ctx::MPIContext)
    if is_root(mpi_ctx)
        @info "Running parallel correctness checks..."
    end
    
    # Test data consistency across processes
    test_data = randn(10)
    broadcast!(test_data, 0, mpi_ctx)
    
    # All processes should have identical data after broadcast
    checksum = sum(test_data)
    all_checksums = [checksum]
    all_reduce!(all_checksums, +, mpi_ctx)
    expected_checksum = checksum * mpi_ctx.size
    @test all_checksums[1] ≈ expected_checksum
    
    # Test process synchronization
    start_time = wtime()
    barrier(mpi_ctx)
    end_time = wtime()
    
    # Barrier should complete quickly for test systems
    @test (end_time - start_time) < 1.0
    
    if is_root(mpi_ctx)
        @info "✓ Parallel correctness checks passed"
    end
end

# Export the main test function
export run_parallel_tests

# Run tests only when this file is executed as a script (e.g. under
# `mpiexec -n 4 julia thisfile.jl`), not when it is `include`d by a
# larger test harness.
if abspath(PROGRAM_FILE) == @__FILE__
    run_parallel_tests()
end