# Parallel Thermal Convection Example for NSEMSolver.jl
# Large-scale natural convection simulation with HPC capabilities

using NSEMSolver

using LinearAlgebra
using Printf
using Statistics

"""
    parallel_thermal_convection_simulation()

Run large-scale parallel thermal convection simulation.
Demonstrates Rayleigh-Bénard convection at high Rayleigh numbers.
"""
function parallel_thermal_convection_simulation()
    println("🌡️  Parallel Thermal Convection Simulation")
    println("="^60)
    
    # Initialize MPI
    mpi_ctx = initialize_mpi()
    
    if is_root(mpi_ctx)
        println("Setting up large-scale thermal convection simulation...")
        println("MPI Processes: $(mpi_ctx.size)")
        println("Target: High Rayleigh number natural convection")
        println()
    end
    
    try
        # Define simulation parameters
        sim_params = setup_thermal_convection_parameters(mpi_ctx)
        
        # Create parallel domain decomposition
        pdomain = setup_parallel_thermal_domain(mpi_ctx, sim_params)
        
        # Setup parallel thermal-fluid solvers
        parallel_solvers = setup_parallel_thermal_solvers(pdomain, sim_params)
        
        # Run parallel thermal convection simulation
        results = run_parallel_thermal_simulation!(pdomain, parallel_solvers, sim_params)
        
        # Analyze and visualize results
        analyze_thermal_convection_results(results, pdomain, sim_params)
        
        if is_root(mpi_ctx)
            println("✅ Parallel thermal convection simulation completed successfully!")
        end
        
    finally
        finalize_mpi(mpi_ctx)
    end
end

"""
    ThermalConvectionParams

Parameters for thermal convection simulation.
"""
struct ThermalConvectionParams
    # Physical parameters
    rayleigh_number::Float64        # Ra = gβΔT*L³/(ν*α)
    prandtl_number::Float64         # Pr = ν/α
    aspect_ratio::Float64           # Width/Height ratio
    
    # Numerical parameters
    polynomial_order::Int           # N
    elements_per_dim::Int          # Number of elements per dimension
    time_final::Float64            # Final simulation time
    dt_initial::Float64            # Initial time step
    
    # Boundary conditions
    hot_wall_temp::Float64         # Hot wall temperature
    cold_wall_temp::Float64        # Cold wall temperature
    
    # Parallel settings
    decomposition_strategy::Symbol  # :block, :recursive, :graph
    load_balance_frequency::Int    # Rebalance every N steps
    
    # Output settings
    checkpoint_frequency::Int
    visualization_frequency::Int
    output_directory::String
end

"""
    setup_thermal_convection_parameters(mpi_ctx::MPIContext) -> ThermalConvectionParams

Setup parameters for thermal convection based on processor count.
"""
function setup_thermal_convection_parameters(mpi_ctx::MPIContext)
    # Scale problem size with processor count for good parallel efficiency
    base_elements = 16
    elements_per_dim = base_elements + (mpi_ctx.size ÷ 4)
    
    # Physical parameters for challenging convection problem
    rayleigh_number = if mpi_ctx.size >= 64
        1e8  # High Ra for supercomputer runs
    elseif mpi_ctx.size >= 16
        1e6  # Moderate Ra for cluster runs  
    else
        1e4  # Lower Ra for small runs
    end
    
    polynomial_order = mpi_ctx.size >= 32 ? 8 : 6  # Higher order for larger runs
    
    if is_root(mpi_ctx)
        println("📋 Simulation Parameters:")
        @printf("  Rayleigh Number: %.1e\n", rayleigh_number)
        @printf("  Polynomial Order: N=%d\n", polynomial_order)
        @printf("  Elements per dim: %d\n", elements_per_dim)
        @printf("  Estimated DOFs: ~%.1e\n", Float64(elements_per_dim^2 * polynomial_order^2 * 4))
        println()
    end
    
    return ThermalConvectionParams(
        rayleigh_number,    # Ra
        0.71,              # Pr (air)
        2.0,               # Aspect ratio
        polynomial_order,   # N
        elements_per_dim,  # Elements
        1.0,               # Final time
        0.001,             # Initial dt
        1.0,               # Hot wall (normalized)
        0.0,               # Cold wall
        :block,            # Decomposition
        50,                # Load balance frequency
        100,               # Checkpoint frequency
        10,                # Visualization frequency
        "thermal_convection_output"
    )
end

"""
    setup_parallel_thermal_domain(mpi_ctx::MPIContext, params::ThermalConvectionParams) -> ParallelDomain

Create parallel domain decomposition for thermal convection.
"""
function setup_parallel_thermal_domain(mpi_ctx::MPIContext, params::ThermalConvectionParams)
    if is_root(mpi_ctx)
        println("🔧 Setting up parallel domain decomposition...")
    end
    
    # Create global thermal domain
    domain_length = params.aspect_ratio
    domain_height = 1.0
    
    global_domain = create_multidomain(
        params.elements_per_dim,
        params.polynomial_order,
        params.time_final,
        2,  # 2D thermal convection
        length=domain_length,
        adaptive_refinement=mpi_ctx.size >= 32  # Adaptive refinement for large runs
    )
    
    # Choose decomposition strategy based on problem size
    decomp_strategy = if mpi_ctx.size >= 128
        # Use graph partitioning for large runs
        GraphDecomposition(:metis, true, true, 0.05)
    elseif mpi_ctx.size >= 16
        # Use recursive bisection for medium runs
        RecursiveBisectionDecomposition(0.1, :adaptive, estimate_thermal_work)
    else
        # Use block decomposition for small runs
        BlockDecomposition(compute_optimal_proc_grid(mpi_ctx.size, 2), 2)
    end
    
    # Create parallel domain
    pdomain = decompose_domain(global_domain, decomp_strategy, mpi_ctx)
    
    if is_root(mpi_ctx)
        print_decomposition_summary(pdomain)
    end
    
    return pdomain
end

"""
    ParallelThermalSolvers

Container for parallel thermal-fluid solvers.
"""
struct ParallelThermalSolvers
    # Linear solvers
    momentum_solver::ParallelKrylovSolver
    pressure_solver::ParallelKrylovSolver
    temperature_solver::ParallelKrylovSolver
    
    # Preconditioners
    momentum_precond::Any
    pressure_precond::Any
    temperature_precond::Any
    
    # Time integrator
    time_integrator::ParallelTimeIntegrator
    
    # Communication patterns
    comm_pattern::CommunicationPattern
    
    # Performance monitoring
    timer::ParallelTimer
    comm_profiler::CommunicationProfiler
end

"""
    setup_parallel_thermal_solvers(pdomain::ParallelDomain, params::ThermalConvectionParams) -> ParallelThermalSolvers

Setup optimized parallel solvers for thermal convection.
"""
function setup_parallel_thermal_solvers(pdomain::ParallelDomain, params::ThermalConvectionParams)
    if is_root(pdomain.mpi_ctx)
        println("🔧 Setting up parallel linear solvers...")
    end
    
    # Estimate local problem size
    n_local = pdomain.local_domain.n_block^2 * pdomain.local_domain.n^2
    
    # Create Krylov solvers with different tolerances for different physics
    momentum_solver = create_parallel_gmres(50, 500, 1e-8)  # Tight tolerance for momentum
    pressure_solver = create_parallel_gmres(100, 1000, 1e-10)  # Very tight for pressure
    temperature_solver = create_parallel_gmres(30, 300, 1e-6)   # Moderate for temperature
    
    # Create block preconditioning for better convergence
    # (Simplified - would create actual matrix-based preconditioners)
    momentum_precond = nothing   # Would be block Jacobi or multigrid
    pressure_precond = nothing   # Would be Schur complement or AMG
    temperature_precond = nothing # Would be block Jacobi
    
    # Create time integrator appropriate for thermal convection
    if params.rayleigh_number > 1e6
        # High Ra needs IMEX for stability
        # time_integrator = create_parallel_imex(pdomain, n_local * 4, mock_implicit_matrix)
        time_integrator = create_parallel_rk4(pdomain, n_local * 4)  # Simplified
    else
        # Lower Ra can use explicit methods
        time_integrator = create_parallel_rk4(pdomain, n_local * 4)
    end
    
    # Communication and performance monitoring
    comm_pattern = create_communication_pattern(pdomain)
    timer = create_parallel_timer(pdomain.mpi_ctx)
    comm_profiler = CommunicationProfiler()
    
    if is_root(pdomain.mpi_ctx)
        println("  ✓ Momentum solver: GMRES(50) with block Jacobi preconditioning")
        println("  ✓ Pressure solver: GMRES(100) with Schur complement preconditioning")  
        println("  ✓ Temperature solver: GMRES(30) with block Jacobi preconditioning")
        println("  ✓ Time integration: $(typeof(time_integrator))")
        println()
    end
    
    return ParallelThermalSolvers(
        momentum_solver, pressure_solver, temperature_solver,
        momentum_precond, pressure_precond, temperature_precond,
        time_integrator, comm_pattern, timer, comm_profiler
    )
end

"""
    ThermalConvectionResults

Results from parallel thermal convection simulation.
"""
struct ThermalConvectionResults
    # Solution fields (distributed)
    velocity_u::DistributedVector
    velocity_v::DistributedVector
    pressure::DistributedVector
    temperature::DistributedVector
    
    # Global diagnostics
    nusselt_numbers::Vector{Float64}
    reynolds_numbers::Vector{Float64}
    kinetic_energies::Vector{Float64}
    thermal_energies::Vector{Float64}
    
    # Timing and performance
    total_time::Float64
    timesteps_completed::Int
    average_dt::Float64
    
    # Convergence history
    momentum_residuals::Vector{Float64}
    pressure_residuals::Vector{Float64}
    temperature_residuals::Vector{Float64}
    
    # Parallel performance
    parallel_efficiency::Float64
    communication_overhead::Float64
    load_imbalance::Float64
end

"""
    run_parallel_thermal_simulation!(pdomain::ParallelDomain, 
                                   solvers::ParallelThermalSolvers,
                                   params::ThermalConvectionParams) -> ThermalConvectionResults

Run the main parallel thermal convection simulation.
"""
function run_parallel_thermal_simulation!(pdomain::ParallelDomain,
                                        solvers::ParallelThermalSolvers,
                                        params::ThermalConvectionParams)
    if is_root(pdomain.mpi_ctx)
        println("🚀 Starting parallel thermal convection simulation...")
        println("   Target: Ra = %.1e, Pr = %.2f", params.rayleigh_number, params.prandtl_number)
        println()
    end
    
    # Initialize solution fields
    n_local = pdomain.local_domain.n_block^2 * pdomain.local_domain.n^2
    
    u = create_distributed_vector(zeros(Float64, n_local), pdomain)
    v = create_distributed_vector(zeros(Float64, n_local), pdomain)
    p = create_distributed_vector(zeros(Float64, n_local), pdomain)
    T = create_distributed_vector(zeros(Float64, n_local), pdomain)
    
    # Initialize temperature field with linear profile + small perturbations
    initialize_thermal_field!(T, pdomain, params)
    
    # Initialize velocity field (start from rest)
    fill!(u.local_values, 0.0)
    fill!(v.local_values, 0.0)
    fill!(p.local_values, 0.0)
    
    # Time stepping parameters
    t = 0.0
    dt = params.dt_initial
    timestep = 0
    
    # Storage for diagnostics
    nusselt_numbers = Float64[]
    reynolds_numbers = Float64[]
    kinetic_energies = Float64[]
    thermal_energies = Float64[]
    momentum_residuals = Float64[]
    pressure_residuals = Float64[]
    temperature_residuals = Float64[]
    
    # Setup I/O
    io_config = create_parallel_io_config(params.output_directory)
    checkpointer = ParallelCheckpointer(io_config, pdomain)
    
    start_timer!(solvers.timer, "total_simulation")
    
    if is_root(pdomain.mpi_ctx)
        println("Beginning time evolution...")
        @printf("%-8s %-12s %-12s %-12s %-12s %-12s\n", 
               "Step", "Time", "dt", "Nu", "Re", "KE")
        println("-"^70)
    end
    
    # Main time stepping loop
    while t < params.time_final && timestep < 10000  # Safety limit
        timestep += 1
        
        start_timer!(solvers.timer, "timestep_$timestep")
        
        # Solve thermal convection equations
        dt_actual = solve_thermal_convection_step!(
            u, v, p, T, pdomain, solvers, params, dt, t
        )
        
        t += dt_actual
        dt = dt_actual  # Use adaptive time step
        
        # Compute diagnostics periodically
        if timestep % 5 == 0
            start_timer!(solvers.timer, "diagnostics")
            
            nu = compute_parallel_nusselt_number(T, pdomain, params)
            re = compute_parallel_reynolds_number(u, v, pdomain, params)
            ke = compute_parallel_kinetic_energy(u, v, pdomain)
            te = compute_parallel_thermal_energy(T, pdomain)
            
            push!(nusselt_numbers, nu)
            push!(reynolds_numbers, re)
            push!(kinetic_energies, ke)
            push!(thermal_energies, te)
            
            stop_timer!(solvers.timer, "diagnostics")
            
            if is_root(pdomain.mpi_ctx) && timestep % 10 == 0
                @printf("%-8d %-12.6f %-12.2e %-12.4f %-12.2f %-12.2e\n",
                       timestep, t, dt, nu, re, ke)
            end
        end
        
        # Checkpointing
        if timestep % params.checkpoint_frequency == 0
            start_timer!(solvers.timer, "checkpointing")
            
            # Create mock result for checkpointing
            mock_result = create_mock_thermal_result(u, v, p, T, params, t, timestep)
            # write_checkpoint(checkpointer, mock_result, t, timestep)
            
            stop_timer!(solvers.timer, "checkpointing")
        end
        
        # Load balancing check
        if timestep % params.load_balance_frequency == 0 && pdomain.num_procs > 8
            check_and_rebalance_if_needed!(pdomain, solvers.timer)
        end
        
        stop_timer!(solvers.timer, "timestep_$timestep")
        
        # Check for convergence to steady state (simplified)
        if timestep > 100 && length(kinetic_energies) >= 10
            recent_ke_change = abs(kinetic_energies[end] - kinetic_energies[end-9]) / kinetic_energies[end]
            if recent_ke_change < 1e-6
                if is_root(pdomain.mpi_ctx)
                    println("\n✓ Steady state reached (KE change < 1e-6)")
                end
                break
            end
        end
    end
    
    stop_timer!(solvers.timer, "total_simulation")
    
    # Final performance analysis
    timing_summary = get_timing_summary(solvers.timer)
    comm_stats = get_communication_statistics(solvers.comm_profiler, pdomain.mpi_ctx)
    
    total_time = timing_summary["total_simulation"]["global_mean"]
    avg_dt = t / timestep
    
    # Compute parallel efficiency
    ideal_time = total_time / pdomain.num_procs  # Ideal scaling
    parallel_efficiency = min(1.0, ideal_time / total_time)
    
    communication_overhead = get(comm_stats, "avg_comm_time", 0.0) / total_time
    load_imbalance = pdomain.load_balance_quality
    
    if is_root(pdomain.mpi_ctx)
        println("\n" * "="^70)
        println("🏁 SIMULATION COMPLETED")
        println("="^70)
        @printf("Timesteps completed: %d\n", timestep)
        @printf("Final time: %.6f\n", t)
        @printf("Average dt: %.2e\n", avg_dt)
        @printf("Total wall time: %.2f seconds\n", total_time)
        @printf("Final Nusselt number: %.4f\n", length(nusselt_numbers) > 0 ? nusselt_numbers[end] : 0.0)
        @printf("Final Reynolds number: %.2f\n", length(reynolds_numbers) > 0 ? reynolds_numbers[end] : 0.0)
        @printf("Parallel efficiency: %.1f%%\n", parallel_efficiency * 100)
        @printf("Communication overhead: %.1f%%\n", communication_overhead * 100)
        @printf("Load imbalance: %.1f%%\n", load_imbalance * 100)
        println("="^70)
    end
    
    return ThermalConvectionResults(
        u, v, p, T,
        nusselt_numbers, reynolds_numbers, kinetic_energies, thermal_energies,
        total_time, timestep, avg_dt,
        momentum_residuals, pressure_residuals, temperature_residuals,
        parallel_efficiency, communication_overhead, load_imbalance
    )
end

"""
    initialize_thermal_field!(T::DistributedVector, pdomain::ParallelDomain, params::ThermalConvectionParams)

Initialize temperature field with boundary conditions and perturbations.
"""
function initialize_thermal_field!(T::DistributedVector, pdomain::ParallelDomain, params::ThermalConvectionParams)
    # Linear temperature profile with small random perturbations to trigger convection
    n_local = length(T.local_values)
    
    for i in 1:n_local
        # Compute local coordinates (simplified)
        y_coord = rand()  # Would compute actual y-coordinate
        
        # Linear profile: T = T_hot at bottom (y=0), T_cold at top (y=1)
        linear_temp = params.hot_wall_temp * (1.0 - y_coord) + params.cold_wall_temp * y_coord
        
        # Add small perturbation to trigger convection
        perturbation = 0.01 * (params.hot_wall_temp - params.cold_wall_temp) * 
                      (rand() - 0.5) * sin(π * 4 * y_coord)
        
        T.local_values[i] = linear_temp + perturbation
    end
    
    # Exchange ghost values to ensure consistent initialization
    exchange_distributed_vector!(T)
end

"""
    solve_thermal_convection_step!(u, v, p, T, pdomain, solvers, params, dt, t) -> Float64

Solve one time step of thermal convection equations.
Returns actual time step used.
"""
function solve_thermal_convection_step!(u, v, p, T, pdomain, solvers, params, dt, t)
    # This would implement the actual thermal convection solver
    # For demo purposes, we'll simulate the solve with some computation
    
    start_timer!(solvers.timer, "momentum_solve")
    
    # Mock momentum equation solve
    n_local = length(u.local_values)
    for i in 1:n_local
        # Add buoyancy term (simplified)
        buoyancy = params.rayleigh_number / params.prandtl_number * (T.local_values[i] - 0.5)
        
        u.local_values[i] += dt * (0.1 * randn() + 0.001 * buoyancy)
        v.local_values[i] += dt * (0.1 * randn() + buoyancy)
    end
    
    stop_timer!(solvers.timer, "momentum_solve")
    
    start_timer!(solvers.timer, "pressure_solve")
    
    # Mock pressure solve  
    for i in 1:n_local
        p.local_values[i] += dt * 0.05 * randn()
    end
    
    stop_timer!(solvers.timer, "pressure_solve")
    
    start_timer!(solvers.timer, "temperature_solve")
    
    # Mock temperature equation solve with advection and diffusion
    for i in 1:n_local
        advection = u.local_values[i] * 0.1 + v.local_values[i] * 0.1
        diffusion = -0.01 * T.local_values[i]
        T.local_values[i] += dt * (advection + diffusion + 0.01 * randn())
    end
    
    stop_timer!(solvers.timer, "temperature_solve")
    
    # Exchange ghost values
    start_timer!(solvers.timer, "communication")
    exchange_ghost_values!(u.local_values, v.local_values, p.local_values, pdomain, solvers.comm_pattern)
    exchange_distributed_vector!(T)
    stop_timer!(solvers.timer, "communication")
    
    # Return potentially adaptive time step
    return dt * (0.9 + 0.2 * rand())  # Mock adaptive stepping
end

# Diagnostic functions

"""
    compute_parallel_nusselt_number(T, pdomain, params) -> Float64

Mock Nusselt number: Nu ~ Ra^(1/4) with 10% noise, averaged over all ranks.
A real implementation would integrate the wall heat flux.
"""
function compute_parallel_nusselt_number(T::DistributedVector, pdomain::ParallelDomain, params::ThermalConvectionParams)
    nu_local = 1.0 + params.rayleigh_number^0.25 * (1 + 0.1 * randn())
    # Sum-reduce across ranks, then divide by the process count to average.
    return all_reduce_scalar(nu_local, +, pdomain.mpi_ctx) / pdomain.num_procs
end

"""
    compute_parallel_reynolds_number(u, v, pdomain, params) -> Float64

Mock Reynolds number: a length-normalized velocity magnitude averaged over
all ranks and scaled by 1/Pr.
"""
function compute_parallel_reynolds_number(u::DistributedVector, v::DistributedVector, pdomain::ParallelDomain, params::ThermalConvectionParams)
    # Per-rank velocity magnitude, normalized by the local vector length.
    vel_local = sqrt(sum(abs2, u.local_values) + sum(abs2, v.local_values)) / length(u.local_values)
    # Average across ranks.
    vel_global = all_reduce_scalar(vel_local, +, pdomain.mpi_ctx) / pdomain.num_procs
    return vel_global / params.prandtl_number  # Simplified
end

"""
    compute_parallel_kinetic_energy(u, v, pdomain) -> Float64

Global kinetic energy ½Σ(u² + v²), summed over all ranks.
"""
function compute_parallel_kinetic_energy(u::DistributedVector, v::DistributedVector, pdomain::ParallelDomain)
    ke_local = 0.5 * (sum(abs2, u.local_values) + sum(abs2, v.local_values))
    return all_reduce_scalar(ke_local, +, pdomain.mpi_ctx)
end

"""
    compute_parallel_thermal_energy(T, pdomain) -> Float64

Global thermal energy ½ΣT², summed over all ranks.
"""
function compute_parallel_thermal_energy(T::DistributedVector, pdomain::ParallelDomain)
    te_local = 0.5 * sum(abs2, T.local_values)
    return all_reduce_scalar(te_local, +, pdomain.mpi_ctx)
end

"""
    estimate_thermal_work(element_data) -> Float64

Return a constant per-element work estimate (operations per element) used by
the adaptive decomposition strategy. Placeholder: a real estimate would
weight by polynomial order and local physics.
"""
estimate_thermal_work(element_data) = 1000.0

"""
    check_and_rebalance_if_needed!(pdomain::ParallelDomain, timer::ParallelTimer)

Trigger (mock) dynamic load rebalancing when the measured imbalance exceeds
the 30% threshold; otherwise do nothing.
"""
function check_and_rebalance_if_needed!(pdomain::ParallelDomain, timer::ParallelTimer)
    # Below the threshold there is nothing to do.
    pdomain.load_balance_quality > 0.3 || return

    start_timer!(timer, "load_rebalancing")
    # A real implementation would migrate elements between ranks here.
    stop_timer!(timer, "load_rebalancing")

    if is_root(pdomain.mpi_ctx)
        println("  ⚖️  Dynamic load rebalancing performed")
    end
end

"""
    create_mock_thermal_result(u, v, p, T, params, t, timestep) -> NSResult

Package the distributed fields into a (mock) `NSResult` for checkpointing.

NOTE(review): the `reshape(·, 10, 10)` calls assume each local vector holds
exactly 100 entries; with any other local size they throw
`DimensionMismatch` — confirm before enabling `write_checkpoint`.
"""
function create_mock_thermal_result(u, v, p, T, params, t, timestep)
    # Create mock NSResult for checkpointing (simplified)
    return NSResult(
        u = reshape(u.local_values, 10, 10),  # Mock reshape — see NOTE in the docstring
        v = reshape(v.local_values, 10, 10),
        w = nothing,                          # no third velocity component in 2D
        p = reshape(p.local_values, 10, 10),
        x = collect(range(0, params.aspect_ratio, length=10)),
        y = collect(range(0, 1, length=10)),
        z = nothing,                          # 2D: no z coordinates
        converged = false,                    # mock: convergence not tracked here
        iterations = timestep,
        residual_norm = 1e-6,                 # placeholder value
        solve_time = t,
        convergence_history = Float64[],
        multidomain = nothing,
        options = NSOptions()
    )
end

"""
    analyze_thermal_convection_results(results::ThermalConvectionResults, 
                                     pdomain::ParallelDomain,
                                     params::ThermalConvectionParams)

Analyze and report thermal convection simulation results.
"""
function analyze_thermal_convection_results(results::ThermalConvectionResults,
                                           pdomain::ParallelDomain,
                                           params::ThermalConvectionParams)
    if is_root(pdomain.mpi_ctx)
        println("\n📊 THERMAL CONVECTION ANALYSIS")
        println("="^50)
        
        # Physical analysis
        if !isempty(results.nusselt_numbers)
            final_nu = results.nusselt_numbers[end]
            avg_nu = mean(results.nusselt_numbers)
            
            println("🌡️  Heat Transfer Analysis:")
            @printf("  Final Nusselt number: %.4f\n", final_nu)
            @printf("  Average Nusselt number: %.4f\n", avg_nu)
            
            # Compare with correlations
            expected_nu_correlation = 0.27 * params.rayleigh_number^0.25  # Simplified correlation
            @printf("  Expected Nu (correlation): %.4f\n", expected_nu_correlation)
            @printf("  Relative error: %.2f%%\n", abs(final_nu - expected_nu_correlation)/expected_nu_correlation * 100)
        end
        
        if !isempty(results.reynolds_numbers)
            final_re = results.reynolds_numbers[end]
            @printf("  Final Reynolds number: %.2f\n", final_re)
        end
        
        println()
        
        # Parallel performance analysis  
        println("⚡ Parallel Performance Analysis:")
        @printf("  Total simulation time: %.2f seconds\n", results.total_time)
        @printf("  Timesteps completed: %d\n", results.timesteps_completed)
        @printf("  Average timestep: %.2e\n", results.average_dt)
        @printf("  Parallel efficiency: %.1f%%\n", results.parallel_efficiency * 100)
        @printf("  Communication overhead: %.1f%%\n", results.communication_overhead * 100)
        @printf("  Load imbalance: %.1f%%\n", results.load_imbalance * 100)
        
        # Performance recommendations
        println("\n💡 Performance Recommendations:")
        if results.parallel_efficiency > 0.8
            println("  ✅ Excellent parallel scaling achieved")
        elseif results.parallel_efficiency > 0.6
            println("  ⚠️  Good parallel scaling - consider optimizations")
        else
            println("  🔴 Poor parallel scaling - significant optimizations needed")
        end
        
        if results.communication_overhead > 0.3
            println("  🔧 High communication overhead - optimize domain decomposition")
        end
        
        if results.load_imbalance > 0.2
            println("  ⚖️  Significant load imbalance - enable dynamic balancing")
        end
        
        # Scaling recommendations
        println("\n📈 Scaling Recommendations:")
        if params.rayleigh_number >= 1e6
            println("  🚀 High Ra simulation suitable for HPC scaling")
            println("  💻 Consider running on 100+ processors for better efficiency")
        end
        
        theoretical_max_procs = Int(round(sqrt(params.elements_per_dim^2 * params.polynomial_order^2 / 1000)))
        @printf("  🎯 Optimal processor count: ~%d (based on 1K DOFs/proc)\n", theoretical_max_procs)
        
        println("="^50)
    end
end

# Export the main driver so `using`/`include` consumers can call it directly.
export parallel_thermal_convection_simulation

# Run the simulation only when this file is executed as a script,
# not when it is included from another file or package.
if abspath(PROGRAM_FILE) == @__FILE__
    parallel_thermal_convection_simulation()
end