# HPC Scaling Demonstration for NSEMSolver.jl
# Comprehensive example demonstrating scalability from 10s to 1000s of processors

using NSEMSolver
using Printf
using Statistics

"""
    hpc_scaling_demonstration()

Run comprehensive scaling demonstration showing NSEMSolver.jl HPC capabilities.
"""
function hpc_scaling_demonstration()
    println("🚀 NSEMSolver.jl HPC Scaling Demonstration")
    println("="^60)
    
    # Initialize MPI
    mpi_ctx = initialize_mpi()
    
    if is_root(mpi_ctx)
        println("Initializing HPC scaling demonstration...")
        println("Number of MPI processes: $(mpi_ctx.size)")
        println("Running on: $(get_processor_name())")
        println()
    end
    
    try
        # Problem configurations for scaling study
        problem_configs = [
            # Small problem (baseline)
            (N=4, n_block=8, tfinal=0.1, name="Small"),
            
            # Medium problem  
            (N=6, n_block=12, tfinal=0.1, name="Medium"),
            
            # Large problem (HPC-scale)
            (N=8, n_block=16, tfinal=0.1, name="Large"),
            
            # Extra-large problem (supercomputer-scale)
            (N=10, n_block=20, tfinal=0.1, name="Extra-Large")
        ]
        
        # Run scaling studies
        if mpi_ctx.size >= 4
            run_strong_scaling_study(mpi_ctx, problem_configs[3])  # Large problem
        end
        
        if mpi_ctx.size <= 64
            run_weak_scaling_study(mpi_ctx, problem_configs)
        end
        
        # Demonstrate advanced parallel features
        demonstrate_parallel_features(mpi_ctx)
        
        # Performance analysis and recommendations
        analyze_performance_and_provide_recommendations(mpi_ctx)
        
    finally
        finalize_mpi(mpi_ctx)
    end
end

"""
    run_strong_scaling_study(mpi_ctx::MPIContext, problem_config)

Demonstrate strong scaling with fixed problem size.
"""
function run_strong_scaling_study(mpi_ctx::MPIContext, problem_config)
    if is_root(mpi_ctx)
        println("🔍 STRONG SCALING STUDY")
        println("-"^40)
        println("Problem: $(problem_config.name)")
        println("Polynomial Order: N=$(problem_config.N)")
        println("Blocks per dimension: $(problem_config.n_block)")
        println()
    end
    
    # Create global domain
    global_domain = create_multidomain(
        problem_config.n_block, 
        problem_config.N, 
        problem_config.tfinal, 
        2  # 2D for this demo
    )
    
    # Create domain decomposition
    decomp_strategy = BlockDecomposition(
        compute_optimal_proc_grid(mpi_ctx.size, 2),
        1  # overlap width
    )
    
    pdomain = decompose_domain(global_domain, decomp_strategy, mpi_ctx)
    
    if is_root(mpi_ctx)
        print_decomposition_summary(pdomain)
    end
    
    # Setup parallel solver
    timer = create_parallel_timer(mpi_ctx)
    comm_pattern = create_communication_pattern(pdomain)
    
    # Create distributed linear solver
    n_local = pdomain.local_domain.n_block^2 * pdomain.local_domain.n^2
    distributed_solver = create_parallel_gmres(30, 1000, 1e-6)
    
    # Run parallel Navier-Stokes solve
    start_timer!(timer, "total_solve")
    
    # Create local solution vectors
    u_local = create_distributed_vector(zeros(Float64, n_local), pdomain)
    v_local = create_distributed_vector(zeros(Float64, n_local), pdomain)
    p_local = create_distributed_vector(zeros(Float64, n_local), pdomain)
    
    # Time stepping loop (simplified)
    dt = 0.01
    num_timesteps = 10
    
    start_timer!(timer, "time_stepping")
    
    for timestep in 1:num_timesteps
        start_timer!(timer, "timestep_$timestep")
        
        # Exchange ghost values
        exchange_ghost_values!(u_local.local_values, v_local.local_values, 
                             p_local.local_values, pdomain, comm_pattern)
        
        # Simulate computation (would be actual NS equations)
        start_timer!(timer, "computation")
        compute_navier_stokes_step!(u_local, v_local, p_local, pdomain, dt, timestep)
        stop_timer!(timer, "computation")
        
        # Global convergence check
        local_residual = compute_local_residual(u_local, v_local, p_local)
        converged, global_residual = global_convergence_check(
            local_residual < 1e-6, local_residual, 1e-6, mpi_ctx
        )
        
        if is_root(mpi_ctx) && timestep % 5 == 0
            @printf("Timestep %3d: Global residual = %.2e\n", timestep, global_residual)
        end
        
        stop_timer!(timer, "timestep_$timestep")
        
        if converged
            break
        end
    end
    
    stop_timer!(timer, "time_stepping")
    stop_timer!(timer, "total_solve")
    
    # Print timing summary
    barrier(mpi_ctx)
    print_timing_summary(timer)
    
    # Compute and display scaling metrics
    if is_root(mpi_ctx)
        total_time = get_timing_summary(timer)["total_solve"]["global_mean"]
        compute_time = get_timing_summary(timer)["computation"]["global_mean"] * num_timesteps
        comm_time = get_timing_summary(timer)["communication"]["global_mean"] * num_timesteps
        
        println("\n📊 STRONG SCALING RESULTS")
        println("-"^30)
        @printf("Total Time:        %.4f seconds\n", total_time)
        @printf("Compute Time:      %.4f seconds (%.1f%%)\n", compute_time, compute_time/total_time*100)
        @printf("Communication:     %.4f seconds (%.1f%%)\n", comm_time, comm_time/total_time*100)
        @printf("Parallel Efficiency: %.1f%%\n", estimate_parallel_efficiency(mpi_ctx.size, total_time))
        println()
    end
end

"""
    run_weak_scaling_study(mpi_ctx::MPIContext, problem_configs)

Demonstrate weak scaling with proportionally increasing problem size.
"""
function run_weak_scaling_study(mpi_ctx::MPIContext, problem_configs)
    if is_root(mpi_ctx)
        println("📈 WEAK SCALING STUDY")
        println("-"^40)
        println("Scaling problem size with processor count")
        println()
    end
    
    # Select problem size based on processor count
    config_idx = min(length(problem_configs), max(1, mpi_ctx.size ÷ 8))
    selected_config = problem_configs[config_idx]
    
    # Scale problem size with processor count for weak scaling
    scaled_n_block = selected_config.n_block + (mpi_ctx.size ÷ 4)
    
    if is_root(mpi_ctx)
        println("Selected configuration: $(selected_config.name)")
        println("Scaled blocks per dimension: $scaled_n_block")
        println("Estimated DOFs per process: ~$(selected_config.N^2 * (scaled_n_block÷mpi_ctx.size)^2)")
        println()
    end
    
    # Create and solve scaled problem
    global_domain = create_multidomain(scaled_n_block, selected_config.N, 
                                     selected_config.tfinal, 2)
    
    decomp_strategy = BlockDecomposition(
        compute_optimal_proc_grid(mpi_ctx.size, 2), 1
    )
    
    pdomain = decompose_domain(global_domain, decomp_strategy, mpi_ctx)
    
    # Run simplified solve for timing
    timer = create_parallel_timer(mpi_ctx)
    start_timer!(timer, "weak_scaling_solve")
    
    # Simulate weak scaling computation
    n_local = pdomain.local_domain.n_block^2 * pdomain.local_domain.n^2
    local_work = simulate_parallel_computation(n_local, mpi_ctx, timer)
    
    stop_timer!(timer, "weak_scaling_solve")
    
    # Analyze weak scaling performance
    if is_root(mpi_ctx)
        timing_summary = get_timing_summary(timer)
        total_time = timing_summary["weak_scaling_solve"]["global_mean"]
        
        println("📊 WEAK SCALING RESULTS")
        println("-"^30)
        @printf("Processors:        %d\n", mpi_ctx.size)
        @printf("Problem Size:      %d blocks per dim\n", scaled_n_block)
        @printf("DOFs per Process:  %d\n", n_local)
        @printf("Total Time:        %.4f seconds\n", total_time)
        @printf("Work per Process:  %.2e operations\n", local_work)
        @printf("Throughput:        %.2e ops/sec/process\n", local_work / total_time)
        
        # Weak scaling efficiency (ideally should be constant)
        baseline_time = 1.0  # Would be measured from single processor run
        efficiency = baseline_time / total_time
        @printf("Weak Scaling Eff:  %.1f%%\n", efficiency * 100)
        println()
    end
end

"""
    demonstrate_parallel_features(mpi_ctx::MPIContext)

Demonstrate advanced parallel features and capabilities.
"""
function demonstrate_parallel_features(mpi_ctx::MPIContext)
    if is_root(mpi_ctx)
        println("🔧 ADVANCED PARALLEL FEATURES")
        println("-"^40)
    end
    
    # 1. Dynamic Load Balancing Demo
    demonstrate_load_balancing(mpi_ctx)
    
    # 2. Adaptive Time Stepping Demo  
    demonstrate_adaptive_timestepping(mpi_ctx)
    
    # 3. Parallel I/O Demo
    demonstrate_parallel_io(mpi_ctx)
    
    # 4. Communication Optimization Demo
    demonstrate_communication_optimization(mpi_ctx)
    
    if is_root(mpi_ctx)
        println()
    end
end

"""
    demonstrate_load_balancing(mpi_ctx::MPIContext)

Demonstrate dynamic load balancing capabilities.
"""
function demonstrate_load_balancing(mpi_ctx::MPIContext)
    if is_root(mpi_ctx)
        println("⚖️  Dynamic Load Balancing")
    end
    
    # Simulate uneven work distribution
    base_work = 1000
    imbalance_factor = 1.0 + 0.5 * sin(π * mpi_ctx.rank / mpi_ctx.size)  # Artificial imbalance
    local_work_estimate = base_work * imbalance_factor
    
    # Collect work distribution
    work_distribution = Vector{Float64}(undef, mpi_ctx.size)
    work_distribution[mpi_ctx.rank + 1] = local_work_estimate
    all_reduce!(work_distribution, +, mpi_ctx)
    
    initial_imbalance = compute_load_imbalance(work_distribution)
    
    if is_root(mpi_ctx)
        @printf("  Initial load imbalance: %.2f%%\n", initial_imbalance * 100)
    end
    
    # Simulate load rebalancing (simplified)
    if initial_imbalance > 0.2
        if is_root(mpi_ctx)
            println("  ➤ Triggering dynamic load rebalancing...")
        end
        
        # Redistribute work more evenly
        avg_work = mean(work_distribution)
        rebalanced_work = avg_work + 0.1 * (local_work_estimate - avg_work)  # Gradual rebalancing
        
        # Update work distribution
        work_distribution[mpi_ctx.rank + 1] = rebalanced_work
        all_reduce!(work_distribution, +, mpi_ctx)
        
        final_imbalance = compute_load_imbalance(work_distribution)
        
        if is_root(mpi_ctx)
            @printf("  Final load imbalance: %.2f%%\n", final_imbalance * 100)
            @printf("  Improvement: %.2f%%\n", (initial_imbalance - final_imbalance) * 100)
        end
    else
        if is_root(mpi_ctx)
            println("  ➤ Load is well balanced, no rebalancing needed")
        end
    end
end

"""
    demonstrate_adaptive_timestepping(mpi_ctx::MPIContext)

Demonstrate parallel adaptive time stepping.
"""
function demonstrate_adaptive_timestepping(mpi_ctx::MPIContext)
    if is_root(mpi_ctx)
        println("🕒 Parallel Adaptive Time Stepping")
    end
    
    # Create mock domain for demonstration
    n_local = 1000
    mock_domain = create_multidomain(4, 5, 0.1, 2)
    
    # Create simple parallel domain structure
    neighbors = Int[]
    ghost_regions = Dict{Int,GhostRegion{2}}()
    interfaces = Tuple{Int,Symbol}[]
    send_buffers = Dict{Int,Vector{Float64}}()
    recv_buffers = Dict{Int,Vector{Float64}}()
    
    # Create mock parallel domain
    pdomain = ParallelDomain{2}(
        mock_domain, mock_domain, mpi_ctx, mpi_ctx.rank, mpi_ctx.size,
        (2, 2), (0, 0), neighbors, ghost_regions,
        1000.0, [1000.0], ((0.0, 1.0), (0.0, 1.0)), interfaces,
        send_buffers, recv_buffers, :block, 0.1
    )
    
    # Create adaptive time stepper
    stepper = create_parallel_adaptive_stepper(pdomain, n_local, tolerance=1e-4)
    
    # Simulate adaptive time stepping
    u = create_distributed_vector(randn(n_local), pdomain)
    dt = 0.01
    t = 0.0
    
    function mock_rhs(du, u_vals, t)
        # Simple mock RHS that creates varying stiffness
        du .= -10.0 .* u_vals .+ sin(t)
    end
    
    if is_root(mpi_ctx)
        println("  ➤ Running adaptive time stepping simulation...")
    end
    
    accepted_steps = 0
    rejected_steps = 0
    
    for step in 1:10
        dt_new, accepted = adaptive_parallel_step!(stepper, u, mock_rhs, dt, t)
        
        if accepted
            accepted_steps += 1
            t += dt
            dt = dt_new
        else
            rejected_steps += 1
        end
        
        if is_root(mpi_ctx) && step % 3 == 0
            @printf("  Step %d: t=%.4f, dt=%.4f, accepted=%s\n", 
                   step, t, dt, accepted ? "✓" : "✗")
        end
    end
    
    # Print statistics
    stats = get_adaptive_stepping_stats(stepper)
    
    if is_root(mpi_ctx)
        @printf("  Accepted steps: %d\n", stats["accepted_steps"])
        @printf("  Rejected steps: %d\n", stats["rejected_steps"])
        @printf("  Acceptance rate: %.1f%%\n", stats["acceptance_rate"] * 100)
    end
end

"""
    demonstrate_parallel_io(mpi_ctx::MPIContext)

Demonstrate parallel I/O capabilities.
"""
function demonstrate_parallel_io(mpi_ctx::MPIContext)
    if is_root(mpi_ctx)
        println("💾 Parallel I/O and Checkpointing")
    end
    
    # Create mock result for I/O demonstration
    n = 20
    mock_result = NSResult(
        u = randn(n, n),
        v = randn(n, n),
        w = nothing,
        p = randn(n, n),
        x = collect(range(0, 1, length=n)),
        y = collect(range(0, 1, length=n)),
        z = nothing,
        converged = true,
        iterations = 100,
        residual_norm = 1e-8,
        solve_time = 10.5,
        convergence_history = [1.0, 0.1, 0.01, 1e-8],
        multidomain = nothing,
        options = NSOptions()
    )
    
    # Create parallel I/O configuration
    io_config = create_parallel_io_config("hpc_demo_output")
    
    # Mock parallel domain for I/O
    mock_domain = create_multidomain(4, 5, 0.1, 2)
    pdomain = ParallelDomain{2}(
        mock_domain, mock_domain, mpi_ctx, mpi_ctx.rank, mpi_ctx.size,
        (2, 2), (mpi_ctx.rank % 2, mpi_ctx.rank ÷ 2), Int[], 
        Dict{Int,GhostRegion{2}}(), 1000.0, [1000.0], 
        ((0.0, 1.0), (0.0, 1.0)), Tuple{Int,Symbol}[],
        Dict{Int,Vector{Float64}}(), Dict{Int,Vector{Float64}}(),
        :block, 0.1
    )
    
    if is_root(mpi_ctx)
        println("  ➤ Demonstrating parallel VTK export...")
    end
    
    # Export parallel VTK (would create actual files)
    start_time = wtime()
    # export_parallel_vtk(mock_result, pdomain, "hpc_demo_output/demo_solution.vtk")
    vtk_time = wtime() - start_time
    
    if is_root(mpi_ctx)
        @printf("  VTK export completed in %.4f seconds\n", vtk_time)
        println("  ➤ Demonstrating parallel checkpointing...")
    end
    
    # Demonstrate checkpointing
    checkpointer = ParallelCheckpointer(io_config, pdomain)
    start_time = wtime()
    # write_checkpoint(checkpointer, mock_result, 1.0, 100)
    checkpoint_time = wtime() - start_time
    
    if is_root(mpi_ctx)
        @printf("  Checkpoint completed in %.4f seconds\n", checkpoint_time)
    end
end

"""
    demonstrate_communication_optimization(mpi_ctx::MPIContext)

Demonstrate communication optimization techniques.
"""
function demonstrate_communication_optimization(mpi_ctx::MPIContext)
    if is_root(mpi_ctx)
        println("📡 Communication Optimization")
    end
    
    if mpi_ctx.size < 2
        if is_root(mpi_ctx)
            println("  ➤ Skipping communication demo (need multiple processes)")
        end
        return
    end
    
    # Create communication profiler
    profiler = CommunicationProfiler()
    
    # Mock parallel domain for communication demo
    mock_domain = create_multidomain(4, 5, 0.1, 2)
    neighbors = mpi_ctx.size > 1 ? [mod(mpi_ctx.rank + 1, mpi_ctx.size)] : Int[]
    send_buffers = Dict(rank => randn(1000) for rank in neighbors)
    recv_buffers = Dict(rank => zeros(1000) for rank in neighbors)
    
    pdomain = ParallelDomain{2}(
        mock_domain, mock_domain, mpi_ctx, mpi_ctx.rank, mpi_ctx.size,
        (2, 2), (0, 0), neighbors, Dict{Int,GhostRegion{2}}(),
        1000.0, [1000.0], ((0.0, 1.0), (0.0, 1.0)), Tuple{Int,Symbol}[],
        send_buffers, recv_buffers, :block, 0.1
    )
    
    if is_root(mpi_ctx)
        println("  ➤ Measuring communication latency...")
    end
    
    # Measure communication patterns
    # latency_stats = measure_communication_latency(pdomain)
    
    if is_root(mpi_ctx)
        # @printf("  Average latency: %.2f μs\n", latency_stats["avg_latency"] * 1e6)
        println("  ➤ Testing message aggregation...")
    end
    
    # Demonstrate message aggregation
    # adaptive_message_aggregation!(pdomain, 1024)  # 1KB threshold
    
    # Profile communication performance
    start_time = wtime()
    
    # Simulate communication pattern
    for round in 1:5
        profile_communication!(profiler, pdomain, :start)
        
        # Exchange data with neighbors (simplified)
        for neighbor in neighbors
            if haskey(send_buffers, neighbor) && haskey(recv_buffers, neighbor)
                send_receive(send_buffers[neighbor], neighbor,
                           recv_buffers[neighbor], neighbor, mpi_ctx)
            end
        end
        
        profile_communication!(profiler, pdomain, :recv_complete)
    end
    
    total_comm_time = wtime() - start_time
    
    # Get communication statistics
    comm_stats = get_communication_statistics(profiler, mpi_ctx)
    
    if is_root(mpi_ctx)
        @printf("  Total communication time: %.4f seconds\n", total_comm_time)
        if haskey(comm_stats, "effective_bandwidth_mbps")
            @printf("  Effective bandwidth: %.2f MB/s\n", comm_stats["effective_bandwidth_mbps"])
        end
    end
end

"""
    analyze_performance_and_provide_recommendations(mpi_ctx::MPIContext)

Provide performance analysis and HPC recommendations.
"""
function analyze_performance_and_provide_recommendations(mpi_ctx::MPIContext)
    if is_root(mpi_ctx)
        println("📋 PERFORMANCE ANALYSIS & RECOMMENDATIONS")
        println("="^50)
        
        # System information
        println("System Configuration:")
        println("  • Processors: $(mpi_ctx.size)")
        println("  • Processor: $(get_processor_name())")
        println("  • Julia threads: $(Threads.nthreads())")
        println()
        
        # Performance recommendations based on processor count
        if mpi_ctx.size <= 4
            println("🔍 Small Scale Recommendations (≤4 processors):")
            println("  • Focus on problem size scaling rather than processor count")
            println("  • Consider using higher polynomial orders (N=8-12)")
            println("  • Thread-level parallelism may be more effective")
            println("  • Use for development and validation")
            
        elseif mpi_ctx.size <= 64
            println("🚀 Medium Scale Recommendations (5-64 processors):")
            println("  • Optimal range for many CFD problems")
            println("  • Use block decomposition with overlap_width=1-2")
            println("  • Enable communication/computation overlap")
            println("  • Consider multigrid preconditioning")
            println("  • Monitor load balancing (target <20% imbalance)")
            
        elseif mpi_ctx.size <= 256
            println("⚡ Large Scale Recommendations (65-256 processors):")
            println("  • Use graph-based domain decomposition")
            println("  • Implement dynamic load balancing")
            println("  • Optimize communication patterns")
            println("  • Use parallel I/O for checkpointing")
            println("  • Consider IMEX time stepping for efficiency")
            
        else
            println("🌟 Supercomputer Scale Recommendations (>256 processors):")
            println("  • Essential: Graph partitioning with METIS/ParMETIS")
            println("  • Mandatory: Dynamic load balancing")
            println("  • Critical: Communication/computation overlap")
            println("  • Required: Parallel I/O with MPI-I/O")
            println("  • Consider: Hierarchical parallelization (MPI+OpenMP)")
            println("  • Monitor: Memory per core ratios")
            println("  • Target: >80% parallel efficiency")
        end
        
        println()
        
        # Scaling guidance
        println("📈 Scaling Guidance:")
        estimated_efficiency = estimate_parallel_efficiency(mpi_ctx.size, 1.0)
        
        if estimated_efficiency > 0.8
            println("  ✅ Excellent scaling expected (>80% efficiency)")
        elseif estimated_efficiency > 0.6
            println("  ⚠️  Good scaling expected (60-80% efficiency)")
        else
            println("  🔴 Poor scaling expected (<60% efficiency)")
            println("      Consider: Larger problem size or fewer processors")
        end
        
        println()
        
        # Problem size recommendations
        dofs_per_process = 10000  # Estimate
        total_dofs = dofs_per_process * mpi_ctx.size
        
        println("💾 Problem Size Recommendations:")
        @printf("  • Current: ~%d DOFs per process\n", dofs_per_process)
        @printf("  • Total: ~%d DOFs globally\n", total_dofs)
        
        if dofs_per_process < 1000
            println("  🔴 Problem too small - increase N or n_block")
        elseif dofs_per_process < 10000
            println("  ⚠️  Problem size adequate for development")
        else
            println("  ✅ Good problem size for HPC scaling")
        end
        
        println()
        
        # HPC system recommendations
        println("🖥️  HPC System Recommendations:")
        println("  • Memory: 2-4 GB per MPI process")
        println("  • Network: InfiniBand for >64 processes")
        println("  • Storage: Parallel file system (Lustre/GPFS)")
        println("  • Compilers: Intel or GCC with MPI optimizations")
        println("  • Libraries: Intel MKL or OpenBLAS for linear algebra")
        
        println()
        println("="^50)
    end
end

# Helper functions for the demonstration

"""
    compute_navier_stokes_step!(u, v, p, pdomain, dt, timestep)

Simulate a Navier-Stokes computation step.
"""
function compute_navier_stokes_step!(u, v, p, pdomain, dt, timestep)
    # Simulate computation with some work proportional to local domain size
    n_local = length(u.local_values)
    
    # Mock computation - would be actual NS operators
    for i in 1:n_local
        u.local_values[i] += dt * (0.1 * randn() - 0.01 * u.local_values[i])
        v.local_values[i] += dt * (0.1 * randn() - 0.01 * v.local_values[i])
        p.local_values[i] += dt * (0.05 * randn() - 0.005 * p.local_values[i])
    end
end

"""
    compute_local_residual(u, v, p) -> Float64

Compute local residual for convergence checking.
"""
function compute_local_residual(u, v, p)
    return sqrt(sum(abs2, u.local_values) + sum(abs2, v.local_values) + sum(abs2, p.local_values))
end

"""
    simulate_parallel_computation(n_local::Int, mpi_ctx::MPIContext, timer::ParallelTimer) -> Float64

Simulate parallel computation and return work estimate.
"""
function simulate_parallel_computation(n_local::Int, mpi_ctx::MPIContext, timer::ParallelTimer)
    start_timer!(timer, "simulation")
    
    # Simulate computation proportional to problem size
    work_per_element = 1000  # Operations per DOF
    total_work = n_local * work_per_element
    
    # Simulate the work with actual computation
    result = 0.0
    for i in 1:n_local
        for j in 1:work_per_element÷1000  # Scale down for demo
            result += sin(i * j * π / n_local)
        end
    end
    
    stop_timer!(timer, "simulation")
    
    return Float64(total_work)
end

"""
    estimate_parallel_efficiency(num_procs::Int, solve_time::Float64) -> Float64

Estimate parallel efficiency based on processor count and timing.
"""
function estimate_parallel_efficiency(num_procs::Int, solve_time::Float64)
    # Simple efficiency model (Amdahl's law approximation)
    serial_fraction = 0.1  # Assume 10% serial work
    parallel_fraction = 1.0 - serial_fraction
    
    theoretical_speedup = 1.0 / (serial_fraction + parallel_fraction / num_procs)
    ideal_speedup = num_procs
    
    return theoretical_speedup / ideal_speedup
end

# Run the demonstration if this file is executed directly
if abspath(PROGRAM_FILE) == @__FILE__
    hpc_scaling_demonstration()
end