# MPI Wrapper and Abstraction Layer for NSEMSolver.jl
# Provides unified interface for MPI functionality with fallback for serial execution

"""
    MPIContext

Encapsulates MPI communication context with rank, size, and communicator information.

Produced by `initialize_mpi`. A serial-fallback context has `comm === nothing`,
`rank == 0`, `size == 1`, and `is_parallel == false`, so downstream code can treat
serial and parallel execution uniformly.
"""
struct MPIContext
    comm::Any                    # MPI communicator (e.g. MPI.COMM_WORLD) or `nothing` for serial
    rank::Int                   # Process rank (0 for serial)
    size::Int                   # Number of processes (1 for serial)
    is_parallel::Bool           # True if MPI is active
    root::Int                   # Root process rank (usually 0)
end

"""
    initialize_mpi() -> MPIContext

Initialize the MPI environment and return the communication context.
Falls back gracefully to a serial context if MPI is unavailable or
initialization throws.
"""
function initialize_mpi()
    serial_ctx = MPIContext(nothing, 0, 1, false, 0)

    # Serial execution when the MPI package was never loaded.
    (HAS_MPI && MPI_MODULE !== nothing) || return serial_ctx

    try
        # MPI.Init may only be called once per process.
        MPI_MODULE.Initialized() || MPI_MODULE.Init()

        world = MPI_MODULE.COMM_WORLD
        return MPIContext(world,
                          MPI_MODULE.Comm_rank(world),
                          MPI_MODULE.Comm_size(world),
                          true, 0)
    catch e
        @warn "MPI initialization failed, falling back to serial: $e"
        return serial_ctx
    end
end

"""
    finalize_mpi(mpi_ctx::MPIContext)

Finalize the MPI environment if this context initialized it.
A no-op for serial contexts; finalization errors are downgraded to warnings.
"""
function finalize_mpi(mpi_ctx::MPIContext)
    # Nothing to tear down for serial contexts or when MPI is unavailable.
    (mpi_ctx.is_parallel && HAS_MPI && MPI_MODULE !== nothing) || return
    try
        MPI_MODULE.Finalized() || MPI_MODULE.Finalize()
    catch e
        @warn "MPI finalization warning: $e"
    end
end

"""
    is_root(mpi_ctx::MPIContext) -> Bool

Return `true` when the calling process is the designated root process.
"""
function is_root(mpi_ctx::MPIContext)
    return mpi_ctx.root == mpi_ctx.rank
end

"""
    barrier(mpi_ctx::MPIContext)

Synchronize all processes at a barrier. No-op in serial execution.
"""
function barrier(mpi_ctx::MPIContext)
    active = mpi_ctx.is_parallel && HAS_MPI && MPI_MODULE !== nothing
    if active
        MPI_MODULE.Barrier(mpi_ctx.comm)
    end
end

"""
    all_reduce!(data::AbstractArray, op, mpi_ctx::MPIContext)

Perform an in-place all-reduce of `data` across all processes.

`op` may be `+`, `*`, `max`, or `min`. In serial execution this is a no-op
(the local data already is the global result).

# Throws
- `ArgumentError` if `op` is not a supported reduction operation.
"""
function all_reduce!(data::AbstractArray, op, mpi_ctx::MPIContext)
    if mpi_ctx.is_parallel && HAS_MPI && MPI_MODULE !== nothing
        # Map the Julia reduction function to the matching MPI operation.
        # `===` compares function identity, which is the intended check here.
        mpi_op = if op === +
            MPI_MODULE.SUM
        elseif op === *
            MPI_MODULE.PROD
        elseif op === max
            MPI_MODULE.MAX
        elseif op === min
            MPI_MODULE.MIN
        else
            throw(ArgumentError("Unsupported reduction operation: $op"))
        end

        MPI_MODULE.Allreduce!(data, mpi_op, mpi_ctx.comm)
    end
    # No-op for serial execution
end

"""
    all_reduce_scalar(value::T, op, mpi_ctx::MPIContext) where T

Perform an all-reduce on a scalar `value` and return the reduced result.

`op` may be `+`, `*`, `max`, or `min`. In serial execution `value` is
returned unchanged.

# Throws
- `ArgumentError` if `op` is not a supported reduction operation.
"""
function all_reduce_scalar(value::T, op, mpi_ctx::MPIContext) where T
    if mpi_ctx.is_parallel && HAS_MPI && MPI_MODULE !== nothing
        # Map the Julia reduction function to the matching MPI operation.
        mpi_op = if op === +
            MPI_MODULE.SUM
        elseif op === *
            MPI_MODULE.PROD
        elseif op === max
            MPI_MODULE.MAX
        elseif op === min
            MPI_MODULE.MIN
        else
            throw(ArgumentError("Unsupported reduction operation: $op"))
        end

        # Wrap the scalar in a Ref so Allreduce! can mutate it in place.
        result = Ref{T}(value)
        MPI_MODULE.Allreduce!(result, mpi_op, mpi_ctx.comm)
        return result[]
    else
        return value
    end
end

"""
    broadcast!(data::AbstractArray, root::Int, mpi_ctx::MPIContext)

Broadcast `data` from the `root` process to all other processes.
No-op in serial execution (the single process already holds the data).

NOTE(review): this name collides with `Base.broadcast!`, which Base exports;
defining this method normally requires `import Base: broadcast!` at module
level — confirm the enclosing module handles that.
"""
function broadcast!(data::AbstractArray, root::Int, mpi_ctx::MPIContext)
    active = mpi_ctx.is_parallel && HAS_MPI && MPI_MODULE !== nothing
    if active
        MPI_MODULE.Bcast!(data, root, mpi_ctx.comm)
    end
end

"""
    gather_to_root(local_data::AbstractArray, mpi_ctx::MPIContext)

Gather data from all processes onto the root process.

Returns the gathered array on root, `nothing` on other ranks, and a copy of
`local_data` in serial execution.

Assumes every rank contributes the same number of elements — TODO confirm
callers guarantee equal-sized `local_data` on all ranks.
"""
function gather_to_root(local_data::AbstractArray, mpi_ctx::MPIContext)
    active = mpi_ctx.is_parallel && HAS_MPI && MPI_MODULE !== nothing
    if !active
        # Serial: the "gathered" result is just a copy of the local data.
        return copy(local_data)
    end

    if !is_root(mpi_ctx)
        # Non-root ranks contribute their data and receive nothing back.
        MPI_MODULE.Gather!(local_data, nothing, mpi_ctx.root, mpi_ctx.comm)
        return nothing
    end

    # Root allocates one receive slot per rank's contribution.
    recvbuf = similar(local_data, length(local_data) * mpi_ctx.size)
    MPI_MODULE.Gather!(local_data, recvbuf, mpi_ctx.root, mpi_ctx.comm)
    return recvbuf
end

"""
    send_receive(send_data::AbstractArray, dest::Int, recv_buffer::AbstractArray, 
                source::Int, mpi_ctx::MPIContext)

Simultaneously send `send_data` to `dest` and receive into `recv_buffer`
from `source` (both with tag 0).
"""
function send_receive(send_data::AbstractArray, dest::Int, 
                     recv_buffer::AbstractArray, source::Int, mpi_ctx::MPIContext)
    active = mpi_ctx.is_parallel && HAS_MPI && MPI_MODULE !== nothing
    if active
        # Combined send/receive avoids the deadlock risk of ordering
        # separate blocking Send and Recv calls.
        MPI_MODULE.Sendrecv!(send_data, dest, 0, recv_buffer, source, 0, mpi_ctx.comm)
    elseif dest == 0 && source == 0
        # Serial self-exchange: copy outgoing data straight into the receive buffer.
        copyto!(recv_buffer, send_data)
    end
end

"""
    isend(data::AbstractArray, dest::Int, tag::Int, mpi_ctx::MPIContext)

Non-blocking send of `data` to rank `dest` with the given `tag`.
Returns a request handle for later completion checks, or `nothing`
in serial execution.
"""
function isend(data::AbstractArray, dest::Int, tag::Int, mpi_ctx::MPIContext)
    active = mpi_ctx.is_parallel && HAS_MPI && MPI_MODULE !== nothing
    # Serial: nothing to send, so there is no request to track.
    return active ? MPI_MODULE.Isend(data, dest, tag, mpi_ctx.comm) : nothing
end

"""
    irecv!(buffer::AbstractArray, source::Int, tag::Int, mpi_ctx::MPIContext)

Non-blocking receive into `buffer` from rank `source` with the given `tag`.
Returns a request handle for later completion checks, or `nothing`
in serial execution.
"""
function irecv!(buffer::AbstractArray, source::Int, tag::Int, mpi_ctx::MPIContext)
    active = mpi_ctx.is_parallel && HAS_MPI && MPI_MODULE !== nothing
    # Serial: nothing will arrive, so there is no request to track.
    return active ? MPI_MODULE.Irecv!(buffer, source, tag, mpi_ctx.comm) : nothing
end

"""
    wait!(request)

Block until a non-blocking communication completes.
Accepts `nothing` (the serial placeholder from `isend`/`irecv!`) as a no-op.
"""
function wait!(request)
    request === nothing && return  # serial placeholder: nothing pending
    if HAS_MPI && MPI_MODULE !== nothing
        MPI_MODULE.Wait!(request)
    end
end

"""
    test!(request) -> Bool

Test whether a non-blocking communication has completed.
Serial placeholders (`nothing`) always report completion.
"""
function test!(request)
    # De Morgan of the original guard: anything non-MPI is trivially done.
    if request === nothing || !HAS_MPI || MPI_MODULE === nothing
        return true
    end
    return MPI_MODULE.Test!(request)
end

"""
    get_processor_name() -> String

Get the name of the processor/node this process is running on.
Falls back to `gethostname()` when MPI is unavailable or errors.
"""
function get_processor_name()
    if HAS_MPI && MPI_MODULE !== nothing
        try
            return MPI_MODULE.Get_processor_name()
        catch
            # fall through to the hostname fallback below
        end
    end
    return gethostname()
end

"""
    wtime() -> Float64

Get wall-clock time in seconds (high resolution).
Uses the MPI timer when available, otherwise `time()`.
"""
function wtime()
    if HAS_MPI && MPI_MODULE !== nothing
        try
            return MPI_MODULE.Wtime()
        catch
            # fall through to the standard clock below
        end
    end
    return time()
end

"""
    ParallelTimer

Timing utility for parallel performance analysis.

Accumulates named wall-clock intervals via `start_timer!` / `stop_timer!`;
per-name statistics (optionally reduced across ranks) are produced by
`get_timing_summary`.
"""
mutable struct ParallelTimer
    start_times::Dict{String, Float64}            # name -> wtime() of the open interval
    elapsed_times::Dict{String, Vector{Float64}}  # name -> recorded durations in seconds
    mpi_ctx::MPIContext                           # context used for cross-rank statistics
    
    # Construct an empty timer bound to the given MPI context.
    function ParallelTimer(mpi_ctx::MPIContext)
        new(Dict{String, Float64}(), Dict{String, Vector{Float64}}(), mpi_ctx)
    end
end

"""
    start_timer!(timer::ParallelTimer, name::String)

Start timing the named operation. Restarting an already-open interval
simply overwrites its start timestamp.
"""
function start_timer!(timer::ParallelTimer, name::String)
    return timer.start_times[name] = wtime()
end

"""
    stop_timer!(timer::ParallelTimer, name::String)

Stop timing the named operation and record the elapsed time.

Does nothing if `name` has no matching `start_timer!` call, so an
unmatched stop is harmless.
"""
function stop_timer!(timer::ParallelTimer, name::String)
    haskey(timer.start_times, name) || return nothing
    # pop! reads the start timestamp and clears the open interval in one step.
    elapsed = wtime() - pop!(timer.start_times, name)
    # get! lazily creates the per-name history vector on first use.
    push!(get!(Vector{Float64}, timer.elapsed_times, name), elapsed)
    return nothing
end

"""
    get_timing_summary(timer::ParallelTimer) -> Dict

Get a per-name timing summary with parallel statistics.

In serial mode each entry holds `"count"`, `"total"`, `"mean"`, `"min"`, `"max"`.
In parallel mode each entry holds local/global totals and means plus
`"max_time"`, `"min_time"`, and `"load_imbalance"` (0 when no time was recorded).

Note: in parallel mode this performs collective reductions, so every rank
must call it.
"""
function get_timing_summary(timer::ParallelTimer)
    summary = Dict{String, Dict{String, Float64}}()
    
    for (name, times) in timer.elapsed_times
        local_stats = Dict{String, Float64}(
            "count" => length(times),
            "total" => sum(times),
            "mean" => mean(times),
            "min" => minimum(times),
            "max" => maximum(times)
        )
        
        if timer.mpi_ctx.is_parallel
            # Collective reductions: all ranks must reach these calls together.
            global_total = all_reduce_scalar(local_stats["total"], +, timer.mpi_ctx)
            global_max_time = all_reduce_scalar(local_stats["max"], max, timer.mpi_ctx)
            global_min_time = all_reduce_scalar(local_stats["min"], min, timer.mpi_ctx)
            
            # Guard against 0/0 -> NaN when no rank recorded any elapsed time.
            imbalance = global_max_time > 0 ?
                (global_max_time - global_min_time) / global_max_time : 0.0
            
            summary[name] = Dict{String, Float64}(
                "local_total" => local_stats["total"],
                "local_mean" => local_stats["mean"],
                "global_total" => global_total,
                "global_mean" => global_total / timer.mpi_ctx.size,
                "max_time" => global_max_time,
                "min_time" => global_min_time,
                "load_imbalance" => imbalance
            )
        else
            summary[name] = local_stats
        end
    end
    
    return summary
end

"""
    print_timing_summary(timer::ParallelTimer)

Print a formatted timing summary (output appears on the root process only).

In parallel mode this is a collective call: every rank must invoke it.
"""
function print_timing_summary(timer::ParallelTimer)
    # BUG FIX: get_timing_summary performs collective reductions in parallel
    # mode, so EVERY rank must call it. Computing it inside the is_root branch
    # (as before) would deadlock: non-root ranks never enter the Allreduce.
    summary = get_timing_summary(timer)
    
    is_root(timer.mpi_ctx) || return nothing
    
    println("\n" * "="^60)
    println("PARALLEL TIMING SUMMARY")
    println("="^60)
    
    for (name, stats) in summary
        println("\n$name:")
        if timer.mpi_ctx.is_parallel
            @printf("  Global Total: %.4f s\n", stats["global_total"])
            @printf("  Global Mean:  %.4f s\n", stats["global_mean"])
            @printf("  Max Time:     %.4f s\n", stats["max_time"])
            @printf("  Min Time:     %.4f s\n", stats["min_time"])
            @printf("  Load Imbalance: %.2f%%\n", stats["load_imbalance"] * 100)
        else
            @printf("  Total:  %.4f s\n", stats["total"])
            @printf("  Mean:   %.4f s\n", stats["mean"])
            @printf("  Count:  %d\n", Int(stats["count"]))
        end
    end
    println("="^60)
    return nothing
end

"""
    create_parallel_timer(mpi_ctx::MPIContext) -> ParallelTimer

Create a new parallel timer instance bound to `mpi_ctx`.
"""
create_parallel_timer(mpi_ctx::MPIContext) = ParallelTimer(mpi_ctx)

# Export public interface
# NOTE(review): `broadcast!` is also exported by Base; exporting it here will
# conflict for downstream `using` unless the module extends Base.broadcast!
# via `import Base: broadcast!` — confirm at module level.
export MPIContext, initialize_mpi, finalize_mpi
export is_root, barrier, all_reduce!, all_reduce_scalar
export broadcast!, gather_to_root, send_receive
export isend, irecv!, wait!, test!
export get_processor_name, wtime
export ParallelTimer, create_parallel_timer, start_timer!, stop_timer!
export get_timing_summary, print_timing_summary