# Inter-Process Communication Utilities for Parallel NSEMSolver.jl
# Optimized communication patterns for ghost region exchange and collective operations

using LinearAlgebra

"""
    CommunicationPattern

Represents a communication pattern for specific data exchange.
"""
struct CommunicationPattern
    send_ranks::Vector{Int}         # Ranks this process sends ghost data to
    recv_ranks::Vector{Int}         # Ranks this process receives ghost data from
    send_requests::Vector{Any}      # Slots for in-flight send request handles; filled when posted
    recv_requests::Vector{Any}      # Slots for in-flight receive request handles; filled when posted
    send_tags::Vector{Int}          # Message tag for each entry of `send_ranks` (index-aligned)
    recv_tags::Vector{Int}          # Message tag for each entry of `recv_ranks` (index-aligned)
end

"""
    create_communication_pattern(pdomain::ParallelDomain) -> CommunicationPattern

Create optimized communication pattern for ghost region exchange.
"""
function create_communication_pattern(pdomain::ParallelDomain)
    me, nprocs = pdomain.rank, pdomain.num_procs

    # One entry per neighbor buffer; send and receive sides may differ.
    targets = collect(keys(pdomain.send_buffers))
    sources = collect(keys(pdomain.recv_buffers))

    # Tag scheme: tag = receiver + sender * nprocs. Unique per ordered
    # (sender, receiver) pair, and both endpoints of an exchange derive
    # the same value independently.
    outgoing_tags = map(r -> r + me * nprocs, targets)
    incoming_tags = map(r -> me + r * nprocs, sources)

    # Request slots start uninitialized; they are filled when posted.
    return CommunicationPattern(targets, sources,
                                Vector{Any}(undef, length(targets)),
                                Vector{Any}(undef, length(sources)),
                                outgoing_tags, incoming_tags)
end

"""
    exchange_ghost_values!(u::AbstractArray, v::AbstractArray, p::AbstractArray,
                          pdomain::ParallelDomain{D}, comm_pattern::CommunicationPattern) where D

Exchange ghost region values between neighboring processes.
High-performance implementation with overlapped communication/computation.
"""
function exchange_ghost_values!(u::AbstractArray, v::AbstractArray, p::AbstractArray,
                               pdomain::ParallelDomain{D}, comm_pattern::CommunicationPattern) where D
    ctx = pdomain.mpi_ctx

    # Serial run or no neighbors: nothing to exchange.
    if !ctx.is_parallel || isempty(pdomain.neighbors)
        return
    end

    # Stage outgoing ghost-layer data into the per-rank send buffers.
    pack_ghost_data!(u, v, p, pdomain, comm_pattern)

    # Receives are posted before sends so matching buffers are already
    # available when the messages arrive.
    for (idx, src) in enumerate(comm_pattern.recv_ranks)
        comm_pattern.recv_requests[idx] =
            irecv!(pdomain.recv_buffers[src], src, comm_pattern.recv_tags[idx], ctx)
    end

    for (idx, dst) in enumerate(comm_pattern.send_ranks)
        comm_pattern.send_requests[idx] =
            isend(pdomain.send_buffers[dst], dst, comm_pattern.send_tags[idx], ctx)
    end

    # Block until every transfer has finished.
    foreach(wait!, comm_pattern.recv_requests)
    foreach(wait!, comm_pattern.send_requests)

    # Copy the received values into the local ghost regions.
    unpack_ghost_data!(u, v, p, pdomain, comm_pattern)
end

"""
    exchange_ghost_values_with_overlap!(u::AbstractArray, v::AbstractArray, p::AbstractArray,
                                       pdomain::ParallelDomain{D}, comm_pattern::CommunicationPattern,
                                       compute_fn::Function) where D

Exchange ghost values with computation/communication overlap for better performance.
"""
function exchange_ghost_values_with_overlap!(u::AbstractArray, v::AbstractArray, p::AbstractArray,
                                           pdomain::ParallelDomain{D}, comm_pattern::CommunicationPattern,
                                           compute_fn::Function) where D
    ctx = pdomain.mpi_ctx

    # Serial run or no neighbors: just run the computation.
    if !ctx.is_parallel || isempty(pdomain.neighbors)
        compute_fn()
        return
    end

    # Phase 1: stage outgoing data and launch all transfers.
    pack_ghost_data!(u, v, p, pdomain, comm_pattern)

    # Receives are posted before sends so matching buffers are ready first.
    for (idx, src) in enumerate(comm_pattern.recv_ranks)
        comm_pattern.recv_requests[idx] =
            irecv!(pdomain.recv_buffers[src], src, comm_pattern.recv_tags[idx], ctx)
    end
    for (idx, dst) in enumerate(comm_pattern.send_ranks)
        comm_pattern.send_requests[idx] =
            isend(pdomain.send_buffers[dst], dst, comm_pattern.send_tags[idx], ctx)
    end

    # Phase 2: interior-point work proceeds while messages are in flight.
    compute_fn()

    # Phase 3: drain all requests, then fill the ghost regions.
    foreach(wait!, comm_pattern.recv_requests)
    foreach(wait!, comm_pattern.send_requests)

    unpack_ghost_data!(u, v, p, pdomain, comm_pattern)
end

"""
    pack_ghost_data!(u::AbstractArray, v::AbstractArray, p::AbstractArray,
                    pdomain::ParallelDomain, comm_pattern::CommunicationPattern,
                    w::AbstractArray=p)

Pack solution data into send buffers for ghost region communication.

In 3D the third velocity component should be supplied as `w`. It defaults to
`p` for backward compatibility with the previous 5-argument call, which packed
the pressure into the `w` slot of the buffer.
"""
function pack_ghost_data!(u::AbstractArray, v::AbstractArray, p::AbstractArray,
                         pdomain::ParallelDomain, comm_pattern::CommunicationPattern,
                         w::AbstractArray=p)
    D = length(pdomain.proc_coords)

    for rank in comm_pattern.send_ranks
        # Skip ranks without a registered ghost region.
        haskey(pdomain.ghost_regions, rank) || continue
        ghost_region = pdomain.ghost_regions[rank]
        send_buffer = pdomain.send_buffers[rank]

        # Buffer layout: (u, v, p) per node in 2D; (u, v, w, p) per node in 3D.
        buffer_idx = 1
        for node_idx in ghost_region.send_indices
            if D == 2
                send_buffer[buffer_idx] = u[node_idx]
                send_buffer[buffer_idx + 1] = v[node_idx]
                send_buffer[buffer_idx + 2] = p[node_idx]
                buffer_idx += 3
            elseif D == 3
                send_buffer[buffer_idx] = u[node_idx]
                send_buffer[buffer_idx + 1] = v[node_idx]
                send_buffer[buffer_idx + 2] = w[node_idx]
                send_buffer[buffer_idx + 3] = p[node_idx]
                buffer_idx += 4
            end
        end
    end
end

"""
    unpack_ghost_data!(u::AbstractArray, v::AbstractArray, p::AbstractArray,
                      pdomain::ParallelDomain, comm_pattern::CommunicationPattern,
                      w::AbstractArray=p)

Unpack received data from buffers into ghost regions.

In 3D the third velocity component should be supplied as `w`. It defaults to
`p` for backward compatibility with the previous 5-argument call, where the
`w` slot was written into `p` and immediately overwritten by the pressure slot.
"""
function unpack_ghost_data!(u::AbstractArray, v::AbstractArray, p::AbstractArray,
                           pdomain::ParallelDomain, comm_pattern::CommunicationPattern,
                           w::AbstractArray=p)
    D = length(pdomain.proc_coords)

    for rank in comm_pattern.recv_ranks
        # Skip ranks without a registered ghost region.
        haskey(pdomain.ghost_regions, rank) || continue
        ghost_region = pdomain.ghost_regions[rank]
        recv_buffer = pdomain.recv_buffers[rank]

        # Buffer layout matches pack_ghost_data!: (u, v, p) per node in 2D;
        # (u, v, w, p) per node in 3D.
        buffer_idx = 1
        for node_idx in ghost_region.recv_indices
            if D == 2
                u[node_idx] = recv_buffer[buffer_idx]
                v[node_idx] = recv_buffer[buffer_idx + 1]
                p[node_idx] = recv_buffer[buffer_idx + 2]
                buffer_idx += 3
            elseif D == 3
                u[node_idx] = recv_buffer[buffer_idx]
                v[node_idx] = recv_buffer[buffer_idx + 1]
                w[node_idx] = recv_buffer[buffer_idx + 2]
                p[node_idx] = recv_buffer[buffer_idx + 3]
                buffer_idx += 4
            end
        end
    end
end

"""
    distributed_dot_product(x::AbstractVector, y::AbstractVector, mpi_ctx::MPIContext) -> Float64

Compute distributed dot product across all processes.
"""
function distributed_dot_product(x::AbstractVector, y::AbstractVector, mpi_ctx::MPIContext)
    # Each rank contributes its local partial product; the sum-reduction
    # yields the global dot product on every process.
    partial = dot(x, y)
    return all_reduce_scalar(partial, +, mpi_ctx)
end

"""
    distributed_norm(x::AbstractVector, p::Real, mpi_ctx::MPIContext) -> Float64

Compute distributed p-norm across all processes.
"""
"""
    distributed_norm(x::AbstractVector, p::Real, mpi_ctx::MPIContext) -> Float64

Compute distributed p-norm across all processes.

Uses the mapped-reduction forms (`sum(abs, x)`, `maximum(abs, x)`, …) so no
temporary `abs.(x)` array is allocated per call.
"""
function distributed_norm(x::AbstractVector, p::Real, mpi_ctx::MPIContext)
    if p == 2
        # Sum local squared norms, then take the root of the global sum.
        return sqrt(all_reduce_scalar(dot(x, x), +, mpi_ctx))
    elseif p == Inf
        return all_reduce_scalar(maximum(abs, x), max, mpi_ctx)
    elseif p == 1
        return all_reduce_scalar(sum(abs, x), +, mpi_ctx)
    else
        # General p-norm: reduce the local sums of |x|^p, then take the p-th root.
        global_norm_p = all_reduce_scalar(sum(t -> abs(t)^p, x), +, mpi_ctx)
        return global_norm_p^(1 / p)
    end
end

"""
    all_reduce_residual!(residual::AbstractVector, mpi_ctx::MPIContext)

Perform all-reduce on residual vector for convergence checking.
"""
function all_reduce_residual!(residual::AbstractVector, mpi_ctx::MPIContext)
    # No-op in serial runs; otherwise sum the residual vector across ranks.
    mpi_ctx.is_parallel || return nothing
    all_reduce!(residual, +, mpi_ctx)
end

"""
    global_convergence_check(local_converged::Bool, local_residual::Float64,
                            tolerance::Float64, mpi_ctx::MPIContext) -> Tuple{Bool, Float64}

Check global convergence across all processes.
"""
function global_convergence_check(local_converged::Bool, local_residual::Float64,
                                tolerance::Float64, mpi_ctx::MPIContext)
    # Global L2 residual: sum the squared per-rank residuals, then take the root.
    global_residual = sqrt(all_reduce_scalar(local_residual^2, +, mpi_ctx))

    # min-reduction over {0, 1}: yields 1 only if every rank reported converged.
    all_ranks_converged = all_reduce_scalar(local_converged ? 1 : 0, min, mpi_ctx) == 1

    converged = all_ranks_converged && (global_residual < tolerance)
    return converged, global_residual
end

"""
    CommunicationProfiler

Profiler for analyzing communication patterns and performance.
"""
mutable struct CommunicationProfiler
    total_messages::Int             # Number of messages recorded so far
    total_bytes_sent::Int           # Cumulative payload bytes sent
    total_bytes_received::Int       # Cumulative payload bytes received
    communication_time::Float64     # Accumulated communication time (seconds)
    wait_time::Float64
    overlap_efficiency::Float64
    message_sizes::Vector{Int}      # Length (Float64 elements) of each sent buffer
    message_times::Vector{Float64}  # Elapsed time of each completed exchange
    start_time::Float64             # Timestamp of the exchange currently in flight

    function CommunicationProfiler()
        new(0, 0, 0, 0.0, 0.0, 0.0, Int[], Float64[], 0.0)
    end
end

"""
    profile_communication!(profiler::CommunicationProfiler,
                          pdomain::ParallelDomain, operation::Symbol)

Profile a communication operation for performance analysis.

`operation` is one of `:start` (mark the beginning of an exchange),
`:send_complete` (record outgoing message counts and sizes) or
`:recv_complete` (record incoming sizes and accumulate the elapsed time
since the matching `:start`).
"""
function profile_communication!(profiler::CommunicationProfiler, 
                               pdomain::ParallelDomain, operation::Symbol)
    if operation == :start
        # Store the start timestamp in its own field. Previously this
        # overwrote the accumulated `communication_time`, turning that
        # statistic into an absolute wall-clock timestamp.
        profiler.start_time = wtime()
    elseif operation == :send_complete
        # One message per neighbor send buffer.
        for (rank, buffer) in pdomain.send_buffers
            profiler.total_messages += 1
            profiler.total_bytes_sent += length(buffer) * sizeof(Float64)
            push!(profiler.message_sizes, length(buffer))
        end
    elseif operation == :recv_complete
        for (rank, buffer) in pdomain.recv_buffers
            profiler.total_bytes_received += length(buffer) * sizeof(Float64)
        end

        # Accumulate the wall-clock duration of this exchange.
        elapsed = wtime() - profiler.start_time
        push!(profiler.message_times, elapsed)
        profiler.communication_time += elapsed
    end
end

"""
    get_communication_statistics(profiler::CommunicationProfiler, mpi_ctx::MPIContext) -> Dict

Get comprehensive communication statistics across all processes.
"""
function get_communication_statistics(profiler::CommunicationProfiler, mpi_ctx::MPIContext)
    # Per-rank statistics are always available.
    stats = Dict{String, Any}(
        "local_messages" => profiler.total_messages,
        "local_bytes_sent" => profiler.total_bytes_sent,
        "local_bytes_received" => profiler.total_bytes_received,
        "local_comm_time" => profiler.communication_time,
    )

    # Serial runs report only the local numbers.
    mpi_ctx.is_parallel || return stats

    # Reductions across all ranks.
    stats["global_messages"] = all_reduce_scalar(profiler.total_messages, +, mpi_ctx)
    stats["global_bytes_sent"] = all_reduce_scalar(profiler.total_bytes_sent, +, mpi_ctx)
    stats["global_bytes_received"] = all_reduce_scalar(profiler.total_bytes_received, +, mpi_ctx)
    stats["max_comm_time"] = all_reduce_scalar(profiler.communication_time, max, mpi_ctx)
    stats["min_comm_time"] = all_reduce_scalar(profiler.communication_time, min, mpi_ctx)
    stats["avg_comm_time"] = all_reduce_scalar(profiler.communication_time, +, mpi_ctx) / mpi_ctx.size

    # Load balance: 1.0 when every rank spends identical time communicating.
    max_t = stats["max_comm_time"]
    min_t = stats["min_comm_time"]
    stats["comm_load_balance"] = max_t > 0 ? 1.0 - (max_t - min_t) / max_t : 1.0

    # Effective bandwidth in MB/s over the slowest rank's communication time.
    total_data_mb = (stats["global_bytes_sent"] + stats["global_bytes_received"]) / (1024^2)
    stats["effective_bandwidth_mbps"] = max_t > 0 ? total_data_mb / max_t : 0.0

    return stats
end

"""
    optimize_communication_pattern!(pdomain::ParallelDomain, comm_pattern::CommunicationPattern)

Optimize communication pattern based on runtime measurements.
"""
"""
    optimize_communication_pattern!(pdomain::ParallelDomain, comm_pattern::CommunicationPattern)

Optimize communication pattern based on runtime measurements.

When the neighbor count exceeds the 3D face-neighbor count, communications
are reordered by rank; the tag vectors are permuted together with the rank
vectors so that `ranks[i]` keeps matching `tags[i]`.
"""
function optimize_communication_pattern!(pdomain::ParallelDomain, comm_pattern::CommunicationPattern)
    # Analyze current communication pattern
    num_neighbors = length(pdomain.neighbors)

    if num_neighbors > 6  # More than face neighbors in 3D
        @info "Communication pattern may benefit from optimization ($(num_neighbors) neighbors)"

        # Could implement:
        # 1. Message aggregation for small messages
        # 2. Communication scheduling to avoid contention
        # 3. Topology-aware communication ordering

        # Reorder communications by rank to reduce contention. Sorting the
        # rank vectors alone (as before) broke the index correspondence with
        # the tag vectors, so messages would be posted with mismatched tags.
        send_perm = sortperm(comm_pattern.send_ranks)
        permute!(comm_pattern.send_ranks, send_perm)
        permute!(comm_pattern.send_tags, send_perm)

        recv_perm = sortperm(comm_pattern.recv_ranks)
        permute!(comm_pattern.recv_ranks, recv_perm)
        permute!(comm_pattern.recv_tags, recv_perm)
    end
end

"""
    measure_communication_latency(pdomain::ParallelDomain) -> Dict

Measure point-to-point communication latency with neighbors.
"""
"""
    measure_communication_latency(pdomain::ParallelDomain) -> Dict

Measure point-to-point communication latency with neighbors via a ping-pong
test (half the round-trip time of a 1KB message per neighbor).
"""
function measure_communication_latency(pdomain::ParallelDomain)
    if !pdomain.mpi_ctx.is_parallel
        return Dict("latency" => 0.0)
    end

    latencies = Dict{Int, Float64}()
    test_message_size = 1024  # 1KB test message (1024 Float64 values)
    test_data = ones(Float64, test_message_size)
    recv_buffer = similar(test_data)

    for neighbor_rank in pdomain.neighbors
        # Ping-pong test: the lower rank sends first so the pair stays matched.
        barrier(pdomain.mpi_ctx)
        start_time = wtime()

        if pdomain.rank < neighbor_rank
            # Send first, then receive
            send_request = isend(test_data, neighbor_rank, 999, pdomain.mpi_ctx)
            recv_request = irecv!(recv_buffer, neighbor_rank, 999, pdomain.mpi_ctx)
            wait!(send_request)
            wait!(recv_request)
        else
            # Receive first, then send
            recv_request = irecv!(recv_buffer, neighbor_rank, 999, pdomain.mpi_ctx)
            send_request = isend(test_data, neighbor_rank, 999, pdomain.mpi_ctx)
            wait!(recv_request)
            wait!(send_request)
        end

        end_time = wtime()
        latencies[neighbor_rank] = (end_time - start_time) / 2.0  # Round-trip time / 2
        barrier(pdomain.mpi_ctx)
    end

    if isempty(latencies)
        return Dict("latency" => 0.0)
    end

    # Compute statistics. The average is computed directly because `mean`
    # is not in scope (this file imports only LinearAlgebra, not Statistics).
    vals = collect(values(latencies))
    return Dict(
        "latencies" => latencies,
        "min_latency" => minimum(vals),
        "max_latency" => maximum(vals),
        "avg_latency" => sum(vals) / length(vals)
    )
end

"""
    adaptive_message_aggregation!(pdomain::ParallelDomain, threshold_bytes::Int)

Implement adaptive message aggregation for small messages.
"""
function adaptive_message_aggregation!(pdomain::ParallelDomain, threshold_bytes::Int)
    # Flag each outgoing buffer smaller than the threshold as a candidate for
    # aggregation; no coalescing is performed yet.
    for (rank, send_buffer) in pdomain.send_buffers
        buffer_size_bytes = sizeof(Float64) * length(send_buffer)
        buffer_size_bytes < threshold_bytes || continue

        @debug "Small message to rank $rank ($(buffer_size_bytes) bytes) - candidate for aggregation"

        # Possible future strategies:
        # 1. Combine multiple small messages into one larger message
        # 2. Use persistent communication handles
        # 3. Implement message coalescing
    end
end

# Public interface of the communication utilities.
export CommunicationPattern, create_communication_pattern,
       exchange_ghost_values!, exchange_ghost_values_with_overlap!,
       distributed_dot_product, distributed_norm, all_reduce_residual!,
       global_convergence_check,
       CommunicationProfiler, profile_communication!, get_communication_statistics,
       optimize_communication_pattern!, measure_communication_latency,
       adaptive_message_aggregation!