# Domain Decomposition Framework for Parallel NSEMSolver.jl
# Implements domain partitioning strategies and load balancing for HPC-grade parallel computing

using LinearAlgebra
using Printf
using Statistics

"""
    GhostRegion

Represents a ghost region for inter-domain communication.
"""
struct GhostRegion{D}
    neighbor_rank::Int              # Rank of neighboring process
    interface_nodes::Vector{Int}    # Local node indices on interface
    ghost_nodes::Vector{Int}        # Ghost node indices
    send_indices::Vector{Int}       # Indices to send to neighbor
    recv_indices::Vector{Int}       # Indices to receive from neighbor
    interface_orientation::Symbol   # :x_pos, :x_neg, :y_pos, :y_neg, :z_pos, :z_neg
    overlap_width::Int              # Width of overlap region
end

"""
    ParallelDomain{D}

Core data structure for parallel domain decomposition.
"""
struct ParallelDomain{D}
    # Global domain information
    global_domain::Any              # Global MultiDomain (will be MultiDomain{D})
    local_domain::Any               # Local MultiDomain partition (will be MultiDomain{D})
    
    # MPI information
    mpi_ctx::MPIContext
    rank::Int
    num_procs::Int
    
    # Domain decomposition information
    proc_grid::NTuple{D,Int}        # Processor grid dimensions
    proc_coords::NTuple{D,Int}      # This process coordinates in grid
    
    # Connectivity and communication
    neighbors::Vector{Int}          # Neighboring process ranks
    ghost_regions::Dict{Int,GhostRegion{D}}  # Ghost regions by neighbor rank
    
    # Load balancing information
    local_work_estimate::Float64    # Estimated work for this subdomain
    global_work_distribution::Vector{Float64}  # Work estimates for all processes
    
    # Domain boundaries
    local_bbox::NTuple{D,NTuple{2,Float64}}  # Local bounding box (min, max) per dimension
    interfaces::Vector{Tuple{Int,Symbol}}    # (neighbor_rank, interface_type)
    
    # Communication buffers (allocated once, reused)
    send_buffers::Dict{Int,Vector{Float64}}
    recv_buffers::Dict{Int,Vector{Float64}}
    
    # Performance tracking
    decomposition_strategy::Symbol   # :block, :recursive, :graph, :custom
    load_balance_quality::Float64   # Load imbalance metric (0 = perfect, 1 = worst)
end

"""
    DecompositionStrategy

Abstract type for different domain decomposition strategies.
"""
abstract type DecompositionStrategy end

"""
    BlockDecomposition

Simple Cartesian block decomposition strategy.
"""
struct BlockDecomposition <: DecompositionStrategy
    proc_grid::NTuple          # Target processor grid
    overlap_width::Int         # Ghost cell overlap width
end

"""
    RecursiveBisectionDecomposition

Recursive coordinate bisection for load balancing.
"""
struct RecursiveBisectionDecomposition <: DecompositionStrategy
    max_imbalance::Float64     # Maximum allowed load imbalance
    bisection_axis::Symbol     # :adaptive, :round_robin, or specific axis
    work_estimate_function     # Function to estimate work per element
end

"""
    GraphDecomposition

Graph-based partitioning using connectivity information.
"""
struct GraphDecomposition <: DecompositionStrategy
    partitioner::Symbol        # :metis, :parmetis, :scotch (when available)
    edge_weights::Bool         # Whether to use edge weights
    vertex_weights::Bool       # Whether to use vertex weights
    imbalance_tolerance::Float64
end

"""
    decompose_domain(global_domain, strategy::DecompositionStrategy, mpi_ctx::MPIContext) -> ParallelDomain

Main entry point for domain decomposition.
"""
function decompose_domain(global_domain, strategy::DecompositionStrategy, mpi_ctx::MPIContext)
    D = global_domain.dim
    
    if strategy isa BlockDecomposition
        return block_decompose_domain(global_domain, strategy, mpi_ctx)
    elseif strategy isa RecursiveBisectionDecomposition  
        return recursive_bisection_decompose(global_domain, strategy, mpi_ctx)
    elseif strategy isa GraphDecomposition
        return graph_decompose_domain(global_domain, strategy, mpi_ctx)
    else
        throw(ArgumentError("Unknown decomposition strategy: $(typeof(strategy))"))
    end
end

"""
    block_decompose_domain(global_domain, strategy::BlockDecomposition, mpi_ctx::MPIContext)

Implement Cartesian block decomposition.
"""
function block_decompose_domain(global_domain, strategy::BlockDecomposition, mpi_ctx::MPIContext)
    D = global_domain.dim
    n_procs = mpi_ctx.size
    rank = mpi_ctx.rank
    
    # Determine processor grid dimensions
    if length(strategy.proc_grid) == D
        proc_grid = strategy.proc_grid
    else
        proc_grid = compute_optimal_proc_grid(n_procs, D)
    end
    
    @assert prod(proc_grid) == n_procs "Processor grid $(proc_grid) doesn't match number of processes ($n_procs)"
    
    # Compute this process coordinates in processor grid
    proc_coords = compute_proc_coordinates(rank, proc_grid)
    
    # Create local subdomain
    local_domain = create_local_subdomain(global_domain, proc_coords, proc_grid, strategy.overlap_width)
    
    # Identify neighbors and create ghost regions
    neighbors, ghost_regions, interfaces = setup_neighbor_connectivity(proc_coords, proc_grid, strategy.overlap_width)
    
    # Compute bounding box for local domain
    local_bbox = compute_local_bounding_box(local_domain, D)
    
    # Estimate work distribution
    local_work = estimate_local_work(local_domain)
    global_work = Vector{Float64}(undef, n_procs)
    global_work[rank + 1] = local_work
    all_reduce!(global_work, +, mpi_ctx)
    
    # Compute load balance quality
    load_balance_quality = compute_load_imbalance(global_work)
    
    # Initialize communication buffers
    send_buffers, recv_buffers = initialize_communication_buffers(ghost_regions, local_domain)
    
    return ParallelDomain{D}(
        global_domain, local_domain, mpi_ctx, rank, n_procs,
        proc_grid, proc_coords, neighbors, ghost_regions,
        local_work, global_work, local_bbox, interfaces,
        send_buffers, recv_buffers, :block, load_balance_quality
    )
end

"""
    compute_optimal_proc_grid(n_procs::Int, dim::Int) -> NTuple

Compute optimal processor grid dimensions for given number of processes.
"""
function compute_optimal_proc_grid(n_procs::Int, dim::Int)
    if dim == 1
        return (n_procs,)
    elseif dim == 2
        # Find factors closest to square
        factors = []
        for i in 1:floor(Int, sqrt(n_procs))
            if n_procs % i == 0
                push!(factors, (i, n_procs ÷ i))
            end
        end
        # Choose most square-like factorization
        best_factor = factors[end]  # Closest to square
        return best_factor
    elseif dim == 3
        # Find factors closest to cube
        factors = []
        for i in 1:floor(Int, cbrt(n_procs))
            if n_procs % i == 0
                remainder = n_procs ÷ i
                for j in i:floor(Int, sqrt(remainder))
                    if remainder % j == 0
                        k = remainder ÷ j
                        push!(factors, (i, j, k))
                    end
                end
            end
        end
        
        if isempty(factors)
            # Fallback: use 1D decomposition
            return (1, 1, n_procs)
        end
        
        # Choose most cube-like factorization
        best_factor = factors[end]
        return best_factor
    else
        throw(ArgumentError("Unsupported dimension: $dim"))
    end
end

"""
    compute_proc_coordinates(rank::Int, proc_grid::NTuple) -> NTuple

Convert linear rank to multidimensional processor coordinates.
"""
function compute_proc_coordinates(rank::Int, proc_grid::NTuple{D,Int}) where D
    coords = Vector{Int}(undef, D)
    remaining = rank
    
    for i in 1:D-1
        coords[i] = remaining % proc_grid[i]
        remaining = remaining ÷ proc_grid[i]
    end
    coords[D] = remaining
    
    return NTuple{D,Int}(coords)
end

"""
    create_local_subdomain(global_domain, proc_coords, proc_grid, overlap_width)

Create local subdomain for this process.
"""
function create_local_subdomain(global_domain, proc_coords, proc_grid, overlap_width)
    # This is a simplified version - would need to interface with actual MultiDomain creation
    # For now, return a modified copy of global domain with local parameters
    
    # Calculate local block indices
    global_n_block = global_domain.n_block
    local_blocks_per_dim = global_n_block .÷ proc_grid
    
    # Compute starting indices for this process
    start_indices = proc_coords .* local_blocks_per_dim
    end_indices = start_indices .+ local_blocks_per_dim .- 1
    
    # Add overlap regions
    start_indices = max.(start_indices .- overlap_width, 0)
    end_indices = min.(end_indices .+ overlap_width, global_n_block - 1)
    
    # Create local domain (simplified - would use proper MultiDomain constructor)
    local_n_block = maximum(end_indices .- start_indices .+ 1)
    
    return create_multidomain_impl(local_n_block, global_domain.n, 
                                  global_domain.time_param, global_domain.dim,
                                  global_domain.length, false, 3)
end

"""
    setup_neighbor_connectivity(proc_coords, proc_grid, overlap_width)

Identify neighboring processes and setup ghost region connectivity.
"""
function setup_neighbor_connectivity(proc_coords, proc_grid, overlap_width)
    D = length(proc_coords)
    neighbors = Int[]
    ghost_regions = Dict{Int,GhostRegion{D}}()
    interfaces = Tuple{Int,Symbol}[]
    
    # Check all adjacent processes in each dimension
    for dim in 1:D
        # Negative direction neighbor
        if proc_coords[dim] > 0
            neighbor_coords = collect(proc_coords)
            neighbor_coords[dim] -= 1
            neighbor_rank = compute_rank_from_coordinates(Tuple(neighbor_coords), proc_grid)
            push!(neighbors, neighbor_rank)
            
            # Create ghost region for this neighbor
            interface_orientation = Symbol("$([:x,:y,:z][dim])_neg")
            ghost_region = create_ghost_region(neighbor_rank, interface_orientation, overlap_width, D)
            ghost_regions[neighbor_rank] = ghost_region
            push!(interfaces, (neighbor_rank, interface_orientation))
        end
        
        # Positive direction neighbor
        if proc_coords[dim] < proc_grid[dim] - 1
            neighbor_coords = collect(proc_coords)
            neighbor_coords[dim] += 1
            neighbor_rank = compute_rank_from_coordinates(Tuple(neighbor_coords), proc_grid)
            push!(neighbors, neighbor_rank)
            
            # Create ghost region for this neighbor
            interface_orientation = Symbol("$([:x,:y,:z][dim])_pos")
            ghost_region = create_ghost_region(neighbor_rank, interface_orientation, overlap_width, D)
            ghost_regions[neighbor_rank] = ghost_region
            push!(interfaces, (neighbor_rank, interface_orientation))
        end
    end
    
    return neighbors, ghost_regions, interfaces
end

"""
    compute_rank_from_coordinates(coords::NTuple, proc_grid::NTuple) -> Int

Convert multidimensional processor coordinates back to linear rank.
"""
function compute_rank_from_coordinates(coords::NTuple{D,Int}, proc_grid::NTuple{D,Int}) where D
    rank = 0
    multiplier = 1
    
    for i in 1:D-1
        rank += coords[i] * multiplier
        multiplier *= proc_grid[i]
    end
    rank += coords[D] * multiplier
    
    return rank
end

"""
    create_ghost_region(neighbor_rank::Int, orientation::Symbol, overlap_width::Int, D::Int)

Create ghost region data structure for communication with specific neighbor.
"""
function create_ghost_region(neighbor_rank::Int, orientation::Symbol, overlap_width::Int, D::Int)
    # Simplified - would compute actual interface and ghost node indices
    # based on local domain structure and interface orientation
    
    interface_nodes = Int[]    # Would be computed from local domain
    ghost_nodes = Int[]        # Would be computed from local domain  
    send_indices = Int[]       # Would be computed from local domain
    recv_indices = Int[]       # Would be computed from local domain
    
    return GhostRegion{D}(neighbor_rank, interface_nodes, ghost_nodes, 
                         send_indices, recv_indices, orientation, overlap_width)
end

"""
    compute_local_bounding_box(local_domain, D::Int)

Compute bounding box for local domain.
"""
function compute_local_bounding_box(local_domain, D::Int)
    # Simplified - would compute from actual domain coordinates
    if D == 2
        return ((0.0, 1.0), (0.0, 1.0))
    elseif D == 3
        return ((0.0, 1.0), (0.0, 1.0), (0.0, 1.0))
    else
        error("Unsupported dimension: $D")
    end
end

"""
    estimate_local_work(local_domain) -> Float64

Estimate computational work for local domain.
"""
function estimate_local_work(local_domain)
    # Simple work estimate based on number of elements and polynomial order
    n_elements = local_domain.n_block^local_domain.dim
    work_per_element = local_domain.n^local_domain.dim  # Polynomial order dependency
    return Float64(n_elements * work_per_element)
end

"""
    compute_load_imbalance(work_distribution::Vector{Float64}) -> Float64

Compute load imbalance metric from work distribution.
"""
function compute_load_imbalance(work_distribution::Vector{Float64})
    if isempty(work_distribution)
        return 1.0
    end
    
    max_work = maximum(work_distribution)
    min_work = minimum(work_distribution)
    
    if max_work ≈ 0.0
        return 0.0
    end
    
    return (max_work - min_work) / max_work
end

"""
    initialize_communication_buffers(ghost_regions, local_domain)

Initialize send and receive buffers for ghost region communication.
"""
function initialize_communication_buffers(ghost_regions, local_domain)
    send_buffers = Dict{Int,Vector{Float64}}()
    recv_buffers = Dict{Int,Vector{Float64}}()
    
    for (rank, ghost_region) in ghost_regions
        # Estimate buffer sizes based on interface size and number of variables
        n_vars = 4  # u, v, p (+ w for 3D)
        if local_domain.dim == 3
            n_vars = 5
        end
        
        send_buffer_size = length(ghost_region.send_indices) * n_vars
        recv_buffer_size = length(ghost_region.recv_indices) * n_vars
        
        send_buffers[rank] = Vector{Float64}(undef, max(send_buffer_size, 1))
        recv_buffers[rank] = Vector{Float64}(undef, max(recv_buffer_size, 1))
    end
    
    return send_buffers, recv_buffers
end

"""
    recursive_bisection_decompose(global_domain, strategy::RecursiveBisectionDecomposition, mpi_ctx::MPIContext)

Implement recursive coordinate bisection for better load balancing.
"""
function recursive_bisection_decompose(global_domain, strategy::RecursiveBisectionDecomposition, mpi_ctx::MPIContext)
    # This would implement a more sophisticated partitioning algorithm
    # For now, fall back to block decomposition
    block_strategy = BlockDecomposition(compute_optimal_proc_grid(mpi_ctx.size, global_domain.dim), 1)
    return block_decompose_domain(global_domain, block_strategy, mpi_ctx)
end

"""
    graph_decompose_domain(global_domain, strategy::GraphDecomposition, mpi_ctx::MPIContext)

Implement graph-based domain decomposition (requires external partitioning library).
"""
function graph_decompose_domain(global_domain, strategy::GraphDecomposition, mpi_ctx::MPIContext)
    # This would implement graph partitioning using METIS/ParMETIS
    # For now, fall back to block decomposition
    block_strategy = BlockDecomposition(compute_optimal_proc_grid(mpi_ctx.size, global_domain.dim), 1)
    return block_decompose_domain(global_domain, block_strategy, mpi_ctx)
end

"""
    rebalance_domain!(pdomain::ParallelDomain, new_work_distribution::Vector{Float64})

Dynamically rebalance domain decomposition based on updated work estimates.
"""
function rebalance_domain!(pdomain::ParallelDomain, new_work_distribution::Vector{Float64})
    # Check if rebalancing is needed
    new_imbalance = compute_load_imbalance(new_work_distribution)
    
    if new_imbalance > pdomain.load_balance_quality * 1.2  # 20% worse
        @info "Dynamic load rebalancing triggered. Imbalance: $(new_imbalance*100)%"
        
        # Implement domain migration/repartitioning
        # This is a complex operation that would:
        # 1. Decide on new partitioning
        # 2. Migrate data between processes
        # 3. Update ghost regions and communication patterns
        
        # For now, just update the work estimates
        pdomain.global_work_distribution .= new_work_distribution
        pdomain.load_balance_quality = new_imbalance
    end
end

"""
    get_decomposition_stats(pdomain::ParallelDomain) -> Dict

Get detailed statistics about the domain decomposition.
"""
function get_decomposition_stats(pdomain::ParallelDomain)
    stats = Dict{String, Any}()
    
    # Basic decomposition info
    stats["strategy"] = pdomain.decomposition_strategy
    stats["num_processes"] = pdomain.num_procs
    stats["processor_grid"] = pdomain.proc_grid
    stats["load_imbalance"] = pdomain.load_balance_quality
    
    # Communication complexity
    stats["num_neighbors"] = length(pdomain.neighbors)
    stats["total_interfaces"] = length(pdomain.interfaces)
    
    # Memory usage for communication
    total_send_buffer = sum(length(buf) for buf in values(pdomain.send_buffers))
    total_recv_buffer = sum(length(buf) for buf in values(pdomain.recv_buffers))
    stats["communication_memory_mb"] = (total_send_buffer + total_recv_buffer) * 8 / (1024^2)
    
    # Work distribution
    if pdomain.mpi_ctx.is_parallel
        global_work_stats = Dict{String, Float64}(
            "total_work" => sum(pdomain.global_work_distribution),
            "min_work" => minimum(pdomain.global_work_distribution),
            "max_work" => maximum(pdomain.global_work_distribution),
            "mean_work" => mean(pdomain.global_work_distribution),
            "std_work" => std(pdomain.global_work_distribution)
        )
        stats["work_distribution"] = global_work_stats
    end
    
    return stats
end

"""
    print_decomposition_summary(pdomain::ParallelDomain)

Print formatted summary of domain decomposition.
"""
function print_decomposition_summary(pdomain::ParallelDomain)
    if is_root(pdomain.mpi_ctx)
        stats = get_decomposition_stats(pdomain)
        
        println("\n" * "="^60)
        println("DOMAIN DECOMPOSITION SUMMARY")
        println("="^60)
        
        println("Strategy: $(stats["strategy"])")
        println("Processes: $(stats["num_processes"])")
        println("Processor Grid: $(stats["processor_grid"])")
        @printf("Load Imbalance: %.2f%%\n", stats["load_imbalance"] * 100)
        println("Neighbors per Process (avg): $(stats["num_neighbors"])")
        println("Total Interfaces: $(stats["total_interfaces"])")
        @printf("Communication Memory: %.2f MB\n", stats["communication_memory_mb"])
        
        if haskey(stats, "work_distribution")
            wd = stats["work_distribution"]
            println("\nWork Distribution:")
            @printf("  Total Work: %.2e\n", wd["total_work"])
            @printf("  Min Work:   %.2e\n", wd["min_work"])
            @printf("  Max Work:   %.2e\n", wd["max_work"])
            @printf("  Mean Work:  %.2e\n", wd["mean_work"])
            @printf("  Std Work:   %.2e\n", wd["std_work"])
        end
        
        println("="^60)
    end
end

# Export public interface
# Core data types
export ParallelDomain, GhostRegion
# Decomposition strategy types
export DecompositionStrategy, BlockDecomposition, RecursiveBisectionDecomposition, GraphDecomposition
# Decomposition entry points
export decompose_domain, block_decompose_domain
# Rebalancing and diagnostics
export rebalance_domain!, get_decomposition_stats, print_decomposition_summary
# Utility helpers
export compute_optimal_proc_grid, compute_load_imbalance