# Distributed Linear Solvers for Parallel NSEMSolver.jl
# High-performance parallel Krylov methods with scalable preconditioning

using LinearAlgebra
using SparseArrays

"""
    DistributedNSMatrix

Distributed matrix representation for Navier-Stokes systems.
"""
struct DistributedNSMatrix{T}
    # Local matrix blocks
    local_matrix::SparseMatrixCSC{T,Int}        # Block coupling locally-owned DOFs to each other
    interface_matrices::Dict{Int,SparseMatrixCSC{T,Int}}  # neighbor rank => block coupling local DOFs to that neighbor's ghost DOFs
    
    # Ghost region information
    ghost_dofs::Vector{Int}                     # Global DOF indices for ghost (off-rank) nodes
    local_dofs::Vector{Int}                     # Global DOF indices for locally-owned nodes
    
    # Parallel domain information
    pdomain::ParallelDomain                     # Partition/neighbor/buffer metadata (project type)
    
    # Matrix properties
    n_local::Int                                # Number of locally-owned DOFs (rows of local_matrix)
    n_global::Int                               # Total number of global DOFs across all ranks
    n_vars::Int                                 # Number of variables per node (u,v,p or u,v,w,p)
    
    # Communication pattern for matrix-vector operations
    # (precomputed so each matvec does not rebuild send/recv schedules)
    matvec_comm_pattern::CommunicationPattern
end

"""
    DistributedVector{T}

Distributed vector for parallel linear algebra operations.
"""
struct DistributedVector{T}
    local_values::Vector{T}                     # Locally-owned entries (length n_local)
    ghost_values::Vector{T}                     # Off-rank entries, refreshed by exchange_distributed_vector!
    pdomain::ParallelDomain                     # Partition metadata shared with the matrix
    n_local::Int                                # Number of locally-owned entries
    n_global::Int                               # Global vector length (sum of n_local over all ranks)
end

"""
    create_distributed_matrix(local_A::SparseMatrixCSC, interface_matrices::Dict,
                             pdomain::ParallelDomain, n_vars::Int) -> DistributedNSMatrix

Create distributed matrix from local components.
"""
function create_distributed_matrix(local_A::SparseMatrixCSC{T,Int}, 
                                 interface_matrices::Dict{Int,SparseMatrixCSC{T,Int}},
                                 pdomain::ParallelDomain, n_vars::Int) where T
    
    n_local = size(local_A, 1)
    
    # Global DOF count = sum of local counts over all ranks. A single
    # all-reduce replaces the original gather-to-root + broadcast round
    # trip and matches how create_distributed_vector computes n_global.
    n_global = all_reduce_scalar(n_local, +, pdomain.mpi_ctx)
    
    # Simplified block-contiguous DOF mapping: rank r owns DOFs
    # r*n_local+1 .. (r+1)*n_local.
    # NOTE(review): only valid when every rank owns exactly n_local DOFs —
    # confirm against the actual partitioner before relying on these indices.
    local_dofs = collect(1:n_local) .+ pdomain.rank * n_local
    ghost_dofs = Int[]  # would be populated from ghost-region metadata
    
    # Precompute send/recv schedule reused by every matrix-vector product.
    matvec_comm_pattern = create_communication_pattern(pdomain)
    
    return DistributedNSMatrix{T}(
        local_A, interface_matrices, ghost_dofs, local_dofs,
        pdomain, n_local, n_global, n_vars, matvec_comm_pattern
    )
end

"""
    create_distributed_vector(local_values::Vector{T}, pdomain::ParallelDomain) -> DistributedVector{T}

Create distributed vector from local values.
"""
function create_distributed_vector(local_values::Vector{T}, pdomain::ParallelDomain) where T
    n_local = length(local_values)
    
    # Total ghost slots across all neighbors. `init=0` keeps this correct
    # when there are no ghost regions (serial run or isolated subdomain);
    # `sum` over an empty generator would otherwise throw a MethodError.
    n_ghost = sum((length(gr.ghost_nodes) for gr in values(pdomain.ghost_regions)); init=0)
    ghost_values = zeros(T, n_ghost)
    
    # Global length is the sum of local lengths over all ranks.
    n_global = all_reduce_scalar(n_local, +, pdomain.mpi_ctx)
    
    return DistributedVector{T}(local_values, ghost_values, pdomain, n_local, n_global)
end

"""
    distributed_matvec!(y::DistributedVector, A::DistributedNSMatrix, x::DistributedVector)

Distributed sparse matrix-vector multiplication: y = A * x
"""
function distributed_matvec!(y::DistributedVector{T}, A::DistributedNSMatrix{T}, 
                           x::DistributedVector{T}) where T
    # Step 1: Refresh x's ghost values from neighboring ranks.
    exchange_distributed_vector!(x)
    
    # Step 2: Local part: y_local = A_local * x_local.
    mul!(y.local_values, A.local_matrix, x.local_values)
    
    # Step 3: Interface contributions: each neighbor's coupling block
    # multiplies its own slice of the ghost vector. A running offset keeps
    # the slices disjoint — the original always started every slice at 1,
    # so with more than one neighbor every block read the same leading
    # ghost segment.
    # NOTE(review): assumes ghost values are packed in the same Dict
    # iteration order used by exchange_distributed_vector! — confirm the
    # packing convention against the ghost-region construction code.
    ghost_offset = 0
    for (neighbor_rank, interface_matrix) in A.interface_matrices
        ncols = size(interface_matrix, 2)
        ghost_vec = view(x.ghost_values, ghost_offset+1:ghost_offset+ncols)
        
        # y_local += interface_matrix * ghost slice (5-arg mul! accumulates)
        mul!(y.local_values, interface_matrix, ghost_vec, 1.0, 1.0)
        ghost_offset += ncols
    end
    
    # Result is purely local — no ghost exchange needed on y.
end

"""
    exchange_distributed_vector!(x::DistributedVector)

Exchange ghost values for distributed vector.
"""
function exchange_distributed_vector!(x::DistributedVector{T}) where T
    # Refresh x.ghost_values with the current owner values from neighbor ranks.
    pdomain = x.pdomain
    mpi_ctx = pdomain.mpi_ctx
    
    if !mpi_ctx.is_parallel || length(pdomain.neighbors) == 0
        return  # Serial run or no neighbors: nothing to exchange
    end
    
    # Pack the locally-owned values each neighbor needs into its send buffer.
    # ghost_region.send_indices lists the local indices that neighbor reads.
    for (rank, ghost_region) in pdomain.ghost_regions
        send_buffer = pdomain.send_buffers[rank]
        for (i, local_idx) in enumerate(ghost_region.send_indices)
            send_buffer[i] = x.local_values[local_idx]
        end
    end
    
    # Pairwise exchange with each neighbor. NOTE(review): same rank is used
    # as both send and receive partner — presumably send_receive wraps a
    # combined MPI sendrecv so this cannot deadlock; confirm in the MPI
    # helper's implementation.
    for neighbor_rank in pdomain.neighbors
        if haskey(pdomain.send_buffers, neighbor_rank)
            send_buffer = pdomain.send_buffers[neighbor_rank]
            recv_buffer = pdomain.recv_buffers[neighbor_rank]
            
            send_receive(send_buffer, neighbor_rank, recv_buffer, neighbor_rank, mpi_ctx)
        end
    end
    
    # Unpack received values into the flat ghost array. Ghost entries are
    # assumed packed contiguously per neighbor in ghost_regions' Dict
    # iteration order — NOTE(review): this ordering must match how
    # ghost_values consumers (e.g. interface matvec) index the array; confirm.
    ghost_idx = 1
    for (rank, ghost_region) in pdomain.ghost_regions
        recv_buffer = pdomain.recv_buffers[rank]
        for (i, _) in enumerate(ghost_region.recv_indices)
            x.ghost_values[ghost_idx] = recv_buffer[i]
            ghost_idx += 1
        end
    end
end

"""
    ParallelKrylovSolver

Abstract base type for parallel Krylov solvers.
"""
abstract type ParallelKrylovSolver end

"""
    ParallelGMRES

Parallel GMRES solver with restart.
"""
struct ParallelGMRES <: ParallelKrylovSolver
    restart::Int                        # GMRES restart length (Arnoldi cycle size)
    max_iter::Int                       # Maximum total iterations across restart cycles
    tol::Float64                        # Convergence tolerance on the residual norm
    preconditioner::Any                 # Optional preconditioner; `nothing` disables it
                                        # NOTE(review): ::Any field — consider a type
                                        # parameter if this becomes performance-critical
    
    # Working storage (mutated in place by solve!)
    krylov_vectors::Vector{DistributedVector}      # Arnoldi basis; sized restart+1 lazily
    hessenberg_matrix::Matrix{Float64}             # (restart+1) × restart upper-Hessenberg
    givens_rotations::Vector{Tuple{Float64,Float64}}  # (c, s) pairs, one per Arnoldi column
    residual_norms::Vector{Float64}                # Estimated residual history per cycle
end

"""
    create_parallel_gmres(restart::Int, max_iter::Int, tol::Float64, 
                         preconditioner=nothing) -> ParallelGMRES

Create parallel GMRES solver.
"""
function create_parallel_gmres(restart::Int, max_iter::Int, tol::Float64, 
                              preconditioner=nothing)
    # Krylov vectors are left empty here and allocated lazily inside solve!,
    # because the distributed layout is unknown until a matrix is supplied.
    # The Hessenberg matrix has the standard (restart+1) × restart shape.
    return ParallelGMRES(
        restart, max_iter, tol, preconditioner,
        DistributedVector[],
        Matrix{Float64}(undef, restart + 1, restart),
        Tuple{Float64,Float64}[],
        Float64[],
    )
end

"""
    solve!(x::DistributedVector, solver::ParallelGMRES, 
          A::DistributedNSMatrix, b::DistributedVector) -> Bool

Solve linear system Ax = b using parallel GMRES.
"""
function solve!(x::DistributedVector{T}, solver::ParallelGMRES, 
               A::DistributedNSMatrix{T}, b::DistributedVector{T}) where T
    # Right-preconditioned restarted GMRES. Fixes several defects in the
    # previous implementation:
    #   * `for j in 1:restart` shadowed the outer `j` (Julia loop variables
    #     are new locals unless declared `outer`), so the post-loop solution
    #     update always saw j == 0 and never updated x;
    #   * the Hessenberg matrix was indexed up to row j+2, overflowing its
    #     (restart+1) × restart allocation at j == restart;
    #   * Gram-Schmidt orthogonalized against v[1:j+1], where v[j+1] held a
    #     preconditioned (non-orthonormal) vector;
    #   * the Givens rotation used `tau = -a/b`, which does not annihilate
    #     the subdiagonal entry;
    #   * the least-squares RHS used only beta instead of the rotated
    #     beta*e1 vector, and the update used v[i+1] instead of v[i].
    
    pdomain = A.pdomain
    mpi_ctx = pdomain.mpi_ctx
    restart = solver.restart
    max_iter = solver.max_iter
    tol = solver.tol
    
    # Lazily allocate the restart+1 Arnoldi basis vectors.
    if length(solver.krylov_vectors) != restart + 1
        resize!(solver.krylov_vectors, restart + 1)
        for i in 1:restart+1
            solver.krylov_vectors[i] = create_distributed_vector(zeros(T, x.n_local), pdomain)
        end
    end
    V = solver.krylov_vectors
    H = solver.hessenberg_matrix  # (restart+1) × restart
    
    # Initial residual r = b - A*x (V[1] used as scratch for A*x).
    r = create_distributed_vector(copy(b.local_values), pdomain)
    distributed_matvec!(V[1], A, x)
    axpy!(-1.0, V[1].local_values, r.local_values)
    
    initial_residual_norm = distributed_norm(r.local_values, 2, mpi_ctx)
    if is_root(mpi_ctx)
        @info "Parallel GMRES: Initial residual norm = $initial_residual_norm"
    end
    if initial_residual_norm < tol
        return true  # Already converged
    end
    
    # RHS of the least-squares problem (beta*e1, rotated in place).
    g = zeros(Float64, restart + 1)
    
    outer_iterations = 0
    converged = false
    
    while outer_iterations < max_iter && !converged
        # --- Start a restart cycle: v1 = r / ||r|| ---
        beta = distributed_norm(r.local_values, 2, mpi_ctx)
        if beta < tol
            converged = true
            break
        end
        V[1].local_values .= r.local_values ./ beta
        
        fill!(H, 0.0)
        empty!(solver.givens_rotations)
        empty!(solver.residual_norms)
        push!(solver.residual_norms, beta)
        fill!(g, 0.0)
        g[1] = beta
        
        cycle_dim = 0          # number of completed Arnoldi columns this cycle
        inner_converged = false
        
        for j in 1:restart
            # Right preconditioning: z = M^{-1} v_j (z = v_j when unpreconditioned).
            z = create_distributed_vector(zeros(T, x.n_local), pdomain)
            if solver.preconditioner !== nothing
                apply_preconditioner!(z, solver.preconditioner, V[j])
            else
                z.local_values .= V[j].local_values
            end
            
            # w = A*z
            w = create_distributed_vector(zeros(T, x.n_local), pdomain)
            distributed_matvec!(w, A, z)
            
            # Modified Gram-Schmidt against the orthonormal basis v_1..v_j.
            for i in 1:j
                H[i, j] = distributed_dot_product(w.local_values, V[i].local_values, mpi_ctx)
                axpy!(-H[i, j], V[i].local_values, w.local_values)
            end
            
            H[j+1, j] = distributed_norm(w.local_values, 2, mpi_ctx)
            if H[j+1, j] > 1e-14
                V[j+1].local_values .= w.local_values ./ H[j+1, j]
            end  # else: happy breakdown; residual estimate below goes to ~0
            
            # Apply the j-1 previous Givens rotations to the new column.
            for i in 1:j-1
                c, s = solver.givens_rotations[i]
                temp = c * H[i, j] + s * H[i+1, j]
                H[i+1, j] = -s * H[i, j] + c * H[i+1, j]
                H[i, j] = temp
            end
            
            # New rotation annihilating H[j+1, j] (Golub–Van Loan signs).
            a = H[j, j]
            bsub = H[j+1, j]
            if abs(bsub) > 1e-14
                if abs(bsub) > abs(a)
                    tau = a / bsub
                    s = 1.0 / sqrt(1.0 + tau^2)
                    c = s * tau
                else
                    tau = bsub / a
                    c = 1.0 / sqrt(1.0 + tau^2)
                    s = c * tau
                end
            else
                c, s = 1.0, 0.0
            end
            push!(solver.givens_rotations, (c, s))
            
            H[j, j] = c * a + s * bsub
            H[j+1, j] = 0.0
            
            # Rotate the RHS; |g[j+1]| is the current residual norm estimate.
            g[j+1] = -s * g[j]
            g[j] = c * g[j]
            
            cycle_dim = j
            new_residual_norm = abs(g[j+1])
            push!(solver.residual_norms, new_residual_norm)
            
            if is_root(mpi_ctx) && (j % 10 == 0 || new_residual_norm < tol)
                @info "GMRES iteration $j: residual = $new_residual_norm"
            end
            
            if new_residual_norm < tol
                inner_converged = true
                break
            end
        end
        
        # Back-substitution on the triangular system H[1:m,1:m] y = g[1:m].
        m = cycle_dim
        y = zeros(T, m)
        for i in m:-1:1
            acc = g[i]
            for k in i+1:m
                acc -= H[i, k] * y[k]
            end
            # Guard against an exactly-zero pivot from a total breakdown.
            y[i] = abs(H[i, i]) > 1e-14 ? acc / H[i, i] : zero(T)
        end
        
        # Solution update: x += M^{-1} (V_m * y). Accumulate the linear
        # combination first so the preconditioner is applied only once.
        if m > 0
            update = create_distributed_vector(zeros(T, x.n_local), pdomain)
            for i in 1:m
                axpy!(y[i], V[i].local_values, update.local_values)
            end
            if solver.preconditioner !== nothing
                temp = create_distributed_vector(zeros(T, x.n_local), pdomain)
                apply_preconditioner!(temp, solver.preconditioner, update)
                axpy!(1.0, temp.local_values, x.local_values)
            else
                axpy!(1.0, update.local_values, x.local_values)
            end
        end
        
        # True residual for the restart (and for final reporting).
        distributed_matvec!(V[1], A, x)
        r.local_values .= b.local_values
        axpy!(-1.0, V[1].local_values, r.local_values)
        
        if inner_converged
            converged = true
        end
        
        outer_iterations += restart
    end
    
    final_residual_norm = distributed_norm(r.local_values, 2, mpi_ctx)
    if is_root(mpi_ctx)
        @info "Parallel GMRES completed: Final residual norm = $final_residual_norm, Converged = $converged"
    end
    
    return converged
end

"""
    ParallelCG

Parallel Conjugate Gradient solver.
"""
struct ParallelCG <: ParallelKrylovSolver
    max_iter::Int           # Maximum CG iterations
    tol::Float64            # Convergence tolerance on the (preconditioned) residual norm
    preconditioner::Any     # Optional SPD preconditioner; `nothing` disables it
end

"""
    solve!(x::DistributedVector, solver::ParallelCG,
          A::DistributedNSMatrix, b::DistributedVector) -> Bool

Solve symmetric positive definite system Ax = b using parallel CG.
"""
function solve!(x::DistributedVector{T}, solver::ParallelCG,
               A::DistributedNSMatrix{T}, b::DistributedVector{T}) where T
    # Preconditioned Conjugate Gradient for SPD systems. The solution x is
    # updated in place; returns true on convergence.
    pdomain = A.pdomain
    mpi_ctx = pdomain.mpi_ctx
    precond = solver.preconditioner
    
    # Work vectors: residual, search direction, A*direction, and the
    # preconditioned residual (also used once as scratch for A*x).
    res = create_distributed_vector(copy(b.local_values), pdomain)
    dir = create_distributed_vector(zeros(T, x.n_local), pdomain)
    Adir = create_distributed_vector(zeros(T, x.n_local), pdomain)
    zvec = create_distributed_vector(zeros(T, x.n_local), pdomain)
    
    # res = b - A*x
    distributed_matvec!(zvec, A, x)
    res.local_values .= b.local_values
    axpy!(-1.0, zvec.local_values, res.local_values)
    
    # zvec = M^{-1} * res (identity when no preconditioner is configured).
    if precond === nothing
        zvec.local_values .= res.local_values
    else
        apply_preconditioner!(zvec, precond, res)
    end
    
    rsold = distributed_dot_product(res.local_values, zvec.local_values, mpi_ctx)
    dir.local_values .= zvec.local_values
    
    # With a preconditioner this is the M^{-1}-inner-product norm of r,
    # not the plain 2-norm — same quantity the convergence test uses.
    initial_residual_norm = sqrt(rsold)
    if is_root(mpi_ctx)
        @info "Parallel CG: Initial residual norm = $initial_residual_norm"
    end
    
    converged = initial_residual_norm < solver.tol
    
    iter = 0
    while !converged && iter < solver.max_iter
        iter += 1
        
        distributed_matvec!(Adir, A, dir)
        
        # Optimal step length along the current search direction.
        dAd = distributed_dot_product(dir.local_values, Adir.local_values, mpi_ctx)
        alpha = rsold / dAd
        
        axpy!(alpha, dir.local_values, x.local_values)      # x   += alpha * dir
        axpy!(-alpha, Adir.local_values, res.local_values)  # res -= alpha * A*dir
        
        # Re-precondition the updated residual.
        if precond === nothing
            zvec.local_values .= res.local_values
        else
            apply_preconditioner!(zvec, precond, res)
        end
        
        rsnew = distributed_dot_product(res.local_values, zvec.local_values, mpi_ctx)
        residual_norm = sqrt(rsnew)
        
        if is_root(mpi_ctx) && (iter % 10 == 0 || residual_norm < solver.tol)
            @info "CG iteration $iter: residual = $residual_norm"
        end
        
        if residual_norm < solver.tol
            converged = true
        else
            # dir = zvec + (rsnew/rsold) * dir — standard PCG direction update.
            dir.local_values .= zvec.local_values .+ (rsnew / rsold) .* dir.local_values
            rsold = rsnew
        end
    end
    
    if is_root(mpi_ctx)
        @info "Parallel CG completed: Converged = $converged"
    end
    
    return converged
end

"""
    BlockJacobiPreconditioner

Block Jacobi preconditioner for parallel systems.
"""
struct BlockJacobiPreconditioner
    local_blocks::Vector{Any}           # Holds one entry: this rank's block factorization
                                        # (sparse/dense LU, or Diagonal as a fallback)
    pdomain::ParallelDomain             # Partition metadata (unused at apply time —
                                        # block Jacobi needs no communication)
end

"""
    create_block_jacobi_preconditioner(A::DistributedNSMatrix) -> BlockJacobiPreconditioner

Create block Jacobi preconditioner from distributed matrix.
"""
function create_block_jacobi_preconditioner(A::DistributedNSMatrix)
    # Each rank preconditions with its own diagonal block; no coupling
    # between ranks, hence no communication at apply time.
    diagonal_block = A.local_matrix
    
    # Prefer a full LU of the local block; fall back to plain diagonal
    # (Jacobi) scaling if the factorization fails, e.g. for a singular
    # block. The failure is logged rather than silently swallowed so the
    # degraded preconditioner quality is visible in the run log.
    local_factorization = try
        lu(diagonal_block)
    catch err
        @warn "Local block LU factorization failed; falling back to diagonal scaling" exception=err
        Diagonal(diag(diagonal_block))
    end
    
    return BlockJacobiPreconditioner([local_factorization], A.pdomain)
end

"""
    apply_preconditioner!(y::DistributedVector, precond::BlockJacobiPreconditioner,
                         x::DistributedVector)

Apply block Jacobi preconditioner: y = M^(-1) * x
"""
function apply_preconditioner!(y::DistributedVector, precond::BlockJacobiPreconditioner,
                              x::DistributedVector)
    # Apply the local block inverse: y_local = block^{-1} * x_local.
    local_factorization = precond.local_blocks[1]
    
    if local_factorization isa Union{Factorization, Diagonal}
        # ldiv! handles every factorization type lu/cholesky can return,
        # including the UmfpackLU produced by lu(::SparseMatrixCSC), plus
        # Diagonal. The previous `isa LU` test was false for UmfpackLU
        # (it is not a subtype of LU), so the sparse case silently fell
        # through to the identity fallback below.
        ldiv!(y.local_values, local_factorization, x.local_values)
    else
        # Unknown block type: degrade to the identity preconditioner.
        y.local_values .= x.local_values
    end
    
    # Block Jacobi is purely local — no communication needed.
end

# Export public interface
# Distributed linear-algebra types and constructors
export DistributedNSMatrix, DistributedVector
export create_distributed_matrix, create_distributed_vector
export distributed_matvec!, exchange_distributed_vector!
# Krylov solvers
export ParallelKrylovSolver, ParallelGMRES, ParallelCG
export create_parallel_gmres, solve!
# Preconditioning
export BlockJacobiPreconditioner, create_block_jacobi_preconditioner, apply_preconditioner!