#include <iostream>
#include <iomanip>
#include <chrono>
#include <cstring>
#include <cmath>
#include <random>
#include <vector>
#include <fstream>
#include <sstream>
#include <algorithm>
#include <mpi.h>
#include "tensor_mat19.h"
#include "pcg_def.h"
#include "tensor_mat_multi_cav.h"

using namespace TensorMat19;
using namespace TensorMatMultiCavity;

// Debug log verbosity levels, in increasing order of detail.
// Used as the first argument of the DEBUG_LOG macro, which is defined in
// tensor_mat_multi_cav.h (see note below) — a message is emitted only when
// its level is at or below the configured threshold (convention inferred
// from usage in this file; confirm against tensor_mat_multi_cav.h).
#define DEBUG_NONE 0     // no debug output
#define DEBUG_ERROR 1    // errors only
#define DEBUG_WARN 2     // errors and warnings
#define DEBUG_INFO 3     // high-level progress (per-process topology, dims)
#define DEBUG_DETAIL 4   // per-phase detail (communication phases)
#define DEBUG_VERBOSE 5  // full per-message tracing (buffer pack/unpack)

// We'll use the DEBUG_LOG macro from tensor_mat_multi_cav.h instead of redefining it here

// Stencil offsets for the 19-point stencil in FDM
// Stencil offsets for the 19-point stencil in FDM.
// Each row is an {di, dj, dk} displacement from the center cell:
// 1 center + 6 face neighbours + 12 edge-diagonal neighbours
// (4 in the xy-plane, 8 involving the z-direction). The 8 corner
// neighbours of the full 27-point cube are excluded.
const int stencil_offsets[19][3] = {
    {0, 0, 0},    // Center
    {1, 0, 0},    // East
    {-1, 0, 0},   // West
    {0, 1, 0},    // North
    {0, -1, 0},   // South
    {0, 0, 1},    // Top
    {0, 0, -1},   // Bottom
    {1, 1, 0},    // Northeast
    {-1, 1, 0},   // Northwest
    {1, -1, 0},   // Southeast
    {-1, -1, 0},  // Southwest
    {1, 0, 1},    // East-Top
    {-1, 0, 1},   // West-Top
    {0, 1, 1},    // North-Top
    {0, -1, 1},   // South-Top
    {1, 0, -1},   // East-Bottom
    {-1, 0, -1},  // West-Bottom
    {0, 1, -1},   // North-Bottom
    {0, -1, -1}   // South-Bottom
};

// Structure to hold per-process topology information, gathered for
// diagnostic printing (see print_process_grid_layout below).
struct ProcessInfo {
    int rank;         // MPI rank of the process
    int coords[2];    // Cartesian coordinates — presumably {row/y, col/x} matching mpi_coords; confirm at fill site
    int neighbors[4]; // north, south, east, west
};

// Function to decompose the global grid into local grids for each MPI process
void decompose_grid(int global_imt, int global_jmt, int global_kmt, TensorMatMultiCavity::LocalGridInfo& local_grid) {
    // Set up a 2D Cartesian communicator
    MPI_Comm cart_comm;
    int dims[2] = {TensorMatMultiCavity::mpi_procs_y, TensorMatMultiCavity::mpi_procs_x};  // dims[0]=rows, dims[1]=cols
    int periods[2] = {0, 0};  // Non-periodic boundaries for local grid
    
    MPI_Cart_create(MPI_COMM_WORLD, 2, dims, periods, 1, &cart_comm);
    MPI_Cart_coords(cart_comm, TensorMatMultiCavity::mpi_rank, 2, TensorMatMultiCavity::mpi_coords);
    
    // Determine neighbors using standard Cartesian topology for local grid mode
    MPI_Cart_shift(cart_comm, 0, 1, &local_grid.south_rank, &local_grid.north_rank);
    MPI_Cart_shift(cart_comm, 1, 1, &local_grid.west_rank, &local_grid.east_rank);
    
    // Calculate local grid dimensions
    local_grid.global_imt = global_imt;
    local_grid.global_jmt = global_jmt;
    local_grid.global_kmt = global_kmt;
    
    // Calculate base block size for each dimension
    int base_imt = global_imt / TensorMatMultiCavity::mpi_procs_x;
    int base_jmt = global_jmt / TensorMatMultiCavity::mpi_procs_y;
    
    // Calculate remainder to distribute
    int remainder_i = global_imt % TensorMatMultiCavity::mpi_procs_x;
    int remainder_j = global_jmt % TensorMatMultiCavity::mpi_procs_y;
    
    // Determine local grid dimensions
    local_grid.local_imt = base_imt + (TensorMatMultiCavity::mpi_coords[1] < remainder_i ? 1 : 0);
    local_grid.local_jmt = base_jmt + (TensorMatMultiCavity::mpi_coords[0] < remainder_j ? 1 : 0);
    local_grid.local_kmt = global_kmt;  // No decomposition in z-direction
    
    // Calculate starting indices in global grid
    local_grid.local_ims = TensorMatMultiCavity::mpi_coords[1] * base_imt + std::min(TensorMatMultiCavity::mpi_coords[1], remainder_i);
    local_grid.local_jms = TensorMatMultiCavity::mpi_coords[0] * base_jmt + std::min(TensorMatMultiCavity::mpi_coords[0], remainder_j);
    local_grid.local_kms = 0;
    
    // Calculate ending indices
    local_grid.local_ime = local_grid.local_ims + local_grid.local_imt - 1;
    local_grid.local_jme = local_grid.local_jms + local_grid.local_jmt - 1;
    local_grid.local_kme = local_grid.local_kms + local_grid.local_kmt - 1;
    
    // Set halo width to 1 for the 19-point stencil
    local_grid.halo_width = 1;
    
    // Print detailed process information
    DEBUG_LOG(DEBUG_INFO, "Process " << TensorMatMultiCavity::mpi_rank << " position: (" << TensorMatMultiCavity::mpi_coords[1] << "," << TensorMatMultiCavity::mpi_coords[0] << ")");
    DEBUG_LOG(DEBUG_INFO, "Process " << TensorMatMultiCavity::mpi_rank << " neighbors: N=" << local_grid.north_rank 
                         << ", S=" << local_grid.south_rank 
                         << ", E=" << local_grid.east_rank 
                         << ", W=" << local_grid.west_rank);
    DEBUG_LOG(DEBUG_INFO, "Process " << TensorMatMultiCavity::mpi_rank << " local dims: " << local_grid.local_imt << "x" 
                         << local_grid.local_jmt << "x" << local_grid.local_kmt);
    DEBUG_LOG(DEBUG_DETAIL, "Process " << TensorMatMultiCavity::mpi_rank << " global domain: i=[" << local_grid.local_ims << "," << local_grid.local_ime 
             << "], j=[" << local_grid.local_jms << "," << local_grid.local_jme 
             << "], k=[" << local_grid.local_kms << "," << local_grid.local_kme << "]");
    
    // Synchronize after printing to avoid mixed output
    MPI_Barrier(MPI_COMM_WORLD);
}

// Add a new function to set up global communication mode for comparison
void decompose_grid_global_mode(int global_imt, int global_jmt, int global_kmt, TensorMatMultiCavity::LocalGridInfo& local_grid) {
    // Set up a 2D Cartesian communicator first (we still need the coords)
    MPI_Comm cart_comm;
    int dims[2] = {TensorMatMultiCavity::mpi_procs_y, TensorMatMultiCavity::mpi_procs_x};  // dims[0]=rows, dims[1]=cols
    int periods[2] = {0, 0};  // Non-periodic boundaries for local grid
    
    MPI_Cart_create(MPI_COMM_WORLD, 2, dims, periods, 1, &cart_comm);
    MPI_Cart_coords(cart_comm, TensorMatMultiCavity::mpi_rank, 2, TensorMatMultiCavity::mpi_coords);
    
    // Get row and column for this process
    int row = TensorMatMultiCavity::mpi_coords[0];
    int col = TensorMatMultiCavity::mpi_coords[1];
    
    // Calculate total processes in grid
    int num_procs_x = TensorMatMultiCavity::mpi_procs_x;
    int num_procs_y = TensorMatMultiCavity::mpi_procs_y;
    
    // Get current rank in row-major order
    int rank = row * num_procs_x + col;
    
    // 1. East-West: Standard wrap-around within each row
    // East neighbor: next process in row (wrap at end)
    int east_rank = (col == num_procs_x - 1) ? (row * num_procs_x) : (rank + 1);
    // West neighbor: previous process in row (wrap at beginning)
    int west_rank = (col == 0) ? (row * num_procs_x + num_procs_x - 1) : (rank - 1);
    
    // Check for self-connections in East-West
    if (east_rank == rank) {
        std::cerr << "ERROR: Process " << rank << " would have self-connection to EAST. Setting to MPI_PROC_NULL." << std::endl;
        east_rank = MPI_PROC_NULL;
    }
    if (west_rank == rank) {
        std::cerr << "ERROR: Process " << rank << " would have self-connection to WEST. Setting to MPI_PROC_NULL." << std::endl;
        west_rank = MPI_PROC_NULL;
    }
    
    // 2. North-South: Using mirroring pattern that works for any grid size
    int north_rank, south_rank;
    
    // For North neighbor:
    if (row == num_procs_y - 1) {  // Top row processes
        // For top row, use a mirroring pattern that works for any grid size
        // We'll use a formula that ensures each process connects to a different process
        // and that no process connects to itself
        int mirror_offset = num_procs_x / 2;
        if (mirror_offset == 0) mirror_offset = 1;  // Ensure at least offset by 1
        
        north_rank = row * num_procs_x + ((col + mirror_offset) % num_procs_x);
        
        // Check if this would create a self-connection
        if (north_rank == rank) {
            // Use a different offset
            mirror_offset = (mirror_offset + 1) % num_procs_x;
            if (mirror_offset == 0) mirror_offset = 1;
            north_rank = row * num_procs_x + ((col + mirror_offset) % num_procs_x);
        }
    } else {
        // For non-top row processes, connect to the row above
        north_rank = ((row + 1) * num_procs_x) + col;
    }
    
    // For South neighbor:
    if (row == 0) {  // Bottom row processes
        // Similar mirroring pattern for bottom row
        int mirror_offset = num_procs_x / 2;
        if (mirror_offset == 0) mirror_offset = 1;  // Ensure at least offset by 1
        
        south_rank = row * num_procs_x + ((col + mirror_offset) % num_procs_x);
        
        // Check if this would create a self-connection
        if (south_rank == rank) {
            // Use a different offset
            mirror_offset = (mirror_offset + 1) % num_procs_x;
            if (mirror_offset == 0) mirror_offset = 1;
            south_rank = row * num_procs_x + ((col + mirror_offset) % num_procs_x);
        }
    } else {
        // For non-bottom row processes, connect to the row below
        south_rank = ((row - 1) * num_procs_x) + col;
    }
    
    // Double-check for self-connections in North-South
    if (north_rank == rank) {
        std::cerr << "ERROR: Process " << rank << " would have self-connection to NORTH. Setting to MPI_PROC_NULL." << std::endl;
        north_rank = MPI_PROC_NULL;
    }
    if (south_rank == rank) {
        std::cerr << "ERROR: Process " << rank << " would have self-connection to SOUTH. Setting to MPI_PROC_NULL." << std::endl;
        south_rank = MPI_PROC_NULL;
    }
    
    // Assign calculated neighbors
    local_grid.north_rank = north_rank;
    local_grid.south_rank = south_rank;
    local_grid.east_rank = east_rank;
    local_grid.west_rank = west_rank;
    
    // Calculate local grid dimensions
    local_grid.global_imt = global_imt;
    local_grid.global_jmt = global_jmt;
    local_grid.global_kmt = global_kmt;
    
    // Calculate base block size for each dimension
    int base_imt = global_imt / TensorMatMultiCavity::mpi_procs_x;
    int base_jmt = global_jmt / TensorMatMultiCavity::mpi_procs_y;
    
    // Calculate remainder to distribute
    int remainder_i = global_imt % TensorMatMultiCavity::mpi_procs_x;
    int remainder_j = global_jmt % TensorMatMultiCavity::mpi_procs_y;
    
    // Determine local grid dimensions
    local_grid.local_imt = base_imt + (TensorMatMultiCavity::mpi_coords[1] < remainder_i ? 1 : 0);
    local_grid.local_jmt = base_jmt + (TensorMatMultiCavity::mpi_coords[0] < remainder_j ? 1 : 0);
    local_grid.local_kmt = global_kmt;  // No decomposition in z-direction
    
    // Calculate starting indices in global grid
    local_grid.local_ims = TensorMatMultiCavity::mpi_coords[1] * base_imt + std::min(TensorMatMultiCavity::mpi_coords[1], remainder_i);
    local_grid.local_jms = TensorMatMultiCavity::mpi_coords[0] * base_jmt + std::min(TensorMatMultiCavity::mpi_coords[0], remainder_j);
    local_grid.local_kms = 0;
    
    // Calculate ending indices
    local_grid.local_ime = local_grid.local_ims + local_grid.local_imt - 1;
    local_grid.local_jme = local_grid.local_jms + local_grid.local_jmt - 1;
    local_grid.local_kme = local_grid.local_kms + local_grid.local_kmt - 1;
    
    // Set halo width to 1 for the 19-point stencil
    local_grid.halo_width = 1;
    
    // Print detailed process information
    DEBUG_LOG(DEBUG_INFO, "GLOBAL MODE - Process " << mpi_rank << " position: (" << mpi_coords[1] << "," << mpi_coords[0] << ")");
    DEBUG_LOG(DEBUG_INFO, "GLOBAL MODE - Process " << mpi_rank << " neighbors: N=" << local_grid.north_rank 
                         << ", S=" << local_grid.south_rank 
                         << ", E=" << local_grid.east_rank 
                         << ", W=" << local_grid.west_rank);
    
    // Synchronize after printing to avoid mixed output
    MPI_Barrier(MPI_COMM_WORLD);
}

// Exchange ghost/halo cells with the four Cartesian neighbours ("local" mode).
//
// vec        — padded local field, addressed through grid.padded_index(i,j,k)
//              with interior indices [0, local_*mt) and halos at -halo_width
//              and local_*mt.
// grid       — grid parameters (only is_global_grid and padded_index used here).
// local_grid — decomposition info from decompose_grid (neighbour ranks,
//              extents, halo width).
//
// Two phases, separated by a barrier: east-west columns first, then
// north-south rows. Each phase packs a halo_width-thick slab, exchanges it
// with MPI_Sendrecv (deadlock-free), and unpacks into the halo. Finally the
// z-direction halos are zeroed (Dirichlet BC) when local_kmt > 1.
//
// Fix: the four loop nests that "set external boundaries to zero" contained
// only commented-out statements — they burned O(jmt*kmt) / O(imt*kmt)
// iterations doing nothing. The halos are zero-initialized elsewhere (as the
// original comments stated), so the no-op loops were removed; only the
// diagnostic logging is kept. Also, the unused MPI_Status was replaced with
// MPI_STATUS_IGNORE.
void communicate_local_boundaries(float* vec, const TensorMat19::GridParams& grid, const TensorMatMultiCavity::LocalGridInfo& local_grid) {
    // Nothing to exchange with a single process.
    if (mpi_size <= 1) {
        return;
    }
    
    DEBUG_LOG(DEBUG_DETAIL, "Process " << mpi_rank << " starting boundary communication");
    
    // Dirichlet boundary conditions on external boundaries: halo entries on
    // the edge of the global domain must stay zero. They are initialized to
    // zero elsewhere, so nothing needs to be written here — just log which
    // global edges this process owns.
    if (!grid.is_global_grid) {
        if (local_grid.west_rank == MPI_PROC_NULL) {
            DEBUG_LOG(DEBUG_VERBOSE, "Process " << mpi_rank << " setting left boundary to zero (west edge)");
        }
        if (local_grid.east_rank == MPI_PROC_NULL) {
            DEBUG_LOG(DEBUG_VERBOSE, "Process " << mpi_rank << " setting right boundary to zero (east edge)");
        }
        if (local_grid.south_rank == MPI_PROC_NULL) {
            DEBUG_LOG(DEBUG_VERBOSE, "Process " << mpi_rank << " setting bottom boundary to zero (south edge)");
        }
        if (local_grid.north_rank == MPI_PROC_NULL) {
            DEBUG_LOG(DEBUG_VERBOSE, "Process " << mpi_rank << " setting top boundary to zero (north edge)");
        }
    }
    
    int halo_width = local_grid.halo_width;
    
    // Make sure all processes have initialized their boundaries
    MPI_Barrier(MPI_COMM_WORLD);
    
    // Distinct tags keep east-west and north-south traffic separate.
    const int TAG_EW = 100;  // Tag for east-west communication
    const int TAG_NS = 200;  // Tag for north-south communication
    
    // ===== EAST-WEST COMMUNICATION =====
    
    if (local_grid.local_imt > 0) {
        // One slab of halo_width columns over the full j-k extent.
        int ew_buffer_size = halo_width * local_grid.local_jmt * local_grid.local_kmt;
        
        float* east_buffer = nullptr;
        float* west_buffer = nullptr;
        
        if (local_grid.east_rank != MPI_PROC_NULL) {
            east_buffer = new float[ew_buffer_size];
            
            // Pack the easternmost interior columns to send east.
            int idx = 0;
            for (int k = 0; k < local_grid.local_kmt; k++) {
                for (int j = 0; j < local_grid.local_jmt; j++) {
                    for (int i = 0; i < halo_width; i++) {
                        int local_i = local_grid.local_imt - halo_width + i;
                        east_buffer[idx++] = vec[grid.padded_index(local_i, j, k)];
                    }
                }
            }
            DEBUG_LOG(DEBUG_VERBOSE, "Process " << mpi_rank << " prepared east buffer for rank " << local_grid.east_rank);
        }
        
        if (local_grid.west_rank != MPI_PROC_NULL) {
            west_buffer = new float[ew_buffer_size];
            
            // Pack the westernmost interior columns to send west.
            int idx = 0;
            for (int k = 0; k < local_grid.local_kmt; k++) {
                for (int j = 0; j < local_grid.local_jmt; j++) {
                    for (int i = 0; i < halo_width; i++) {
                        west_buffer[idx++] = vec[grid.padded_index(i, j, k)];
                    }
                }
            }
            DEBUG_LOG(DEBUG_VERBOSE, "Process " << mpi_rank << " prepared west buffer for rank " << local_grid.west_rank);
        }
        
        // Receive buffers, allocated only for live neighbours.
        float* recv_east_buffer = nullptr;
        float* recv_west_buffer = nullptr;
        
        if (local_grid.east_rank != MPI_PROC_NULL) {
            recv_east_buffer = new float[ew_buffer_size];
            DEBUG_LOG(DEBUG_VERBOSE, "Process " << mpi_rank << " allocated east receive buffer");
        }
        
        if (local_grid.west_rank != MPI_PROC_NULL) {
            recv_west_buffer = new float[ew_buffer_size];
            DEBUG_LOG(DEBUG_VERBOSE, "Process " << mpi_rank << " allocated west receive buffer");
        }
        
        // MPI_Sendrecv pairs each send with the matching receive, so no
        // ordering deadlock is possible.
        
        if (local_grid.east_rank != MPI_PROC_NULL) {
            DEBUG_LOG(DEBUG_VERBOSE, "Process " << mpi_rank << " sending east and receiving from east");
            MPI_Sendrecv(
                east_buffer, ew_buffer_size, MPI_FLOAT, local_grid.east_rank, TAG_EW,
                recv_east_buffer, ew_buffer_size, MPI_FLOAT, local_grid.east_rank, TAG_EW,
                MPI_COMM_WORLD, MPI_STATUS_IGNORE
            );
        }
        
        if (local_grid.west_rank != MPI_PROC_NULL) {
            DEBUG_LOG(DEBUG_VERBOSE, "Process " << mpi_rank << " sending west and receiving from west");
            MPI_Sendrecv(
                west_buffer, ew_buffer_size, MPI_FLOAT, local_grid.west_rank, TAG_EW,
                recv_west_buffer, ew_buffer_size, MPI_FLOAT, local_grid.west_rank, TAG_EW,
                MPI_COMM_WORLD, MPI_STATUS_IGNORE
            );
        }
        
        // Unpack received slabs into the east halo (i >= local_imt) and
        // west halo (i < 0).
        if (local_grid.east_rank != MPI_PROC_NULL && recv_east_buffer != nullptr) {
            int idx = 0;
            for (int k = 0; k < local_grid.local_kmt; k++) {
                for (int j = 0; j < local_grid.local_jmt; j++) {
                    for (int i = 0; i < halo_width; i++) {
                        vec[grid.padded_index(local_grid.local_imt + i, j, k)] = recv_east_buffer[idx++];
                    }
                }
            }
            DEBUG_LOG(DEBUG_VERBOSE, "Process " << mpi_rank << " unpacked east data");
        }
        
        if (local_grid.west_rank != MPI_PROC_NULL && recv_west_buffer != nullptr) {
            int idx = 0;
            for (int k = 0; k < local_grid.local_kmt; k++) {
                for (int j = 0; j < local_grid.local_jmt; j++) {
                    for (int i = 0; i < halo_width; i++) {
                        vec[grid.padded_index(-halo_width + i, j, k)] = recv_west_buffer[idx++];
                    }
                }
            }
            DEBUG_LOG(DEBUG_VERBOSE, "Process " << mpi_rank << " unpacked west data");
        }
        
        // Cleanup
        delete[] east_buffer;
        delete[] west_buffer;
        delete[] recv_east_buffer;
        delete[] recv_west_buffer;
        
        DEBUG_LOG(DEBUG_DETAIL, "Process " << mpi_rank << " completed east-west communication");
    }
    
    // Make sure all east-west communications are completed before starting north-south
    MPI_Barrier(MPI_COMM_WORLD);
    
    // ===== NORTH-SOUTH COMMUNICATION =====
    
    if (local_grid.local_jmt > 0) {
        // One slab of halo_width rows over the full i-k extent.
        int ns_buffer_size = halo_width * local_grid.local_imt * local_grid.local_kmt;
        
        float* north_buffer = nullptr;
        float* south_buffer = nullptr;
        
        if (local_grid.north_rank != MPI_PROC_NULL) {
            north_buffer = new float[ns_buffer_size];
            
            // Pack the northernmost interior rows to send north.
            int idx = 0;
            for (int k = 0; k < local_grid.local_kmt; k++) {
                for (int j = 0; j < halo_width; j++) {
                    int local_j = local_grid.local_jmt - halo_width + j;
                    for (int i = 0; i < local_grid.local_imt; i++) {
                        north_buffer[idx++] = vec[grid.padded_index(i, local_j, k)];
                    }
                }
            }
            DEBUG_LOG(DEBUG_VERBOSE, "Process " << mpi_rank << " prepared north buffer for rank " << local_grid.north_rank);
        }
        
        if (local_grid.south_rank != MPI_PROC_NULL) {
            south_buffer = new float[ns_buffer_size];
            
            // Pack the southernmost interior rows to send south.
            int idx = 0;
            for (int k = 0; k < local_grid.local_kmt; k++) {
                for (int j = 0; j < halo_width; j++) {
                    for (int i = 0; i < local_grid.local_imt; i++) {
                        south_buffer[idx++] = vec[grid.padded_index(i, j, k)];
                    }
                }
            }
            DEBUG_LOG(DEBUG_VERBOSE, "Process " << mpi_rank << " prepared south buffer for rank " << local_grid.south_rank);
        }
        
        // Receive buffers, allocated only for live neighbours.
        float* recv_north_buffer = nullptr;
        float* recv_south_buffer = nullptr;
        
        if (local_grid.north_rank != MPI_PROC_NULL) {
            recv_north_buffer = new float[ns_buffer_size];
            DEBUG_LOG(DEBUG_VERBOSE, "Process " << mpi_rank << " allocated north receive buffer");
        }
        
        if (local_grid.south_rank != MPI_PROC_NULL) {
            recv_south_buffer = new float[ns_buffer_size];
            DEBUG_LOG(DEBUG_VERBOSE, "Process " << mpi_rank << " allocated south receive buffer");
        }
        
        // MPI_Sendrecv again avoids ordering deadlocks.
        
        if (local_grid.north_rank != MPI_PROC_NULL) {
            DEBUG_LOG(DEBUG_VERBOSE, "Process " << mpi_rank << " sending north and receiving from north");
            MPI_Sendrecv(
                north_buffer, ns_buffer_size, MPI_FLOAT, local_grid.north_rank, TAG_NS,
                recv_north_buffer, ns_buffer_size, MPI_FLOAT, local_grid.north_rank, TAG_NS, 
                MPI_COMM_WORLD, MPI_STATUS_IGNORE
            );
        }
        
        if (local_grid.south_rank != MPI_PROC_NULL) {
            DEBUG_LOG(DEBUG_VERBOSE, "Process " << mpi_rank << " sending south and receiving from south");
            MPI_Sendrecv(
                south_buffer, ns_buffer_size, MPI_FLOAT, local_grid.south_rank, TAG_NS,
                recv_south_buffer, ns_buffer_size, MPI_FLOAT, local_grid.south_rank, TAG_NS,
                MPI_COMM_WORLD, MPI_STATUS_IGNORE
            );
        }
        
        // Unpack received slabs into the north halo (j >= local_jmt) and
        // south halo (j < 0).
        if (local_grid.north_rank != MPI_PROC_NULL && recv_north_buffer != nullptr) {
            int idx = 0;
            for (int k = 0; k < local_grid.local_kmt; k++) {
                for (int j = 0; j < halo_width; j++) {
                    for (int i = 0; i < local_grid.local_imt; i++) {
                        vec[grid.padded_index(i, local_grid.local_jmt + j, k)] = recv_north_buffer[idx++];
                    }
                }
            }
            DEBUG_LOG(DEBUG_VERBOSE, "Process " << mpi_rank << " unpacked north data");
        }
        
        if (local_grid.south_rank != MPI_PROC_NULL && recv_south_buffer != nullptr) {
            int idx = 0;
            for (int k = 0; k < local_grid.local_kmt; k++) {
                for (int j = 0; j < halo_width; j++) {
                    for (int i = 0; i < local_grid.local_imt; i++) {
                        vec[grid.padded_index(i, -halo_width + j, k)] = recv_south_buffer[idx++];
                    }
                }
            }
            DEBUG_LOG(DEBUG_VERBOSE, "Process " << mpi_rank << " unpacked south data");
        }
        
        // Cleanup
        delete[] north_buffer;
        delete[] south_buffer;
        delete[] recv_north_buffer;
        delete[] recv_south_buffer;
        
        DEBUG_LOG(DEBUG_DETAIL, "Process " << mpi_rank << " completed north-south communication");
    }
    
    // For both grid types, zero the z-direction halos (Dirichlet BC).
    // Only needed when the grid is truly 3D (kmt > 1).
    if (local_grid.local_kmt > 1) {
        for (int j = -local_grid.halo_width; j < local_grid.local_jmt + local_grid.halo_width; j++) {
            for (int i = -local_grid.halo_width; i < local_grid.local_imt + local_grid.halo_width; i++) {
                // Bottom boundary
                vec[grid.padded_index(i, j, -1)] = 0.0f;
                
                // Top boundary
                vec[grid.padded_index(i, j, local_grid.local_kmt)] = 0.0f;
            }
        }
    }
    
    // Final synchronization
    MPI_Barrier(MPI_COMM_WORLD);
    
    DEBUG_LOG(DEBUG_DETAIL, "Process " << mpi_rank << " completed all boundary communication");
}

// Simplified halo exchange for testing the "global" neighbour wiring, using
// direct row-major indexing into the padded vector rather than
// grid.padded_index.
//
// Only one x/y ring of a single k-layer is exchanged: the sole layer when
// local_kmt == 1 (no k padding in that case), otherwise the first interior
// layer (padded k index 1). East/west columns are exchanged first, then
// (after a barrier) north/south rows. All four neighbour links are assumed
// valid or MPI_PROC_NULL (MPI_Sendrecv is a no-op toward MPI_PROC_NULL).
//
// Cleanup: the original computed an unused padded_kmt and duplicated every
// index computation in an `if (local_kmt == 1)` branch whose two arms are
// identical once the k-plane offset (zero for the 2D case) is factored out;
// both redundancies were removed with identical behavior.
void communicate_global_boundaries_simple(float* vec, const TensorMat19::GridParams&, const TensorMatMultiCavity::LocalGridInfo& local_grid) {
    // Nothing to exchange with a single process.
    if (mpi_size <= 1) {
        return;
    }
    
    // Local interior dimensions.
    const int local_imt = local_grid.local_imt;
    const int local_jmt = local_grid.local_jmt;
    const int local_kmt = local_grid.local_kmt;
    
    // Padded (halo-included) x/y dimensions of the internal representation.
    const int padded_imt = local_imt + 2;
    const int padded_jmt = local_jmt + 2;
    
    // Linear offset of the exchanged k-layer: 0 for the 2D case (kmt == 1,
    // no k padding), otherwise one full padded plane (first interior layer).
    const int k_base = (local_kmt == 1) ? 0 : padded_imt * padded_jmt;
    
    // Make sure all processes have initialized their boundaries
    MPI_Barrier(MPI_COMM_WORLD);
    
    // === East-West Communication ===
    
    // One interior column per message.
    int ew_buffer_size = local_jmt;
    
    float* east_send_buffer = new float[ew_buffer_size];
    float* west_send_buffer = new float[ew_buffer_size];
    float* east_recv_buffer = new float[ew_buffer_size];
    float* west_recv_buffer = new float[ew_buffer_size];
    
    // Pack the rightmost (i == local_imt) and leftmost (i == 1) interior
    // columns of the computational layer.
    for (int j = 0; j < local_jmt; j++) {
        east_send_buffer[j] = vec[k_base + (j + 1) * padded_imt + local_imt];
        west_send_buffer[j] = vec[k_base + (j + 1) * padded_imt + 1];
    }
    
    // Send East, Receive from West
    MPI_Sendrecv(
        east_send_buffer, ew_buffer_size, MPI_FLOAT, local_grid.east_rank, 100,
        west_recv_buffer, ew_buffer_size, MPI_FLOAT, local_grid.west_rank, 100,
        MPI_COMM_WORLD, MPI_STATUS_IGNORE
    );
    
    // Send West, Receive from East
    MPI_Sendrecv(
        west_send_buffer, ew_buffer_size, MPI_FLOAT, local_grid.west_rank, 101,
        east_recv_buffer, ew_buffer_size, MPI_FLOAT, local_grid.east_rank, 101,
        MPI_COMM_WORLD, MPI_STATUS_IGNORE
    );
    
    // Unpack into the east halo column (i == local_imt + 1) and the west
    // halo column (i == 0).
    for (int j = 0; j < local_jmt; j++) {
        vec[k_base + (j + 1) * padded_imt + (local_imt + 1)] = east_recv_buffer[j];
        vec[k_base + (j + 1) * padded_imt + 0] = west_recv_buffer[j];
    }
    
    // Clean up
    delete[] east_send_buffer;
    delete[] west_send_buffer;
    delete[] east_recv_buffer;
    delete[] west_recv_buffer;
    
    // Synchronize before north-south communication
    MPI_Barrier(MPI_COMM_WORLD);
    
    // === North-South Communication ===
    
    // One interior row per message.
    int ns_buffer_size = local_imt;
    
    float* north_send_buffer = new float[ns_buffer_size];
    float* south_send_buffer = new float[ns_buffer_size];
    float* north_recv_buffer = new float[ns_buffer_size];
    float* south_recv_buffer = new float[ns_buffer_size];
    
    // Pack the topmost (j == local_jmt) and bottommost (j == 1) interior
    // rows of the computational layer.
    for (int i = 0; i < local_imt; i++) {
        north_send_buffer[i] = vec[k_base + local_jmt * padded_imt + (i + 1)];
        south_send_buffer[i] = vec[k_base + 1 * padded_imt + (i + 1)];
    }
    
    // Send North, Receive from South
    MPI_Sendrecv(
        north_send_buffer, ns_buffer_size, MPI_FLOAT, local_grid.north_rank, 102,
        south_recv_buffer, ns_buffer_size, MPI_FLOAT, local_grid.south_rank, 102,
        MPI_COMM_WORLD, MPI_STATUS_IGNORE
    );
    
    // Send South, Receive from North
    MPI_Sendrecv(
        south_send_buffer, ns_buffer_size, MPI_FLOAT, local_grid.south_rank, 103,
        north_recv_buffer, ns_buffer_size, MPI_FLOAT, local_grid.north_rank, 103,
        MPI_COMM_WORLD, MPI_STATUS_IGNORE
    );
    
    // Unpack into the south halo row (j == 0) and the north halo row
    // (j == local_jmt + 1).
    for (int i = 0; i < local_imt; i++) {
        vec[k_base + 0 * padded_imt + (i + 1)] = south_recv_buffer[i];
        vec[k_base + (local_jmt + 1) * padded_imt + (i + 1)] = north_recv_buffer[i];
    }
    
    // Clean up north-south buffers
    delete[] north_send_buffer;
    delete[] south_send_buffer;
    delete[] north_recv_buffer;
    delete[] south_recv_buffer;
    
    // Final synchronization
    MPI_Barrier(MPI_COMM_WORLD);
}

// Fill `vec` with `size` pseudo-random floats drawn uniformly from [0, 1).
// The fixed default seed (42) makes the sequence reproducible, so single-
// and multi-process runs can be compared deterministically.
void initialize_random_vector(float* vec, int size, int seed = 42) {
    std::mt19937 engine(seed);
    std::uniform_real_distribution<float> uniform01(0.0f, 1.0f);
    std::generate(vec, vec + size, [&]() { return uniform01(engine); });
}

// Compute the global L2 norm of a distributed vector.
// Each rank sums the squares of its local entries; when more than one
// process is running, the partial sums are combined with MPI_Allreduce
// so every rank returns the same global norm.  Collective when
// mpi_size > 1 — all ranks must call it.
float vector_norm(const float* vec, int size) {
    float local_sq = 0.0f;
    for (int idx = 0; idx < size; ++idx) {
        local_sq += vec[idx] * vec[idx];
    }
    float global_sq = local_sq;
    if (mpi_size > 1) {
        MPI_Allreduce(&local_sq, &global_sq, 1, MPI_FLOAT, MPI_SUM, MPI_COMM_WORLD);
    }
    return std::sqrt(global_sq);
}

// Compute the L2 norm of the elementwise difference (vec1 - vec2) over
// `size` entries.  Purely local to this rank: unlike vector_norm, no MPI
// reduction is performed here.
float vector_diff_norm(const float* vec1, const float* vec2, int size) {
    float accum = 0.0f;
    for (int idx = 0; idx < size; ++idx) {
        const float delta = vec1[idx] - vec2[idx];
        accum += delta * delta;
    }
    return std::sqrt(accum);
}

// Serialize `size` floats from `vec` to `filename` as raw binary bytes.
// If the file cannot be opened, an error is reported to stderr and
// nothing is written.  The stream is closed by its destructor (RAII).
void save_vector_to_file(const float* vec, int size, const std::string& filename) {
    std::ofstream out(filename, std::ios::binary);
    if (!out) {
        std::cerr << "Error opening file: " << filename << std::endl;
        return;
    }
    out.write(reinterpret_cast<const char*>(vec), size * sizeof(float));
}

// Load `size` raw-binary floats from `filename` into `vec`.
//
// Returns true only when the file opened AND the full
// size * sizeof(float) bytes were read.  A missing or truncated file
// reports an error to stderr and returns false.
// (BUGFIX: the previous version returned true even when the read came up
// short, silently leaving `vec` partially filled.)
bool load_vector_from_file(float* vec, int size, const std::string& filename) {
    std::ifstream infile(filename, std::ios::binary);
    if (!infile) {
        std::cerr << "Error opening file: " << filename << std::endl;
        return false;
    }

    const std::streamsize expected =
        static_cast<std::streamsize>(size) * static_cast<std::streamsize>(sizeof(float));
    infile.read(reinterpret_cast<char*>(vec), expected);
    if (infile.gcount() != expected) {
        std::cerr << "Error reading file (truncated): " << filename << std::endl;
        return false;
    }
    return true;
}

// Function to print processes in a 2D grid layout
void print_process_grid_layout(const std::vector<ProcessInfo>& all_info, int procs_x, int procs_y) {
    if (TensorMatMultiCavity::mpi_rank != 0) return;
    
    // Create 2D grid of process info
    std::vector<std::vector<ProcessInfo>> grid(procs_y, std::vector<ProcessInfo>(procs_x));
    
    // Fill the grid with process info
    for (const auto& info : all_info) {
        int row = info.coords[0];  // y coordinate
        int col = info.coords[1];  // x coordinate
        if (row >= 0 && row < procs_y && col >= 0 && col < procs_x) {
            grid[row][col] = info;
        }
    }
    
    // Print grid layout
    std::cout << "\nProcess Grid Layout (format: [N,S,E,W]):" << std::endl;
    std::cout << "-------------------------------------" << std::endl;
    
    // Print column headers
    std::cout << "   ";
    for (int col = 0; col < procs_x; col++) {
        std::cout << "    " << col << "    ";
    }
    std::cout << std::endl << "   ";
    for (int col = 0; col < procs_x; col++) {
        std::cout << "---------";
    }
    std::cout << std::endl;
    
    // Print grid rows
    for (int row = 0; row < procs_y; row++) {
        std::cout << row << " |";
        for (int col = 0; col < procs_x; col++) {
            std::string n = (grid[row][col].neighbors[0] == MPI_PROC_NULL) ? "x" : std::to_string(grid[row][col].neighbors[0]);
            std::string s = (grid[row][col].neighbors[1] == MPI_PROC_NULL) ? "x" : std::to_string(grid[row][col].neighbors[1]);
            std::string e = (grid[row][col].neighbors[2] == MPI_PROC_NULL) ? "x" : std::to_string(grid[row][col].neighbors[2]);
            std::string w = (grid[row][col].neighbors[3] == MPI_PROC_NULL) ? "x" : std::to_string(grid[row][col].neighbors[3]);
            
            std::cout << " [" << n << "," << s << "," << e << "," << w << "]";
        }
        std::cout << " |" << std::endl;
    }
    
    std::cout << "   ";
    for (int col = 0; col < procs_x; col++) {
        std::cout << "---------";
    }
    std::cout << std::endl;
    
    // Print legend
    std::cout << "Legend: [N,S,E,W] = [North neighbor, South neighbor, East neighbor, West neighbor]" << std::endl;
    std::cout << "x = no neighbor (boundary)" << std::endl;
    std::cout << "-------------------------------------" << std::endl;
}

// Function to print processes in a simplified 2D grid layout matching the requested format
//
// Prints one table line per process showing its rank, (col,row) position,
// and the ranks of its four neighbors ("null" at a boundary).  Runs on
// rank 0 only.  For grids larger than 6x6 in either dimension, only the
// first, middle, and last rows/columns are printed, with "..." separators,
// to keep the output readable.
void print_simple_process_grid_layout(const std::vector<ProcessInfo>& all_info, int procs_x, int procs_y) {
    if (TensorMatMultiCavity::mpi_rank != 0) return;
    
    // Create 2D grid of process info, indexed as grid[row][col]
    std::vector<std::vector<ProcessInfo>> grid(procs_y, std::vector<ProcessInfo>(procs_x));
    
    // Fill the grid with process info (coords[0] = row/y, coords[1] = col/x)
    for (const auto& info : all_info) {
        int row = info.coords[0];
        int col = info.coords[1];
        if (row >= 0 && row < procs_y && col >= 0 && col < procs_x) {
            grid[row][col] = info;
        }
    }
    
    // Print header
    std::cout << "\nProcess Grid Connectivity:" << std::endl;
    std::cout << "==============================================" << std::endl;
    std::cout << "Rank  Position  North     South    East     West" << std::endl;
    std::cout << "-----  -------  -----     -----    -----    -----" << std::endl;
    
    // For large grids, print a subset of processes to avoid overwhelming output
    bool print_all = (procs_x <= 6 && procs_y <= 6);
    
    // Print process info row by row, column by column
    for (int row = 0; row < procs_y; row++) {
        // For large grids, only print first, middle, and last rows
        bool print_row = print_all || 
                        row == 0 || 
                        row == procs_y - 1 || 
                        row == procs_y / 2;
        
        if (print_row) {
            for (int col = 0; col < procs_x; col++) {
                // For large grids, only print first, middle, and last columns
                bool print_col = print_all || 
                               col == 0 || 
                               col == procs_x - 1 || 
                               col == procs_x / 2;
                
                if (print_col) {
                    int id = grid[row][col].rank;
                    // neighbors[] order is N, S, E, W; MPI_PROC_NULL => "null"
                    std::string n = (grid[row][col].neighbors[0] == MPI_PROC_NULL) ? "null" : std::to_string(grid[row][col].neighbors[0]);
                    std::string s = (grid[row][col].neighbors[1] == MPI_PROC_NULL) ? "null" : std::to_string(grid[row][col].neighbors[1]);
                    std::string e = (grid[row][col].neighbors[2] == MPI_PROC_NULL) ? "null" : std::to_string(grid[row][col].neighbors[2]);
                    std::string w = (grid[row][col].neighbors[3] == MPI_PROC_NULL) ? "null" : std::to_string(grid[row][col].neighbors[3]);
                    
                    // Position is printed as (col,row), i.e. (x,y)
                    std::cout << std::setw(5) << id << "  " 
                              << "(" << col << "," << row << ")" << "  "
                              << std::setw(10) << n 
                              << std::setw(10) << s 
                              << std::setw(10) << e 
                              << std::setw(10) << w << std::endl;
                }
            }
            
            // Add separator if not showing all rows
            // (printed after row 0 in abbreviated mode, signalling skipped rows)
            if (!print_all && row != procs_y - 1 && row != procs_y / 2) {
                std::cout << "    ..." << std::endl;
            }
        }
    }
    
    std::cout << "==============================================" << std::endl;
    std::cout << "Note: For large grids, only a subset of processes is shown." << std::endl;
}

// Function to print the process grid and communication pattern.
//
// Gathers each rank's (row,col) coordinates and N/S/E/W neighbor ranks
// onto rank 0 and prints the full communication pattern in three
// formats (table, grid layout, simplified layout).
// Collective: every rank must call this (MPI_Gather + final MPI_Barrier).
void print_process_grid_communication(const LocalGridInfo& local_grid) {
    // Collect this rank's information for the gather
    ProcessInfo my_info;
    my_info.rank = mpi_rank;
    my_info.coords[0] = mpi_coords[0];  // y-coordinate (row)
    my_info.coords[1] = mpi_coords[1];  // x-coordinate (column)
    my_info.neighbors[0] = local_grid.north_rank;
    my_info.neighbors[1] = local_grid.south_rank;
    my_info.neighbors[2] = local_grid.east_rank;
    my_info.neighbors[3] = local_grid.west_rank;
    
    std::vector<ProcessInfo> all_info;
    
    if (mpi_rank == 0) {
        all_info.resize(mpi_size);
    }
    
    // Gather the raw bytes of the POD ProcessInfo struct to rank 0.
    // The receive buffer is only significant on the root.
    MPI_Gather(&my_info, sizeof(ProcessInfo), MPI_BYTE, 
               all_info.data(), sizeof(ProcessInfo), MPI_BYTE, 
               0, MPI_COMM_WORLD);
    
    // Rank 0 prints the process grid
    if (mpi_rank == 0) {
        std::cout << "\n===== PROCESS GRID COMMUNICATION PATTERN =====\n" << std::endl;
        std::cout << "Format: Rank (Row,Col) | N = North, S = South, E = East, W = West" << std::endl;
        std::cout << "For boundary processes, 'null' indicates no communication needed in that direction\n" << std::endl;
        
        // Sort by row (y) then column (x) for organized display
        std::sort(all_info.begin(), all_info.end(), 
                 [](const ProcessInfo& a, const ProcessInfo& b) {
                     if (a.coords[0] != b.coords[0]) return a.coords[0] < b.coords[0];
                     return a.coords[1] < b.coords[1];
                 });
        
        // Print header
        std::cout << std::setw(10) << "Rank" 
                  << std::setw(10) << "Position" 
                  << std::setw(10) << "North" 
                  << std::setw(10) << "South" 
                  << std::setw(10) << "East" 
                  << std::setw(10) << "West" << std::endl;
        std::cout << std::string(60, '-') << std::endl;
        
        // Print process info row by row.
        // BUGFIX: std::setw applies only to the very next insertion, so the
        // old `setw(10) << "(" << ...` padded just the "(" and misaligned
        // every column after Position.  Build the whole "(row,col)" token
        // first so it is padded as a single field.
        for (const auto& info : all_info) {
            const std::string position =
                "(" + std::to_string(info.coords[0]) + "," + std::to_string(info.coords[1]) + ")";
            std::cout << std::setw(10) << info.rank 
                      << std::setw(10) << position
                      << std::setw(10) << (info.neighbors[0] == MPI_PROC_NULL ? "null" : std::to_string(info.neighbors[0]))
                      << std::setw(10) << (info.neighbors[1] == MPI_PROC_NULL ? "null" : std::to_string(info.neighbors[1]))
                      << std::setw(10) << (info.neighbors[2] == MPI_PROC_NULL ? "null" : std::to_string(info.neighbors[2]))
                      << std::setw(10) << (info.neighbors[3] == MPI_PROC_NULL ? "null" : std::to_string(info.neighbors[3]))
                      << std::endl;
        }
        std::cout << "\n=============================================" << std::endl;
        
        // Also print the 2D grid layout
        print_process_grid_layout(all_info, mpi_procs_x, mpi_procs_y);
        
        // Print the simple grid layout
        print_simple_process_grid_layout(all_info, mpi_procs_x, mpi_procs_y);
    }
    
    // Make sure all processes wait for the output to complete
    MPI_Barrier(MPI_COMM_WORLD);
}

// Function to test global communication in a 2D grid.
//
// Prints each rank's neighbor assignment in rank order (barrier-sequenced
// so the output is readable), then delegates the actual halo exchange to
// TensorMatMultiCavity::communicate_global_boundaries, which is written
// to avoid the send/recv ordering deadlocks a naive implementation hits.
//
// `padded_imt` / `padded_jmt` are kept for interface compatibility with
// existing callers; the delegated routine derives sizes from `local_grid`.
// (Cleanup: removed the unused k-layer locals and the four per-direction
// tag constants left over from the old hand-rolled exchange.)
void test_global_communication(float* vec, int padded_imt, int padded_jmt, const TensorMatMultiCavity::LocalGridInfo& local_grid) {
    (void)padded_imt;  // unused — see note above
    (void)padded_jmt;  // unused — see note above

    // Nothing to exchange with a single process
    if (mpi_size <= 1) {
        return;
    }
    
    MPI_Barrier(MPI_COMM_WORLD);
    
    // Print all neighbor assignments in a structured way (one rank at a
    // time, separated by barriers, so lines do not interleave)
    for (int rank = 0; rank < mpi_size; rank++) {
        if (rank == mpi_rank) {
            std::cout << "Process " << mpi_rank << " has neighbors: N=" << local_grid.north_rank 
                      << ", S=" << local_grid.south_rank 
                      << ", E=" << local_grid.east_rank 
                      << ", W=" << local_grid.west_rank << std::endl;
            
            // Special check for Process 15
            if (mpi_rank == 15) {
                std::cout << "VERIFICATION - Process 15 neighbors in test_global_communication: N=" 
                          << local_grid.north_rank << ", S=" << local_grid.south_rank 
                          << ", E=" << local_grid.east_rank << ", W=" << local_grid.west_rank << std::endl;
            }
        }
        MPI_Barrier(MPI_COMM_WORLD);
    }
    
    // Barrier to ensure all printing is complete before starting communication
    MPI_Barrier(MPI_COMM_WORLD);
    
    if (mpi_rank == 0) {
        std::cout << "Starting global communication with process neighbors" << std::endl;
    }
    
    // Use the improved implementation that avoids deadlocks rather than
    // re-implementing the exchange here
    DEBUG_LOG(DEBUG_INFO, "Process " << mpi_rank << " Calling TensorMatMultiCavity::communicate_global_boundaries");
    TensorMatMultiCavity::communicate_global_boundaries(vec, local_grid);
    DEBUG_LOG(DEBUG_INFO, "Process " << mpi_rank << " Returned from TensorMatMultiCavity::communicate_global_boundaries");
}

// Test single process vs multi-process solution consistency
//
// Decomposes a 16x16x1 global grid across the MPI process grid (using
// either the global-mode or local-mode decomposition), fills each rank's
// interior points with (rank + 1), performs one halo exchange, and prints
// selected ranks' padded arrays so the exchanged halos can be inspected
// by eye.  Collective: every rank must call it with the same
// `use_global_mode` value — the body is full of matched MPI_Barrier calls.
void test_fdm_consistency(bool use_global_mode = false) {
    // Create grid parameters for a 16x16x1 grid (increased from 8x8x1)
    TensorMat19::GridParams grid_params;
    grid_params.imt = 16;
    grid_params.jmt = 16;
    grid_params.kmt = 1;
    grid_params.is_global_grid = use_global_mode;
    grid_params.total_inner_points = grid_params.imt * grid_params.jmt * grid_params.kmt;
    // Padded size adds a 1-cell halo in x and y; the z halo is only added
    // when kmt > 1 (a 2D problem keeps a single z layer)
    grid_params.total_padded_points = (grid_params.imt + 2) * (grid_params.jmt + 2) * 
                                     (grid_params.kmt == 1 ? 1 : (grid_params.kmt + 2));

    // Decompose grid using appropriate method
    TensorMatMultiCavity::LocalGridInfo local_info;
    if (use_global_mode) {
        TensorMatMultiCavity::decompose_grid_global_mode(grid_params.imt, grid_params.jmt, grid_params.kmt, local_info);
    } else {
        TensorMatMultiCavity::decompose_grid(grid_params.imt, grid_params.jmt, grid_params.kmt, local_info);
    }
    
    // Print the process grid communication pattern to help diagnose issues
    print_process_grid_communication(local_info);
    
    // Print the actual assigned neighbors for debugging
    if (use_global_mode) {
        // Use a barrier to ensure orderly output
        MPI_Barrier(MPI_COMM_WORLD);
        
        // Have each process report its neighbors (rank order, barrier-sequenced)
        for (int rank = 0; rank < mpi_size; rank++) {
            if (rank == mpi_rank) {
                std::cout << "NEIGHBOR ASSIGNMENTS - Process " << mpi_rank 
                         << " has neighbors: N=" << local_info.north_rank 
                         << ", S=" << local_info.south_rank
                         << ", E=" << local_info.east_rank
                         << ", W=" << local_info.west_rank << std::endl;
            }
            MPI_Barrier(MPI_COMM_WORLD);
        }
    }

    // Calculate sizes for the local grid (interior + 1-cell halo in x/y;
    // z halo only when local_kmt > 1)
    int local_imt = local_info.local_imt;
    int local_jmt = local_info.local_jmt;
    int local_kmt = local_info.local_kmt;
    int padded_imt = local_imt + 2;
    int padded_jmt = local_jmt + 2;
    int padded_kmt = local_kmt == 1 ? 1 : (local_kmt + 2);
    int total_padded_points = padded_imt * padded_jmt * padded_kmt;

    // Allocate and initialize vector with zeros (halo cells stay 0 until
    // the exchange fills them)
    std::vector<float> vec(total_padded_points, 0.0f);
    
    // Initialize interior points with MPI rank
    for (int k = 0; k < local_kmt; k++) {
        for (int j = 0; j < local_jmt; j++) {
            for (int i = 0; i < local_imt; i++) {
                // Calculate index based on whether we have z-padding or not
                int idx;
                if (local_kmt == 1) {
                    idx = (i + 1) + (j + 1) * padded_imt;
                } else {
                    idx = (i + 1) + (j + 1) * padded_imt + (k + 1) * padded_imt * padded_jmt;
                }
                vec[idx] = static_cast<float>(mpi_rank + 1);  // Use rank+1 to distinguish from zeros
            }
        }
    }

    // Print initial state
    // if (mpi_rank == 0) {
    //     std::cout << "\nInitial arrays for all processes (" << (use_global_mode ? "global mode" : "local mode") << "):" << std::endl;
    //     std::cout << "=================================" << std::endl;
    // }
    MPI_Barrier(MPI_COMM_WORLD);

    // Only print Layer k = 1 (the computational layer)
    const int k_layer = local_kmt == 1 ? 0 : 1;  // Adjust k_layer based on whether we have z-padding
    //don't print inital values, reserve this commented codes
    // for (int rank = 0; rank < mpi_size; rank++) {
    //     if (rank == mpi_rank) {
    //         std::cout << "\nProcess " << mpi_rank << " initial array (Layer k = " << k_layer << "):" << std::endl;
    //         for (int j = 0; j < padded_jmt; j++) {
    //             for (int i = 0; i < padded_imt; i++) {
    //                 int idx;
    //                 if (local_kmt == 1) {
    //                     idx = i + j * padded_imt;
    //                 } else {
    //                     idx = i + j * padded_imt + k_layer * padded_imt * padded_jmt;
    //                 }
    //                 std::cout << std::setw(4) << vec[idx] << " ";
    //             }
    //             std::cout << std::endl;
    //         }
    //     }
    //     MPI_Barrier(MPI_COMM_WORLD);
    // }

    // Perform boundary communication using the appropriate function
    if (use_global_mode) {
        if (mpi_rank == 0) {
            std::cout << "\nUsing GLOBAL communication mode" << std::endl;
        }
        
        // Debug print for top-right corner process
        for (int r = 0; r < mpi_size; r++) {
            if (r == mpi_rank && TensorMatMultiCavity::mpi_coords[0] == TensorMatMultiCavity::mpi_procs_y - 1 && 
                TensorMatMultiCavity::mpi_coords[1] == TensorMatMultiCavity::mpi_procs_x - 1) {
                std::cout << "DEBUG - Top-right corner process (rank " << mpi_rank << ") before test_global_communication:" << std::endl;
                std::cout << "  North=" << local_info.north_rank << ", South=" << local_info.south_rank 
                          << ", East=" << local_info.east_rank << ", West=" << local_info.west_rank << std::endl;
            }
            MPI_Barrier(MPI_COMM_WORLD);
        }
        
        // Use our simplified communication function for the global mode
        test_global_communication(vec.data(), padded_imt, padded_jmt, local_info);
    } else {
        if (mpi_rank == 0) {
            std::cout << "\nUsing LOCAL communication mode" << std::endl;
        }
        
        // Diagnostic: Print the array right before communication
        if (mpi_rank == 0) {
            std::cout << "\nDEBUG: Array BEFORE communication (Process 0):" << std::endl;
            const int k_layer_debug = local_kmt == 1 ? 0 : 1;
            for (int j = 0; j < padded_jmt; j++) {
                for (int i = 0; i < padded_imt; i++) {
                    int idx;
                    if (local_kmt == 1) {
                        idx = i + j * padded_imt;
                    } else {
                        idx = i + j * padded_imt + k_layer_debug * padded_imt * padded_jmt;
                    }
                    std::cout << std::setw(4) << vec[idx] << " ";
                }
                std::cout << std::endl;
            }
        }
        MPI_Barrier(MPI_COMM_WORLD);
        
        // Use the local communication function from the namespace
        // The function only takes vec and local_info, not grid_params
        TensorMatMultiCavity::communicate_local_boundaries(vec.data(), local_info);
        
        // Diagnostic: Print the array right after communication
        if (mpi_rank == 0) {
            std::cout << "\nDEBUG: Array AFTER communication (Process 0):" << std::endl;
            const int k_layer_debug = local_kmt == 1 ? 0 : 1;
            for (int j = 0; j < padded_jmt; j++) {
                for (int i = 0; i < padded_imt; i++) {
                    int idx;
                    if (local_kmt == 1) {
                        idx = i + j * padded_imt;
                    } else {
                        idx = i + j * padded_imt + k_layer_debug * padded_imt * padded_jmt;
                    }
                    std::cout << std::setw(4) << vec[idx] << " ";
                }
                std::cout << std::endl;
            }
        }
        MPI_Barrier(MPI_COMM_WORLD);
    }

    // Print final state
    if (mpi_rank == 0) {
        std::cout << "\nFinal arrays after communication:" << std::endl;
        std::cout << "===============================" << std::endl;
    }
    MPI_Barrier(MPI_COMM_WORLD);
    
    // Modify how we print arrays to ensure we see output for any grid size
    // Print first row, last row, and some processes in between
    for (int rank = 0; rank < mpi_size; rank++) {
        bool should_print = false;
        
        // Print first row processes
        if (TensorMatMultiCavity::mpi_coords[0] == 0) {
            should_print = true;
        }
        // Print last row processes
        else if (TensorMatMultiCavity::mpi_coords[0] == TensorMatMultiCavity::mpi_procs_y - 1) {
            should_print = true;
        }
        // For larger grids, also print some middle row processes
        else if (TensorMatMultiCavity::mpi_procs_y > 4 && 
                TensorMatMultiCavity::mpi_coords[0] == TensorMatMultiCavity::mpi_procs_y / 2) {
            should_print = true;
        }
        
        // For very large grids, limit column printing to first, middle, and last
        if (should_print && TensorMatMultiCavity::mpi_procs_x > 6) {
            should_print = (TensorMatMultiCavity::mpi_coords[1] == 0 || 
                           TensorMatMultiCavity::mpi_coords[1] == TensorMatMultiCavity::mpi_procs_x - 1 ||
                           TensorMatMultiCavity::mpi_coords[1] == TensorMatMultiCavity::mpi_procs_x / 2);
        }
        
        if (rank == mpi_rank && should_print) {
            std::cout << "\nProcess " << mpi_rank << " final array (Layer k = " << k_layer 
                     << ") at position (" << TensorMatMultiCavity::mpi_coords[1] << "," 
                     << TensorMatMultiCavity::mpi_coords[0] << "):" << std::endl;
            for (int j = 0; j < padded_jmt; j++) {
                for (int i = 0; i < padded_imt; i++) {
                    int idx;
                    if (local_kmt == 1) {
                        idx = i + j * padded_imt;
                    } else {
                        idx = i + j * padded_imt + k_layer * padded_imt * padded_jmt;
                    }
                    std::cout << std::setw(4) << vec[idx] << " ";
                }
                std::cout << std::endl;
            }
        }
        MPI_Barrier(MPI_COMM_WORLD);
    }

    // Synchronize before ending
    MPI_Barrier(MPI_COMM_WORLD);
}

// Test function for multi-cavity boundary communication
void test_multi_cavity_boundary_comm() {
    // Create multi-cavity grid parameters
    TensorMatMultiCavity::MultiCavityGridParams grid;

    // Create local grid info for each cavity
    TensorMatMultiCavity::LocalGridInfo main_info;
    main_info.local_imt = 32;
    main_info.local_jmt = 32;
    main_info.local_kmt = 32;
    main_info.total_padded_points = (32 + 2) * (32 + 2) * (32 + 2);
    main_info.rank = TensorMatMultiCavity::mpi_rank;
    main_info.size = TensorMatMultiCavity::mpi_size;

    TensorMatMultiCavity::LocalGridInfo left_info;
    left_info.local_imt = 16;
    left_info.local_jmt = 32;
    left_info.local_kmt = 32;
    left_info.total_padded_points = (16 + 2) * (32 + 2) * (32 + 2);
    left_info.rank = TensorMatMultiCavity::mpi_rank;
    left_info.size = TensorMatMultiCavity::mpi_size;

    TensorMatMultiCavity::LocalGridInfo right_info;
    right_info.local_imt = 16;
    right_info.local_jmt = 32;
    right_info.local_kmt = 32;
    right_info.total_padded_points = (16 + 2) * (32 + 2) * (32 + 2);
    right_info.rank = TensorMatMultiCavity::mpi_rank;
    right_info.size = TensorMatMultiCavity::mpi_size;

    // Add cavities to the grid
    grid.add_cavity(main_info);
    grid.add_cavity(left_info);
    grid.add_cavity(right_info);

    // Add connections between cavities
    grid.add_connection(0, 1, TensorMatMultiCavity::CavityConnection::Type::FACE_X, 15, 0);
    grid.add_connection(0, 2, TensorMatMultiCavity::CavityConnection::Type::FACE_X, 0, 31);

    // Allocate vector for all cavities
    std::vector<float> vec(grid.get_total_points());

    // Initialize vector with test values
    for (size_t i = 0; i < vec.size(); i++) {
        vec[i] = static_cast<float>(i);
    }

    // Communicate boundaries
    TensorMatMultiCavity::communicate_boundaries_multi_cavity(vec.data(), grid);

    // Print results (only on rank 0)
    if (TensorMatMultiCavity::mpi_rank == 0) {
        std::cout << "Multi-cavity boundary communication completed successfully." << std::endl;
    }
}

// Program entry point.
//
// Parses command-line flags (before MPI_Init so "--help" works without
// mpirun), initializes MPI and a 2D Cartesian process grid, runs the FDM
// boundary-communication consistency test in the selected mode, and
// shuts MPI down.
//
// Options:
//   --help/-h    print usage and exit
//   --global/-g  use global communication mode (default: local)
//   --debug/-d N set debug verbosity 0-5 (default: DEBUG_ERROR)
int main(int argc, char** argv) {
    // Parse command-line options
    bool use_global_mode = false;
    bool show_help = false;
    bool debug_specified = false;  // true once the user passes -d/--debug
    
    for (int i = 1; i < argc; i++) {
        std::string arg = argv[i];
        if (arg == "--help" || arg == "-h") {
            show_help = true;
        } else if (arg == "--global" || arg == "-g") {
            use_global_mode = true;
        } else if (arg == "--debug" || arg == "-d") {
            if (i + 1 < argc) {
                TensorMatMultiCavity::debug_level = std::atoi(argv[i+1]);
                debug_specified = true;
                i++;  // consume the level value
            }
        }
    }
    
    // Default the debug level to ERROR only when the user did not request
    // a specific level.  BUGFIX: the old code clamped unconditionally, so
    // "--debug 3" was silently reduced back to DEBUG_ERROR (1).
    if (!debug_specified && TensorMatMultiCavity::debug_level > DEBUG_ERROR) {
        TensorMatMultiCavity::debug_level = DEBUG_ERROR;
    }
    
    // Show help and exit if requested (before MPI_Init)
    if (show_help) {
        std::cout << "Usage: " << argv[0] << " [options]" << std::endl;
        std::cout << "Options:" << std::endl;
        std::cout << "  --help, -h    Display this help message" << std::endl;
        std::cout << "  --global, -g  Use global communication mode (default: local mode)" << std::endl;
        std::cout << "  --debug, -d   Set debug level (0-5, default: 1)" << std::endl;
        std::cout << std::endl;
        std::cout << "Description:" << std::endl;
        std::cout << "  Tests the FDM boundary communication in a single cavity." << std::endl;
        std::cout << "  Initializes a 16x16x1 grid decomposed among MPI processes." << std::endl;
        std::cout << "  Each process fills its interior points with its MPI rank + 1." << std::endl;
        std::cout << "  The test then exchanges boundary values between neighboring processes." << std::endl;
        std::cout << std::endl;
        std::cout << "Communication Modes:" << std::endl;
        std::cout << "  Local Mode: Standard nearest-neighbor communication pattern" << std::endl;
        std::cout << "  Global Mode: Uses a different communication pattern with special boundary conditions" << std::endl;
        return 0;
    }

    // Initialize MPI
    MPI_Init(&argc, &argv);

    // Get MPI process information
    MPI_Comm_rank(MPI_COMM_WORLD, &TensorMatMultiCavity::mpi_rank);
    MPI_Comm_size(MPI_COMM_WORLD, &TensorMatMultiCavity::mpi_size);

    // Create process grid (dims[0] = rows, dims[1] = columns)
    int dims[2] = {0, 0};
    MPI_Dims_create(TensorMatMultiCavity::mpi_size, 2, dims);
    
    // For a 4x4 grid, we want a specific arrangement
    // (MPI_Dims_create also yields {4,4} for 16 ranks, so dims stays
    // consistent with the override below)
    if (TensorMatMultiCavity::mpi_size == 16) {
        TensorMatMultiCavity::mpi_procs_x = 4;
        TensorMatMultiCavity::mpi_procs_y = 4;
    } else {
        // Otherwise use MPI's automatic decomposition
        TensorMatMultiCavity::mpi_procs_y = dims[0]; // rows
        TensorMatMultiCavity::mpi_procs_x = dims[1]; // columns
    }
    
    if (TensorMatMultiCavity::mpi_rank == 0) {
        std::cout << "Process grid: " << TensorMatMultiCavity::mpi_procs_x << "x" 
                 << TensorMatMultiCavity::mpi_procs_y << std::endl;
    }

    // Create Cartesian communicator (non-periodic, no rank reordering)
    int periods[2] = {0, 0};
    MPI_Comm cart_comm;
    MPI_Cart_create(MPI_COMM_WORLD, 2, dims, periods, 0, &cart_comm);

    // Get this process's (row, col) coordinates in the Cartesian grid
    MPI_Cart_coords(cart_comm, TensorMatMultiCavity::mpi_rank, 2, TensorMatMultiCavity::mpi_coords);

    // Run tests with the specified communication mode
    test_fdm_consistency(use_global_mode);

    // Clean up
    MPI_Comm_free(&cart_comm);
    MPI_Finalize();

    return 0;
}