#include "tensor_mat_multi_cav.h"
#include <iostream>
#include <cassert>
#include <cmath>
#include <cstring>   // For memcpy
#include <algorithm>
#include <vector>
#include <unistd.h>  // For usleep

namespace TensorMatMultiCavity {

// Global MPI variables (shared across the decomposition/communication routines)
int mpi_rank = 0;               // Rank of this process in MPI_COMM_WORLD
int mpi_size = 1;               // Total number of MPI processes
int mpi_procs_x = 1;            // Process count along the i/x (column) direction
int mpi_procs_y = 1;            // Process count along the j/y (row) direction
int mpi_coords[2] = {0, 0};     // Cartesian coords: [0] used as row (j), [1] as column (i)
int debug_level = DEBUG_ERROR;  // Threshold consulted by DEBUG_LOG

// Helper function to probe for messages with incorrect tags
bool probe_for_wrong_tags(int source, int correct_tag) {
    if (source == MPI_PROC_NULL) {
        return false;
    }
    
    MPI_Status status;
    int flag = 0;
    
    // Try to probe for any message from this source
    MPI_Iprobe(source, MPI_ANY_TAG, MPI_COMM_WORLD, &flag, &status);
    
    if (flag && status.MPI_TAG != correct_tag) {
        DEBUG_LOG(DEBUG_WARNING, "Process " << mpi_rank << " Detected message with tag=" << status.MPI_TAG 
                  << " from source=" << source << " (expected=" << correct_tag << ")");
        return true;
    }
    
    return false;
}

// Tag conventions for north/south communication.
// NOTE(review): these three tags are not referenced anywhere in the code
// visible in this file (the exchange routines use literal tags 0-3 and
// 1001/1002, 2001/2002, 3001/3002) — confirm whether other translation
// units use them before removing.
#define TAG_NORTH_TO_SOUTH 5001
#define TAG_SOUTH_TO_NORTH 5002

// Define tag for all communications to simplify
#define TAG_ANY_DIRECTION 5000

void decompose_grid(int global_imt, int global_jmt, int global_kmt, LocalGridInfo& local_grid) {
    // Set up a 2D Cartesian communicator
    MPI_Comm cart_comm;
    int dims[2] = {0, 0};
    MPI_Dims_create(mpi_size, 2, dims);
    mpi_procs_x = dims[0];
    mpi_procs_y = dims[1];
    int periods[2] = {0, 0};  // Non-periodic boundaries
    
    MPI_Cart_create(MPI_COMM_WORLD, 2, dims, periods, 1, &cart_comm);
    MPI_Cart_coords(cart_comm, mpi_rank, 2, mpi_coords);
    
    // Determine neighbors
    MPI_Cart_shift(cart_comm, 1, 1, &local_grid.west_rank, &local_grid.east_rank);
    MPI_Cart_shift(cart_comm, 0, 1, &local_grid.north_rank, &local_grid.south_rank);
    
    // Set MPI rank in local_grid
    local_grid.mpi_rank = mpi_rank;
    
    // Calculate global domain dimensions
    local_grid.global_imt = global_imt;
    local_grid.global_jmt = global_jmt;
    local_grid.global_kmt = global_kmt;
    
    // Calculate local dimensions
    int base_imt = global_imt / mpi_procs_x;
    int base_jmt = global_jmt / mpi_procs_y;
    int remainder_i = global_imt % mpi_procs_x;
    int remainder_j = global_jmt % mpi_procs_y;
    
    // Set local grid dimensions
    local_grid.local_imt = base_imt + (mpi_coords[1] < remainder_i ? 1 : 0);
    local_grid.local_jmt = base_jmt + (mpi_coords[0] < remainder_j ? 1 : 0);
    local_grid.local_kmt = global_kmt;  // No decomposition in z-direction
    
    // Calculate starting indices
    local_grid.local_ims = mpi_coords[1] * base_imt + std::min(mpi_coords[1], remainder_i);
    local_grid.local_jms = mpi_coords[0] * base_jmt + std::min(mpi_coords[0], remainder_j);
    local_grid.local_kms = 0;
    
    // Calculate ending indices
    local_grid.local_ime = local_grid.local_ims + local_grid.local_imt - 1;
    local_grid.local_jme = local_grid.local_jms + local_grid.local_jmt - 1;
    local_grid.local_kme = local_grid.local_kms + local_grid.local_kmt - 1;
    
    // Set MPI information
    local_grid.rank = mpi_rank;
    local_grid.size = mpi_size;
    
    // Set halo width
    local_grid.halo_width = 1;
    
    // Log information
    DEBUG_LOG(DEBUG_INFO, "Process " << mpi_rank << " local dims: " << local_grid.local_imt << "x"
              << local_grid.local_jmt << "x" << local_grid.local_kmt);
    DEBUG_LOG(DEBUG_DETAIL, "Process " << mpi_rank << " global domain: i=[" << local_grid.local_ims << ","
              << local_grid.local_ime << "], j=[" << local_grid.local_jms << "," << local_grid.local_jme
              << "], k=[" << local_grid.local_kms << "," << local_grid.local_kme << "]");
    
    // Calculate total padded points for the grid
    local_grid.total_padded_points = (local_grid.local_imt + 2) * (local_grid.local_jmt + 2) * (local_grid.local_kmt + 2);
    
    // Clean up
    MPI_Comm_free(&cart_comm);
}

// Variant of decompose_grid for "global mode": the block distribution is the
// same, but neighbor ranks are computed by hand instead of via MPI_Cart_shift.
// East-west neighbors wrap around periodically; the top and bottom rows use a
// mirroring pattern in which the missing vertical neighbor is replaced by a
// process in the SAME row offset by half the row width.
// Requires mpi_procs_x and mpi_procs_y to be set before the call (they are
// used to build the Cartesian dims).
void decompose_grid_global_mode(int global_imt, int global_jmt, int global_kmt, LocalGridInfo& local_grid) {
    // Set up a 2D Cartesian communicator first (we still need the coords)
    MPI_Comm cart_comm;
    int dims[2] = {TensorMatMultiCavity::mpi_procs_y, TensorMatMultiCavity::mpi_procs_x};  // dims[0]=rows, dims[1]=cols
    int periods[2] = {0, 0};  // Non-periodic boundaries for local grid
    
    MPI_Cart_create(MPI_COMM_WORLD, 2, dims, periods, 1, &cart_comm);
    MPI_Cart_coords(cart_comm, TensorMatMultiCavity::mpi_rank, 2, TensorMatMultiCavity::mpi_coords);
    
    // Get row and column for this process
    int row = TensorMatMultiCavity::mpi_coords[0];
    int col = TensorMatMultiCavity::mpi_coords[1];

    // Calculate neighbors using a general algorithm (ranks are assumed to be
    // laid out row-major: rank = row * num_cols + col)
    int num_rows = TensorMatMultiCavity::mpi_procs_y;
    int num_cols = TensorMatMultiCavity::mpi_procs_x;

    // East-West: Use periodic boundary conditions (wraparound)
    int east_rank = row * num_cols + ((col + 1) % num_cols);
    int west_rank = row * num_cols + ((col - 1 + num_cols) % num_cols);
    
    // North-South: Special pattern based on the global mode rules.
    // Note: "north" here means row + 1 (higher row index) — the opposite of
    // the MPI_Cart_shift orientation used in decompose_grid.
    int north_rank, south_rank;
    
    if (row == 0) {
        // Bottom row - mirroring pattern
        north_rank = (row + 1) * num_cols + col;  // Connect to row above (normal)
        
        // For mirroring at bottom row, connect to another process in same row.
        // Mirror offset is half the column count (for a 4x4 grid, process 0
        // connects to 2, process 1 to 3).
        // NOTE(review): for odd num_cols this offset is not an involution
        // (a->b does not imply b->a) — confirm odd column counts are
        // supported by this mode.
        int mirror_offset = num_cols / 2;
        south_rank = row * num_cols + ((col + mirror_offset) % num_cols);
    }
    else if (row == num_rows - 1) {
        // Top row - mirroring pattern
        south_rank = (row - 1) * num_cols + col;  // Connect to row below (normal)
        
        // For mirroring at top row, connect to another process in same row
        // Mirror offset is half the column count (for 4x4 grid, process 12 connects to 14, process 13 to 15)
        int mirror_offset = num_cols / 2;
        north_rank = row * num_cols + ((col + mirror_offset) % num_cols);
    }
    else {
        // Middle rows - normal vertical neighbors
        north_rank = (row + 1) * num_cols + col;
        south_rank = (row - 1) * num_cols + col;
    }
    
    // Assign the calculated neighbors
    local_grid.north_rank = north_rank;
    local_grid.south_rank = south_rank;
    local_grid.east_rank = east_rank;
    local_grid.west_rank = west_rank;
    
    // Record the global domain dimensions
    local_grid.global_imt = global_imt;
    local_grid.global_jmt = global_jmt;
    local_grid.global_kmt = global_kmt;
    
    // Calculate base block size for each dimension
    int base_imt = global_imt / TensorMatMultiCavity::mpi_procs_x;
    int base_jmt = global_jmt / TensorMatMultiCavity::mpi_procs_y;
    
    // Calculate remainder to distribute (first `remainder` processes along
    // each direction get one extra cell)
    int remainder_i = global_imt % TensorMatMultiCavity::mpi_procs_x;
    int remainder_j = global_jmt % TensorMatMultiCavity::mpi_procs_y;
    
    // Determine local grid dimensions (coords[1] = column/i, coords[0] = row/j)
    local_grid.local_imt = base_imt + (TensorMatMultiCavity::mpi_coords[1] < remainder_i ? 1 : 0);
    local_grid.local_jmt = base_jmt + (TensorMatMultiCavity::mpi_coords[0] < remainder_j ? 1 : 0);
    local_grid.local_kmt = global_kmt;  // No decomposition in z-direction
    
    // Calculate starting indices in global grid
    local_grid.local_ims = TensorMatMultiCavity::mpi_coords[1] * base_imt + std::min(TensorMatMultiCavity::mpi_coords[1], remainder_i);
    local_grid.local_jms = TensorMatMultiCavity::mpi_coords[0] * base_jmt + std::min(TensorMatMultiCavity::mpi_coords[0], remainder_j);
    local_grid.local_kms = 0;
    
    // Calculate (inclusive) ending indices
    local_grid.local_ime = local_grid.local_ims + local_grid.local_imt - 1;
    local_grid.local_jme = local_grid.local_jms + local_grid.local_jmt - 1;
    local_grid.local_kme = local_grid.local_kms + local_grid.local_kmt - 1;
    
    // Set halo width to 1 for the 19-point stencil
    local_grid.halo_width = 1;
    
    // Print detailed process information
    DEBUG_LOG(DEBUG_INFO, "GLOBAL MODE - Process " << mpi_rank << " local dims: " 
              << local_grid.local_imt << "x" << local_grid.local_jmt << "x" << local_grid.local_kmt 
              << " | Neighbors: N=" << north_rank << ", S=" << south_rank 
              << ", E=" << east_rank << ", W=" << west_rank);
    
    // Set MPI information
    local_grid.rank = mpi_rank;
    local_grid.size = mpi_size;
    
    // Total storage including the one-cell padding in every direction
    local_grid.total_padded_points = (local_grid.local_imt + 2) * (local_grid.local_jmt + 2) * (local_grid.local_kmt + 2);
    
    // Clean up the temporary Cartesian communicator
    MPI_Comm_free(&cart_comm);
}

void communicate_local_boundaries(float* vec, const LocalGridInfo& local_grid) {
    // Calculate buffer sizes and indices carefully
    int local_imt = local_grid.local_imt;
    int local_jmt = local_grid.local_jmt;
    int local_kmt = local_grid.local_kmt;
    
    // Calculate padded dimensions
    int padded_imt = local_imt + 2;
    int padded_jmt = local_jmt + 2;
    // int padded_kmt = local_kmt + 2;
    
    // Check for nulls and bounds before proceeding
    if (vec == nullptr) {
        std::cerr << "Error: vec pointer is null in communicate_local_boundaries" << std::endl;
        return;
    }
    
    // MPI status and requests
    // MPI_Status status;
    MPI_Request requests[8];
    int req_count = 0;
    
    // =========================
    // East-West Communication
    // =========================
    
    // Allocate send/receive buffers for east-west communication
    int ew_buffer_size = local_jmt * local_kmt;
    std::vector<float> east_send_buffer;
    std::vector<float> west_send_buffer;
    std::vector<float> east_recv_buffer;
    std::vector<float> west_recv_buffer;
    
    if (local_grid.east_rank != MPI_PROC_NULL) {
        east_send_buffer.resize(ew_buffer_size);
        east_recv_buffer.resize(ew_buffer_size);
        
        // Pack data to send east (local_imt-1 column)
        int idx = 0;
        for (int k = 0; k < local_kmt; k++) {
            for (int j = 0; j < local_jmt; j++) {
                // Get value from the rightmost interior cell
                int vec_idx;
                if (local_kmt == 1) {
                    vec_idx = (local_imt) + (j + 1) * padded_imt;
                } else {
                    vec_idx = (local_imt) + (j + 1) * padded_imt + (k + 1) * padded_imt * padded_jmt;
                }
                east_send_buffer[idx++] = vec[vec_idx];
            }
        }
        
        // Send to east neighbor and receive from east neighbor
        MPI_Isend(east_send_buffer.data(), ew_buffer_size, MPI_FLOAT, 
                 local_grid.east_rank, 0, MPI_COMM_WORLD, &requests[req_count++]);
        MPI_Irecv(east_recv_buffer.data(), ew_buffer_size, MPI_FLOAT,
                 local_grid.east_rank, 1, MPI_COMM_WORLD, &requests[req_count++]);
    }
    
    if (local_grid.west_rank != MPI_PROC_NULL) {
        west_send_buffer.resize(ew_buffer_size);
        west_recv_buffer.resize(ew_buffer_size);
        
        // Pack data to send west (0 column)
        int idx = 0;
        for (int k = 0; k < local_kmt; k++) {
            for (int j = 0; j < local_jmt; j++) {
                // Get value from the leftmost interior cell
                int vec_idx;
                if (local_kmt == 1) {
                    vec_idx = 0 + (j + 1) * padded_imt;
                } else {
                    vec_idx = 0 + (j + 1) * padded_imt + (k + 1) * padded_imt * padded_jmt;
                }
                west_send_buffer[idx++] = vec[vec_idx];
            }
        }
        
        // Send to west neighbor and receive from west neighbor
        MPI_Isend(west_send_buffer.data(), ew_buffer_size, MPI_FLOAT,
                 local_grid.west_rank, 1, MPI_COMM_WORLD, &requests[req_count++]);
        MPI_Irecv(west_recv_buffer.data(), ew_buffer_size, MPI_FLOAT,
                 local_grid.west_rank, 0, MPI_COMM_WORLD, &requests[req_count++]);
    }
    
    // =========================
    // North-South Communication
    // =========================
    
    // Allocate send/receive buffers for north-south communication
    int ns_buffer_size = local_imt * local_kmt;
    std::vector<float> north_send_buffer;
    std::vector<float> south_send_buffer;
    std::vector<float> north_recv_buffer;
    std::vector<float> south_recv_buffer;
    
    if (local_grid.north_rank != MPI_PROC_NULL) {
        north_send_buffer.resize(ns_buffer_size);
        north_recv_buffer.resize(ns_buffer_size);
        
        // Pack data to send north (local_jmt-1 row)
        int idx = 0;
        for (int k = 0; k < local_kmt; k++) {
            for (int i = 0; i < local_imt; i++) {
                // Get value from the topmost interior cell
                int vec_idx;
                if (local_kmt == 1) {
                    vec_idx = (i + 1) + (local_jmt) * padded_imt;
                } else {
                    vec_idx = (i + 1) + (local_jmt) * padded_imt + (k + 1) * padded_imt * padded_jmt;
                }
                north_send_buffer[idx++] = vec[vec_idx];
            }
        }
        
        // Send to north neighbor and receive from north neighbor
        MPI_Isend(north_send_buffer.data(), ns_buffer_size, MPI_FLOAT,
                 local_grid.north_rank, 2, MPI_COMM_WORLD, &requests[req_count++]);
        MPI_Irecv(north_recv_buffer.data(), ns_buffer_size, MPI_FLOAT,
                 local_grid.north_rank, 3, MPI_COMM_WORLD, &requests[req_count++]);
    }
    
    if (local_grid.south_rank != MPI_PROC_NULL) {
        south_send_buffer.resize(ns_buffer_size);
        south_recv_buffer.resize(ns_buffer_size);
        
        // Pack data to send south (0 row)
        int idx = 0;
        for (int k = 0; k < local_kmt; k++) {
            for (int i = 0; i < local_imt; i++) {
                // Get value from the bottommost interior cell
                int vec_idx;
                if (local_kmt == 1) {
                    vec_idx = (i + 1) + 0 * padded_imt;
                } else {
                    vec_idx = (i + 1) + 0 * padded_imt + (k + 1) * padded_imt * padded_jmt;
                }
                south_send_buffer[idx++] = vec[vec_idx];
            }
        }
        
        // Send to south neighbor and receive from south neighbor
        MPI_Isend(south_send_buffer.data(), ns_buffer_size, MPI_FLOAT,
                 local_grid.south_rank, 3, MPI_COMM_WORLD, &requests[req_count++]);
        MPI_Irecv(south_recv_buffer.data(), ns_buffer_size, MPI_FLOAT,
                 local_grid.south_rank, 2, MPI_COMM_WORLD, &requests[req_count++]);
    }
    
    // Wait for all communication to complete
    if (req_count > 0) {
        MPI_Waitall(req_count, requests, MPI_STATUSES_IGNORE);
    }
    
    // Unpack received east-west data
    if (local_grid.east_rank != MPI_PROC_NULL) {
        int idx = 0;
        for (int k = 0; k < local_kmt; k++) {
            for (int j = 0; j < local_jmt; j++) {
                // Place in the east ghost cells (i = local_imt+1)
                int vec_idx;
                if (local_kmt == 1) {
                    vec_idx = (local_imt + 1) + (j + 1) * padded_imt;
                } else {
                    vec_idx = (local_imt + 1) + (j + 1) * padded_imt + (k + 1) * padded_imt * padded_jmt;
                }
                vec[vec_idx] = east_recv_buffer[idx++];
            }
        }
    }
    
    if (local_grid.west_rank != MPI_PROC_NULL) {
        int idx = 0;
        for (int k = 0; k < local_kmt; k++) {
            for (int j = 0; j < local_jmt; j++) {
                // Place in the west ghost cells (i = 0)
                int vec_idx;
                if (local_kmt == 1) {
                    vec_idx = 0 + (j + 1) * padded_imt;
                } else {
                    vec_idx = 0 + (j + 1) * padded_imt + (k + 1) * padded_imt * padded_jmt;
                }
                vec[vec_idx] = west_recv_buffer[idx++];
            }
        }
    }
    
    // Unpack received north-south data
    if (local_grid.north_rank != MPI_PROC_NULL) {
        int idx = 0;
        for (int k = 0; k < local_kmt; k++) {
            for (int i = 0; i < local_imt; i++) {
                // Place in the north ghost cells (j = local_jmt+1)
                int vec_idx;
                if (local_kmt == 1) {
                    vec_idx = (i + 1) + (local_jmt + 1) * padded_imt;
                } else {
                    vec_idx = (i + 1) + (local_jmt + 1) * padded_imt + (k + 1) * padded_imt * padded_jmt;
                }
                vec[vec_idx] = north_recv_buffer[idx++];
            }
        }
    }
    
    if (local_grid.south_rank != MPI_PROC_NULL) {
        int idx = 0;
        for (int k = 0; k < local_kmt; k++) {
            for (int i = 0; i < local_imt; i++) {
                // Place in the south ghost cells (j = 0)
                int vec_idx;
                if (local_kmt == 1) {
                    vec_idx = (i + 1) + 0 * padded_imt;
                } else {
                    vec_idx = (i + 1) + 0 * padded_imt + (k + 1) * padded_imt * padded_jmt;
                }
                vec[vec_idx] = south_recv_buffer[idx++];
            }
        }
    }
    
    // For a single-layer grid (local_kmt = 1), we don't need vertical boundary communication
    // We'll only set the k=0 and k=2 boundaries to zero if there are actually vertical layers
    if (local_kmt > 1) {
        // Set k=0 and k=local_kmt+1 boundaries to zero (Dirichlet boundary conditions in z-direction)
        for (int j = 0; j < padded_jmt; j++) {
            for (int i = 0; i < padded_imt; i++) {
                vec[i + j * padded_imt + 0 * padded_imt * padded_jmt] = 0.0f;  // Bottom boundary
                vec[i + j * padded_imt + (local_kmt + 1) * padded_imt * padded_jmt] = 0.0f;  // Top boundary
            }
        }
    }
    
    // Barrier to ensure all processes complete before moving on
    MPI_Barrier(MPI_COMM_WORLD);
}

// Function to print the current communication pattern for debugging
void print_process_communication_pattern(const LocalGridInfo& local_grid) {
    // Just do a simple display of the current process's communication pattern
    std::cout << "Process " << mpi_rank << " communication pattern: "
              << "N=" << local_grid.north_rank << " (" << (local_grid.north_rank == mpi_rank ? "self" : "other") << "), "
              << "S=" << local_grid.south_rank << " (" << (local_grid.south_rank == mpi_rank ? "self" : "other") << "), "
              << "E=" << local_grid.east_rank << " (" << (local_grid.east_rank == mpi_rank ? "self" : "other") << "), "
              << "W=" << local_grid.west_rank << " (" << (local_grid.west_rank == mpi_rank ? "self" : "other") << ")"
              << std::endl;
}

void communicate_global_boundaries(float* vec, const LocalGridInfo& local_grid) {
    // We only need to communicate if we have more than one process
    if (TensorMatMultiCavity::mpi_size <= 1) {
        return;
    }
    
    // Check for nulls before proceeding
    if (vec == nullptr) {
        std::cerr << "Error: vec pointer is null in communicate_global_boundaries" << std::endl;
        return;
    }
    
    // Check for self-connections, which are not allowed
    bool has_self_connection = false;
    if (local_grid.north_rank == mpi_rank) {
        std::cerr << "ERROR: Process " << mpi_rank << " has self-connection to NORTH" << std::endl;
        has_self_connection = true;
    }
    if (local_grid.south_rank == mpi_rank) {
        std::cerr << "ERROR: Process " << mpi_rank << " has self-connection to SOUTH" << std::endl;
        has_self_connection = true;
    }
    if (local_grid.east_rank == mpi_rank) {
        std::cerr << "ERROR: Process " << mpi_rank << " has self-connection to EAST" << std::endl;
        has_self_connection = true;
    }
    if (local_grid.west_rank == mpi_rank) {
        std::cerr << "ERROR: Process " << mpi_rank << " has self-connection to WEST" << std::endl;
        has_self_connection = true;
    }
    
    if (has_self_connection) {
        std::cerr << "ERROR: Self-connections are not allowed in grid decomposition" << std::endl;
        MPI_Abort(MPI_COMM_WORLD, 1);
    }
    
    DEBUG_LOG(DEBUG_DETAIL, "Process " << mpi_rank << " starting communicate_global_boundaries");
    
    // Calculate dimensions
    int local_imt = local_grid.local_imt;
    int local_jmt = local_grid.local_jmt;
    int local_kmt = local_grid.local_kmt;
    
    // Calculate padded dimensions
    int padded_imt = local_imt + 2;
    int padded_jmt = local_jmt + 2;
    int padded_kmt = local_kmt == 1 ? 1 : (local_kmt + 2);  // No padding in z-direction for 2D case
    
    // Calculate buffer sizes
    int ew_buffer_size = local_jmt * local_kmt;
    int ns_buffer_size = local_imt * local_kmt;
    
    // ===== EAST-WEST COMMUNICATION =====
    // Break potential deadlocks by using a phased approach
    
    // Allocate buffers for all communications upfront
    float* east_send_buf = nullptr;
    float* west_send_buf = nullptr;
    float* east_recv_buf = nullptr;
    float* west_recv_buf = nullptr;
    
    if (local_grid.east_rank != MPI_PROC_NULL) {
        east_send_buf = new float[ew_buffer_size];
        east_recv_buf = new float[ew_buffer_size];
        
        // Pack east send buffer (rightmost interior column)
        int idx = 0;
        for (int k = 0; k < local_kmt; k++) {
            for (int j = 0; j < local_jmt; j++) {
                int vec_idx;
                if (local_kmt == 1) {
                    vec_idx = (local_imt) + (j + 1) * padded_imt;
                } else {
                    vec_idx = (local_imt) + (j + 1) * padded_imt + (k + 1) * padded_imt * padded_jmt;
                }
                east_send_buf[idx++] = vec[vec_idx];
            }
        }
    }
    
    if (local_grid.west_rank != MPI_PROC_NULL) {
        west_send_buf = new float[ew_buffer_size];
        west_recv_buf = new float[ew_buffer_size];
        
        // Pack west send buffer (leftmost interior column)
        int idx = 0;
        for (int k = 0; k < local_kmt; k++) {
            for (int j = 0; j < local_jmt; j++) {
                int vec_idx;
                if (local_kmt == 1) {
                    vec_idx = 1 + (j + 1) * padded_imt;
                } else {
                    vec_idx = 1 + (j + 1) * padded_imt + (k + 1) * padded_imt * padded_jmt;
                }
                west_send_buf[idx++] = vec[vec_idx];
            }
        }
    }
    
    // Phase 1: Even ranks send, odd ranks receive
    MPI_Request req[4] = {MPI_REQUEST_NULL, MPI_REQUEST_NULL, MPI_REQUEST_NULL, MPI_REQUEST_NULL};
    int req_count = 0;
    
    // First phase of East-West communication - even ranks send
    if (mpi_rank % 2 == 0) {
        if (local_grid.east_rank != MPI_PROC_NULL) {
            DEBUG_LOG(DEBUG_DETAIL, "Process " << mpi_rank << " Phase 1: Sending to EAST " << local_grid.east_rank);
            MPI_Isend(east_send_buf, ew_buffer_size, MPI_FLOAT, local_grid.east_rank, 1001, MPI_COMM_WORLD, &req[req_count++]);
        }
        
        if (local_grid.west_rank != MPI_PROC_NULL) {
            DEBUG_LOG(DEBUG_DETAIL, "Process " << mpi_rank << " Phase 1: Sending to WEST " << local_grid.west_rank);
            MPI_Isend(west_send_buf, ew_buffer_size, MPI_FLOAT, local_grid.west_rank, 1002, MPI_COMM_WORLD, &req[req_count++]);
        }
    } else {
        if (local_grid.west_rank != MPI_PROC_NULL) {
            DEBUG_LOG(DEBUG_DETAIL, "Process " << mpi_rank << " Phase 1: Receiving from WEST " << local_grid.west_rank);
            MPI_Irecv(west_recv_buf, ew_buffer_size, MPI_FLOAT, local_grid.west_rank, 1001, MPI_COMM_WORLD, &req[req_count++]);
        }
        
        if (local_grid.east_rank != MPI_PROC_NULL) {
            DEBUG_LOG(DEBUG_DETAIL, "Process " << mpi_rank << " Phase 1: Receiving from EAST " << local_grid.east_rank);
            MPI_Irecv(east_recv_buf, ew_buffer_size, MPI_FLOAT, local_grid.east_rank, 1002, MPI_COMM_WORLD, &req[req_count++]);
        }
    }
    
    // Wait for all communications in phase 1 to complete
    if (req_count > 0) {
        DEBUG_LOG(DEBUG_DETAIL, "Process " << mpi_rank << " Waiting for Phase 1 to complete");
        MPI_Waitall(req_count, req, MPI_STATUSES_IGNORE);
    }
    
    // Global barrier to ensure phase 1 is complete before starting phase 2
    MPI_Barrier(MPI_COMM_WORLD);
    DEBUG_LOG(DEBUG_DETAIL, "Process " << mpi_rank << " Phase 1 complete");
    
    // Reset request counter
    req_count = 0;
    
    // Phase 2: Odd ranks send, even ranks receive
    if (mpi_rank % 2 == 1) {
        if (local_grid.east_rank != MPI_PROC_NULL) {
            DEBUG_LOG(DEBUG_DETAIL, "Process " << mpi_rank << " Phase 2: Sending to EAST " << local_grid.east_rank);
            MPI_Isend(east_send_buf, ew_buffer_size, MPI_FLOAT, local_grid.east_rank, 2001, MPI_COMM_WORLD, &req[req_count++]);
        }
        
        if (local_grid.west_rank != MPI_PROC_NULL) {
            DEBUG_LOG(DEBUG_DETAIL, "Process " << mpi_rank << " Phase 2: Sending to WEST " << local_grid.west_rank);
            MPI_Isend(west_send_buf, ew_buffer_size, MPI_FLOAT, local_grid.west_rank, 2002, MPI_COMM_WORLD, &req[req_count++]);
        }
    } else {
        if (local_grid.west_rank != MPI_PROC_NULL) {
            DEBUG_LOG(DEBUG_DETAIL, "Process " << mpi_rank << " Phase 2: Receiving from WEST " << local_grid.west_rank);
            MPI_Irecv(west_recv_buf, ew_buffer_size, MPI_FLOAT, local_grid.west_rank, 2001, MPI_COMM_WORLD, &req[req_count++]);
        }
        
        if (local_grid.east_rank != MPI_PROC_NULL) {
            DEBUG_LOG(DEBUG_DETAIL, "Process " << mpi_rank << " Phase 2: Receiving from EAST " << local_grid.east_rank);
            MPI_Irecv(east_recv_buf, ew_buffer_size, MPI_FLOAT, local_grid.east_rank, 2002, MPI_COMM_WORLD, &req[req_count++]);
        }
    }
    
    // Wait for all communications in phase 2 to complete
    if (req_count > 0) {
        DEBUG_LOG(DEBUG_DETAIL, "Process " << mpi_rank << " Waiting for Phase 2 to complete");
        MPI_Waitall(req_count, req, MPI_STATUSES_IGNORE);
    }
    
    // Global barrier to ensure phase 2 is complete before unpacking
    MPI_Barrier(MPI_COMM_WORLD);
    DEBUG_LOG(DEBUG_DETAIL, "Process " << mpi_rank << " Phase 2 complete");
    
    // Unpack received data
    if (local_grid.east_rank != MPI_PROC_NULL && east_recv_buf != nullptr) {
        int idx = 0;
        for (int k = 0; k < local_kmt; k++) {
            for (int j = 0; j < local_jmt; j++) {
                int vec_idx;
                if (local_kmt == 1) {
                    vec_idx = (local_imt + 1) + (j + 1) * padded_imt;
                } else {
                    vec_idx = (local_imt + 1) + (j + 1) * padded_imt + (k + 1) * padded_imt * padded_jmt;
                }
                vec[vec_idx] = east_recv_buf[idx++];
            }
        }
    }
    
    if (local_grid.west_rank != MPI_PROC_NULL && west_recv_buf != nullptr) {
        int idx = 0;
        for (int k = 0; k < local_kmt; k++) {
            for (int j = 0; j < local_jmt; j++) {
                int vec_idx;
                if (local_kmt == 1) {
                    vec_idx = 0 + (j + 1) * padded_imt;
                } else {
                    vec_idx = 0 + (j + 1) * padded_imt + (k + 1) * padded_imt * padded_jmt;
                }
                vec[vec_idx] = west_recv_buf[idx++];
            }
        }
    }
    
    // Free east-west buffers
    if (east_send_buf) delete[] east_send_buf;
    if (east_recv_buf) delete[] east_recv_buf;
    if (west_send_buf) delete[] west_send_buf;
    if (west_recv_buf) delete[] west_recv_buf;
    
    // ===== NORTH-SOUTH COMMUNICATION =====
    // Similar phased approach to prevent deadlocks
    
    // Allocate buffers for north-south communication
    float* north_send_buf = nullptr;
    float* south_send_buf = nullptr;
    float* north_recv_buf = nullptr;
    float* south_recv_buf = nullptr;
    
    // Make sure that all processes use the same buffer size to avoid message truncation
    // Use collective operation to ensure everyone agrees on the maximum buffer size
    int local_ns_buffer_size = ns_buffer_size;
    int global_ns_buffer_size = local_ns_buffer_size;
    MPI_Allreduce(&local_ns_buffer_size, &global_ns_buffer_size, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD);
    ns_buffer_size = global_ns_buffer_size;
    
    if (local_grid.north_rank != MPI_PROC_NULL) {
        north_send_buf = new float[ns_buffer_size];
        memset(north_send_buf, 0, ns_buffer_size * sizeof(float));
        
        // Pack north send buffer (topmost interior row)
        int idx = 0;
        for (int k = 0; k < local_kmt && idx < ns_buffer_size; k++) {
            for (int i = 0; i < local_imt && idx < ns_buffer_size; i++) {
                int vec_idx;
                if (local_kmt == 1) {
                    vec_idx = (i + 1) + (local_jmt) * padded_imt;
                } else {
                    vec_idx = (i + 1) + (local_jmt) * padded_imt + (k + 1) * padded_imt * padded_jmt;
                }
                north_send_buf[idx++] = vec[vec_idx];
            }
        }
    }
    
    if (local_grid.south_rank != MPI_PROC_NULL) {
        south_send_buf = new float[ns_buffer_size];
        memset(south_send_buf, 0, ns_buffer_size * sizeof(float));
        
        // Pack south send buffer (bottommost interior row)
        int idx = 0;
        for (int k = 0; k < local_kmt && idx < ns_buffer_size; k++) {
            for (int i = 0; i < local_imt && idx < ns_buffer_size; i++) {
                int vec_idx;
                if (local_kmt == 1) {
                    vec_idx = (i + 1) + (1) * padded_imt;
                } else {
                    vec_idx = (i + 1) + (1) * padded_imt + (k + 1) * padded_imt * padded_jmt;
                }
                south_send_buf[idx++] = vec[vec_idx];
            }
        }
    }
    
    // Phase 3: North-South communication
    MPI_Request send_requests[2] = {MPI_REQUEST_NULL, MPI_REQUEST_NULL};
    MPI_Request recv_requests[2] = {MPI_REQUEST_NULL, MPI_REQUEST_NULL};
    int send_count = 0;
    int recv_count = 0;
    
    DEBUG_LOG(DEBUG_DETAIL, "Process " << mpi_rank << " Phase 3A: Starting communications");
    
    // Synchronize before north-south communication
    MPI_Barrier(MPI_COMM_WORLD);
    
    // Phase 3B: Everyone sends in north-south direction
    DEBUG_LOG(DEBUG_DETAIL, "Process " << mpi_rank << " Phase 3B: My turn to send");
    
    if (local_grid.north_rank != MPI_PROC_NULL) {
        DEBUG_LOG(DEBUG_DETAIL, "Process " << mpi_rank << " Phase 3B: Sending to NORTH " << local_grid.north_rank);
        MPI_Isend(north_send_buf, ns_buffer_size, MPI_FLOAT, local_grid.north_rank, 3001, MPI_COMM_WORLD, &send_requests[send_count++]);
    }
    
    if (local_grid.south_rank != MPI_PROC_NULL) {
        DEBUG_LOG(DEBUG_DETAIL, "Process " << mpi_rank << " Phase 3B: Sending to SOUTH " << local_grid.south_rank);
        MPI_Isend(south_send_buf, ns_buffer_size, MPI_FLOAT, local_grid.south_rank, 3002, MPI_COMM_WORLD, &send_requests[send_count++]);
    }
    
    DEBUG_LOG(DEBUG_DETAIL, "Process " << mpi_rank << " Phase 3B: Completed my sends");
    
    // Phase 3C: Receive in north-south direction, allowing for any tag to support the mirroring pattern
    DEBUG_LOG(DEBUG_DETAIL, "Process " << mpi_rank << " Phase 3C: Posting receives");
    
    if (local_grid.north_rank != MPI_PROC_NULL) {
        north_recv_buf = new float[ns_buffer_size];
        memset(north_recv_buf, 0, ns_buffer_size * sizeof(float));
        DEBUG_LOG(DEBUG_DETAIL, "Process " << mpi_rank << " Phase 3C: Posting receive from NORTH " << local_grid.north_rank);
        MPI_Irecv(north_recv_buf, ns_buffer_size, MPI_FLOAT, local_grid.north_rank, MPI_ANY_TAG, MPI_COMM_WORLD, &recv_requests[recv_count++]);
    }
    
    if (local_grid.south_rank != MPI_PROC_NULL) {
        south_recv_buf = new float[ns_buffer_size];
        memset(south_recv_buf, 0, ns_buffer_size * sizeof(float));
        DEBUG_LOG(DEBUG_DETAIL, "Process " << mpi_rank << " Phase 3C: Posting receive from SOUTH " << local_grid.south_rank);
        MPI_Irecv(south_recv_buf, ns_buffer_size, MPI_FLOAT, local_grid.south_rank, MPI_ANY_TAG, MPI_COMM_WORLD, &recv_requests[recv_count++]);
    }
    
    DEBUG_LOG(DEBUG_DETAIL, "Process " << mpi_rank << " Phase 3C: Waiting for " << recv_count << " receives to complete");
    
    // Wait for receives with a timeout mechanism
    if (recv_count > 0) {
        DEBUG_LOG(DEBUG_DETAIL, "Process " << mpi_rank << " Waiting for " << recv_count << " receives to complete");
        
        int completed = 0;
        int indices[2];  // Array to store indices of completed requests
        MPI_Status statuses[2];
        
        // Use a timeout approach with multiple test calls
        for (int attempt = 0; attempt < 20 && completed < recv_count; attempt++) {
            // Test for completion (fixed parameter order)
            MPI_Testsome(recv_count, recv_requests, &completed, indices, statuses);
            
            if (completed < recv_count) {
                // Not all receives have completed, print a warning after a few attempts
                if (attempt > 3) {
                    DEBUG_LOG(DEBUG_WARNING, "Process " << mpi_rank << " Warning - Still waiting for receives after attempt " << attempt);
                    
                    // Print which receives are still pending
                    for (int i = 0; i < recv_count; i++) {
                        int flag = 0;
                        MPI_Test(&recv_requests[i], &flag, MPI_STATUS_IGNORE);
                        if (!flag) {
                            int source = (i == 0) ? local_grid.north_rank : local_grid.south_rank;
                            // We're using ANY_TAG now
                            DEBUG_LOG(DEBUG_WARNING, "Process " << mpi_rank << " Request " << i 
                                      << " from source=" << source << " (using MPI_ANY_TAG) is still pending");
                            
                            // Check if there's a message waiting but with a different tag
                            if (probe_for_wrong_tags(source, MPI_ANY_TAG)) {
                                DEBUG_LOG(DEBUG_WARNING, "Process " << mpi_rank << " Found a message with a different tag - this is strange!");
                            }
                        }
                    }
                }
                
                // Sleep briefly to avoid busy-waiting
                usleep(100000);  // 100ms - increased from 50ms
            }
        }
        
        // Cancel any outstanding requests to prevent hanging
        for (int i = 0; i < recv_count; i++) {
            if (recv_requests[i] != MPI_REQUEST_NULL) {
                DEBUG_LOG(DEBUG_WARNING, "Process " << mpi_rank << " Cancelling stuck receive " << i);
                MPI_Cancel(&recv_requests[i]);
                MPI_Request_free(&recv_requests[i]);
                
                // Set the corresponding buffer to zeros if this was a cancelled request
                if (i == 0 && north_recv_buf != nullptr) {
                    DEBUG_LOG(DEBUG_WARNING, "Process " << mpi_rank << " Filling north buffer with zeros due to cancelled request");
                    std::fill(north_recv_buf, north_recv_buf + ns_buffer_size, 0.0f);
                } else if (i == 1 && south_recv_buf != nullptr) {
                    DEBUG_LOG(DEBUG_WARNING, "Process " << mpi_rank << " Filling south buffer with zeros due to cancelled request");
                    std::fill(south_recv_buf, south_recv_buf + ns_buffer_size, 0.0f);
                }
            }
        }
    }
    
    // Final barrier to ensure all processes have completed their communications
    MPI_Barrier(MPI_COMM_WORLD);
    DEBUG_LOG(DEBUG_DETAIL, "Process " << mpi_rank << " Global communications complete");
    
    // Unpack north-south data, with swapped destinations
    if (local_grid.north_rank != MPI_PROC_NULL && north_recv_buf != nullptr) {
        DEBUG_LOG(DEBUG_DETAIL, "Process " << mpi_rank << " Unpacking data from NORTH neighbor");
        int idx = 0;
        for (int k = 0; k < local_kmt && idx < ns_buffer_size; k++) {
            for (int i = 0; i < local_imt && idx < ns_buffer_size; i++) {
                // Place data received from north neighbor into south ghost cells (j=0)
                int vec_idx;
                if (local_kmt == 1) {
                    vec_idx = (i + 1) + 0 * padded_imt;
                } else {
                    vec_idx = (i + 1) + 0 * padded_imt + (k + 1) * padded_imt * padded_jmt;
                }
                vec[vec_idx] = north_recv_buf[idx++];
            }
        }
    } else {
        // No north neighbor or failed receive, set south boundary to zeros
        DEBUG_LOG(DEBUG_DETAIL, "Process " << mpi_rank << " Setting SOUTH boundary to zeros");
        for (int k = 0; k < local_kmt; k++) {
            for (int i = 0; i < local_imt; i++) {
                int vec_idx;
                if (local_kmt == 1) {
                    vec_idx = (i + 1) + 0 * padded_imt;
                } else {
                    vec_idx = (i + 1) + 0 * padded_imt + (k + 1) * padded_imt * padded_jmt;
                }
                vec[vec_idx] = 0.0f;
            }
        }
    }
    
    if (local_grid.south_rank != MPI_PROC_NULL && south_recv_buf != nullptr) {
        DEBUG_LOG(DEBUG_DETAIL, "Process " << mpi_rank << " Unpacking data from SOUTH neighbor");
        int idx = 0;
        for (int k = 0; k < local_kmt && idx < ns_buffer_size; k++) {
            for (int i = 0; i < local_imt && idx < ns_buffer_size; i++) {
                // Place data received from south neighbor into north ghost cells (j=local_jmt+1)
                int vec_idx;
                if (local_kmt == 1) {
                    vec_idx = (i + 1) + (local_jmt + 1) * padded_imt;
                } else {
                    vec_idx = (i + 1) + (local_jmt + 1) * padded_imt + (k + 1) * padded_imt * padded_jmt;
                }
                vec[vec_idx] = south_recv_buf[idx++];
            }
        }
    } else {
        // No south neighbor or failed receive, set north boundary to zeros
        DEBUG_LOG(DEBUG_DETAIL, "Process " << mpi_rank << " Setting NORTH boundary to zeros");
        for (int k = 0; k < local_kmt; k++) {
            for (int i = 0; i < local_imt; i++) {
                int vec_idx;
                if (local_kmt == 1) {
                    vec_idx = (i + 1) + (local_jmt + 1) * padded_imt;
                } else {
                    vec_idx = (i + 1) + (local_jmt + 1) * padded_imt + (k + 1) * padded_imt * padded_jmt;
                }
                vec[vec_idx] = 0.0f;
            }
        }
    }
    
    // Free north-south buffers
    if (north_send_buf) delete[] north_send_buf;
    if (north_recv_buf) delete[] north_recv_buf;
    if (south_send_buf) delete[] south_send_buf;
    if (south_recv_buf) delete[] south_recv_buf;
    
    // Handle z-direction boundaries (no communication needed)
    // Skip z-direction boundaries for 2D case (kmt=1)
    if (local_kmt > 1) {
        for (int j = 0; j < padded_jmt; j++) {
            for (int i = 0; i < padded_imt; i++) {
                vec[i + j * padded_imt + 0 * padded_imt * padded_jmt] = 0.0f;  // Bottom boundary
                vec[i + j * padded_imt + (local_kmt + 1) * padded_imt * padded_jmt] = 0.0f;  // Top boundary
            }
        }
    }
    
    // Final synchronization
    MPI_Barrier(MPI_COMM_WORLD);
    DEBUG_LOG(DEBUG_DETAIL, "Process " << mpi_rank << " Global communications complete");
}

} // namespace TensorMatMultiCavity 