#include "tensor_mat_multi_cav.h"
#include <iostream>
#include <fstream>
#include <sstream>
#include <cmath>
#include <mpi.h>
#include <random>
#include <vector>
#include <cassert>

using namespace TensorMatMultiCavity;

// Definitions of the MPI process information used throughout this file.
// NOTE(review): the rest of this file refers to these as
// TensorMatMultiCavity::mpi_rank etc., so the definitions must live inside
// the namespace. Defining them at global scope (even under a
// `using namespace`) creates distinct ::mpi_rank objects and leaves the
// qualified names referring to different (possibly undefined) entities.
// Assumes the header declares these `extern` in the namespace — TODO confirm.
namespace TensorMatMultiCavity {
int mpi_rank = 0;               // this process's rank in MPI_COMM_WORLD
int mpi_size = 1;               // total number of MPI processes
int mpi_procs_x = 1;            // process-grid extent in X
int mpi_procs_y = 1;            // process-grid extent in Y
int mpi_coords[2] = {0, 0};     // this process's (x, y) grid coordinates
int debug_level = DEBUG_WARNING;
}

// Helper function to fill `vec[0..size)` with uniform random values in [0, 1).
// Replaces the unseeded rand()/RAND_MAX idiom with the <random> facilities
// this file already includes: rand() was never seeded via srand() here and
// offers poor distribution quality.
void init_vector(float* vec, size_t size) {
    // Fixed seed keeps runs reproducible, matching the determinism of the
    // original unseeded rand().
    static std::mt19937 gen(12345u);
    std::uniform_real_distribution<float> dist(0.0f, 1.0f);
    for (size_t i = 0; i < size; ++i) {
        vec[i] = dist(gen);
    }
}

// Helper function to save `size` floats, one per line, to `filename`.
// Errors are reported to stderr; the function does not throw.
void save_vector(const float* vec, size_t size, const std::string& filename) {
    std::ofstream file(filename);
    if (!file.is_open()) {
        std::cerr << "Error: Could not open file " << filename << std::endl;
        return;
    }

    for (size_t i = 0; i < size; ++i) {
        // '\n' instead of std::endl: avoids flushing the stream per element.
        file << vec[i] << '\n';
    }

    // Surface write failures (disk full, etc.) instead of dropping them.
    if (!file) {
        std::cerr << "Error: Failed while writing file " << filename << std::endl;
    }
    // ofstream closes automatically on destruction (RAII).
}

// Helper function to load `size` floats from `filename` into `vec`.
// Stops and reports to stderr if the file cannot be opened, ends early, or
// contains malformed data; elements past the failure point are left untouched.
void load_vector(float* vec, size_t size, const std::string& filename) {
    std::ifstream file(filename);
    if (!file.is_open()) {
        std::cerr << "Error: Could not open file " << filename << std::endl;
        return;
    }

    for (size_t i = 0; i < size; ++i) {
        // Previously a short or malformed file was silently ignored,
        // leaving trailing elements zeroed (C++11 failed-extraction rule)
        // with no diagnostic.
        if (!(file >> vec[i])) {
            std::cerr << "Error: File " << filename
                      << " ended or was malformed at element " << i << std::endl;
            return;
        }
    }
    // ifstream closes automatically on destruction (RAII).
}

// Helper function to compute the Euclidean (L2) norm of `vec[0..size)`.
// Returns 0 for an empty vector.
float compute_norm(const float* vec, size_t size) {
    float sum_sq = 0.0f;
    const float* const end = vec + size;
    // Left-to-right accumulation in float, matching the original fold order.
    for (const float* p = vec; p != end; ++p) {
        sum_sq += (*p) * (*p);
    }
    return std::sqrt(sum_sq);
}

// Test function for multi-cavity boundary communication
void test_multi_cavity_boundary_comm() {
    // Create multi-cavity grid parameters
    TensorMatMultiCavity::MultiCavityGridParams grid;

    // Define cavity IDs
    int main_cavity_id = 0;
    int left_cavity_id = 1;
    int right_cavity_id = 2;

    // Create local grid info for each cavity
    TensorMatMultiCavity::LocalGridInfo main_info;
    main_info.local_imt = 32;
    main_info.local_jmt = 32;
    main_info.local_kmt = 32;
    main_info.total_padded_points = (32 + 2) * (32 + 2) * (32 + 2);
    main_info.rank = TensorMatMultiCavity::mpi_rank;
    main_info.size = TensorMatMultiCavity::mpi_size;

    TensorMatMultiCavity::LocalGridInfo left_info;
    left_info.local_imt = 16;
    left_info.local_jmt = 32;
    left_info.local_kmt = 32;
    left_info.total_padded_points = (16 + 2) * (32 + 2) * (32 + 2);
    left_info.rank = TensorMatMultiCavity::mpi_rank;
    left_info.size = TensorMatMultiCavity::mpi_size;

    TensorMatMultiCavity::LocalGridInfo right_info;
    right_info.local_imt = 16;
    right_info.local_jmt = 32;
    right_info.local_kmt = 32;
    right_info.total_padded_points = (16 + 2) * (32 + 2) * (32 + 2);
    right_info.rank = TensorMatMultiCavity::mpi_rank;
    right_info.size = TensorMatMultiCavity::mpi_size;

    // Add cavities to the grid
    grid.add_cavity(main_info);
    grid.add_cavity(left_info);
    grid.add_cavity(right_info);

    // Add connections between cavities
    grid.add_connection(left_cavity_id, main_cavity_id, TensorMatMultiCavity::CavityConnection::Type::FACE_X, 15, 0);
    grid.add_connection(right_cavity_id, main_cavity_id, TensorMatMultiCavity::CavityConnection::Type::FACE_X, 0, 31);

    // Allocate vector for all cavities
    std::vector<float> vec(grid.get_total_points());

    // Initialize vector with test values
    for (size_t i = 0; i < vec.size(); i++) {
        vec[i] = static_cast<float>(i);
    }

    // Communicate boundaries
    TensorMatMultiCavity::communicate_boundaries_multi_cavity(vec.data(), grid);

    // Print results (only on rank 0)
    if (TensorMatMultiCavity::mpi_rank == 0) {
        std::cout << "Boundary communication completed successfully." << std::endl;
    }
}

int main(int argc, char** argv) {
    // Initialize MPI
    MPI_Init(&argc, &argv);

    // Get MPI process information
    MPI_Comm_rank(MPI_COMM_WORLD, &TensorMatMultiCavity::mpi_rank);
    MPI_Comm_size(MPI_COMM_WORLD, &TensorMatMultiCavity::mpi_size);

    // Create process grid
    int dims[2] = {0, 0};
    MPI_Dims_create(TensorMatMultiCavity::mpi_size, 2, dims);
    TensorMatMultiCavity::mpi_procs_x = dims[0];
    TensorMatMultiCavity::mpi_procs_y = dims[1];

    // Create Cartesian communicator
    int periods[2] = {0, 0};
    MPI_Comm cart_comm;
    MPI_Cart_create(MPI_COMM_WORLD, 2, dims, periods, 0, &cart_comm);

    // Get process coordinates
    MPI_Cart_coords(cart_comm, TensorMatMultiCavity::mpi_rank, 2, TensorMatMultiCavity::mpi_coords);

    // Run test
    test_multi_cavity_boundary_comm();

    // Clean up
    MPI_Comm_free(&cart_comm);
    MPI_Finalize();

    return 0;
} 