#include <mpi.h>

// POSIX / system headers
#include <fcntl.h>
#include <numa.h>
#include <numaif.h>
#include <omp.h>
#include <pthread.h>
#include <sys/mman.h>
#include <sys/types.h>  // for off_t and other POSIX types
#include <unistd.h>     // for getpid(), close(), ftruncate()

// C / C++ standard library
#include <algorithm>
#include <cerrno>       // errno (used with strerror)
#include <chrono>
#include <condition_variable>
#include <cstdlib>      // malloc()/free()
#include <cstring>
#include <future>
#include <mutex>
#include <numeric>
#include <stdio.h>
#include <string>
#include <thread>
#include <vector>

// Project headers
#include "../common/Logger.hpp"

// Forward declaration; definition follows main() below.
void hexdump(const void* data, size_t size);

// Size of each rank's shared memory pool segment (2 GiB)
const size_t LOCAL_POOL_SIZE = ((size_t)2)*1024*1024*1024;

// Kept for reference; currently unused.
//const int RANKS_PER_NUMA = 2;  // 8 ranks total = 2 ranks per NUMA node

// One POSIX shared-memory segment owned by a single on-node rank.
class SharedMemory {
public:
    void* mapped_region;  // address the segment is mapped at in this process
    int fd;               // shm_open() descriptor (closed once mmap'd)
    std::string name;     // shm_open() name, e.g. "shm_pool_<rank>"
};

class SharedMemoryPool {
public:
    void* local_region;  // This rank's NUMA-local memory
    int numa_node;       // NUMA node for this rank
    MPI_Comm node_comm;
    int node_rank;
    int node_size;

    std::vector<SharedMemory> shm_regions;
    cpu_set_t cpuset_local;

public:
    SharedMemoryPool() : local_region(nullptr) {
        // Initialize NUMA
        if (numa_available() < 0) {
            perror("NUMA not available");
            MPI_Abort(MPI_COMM_WORLD, 1);
        }

        // Create communicator for ranks on same node
        MPI_Comm_split_type(MPI_COMM_WORLD, MPI_COMM_TYPE_SHARED, 0,
                           MPI_INFO_NULL, &node_comm);
        
        MPI_Comm_rank(node_comm, &node_rank);
        MPI_Comm_size(node_comm, &node_size);

        // Get NUMA node of current rank
        int cpu = sched_getcpu();
        if (cpu < 0) {
            perror("Failed to get current CPU");
            MPI_Abort(MPI_COMM_WORLD, 1);
        }
        numa_node = numa_node_of_cpu(cpu);

        // Initialize vectors
        shm_regions.resize(node_size);

        // Set NUMA affinity for each thread
        CPU_ZERO(&cpuset_local);
            
        // Get cpuset_local
        for (int cpu = 0; cpu < numa_num_configured_cpus(); cpu++) {
            if (numa_node_of_cpu(cpu) == numa_node) {
                CPU_SET(cpu, &cpuset_local);
            }
        }

        // Each rank creates its own shared memory segment
        std::string shm_name = generateShmName(node_rank);
        shm_regions[node_rank].name = shm_name;

        // Cleanup any existing shared memory
        shm_unlink(shm_name.c_str());

        // Create shared memory segment
        shm_regions[node_rank].fd = shm_open(shm_name.c_str(),
                                          O_CREAT | O_RDWR,
                                          S_IRUSR | S_IWUSR);
        if (shm_regions[node_rank].fd == -1) {
            printf("Rank %d: shm_open failed: %s\n", node_rank, strerror(errno));
            MPI_Abort(MPI_COMM_WORLD, 1);
        }

        // Set the size
        if (ftruncate(shm_regions[node_rank].fd, LOCAL_POOL_SIZE) == -1) {
            printf("Rank %d: ftruncate failed: %s\n", node_rank, strerror(errno));
            close(shm_regions[node_rank].fd);
            MPI_Abort(MPI_COMM_WORLD, 1);
        }

        // Map the memory
        void* mapped_addr = nullptr;

        // First try without huge pages
        mapped_addr = mmap(NULL, LOCAL_POOL_SIZE, 
                          PROT_READ | PROT_WRITE,
                          MAP_SHARED | MAP_POPULATE,
                          shm_regions[node_rank].fd, 0);

        if (mapped_addr == MAP_FAILED) {
            printf("Rank %d: mmap failed: %s (errno=%d)\n", 
                   node_rank, strerror(errno), errno);
            close(shm_regions[node_rank].fd);
            shm_unlink(shm_name.c_str());
            MPI_Abort(MPI_COMM_WORLD, 1);
        }

        // Bind to NUMA node
        struct bitmask* bm = numa_allocate_nodemask();
        numa_bitmask_clearall(bm);
        numa_bitmask_setbit(bm, numa_node);

        if (mbind(mapped_addr, LOCAL_POOL_SIZE, MPOL_BIND, 
                 bm->maskp, bm->size + 1, MPOL_MF_MOVE | MPOL_MF_STRICT) == -1) {
            printf("Rank %d: mbind failed: %s\n", node_rank, strerror(errno));
        }

        // Pre-touch memory pages to ensure proper allocation
        #pragma omp parallel
        {
            pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &cpuset_local);
            
            #pragma omp for
            for(size_t i = 0; i < LOCAL_POOL_SIZE; i += 4096) { // 4KB page size
                static_cast<char*>(mapped_addr)[i] = 0;
            }
        }

        numa_free_nodemask(bm);
        close(shm_regions[node_rank].fd);

        // Store mapped address
        shm_regions[node_rank].mapped_region = mapped_addr;

        // Initialize memory
        char* ptr = static_cast<char*>(mapped_addr);
        
        #pragma omp parallel
        {
            pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &cpuset_local);
            
            #pragma omp for
            for(int j = 0; j < LOCAL_POOL_SIZE; j++) {
                ptr[j] = 0;
            }
        }

        //for(int j = 0; j < LOCAL_POOL_SIZE; j++) {
        //        ptr[j] = node_rank;
        //}


        MPI_Barrier(node_comm);

        // Map other ranks' memory regions
        for (int rank = 0; rank < node_size; rank++) {
            
            shm_regions[rank].name = generateShmName(rank);

            if (rank != node_rank) {
                int fd = shm_open(shm_regions[rank].name.c_str(), O_RDWR, 0666);
                if (fd == -1) {
                    printf("Rank %d: shm_open failed for rank %d: %s\n", 
                           node_rank, rank, strerror(errno));
                    MPI_Abort(MPI_COMM_WORLD, 1);
                }

                void* other_mapped_addr = mmap(NULL, LOCAL_POOL_SIZE, 
                                             PROT_READ | PROT_WRITE,
                                             MAP_SHARED, fd, 0);
                close(fd);

                if (other_mapped_addr == MAP_FAILED) {
                    printf("Rank %d: mmap failed for rank %d: %s\n", 
                           node_rank, rank, strerror(errno));
                    MPI_Abort(MPI_COMM_WORLD, 1);
                }

                shm_regions[rank].mapped_region = other_mapped_addr;
            }
        }

        // Set local_region
        local_region = shm_regions[node_rank].mapped_region;

        MPI_Barrier(node_comm);
        printNumaAffinity();
    }

    void write(size_t offset, const void* data, size_t size) {
        if (offset + size > LOCAL_POOL_SIZE * node_size) {
            printf("Error: Write exceeds total pool size\n");
            return;
        }
        size_t target_rank = offset / LOCAL_POOL_SIZE;
        size_t local_offset = offset % LOCAL_POOL_SIZE;
        
        char* dest = static_cast<char*>(shm_regions[target_rank].mapped_region) + local_offset;
        const char* src = static_cast<const char*>(data);
        
        //printf("write: target_rank=%d, local_offset=%zu, size=%zu\n", target_rank, local_offset, size);

        // Use stream_copy for better performance
        stream_copy(src, dest, size);
    }

    void stream_copy(const char* src, char* dest, size_t size) {
        if (!src || !dest || size == 0) return;
        
        //printf("stream_copy: size=%zu\n", size);
        
        /*for (int i = 0; i < CPU_SETSIZE; i++) {
            if (CPU_ISSET(i, &cpuset_local)) {
                printf("1");
            } else {
                printf("0");
            }
        }
        printf("\n");
        */

        #pragma omp parallel
        {
            // Set thread affinity
            pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t), &cpuset_local);
            
            #pragma omp for
            for(size_t j = 0; j < size; j++) 
            {
                dest[j] = src[j];
            }
        }
    }
    
    void read(size_t offset, void* data, size_t size) {
        if (offset + size > LOCAL_POOL_SIZE * node_size) {
            printf("Error: Read exceeds total pool size\n");
            return;
        }
        int target_rank = offset / LOCAL_POOL_SIZE;
        size_t local_offset = offset % LOCAL_POOL_SIZE;
        
        const char* src = static_cast<const char*>(shm_regions[target_rank].mapped_region) + local_offset;
        char* dest = static_cast<char*>(data);
        
        // Use stream_copy for better performance
        stream_copy(src, dest, size);
    }

  void measurePerformanceRank0() {
        printf("Rank 0: Measuring performance\n");
        const int NUM_ITERATIONS = 5;
        const size_t CHUNK_SIZE = LOCAL_POOL_SIZE; // 4GB chunks
        
        // Allocate test buffers on local NUMA node
        char* local_buf = static_cast<char*>(numa_alloc_onnode(CHUNK_SIZE, 0));

        // Test local and remote memory access
        for (int target_rank = 1; target_rank < node_size; target_rank++) {
            size_t offset = target_rank * LOCAL_POOL_SIZE;

            printf("111:target_rank=%d, offset=%zu\n", target_rank, offset);          
            // Write test
            auto start = std::chrono::high_resolution_clock::now();
            for (int i = 0; i < NUM_ITERATIONS; i++) {
                //write(offset, local_buf, CHUNK_SIZE);
                char* src = static_cast<char*>(shm_regions[0].mapped_region);
                char* dst = static_cast<char*>(shm_regions[target_rank].mapped_region);
                stream_copy(src, dst, CHUNK_SIZE);
            }
 printf("222\n");   
            auto end = std::chrono::high_resolution_clock::now();
            auto write_time = std::chrono::duration_cast<std::chrono::microseconds>(end - start).count();

            // Read test
            start = std::chrono::high_resolution_clock::now();
            for (int i = 0; i < NUM_ITERATIONS; i++) {
                read(offset, local_buf, CHUNK_SIZE);
            }
            end = std::chrono::high_resolution_clock::now();
            auto read_time = std::chrono::duration_cast<std::chrono::microseconds>(end - start).count();
 printf("333\n");   
            
            double write_bw = (NUM_ITERATIONS * CHUNK_SIZE) / (write_time * 1000.0);
            double read_bw = (NUM_ITERATIONS * CHUNK_SIZE) / (read_time * 1000.0);
            printf("Access from Rank %d (NUMA %d) to Rank %d:\n", 
                    node_rank, numa_node, target_rank);
            printf("  Write bandwidth: %.2f GB/s\n", write_bw);
            printf("  Read bandwidth: %.2f GB/s\n", read_bw);
        }

    }

    void measurePerformance() {
        const int WARMUP_ITERATIONS = 3;
        const int NUM_ITERATIONS = 5;
        const size_t CHUNK_SIZE = LOCAL_POOL_SIZE;
        
        // Allocate test buffers on local NUMA node
        std::vector<char*> local_bufs(node_size);
        for (int i = 0; i < node_size; i++) {
            local_bufs[i] = static_cast<char*>(numa_alloc_onnode(CHUNK_SIZE, numa_node));
            if (!local_bufs[i]) {
                perror("numa_alloc_onnode failed in measurePerformance");
                MPI_Abort(MPI_COMM_WORLD, 1);
            }
            //memset(local_bufs[i], i*i, CHUNK_SIZE);

            #pragma omp parallel for
            for(size_t j = 0; j < CHUNK_SIZE; j++){
                local_bufs[i][j] = static_cast<char>(i*2 % 256 + 100);  ;
            }
        }

        // Test local and remote memory access
        for (int target_rank = 0; target_rank < node_size; target_rank++) {
            //int target_numa = target_rank / RANKS_PER_NUMA;
            
            // Warmup phase
            for (int i = 0; i < WARMUP_ITERATIONS; i++) {
                size_t offset = target_rank * LOCAL_POOL_SIZE;
                write(offset, local_bufs[node_rank], CHUNK_SIZE);
                read(offset, local_bufs[0], CHUNK_SIZE);
            }


            // Measurement phase
            std::vector<double> write_times(NUM_ITERATIONS);
            std::vector<double> read_times(NUM_ITERATIONS);

            for (int i = 0; i < NUM_ITERATIONS; i++) {
                size_t offset = target_rank * LOCAL_POOL_SIZE;
                
                // Write test
                auto start = std::chrono::high_resolution_clock::now();
                write(offset, local_bufs[node_rank], CHUNK_SIZE);
                auto end = std::chrono::high_resolution_clock::now();
                write_times[i] = std::chrono::duration_cast<std::chrono::microseconds>(end - start).count();

                // Read test
                start = std::chrono::high_resolution_clock::now();
                read(offset, local_bufs[0], CHUNK_SIZE);
                end = std::chrono::high_resolution_clock::now();
                read_times[i] = std::chrono::duration_cast<std::chrono::microseconds>(end - start).count();
            }

            if (node_rank == 0) {
                // Calculate statistics for write
                double write_min = *std::min_element(write_times.begin(), write_times.end());
                double write_max = *std::max_element(write_times.begin(), write_times.end());
                double write_avg = std::accumulate(write_times.begin(), write_times.end(), 0.0) / NUM_ITERATIONS;

                // Calculate statistics for read
                double read_min = *std::min_element(read_times.begin(), read_times.end());
                double read_max = *std::max_element(read_times.begin(), read_times.end());
                double read_avg = std::accumulate(read_times.begin(), read_times.end(), 0.0) / NUM_ITERATIONS;

                // Convert to bandwidth (GB/s)
                auto to_bandwidth = [CHUNK_SIZE](double time_us) { return CHUNK_SIZE / (time_us * 1000.0); };

                printf("Access from Rank %d (NUMA %d) to Rank %d :\n", 
                       node_rank, numa_node, target_rank);
                printf("  Write bandwidth (GB/s) - Min: %.2f, Max: %.2f, Avg: %.2f\n",
                       to_bandwidth(write_max), to_bandwidth(write_min), to_bandwidth(write_avg));
                printf("  Read bandwidth (GB/s) - Min: %.2f, Max: %.2f, Avg: %.2f\n",
                       to_bandwidth(read_max), to_bandwidth(read_min), to_bandwidth(read_avg));


            }
        }

        if(node_rank == 0){
            for(int i = 0; i < node_size; i++){
                hexdump(shm_regions[i].mapped_region, 32);
            }
        }

        // Cleanup
        for (int i = 0; i < node_size; i++) {
            numa_free(local_bufs[i], CHUNK_SIZE);
        }
    }

    ~SharedMemoryPool() {
        // Unmap all regions
        for (int rank = 0; rank < node_size; rank++) {
            if (shm_regions[rank].mapped_region) {
                munmap(shm_regions[rank].mapped_region, LOCAL_POOL_SIZE);
            }
        }
        
        // Only rank 0 unlinks the shared memory segments
        if (node_rank == 0) {
            for (int rank = 0; rank < node_size; rank++) {
                shm_unlink(shm_regions[rank].name.c_str());
            }
        }
        
        //MPI_Comm_free(&node_comm);
    }

    // Add these getter methods
    int getNodeRank() const { return node_rank; }
    int getNodeSize() const { return node_size; }


    std::string generateShmName(int rank) const {
        return "shm_pool_" + std::to_string(rank);
    }

    // Add this function to the SharedMemoryPool class
    void printNumaAffinity() {
        if (node_rank != 0) return;

        printf("\n=== NUMA Affinity Report ===\n");
        for (size_t i = 0; i < shm_regions.size(); i++) {
            if (!shm_regions[i].mapped_region) continue;

            int status;
            int mode;
            struct bitmask *nodemask = numa_allocate_nodemask();
            
            // Get NUMA policy and nodes for this memory region
            status = get_mempolicy(&mode, nodemask->maskp, nodemask->size, 
                                 shm_regions[i].mapped_region, MPOL_F_ADDR);
            
            if (status == -1) {
                printf("Region %zu: Failed to get NUMA policy: %s\n", i, strerror(errno));
                numa_free_nodemask(nodemask);
                continue;
            }

            // Find which NUMA nodes this memory is on
            printf("Region %zu (owned by rank %zu):\n", i, i);
            printf("  Physical address: %p\n", shm_regions[i].mapped_region);
            printf("  NUMA policy: %s\n", 
                   mode == MPOL_DEFAULT ? "default" :
                   mode == MPOL_PREFERRED ? "preferred" :
                   mode == MPOL_BIND ? "bind" :
                   mode == MPOL_INTERLEAVE ? "interleave" : "unknown");
            printf("  NUMA nodes: ");
            bool found_node = false;
            for (int node = 0; node < numa_num_possible_nodes(); node++) {
                if (numa_bitmask_isbitset(nodemask, node)) {
                    if (found_node) printf(", ");
                    printf("%d", node);
                    found_node = true;
                }
            }
            printf("\n");

            // Get page count per NUMA node
            void *pages[1] = { shm_regions[i].mapped_region };
            int status_nodes[1];
            move_pages(0, 1, pages, NULL, status_nodes, 0);
            if (status_nodes[0] >= 0) {
                printf("  Currently on NUMA node: %d\n", status_nodes[0]);
            }
            printf("\n");

            numa_free_nodemask(nodemask);
        }
        printf("=========================\n\n");
    }
};

// Example usage in main
int main(int argc, char** argv) {

    //printf("main: start\n");
    int provided;
    MPI_Init_thread(&argc, &argv, MPI_THREAD_FUNNELED, &provided);
    if (provided < MPI_THREAD_FUNNELED) {
        printf("Warning: The MPI implementation does not support MPI_THREAD_FUNNELED\n");
        // You might want to fall back to MPI_THREAD_SINGLE
        MPI_Finalize();
        return 1;
    }

    common::Logger::getInstance().init(common::LogLevel::INFO, true, "log");
    LOG_INFO("main: mpi init done");

    int world_rank, world_size;
    MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);
    MPI_Comm_size(MPI_COMM_WORLD, &world_size);
    
    // Create shared memory pool
    SharedMemoryPool pool;

    LOG_INFO("main: pool created");

    //printf("main: pool created\n");

    //std::vector<char> send_buf(LOCAL_POOL_SIZE);
    //std::vector<char> recv_buf(LOCAL_POOL_SIZE);

    char* send_buf = (char*)malloc(LOCAL_POOL_SIZE);
    char* recv_buf = (char*)malloc(LOCAL_POOL_SIZE);

    // Initialize send buffer with world_rank for unique identification
    #pragma omp parallel for
    for(size_t i = 0; i < LOCAL_POOL_SIZE; i++) {
        send_buf[i] = static_cast<char>(world_rank);
    }

    // Use node_rank for local shared memory offset
    pool.write(pool.getNodeRank() * LOCAL_POOL_SIZE, send_buf, LOCAL_POOL_SIZE);
    MPI_Barrier(MPI_COMM_WORLD);

    for(int i = 0; i < pool.getNodeSize(); i++) {
        if( i == world_rank){
            hexdump(pool.shm_regions[world_rank].mapped_region, 32);
        }
        MPI_Barrier(MPI_COMM_WORLD);
    }

    // Read from previous rank on the same node
    int prev_node_rank = (pool.getNodeRank() - 1 + pool.getNodeSize()) % pool.getNodeSize();
    pool.read(prev_node_rank * LOCAL_POOL_SIZE, recv_buf, LOCAL_POOL_SIZE);

    // Verify data
    bool data_valid = true;
    for(size_t i = 0; i < LOCAL_POOL_SIZE; i++) {
        if(recv_buf[i] != static_cast<char>(prev_node_rank)) {
            data_valid = false;
            break;
        }
    }
    printf("Rank %d: Data verification %s\n", world_rank, data_valid ? "passed" : "failed");


    // Measure performance
    if(world_rank == 0){
        pool.measurePerformance();
    }

    MPI_Finalize();
    return 0;
}

// Prints a classic hex + ASCII dump of `size` bytes starting at `data`,
// 16 bytes per row, with non-printable bytes rendered as '.'.
void hexdump(const void* data, size_t size) {
    const unsigned char* base = static_cast<const unsigned char*>(data);

    // Header row: column offsets 00..0F.
    printf("==========================================\n");
    printf("Offset:         ");
    for (int col = 0; col < 16; col++) {
        printf("%02X ", col);
    }
    printf("  ASCII\n");

    for (size_t row = 0; row < size; row += 16) {
        // Number of real bytes on this row (the last row may be short).
        const size_t remain = size - row;
        const size_t cols = (remain < 16) ? remain : 16;

        // Row address.
        printf("%p: ", static_cast<const void*>(base + row));

        // Hex column; short final rows are padded so ASCII stays aligned.
        for (size_t col = 0; col < 16; col++) {
            if (col < cols) {
                printf("%02X ", base[row + col]);
            } else {
                printf("   ");
            }
        }

        // ASCII column.
        printf(" ");
        for (size_t col = 0; col < cols; col++) {
            const unsigned char byte = base[row + col];
            printf("%c", (byte >= 32 && byte <= 126) ? byte : '.');
        }
        printf("\n");
    }
}

