/*
 * mpiBench - MPI Communication Performance Benchmark Tool
 * 
 * Purpose:
 * This benchmark tool measures the performance of various MPI communication operations
 * including point-to-point and collective communications. It provides detailed timing
 * and performance metrics for:
 * - Basic operations (Barrier, Broadcast)
 * - Point-to-point communications (P2P, P2P_DUAL)
 * - Collective operations (Alltoall, Gather, Scatter, Reduce, etc.)
 * - NUMA-aware communication patterns
 *
 * Features:
 * - Configurable message sizes from bytes to gigabytes
 * - Customizable iteration counts and time limits
 * - Memory usage optimization including NUMA awareness
 * - Detailed performance statistics (min/max/avg timings)
 * - Process and hardware topology awareness
 * - Optional data validation
 *
 * Originally forked from Lawrence Livermore National Security (LLNS) mpiBench
 * Modified to include additional features and optimizations
 * 
 * Note: this source is based on a fork of mpiBench from Lawrence Livermore National Security (LLNS), LLC
 */

/* 
Compile flags:
  -DNO_BARRIER       - Drops MPI_Barrier() call that separates consecutive collective calls
  -DUSE_GETTIMEOFDAY - Use gettimeofday() for timing rather than MPI_Wtime()
*/

//typedef unsigned long long size_t;

#define _GNU_SOURCE         /* Must be defined before any includes */

#include <stdio.h>
#include <unistd.h>
#include <stdlib.h>
#include <assert.h>
#include <signal.h>
#include <string.h>
#include <sys/time.h>
#include <mpi.h>
#include <math.h>
#include <sched.h>    // for CPU_ZERO, CPU_SET, etc
#include <unistd.h>   // for sysconf
#include <sys/types.h>     // For pid_t
#include <sys/syscall.h>   // For SYS_getcpu
#include <numa.h>
#include <numaif.h>
#include <sys/mman.h>     // For mmap flags

#if 0
#ifndef _AIX
#include "print_mpi_resources.h"
#endif
#endif

#include <sched.h>         /* For sched_setaffinity */
#include <sys/mman.h>      /* For mmap flags */
#include <sys/types.h>     /* For pid_t */
#include <sys/syscall.h>   /* For SYS_getcpu */

char VERS[] = "1.5";

/*
------------------------------------------------------
Globals
------------------------------------------------------
*/
#define KILO (1024ULL)
#define MEGA (KILO*KILO)
#define GIGA (KILO*MEGA)

#define ITRS_EST       (5)        /* Number of iterations used to estimate time */
#define ITRS_RUN       (1000)     /* Number of iterations to run (without timelimits) */
#define MSG_SIZE_START (0)        /* Lower bound of message sizes in bytes */
#define MSG_SIZE_STOP  (256*KILO) /* Upper bound of message sizes in bytes */
#define MAX_PROC_MEM   (2ULL*GIGA)   /* Limit on MPI buffer sizes in bytes */

/* Compile with -DNO_BARRIER to drop barriers between collective calls
     Adds barrier between test iterations to sync all procs before issuing next collective
     Prevents non-root MPI ranks from escaping ahead into future iterations
     Barrier overhead is not subtracted from timing results
*/
#ifdef NO_BARRIER
  #define __BAR__(comm)
#else
  #define __BAR__(comm) MPI_Barrier(comm)
#endif

/* we use a bit mask to flag which collectives to test */
#define BARRIER    (0x001)
#define BCAST      (0x002)
#define ALLTOALL   (0x004)
#define ALLGATHER  (0x008)
#define GATHER     (0x010)
#define SCATTER    (0x020)
#define ALLREDUCE  (0x040)
#define REDUCE     (0x080)
#define ALLTOALLV  (0x100)
#define ALLGATHERV (0x200)
#define GATHERV    (0x400)
#define P2P        (0x800)
#define P2P_DUAL   (0x1000)

#define NUM_TESTS  (12)

char* TEST_NAMES[] = {
  "Barrier", "Bcast", "Alltoall", "Allgather", "Gather", "Scatter", "Allreduce", "Reduce", "Alltoallv", "Allgatherv", "Gatherv", "P2P"
};

/*int   TEST_FLAGS[] = {
   BARRIER,   BCAST,   ALLTOALL,   
   ALLGATHER,   GATHER,   SCATTER,   ALLREDUCE,   
   REDUCE,   ALLTOALLV,   ALLGATHERV,   GATHERV,   
   P2P, P2P_DUAL
};*/

int   TEST_FLAGS[] = {
   BARRIER,  BCAST, P2P, P2P_DUAL
};

  
int rank_local; /* my MPI rank */
int rank_count; /* number of ranks in job */
int dimid_key;
size_t allocated_memory = 0; /* number of bytes allocated */

/*
------------------------------------------------------
Utility Functions
------------------------------------------------------
*/

/* Print usage syntax (rank 0 only) and exit all callers with status 1 */
int usage()
{
    if (rank_local == 0) {
        printf("\n");
        printf("  Usage:  mpiBench [options] [operations]\n");
        printf("\n");
        printf("  Options:\n");
        printf("    -b <byte>  Beginning message size in bytes (default 0)\n");
        /* defaults below now match MSG_SIZE_STOP (256K) and MAX_PROC_MEM (2G);
           the old text claimed 1K and 1G */
        printf("    -e <byte>  Ending message size in bytes (default 256K)\n");
        printf("    -m <byte>  Process memory buffer limit (send+recv) in bytes (default 2G)\n");
        printf("    -i <itrs>  Maximum number of iterations for a single test (default 1000)\n");
        printf("    -t <usec>  Time limit for any single test in microseconds (default 0 = infinity)\n");
        printf("    -d <ndim>  Number of Cartesian dimensions to split processes in (default 0 = MPI_COMM_WORLD only)\n");
        printf("    -p <size>  Minimum partition size (number of ranks) to divide MPI_COMM_WORLD by\n");
        printf("    -c         Check receive buffer for expected data in last iteration (default disabled)\n");
        printf("    -C         Check receive buffer for expected data every iteration (default disabled)\n");
        printf("    -h         Print this help screen and exit\n");
        printf("    where <byte> = [0-9]+[KMG], e.g., 32K or 64M\n");
        printf("\n");
        printf("  Operations:\n");
        printf("    Barrier\n");
        printf("    Bcast\n");
        printf("    Alltoall, Alltoallv\n");
        printf("    Allgather, Allgatherv\n");
        printf("    Gather, Gatherv\n");
        printf("    Scatter\n");
        printf("    Allreduce\n");
        printf("    Reduce\n");
        printf("    P2P\n");
        printf("\n");
    }
    exit(1);
}

/* Allocate size bytes, zero-fill them, and track the running total in
   allocated_memory; aborts the program on allocation failure.
   'debug' is a caller-supplied label used only in the error message. */
void* _ALLOC_MAIN_ (size_t size, char* debug) 
{
    /* calloc zero-initializes, replacing the former malloc + memset pair */
    void* p_buf = calloc(1, size);
    if (!p_buf) {
        /* %zu is the correct printf conversion for size_t (old code used %ld,
           which is undefined behavior when size_t and long differ) */
        printf("ERROR:  Allocating memory %s:  requesting %zu bytes\n", debug, size);
        exit(1);
    }
    allocated_memory += size;
    return p_buf;
}

/* Processes byte strings in the following format:
     <float_num>[kKmMgG][bB]
   and returns the number of bytes as a size_t.
   Returns 0 when nothing numeric could be parsed.
   Examples: 1K, 2.5kb, .5GB
*/
size_t atobytes(char* str)
{
    char* cursor;
    size_t multiplier = 1;

    double value = strtod(str, &cursor);
    if (value == 0.0 && cursor == str) {
        /* no digits at the front of the string */
        return 0;
    }

    if (*cursor != '\0') {
        /* a unit suffix follows the number */
        switch (*cursor) {
            case 'k': case 'K': multiplier = 1024ULL;                break;
            case 'm': case 'M': multiplier = 1024ULL * 1024;         break;
            case 'g': case 'G': multiplier = 1024ULL * 1024 * 1024;  break;
            default:
                printf("ERROR:  unexpected byte string %s\n", str);
                exit(1);
        }
        cursor++;

        /* optional trailing 'b' or 'B', e.g. "10KB" */
        if (*cursor == 'b' || *cursor == 'B') {
            cursor++;
        }
        if (*cursor != '\0') {
            printf("ERROR:  unexpected byte string: %s\n", str);
            exit(1);
        }
    }

    if (value < 0) { printf("ERROR:  byte string must be positive: %s\n", str);  exit(1); }
    return (size_t) (value * (double) multiplier);
}

/*
------------------------------------------------------
TIMING CODE - start/stop the timer and measure the difference
------------------------------------------------------
*/

#ifdef USE_GETTIMEOFDAY

/* use gettimeofday() for timers */
#include <sys/time.h>
#define __TIME_START__    (gettimeofday(&g_timeval__start, &g_timezone))
#define __TIME_END__      (gettimeofday(&g_timeval__end  , &g_timezone))
#define __TIME_USECS__    (d_Time_Diff_Micros(g_timeval__start, g_timeval__end))
#define d_Time_Diff_Micros(timeval__start, timeval__end) \
  ( \
    (double) (  (timeval__end.tv_sec  - timeval__start.tv_sec ) * 1000000 \
              + (timeval__end.tv_usec - timeval__start.tv_usec)  ) \
  )
#define d_Time_Micros(timeval) \
  ( \
    (double) (  timeval.tv_sec * 1000000 \
              + timeval.tv_usec  ) \
  )
struct timeval  g_timeval__start, g_timeval__end;
struct timezone g_timezone;

#else

/* use MPI_Wtime for timers (recommended)
   on some systems gettimeofday may be reset backwards by a global clock,
   which can even lead to negative length time intervals
*/
#define __TIME_START__    (g_timeval__start    = MPI_Wtime())
#define __TIME_END__      (g_timeval__end      = MPI_Wtime())
#define __TIME_USECS__    ((g_timeval__end - g_timeval__start) * 1000000.0)
double g_timeval__start, g_timeval__end;

#endif /* of USE_GETTIMEOFDAY */

/* Gather one timing value from each task, print min/max/avg on rank 0,
   and return the average (broadcast to all ranks). */
double Print_Timings(double value, char* title, size_t bytes, int iters, MPI_Comm comm)
{
    int i;
    double min, max, avg;
    double* times = NULL;

    if(rank_local == 0) {
        times = (double*) malloc(sizeof(double) * rank_count);
        if (!times) {
            /* old code dereferenced the unchecked malloc result */
            printf("ERROR:  Allocating memory for timing statistics\n");
            MPI_Abort(MPI_COMM_WORLD, 1);
        }
    }

    /* gather single time value from each task to rank 0 */
    MPI_Gather(&value, 1, MPI_DOUBLE, times, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);

    /* rank 0 computes the min, max, and average over the set */
    if(rank_local == 0) {
        avg = 0.0;
        min = times[0];   /* seed from real data instead of magic sentinels */
        max = times[0];
        for(i = 0; i < rank_count; i++) {
            if(times[i] < min) { min = times[i]; }
            if(times[i] > max) { max = times[i]; }
            avg += times[i];
        }
        avg /= (double) rank_count;
        /* note: a standard-deviation accumulator existed here but was
           always zeroed before printing, so it has been removed */

        /* determine how many ranks are in this communicator */
        int nranks;
        const char* str = "";
        MPI_Comm_size(comm, &nranks);

        printf("%-20.20s\t", title);
        printf("Bytes:\t%zu\tIters:\t%7d\t", bytes, iters);   /* %zu for size_t */
        printf("Avg(us):\t%8.4f\tMin:\t%8.4f\tMax:\t%8.4f\t", avg, min, max);
        printf("Comm: %s\tRanks: %d\n", str, nranks);
        fflush(stdout);

        free((void*) times);
    }

    /* broadcast the average value back out so every rank returns the same number */
    MPI_Bcast(&avg, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);

    return avg;
}

/*
------------------------------------------------------
MAIN
------------------------------------------------------
*/

char *sbuffer;
char *rbuffer;
int  *sendcounts, *sdispls, *recvcounts, *rdispls;
size_t buffer_size = 0;
int check_once;
int check_every;

struct argList {
    int    iters;
    size_t messStart;
    size_t messStop;
    size_t memLimit;
    double timeLimit;
    int    testFlags;
    int    checkOnce;
    int    checkEvery;
    int    ndims;
    size_t    partSize;
};

/* Parse command-line options and operation names into *args.
   Returns 1 on success, 0 on a malformed command line. */
int processArgs(int argc, char **argv, struct argList* args)
{
  int i, j;
  char *argptr;
  char flag;

  /* set to default values */
  args->iters      = ITRS_RUN;
  args->messStart  = (size_t) MSG_SIZE_START;
  args->messStop   = (size_t) MSG_SIZE_STOP;
  args->memLimit   = (size_t) MAX_PROC_MEM;
  args->timeLimit  = 0;
  args->testFlags  = BARRIER | BCAST | P2P | P2P_DUAL;
  args->checkOnce  = 0;
  args->checkEvery = 0;
  args->ndims      = 0;
  args->partSize   = 0;

  int iters_set = 0;
  int time_set  = 0;
  int user_ops  = 0;  /* becomes 1 once the user names an operation explicitly */

  /* only index TEST_FLAGS as far as it actually has entries */
  int nflags = (int)(sizeof(TEST_FLAGS) / sizeof(TEST_FLAGS[0]));

  for (i=0; i<argc; i++)
  {
    /* check for options */
    if (argv[i][0] == '-')
    {
      /* flag is the first char following the '-' */
      flag   = argv[i][1];
      argptr = NULL;

      /* parameters that take no argument */
      if (strchr("cC", flag))
      {
        switch(flag)
        {
        case 'c':
          args->checkOnce = 1;
          break;
        case 'C':
          args->checkEvery = 1;
          break;
        }
        continue;
      }

      /* check that we've got a valid option */
      if (!strchr("beithmdp", flag))
      {
        printf("\nInvalid flag -%c\n", flag);
        return(0);
      }

      /* handles "-i#" or "-i #"; verify a value actually follows
         (old code read argv[i+1] without a bounds check) */
      if (argv[i][2] != 0) {
        argptr = &(argv[i][2]);
      } else if (i + 1 < argc) {
        argptr = argv[i+1];
        i++;
      } else {
        printf("\nMissing argument for flag -%c\n", flag);
        return(0);
      }

      switch(flag)
      {
      case 'b':
        args->messStart = atobytes(argptr);
        break;
      case 'e':
        args->messStop = atobytes(argptr);
        break;
      case 'i':
        args->iters = atoi(argptr);
        iters_set = 1;
        break;
      case 'm':
        args->memLimit = atobytes(argptr);
        break;
      case 't':
        args->timeLimit = (double) atol(argptr);
        time_set = 1;
        break;
      case 'd':
        args->ndims = atoi(argptr);
        break;
      case 'p':
        args->partSize = atoi(argptr);
        break;
      default:
        return(0);
      }
      /* option fully consumed; don't also treat it as an operation name
         (old code fell through and matched option arguments below) */
      continue;
    }

    /* non-option word: match it against the operation names;
       the first explicit operation replaces the default test set
       (the old sentinel compared against 0x1FFF, which no longer
       matched the default flags, so the reset never fired) */
    for(j = 0; j < NUM_TESTS && j < nflags; j++) {
      if(!strcasecmp(TEST_NAMES[j], argv[i])) {
        if(!user_ops) { args->testFlags = 0; user_ops = 1; }
        args->testFlags |= TEST_FLAGS[j];
      }
    }
  }

  /* if the user gave no iteration limit and no time limit, set a reasonable
     time limit (old code evaluated this inside the argument loop) */
  if (!iters_set && !time_set) { args->timeLimit = 50000; }

  if (args->iters == 0)
  {
    printf("\n  Must define number of operations per measurement!\n\n");
    return(0);
  }

  return(1);
}

/* Fill the global send buffer with a rank-dependent byte pattern
   that the check_* routines can later verify. */
void init_sbuffer(int rank)
{
    size_t idx;
    for (idx = 0; idx < buffer_size; idx++) {
        sbuffer[idx] = (char) ((idx + 1) * (rank + 1) + idx);
    }
}

/* fill the receive buffer with a known pattern */
void init_rbuffer(int rank)
{
    /* nothing fancy here -- just blank it out */
    memset(rbuffer, 0, buffer_size);
}

/* Check the send buffer for any deviation from the pattern written by
   init_sbuffer; aborts the whole job on corruption. */
void check_sbuffer(int rank)
{
    size_t i;
    char value;
    for(i=0; i<buffer_size; i++) {
        value = (char) ((i+1)*(rank+1) + i);
        if (sbuffer[i] != value) {
            /* %zu for size_t index (old %d was undefined behavior) */
            printf("Send buffer corruption detected on rank %d at sbuffer[%zu]\n", rank, i);
            MPI_Abort(MPI_COMM_WORLD, 1);
        }
    }
}

/* Check element_count bytes of 'buffer' starting at byte_offset against the
   pattern that rank 'rank' wrote at src_byte_offset in its send buffer
   (see init_sbuffer); aborts the whole job on corruption. */
void check_rbuffer(char* buffer, size_t byte_offset, int rank, size_t src_byte_offset, size_t element_count)
{
    size_t i, j;
    char value;
    buffer += byte_offset;
    for(i=0, j=src_byte_offset; i<element_count; i++, j++) {
        value = (char) ((j+1)*(rank+1) + j);
        if (buffer[i] != value) {
              /* %zu for the size_t offset (old %d was undefined behavior) */
              printf("Receive buffer corruption detected on rank %d at rbuffer[%zu] from rank %d\n", rank_local, byte_offset+i, rank);
              MPI_Abort(MPI_COMM_WORLD, 1);
        }
    }
}

/* Parameters describing a single collective timing run; filled in by the
   driver and handed to each time_* function. */
struct collParams {
    size_t   size;     /* message (element) size in bytes */
    int      iter;     /* number of iterations to test with */
    int      root;     /* root of collective operation */
    MPI_Comm comm;     /* communicator to test collective on */
    int      myrank;   /* my rank in the above communicator */
    int      nranks;   /* number of ranks in the above communicator */
    size_t      count;    /* element count for collective */
    MPI_Datatype type; /* MPI_Datatype to be used in collective (assumed contiguous) */
    MPI_Op   reduceop; /* MPI_Reduce operation to be used */
};

/* Time MPI_Barrier on p->comm; returns average microseconds per call. */
double time_barrier(struct collParams* p)
{
    int it;
    MPI_Barrier(MPI_COMM_WORLD);   /* sync everyone before timing starts */

    __TIME_START__;
    for (it = 0; it < p->iter; it++) {
        MPI_Barrier(p->comm);
    }
    __TIME_END__;

    return __TIME_USECS__ / (double)p->iter;
}

/* Time MPI_Bcast from p->root; optionally validate buffer contents.
   Returns average microseconds per iteration. */
double time_bcast(struct collParams* p)
{
    int it;
    int am_root = (p->myrank == p->root);
    char* buffer = am_root ? sbuffer : rbuffer;   /* root sends, others receive */

    MPI_Barrier(MPI_COMM_WORLD);

    __TIME_START__;
    for (it = 0; it < p->iter; it++) {
        int validate = check_every || (check_once && it == p->iter - 1);
        if (validate) {
            init_sbuffer(p->myrank);
            init_rbuffer(p->myrank);
        }

        MPI_Bcast(buffer, p->count, p->type, p->root, p->comm);
        __BAR__(p->comm);

        if (validate) {
            check_sbuffer(p->myrank);
            check_rbuffer(buffer, 0, p->root, 0, p->size);
        }
    }
    __TIME_END__;

    return __TIME_USECS__ / (double)p->iter;
}

/* Time MPI_Alltoall; optionally validate the received data.
   Returns average microseconds per iteration. */
double time_alltoall(struct collParams* p)
{
    int i, j;
    MPI_Barrier(MPI_COMM_WORLD);

    __TIME_START__;
    for (i = 0; i < p->iter; i++) {
        int check = (check_every || (check_once && i == p->iter-1));
        if (check) {
            init_sbuffer(p->myrank);
            init_rbuffer(p->myrank);
        }

        /* BUG FIX: the receive count must be the element count (p->count),
           not the byte size (p->size); the two differ whenever the element
           type is wider than one byte */
        MPI_Alltoall(sbuffer, p->count, p->type, rbuffer, p->count, p->type, p->comm);
        __BAR__(p->comm);

        if (check) {
            check_sbuffer(p->myrank);
            for (j = 0; j < p->nranks; j++) {
                check_rbuffer(rbuffer, j*p->size, j, p->myrank*p->size, p->size);
            }
        }
    }
    __TIME_END__;

    return __TIME_USECS__ / (double)p->iter;
}

/* Time MPI_Alltoallv where rank pairs exchange varying element counts;
   optionally validate the received data.
   Rank m sends ((i+m)*chunksize) mod (count+1) elements to rank i. */
double time_alltoallv(struct collParams* p)
{
    int i, j, k;
    int disp = 0;
    int chunksize = p->count / p->nranks;
    if (chunksize == 0) { chunksize = 1; }
    for (i = 0; i < p->nranks; i++) {
        int count = ((i+p->myrank)*chunksize) % (p->count+1);
        sendcounts[i] = count;
        recvcounts[i] = count;
        sdispls[i] = disp;
        rdispls[i] = disp;
        disp += count;
    }
    size_t scale = (p->count > 0) ? (p->size/p->count) : 0;  /* bytes per element */
    MPI_Barrier(MPI_COMM_WORLD);

    __TIME_START__;
    for (i = 0; i < p->iter; i++) {
        int check = (check_every || (check_once && i == p->iter-1));
        if (check) {
            init_sbuffer(p->myrank);
            init_rbuffer(p->myrank);
        }

        MPI_Alltoallv(sbuffer, sendcounts, sdispls, p->type, rbuffer, recvcounts, rdispls, p->type, p->comm);
        __BAR__(p->comm);

        if (check) {
            check_sbuffer(p->myrank);
            for (k = 0; k < p->nranks; k++) {
                /* source byte offset in rank k's send buffer = sum of the
                   element counts rank k sends to ranks before me, scaled
                   to bytes.  BUG FIX: old code took the modulus with
                   (p->size+1) instead of (p->count+1) -- inconsistent with
                   how the counts were built above -- and never converted
                   the element offset to bytes. */
                disp = 0;
                for (j = 0; j < p->myrank; j++) { disp += ((j+k)*chunksize) % (p->count+1); }
                check_rbuffer(rbuffer, rdispls[k]*scale, k, (size_t)disp*scale, recvcounts[k]*scale);
            }
        }
    }
    __TIME_END__;

    return __TIME_USECS__ / (double)p->iter;
}

/* Time MPI_Allgather; optionally validate the data gathered from each rank.
   Returns average microseconds per iteration. */
double time_allgather(struct collParams* p)
{
    int it, src;
    MPI_Barrier(MPI_COMM_WORLD);

    __TIME_START__;
    for (it = 0; it < p->iter; it++) {
        int validate = check_every || (check_once && it == p->iter - 1);
        if (validate) {
            init_sbuffer(p->myrank);
            init_rbuffer(p->myrank);
        }

        MPI_Allgather(sbuffer, p->count, p->type, rbuffer, p->count, p->type, p->comm);
        __BAR__(p->comm);

        if (validate) {
            check_sbuffer(p->myrank);
            /* rank src's contribution lands at byte offset src*size */
            for (src = 0; src < p->nranks; src++) {
                check_rbuffer(rbuffer, src * p->size, src, 0, p->size);
            }
        }
    }
    __TIME_END__;

    return __TIME_USECS__ / (double)p->iter;
}

double time_allgatherv(struct collParams* p)
{
    int i, j, count;
    int disp = 0;
    int chunksize = p->count / p->nranks;
    if (chunksize == 0) { chunksize = 1; }
    for ( i = 0; i < p->nranks; i++) {
        int count = (i*chunksize) % (p->count+1);
        recvcounts[i] = count;
        rdispls[i] = disp;
        disp += count;
    }
    size_t scale = (p->count > 0) ? (p->size/p->count) : 0;
    MPI_Barrier(MPI_COMM_WORLD);

    count = (p->myrank*chunksize) % (p->count+1);
    __TIME_START__;
    for (i = 0; i < p->iter; i++) {
        int check = (check_every || (check_once && i == p->iter-1));
        if (check) {
            init_sbuffer(p->myrank);
            init_rbuffer(p->myrank);
        }

        MPI_Allgatherv(sbuffer, count, p->type, rbuffer, recvcounts, rdispls, p->type, p->comm);
        __BAR__(p->comm);

        if (check) {
            check_sbuffer(p->myrank);
            for (j = 0; j < p->nranks; j++) {
                check_rbuffer(rbuffer, rdispls[j]*scale, j, 0, recvcounts[j]*scale);
            }
        }
    }
    __TIME_END__;

    return __TIME_USECS__ / (double)p->iter;
}

/* Time a gather to p->root, issued as MPI_Igather immediately completed
   with MPI_Wait; optionally validate the buffers on the root. */
double time_gather(struct collParams* p)
{
    int it, src;
    MPI_Barrier(MPI_COMM_WORLD);

    __TIME_START__;
    for (it = 0; it < p->iter; it++) {
        int validate = check_every || (check_once && it == p->iter - 1);
        if (validate) {
            init_sbuffer(p->myrank);
            init_rbuffer(p->myrank);
        }

        /* non-blocking gather followed directly by a wait -- equivalent in
           effect to a blocking MPI_Gather */
        MPI_Request request;
        MPI_Igather(sbuffer, p->count, p->type, rbuffer, p->count, p->type, p->root, p->comm, &request);
        MPI_Wait(&request, MPI_STATUS_IGNORE);

        __BAR__(p->comm);

        if (validate) {
            check_sbuffer(p->myrank);
            if (p->myrank == p->root) {
                for (src = 0; src < p->nranks; src++) {
                    check_rbuffer(rbuffer, src * p->size, src, 0, p->size);
                }
            }
        }
    }
    __TIME_END__;

    return __TIME_USECS__ / (double)p->iter;
}

double time_gatherv(struct collParams* p)
{
    int i, j, count;
    int disp = 0;
    int chunksize = p->count / p->nranks;
    if (chunksize == 0) { chunksize = 1; }
    for ( i = 0; i < p->nranks; i++) {
        int count = (i*chunksize) % (p->count+1);
        recvcounts[i] = count;
        rdispls[i] = disp;
        disp += count;
    }
    size_t scale = (p->count > 0) ? (p->size/p->count) : 0;
    MPI_Barrier(MPI_COMM_WORLD);

    count = (p->myrank*chunksize) % (p->count+1);
    __TIME_START__;
    for (i = 0; i < p->iter; i++) {
        int check = (check_every || (check_once && i == p->iter-1));
        if (check) {
            init_sbuffer(p->myrank);
            init_rbuffer(p->myrank);
        }

        MPI_Gatherv(sbuffer, count, p->type, rbuffer, recvcounts, rdispls, p->type, p->root, p->comm);
        __BAR__(p->comm);

        if (check) {
            check_sbuffer(p->myrank);
            if (p->myrank == p->root) {
                for (j = 0; j < p->nranks; j++) {
                    check_rbuffer(rbuffer, rdispls[j]*scale, j, 0, recvcounts[j]*scale);
                }
            }
        }
    }
    __TIME_END__;

    return __TIME_USECS__ / (double)p->iter;
}

/* Time MPI_Scatter from p->root; optionally validate the received chunk. */
double time_scatter(struct collParams* p)
{
    int it;
    MPI_Barrier(MPI_COMM_WORLD);

    __TIME_START__;
    for (it = 0; it < p->iter; it++) {
        int validate = check_every || (check_once && it == p->iter - 1);
        if (validate) {
            init_sbuffer(p->myrank);
            init_rbuffer(p->myrank);
        }

        MPI_Scatter(sbuffer, p->count, p->type, rbuffer, p->count, p->type, p->root, p->comm);
        __BAR__(p->comm);

        if (validate) {
            check_sbuffer(p->myrank);
            /* my chunk came from byte offset myrank*size in the root's buffer */
            check_rbuffer(rbuffer, 0, p->root, p->myrank * p->size, p->size);
        }
    }
    __TIME_END__;

    return __TIME_USECS__ / (double)p->iter;
}

/* 4 (or 8) MPI procs split across two nodes, one proc per CPU socket.
   Measures send/recv bandwidth between corresponding sockets on different
   nodes: rank r pairs with rank r +/- nranks/2.
   After timing, dumps the head and tail of the send/receive buffers to a
   per-rank text file for manual inspection. */
double time_p2p_dual(struct collParams* p)
{
    if(p->nranks != 4 && p->nranks != 8)
    {
        /* old message claimed only the 4-rank case was accepted */
        printf("time_p2p_dual requires 4 or 8 ranks (got %d)\n", p->nranks);
        return 0;
    }

    int i;
    MPI_Request send_req, recv_req;
    MPI_Status send_status, recv_status;
    int peer;

    /* Map each rank to the corresponding socket on the other node:
       with 4 ranks, 0<->2 and 1<->3 */
    if (p->myrank < p->nranks/2) {
        peer = p->myrank + p->nranks/2;  /* first node talks to second node */
    } else {
        peer = p->myrank - p->nranks/2;  /* second node talks to first node */
    }

    MPI_Barrier(MPI_COMM_WORLD);

    __TIME_START__;
    for (i = 0; i < p->iter; i++) {
        int check = (check_every || (check_once && i == p->iter-1));
        if (check) {
            init_sbuffer(p->myrank);
            init_rbuffer(p->myrank);
        }

        /* Non-blocking send and receive with peer */
        MPI_Isend(sbuffer, p->count, p->type, peer, 0, p->comm, &send_req);
        MPI_Irecv(rbuffer, p->count, p->type, peer, 0, p->comm, &recv_req);

        /* Wait for both operations to complete */
        MPI_Wait(&send_req, &send_status);
        MPI_Wait(&recv_req, &recv_status);

        __BAR__(p->comm);

        if (check) {
            check_sbuffer(p->myrank);
            check_rbuffer(rbuffer, 0, peer, 0, p->size);
        }
    }
    __TIME_END__;

    /* dump buffer head/tail for manual inspection */
    char filename[64] = {0};
    snprintf(filename, sizeof(filename), "test_p2p_dual_rank_%d.txt", p->myrank);
    FILE *fp = fopen(filename, "w");
    if (fp) {   /* old code wrote through fp without checking fopen's result */
        /* clamp dump length so small messages can't index out of bounds
           (old code computed p->count-15 even when count < 15) */
        size_t n = (p->count < 15) ? p->count : 15;

        fprintf(fp, "rank = %d\n", p->myrank);
        fprintf(fp, "send data to %d\n", peer);

        fprintf(fp, "head:");
        for (size_t k = 0; k < n; ++k) fprintf(fp, "%d, ", sbuffer[k]);
        fprintf(fp, "tail:");
        for (size_t k = 0; k < n; ++k) fprintf(fp, "%d, ", sbuffer[p->count - n + k]);

        fprintf(fp, "\nrecv data from %d\n", peer);
        fprintf(fp, "head:");
        for (size_t k = 0; k < n; ++k) fprintf(fp, "%d, ", rbuffer[k]);
        fprintf(fp, "tail:");
        for (size_t k = 0; k < n; ++k) fprintf(fp, "%d, ", rbuffer[p->count - n + k]);
        fprintf(fp, "\n");
        fclose(fp);
    }

    return __TIME_USECS__ / (double)p->iter;
}

/* Return 1 if rank1 and rank2 run on the same host (hostname match), else 0.
   NOTE(review): this must be executed by BOTH rank1 and rank2 -- each side
   pairs one send with one recv; calling it from any other rank, or from only
   one of the pair, will hang.  The send/recv order is mirrored on the two
   sides so the blocking sends cannot deadlock each other.
   NOTE(review): len2 is declared but never assigned or used. */
static inline int on_same_node(int rank1, int rank2) {
    char hostname1[256], hostname2[256];
    int len1, len2;
    
    /* my own processor name; len1 excludes the terminating NUL */
    MPI_Get_processor_name(hostname1, &len1);
    
    // Get hostname of other rank
    if (rank1 == rank_local) {
        /* I am rank1: send my name first, then receive the peer's */
        MPI_Send(hostname1, len1 + 1, MPI_CHAR, rank2, 0, MPI_COMM_WORLD);
        MPI_Recv(hostname2, sizeof(hostname2), MPI_CHAR, rank2, 0, 
                 MPI_COMM_WORLD, MPI_STATUS_IGNORE);
    } else {
        /* I am the peer: receive first, then send mine back */
        MPI_Recv(hostname2, sizeof(hostname2), MPI_CHAR, rank1, 0, 
                 MPI_COMM_WORLD, MPI_STATUS_IGNORE);
        MPI_Send(hostname1, len1 + 1, MPI_CHAR, rank1, 0, MPI_COMM_WORLD);
    }
    
    return (strcmp(hostname1, hostname2) == 0);
}

double time_p2p(struct collParams* p) {
    MPI_Info info;
    MPI_Info_create(&info);
    MPI_Info_set(info, "mpi_assert_allow_overtaking", "true");
    
    // Use stack-allocated arrays for requests and statuses
    MPI_Request reqs[2];
    MPI_Status stats[2];
    
    // Allocate aligned buffers only if needed
    void *aligned_sbuffer = sbuffer;
    void *aligned_rbuffer = rbuffer;
    
    #ifdef USE_ALIGNED_BUFFER
    size_t alignment = 64;
    if (p->size >= 4096) {  // Only align for larger messages
        if (posix_memalign(&aligned_sbuffer, alignment, p->size) != 0) {
            aligned_sbuffer = sbuffer;  // Fallback to original buffer
        } else {
            memcpy(aligned_sbuffer, sbuffer, p->size);
        }
        
        if (posix_memalign(&aligned_rbuffer, alignment, p->size) != 0) {
            aligned_rbuffer = rbuffer;  // Fallback to original buffer
        }
    }
    #endif
    
    // Prefetch data into cache
    #ifdef __GNUC__
    for (size_t i = 0; i < p->size; i += 64) {
        __builtin_prefetch(&((char*)aligned_sbuffer)[i], 0, 3);
        __builtin_prefetch(&((char*)aligned_rbuffer)[i], 1, 3);
    }
    #endif
    
    __TIME_START__;
    for (int i = 0; i < p->iter; i++) {
        // Post receives before sends
        MPI_Irecv(aligned_rbuffer, p->count, p->type,
                  (p->myrank - 1 + p->nranks) % p->nranks, 
                  0, p->comm, &reqs[1]);
        
        MPI_Isend(aligned_sbuffer, p->count, p->type, 
                  (p->myrank + 1) % p->nranks, 
                  0, p->comm, &reqs[0]);
        
        MPI_Waitall(2, reqs, stats);
    }
    __TIME_END__;
    
    // Cleanup
    MPI_Info_free(&info);
    
    #ifdef USE_ALIGNED_BUFFER
    if (aligned_sbuffer != sbuffer) {
        free(aligned_sbuffer);
    }
    if (aligned_rbuffer != rbuffer) {
        free(aligned_rbuffer);
    }
    #endif
    
    return __TIME_USECS__ / (double)p->iter;
}

/* Time MPI_Allreduce with p->reduceop over p->count elements. */
double time_allreduce(struct collParams* p)
{
    int it;
    MPI_Barrier(MPI_COMM_WORLD);   /* sync before timing */

    __TIME_START__;
    for (it = 0; it < p->iter; it++) {
        MPI_Allreduce(sbuffer, rbuffer, p->count, p->type, p->reduceop, p->comm);
        __BAR__(p->comm);
    }
    __TIME_END__;

    return __TIME_USECS__ / (double)p->iter;
}

/* Time MPI_Reduce to p->root with p->reduceop over p->count elements. */
double time_reduce(struct collParams* p)
{
    int it;
    MPI_Barrier(MPI_COMM_WORLD);   /* sync before timing */

    __TIME_START__;
    for (it = 0; it < p->iter; it++) {
        MPI_Reduce(sbuffer, rbuffer, p->count, p->type, p->reduceop, p->root, p->comm);
        __BAR__(p->comm);
    }
    __TIME_END__;

    return __TIME_USECS__ / (double)p->iter;
}

/* Prime, estimate, and time the collective called by the specified function
   for the given message size, iteration count, and time limit.  Then, print
   out the results.  Returns the timing reported by the final run.
*/
double get_time(double (*fn)(struct collParams* p), char* title, struct collParams* p, int iter, int time_limit)
{
    double time;
    int iter_limit;

    /* initialize the send and receive buffer with something */
    init_sbuffer(p->myrank);
    init_rbuffer(p->myrank);

    /* prime the collective with an initial call */
    p->iter = 1;
    time = fn(p);

    /* run through a small number of iterations to get a rough estimate of time */
    p->iter = ITRS_EST;
    time = fn(p);

    /* if a time limit has been specified, use the estimate to limit the
       maximum number of iterations; guard against a zero estimate to avoid
       dividing by zero.
       BUG FIX: a leftover debug statement (iter_limit = 5;) unconditionally
       overwrote this computed limit, defeating both -i and -t. */
    iter_limit = (time_limit > 0 && time > 0.0) ? (int) (time_limit / time) : iter;
    iter_limit = (iter_limit < iter) ? iter_limit : iter;

    /* use the number calculated by the root (rank 0) which should be the slowest */
    MPI_Bcast(&iter_limit, 1, MPI_INT, 0, MPI_COMM_WORLD);

    /* run the tests (unless the limited iteration count is smaller than that used in the estimate) */
    if(iter_limit > ITRS_EST) {
        p->iter = iter_limit;
        time = fn(p);
    } else {
        iter_limit = ITRS_EST;
    }

    /* Collect and print the timing results recorded by each process */
    Print_Timings(time, title, p->size, iter_limit, p->comm);

    return time;
}

/* Return the CPU this process is currently executing on, or -1 when the
   information is unavailable (no SYS_getcpu, or the syscall failed). */
static inline int get_cpu(void) {
#ifdef SYS_getcpu
    unsigned current;
    long rc = syscall(SYS_getcpu, &current, NULL, NULL);
    if (rc == 0) {
        return (int) current;
    }
#endif
    return -1;
}

/* Print host, current CPU, and CPU socket/package for every rank, one rank
   at a time (serialized with barriers so output is not interleaved). */
void print_process_info(int rank, int nranks) {
    char hostname[256];
    gethostname(hostname, sizeof(hostname));

    // Get current CPU using our helper function
    int cpu = get_cpu();

    if (rank == 0) {
        printf("\n=== Process Information ===\n");
    }

    // Print sequentially using token passing
    for (int i = 0; i < nranks; i++) {
        if (i == rank) {
            printf("Rank %d: Host=%s, Current CPU=%d",
                   rank, hostname, cpu);

            /* Read the socket/package id straight from sysfs.
               (Old code shelled out via popen("cat ...") -- a fork/exec
               just to read one file.) */
            char path[256];
            char pkg[64];
            int have_pkg = 0;
            snprintf(path, sizeof(path),
                     "/sys/devices/system/cpu/cpu%d/topology/physical_package_id",
                     cpu);
            FILE *fp = fopen(path, "r");
            if (fp) {
                if (fgets(pkg, sizeof(pkg), fp)) {
                    pkg[strcspn(pkg, "\n")] = 0;   /* strip trailing newline */
                    have_pkg = 1;
                }
                fclose(fp);
            }
            if (have_pkg) {
                printf("         Socket/Package=%s\n", pkg);
            } else {
                printf("\n");   /* old code left the line unterminated here */
            }

            fflush(stdout);
        }
        MPI_Barrier(MPI_COMM_WORLD);
    }

    if (rank == 0) {
        printf("=========================\n\n");
        fflush(stdout);
    }
}

/*
 * Bind the process to its current CPU, restrict memory allocation to the
 * local NUMA node, and -- for message sizes of at least one huge page --
 * attempt to replace the global send/receive buffers with huge-page
 * backed mappings.
 *
 * p - collective parameters; only p->size (message size in bytes) is read.
 *
 * NOTE(review): buffers replaced here come from mmap() but are released
 * with free() in main()'s cleanup path; mixing allocators like that is
 * undefined behavior, and p->size may be smaller than the originally
 * allocated buffer_size.  Ownership/size tracking must be fixed before
 * this routine is used on a timed path.
 */
void optimize_memory(struct collParams* p) {
    /* Pin to the CPU we are currently running on so the NUMA binding
     * below stays valid.  BUG FIX: the original passed get_cpu()'s -1
     * failure value straight into CPU_SET. */
    int cpu = get_cpu();
    if (cpu >= 0) {
        cpu_set_t cpuset;
        CPU_ZERO(&cpuset);
        CPU_SET(cpu, &cpuset);
        sched_setaffinity(0, sizeof(cpuset), &cpuset);
    }

    /* Bind future allocations to the local NUMA node, if it is known. */
    int numa_node = (cpu >= 0) ? numa_node_of_cpu(cpu) : -1;
    if (numa_node >= 0) {
        struct bitmask* mask = numa_allocate_nodemask();
        numa_bitmask_setbit(mask, (unsigned)numa_node);
        numa_set_membind(mask);
        numa_bitmask_free(mask);
    }

    /* Use huge pages for large buffers if available.
     * NOTE(review): assumes 2MB huge pages -- confirm against
     * /proc/meminfo Hugepagesize on the target system. */
    const size_t huge_page_size = 2 * 1024 * 1024;
    if (p->size >= huge_page_size) {
        /* MAP_HUGETLB mappings must be a multiple of the huge page size;
         * round the request up. */
        size_t map_len = (p->size + huge_page_size - 1) & ~(huge_page_size - 1);

        void* new_sbuffer = mmap(NULL, map_len,
                                 PROT_READ | PROT_WRITE,
                                 MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB,
                                 -1, 0);
        if (new_sbuffer != MAP_FAILED) {
            free(sbuffer);          /* free(NULL) is a no-op */
            sbuffer = new_sbuffer;
        }

        void* new_rbuffer = mmap(NULL, map_len,
                                 PROT_READ | PROT_WRITE,
                                 MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB,
                                 -1, 0);
        if (new_rbuffer != MAP_FAILED) {
            free(rbuffer);
            rbuffer = new_rbuffer;
        }
    }
}

/*
 * Allocate a cache-line aligned buffer of `size` bytes and pre-fault its
 * pages so they are resident before any timed benchmark loop touches them.
 *
 * Returns the buffer (release with free()), or NULL on allocation failure.
 */
void* allocate_aligned_buffer(size_t size) {
    void* ptr = NULL;
    const size_t alignment = 64; /* typical cache line size */
    if (posix_memalign(&ptr, alignment, size) != 0) {
        return NULL;
    }

    /* Touch one byte per page to fault the pages in now.
     * IMPROVEMENT: query the real page size instead of hard-coding 4096. */
    long page = sysconf(_SC_PAGESIZE);
    size_t step = (page > 0) ? (size_t)page : 4096; /* fall back to 4KB */
    char* bytes = (char*)ptr;
    for (size_t off = 0; off < size; off += step) {
        bytes[off] = 0;
    }

    return ptr;
}

/*
 * Replace *comm with a duplicate created with MPI info assertions that
 * allow the library to optimize intra-node communication.
 *
 * The previous communicator is intentionally not freed: it may be a
 * built-in communicator such as MPI_COMM_WORLD.
 */
void set_mpi_hints(MPI_Comm* comm) {
    MPI_Info hints;
    MPI_Info_create(&hints);

    /* Optimize for intra-node communication */
    const char* assertions[] = {
        "mpi_assert_allow_overtaking",
        "mpi_assert_no_locks",
        "mpi_assert_exact_length",
    };
    for (size_t i = 0; i < sizeof(assertions) / sizeof(assertions[0]); i++) {
        MPI_Info_set(hints, assertions[i], "true");
    }

    /* Create new communicator with hints */
    MPI_Comm tuned;
    MPI_Comm_dup_with_info(*comm, hints, &tuned);
    *comm = tuned;

    MPI_Info_free(&hints);
}

/*
 * Scan /proc/meminfo for the HugePages_Total line and warn on stderr when
 * the kernel has no huge pages configured.  Silently returns if the file
 * cannot be opened or the line is absent.
 */
void check_hugepage_support(void) {
    FILE* meminfo = fopen("/proc/meminfo", "r");
    if (!meminfo) {
        return;
    }

    char buf[256];
    while (fgets(buf, sizeof(buf), meminfo) != NULL) {
        if (strstr(buf, "HugePages_Total:") == NULL) {
            continue;  /* not the line we want */
        }
        unsigned long total;
        if (sscanf(buf, "HugePages_Total: %lu", &total) == 1) {
            if (total == 0) {
                fprintf(stderr, "Warning: No huge pages available\n");
            }
            break;  /* found and parsed -- done */
        }
    }
    fclose(meminfo);
}

/*
 * Benchmark driver: initializes MPI, prints the rank/host layout,
 * allocates the global message buffers, then runs each collective test
 * selected by the command-line flags over a doubling range of message
 * sizes, printing timing statistics as it goes.
 */
int main (int argc, char *argv[])
{
    /* Prefer NUMA-local memory for all subsequent allocations.
     * NOTE(review): called before MPI_Init -- confirm intentional. */
    numa_set_localalloc();
    int myrank, nranks;
    int err;
    double time, time_limit, time_maxMsg; /* `time` shadows time(2); `size` below is unused */

    int iter, iter_limit;
    size_t size, messStart, messStop, mem_limit;
    int testFlags, ndims, partsize;
    int k;

    err = MPI_Init(&argc, &argv);
    if (err) { printf("Error in MPI_Init\n"); exit(1); }
     
    /* determine who we are in the MPI world */
    MPI_Comm_rank(MPI_COMM_WORLD, &rank_local);
    MPI_Comm_size(MPI_COMM_WORLD, &rank_count);

    /* myrank/nranks duplicate rank_local/rank_count; kept for the
     * collParams fields below */
    MPI_Comm_rank(MPI_COMM_WORLD, &myrank);
    MPI_Comm_size(MPI_COMM_WORLD, &nranks);

    printf("barrier %d\n", myrank); /* NOTE(review): leftover debug print? */

    MPI_Barrier(MPI_COMM_WORLD);
    print_process_info(rank_local, rank_count);
    MPI_Barrier(MPI_COMM_WORLD);
    

    char  hostname[256];
    char* hostnames;

    int root = 0; /* NOTE(review): unused; p.root is set separately below */

    struct argList args;
    /* process the command-line arguments, printing usage info on error */
    if (!processArgs(argc, argv, &args)) { usage(); }
    iter        = args.iters;
    messStart   = args.messStart;
    messStop    = args.messStop;
    mem_limit   = args.memLimit;
    time_limit  = args.timeLimit;
    testFlags   = args.testFlags;
    check_once  = args.checkOnce;
    check_every = args.checkEvery;
    ndims       = args.ndims;       /* NOTE(review): ndims/partsize unused in this chunk */
    partsize    = args.partSize; 


    /* mark start of mpiBench output */
    if (rank_local == 0) { printf("START mpiBench v%s\n", VERS); }

    /* collect hostnames of all the processes and print rank layout */
    gethostname(hostname, sizeof(hostname));
    hostnames = (char*) _ALLOC_MAIN_(sizeof(hostname)*rank_count, "Hostname array");
    MPI_Gather(hostname, sizeof(hostname), MPI_CHAR, hostnames, sizeof(hostname), MPI_CHAR, 0, MPI_COMM_WORLD);
    if (rank_local == 0) {
        for(k=0; k<rank_count; k++) {
            printf("%d : %s\n", k, &hostnames[k*sizeof(hostname)]);
        }
    }

    /* allocate message buffers and initialize timing functions;
     * halve messStop until send+recv buffers across all ranks fit in mem_limit */
    while(messStop*((size_t)rank_count)*2 > mem_limit && messStop > 0) messStop /= 2;
    buffer_size = messStop * rank_count;
    sbuffer   = (char*) _ALLOC_MAIN_(messStop    * rank_count, "Send Buffer");
    rbuffer   = (char*) _ALLOC_MAIN_(messStop    * rank_count, "Receive Buffer");
    sendcounts = (int*) _ALLOC_MAIN_(sizeof(int) * rank_count, "Send Counts");
    sdispls    = (int*) _ALLOC_MAIN_(sizeof(int) * rank_count, "Send Displacements");
    recvcounts = (int*) _ALLOC_MAIN_(sizeof(int) * rank_count, "Recv Counts");
    rdispls    = (int*) _ALLOC_MAIN_(sizeof(int) * rank_count, "Recv Displacements");

    /*time_maxMsg = 2*time_limit; */
    /* 0.0 disables the per-message time cutoff in the loops below
     * (every `> time_maxMsg && time_maxMsg > 0.0` break is dead) */
    time_maxMsg = 0.0;

        struct collParams p;
        p.root   = 0;
        p.comm   = MPI_COMM_WORLD;
        p.myrank = myrank;
        p.nranks = nranks;
        p.type   = MPI_BYTE;

        //MPI_Barrier(MPI_COMM_WORLD);
        //printf("comm=%d, myrank=%d, nranks=%d\n", MPI_COMM_WORLD, myrank, nranks);

        //MPI_Barrier(MPI_COMM_WORLD);

        /* time requested collectives; each loop doubles the message size
         * from messStart to messStop (the `? :` restarts a 0 size at 1) */
        if(testFlags & BARRIER) {
            p.size = 0;
            p.count = 0;
            get_time(time_barrier, "Barrier", &p, iter, time_limit);
        }

        if(testFlags & BCAST) {
            for(p.size = messStart; p.size <= messStop; p.size = (p.size > 0) ? p.size << 1 : 1) {
                p.count = p.size;
                if(get_time(time_bcast, "Bcast", &p, iter, time_limit) > time_maxMsg && time_maxMsg > 0.0) break;
            }
        }

       if(testFlags & P2P) {
            for(p.size = messStart; p.size <= messStop; p.size = (p.size > 0) ? p.size << 1 : 1) {
                p.count = p.size;
                get_time(time_p2p, "P2P", &p, iter, time_limit);
            }
        }

        if(testFlags & P2P_DUAL){
            for(p.size = messStart; p.size <= messStop; p.size = (p.size > 0) ? p.size << 1 : 1) {
                p.count = p.size;
                get_time(time_p2p_dual, "P2P_DUAL", &p, iter, time_limit);
            }
        }

        if(testFlags & ALLTOALL) {
            for(p.size = messStart; p.size <= messStop; p.size = (p.size > 0) ? p.size << 1 : 1) {
                p.count = p.size;
                if(get_time(time_alltoall, "Alltoall", &p, iter, time_limit) > time_maxMsg && time_maxMsg > 0.0) break;
            }
        }
        /* NOTE(review): debug print; %lu assumes size_t == unsigned long,
         * %zu would be portable -- and p.count's type is declared elsewhere */
        printf("p.size=%lu, p.count=%lu, messStart=%lu, messStop=%lu\n", p.size, p.count, messStart, messStop);



        if(testFlags & ALLTOALLV) {
            for(p.size = messStart; p.size <= messStop; p.size = (p.size > 0) ? p.size << 1 : 1) {
                p.count = p.size;
                if(get_time(time_alltoallv, "Alltoallv", &p, iter, time_limit) > time_maxMsg && time_maxMsg > 0.0) break;
            }
        }

        if(testFlags & ALLGATHER) {
            for(p.size = messStart; p.size <= messStop; p.size = (p.size > 0) ? p.size << 1 : 1) {
                p.count = p.size;
                if(get_time(time_allgather, "Allgather", &p, iter, time_limit) > time_maxMsg && time_maxMsg > 0.0) break;
            }
        }

        if(testFlags & ALLGATHERV) {
            for(p.size = messStart; p.size <= messStop; p.size = (p.size > 0) ? p.size << 1 : 1) {
                p.count = p.size;
                if(get_time(time_allgatherv, "Allgatherv", &p, iter, time_limit) > time_maxMsg && time_maxMsg > 0.0) break;
            }
        }

        if(testFlags & GATHER) {
            for(p.size = messStart; p.size <= messStop; p.size = (p.size > 0) ? p.size << 1 : 1) {
                p.count = p.size;
                if(get_time(time_gather, "Gather", &p, iter, time_limit) > time_maxMsg && time_maxMsg > 0.0) break;
            }
        }

        if(testFlags & GATHERV) {
            for(p.size = messStart; p.size <= messStop; p.size = (p.size > 0) ? p.size << 1 : 1) {
                p.count = p.size;
                if(get_time(time_gatherv, "Gatherv", &p, iter, time_limit) > time_maxMsg && time_maxMsg > 0.0) break;
            }
        }

        if(testFlags & SCATTER) {
            for(p.size = messStart; p.size <= messStop; p.size = (p.size > 0) ? p.size << 1 : 1) {
                p.count = p.size;
                if(get_time(time_scatter, "Scatter", &p, iter, time_limit) > time_maxMsg && time_maxMsg > 0.0) break;
            }
        }



        /* for the reductions, actually add some doubles to do something of interest;
         * count switches from bytes to number-of-doubles */
        p.type     = MPI_DOUBLE;
        p.reduceop = MPI_SUM;

        if(testFlags & ALLREDUCE) {
            for(p.size = messStart; p.size <= messStop; p.size = (p.size > 0) ? p.size << 1 : 1) {
                if(p.size < sizeof(double)) continue; /* skip sizes too small for one double */
                p.count = p.size / sizeof(double);
                if(get_time(time_allreduce, "Allreduce", &p, iter, time_limit) > time_maxMsg && time_maxMsg > 0.0) break;
            }
        }

        if(testFlags & REDUCE) {
            for(p.size = messStart; p.size <= messStop; p.size = (p.size > 0) ? p.size << 1 : 1) {
                if(p.size < sizeof(double)) continue;
                p.count = p.size / sizeof(double);
                if(get_time(time_reduce, "Reduce", &p, iter, time_limit) > time_maxMsg && time_maxMsg > 0.0) break;
            }
        }

    /* print memory usage */
    if (rank_local == 0) {
        printf("Message buffers (KB):\t%ld\n", allocated_memory/1024);
    }

#if 0
#ifndef _AIX
    print_mpi_resources();
#endif
#endif

    /* mark end of output */
    if (rank_local == 0) { printf("END mpiBench\n"); }

    /* free memory
     * NOTE(review): if optimize_memory() ever replaces sbuffer/rbuffer with
     * mmap()ed huge pages, free() here would be undefined behavior */
    if (hostnames)  { free(hostnames);  hostnames  = NULL; }

    if (sbuffer)    { free(sbuffer);    sbuffer    = NULL; }
    if (rbuffer)    { free(rbuffer);    rbuffer    = NULL; }
    if (sendcounts) { free(sendcounts); sendcounts = NULL; }
    if (sdispls)    { free(sdispls);    sdispls    = NULL; }
    if (recvcounts) { free(recvcounts); recvcounts = NULL; }
    if (rdispls)    { free(rdispls);    rdispls    = NULL; }


    /* shut down */
    MPI_Finalize();



    return 0;
}
