#include "test.h"

/*
 * error_check(code, value): evaluate a CUDA runtime call and route the
 * result through error_assert(); a non-zero `value` requests termination
 * on failure.  The do { } while (0) wrapper makes the macro safe inside
 * unbraced if/else statements (the bare { } form breaks `if (x) error_check(...); else ...`).
 */
#define error_check(code, value) \
   do { error_assert((code), __FILE__, __LINE__, (value)); } while (0)

/* Program name used in diagnostics; expected to be assigned early
 * (e.g. from argv[0]) before any message that prints it. */
char *_progname;

/*
 * IsAppBuiltAs64: returns TRUE when this binary was compiled for a 64-bit
 * target.  Checking the pointer width directly covers every 64-bit
 * architecture (x86_64, arm64, ppc64, ...) rather than the original
 * x86-specific macro list, which wrongly reported FALSE on 64-bit ARM.
 */
inline boolean
IsAppBuiltAs64()
{
   return (sizeof(void *) == 8) ? TRUE : FALSE;
}

/*
 * error_assert: report a failed CUDA runtime call.
 *
 * code  - result of the CUDA call; cudaSuccess is silently ignored
 * file  - call site file name (const: __FILE__ is a string literal, and
 *         passing a literal to char* is a deprecated conversion in C++)
 * line  - call site line number
 * abort - non-zero: terminate via errx() using the CUDA error code as the
 *         exit status; zero: print the diagnostic and continue
 */
inline void
error_assert(cudaError_t code, const char *file, int line, boolean abort)
{
   if (code != cudaSuccess) {
      /* Treat any non-zero `abort` as a termination request rather than
       * comparing against the literal 1. */
      if (abort)
         errx(code, "Assert: %s %s %d\n", cudaGetErrorString(code), file, line);
      else
         fprintf(stderr, "Assert: %s %s %d\n", cudaGetErrorString(code), file,
               line);
   }
}

/*
 * simpleKernel: dest[i] = source[i] / number for this thread's flat index.
 *
 * No bounds guard: the caller's launch configuration must exactly tile the
 * buffer (this file launches BUFSIZE/512 blocks of 512 threads, so every
 * index falls inside a BUFSIZE-element region).  `number` must be non-zero.
 */
__global__ void
simpleKernel(int *dest, int *source, int number)
{
   /* flat 1-D global thread index */
   int index = blockIdx.x * blockDim.x + threadIdx.x;
   dest[index] = source[index] / number;
}

/*
 * get_device_count: probe (in a forked child, so the probing CUDA contexts
 * do not pollute the parent) for devices that support UVA and 2-way peer
 * access with the first UVA device.  Results are written into *devices,
 * which must live in MAP_SHARED memory so the parent observes them.
 * Exits the whole program via errx() if any GPU is not in compute mode
 * default.  On return, devices->count is the number of usable ordinals in
 * devices->ordinals[].
 */
void
get_device_count(ipcdevices_t *devices)
{
   pid_t pid = fork();
   if (0 == pid) {
      int ix = 0, count, uva_count = 0;
      int uva_ordinals[MAX_DEVICES];

      printf("checking for multiple GPUs... ");

      checkCudaErrors(cudaGetDeviceCount(&count));
      printf("CUDA capable device count: %d\n", count);

      printf("Searching for UVA capable devices...\n");
      for (ix = 0; ix < count; ix++) {
         cudaDeviceProp prop;
         checkCudaErrors(cudaGetDeviceProperties(&prop, ix));
         /* Guard the store: more than MAX_DEVICES UVA devices would have
          * overflowed uva_ordinals[]. */
         if (prop.unifiedAddressing && uva_count < MAX_DEVICES) {
            uva_ordinals[uva_count] = ix;
            printf("GPU%d = %s is capable of UVA\n", ix, prop.name);
            uva_count++;
         }

         if (prop.computeMode != cudaComputeModeDefault) {
            printf("GPU device must be in compute mode default to run\n");
            errx(1, "Use nvidia-smi to change the compute mode default\n");
         }
      }

      /* No UVA device at all: report zero and stop before reading
       * uva_ordinals[0], which would be uninitialized. */
      if (uva_count == 0) {
         devices->count = 0;
         exit(EXIT_SUCCESS);
      }

      devices->ordinals[0] = uva_ordinals[0];
      if (uva_count < 2) {
         devices->count = uva_count;
         exit(EXIT_SUCCESS);
      }

      printf("Checking GPUs to support p2p memory access...\n");
      devices->count = 1;
      int canaccess_peer0i;
      int canaccess_peeri0;

      /* Start at 1: device 0 need not be peer-checked against itself. */
      for (ix = 1; ix < uva_count; ix++) {
         checkCudaErrors(cudaDeviceCanAccessPeer(&canaccess_peer0i,
                  uva_ordinals[0], uva_ordinals[ix]));
         checkCudaErrors(cudaDeviceCanAccessPeer(&canaccess_peeri0,
                  uva_ordinals[ix], uva_ordinals[0]));

         /* Require access in both directions. */
         if (canaccess_peer0i && canaccess_peeri0) {
            devices->ordinals[devices->count] = uva_ordinals[ix];
            printf("2-way peer access between GPU%d and GPU%d: yes\n",
                  devices->ordinals[0], devices->ordinals[devices->count]);
            devices->count++;
         }
      }

      exit(EXIT_SUCCESS);
   }
   else {
      /* Parent: block until the probe child finishes; abort on failure. */
      int status = 0;
      waitpid(pid, &status, 0);
      assert(!status);
   }
}

/*
 * runtest_multikernel: cooperative IPC test executed by `proc_count`
 * forked processes.  Process 0 owns a device buffer and exports it via a
 * CUDA IPC memory handle; every other process opens the handle, runs
 * simpleKernel on its own GPU (writing quotients into its BUFSIZE-sized
 * slot of the shared buffer), and signals completion through an IPC
 * event.  Process 0 then copies all worker slots back and verifies them
 * against refdata.  The processes rendezvous at proc_barrier() between
 * each phase.
 *
 * shared_mem - MAP_SHARED array of per-process IPC state (handles, device)
 * index      - this process's slot in shared_mem (0 == owner/verifier)
 * proc_count - total number of participating processes
 */
void
runtest_multikernel(ipccuda_t *shared_mem, int index, int proc_count)
{
   int *ptr;

   int refdata[BUFSIZE];
   /* Fill the whole array: the original memset of BUFSIZE *bytes* left
    * three quarters of the ints uninitialized. */
   memset(refdata, rand(), sizeof(refdata));

   checkCudaErrors(cudaSetDevice(shared_mem[index].device));
   if (index == 0) {
      printf("Launching kernels...\n");
      int results[BUFSIZE * MAX_DEVICES * PROC_PER_DEV];
      cudaEvent_t event[MAX_DEVICES * PROC_PER_DEV];
      checkCudaErrors(
            cudaMalloc((void **)&ptr, BUFSIZE * proc_count * sizeof(int)));
      checkCudaErrors(
            cudaIpcGetMemHandle((cudaIpcMemHandle_t *)&shared_mem[0].mem_handle,
               (void *)ptr));
      checkCudaErrors(
            cudaMemcpy((void *)ptr, (void *)refdata, BUFSIZE * sizeof(int),
               cudaMemcpyHostToDevice));

      /* Phase 1: wait until every worker has published its event handle. */
      proc_barrier(proc_count);
      for (int ix = 0; ix < proc_count; ix++)
         checkCudaErrors(cudaIpcOpenEventHandle(&event[ix],
                  shared_mem[ix].event_handle));

      /* Phase 2: wait until every worker has recorded its event, then
       * synchronize on all of them so the kernel writes are visible. */
      proc_barrier(proc_count);
      for (int ix = 0; ix < proc_count; ix++)
         checkCudaErrors(cudaEventSynchronize(event[ix]));

      proc_barrier(proc_count);
      checkCudaErrors(cudaMemcpy(results, ptr + BUFSIZE,
               BUFSIZE * (proc_count - 1) * sizeof(int),
               cudaMemcpyDeviceToHost));
      checkCudaErrors(cudaFree(ptr));

      printf("checking test results...\n");
      for (int jy = 1; jy < proc_count; jy++)
         for (int ix = 0; ix < BUFSIZE; ix++) {
            /* Row jy-1 holds process jy's output.  The original index
             * `(jy - 1) * BUFSIZE * ix` multiplied instead of adding and
             * read far outside each row. */
            int temp = results[(jy - 1) * BUFSIZE + ix];
            if (refdata[ix] / (jy + 1) != temp) {
               printf("Data check error at index %d in process %d: %i, %i\n",
                     ix, jy, refdata[ix], temp);
               barrier->exit = TRUE;
               exit(EXIT_FAILURE);
            }
         }

   }
   else {
      cudaEvent_t event;
      /* cudaEventCreateWithFlags is the canonical API for flagged event
       * creation (the flagged cudaEventCreate form is a C++-only overload). */
      checkCudaErrors(cudaEventCreateWithFlags(&event, cudaEventDisableTiming |
               cudaEventInterprocess));
      checkCudaErrors(cudaIpcGetEventHandle(
               (cudaIpcEventHandle_t *)&shared_mem[index].event_handle, event));

      proc_barrier(proc_count);
      checkCudaErrors(cudaIpcOpenMemHandle((void **)&ptr,
               shared_mem[0].mem_handle, cudaIpcMemLazyEnablePeerAccess));
      printf("Process %3d: ", index);
      printf("Run kernel on GPU%d, taking source data from and ",
            shared_mem[index].device);
      printf("writing results to process %d, GPU%d...\n", 
            0, shared_mem[0].device);
      const dim3 threads(512, 1);
      const dim3 blocks(BUFSIZE / threads.x, 1);
      simpleKernel<<<blocks, threads>>> (ptr + index * BUFSIZE, ptr,
            index + 1);
      /* Launch-configuration errors do not surface from the <<<>>> itself. */
      checkCudaErrors(cudaGetLastError());
      checkCudaErrors(cudaEventRecord(event));

      proc_barrier(proc_count);
      checkCudaErrors(cudaIpcCloseMemHandle(ptr));

      proc_barrier(proc_count);
      checkCudaErrors(cudaEventDestroy(event));
   }
}

/*
 * proc_barrier: sense-reversing barrier across `proc_count` processes.
 *
 * `barrier` lives in MAP_SHARED memory; `proc_sense` is this process's
 * private copy of the current sense (both are file-scope globals).  The
 * last arrival resets the count and flips the shared sense, releasing the
 * spinners; everyone then flips their local sense for the next round.
 * A waiter that observes barrier->exit terminates, propagating a failure
 * flagged by a sibling process.
 */
void
proc_barrier(int proc_count)
{
   int arrived = __sync_add_and_fetch(&barrier->count, 1);
   if (arrived == proc_count) {
      /* Last one in: reset for the next round and release the others. */
      barrier->count = 0;
      barrier->sense = !proc_sense;
   }
   else {
      while (barrier->sense == proc_sense) {
         /* The original had these branches swapped: a set exit flag made
          * waiters yield forever, while a clear flag made every waiter
          * exit(EXIT_FAILURE) immediately. */
         if (barrier->exit)
            exit(EXIT_FAILURE);
         else
            sched_yield();
      }
   }

   proc_sense = !proc_sense;
}


/*
 * main: entry point.  Maps shared state, discovers usable GPUs via
 * get_device_count(), forks one worker per process slot, runs the IPC
 * test, then (in process 0) reaps the workers and resets the devices.
 * Returns 0 on success, 1 when not built 64-bit; errx() exits on waive.
 */
int
main(int argc, char **argv)
{
   int retval = 0;
   int proc_count = 0;
   barrier = NULL;

   /* _progname is printed in the diagnostics below but was never
    * assigned anywhere; printf("%s", NULL) is undefined behavior. */
   _progname = (argc > 0 && argv[0]) ? argv[0] : "(unknown)";

   /* findCudaDevice(argc, (const char **)argv); */ 
   if (!IsAppBuiltAs64()) {
      printf("%s is only supported on 64 bit Linux OS", _progname);
      retval = 1;
   }
   else {
      /* sizeof(ipcdevices_t), not sizeof(ipcdevices_t *): the original
       * mapped only a pointer's worth of memory.  MAP_ANONYMOUS requires
       * fd == -1 for portability (Linux merely ignores it). */
      ipcdevices_t *devices = (ipcdevices_t *)mmap(NULL, sizeof(ipcdevices_t),
            PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS, -1, 0);
      assert(MAP_FAILED != devices);
      get_device_count(devices);

      if (devices->count < 1) {
         printf("One or more (SM 2.0) class GPU's are required for %s\n",
               _progname);
         errx(1, "Waiving test\n");
      }

      if (devices->count > 1)
         proc_count = PROC_PER_DEV * devices->count;
      else 
         proc_count = 2;

      /* Same pointer-size bugs fixed here: map/zero the full structs. */
      barrier = (ipcbarrier_t *)mmap(NULL, sizeof(ipcbarrier_t),
            PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS, -1, 0);
      assert(MAP_FAILED != barrier);
      memset(barrier, 0, sizeof(*barrier));
      proc_sense = 0;

      ipccuda_t *shared_mem = (ipccuda_t *)mmap(NULL,
            proc_count * sizeof(ipccuda_t),
            PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS, -1, 0);
      assert(MAP_FAILED != shared_mem);
      memset(shared_mem, 0, proc_count * sizeof(*shared_mem));

      /* Fork the workers; each child learns its slot via `index` and
       * leaves the loop, the parent records each child's pid. */
      int index = 0;
      for (int ix = 1; ix < proc_count; ix++) {
         int pid = fork();
         if (!pid) {
            index = ix;
            break;
         }
         else {
            shared_mem[ix].pid = pid;
         }
      }

      if (devices->count > 1)
         shared_mem[index].device = devices->ordinals[index / PROC_PER_DEV];
      else
         /* Single-GPU case: every process must record the ordinal in its
          * OWN slot — the original wrote shared_mem[0].device, leaving
          * slot 1's device at the memset zero instead of ordinals[0]. */
         shared_mem[index].device = devices->ordinals[0];

      printf("Process %3d -> GPU%d\n", index, shared_mem[index].device);
      runtest_multikernel(shared_mem, index, proc_count);

      if (index == 0) {
         /* Reap every worker before tearing the devices down. */
         for (int ix = 1; ix < proc_count; ix++) {
            int status = 0;
            waitpid(shared_mem[ix].pid, &status, 0);
            assert(WIFEXITED(status));
         }

         printf("shutting down\n");

         for (int ix = 0; ix < devices->count; ix++) {
            checkCudaErrors(cudaSetDevice(devices->ordinals[ix]));
            cudaDeviceReset();
         }
      }

      retval = 0;
   }

   return retval;
}
