/*
 *  Copyright 2010 INFN - APE group
 *
 *  Licensed under the Apache License, Version 2.0 (the "License");
 *  you may not use this file except in compliance with the License.
 *  You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 *  Unless required by applicable law or agreed to in writing, software
 *  distributed under the License is distributed on an "AS IS" BASIS,
 *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 *  See the License for the specific language governing permissions and
 *  limitations under the License.
 */


#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <math.h>
#include <assert.h>
#include <sys/time.h>

//#include <boost/function.hpp>
//#include <boost/bind.hpp>
//#include <boost/thread.hpp>
//#include <boost/thread/condition.hpp>
//#include <boost/thread/mutex.hpp>
//#include <boost/date_time/posix_time/posix_time.hpp>

//#define CUQU_DISABLE_CUPRINTF 1
//#define CUOS_ENABLE_TRACING 1

#include <cuos/cuos.h>
#include <cuos/mpi.h>

#include <mpi.h>

#ifdef CUPRINTF_INCLUDED
#warning "including cuPrintf.cu"
#include <utils/cuPrintf.cu>
#endif

#define ODDGPU 0 // even GPUs do test
//#define ODDGPU 1 // odd GPUs do test
//#define ODDGPU 2 // no GPU at all

//----------------------------------------------------------------------
//----------------------------------------------------------------------
// GPU kernels

using namespace cuos::device;

/*
 * Synthetic per-thread workload: accumulates sinf(i1)*c1 + cosf(i2)*c2
 * into o[] over the whole buffer using a grid-stride loop.
 *
 *  gid         - this thread's flattened global index
 *  tot_threads - total number of threads in the grid (the stride)
 *  o           - in/out accumulator buffer of n_floats floats
 *  i1, i2      - read-only input buffers of n_floats floats
 *  c1, c2      - per-call scaling coefficients
 *  n_floats    - number of elements to process
 *
 * BUGFIX: the loop previously indexed with `gid` instead of the loop
 * variable `idx`, so every thread re-accumulated into o[gid] once per
 * stride step instead of covering elements gid, gid+tot_threads, ...
 * The redundant inner `if(gid < n_floats)` guard (implied by the loop
 * condition at idx == gid) is removed as well.
 */
__device__ void calc(const uint gid, const uint tot_threads, float *o, const float *i1, const float *i2, const float c1, const float c2, const int n_floats)
{
    for(int idx = gid; idx < n_floats; idx += tot_threads)
        o[idx] += sinf(i1[idx])*c1 + cosf(i2[idx])*c2;
}

//----------------------------------------------------------------------

/*
 * Blocking ping-pong test kernel: for n_calls iterations, odd ranks
 * receive the whole o[] buffer from rank-1 and even ranks send it to
 * rank+1 via the device-side blocking cuMPI calls.  The loop aborts on
 * the first cuMPI error.
 *
 * Expects a 1D-ish launch; index math supports up to 3D blocks / 2D grids.
 */
__global__ void mpi_kernel_blocking(sys_queue_t *sys_q, const int rank, const int n_nodes, const int n_calls, float *o, const float *i1, const float *i2, const int n_floats)
{
    // Flatten thread/block coordinates into linear ids.  gid and
    // tot_threads feed only the optional calc() workload, which is
    // currently compiled out, but are kept for parity with the async
    // kernel.
    const uint tid = threadIdx.x + blockDim.x*(threadIdx.y + blockDim.y*threadIdx.z);
    const uint bid = blockIdx.x + gridDim.x*blockIdx.y;
    const uint block_size = blockDim.x*blockDim.y*blockDim.z;
    const uint grid_size = gridDim.x*gridDim.y;
    const uint gid = tid + bid*block_size;
    const uint tot_threads = block_size*grid_size;
    (void)gid;
    (void)tot_threads;

    // Role is fixed by rank parity: odd ranks receive, even ranks send.
    const bool is_receiver = ((rank % 2) == 1);

    for(int n = 0; n < n_calls; ++n) {
        const int retcode = is_receiver
            ? cuMPI_Recv(sys_q, o, n_floats, cuMPI_FLOAT, rank-1, 1 /*tag*/, cuMPI_COMM_WORLD)
            : cuMPI_Send(sys_q, o, n_floats, cuMPI_FLOAT, rank+1, 1 /*tag*/, cuMPI_COMM_WORLD);

        if(retcode != cuMPI_SUCCESS) {
            cuos_error("ERROR: [%d:%d] n=%d retcode=%x in MPI_Send/Recv\n", bid, tid, n, retcode);
            break;
        }
    }
}

//----------------------------------------------------------------------

// Request object for the in-flight non-blocking cuMPI operation.
// NOTE(review): this single __device__ variable is shared by every
// thread in the grid; presumably the cuMPI layer treats the Isend/Irecv
// as one grid-collective operation — confirm that all threads writing
// &req concurrently is benign.
__device__ cuMPI_Request req;

// Non-blocking ping-pong test kernel: each iteration runs the calc()
// workload, posts an Irecv (odd ranks, from rank-1) or Isend (even
// ranks, to rank+1) on the whole o[] buffer, then completes it with
// cuMPI_Wait (the compiled-in path) or a cuMPI_Test polling loop (the
// disabled #else path, which could overlap useful work with the
// communication).
__global__ void mpi_kernel_async(sys_queue_t *sys_q, int rank, int n_nodes, int n_calls, float *o, float *i1, float *i2, int n_floats) 
{
    // Flatten 3D block / 2D grid coordinates into linear ids.
    const uint tid = threadIdx.x + blockDim.x*threadIdx.y + blockDim.x*blockDim.y*threadIdx.z;
    const uint bid = blockIdx.x + gridDim.x*blockIdx.y;
    const uint block_size = blockDim.x*blockDim.y*blockDim.z;
    const uint grid_size = gridDim.y*gridDim.x;
    const uint gid = tid + bid*block_size;
    const uint tot_threads = block_size*grid_size;

#ifdef CUPRINTF_CU
    //cuPrintfRestrict(0, CUPRINTF_UNRESTRICTED);
    //cuPrintfRestrict(CUPRINTF_UNRESTRICTED, 0);
    //cuPrintf("num_events=%d\n", num_events);
#endif

    int n = 0;

    while(n < n_calls) {
        int retcode;

        // Dummy compute phase; coefficients ramp linearly with n.
        calc(gid, tot_threads, o, i1, i2, (float)n/n_calls, (float)(n_calls-n)/n_calls, n_floats);

        if ((rank % 2) == 1)
            retcode = cuMPI_Irecv(sys_q, o, n_floats, cuMPI_FLOAT, rank-1, 1 /*tag*/, cuMPI_COMM_WORLD, &req);
        else 
            retcode = cuMPI_Isend(sys_q, o, n_floats, cuMPI_FLOAT, rank+1, 1 /*tag*/, cuMPI_COMM_WORLD, &req);

        if(retcode != cuMPI_SUCCESS) {
            cuos_error("ERROR: gid=%d n=%d retcode=%d in MPI_Isend/Irecv\n", gid, n, retcode);
            break;
        }

        ++n;
#if 1
        // NOTE(review): this assumes cuMPI_Wait returns >0 on success
        // (matching the cuMPI_Test convention in the #else branch), NOT
        // the cuMPI_SUCCESS convention used for the blocking calls above.
        // If cuMPI_Wait actually returned cuMPI_SUCCESS (typically 0),
        // every successful wait would be reported as an error — confirm.
        // Also, `else if(testcode <= 0)` is always true when reached and
        // could simply be `else`.
        int testcode = cuMPI_Wait(sys_q, &req);
        if(testcode > 0)
            continue;
        else if(testcode <= 0) {
            cuos_error("ERROR: [%d:%d] retcode=%d in MPI_Wait\n", bid, tid, testcode);
            break;
        }
#else
        // Polling variant: >0 = completed, 0 = still active, <0 = error.
        while(true) {
            // useful calculations could be done here !!!

            int testcode = cuMPI_Test(sys_q, &req);
            if(testcode > 0)
                // previous MPI request completed
                break;
            else if(testcode == 0)
                // MPI request still active
                continue;
            else {
                // error!!!
                cuos_error("ERROR: [%d:%d] retcode=%d in MPI_Test, going on anyway...\n", bid, tid, testcode);
                break;
            }
        }
#endif
    }
}

//----------------------------------------------------------------------
// GPU cuos kernel launching code

/*
 * Host driver for the cuOS test without the sys-server thread.
 *
 * Launches mpi_kernel_async or mpi_kernel_blocking (per async_test) on
 * the runtime's exec stream, then services the kernel's offloaded system
 * requests from THIS host thread: it polls the stop event and, while the
 * kernel is still running, repeatedly calls os->handle_system_service().
 * Wall-clock time over all n_repeats launches is printed at the end.
 *
 * NOTE(review): n_nodes, partial_dump and dump are currently unused here;
 * startEvent/stopEvent are recorded but cudaEventElapsedTime is never
 * called — timing relies on gettimeofday instead.
 */
void test_mpi_cuos(cuos::host::runtime_ref_t os, int rank, int n_nodes, int n_threads, int n_blocks, int n_repeats, int n_calls, bool verbose, bool partial_dump, bool dump, bool async_test)
{
#ifdef CUPRINTF_CU
    cudaPrintfInit();
#endif
#if defined(CUOS_DEBUG) || defined(_DEBUG) || defined(DEBUG)
    printf("Beware, debug builds are not reliable, they may deadlock!\n");
#endif

    // 1D launch configuration: n_blocks blocks of n_threads threads each.
    dim3 dimBlock(n_threads, 1, 1);
    dim3 dimGrid(n_blocks, 1, 1);
    cudaEvent_t startEvent, stopEvent;

    printf("# [%d] test_mpi_cuos config: n_threads=%d n_blocks=%d n_repeats=%d n_calls=%d async=%d\n", rank, n_threads, n_blocks, n_repeats, n_calls, async_test);

    cuosSafeCall(cudaEventCreate(&startEvent));
    cuosSafeCall(cudaEventCreate(&stopEvent));

    // One float per launched thread; d_out is deliberately left
    // uninitialized (it is the send/recv payload), inputs are zeroed.
    const int n_floats = n_blocks*n_threads;
    float *d_out, *d_in1, *d_in2;
    cuosSafeCall(cudaMalloc(&d_out,    sizeof(float)*n_floats));
    cuosSafeCall(cudaMalloc(&d_in1,    sizeof(float)*n_floats));
    cuosSafeCall(cudaMemset( d_in1, 0, sizeof(float)*n_floats));
    cuosSafeCall(cudaMalloc(&d_in2,    sizeof(float)*n_floats));
    cuosSafeCall(cudaMemset( d_in2, 0, sizeof(float)*n_floats));

    if(verbose) printf("# [%d] allocated 3 GPU buffers of %d floats\n", rank, n_floats);

    os->prepare_device();

    if(verbose) printf("# [%d] n_threads\tn_blocks\ttot tm(ms)\tsigma\t\ttm per call(us)\tsigma\n", rank);

    struct timeval tm_s, tm_e;
    gettimeofday(&tm_s, 0);

    for(int nr=0; nr<n_repeats; ++nr) {
        //cuosCheckMsg("before mpi_kernel launch");
        cudaStream_t exec_stream = os->exec_stream();
        // start timer
        cuosSafeCall(cudaEventRecord(startEvent, exec_stream));
        cuosTrace("before kernel launch\n");
        // spawn GPU kernel
        if(async_test)
            mpi_kernel_async<<<dimGrid, dimBlock, 0, exec_stream>>>(os->sys_q(), rank, n_nodes, n_calls, d_out, d_in1, d_in2, n_floats);
        else
            mpi_kernel_blocking<<<dimGrid, dimBlock, 0, exec_stream>>>(os->sys_q(), rank, n_nodes, n_calls, d_out, d_in1, d_in2, n_floats);

        // queue stop event
        cuosSafeCall(cudaEventRecord(stopEvent, exec_stream));
        // offloaded system service handling is here: the kernel blocks on
        // cuMPI calls until this host loop executes them on its behalf, so
        // we must NOT simply synchronize — poll the event and keep serving.
        bool kernel_running = true;
        int err = 0;
        while(kernel_running) {
            cuosTrace("before cudaEventQuery()\n");
            cudaError_t retcode = cudaEventQuery(stopEvent);
            switch(retcode) {
            case cudaSuccess:
                // stop event reached: the kernel has finished.
                printf("# [%d] kernel ended!\n", rank);
                kernel_running = false;
                break;
            case cudaErrorNotReady:
                {
                    // Kernel still running: serve one pending device->host
                    // system request (1 ms wait budget).
                    //printf("# [%d] handling system services\n", rank);
                    int ret = os->handle_system_service(1 /*ms*/);
                    cuosTrace("after handle_system_service() ret=%d\n", ret);
                }
                break;
            default:
                // Unexpected CUDA error; give up after a few occurrences
                // instead of spinning forever.
                printf("# [%d] ERROR unexpected CUDA retcode=%d (%s)\n", rank, retcode, cudaGetErrorString(retcode));
                ++err;
                if(err > 4)
                    kernel_running = false;
                break;
            }
        }
        // ok done!
    }

    gettimeofday(&tm_e, 0);
    double dt = (tm_e.tv_sec - tm_s.tv_sec) + (tm_e.tv_usec - tm_s.tv_usec) / ( 1000.0 * 1000.0 );
    printf("# [%d] GPU: dt=%fs\n", rank, dt);

    cuosSafeCall(cudaEventDestroy(startEvent));
    cuosSafeCall(cudaEventDestroy(stopEvent));
    
    cuosSafeCall(cudaFree(d_out));
    cuosSafeCall(cudaFree(d_in1));
    cuosSafeCall(cudaFree(d_in2));
        
#ifdef CUPRINTF_CU
    cudaPrintfDisplay(stdout, true);
    cudaPrintfEnd();
#endif
}

//----------------------------------------------------------------------
// GPU cuos+sys_server kernel launching code

/*
 * Host driver for the cuOS test WITH the runtime's sys-server thread.
 *
 * Identical setup to test_mpi_cuos(), but instead of polling the stop
 * event and servicing requests inline, it starts os->start_sys_server()
 * (a background service handler) and simply synchronizes on the exec
 * stream after each launch.  Wall-clock time over all n_repeats launches
 * is printed at the end.
 *
 * NOTE(review): n_nodes, partial_dump and dump are currently unused here;
 * startEvent/stopEvent are recorded but their elapsed time is never read.
 */
void test_mpi_cuos_ss(cuos::host::runtime_ref_t os, int rank, int n_nodes, int n_threads, int n_blocks, int n_repeats, int n_calls, bool verbose, bool partial_dump, bool dump, bool async_test)
{
#ifdef CUPRINTF_CU
    cudaPrintfInit();
#endif
#if defined(CUOS_DEBUG) || defined(_DEBUG) || defined(DEBUG)
    printf("Beware, debug builds are not reliable, they may deadlock!\n");
#endif

    // 1D launch configuration: n_blocks blocks of n_threads threads each.
    dim3 dimBlock(n_threads, 1, 1);
    dim3 dimGrid(n_blocks, 1, 1);
    cudaEvent_t startEvent, stopEvent;

    printf("# [%d] test_mpi_cuos_ss config: n_threads=%d n_blocks=%d n_repeats=%d n_calls=%d async=%d\n", rank, n_threads, n_blocks, n_repeats, n_calls, async_test);

    cuosSafeCall(cudaEventCreate(&startEvent));
    cuosSafeCall(cudaEventCreate(&stopEvent));

    // One float per launched thread; d_out (the payload) stays
    // uninitialized, inputs are zeroed.
    const int n_floats = n_blocks*n_threads;
    float *d_out, *d_in1, *d_in2;
    cuosSafeCall(cudaMalloc(&d_out,    sizeof(float)*n_floats));
    cuosSafeCall(cudaMalloc(&d_in1,    sizeof(float)*n_floats));
    cuosSafeCall(cudaMemset( d_in1, 0, sizeof(float)*n_floats));
    cuosSafeCall(cudaMalloc(&d_in2,    sizeof(float)*n_floats));
    cuosSafeCall(cudaMemset( d_in2, 0, sizeof(float)*n_floats));

    if(verbose) printf("# [%d] allocated 3 GPU buffers of %d floats\n", rank, n_floats);

    os->prepare_device();

    if(verbose) printf("# [%d] n_threads\tn_blocks\ttot tm(ms)\tsigma\t\ttm per call(us)\tsigma\n", rank);

    // Background thread takes over device system-service handling, so a
    // plain stream synchronize below cannot deadlock the kernel.
    os->start_sys_server();

    struct timeval tm_s, tm_e;
    gettimeofday(&tm_s, 0);

    for(int nr=0; nr<n_repeats; ++nr) {
        //cuosCheckMsg("before mpi_kernel launch");
        cudaStream_t exec_stream = os->exec_stream();
        // start timer
        cuosSafeCall(cudaEventRecord(startEvent, exec_stream));
        cuosTrace("before kernel launch\n");
        // spawn GPU kernel
        if(async_test)
            mpi_kernel_async<<<dimGrid, dimBlock, 0, exec_stream>>>(os->sys_q(), rank, n_nodes, n_calls, d_out, d_in1, d_in2, n_floats);
        else
            mpi_kernel_blocking<<<dimGrid, dimBlock, 0, exec_stream>>>(os->sys_q(), rank, n_nodes, n_calls, d_out, d_in1, d_in2, n_floats);
        // queue stop event
        cuosSafeCall(cudaEventRecord(stopEvent, exec_stream));
        cuosSafeCall(cudaStreamSynchronize(exec_stream));
        cuosTrace("after kernel end\n");
    }

    gettimeofday(&tm_e, 0);
    double dt = (tm_e.tv_sec - tm_s.tv_sec) + (tm_e.tv_usec - tm_s.tv_usec) / ( 1000.0 * 1000.0 );
    printf("# [%d] GPU: dt=%fs\n", rank, dt);

    cuosSafeCall(cudaEventDestroy(startEvent));
    cuosSafeCall(cudaEventDestroy(stopEvent));
    
    cuosSafeCall(cudaFree(d_out));
    cuosSafeCall(cudaFree(d_in1));
    cuosSafeCall(cudaFree(d_in2));

    os->stop_sys_server();
        
#ifdef CUPRINTF_CU
    cudaPrintfDisplay(stdout, true);
    cudaPrintfEnd();
#endif
}

//----------------------------------------------------------------------
// GPU plain CUDA kernel launching code

/*
 * Plain-CUDA variant of the workload kernel: runs one calc() pass for
 * call number n of n_calls; all MPI traffic is handled on the host in
 * test_mpi_cuda().  Coefficients ramp linearly with n.
 */
__global__ void mpi_kernel_cuda(const int n, const int n_calls, float *o, const float *i1, const float *i2, const int n_floats) 
{
    // Flatten 3D block / 2D grid coordinates into a global thread id
    // and the total grid-wide thread count used as the calc() stride.
    const uint threads_per_block = blockDim.x*blockDim.y*blockDim.z;
    const uint local_id  = threadIdx.x + blockDim.x*(threadIdx.y + blockDim.y*threadIdx.z);
    const uint block_id  = blockIdx.x + gridDim.x*blockIdx.y;
    const uint global_id = local_id + block_id*threads_per_block;
    const uint n_threads = threads_per_block*(gridDim.x*gridDim.y);

    calc(global_id, n_threads, o, i1, i2, (float)n/n_calls, (float)(n_calls-n)/n_calls, n_floats);
}


/*
 * Host driver for the plain-CUDA baseline: each call launches
 * mpi_kernel_cuda, copies d_out back to a pinned staging buffer, and
 * does the ping-pong with ordinary host-side MPI_Send/MPI_Recv (odd
 * ranks receive from rank-1, even ranks send to rank+1).  Wall-clock
 * time over all n_repeats*n_calls iterations is printed at the end.
 *
 * FIX: cudaMemcpyAsync / cudaStreamSynchronize were the only CUDA calls
 * not wrapped in cuosSafeCall, so copy or kernel-execution errors were
 * silently ignored; they are now checked like the rest.
 *
 * NOTE(review): n_nodes, partial_dump, dump and async_test are unused
 * here; startEvent/stopEvent are created/destroyed but never used for
 * timing (gettimeofday is used instead).
 */
void test_mpi_cuda(cuos::host::runtime_ref_t os, int rank, int n_nodes, int n_threads, int n_blocks, int n_repeats, int n_calls, bool verbose, bool partial_dump, bool dump, bool async_test)
{
#ifdef CUPRINTF_CU
    cudaPrintfInit();
#endif
#if defined(CUOS_DEBUG) || defined(_DEBUG) || defined(DEBUG)
    printf("Beware, debug builds are not reliable, they may deadlock!\n");
#endif

    // 1D launch configuration: n_blocks blocks of n_threads threads each.
    dim3 dimBlock(n_threads, 1, 1);
    dim3 dimGrid(n_blocks, 1, 1);
    cudaEvent_t startEvent, stopEvent;

    printf("# [%d] test_mpi_cuda config: n_threads=%d n_blocks=%d n_repeats=%d n_calls=%d async=%d\n", rank, n_threads, n_blocks, n_repeats, n_calls, async_test);

    cuosSafeCall(cudaEventCreate(&startEvent));
    cuosSafeCall(cudaEventCreate(&stopEvent));

    // One float per launched thread; d_out (the payload) stays
    // uninitialized, inputs are zeroed.
    const int n_floats = n_blocks*n_threads;
    float *d_out, *d_in1, *d_in2;
    cuosSafeCall(cudaMalloc(&d_out,    sizeof(float)*n_floats));
    cuosSafeCall(cudaMalloc(&d_in1,    sizeof(float)*n_floats));
    cuosSafeCall(cudaMemset( d_in1, 0, sizeof(float)*n_floats));
    cuosSafeCall(cudaMalloc(&d_in2,    sizeof(float)*n_floats));
    cuosSafeCall(cudaMemset( d_in2, 0, sizeof(float)*n_floats));

    // Host-side staging buffer from the runtime (presumably pinned, per
    // the "pinned MPI_Send/Recv" messages below — confirm in cuos).
    float *h_buf;
    h_buf = (float*)os->get_tmp_buf(sizeof(float)*n_floats);

    if(verbose) printf("# [%d] allocated 3 GPU buffers of %d floats\n", rank, n_floats);

    if(verbose) printf("# [%d] n_threads\tn_blocks\ttot tm(ms)\tsigma\t\ttm per call(us)\tsigma\n", rank);

    struct timeval tm_s, tm_e;
    gettimeofday(&tm_s, 0);

    for(int nr=0; nr<n_repeats; ++nr) {
        cuosCheckMsg("before mpi_kernel launch");
        // start timer
        //cuosSafeCall(cudaEventRecord(startEvent, exec_stream));

        for(int n=0; n<n_calls; ++n) {
            cudaStream_t exec_stream = os->exec_stream();
            int retcode;
            MPI_Status stat;

            // spawn GPU kernel, then stage the result on the host and
            // wait for both to complete before touching h_buf.
            mpi_kernel_cuda<<<dimGrid, dimBlock, 0, exec_stream>>>(n, n_calls, d_out, d_in1, d_in2, n_floats);
            cuosSafeCall(cudaMemcpyAsync(h_buf, d_out, sizeof(float)*n_floats, cudaMemcpyDeviceToHost, exec_stream));
            cuosSafeCall(cudaStreamSynchronize(exec_stream));

            if ((rank % 2) == 1) {
                // Progress report roughly every 10% of the calls (the
                // n_calls/10 == 0 test guards the modulo for n_calls < 10).
                if(verbose || n_calls/10 == 0 || (n % (n_calls/10)) == 0) {
                    printf("# [%d] GPU: iter=%d invoking pinned MPI_Recv()\n", rank, n);
                    fflush(stdout);
                }
                retcode = MPI_Recv(h_buf, n_floats, MPI_FLOAT, rank-1, 1 /*tag*/, MPI_COMM_WORLD, &stat);
            } else {
                if(verbose || n_calls/10 == 0 || (n % (n_calls/10)) == 0) { 
                    printf("# [%d] GPU: iter=%d invoking pinned MPI_Send()\n", rank, n);
                    fflush(stdout);
                }
                retcode = MPI_Send(h_buf, n_floats, MPI_FLOAT, rank+1, 1 /*tag*/, MPI_COMM_WORLD);
            }

            if(retcode != MPI_SUCCESS) {
                cuosError("ERROR: GPU: retcode=%d in MPI_Send/Recv\n", retcode);
                break;
            }
        }

        // queue stop event
        //cuosSafeCall(cudaEventRecord(stopEvent, exec_stream));
    }
    gettimeofday(&tm_e, 0);
    double dt = (tm_e.tv_sec - tm_s.tv_sec) + (tm_e.tv_usec - tm_s.tv_usec) / ( 1000.0 * 1000.0 );
    printf("# [%d] GPU: dt=%fs\n", rank, dt);

    os->rel_tmp_buf(h_buf);

    cuosSafeCall(cudaEventDestroy(startEvent));
    cuosSafeCall(cudaEventDestroy(stopEvent));
    
    cuosSafeCall(cudaFree(d_out));
    cuosSafeCall(cudaFree(d_in1));
    cuosSafeCall(cudaFree(d_in2));
        
#ifdef CUPRINTF_CU
    cudaPrintfDisplay(stdout, true);
    cudaPrintfEnd();
#endif
}

//----------------------------------------------------------------------
// CPU code

/*
 * CPU reference implementation of the ping-pong test: computes the same
 * synthetic sin/cos workload over n_blocks*n_threads floats, then odd
 * ranks MPI_Recv from rank-1 and even ranks MPI_Send to rank+1, for
 * n_calls iterations.  Wall-clock time is printed at the end.
 *
 * FIX: the three malloc() results were used unchecked; a failed
 * allocation now aborts with a message instead of dereferencing NULL.
 *
 * NOTE(review): n_nodes, partial_dump and dump are currently unused.
 */
void test_mpi_host(int rank, int n_nodes, int n_threads, int n_blocks, int n_repeats, int n_calls, bool verbose, bool partial_dump, bool dump)
{
    const int n_floats = n_blocks*n_threads;

    assert(n_threads > 0);
    assert(n_blocks > 0);

    float *out, *in1, *in2;
    out = (float*)malloc(sizeof(float)*n_floats);
    in1 = (float*)malloc(sizeof(float)*n_floats);
    in2 = (float*)malloc(sizeof(float)*n_floats);
    if(!out || !in1 || !in2) {
        fprintf(stderr, "# [%d] CPU: out of memory allocating %d floats\n", rank, n_floats);
        exit(EXIT_FAILURE);
    }
    memset(in1, 0, sizeof(float)*n_floats);
    memset(in2, 0, sizeof(float)*n_floats);    

    struct timeval tm_s, tm_e;
    gettimeofday(&tm_s, 0);

    int n = 0;
    while(n < n_calls) {
        int retcode;
        MPI_Status stat;

        // Same synthetic workload as the GPU calc() kernel, note that
        // out[] is overwritten (not accumulated) here.
        for(int idx = 0; idx < n_floats; ++idx)
            out[idx] = sinf(in1[idx])*(float)n/n_calls + cosf(in2[idx])*(float)(n_calls-n)/n_calls;

        if ((rank % 2) == 1) {
            // Progress report roughly every 10% of the calls (the
            // n_calls/10 == 0 test guards the modulo for n_calls < 10).
            if(verbose || n_calls/10 == 0 || (n % (n_calls/10)) == 0) {
                printf("# [%d] CPU: iter=%d invoking MPI_Recv()\n", rank, n);
                fflush(stdout);
            }
            retcode = MPI_Recv(out, n_floats, MPI_FLOAT, rank-1, 1 /*tag*/, MPI_COMM_WORLD, &stat);
        } else {
            if(verbose || n_calls/10 == 0 || (n % (n_calls/10)) == 0) { 
                printf("# [%d] CPU: iter=%d invoking MPI_Send()\n", rank, n);
                fflush(stdout);
            }
            retcode = MPI_Send(out, n_floats, MPI_FLOAT, rank+1, 1 /*tag*/, MPI_COMM_WORLD);
        }

        if(retcode != MPI_SUCCESS) {
            cuosError("ERROR: CPU: retcode=%d in MPI_Send/Recv\n", retcode);
            break;
        }

        ++n;
    }

    gettimeofday(&tm_e, 0);
    double dt = (tm_e.tv_sec - tm_s.tv_sec) + (tm_e.tv_usec - tm_s.tv_usec) / ( 1000.0 * 1000.0 );
    printf("# [%d] CPU: dt=%fs\n", rank, dt);

    if(verbose) printf("# [%d] CPU: freeing buffers\n", rank);
    free(out);
    free(in1);
    free(in2);
}

//----------------------------------------------------------------------
//----------------------------------------------------------------------

#define USE_MPI 1

/*
 * Entry point: initializes MPI (unless USE_MPI is 0), parses options,
 * then routes this rank to either the GPU implementation (cuOS or plain
 * CUDA, per -o) or the CPU reference, based on rank parity vs ODDGPU.
 *
 * Options: -a async test, -b n_blocks, -c n_calls, -d device_id,
 *          -t n_threads, -o plain CUDA (disable cuOS), -v verbose,
 *          -h help.  n_repeats is currently fixed at 1 (no option).
 *
 * FIXES: usage line showed "[d device_id]" (missing dash) and omitted
 * -a/-o/-d; host_name was printed uninitialized in the fake-MPI build;
 * -h was accepted by getopt but fell into the error path — it now prints
 * usage and exits successfully.
 */
int main(int argc, char* argv[])
{
    int n_threads = 128; //64 bad, 128 ok
    int n_blocks = -1;
    int rank, n_nodes;
    char host_name[512];
    int n_calls = 1;
    int n_repeats = 1;   // fixed: no command-line option sets this
    bool verbose = false;
    int device_id = 0;
    bool async_test = false;
    bool enable_cuos = true;

#if USE_MPI
    MPI_Init( &argc, &argv );
    MPI_Comm_rank( MPI_COMM_WORLD, &rank );
    MPI_Comm_size( MPI_COMM_WORLD, &n_nodes );
    int name_len = sizeof(host_name);
    MPI_Get_processor_name(host_name, &name_len);
#else
    printf("fake MPI run!\n");
    rank = 1;
    n_nodes = 2;
    // host_name was previously left uninitialized and printed below
    snprintf(host_name, sizeof(host_name), "localhost");
#endif
    printf("# [%d] rank=%d n_nodes=%d host_name=%s\n", rank, rank, n_nodes, host_name);

    static const char usage_fmt[] =
        "Usage: %s [-a][-o][-c n_calls][-b n_blocks][-t n_threads][-d device_id][-v]\n"
        "  -a         enable async test\n"
        "  -o         use plain CUDA test (disable CUOS)\n"
        "  -v         verbose dump\n";

    // arg scan
    int opt;
    while ((opt = getopt(argc, argv, "hab:c:d:t:ov")) != -1) {
        switch (opt) {
        case 'a':
            printf("# async flag set\n");
            async_test = true;
            break;
        case 'b':
            n_blocks = atoi(optarg);
            break;
        case 'c':
            n_calls = atoi(optarg);
            break;
        case 'd':
            device_id = atoi(optarg);
            break;
        case 't':
            n_threads = atoi(optarg);
            break;
        case 'o':
            enable_cuos = false;
            break;
        case 'v':
            verbose = true;
            break;
        case 'h':
            // explicit help request: same text, successful exit
            fprintf(stderr, usage_fmt, argv[0]);
            exit(EXIT_SUCCESS);
        default: /* '?' */
            fprintf(stderr, usage_fmt, argv[0]);
            exit(EXIT_FAILURE);
        }
    }

    //printf("# [%d] sleeping 100s\n", rank);
    //sleep(100);
    if(rank == 0) {
        printf("# n_calls=%d\n", n_calls);
    }

    if(n_blocks == -1) {
        n_blocks = 14;
        printf("# [%d] assigning default n_blocks=%d\n", rank, n_blocks); fflush(stdout);
    }
            
    // ODDGPU selects which rank parity runs the GPU side; the other
    // parity runs the CPU reference (see #define ODDGPU above).
    if((rank % 2) == ODDGPU) {
        {
            cuos::host::runtime_ref_t os;
            
            printf("# [%d] calling GPU implementation, cuOS version %d.%d\n", rank, CUOS_MAJOR_VERSION, CUOS_MINOR_VERSION);
            
            printf("# [%d] GPU: calling cuda_init(device=%d)\n", rank, device_id); fflush(stdout);
            cuos::host::cuda_init(device_id);
           
            printf("# [%d] GPU: calling make_runtime\n", rank); fflush(stdout);
            os = cuos::host::make_runtime(device_id, n_threads);
            if(os == 0) {
                fprintf(stderr, "error creating cuos runtime\n");
                exit(EXIT_FAILURE);
            }

            printf("# [%d] GPU: calling test function, barrier implementation: %s\n", rank, CUQU_BARRIER_STR);
            printf("# [%d] GPU: CUDA ver: %d.%d\n", rank, (CUDA_VERSION/1000), (CUDA_VERSION / 10 % 100));
            printf("# [%d] GPU: CUQU_ENABLE_TRACING=%d\n", rank, CUQU_ENABLE_TRACING);
            printf("# [%d] GPU: CUQU_DEBUG_VERBOSE=%d\n", rank, CUQU_DEBUG_VERBOSE);
            printf("# [%d] GPU: CUQU_ENABLE_PERF_HIST=%d\n", rank, CUQU_ENABLE_PERF_HIST);
            printf("# [%d] GPU: CUQU_INVALIDATE_ENTRIES=%d\n", rank, CUQU_INVALIDATE_ENTRIES);
            printf("# [%d] GPU: CUQU_ENABLE_COPY_FENCE=%d\n", rank, CUQU_ENABLE_COPY_FENCE);
            printf("# [%d] GPU: CUQU_ENABLE_TAIL_FENCE=%d\n", rank, CUQU_ENABLE_TAIL_FENCE);

            if(enable_cuos)
                test_mpi_cuos(os, rank, n_nodes, n_threads, n_blocks, n_repeats, n_calls, verbose, false, false, async_test);
            else
                test_mpi_cuda(os, rank, n_nodes, n_threads, n_blocks, n_repeats, n_calls, verbose, false, false, async_test);
            
            // invoke DTOR before cudaThreadExit()
            os.reset();
            
            printf("# [%d] GPU: calling cudaThreadExit()\n", rank);
        }
        // NOTE(review): cudaThreadExit() is deprecated in favor of
        // cudaDeviceReset() on modern toolkits; kept for the old CUDA
        // versions this code targets.
        cudaThreadExit();

    } else {

        printf("# [%d] calling CPU implementation\n", rank);
        test_mpi_host(rank, n_nodes, n_threads, n_blocks, n_repeats, n_calls, verbose, true, true);

    }

#if USE_MPI
    printf("# [%d] before Barrier\n", rank);
    MPI_Barrier(MPI_COMM_WORLD);
    printf("# [%d] Barrier done!\n", rank);
    MPI_Finalize();
#endif

    return EXIT_SUCCESS;
}


/*
 * Local variables:
 *  mode: c++
 *  c-indent-level: 4
 *  c-basic-offset: 4
 *  tab-width: 4
 *  indent-tabs-mode: nil
 * End:
 */
