/*
 *  Copyright 2010 INFN - APE group
 *
 *  Licensed under the Apache License, Version 2.0 (the "License");
 *  you may not use this file except in compliance with the License.
 *  You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 *  Unless required by applicable law or agreed to in writing, software
 *  distributed under the License is distributed on an "AS IS" BASIS,
 *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 *  See the License for the specific language governing permissions and
 *  limitations under the License.
 */

#include <boost/function.hpp>
#include <boost/bind.hpp>
#include <boost/thread.hpp>
#include <boost/thread/condition.hpp>
#include <boost/thread/mutex.hpp>
//#include <boost/date_time/posix_time/posix_time.hpp>

#include <cuqu/cuqu.h>
#ifdef CUPRINTF_INCLUDED
#include <utils/cuPrintf.cu>
#endif

#include <sys/time.h>

//----------------------------------------------------------------------
// taken from cuos

// Tagged-variant value used as a syscall-style parameter exchanged
// between device and host (taken from cuos). 'tag' selects which union
// member is live.
struct param {
    enum tag_t {
        tag_none   = 0,
        tag_int    = 1,
        tag_uint   = 2,
        tag_long   = 3,
        tag_ulong  = 4,
        tag_float  = 5,
        tag_double = 6,
        tag_ptr    = 7,
        num_tags
    };
    int               tag;   // one of tag_t (stored as plain int)
    uint              count; // element count (e.g. buffer length for tag_ptr)
    // Payload; interpretation is selected by 'tag'.
    union {
        int           si;
        uint          ui;
        long          sl;
        unsigned long ul;
        float         f;
        double        d;
        void         *ptr;
    };
};

// Build an empty param: tag_none, zero count, null payload.
static inline __host__ __device__ param cuos_make_none_param() {
    param out;
    out.ptr   = 0;   // clear the widest union member
    out.count = 0;
    out.tag   = param::tag_none;
    return out;
}

// Build a param carrying a raw pointer plus an element count.
// NOTE(review): 'count' is narrowed from size_t into the uint 'count'
// field — confirm callers never pass values above UINT_MAX.
static inline __host__ __device__ param cuos_make_ptr_param(void *ptr, size_t count) {
    param out;
    out.ptr   = ptr;
    out.count = count;
    out.tag   = param::tag_ptr;
    return out;
}

// Build a param carrying a single signed int.
static inline __host__ __device__ param cuos_make_int_param(int val) {
    param out;
    out.si    = val;
    out.count = 1;
    out.tag   = param::tag_int;
    return out;
}

// One device->host request: an identifier plus up to max_params tagged
// parameters. Pushed by the GPU kernels, fetched by the host thread.
struct request {
    enum {
        // bit values for flags field
        flag_pending      = 1<<0, 
        flag_need_reply   = 1<<1,
        flag_request      = 1<<2,
        // max number of params for syscall invocation 
        max_params        = 8
    };
    int    id;       // request/syscall identifier
    uint   serial;   // intended to match a request with its reply
    uint   n_params; // number of meaningful entries in params[]
    uint   flags;    // combination of the flag_* bits above
    param  params[max_params];
    //param  retval;
};

// One host->device reply: serial echoes the request's serial, retval
// carries the result as a tagged param.
struct reply {
    uint   serial;
    uint   flags; // undefined id, ...
    param  retval;
};

//----------------------------------------------------------------------

// Host-side typed queues over the cuqu transport:
// req carries device->host requests, rep carries host->device replies.
typedef cuqu::host::queue<request> req_queue_t;
typedef cuqu::host::queue<reply>   rep_queue_t;

typedef req_queue_t::raw_queue_t   req_raw_queue_t;
typedef rep_queue_t::raw_queue_t   rep_raw_queue_t;

// Event (queue item) types carried by each queue.
typedef req_queue_t::event_t req_event_t;
typedef rep_queue_t::event_t rep_event_t;

//typedef cuqu::gpu_event<test_event_t> gpu_event_t;

// Device-side view of the queue pair; small enough to pass to kernels
// by value.
struct dev_dblqueue {
    cuqu::device::queue *req; // device -> host request queue
    cuqu::device::queue *rep; // host -> device reply queue
};

typedef struct dev_dblqueue dev_dblqueue_t;

// Host-side owner of the request/reply queue pair.
struct dblqueue {
    req_queue_t req; // requests pushed by the GPU, fetched by the host
    rep_queue_t rep; // replies pushed by the host, fetched by the GPU

    // Both queues are sized identically: queue_size slots, block_size
    // threads per pushing block.
    __host__ dblqueue(size_t queue_size, size_t block_size) : req(queue_size, block_size), rep(queue_size, block_size)
    {
    }
    
    // Bundle the raw device-side queue pointers for passing to a kernel.
    __host__ dev_dblqueue get_dev_dblqueue()
    {
        dev_dblqueue ret;
        ret.req = req.get_device_queue();
        ret.rep = rep.get_device_queue();
        return ret;
    }
};

typedef struct dblqueue dblqueue_t;

//----------------------------------------------------------------------

// Grid-shared staging slots: req_event is filled by global thread 0 and
// pushed by all threads; rep_event receives the fetched reply. Zeroed
// from the host before each test run.
__device__ req_event_t req_event;
__device__ rep_event_t rep_event;

// Ping-pong benchmark kernel: each iteration pushes one request into the
// device->host queue (barrier-synchronized timed_push) and then blocks
// fetching the matching reply, n_events times.
// Expects a 1D/2D launch; the request payload lives in the grid-shared
// req_event global and is written by global thread 0 only.
__global__ void device_pushpop(dev_dblqueue dbl_q, int n_events)
{
    const unsigned int tid = threadIdx.x + blockDim.x * threadIdx.y + blockDim.x * blockDim.y * threadIdx.z;
    const unsigned int bid = blockIdx.x + gridDim.x * blockIdx.y;
    const unsigned int block_size = blockDim.x * blockDim.y * blockDim.z;
    //const unsigned int grid_size = gridDim.y * gridDim.x;
    const unsigned int gid = tid + bid * block_size;

    int n_req = 0;
    int n_rep = 0; // kept for symmetry/debugging; not otherwise read
    int retcode = 0;

    while(n_req < n_events) {

        // Only thread with gid equal 0 creates the event.
        // block_size=128 -> accounts for ~ 1us
        if(gid == 0) {
            request sys_desc;
            sys_desc.id = 8;
            sys_desc.serial = 0;
            sys_desc.n_params = 7;
            sys_desc.flags = 0xf100fe00;
            sys_desc.params[0] = cuos_make_ptr_param( 0, 1 );
            sys_desc.params[1] = cuos_make_int_param( 0x23232123 );
            sys_desc.params[2] = cuos_make_int_param( 0x11231232 );
            sys_desc.params[3] = cuos_make_int_param( 0x77712121 );
            sys_desc.params[4] = cuos_make_int_param( 0x99112233 );
            sys_desc.params[5] = cuos_make_int_param( 0xf8f8f8f8 );
            sys_desc.params[6] = cuos_make_ptr_param( (void*)0x55533322, 1 );
            sys_desc.params[7] = cuos_make_none_param();

            req_event.u.t = sys_desc;
        }
        // Make the freshly written request visible to the rest of the block
        // before pushing. NOTE(review): this only orders threads within
        // block 0; other blocks rely on the queue push for ordering — confirm
        // timed_push provides the needed inter-block synchronization.
        __syncthreads();

        // Push request
        retcode = dbl_q.req->timed_push(&req_event, cuqu::timeout_infinite);
        if(cuqu::SUCCESS == retcode) {
            ++n_req;
        } else if(retcode == cuqu::WOULDBLOCK) {
            cuqu_error("device_pushpop[%d,%d]: timeout exceeded for request %d\n", bid, tid, n_req);
            //break;
        } else {
            cuqu_error("device_pushpop[%d,%d]: error retcode=%d\n", bid, tid, retcode);
            break;
        }

        // Pop reply
        retcode = dbl_q.rep->timed_fetch(&rep_event, cuqu::timeout_infinite);
        if(cuqu::SUCCESS == retcode) {
            ++n_rep;
        } else {
            // Consistency fix: report through cuqu_error like every other
            // error path here (was a stray cudaPrintf call).
            cuqu_error("device_pushpop[%d,%d]: unexpected reply retcode=%d\n", bid, tid, retcode);
            break;
        }

    }
}

//----------------------------------------------------------------------

// Variant of device_pushpop that pushes with timed_push_nobar (no extra
// barrier inside the push). Same protocol: one request pushed, one reply
// fetched, n_events times.
__global__ void device_pushpop_opt(dev_dblqueue dbl_q, int n_events)
{
    const unsigned int tid = threadIdx.x + blockDim.x * threadIdx.y + blockDim.x * blockDim.y * threadIdx.z;
    const unsigned int bid = blockIdx.x + gridDim.x * blockIdx.y;
    const unsigned int block_size = blockDim.x * blockDim.y * blockDim.z;
    //const unsigned int grid_size = gridDim.y * gridDim.x;
    const unsigned int gid = tid + bid * block_size;

    int n_req = 0;
    int n_rep = 0; // kept for symmetry/debugging; not otherwise read
    int retcode = 0;

    while(n_req < n_events) {

        // Only thread with gid equal 0 creates the event.
        if(gid == 0) {
            request sys_desc;
            sys_desc.id = 8;
            sys_desc.serial = 0;
            sys_desc.n_params = 7;
            sys_desc.flags = 0xf100fe00;
            sys_desc.params[0] = cuos_make_ptr_param( 0, 1 );
            sys_desc.params[1] = cuos_make_int_param( 0x23232123 );
            sys_desc.params[2] = cuos_make_int_param( 0x11231232 );
            sys_desc.params[3] = cuos_make_int_param( 0x77712121 );
            sys_desc.params[4] = cuos_make_int_param( 0x99112233 );
            sys_desc.params[5] = cuos_make_int_param( 0xf8f8f8f8 );
            sys_desc.params[6] = cuos_make_ptr_param( (void*)0x55533322, 1 );
            sys_desc.params[7] = cuos_make_none_param();

            req_event.u.t = sys_desc;
        }
        // Make the freshly written request visible to the rest of the block
        // before pushing (block-local ordering only; see device_pushpop).
        __syncthreads();

        // Push request (no-barrier variant)
        retcode = dbl_q.req->timed_push_nobar(&req_event, cuqu::timeout_infinite);
        if(cuqu::SUCCESS == retcode) {
            ++n_req;
        } else if(retcode == cuqu::WOULDBLOCK) {
            // BUG FIX: messages previously claimed "device_pushpop", making
            // logs from the two kernels indistinguishable.
            cuqu_error("device_pushpop_opt[%d,%d]: timeout exceeded for request %d\n", bid, tid, n_req);
            //break;
        } else {
            cuqu_error("device_pushpop_opt[%d,%d]: error retcode=%d\n", bid, tid, retcode);
            break;
        }

        // Pop reply
        retcode = dbl_q.rep->timed_fetch(&rep_event, cuqu::timeout_infinite);
        if(cuqu::SUCCESS == retcode) {
            ++n_rep;
        } else {
            // Consistency fix: use cuqu_error like the other error paths
            // (was a stray cudaPrintf call).
            cuqu_error("device_pushpop_opt[%d,%d]: unexpected reply retcode=%d\n", bid, tid, retcode);
            break;
        }

    }
}


//----------------------------------------------------------------------

#if 1
// Dump the cuqu performance histogram of the request queue, device and
// host counters side by side; prints a notice when the feature is
// compiled out.
void print_histogram(dblqueue *dbl_q) {
#if CUQU_ENABLE_PERF_HIST
    req_queue_t *q = &dbl_q->req;
    unsigned int dev_hist[q->hist_size()];
    unsigned int *host_hist = q->get_host_hist_ptr();

    // Retrieve device histogram from device global memory.
    cuquSafeCall(cudaMemcpy(dev_hist, q->get_device_hist_ptr(), sizeof(unsigned int) * q->hist_size(), cudaMemcpyDeviceToHost));

    // print histogram
    // BUG FIX: use %zu for the size_t index and %u for the unsigned
    // counters (both were printed with %d).
    for (size_t i = 0; i < q->hist_size(); i++) {
        printf("d[%2zu]: %2u, h[%2zu]: %2u\n", i, dev_hist[i], i, host_hist[i]);
    }
#else
    printf("performance histogram feature is disabled\n"); fflush(stdout);
#endif
}
#endif

//----------------------------------------------------------------------

//int pusher_started = 0;
//volatile int pusher_stop = 0;

// Handshake state between main and the host poppush worker thread;
// reads/writes are guarded by 'mtx'.
int host_thread_started = 0;          // set by worker once it is running
volatile int host_thread_stop = 0;    // set by main to ask the worker to exit
boost::mutex mtx;

// ---------------------------------------------------------------------
// Host pop & push
// ---------------------------------------------------------------------
// Host worker thread body: fetch a request pushed by the GPU, optionally
// busy-wait sleep_us microseconds to simulate work, then push a reply.
// Signals startup via 'cond'/'host_thread_started' and exits when
// 'host_thread_stop' is raised (all guarded by the global 'mtx').
void host_poppush(dblqueue *dbl_q, boost::condition &cond, int sleep_us)
{
    {
        boost::mutex::scoped_lock lock(mtx);
        host_thread_started = 1;
        host_thread_stop = 0;
        cond.notify_all();
    }

    int k=0;
    while(true) {
        request req;
        reply rep;
        int retcode;

        retcode = dbl_q->req.timed_fetch(&req, 100/*timeout ms*/);
        // in case of host_thread_stop==true, retcode can be WOULDBLOCK
        if(retcode == cuqu::WOULDBLOCK) {
            {
                boost::mutex::scoped_lock lock(mtx);
                if(host_thread_stop) {
                    host_thread_started = 0;
                    cuquTrace("host_thread: exiting due to host_thread_stop signal\n");
                    break;
                }
            }
            // BUG FIX: nothing was fetched on a timeout — retry instead of
            // falling through and pushing a spurious reply for a request
            // that never arrived.
            continue;
        } else if(retcode != cuqu::SUCCESS) {
            printf("host_thread: error retcode %d for %d-th timed_fetch(100ms), aborting fetch attempt\n", retcode, k);
            break;
        } 

        // sleep to simulate some work (busy-wait: usleep granularity is too coarse)
        if(sleep_us > 0) {
#if 0
            usleep(sleep_us);
#else
            struct timeval tm;
            gettimeofday(&tm, NULL);
            double now;
            double tmout = tm.tv_sec + (tm.tv_usec + sleep_us) / 1000000.0;
            do {
                gettimeofday(&tm, NULL);
                now = tm.tv_sec + tm.tv_usec / 1000000.0;
            } while(now < tmout);
#endif
        }

        // fill in rep
        // rep_e = ...

        // send reply back
        retcode = dbl_q->rep.timed_push(&rep, 10000/*timeout ms*/);
        if(retcode != cuqu::SUCCESS) {
            // BUG FIX: message previously claimed 100ms; timeout is 10000ms.
            printf("host_thread: error retcode %d for %d-th timed_push(10000ms), aborting push attempt\n", retcode, k);
            break;
        } 

        k++;
    }
}

// ---------------------------------------------------------------------
// Test DEVICE <-> HOST queue direction
// ---------------------------------------------------------------------

// Selects which kernel variant the ping-pong tests launch.
enum test_type {
    test_plain, // device_pushpop: timed_push
    test_opt    // device_pushpop_opt: timed_push_nobar
};

// Run the device<->host ping-pong benchmark.
//   test_variant: kernel to launch (plain or opt)
//   n_threads/n_blocks: launch configuration (1D block / 1D grid)
//   n_repeats: timing repetitions; mean and std deviation are reported
//   queue_size: slots per queue; n_events: requests per kernel run
//   verbose/partial_dump/dump/hist: output controls
//   sleep_us: artificial per-request work in the host thread (us)
void test_pingpong(test_type test_variant, int n_threads, int n_blocks, int n_repeats, int queue_size, int n_events, bool verbose, bool partial_dump, bool dump, bool hist, int sleep_us)
{

#ifdef CUPRINTF_CU
    cudaPrintfInit();
#endif

    dim3 dimBlock(n_threads, 1, 1);
    dim3 dimGrid(n_blocks, 1, 1);
    cudaEvent_t startEvent, stopEvent;

    if(verbose) {   
        printf("# device <--> host\n");      
        printf("# kernel config: n_threads=%d n_blocks=%d n_repeats=%d queue_size=%d n_events=%d\n", n_threads, n_blocks, n_repeats, queue_size, n_events); fflush(stdout);
    }

    // Zero the staging events in device global memory.
    // NOTE(review): string-based symbol lookup is deprecated since CUDA 4.1
    // (removed in CUDA 5); pass the symbol itself on newer toolkits.
    void *d_req_event;
    size_t req_event_size;
    cuquSafeCall(cudaGetSymbolAddress(&d_req_event, "req_event"));
    cuquSafeCall(cudaGetSymbolSize(&req_event_size, "req_event"));
    cuquSafeCall(cudaMemset(d_req_event, 0, req_event_size));

    void *d_rep_event;
    size_t rep_event_size;
    cuquSafeCall(cudaGetSymbolAddress(&d_rep_event, "rep_event"));
    cuquSafeCall(cudaGetSymbolSize(&rep_event_size, "rep_event"));
    cuquSafeCall(cudaMemset(d_rep_event, 0, rep_event_size));

    cuquTrace("queue CTOR with %d slots\n", queue_size);
    dblqueue_t dbl_q(queue_size, n_threads);
    dev_dblqueue_t dev_dbl_q = dbl_q.get_dev_dblqueue();

    cuquTrace("spawning poppush thread\n");
    boost::condition cond;
    boost::function0<void> fun = boost::bind(host_poppush, &dbl_q, boost::ref(cond), sleep_us);
    boost::thread host_thread(fun);

    // wait for the poppush thread to signal that it is up
    cuquTrace("waiting for the poppush thread to start up\n");
    {
        boost::mutex::scoped_lock lock(mtx);
        while(!host_thread_started) {
            boost::xtime tmout;
            boost::xtime_get(&tmout, boost::TIME_UTC);
            tmout.nsec += 100000; // 100us
            bool retcode = cond.timed_wait(lock, tmout);
            if(!retcode) {
                printf("WARNING: cond.timed_wait() timeout reached, rechecking\n"); fflush(stdout);
            }
        }
    }

    cuquSafeCall(cudaEventCreate(&startEvent));
    cuquSafeCall(cudaEventCreate(&stopEvent));

    float rep_us[n_repeats];
    float rep_ms[n_repeats];

    if(verbose) printf("# n_threads\tn_blocks\ttot tm(ms)\tsigma\t\ttm per call(us)\tsigma\n");

    for(int nr=0; nr<n_repeats; ++nr) {
        // start timer
        cuquSafeCall(cudaEventRecord(startEvent, 0));
        
        // spawn GPU kernel
        if(test_variant == test_plain) {
            cuquTrace("spawning device_pushpop() kernel\n");
            device_pushpop<<<dimGrid, dimBlock>>>(dev_dbl_q, n_events);
        } else if(test_variant == test_opt) {
            cuquTrace("spawning device_pushpop_opt() kernel\n");
            device_pushpop_opt<<<dimGrid, dimBlock>>>(dev_dbl_q, n_events);
        } else {
            cuquError("invalid test type\n");
            exit(EXIT_FAILURE);
        }
        // catch launch-configuration errors right away
        cuquSafeCall(cudaGetLastError());
        
        // stop and sync timer
        cuquSafeCall(cudaEventRecord(stopEvent, 0));
        cuquSafeCall(cudaEventSynchronize(stopEvent));
        
        float milliseconds = 0.0f;
        cuquSafeCall(cudaEventElapsedTime(&milliseconds, startEvent, stopEvent));
        float uspe = milliseconds / n_events * 1000.0f; // microseconds per event
        if(partial_dump) {
            printf("%d\t\t%d\t\t%f\t%f\t%f\t%f\n", n_threads, n_blocks, milliseconds, 0.0, uspe, 0.0);
            fflush(stdout);
        }

        rep_us[nr] = uspe;
        rep_ms[nr] = milliseconds;
    }
    // calc average
    double ms = 0,  us = 0;
    double ms2 = 0, us2 = 0;
    double sms = 0, sus = 0;
    for(int nr=0; nr < n_repeats; ++nr) {
        ms  += rep_ms[nr];
        ms2 += rep_ms[nr]*rep_ms[nr];
        us  += rep_us[nr];
        us2 += rep_us[nr]*rep_us[nr];
    }
    ms /= n_repeats;
    us /= n_repeats;
    // accumulate squared deviations for the standard deviation
    for(int nr=0; nr < n_repeats; ++nr) {
        // BUG FIX: was '=', which kept only the last repeat's deviation
        sms += (rep_ms[nr] - ms)*(rep_ms[nr] - ms);
        sus += (rep_us[nr] - us)*(rep_us[nr] - us);
    }
    sms = std::sqrt(sms/n_repeats);
    sus = std::sqrt(sus/n_repeats);
    if(dump) {        
        printf("%d\t\t%d\t\t%f\t%f\t%f\t%f\n", n_threads, n_blocks, ms, sms, us, sus);
        fflush(stdout);
        if (hist) {
            print_histogram(&dbl_q);
            fflush(stdout);
        }
    }

    cuquSafeCall(cudaEventDestroy(startEvent));
    cuquSafeCall(cudaEventDestroy(stopEvent));

    cuquTrace("waiting end of dequeuer thread\n");
    host_thread_stop = 1;
    host_thread.join();
    {
        // Reset handshake state so a later run waits for its own thread
        // (the worker clears it only on the clean WOULDBLOCK/stop path).
        boost::mutex::scoped_lock lock(mtx);
        host_thread_started = 0;
    }

#ifdef CUPRINTF_CU
    cudaPrintfDisplay(stdout, true);
    cudaPrintfEnd();
#endif

    cuquTrace("destroying queue object\n");
}

// ---------------------------------------------------------------------
// simulate CUOS cuMPI_Send behaviour to reproduce a bug
// ---------------------------------------------------------------------

// Host worker variant that, after each fetched request, also performs an
// async device->host copy on io_stream and busy-polls the stream until
// completion (simulates CUOS cuMPI_Send behaviour). Same startup/stop
// handshake as host_poppush.
void host_poppush_io(dblqueue *dbl_q, boost::condition &cond, int sleep_us, void *d_buf, void *h_buf,size_t buf_size, cudaStream_t io_stream)
{
    {
        boost::mutex::scoped_lock lock(mtx);
        host_thread_started = 1;
        host_thread_stop = 0;
        cond.notify_all();
    }

    int k=0;
    while(true) {
        request req;
        reply rep;
        int retcode;

        retcode = dbl_q->req.timed_fetch(&req, 100/*timeout ms*/);
        // in case of host_thread_stop==true, retcode can be WOULDBLOCK
        if(retcode == cuqu::WOULDBLOCK) {
            {
                boost::mutex::scoped_lock lock(mtx);
                if(host_thread_stop) {
                    host_thread_started = 0;
                    cuquTrace("host_thread: exiting due to host_thread_stop signal\n");
                    break;
                }
            }
            // BUG FIX: nothing was fetched on a timeout — retry instead of
            // falling through and pushing a spurious reply.
            continue;
        } else if(retcode != cuqu::SUCCESS) {
            printf("host_thread: error retcode %d for %d-th timed_fetch(100ms), aborting fetch attempt\n", retcode, k);
            break;
        } 

        // sleep to simulate some work (busy-wait: usleep granularity is too coarse)
        if(sleep_us > 0) {
#if 0
            usleep(sleep_us);
#else
            struct timeval tm;
            gettimeofday(&tm, NULL);
            double now;
            double tmout = tm.tv_sec + (tm.tv_usec + sleep_us) / 1000000.0;
            do {
                gettimeofday(&tm, NULL);
                now = tm.tv_sec + tm.tv_usec / 1000000.0;
            } while(now < tmout);
#endif
        }


        cudaError_t ret = cudaMemcpyAsync(h_buf, d_buf, buf_size, cudaMemcpyDeviceToHost, io_stream);
        // BUG FIX: was testing 'retcode' (the queue status) instead of 'ret',
        // so cudaMemcpyAsync failures went undetected.
        if(ret != cudaSuccess) {
            printf("host_thread: cudaMemcpyAsync error ret=%d\n", ret);
            break;
        }
        // Busy-poll the stream until the copy completes.
        while(true) {
            ret = cudaStreamQuery(io_stream);
            if(ret == cudaSuccess) {
                cuquTrace("stream done!\n");
                break;
            } else if(ret == cudaErrorNotReady)
                continue;
            else {
                printf("host_thread: ERROR streamQuery ret=%d\n", ret);
                exit(EXIT_FAILURE);
            }
        }

        // fill in rep
        // rep_e = ...

        // send reply back
        retcode = dbl_q->rep.timed_push(&rep, 10000/*timeout ms*/);
        if(retcode != cuqu::SUCCESS) {
            // BUG FIX: message previously claimed 100ms; timeout is 10000ms.
            printf("host_thread: error retcode %d for %d-th timed_push(10000ms), aborting push attempt\n", retcode, k);
            break;
        } 

        k++;
    }
}

// ---------------------------------------------------------------------

// Ping-pong benchmark with an additional async device->host copy per
// request, driven by host_poppush_io. Kernels run on exec_stream while
// copies run on io_stream. Requires CUDA >= 4.0 (context sharing across
// host threads).
void test_pingpong_io(test_type test_variant, int n_threads, int n_blocks, int n_repeats, int queue_size, int n_events, bool verbose, bool partial_dump, bool dump, bool hist, int sleep_us, size_t buf_size)
{
    if(CUDA_VERSION < 4000) {
        printf("ERROR: can't exec on CUDA ver < 4.0 due to multi-thread sharing\n");
        exit(EXIT_FAILURE);
    }

#ifdef CUPRINTF_CU
    cudaPrintfInit();
#endif

    dim3 dimBlock(n_threads, 1, 1);
    dim3 dimGrid(n_blocks, 1, 1);
    cudaEvent_t startEvent, stopEvent;

    if(verbose) {   
        printf("# device <--> host\n");      
        printf("# kernel config: n_threads=%d n_blocks=%d n_repeats=%d queue_size=%d n_events=%d\n", n_threads, n_blocks, n_repeats, queue_size, n_events); fflush(stdout);
        // BUG FIX: buf_size is size_t, print with %zu (was %d)
        printf("# device and host buffer size: %zu\n", buf_size);
    }

    // Zero the staging events in device global memory.
    // NOTE(review): string-based symbol lookup is deprecated since CUDA 4.1.
    void *d_req_event;
    size_t req_event_size;
    cuquSafeCall(cudaGetSymbolAddress(&d_req_event, "req_event"));
    cuquSafeCall(cudaGetSymbolSize(&req_event_size, "req_event"));
    cuquSafeCall(cudaMemset(d_req_event, 0, req_event_size));

    void *d_rep_event;
    size_t rep_event_size;
    cuquSafeCall(cudaGetSymbolAddress(&d_rep_event, "rep_event"));
    cuquSafeCall(cudaGetSymbolSize(&rep_event_size, "rep_event"));
    cuquSafeCall(cudaMemset(d_rep_event, 0, rep_event_size));

    cuquTrace("queue CTOR with %d slots\n", queue_size);
    dblqueue_t dbl_q(queue_size, n_threads);
    dev_dblqueue_t dev_dbl_q = dbl_q.get_dev_dblqueue();

    // pinned host buffer + device buffer for the per-request copy
    void *h_buf;
    cuquSafeCall(cudaHostAlloc(&h_buf, buf_size, cudaHostAllocPortable));

    void *d_buf;
    cuquSafeCall(cudaMalloc(&d_buf, buf_size));

    cudaStream_t io_stream;
    cuquSafeCall(cudaStreamCreate(&io_stream));

    cudaStream_t exec_stream;
    cuquSafeCall(cudaStreamCreate(&exec_stream));

    cuquTrace("spawning poppush_io host thread\n");
    boost::condition cond;
    boost::function0<void> fun = boost::bind(host_poppush_io, &dbl_q, boost::ref(cond), sleep_us, d_buf, h_buf, buf_size, io_stream);
    boost::thread host_thread(fun);

    // wait for the poppush_io thread to signal that it is up
    cuquTrace("waiting for the poppush_io thread to start up\n");
    {
        boost::mutex::scoped_lock lock(mtx);
        while(!host_thread_started) {
            boost::xtime tmout;
            boost::xtime_get(&tmout, boost::TIME_UTC);
            tmout.nsec += 100000; // 100us
            bool retcode = cond.timed_wait(lock, tmout);
            if(!retcode) {
                printf("WARNING: cond.timed_wait() timeout reached, rechecking\n"); fflush(stdout);
            }
        }
    }

    cuquSafeCall(cudaEventCreate(&startEvent));
    cuquSafeCall(cudaEventCreate(&stopEvent));

    float rep_us[n_repeats];
    float rep_ms[n_repeats];

    if(verbose) printf("# n_threads\tn_blocks\ttot tm(ms)\tsigma\t\ttm per call(us)\tsigma\n");

    for(int nr=0; nr<n_repeats; ++nr) {
        // start timer
        cuquSafeCall(cudaEventRecord(startEvent, exec_stream));
        
        // spawn GPU kernel
        if(test_variant == test_plain) {
            cuquTrace("spawning device_pushpop() kernel\n");
            device_pushpop<<<dimGrid, dimBlock, 0, exec_stream>>>(dev_dbl_q, n_events);
        } else if(test_variant == test_opt) {
            cuquTrace("spawning device_pushpop_opt() kernel\n");
            device_pushpop_opt<<<dimGrid, dimBlock, 0, exec_stream>>>(dev_dbl_q, n_events);
        } else {
            cuquError("invalid test type\n");
            exit(EXIT_FAILURE);
        }
        // catch launch-configuration errors right away
        cuquSafeCall(cudaGetLastError());
        
        // stop and sync timer
        cuquSafeCall(cudaEventRecord(stopEvent, exec_stream));
        cuquSafeCall(cudaEventSynchronize(stopEvent));
        
        float milliseconds = 0.0f;
        cuquSafeCall(cudaEventElapsedTime(&milliseconds, startEvent, stopEvent));
        float uspe = milliseconds / n_events * 1000.0f; // microseconds per event
        if(partial_dump) {
            printf("%d\t\t%d\t\t%f\t%f\t%f\t%f\n", n_threads, n_blocks, milliseconds, 0.0, uspe, 0.0);
            fflush(stdout);
        }

        rep_us[nr] = uspe;
        rep_ms[nr] = milliseconds;
    }
    // calc average
    double ms = 0,  us = 0;
    double ms2 = 0, us2 = 0;
    double sms = 0, sus = 0;
    for(int nr=0; nr < n_repeats; ++nr) {
        ms  += rep_ms[nr];
        ms2 += rep_ms[nr]*rep_ms[nr];
        us  += rep_us[nr];
        us2 += rep_us[nr]*rep_us[nr];
    }
    ms /= n_repeats;
    us /= n_repeats;
    // accumulate squared deviations for the standard deviation
    for(int nr=0; nr < n_repeats; ++nr) {
        // BUG FIX: was '=', which kept only the last repeat's deviation
        sms += (rep_ms[nr] - ms)*(rep_ms[nr] - ms);
        sus += (rep_us[nr] - us)*(rep_us[nr] - us);
    }
    sms = std::sqrt(sms/n_repeats);
    sus = std::sqrt(sus/n_repeats);
    if(dump) {        
        printf("%d\t\t%d\t\t%f\t%f\t%f\t%f\n", n_threads, n_blocks, ms, sms, us, sus);
        fflush(stdout);
        if (hist) {
            print_histogram(&dbl_q);
            fflush(stdout);
        }
    }

    cuquTrace("waiting end of dequeuer thread\n");
    host_thread_stop = 1;
    host_thread.join();
    {
        // Reset handshake state so a later run waits for its own thread.
        boost::mutex::scoped_lock lock(mtx);
        host_thread_started = 0;
    }

    cuquSafeCall(cudaEventDestroy(startEvent));
    cuquSafeCall(cudaEventDestroy(stopEvent));

    cuquSafeCall(cudaStreamDestroy(io_stream));
    // BUG FIX: exec_stream was leaked (never destroyed)
    cuquSafeCall(cudaStreamDestroy(exec_stream));
    cuquSafeCall(cudaFreeHost(h_buf));
    cuquSafeCall(cudaFree(d_buf));

#ifdef CUPRINTF_CU
    cudaPrintfDisplay(stdout, true);
    cudaPrintfEnd();
#endif

    cuquTrace("destroying queue object\n");
}

// ---------------------------------------------------------------------
// Main function
// ---------------------------------------------------------------------

// Entry point: initializes CUDA, parses options, prints the run
// configuration, then runs a warm-up pass followed by either a sweep
// over thread/block counts or a single ping-pong test.
int main(int argc, char* argv[])
{
    cudaDeviceProp deviceProp;
    int idev;

    // CUDA initialization
    printf("# CUDA init\n"); fflush(stdout);
    idev = 0;
    cuquSafeCall(cudaSetDevice(idev));
    cuquSafeCall(cudaGetDeviceProperties(&deviceProp, idev));
    if(!deviceProp.canMapHostMemory) {
        fprintf(stderr, "Device %d cannot map host memory!\n", idev);
        exit(EXIT_FAILURE);
    }
    // the queues rely on mapped (zero-copy) host memory
    cuquSafeCall(cudaSetDeviceFlags(cudaDeviceMapHost));

    // defaults
    int queue_size = 8;
    int n_events = 1000; //queue_size;
    int n_threads = 64;
    int n_blocks = 2*14; // default 14 = # SM on C2050
    bool sweep_test = false;
    int num_sm = deviceProp.multiProcessorCount;
    bool hist = false;
    bool no_warmup = false;
    test_type test_variant = test_plain;
    int sleep_us = 0;
    int n_repeats = 10;
    size_t buf_size = 1024*16; // only used by the (compiled-out) IO variant

    // arg scan
    int opt;
    while ((opt = getopt(argc, argv, "Hhe:q:b:t:swT:p:n:")) != -1) {
        switch (opt) {
        case 's':
            sweep_test = true;
            break;
        case 'b':
            n_blocks = atoi(optarg);
            break;
        case 't':
            n_threads = atoi(optarg);
            break;
        case 'e':
            n_events = atoi(optarg);
            break;
        case 'q':
            queue_size = atoi(optarg);
            break;
        case 'H':
            hist = true;
            break;
        case 'w':
            no_warmup = true;
            break;
        case 'p':
            sleep_us = atoi(optarg);
            break;
        case 'T':
            if(!strcmp(optarg, "plain")) {
                test_variant = test_plain;
            } else if(!strcmp(optarg, "opt")) {
                test_variant = test_opt;
            } else {
                cuquError("invalid test type:%s\n", optarg);
                exit(EXIT_FAILURE);
            }
            break;
        case 'n':
            n_repeats = atoi(optarg);
            break;
        case 'h':
        default:
            // BUG FIX: usage advertised -r, which getopt does not handle
            fprintf(stderr,
                    "Usage: %s [-sHw][-e num_events][-q queue_size][-b n_blocks][-t n_threads]\n"
                    "  -e N     set the desired number of events to N (default 1000)\n"
                    "  -H       dump histogram\n"
                    "  -q       set the size of the queue, in number of events\n"
                    "  -s       do a sweep test over thread number (32 to 512)\n"
                    "  -w       don't do warm-up\n"
                    "  -T X     set test type to: plain,opt\n"
                    "  -p X     set sleep time (us) in host poppush thread\n"
                    "  -n X     set num of test repetitions to X\n",
                    argv[0]);
            exit(EXIT_FAILURE);
        }
    }

    printf("# barrier implementation: %s\n", CUQU_BARRIER_STR);
    // BUG FIX: sizeof yields size_t, print with %zu (was %d)
    printf("# request queue item size: %zu\n", sizeof(req_event_t));
    printf("# reply queue item size: %zu\n", sizeof(rep_event_t));
    printf("# test type: %s\n", (test_variant==test_plain)?"plain":(test_variant==test_opt?"opt":"none"));
    printf("# host poppush thread sleep: %dus\n", sleep_us);
    printf("# n_repeats=%d\n", n_repeats);
    printf("# n_events=%d\n", n_events);
    fflush(stdout);
    
#if 1
    if(!no_warmup) {
        int warmup_n_events = 100;
        int warmup_n_repeats = 2;
        printf("# WARMUP with %d events and %d n_repeats\n", warmup_n_events, warmup_n_repeats); fflush(stdout);
        test_pingpong(test_variant, n_threads, n_blocks, warmup_n_repeats, queue_size, warmup_n_events, false, false, false, false, sleep_us);
    }

    if(sweep_test) {
        bool verbose = true;
        for(int nt=32; nt<=512; nt *= 2)
            for(int nb=1; nb<=4*14; ++nb) {
                if(nt>256 && nb > 2*num_sm) {
                    // skip deadlocking combination
                    continue;
                }
                //test_dequeue_kernel(nt, nb, n_repeats, queue_size, n_events, verbose, false, true);
                test_pingpong(test_variant, nt, nb, n_repeats, queue_size, n_events, verbose, false, true, hist, sleep_us);
                verbose = false;
            }
    } else {
        //test_dequeue_kernel(n_threads, n_blocks, n_repeats, queue_size, n_events, true, true, true);
        test_pingpong(test_variant, n_threads, n_blocks, n_repeats, queue_size, n_events, true, true, true, hist, sleep_us);
    }
#else
    test_pingpong_io(test_variant, n_threads, n_blocks, n_repeats, queue_size, n_events, true, true, true, hist, sleep_us, buf_size);
#endif
    cudaThreadExit();

    return EXIT_SUCCESS;
}

/*
 * Local variables:
 *  mode: c++
 *  c-indent-level: 4
 *  c-basic-offset: 4
 *  tab-width: 4
 *  indent-tabs-mode: nil
 * End:
 */
