/*
 *  Copyright 2010 INFN - APE group
 *
 *  Licensed under the Apache License, Version 2.0 (the "License");
 *  you may not use this file except in compliance with the License.
 *  You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 *  Unless required by applicable law or agreed to in writing, software
 *  distributed under the License is distributed on an "AS IS" BASIS,
 *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 *  See the License for the specific language governing permissions and
 *  limitations under the License.
 */


#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>
#include <stddef.h> // for size_t
#include <stdint.h> // for uint32_t
#include <assert.h>

#include <boost/function.hpp>
#include <boost/bind.hpp>
#include <boost/thread.hpp>
#include <boost/thread/condition.hpp>
#include <boost/thread/mutex.hpp>
//#include <boost/date_time/posix_time/posix_time.hpp>

#include <cuda.h>
#include <cuqu/queue.h>
#include <cuqu/cuda_queue.h>

#if 0
#define BLOCK_SIZE 32
#define N_BLOCKS   14
#elif 1
#define BLOCK_SIZE 64
#define N_BLOCKS   28
#endif

#define NTHREADS (BLOCK_SIZE*N_BLOCKS)
//#define WARP_SIZE 32

typedef cuqu::cuda_queue_t cuda_queue_t;

//----------------------------------------------------------------------
//----------------------------------------------------------------------

// Busy-wait kernel: every thread spins inside cuqu_delay() num_calls times.
// Used by test_delay_kernel() to measure per-call delay overhead.
__global__ void delay_kernel(int num_calls)
{
#ifdef CUPRINTF_CU
    // restrict cuPrintf output to thread 0 of every block
    cuPrintfRestrict(0, CUPRINTF_UNRESTRICTED);
#endif

    // each iteration burns ~10 time units on the device
    for(int call = 0; call < num_calls; ++call) {
        cuqu::cuqu_delay(10);
    }
}

//----------------------------------------------------------------------

// Benchmark delay_kernel(): launches it once with num_calls delay iterations,
// times the launch-to-completion interval with CUDA events, and prints the
// total elapsed ms plus the average time per delay call in microseconds.
// num_calls must be > 0 (it is used as a divisor for the per-call average).
void test_delay_kernel(int num_calls, bool verbose)
{
#ifdef CUPRINTF_CU
    cudaPrintfInit();
#endif
    cudaEvent_t startEvent, stopEvent;

    if(verbose) printf("# test delay_kernel config: BLOCK_SIZE=%d N_BLOCKS=%d num_calls=%d\n", BLOCK_SIZE, N_BLOCKS, num_calls);

    // some warm up, to exclude one-time driver/JIT costs from the timed run
    for(int k=0; k<10; ++k)
        delay_kernel<<<N_BLOCKS, BLOCK_SIZE>>>(100);
    // kernel launches return no status directly; surface launch errors here
    cuquSafeCall(cudaGetLastError());

    cuquSafeCall(cudaEventCreate(&startEvent));
    cuquSafeCall(cudaEventCreate(&stopEvent));

    // start timer
    cuquSafeCall(cudaEventRecord(startEvent, 0));

    // spawn GPU kernel
    delay_kernel<<<N_BLOCKS, BLOCK_SIZE>>>(num_calls);
    cuquSafeCall(cudaGetLastError());

    // stop and sync timer (the event records after the kernel in stream 0,
    // so synchronizing on it also waits for kernel completion)
    cuquSafeCall(cudaEventRecord(stopEvent, 0));
    cuquSafeCall(cudaEventSynchronize(stopEvent));

    float milliseconds = 0.0f;
    cuquSafeCall(cudaEventElapsedTime(&milliseconds, startEvent, stopEvent));
    // convert total ms to average microseconds per delay call
    float uspe = milliseconds / num_calls * 1000.0f;
    if(verbose) printf("# elapsed time(ms)\t\ttime per call(us)\n");
    printf("%f\t\t\t%f\n", milliseconds, uspe);

    cuquSafeCall(cudaEventDestroy(startEvent));
    cuquSafeCall(cudaEventDestroy(stopEvent));

#ifdef CUPRINTF_CU
    cudaPrintfDisplay(stdout, true);
    cudaPrintfEnd();
#endif
}

//----------------------------------------------------------------------
//----------------------------------------------------------------------

// Device-global flag read by void_kernel; initialized to 0 and never set
// positive anywhere in this file, so the kernel body is effectively a no-op.
__device__ int void_cond = 0;

// Minimal "do nothing" kernel used to measure bare launch overhead.
// The conditional read-modify-write of void_cond exists so the compiler
// cannot eliminate the kernel body entirely.
__global__ void void_kernel(int num_events)
{
#ifdef CUPRINTF_CU
    // restrict cuPrintf output to thread 0 of every block
    cuPrintfRestrict(0, CUPRINTF_UNRESTRICTED);
#endif

    const bool active = (void_cond > 0);
    if(active)
        void_cond *= 2;
}

//----------------------------------------------------------------------

// Benchmark empty-kernel launch overhead three ways:
//   1. a single synchronized launch,
//   2. n_rounds launches each individually timed and synchronized,
//   3. n_kernels launches queued back-to-back and timed as one batch
//      (measures the asynchronous launch rate rather than round-trip cost).
// Prints elapsed ms and the derived per-kernel time in microseconds.
void test_void_kernel(bool verbose)
{
#ifdef CUPRINTF_CU
    cudaPrintfInit();
#endif
    cudaEvent_t startEvent, stopEvent;
    float ms , us;

    if(verbose) printf("# test void_kernel config: BLOCK_SIZE=%d N_BLOCKS=%d\n", BLOCK_SIZE, N_BLOCKS);

    // some warm up, to exclude one-time driver/JIT costs from the timed runs
    for(int k=0; k<10; ++k)
        void_kernel<<<N_BLOCKS, BLOCK_SIZE>>>(10);
    // kernel launches return no status directly; surface launch errors here
    cuquSafeCall(cudaGetLastError());

    cuquSafeCall(cudaEventCreate(&startEvent));
    cuquSafeCall(cudaEventCreate(&stopEvent));

    // --- case 1: single synchronized launch ---
    cuquSafeCall(cudaEventRecord(startEvent, 0));
    cuquTrace("spawning void_kernel()\n");
    void_kernel<<<N_BLOCKS, BLOCK_SIZE>>>(10);
    cuquSafeCall(cudaGetLastError());
    cuquSafeCall(cudaEventRecord(stopEvent, 0));
    cuquSafeCall(cudaEventSynchronize(stopEvent));
    ms = 0.0f;
    cuquSafeCall(cudaEventElapsedTime(&ms, startEvent, stopEvent));
    us = ms * 1000.0f;
    if(verbose) printf("# single void kernel invocation elapsed time:%fus\n", us);

    // --- case 2: per-launch timing, synchronizing after every launch ---
    int n_rounds = 10000;
    ms = 0.0f;
    for(int k=0; k<n_rounds; ++k) {
        cuquSafeCall(cudaEventRecord(startEvent, 0));
        void_kernel<<<N_BLOCKS, BLOCK_SIZE>>>(10);
        cuquSafeCall(cudaEventRecord(stopEvent, 0));
        cuquSafeCall(cudaEventSynchronize(stopEvent));
        float lms = 0.0f;
        cuquSafeCall(cudaEventElapsedTime(&lms, startEvent, stopEvent));
        ms += lms;  // accumulate total across all rounds
    }
    cuquSafeCall(cudaGetLastError());
    us = ms * 1000.0f / n_rounds;
    if(verbose) {
        printf("# %d synchronized void kernel invocations\n", n_rounds);
        printf("# elapsed time(ms)\t\tsingle kernel time(us)\n");
    }
    printf("%f\t\t\t%f\n", ms, us);

    // --- case 3: queue all launches, time the whole batch once ---
    int n_kernels = 10000;
    cuquSafeCall(cudaEventRecord(startEvent, 0));
    for(int k=0; k<n_kernels; ++k) {
        void_kernel<<<N_BLOCKS, BLOCK_SIZE>>>(10);
    }
    cuquSafeCall(cudaGetLastError());
    cuquSafeCall(cudaEventRecord(stopEvent, 0));
    cuquSafeCall(cudaEventSynchronize(stopEvent));
    ms = 0.0f;
    cuquSafeCall(cudaEventElapsedTime(&ms, startEvent, stopEvent));
    us = ms * 1000.0f / n_kernels;
    if(verbose) {
        printf("# %d queued void kernel invocations\n", n_kernels);
        printf("# elapsed time(ms)\t\tsingle kernel time(us)\n");
    }
    printf("%f\t\t\t%f\n", ms, us);

    cuquSafeCall(cudaEventDestroy(startEvent));
    cuquSafeCall(cudaEventDestroy(stopEvent));

#ifdef CUPRINTF_CU
    cudaPrintfDisplay(stdout, true);
    cudaPrintfEnd();
#endif
}

//----------------------------------------------------------------------
//----------------------------------------------------------------------

// Device-global barrier state shared by all blocks of barrier_kernel.
// (Re)initialized from the host in test_barrier_kernel() via cudaMemcpyToSymbol.
__device__ cuqu::detail::barrier_t barrier = CUQU_INIT_BARRIER;

// Repeatedly synchronizes the whole grid: every iteration performs one
// grid-wide barrier_wait(), so total runtime / num_events gives the
// per-barrier cost measured by test_barrier_kernel().
__global__ void barrier_kernel(int num_events)
{
    // number of blocks in the grid: barrier_wait() needs the participant count
    const uint grid_size = gridDim.y*gridDim.x;

#ifdef CUPRINTF_CU
    // restrict cuPrintf output to thread 0 of every block
    cuPrintfRestrict(0, CUPRINTF_UNRESTRICTED);
#endif

    for(int round = 0; round < num_events; ++round) {
        barrier_wait(&barrier, grid_size);
    }
}

//----------------------------------------------------------------------

// Benchmark grid-wide barriers: resets the device-side barrier symbol,
// launches barrier_kernel() to execute n_pump_events barrier rounds, and
// prints total elapsed ms plus average microseconds per barrier event.
// n_pump_events must be > 0 (used as a divisor).
void test_barrier_kernel(int n_pump_events, bool verbose)
{
#ifdef CUPRINTF_CU
    cudaPrintfInit();
#endif
    cudaEvent_t startEvent, stopEvent;

    if(verbose) printf("# barrier_kernel config: BLOCK_SIZE=%d N_BLOCKS=%d n_pump_events=%d\n", BLOCK_SIZE, N_BLOCKS, n_pump_events);

    // Reset the __device__ `barrier` symbol to its initial state so repeated
    // test runs start clean.  Named host_barrier (not `barrier`) on purpose:
    // a local called `barrier` would shadow the device symbol and make the
    // string-based symbol lookup below deceptive to the reader.
    // NOTE(review): string symbol names for cudaMemcpyToSymbol are deprecated
    // (removed in modern CUDA); newer toolkits want the symbol itself.
    cuqu::detail::barrier_t host_barrier = CUQU_INIT_BARRIER;
    cuquSafeCall(cudaMemcpyToSymbol("barrier", &host_barrier, sizeof(host_barrier), 0));

    cuquSafeCall(cudaEventCreate(&startEvent));
    cuquSafeCall(cudaEventCreate(&stopEvent));

    // start timer
    cuquSafeCall(cudaEventRecord(startEvent, 0));

    // spawn GPU kernel
    barrier_kernel<<<N_BLOCKS, BLOCK_SIZE>>>(n_pump_events);
    // kernel launches return no status directly; surface launch errors here
    cuquSafeCall(cudaGetLastError());

    // stop and sync timer
    cuquSafeCall(cudaEventRecord(stopEvent, 0));
    cuquSafeCall(cudaEventSynchronize(stopEvent));

    float milliseconds = 0.0f;
    cuquSafeCall(cudaEventElapsedTime(&milliseconds, startEvent, stopEvent));
    // convert total ms to average microseconds per barrier event
    float uspe = milliseconds / n_pump_events * 1000.0f;
    if(verbose) printf("# elapsed time(ms)\t\ttime per event(us)\n");
    printf("%f\t\t\t%f\n", milliseconds, uspe);

    cuquSafeCall(cudaEventDestroy(startEvent));
    cuquSafeCall(cudaEventDestroy(stopEvent));

#ifdef CUPRINTF_CU
    cudaPrintfDisplay(stdout, true);
    cudaPrintfEnd();
#endif
}

//----------------------------------------------------------------------
//----------------------------------------------------------------------

// Payload type pumped through the queue in the dequeue test:
// a fixed 256-bit record made of eight 32-bit words.
typedef struct test_event {
    uint32_t w[8]; // raw words; pusher_thread() fills w[0..3], leaves w[4..7] zero
} test_event_t;

// zero-initializer for all eight words of a test_event_t
#define INIT_TEST_EVENT { 0,0,0,0,0,0,0,0 }

// queue specializations carrying test_event_t payloads
typedef cuqu::queue<test_event_t> queue_t;
typedef cuqu::gpu_event<test_event_t> gpu_event_t;

//----------------------------------------------------------------------

// Device-global landing slot for fetched events; zeroed from the host in
// test_dequeue_kernel() before each run.
__device__ gpu_event_t event = INIT_GPU_EVENT(INIT_TEST_EVENT);

// this triggers a compiler error loop on CUDA3.2rc
//gpu_event_t event_0 = INIT_GPU_EVENT(INIT_TEST_EVENT);

// Drains num_events events from the queue, blocking (infinite timeout) on
// each fetch.  Stops early and reports if a fetch returns anything other
// than cuqu::SUCCESS.
__global__ void dequeue_kernel(cuda_queue_t q, int num_events)
{
#ifdef CUPRINTF_CU
    // restrict cuPrintf output to thread 0 of every block
    cuPrintfRestrict(0, CUPRINTF_UNRESTRICTED);
#endif

    int n = 0;
    int retcode = 0;
    gpu_event_t *e;

    while(n < num_events) {
        e = &event;
        retcode = q->timed_fetch(e, cuqu::timeout_infinite);
        if(cuqu::SUCCESS == retcode) {
            ++n;
        } else {
            // fix: format previously had one %d for two arguments, so the
            // printed "retcode" was actually the event count n
            cudaPrintf("ERROR: dequeue_kernel() n=%d unexpected retcode=%d\n", n, retcode);
            break;
        }
    }
}

//----------------------------------------------------------------------

// Host-side handshake state shared with test_dequeue_kernel():
//   pusher_started - set by the pusher once running, awaited by the spawner
//   pusher_stop    - set by the spawner to request early pusher shutdown
int pusher_started = 0;
volatile int pusher_stop = 0;
boost::mutex mtx;

// Pushes n_pump_events test events into the queue from a host thread.
// Signals `cond` once running so the spawner can synchronize startup, polls
// pusher_stop between pushes, and aborts on any push failure.  Event words:
// { cnt, 10000, k, 20202+k, 0,0,0,0 } with k in [cnt_start, cnt_start+n).
void pusher_thread(queue_t *q, int n_pump_events, int cnt_start, boost::condition &cond)
{
    static int cnt = 0; // NOTE(review): never incremented, so w[0] is always 0

    // handshake: tell the spawning thread we are up and running
    {
        boost::mutex::scoped_lock lock(mtx);
        pusher_started = 1;
        pusher_stop = 0;
        cond.notify_all();
    }

    for(int k=cnt_start; k<cnt_start+n_pump_events; ++k) {
        test_event_t e = {{cnt,10000,k,20202+k,0,0,0,0}};
        int retcode = q->timed_push(&e, 100/*timeout ms*/);
        if(retcode != cuqu::SUCCESS) {
            printf("ERROR: error retcode %d for %d-th push, aborting it\n", retcode, k);
            break;
        }
        {
            boost::mutex::scoped_lock lock(mtx);
            if(pusher_stop) {
                printf("exiting due to pusher_stop signal\n");
                break;
            }
        }
    }

    // fix: always clear the handshake flag on exit (the old code reset it only
    // on the stop-signal path), otherwise the next test run's startup wait
    // sees the stale "started" state and returns before its pusher is alive
    {
        boost::mutex::scoped_lock lock(mtx);
        pusher_started = 0;
    }
}

//----------------------------------------------------------------------

// Benchmark host->GPU event delivery: a host pusher thread feeds
// n_pump_events events into a queue_size-slot queue while dequeue_kernel()
// drains them on the device.  Prints total elapsed ms and average
// microseconds per event.  The ordering below is load-bearing:
// spawn pusher -> wait for its startup handshake -> launch kernel ->
// time via events -> signal stop -> join.
void test_dequeue_kernel(int queue_size, int n_pump_events, bool verbose)
{
#ifdef CUPRINTF_CU
    cudaPrintfInit();
#endif
    cudaEvent_t startEvent, stopEvent;

    if(verbose) printf("# dequeue_kernel config: BLOCK_SIZE=%d N_BLOCKS=%d queue_size=%d n_pump_events=%d\n", BLOCK_SIZE, N_BLOCKS, queue_size, n_pump_events);

    // zero the __device__ `event` landing slot so stale data from a previous
    // run cannot leak into this one (string symbol lookup: legacy CUDA API)
    void *d_event;
    size_t event_size;
    cuquSafeCall(cudaGetSymbolAddress(&d_event, "event"));
    cuquSafeCall(cudaGetSymbolSize(&event_size, "event"));
    cuquSafeCall(cudaMemset(d_event, 0, event_size));

    cuquTrace("queue CTOR with %d slots\n", queue_size);
    queue_t queue(queue_size, BLOCK_SIZE);
    queue_t *q = &queue;
    // device-side handle passed by value to the kernel
    cuda_queue_t cuda_q = queue.get_cuda_ref();

    cuquTrace("spawning pusher thread\n");
    boost::condition cond;
    boost::function0<void> fun = boost::bind(pusher_thread, q, n_pump_events, 0, boost::ref(cond));
    boost::thread pusher(fun);

    // leave some time to the thread to startup: block until pusher_thread()
    // sets pusher_started under mtx and notifies `cond`
    cuquTrace("waiting for the pusher thread to start up\n");
    {
        boost::mutex::scoped_lock lock(mtx);
        while(!pusher_started) {
            // 100us timeout per wait so a missed notify only costs a recheck
            // NOTE(review): boost::TIME_UTC was renamed TIME_UTC_ in newer
            // Boost releases — confirm against the Boost version in use
            boost::xtime tmout;
            boost::xtime_get(&tmout, boost::TIME_UTC);
            tmout.nsec += 100000;
            bool retcode = cond.timed_wait(lock, tmout);
            if(!retcode) {
                printf("WARNING: cond.timed_wait() timeout reached, rechecking\n"); fflush(stdout);
            }
        }
    }

    cuquSafeCall(cudaEventCreate(&startEvent));
    cuquSafeCall(cudaEventCreate(&stopEvent));

    // start timer
    cuquSafeCall(cudaEventRecord(startEvent, 0));

    // spawn GPU kernel: drains n_pump_events events concurrently with the
    // host pusher filling the queue
    cuquTrace("spawning dequeue_kernel()\n");
    dequeue_kernel<<<N_BLOCKS, BLOCK_SIZE>>>(cuda_q, n_pump_events);

    // stop and sync timer (stopEvent is recorded after the kernel in stream
    // 0, so synchronizing on it also waits for kernel completion)
    cuquSafeCall(cudaEventRecord(stopEvent, 0));
    cuquSafeCall(cudaEventSynchronize(stopEvent));

    //printf("calling cudaThreadSynchronize()\n");
    //cuquSafeCall(cudaThreadSynchronize());
    //cuquCheckMsg("dequeue() execution failed");

    float milliseconds = 0.0f;
    cuquSafeCall(cudaEventElapsedTime(&milliseconds, startEvent, stopEvent));
    // convert total ms to average microseconds per event
    float uspe = milliseconds / n_pump_events * 1000.0f;
    if(verbose) printf("# elapsed time(ms)\t\ttime per event(us)\n");
    printf("%f\t\t\t%f\n", milliseconds, uspe);

    cuquSafeCall(cudaEventDestroy(startEvent));
    cuquSafeCall(cudaEventDestroy(stopEvent));

    // ask the pusher to exit (volatile write, read under mtx in the pusher)
    // and wait for it before the queue goes out of scope below
    cuquTrace("waiting end of pusher thread\n");
    pusher_stop = 1;
    pusher.join();

#ifdef CUPRINTF_CU
    cudaPrintfDisplay(stdout, true);
    cudaPrintfEnd();
#endif

    cuquTrace("destroying queue object\n");
}

//----------------------------------------------------------------------

// Entry point: parses -e (event count) / -q (queue size) / -h (help),
// initializes CUDA on device 0 (mapped host memory required), then runs the
// void/delay/barrier/dequeue benchmark suites, each with a warmup pass
// followed by four timed repetitions (only the first one verbose).
int main(int argc, char* argv[])
{
    cudaDeviceProp deviceProp;
    int idev;

    // defaults: pump exactly one queue's worth of events
    int queue_size = 8;
    int n_pump_events = queue_size;

    // arg scan
    int opt;
    while ((opt = getopt(argc, argv, "he:q:")) != -1) {
        switch (opt) {
        case 'e':
            n_pump_events = atoi(optarg);
            break;
        case 'q':
            queue_size = atoi(optarg);
            break;
        case 'h':
            // fix: 'h' is in the optstring but had no case, so -h fell into
            // the error path and exited with EXIT_FAILURE; an explicit help
            // request is not an error
            printf("Usage: %s [-e num_events] [-q queue_size]\n", argv[0]);
            exit(EXIT_SUCCESS);
        default: /* '?' */
            fprintf(stderr, "Usage: %s [-e num_events] [-q queue_size]\n",
                    argv[0]);
            exit(EXIT_FAILURE);
        }
    }

    // CUDA initialization: the queue implementation relies on zero-copy
    // (mapped) host memory, so bail out if the device cannot map it
    printf("# CUDA init\n"); fflush(stdout);
    idev = 0;
    cuquSafeCall(cudaSetDevice(idev));
    cuquSafeCall(cudaGetDeviceProperties(&deviceProp, idev));
    if(!deviceProp.canMapHostMemory) {
        fprintf(stderr, "Device %d cannot map host memory!\n", idev);
        exit(EXIT_FAILURE);
    }
    cuquSafeCall(cudaSetDeviceFlags(cudaDeviceMapHost));

    test_void_kernel(true);
    test_delay_kernel(10000, true);

    int warmup_n_events = 100;

#if 1
    printf("# WARMUP with %d events\n", warmup_n_events); fflush(stdout);
    test_barrier_kernel(warmup_n_events, true);
    printf("# REAL with %d events\n", n_pump_events); fflush(stdout);
    for(int k=0; k<4; ++k) {
        test_barrier_kernel(n_pump_events, !k); // verbose only on first run
    }
#endif
#if 1
    printf("# WARMUP with %d events\n", warmup_n_events); fflush(stdout);
    test_dequeue_kernel(queue_size, warmup_n_events, true);
    printf("# REAL with %d events\n", n_pump_events); fflush(stdout);
    for(int k=0; k<4; ++k) {
        test_dequeue_kernel(queue_size, n_pump_events, !k); // verbose only on first run
    }
#endif
    // legacy API (deprecated in favor of cudaDeviceReset in CUDA 4+)
    cudaThreadExit();

    return EXIT_SUCCESS;
}

/*
 * Local variables:
 *  mode: c++
 *  c-indent-level: 4
 *  c-basic-offset: 4
 *  tab-width: 4
 *  indent-tabs-mode: nil
 * End:
 */
