/*
 *  Copyright 2010 INFN - APE group
 *
 *  Licensed under the Apache License, Version 2.0 (the "License");
 *  you may not use this file except in compliance with the License.
 *  You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 *  Unless required by applicable law or agreed to in writing, software
 *  distributed under the License is distributed on an "AS IS" BASIS,
 *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 *  See the License for the specific language governing permissions and
 *  limitations under the License.
 */

#include <cmath>
#include <cstdio>
#include <cstdlib>
#include <vector>

#include <unistd.h>

#include <boost/function.hpp>
#include <boost/bind.hpp>
#include <boost/thread.hpp>
#include <boost/thread/condition.hpp>
#include <boost/thread/mutex.hpp>
//#include <boost/date_time/posix_time/posix_time.hpp>

#include <cuqu/cuqu.h>
#ifdef CUPRINTF_INCLUDED
#include <utils/cuPrintf.cu>
#endif

// Convenience alias for the raw device-side queue handle.
typedef cuqu::cuda_queue_t cuda_queue_t;

// Direction under test: host pushes / device fetches, or the reverse.
typedef enum {
    HOST_TO_DEVICE,
    DEVICE_TO_HOST
} TestType;

//----------------------------------------------------------------------
//----------------------------------------------------------------------

// Payload carried through the queue.
// NOTE(review): the original comment said "256 bits", but w[8*7] is
// 56 x 32-bit words = 1792 bits -- confirm whether 8 or 8*7 is intended.
typedef struct test_event {
    uint32_t w[8*7];
} test_event_t;

// Zero initializer for the payload; elements of w beyond the 8 listed
// are value-initialized to 0.  (Currently referenced only from
// commented-out code below.)
#define INIT_TEST_EVENT { 0,0,0,0,0,0,0,0 }

// Host-side queue specialized for test_event_t, and its event type.
typedef cuqu::host::queue<test_event_t> queue_t;
typedef queue_t::event_t event_t;
//typedef cuqu::gpu_event<test_event_t> gpu_event_t;

//----------------------------------------------------------------------

// Single event slot in device global memory; written/read by the
// kernels below and zeroed from the host via cudaGetSymbolAddress()
// in the test functions.
__device__ event_t event;

// this triggers a compiler error loop on CUDA3.2rc
//gpu_event_t event_0 = INIT_GPU_EVENT(INIT_TEST_EVENT);

// Kernel: fetches n_events events from the queue, blocking on each
// fetch (infinite timeout).  Every launched thread runs this loop
// independently.  Launched by test_host_to_device().
__global__ void device_dequeuer(cuda_queue_t q, int n_events) 
{
#ifdef CUPRINTF_CU
    cuPrintfRestrict(0, CUPRINTF_UNRESTRICTED);
    //cuPrintfRestrict(CUPRINTF_UNRESTRICTED, 0);
    //cuPrintf("num_events=%d\n", num_events);
#endif

    int n = 0;
    int retcode = 0;

    while(n < n_events) {
        retcode = q->timed_fetch(&event, cuqu::timeout_infinite);
        if(cuqu::SUCCESS == retcode) {
            ++n;
        } else {
            // BUGFIX: the original called the undefined cudaPrintf()
            // and its format string declared one %d while two arguments
            // (n, retcode) were passed.  Report through cuqu_error()
            // for consistency with device_enqueuer().
            cuqu_error("device_dequeuer: unexpected retcode=%d after %d events\n", retcode, n);
            break;
        }
    }
}


// Kernel: pushes n_events events into the queue.  The globally-first
// thread (gid == 0) writes the payload into the shared device-global
// 'event'; afterwards EVERY thread attempts a push, so the queue sees
// one push per thread per iteration.
// NOTE(review): confirm that multiplicity is intended -- the matching
// host_dequeuer() simply drains until told to stop.
__global__ void device_enqueuer(cuda_queue_t q, int n_events) {
#ifdef CUPRINTF_CU
    cuPrintfRestrict(0, CUPRINTF_UNRESTRICTED);
    //cuPrintfRestrict(CUPRINTF_UNRESTRICTED, 0);
    //cuPrintf("num_events=%d\n", num_events);
#endif

    // Flat thread/block/global indices for a 3D block and 2D grid.
    const unsigned int tid = threadIdx.x + blockDim.x * threadIdx.y + blockDim.x * blockDim.y * threadIdx.z;
    const unsigned int bid = blockIdx.x + gridDim.x * blockIdx.y;
    const unsigned int block_size = blockDim.x * blockDim.y * blockDim.z;
    //const unsigned int grid_size = gridDim.y * gridDim.x;
    const unsigned int gid = tid + bid * block_size;

    int n = 0;
    int retcode = 0;
    
    while(n < n_events) {

        // Only thread with gid equal 0 creates event.
        if(gid == 0) {
            event_t e = { n, 10000, n, 0x20202+n, 0, 0, 0, 0 };
            event = e;
        }

        // Make the gid==0 write visible before other threads push.
        // NOTE(review): __syncthreads() is block-local; when n_blocks > 1
        // the threads of other blocks are NOT ordered w.r.t. the write
        // above -- confirm this is acceptable for the benchmark.
        __syncthreads();

        // Push the shared event; retry on timeout (WOULDBLOCK), abort on
        // any other failure.
        retcode = q->timed_push(&event, 100000/*ms*/);
        if(cuqu::SUCCESS == retcode) {
            ++n;
        } else if(retcode == cuqu::WOULDBLOCK) {
            cuqu_error("device_enqueuer[%d,%d]: timeout exceeded for event %d\n", bid, tid, n);
            //break;
        } else {
            cuqu_error("device_enqueuer[%d,%d]: bad retcode=%d\n", bid, tid, retcode);
            break;
        }
    }
}


//----------------------------------------------------------------------

// Print the device-side and host-side performance histograms side by
// side.  Only meaningful when the library was built with
// CUQU_ENABLE_PERF_HIST; otherwise prints a notice.
void print_histogram(queue_t *q) {
#if CUQU_ENABLE_PERF_HIST
    // std::vector replaces the original non-standard C++ VLA.
    std::vector<unsigned int> dev_hist(q->hist_size());
    unsigned int *host_hist = q->get_host_hist_ptr();

    // Retrieve device histogram from device global memory.
    cuquSafeCall(cudaMemcpy(&dev_hist[0], q->get_device_hist_ptr(), sizeof(unsigned int) * q->hist_size(), cudaMemcpyDeviceToHost));

    // BUGFIX: 'i' is a size_t; the original passed it to printf as %d.
    for (size_t i = 0; i < q->hist_size(); i++) {
        printf("d[%2u]: %2u, h[%2u]: %2u\n", (unsigned)i, dev_hist[i], (unsigned)i, host_hist[i]);
    }
#else
    printf("performance histogram feature is disabled\n"); fflush(stdout);
#endif
}


//----------------------------------------------------------------------

//int pusher_started = 0;
//volatile int pusher_stop = 0;

// Handshake state between the test driver and the host worker thread.
// host_thread_started is read and written under mtx; host_thread_stop
// is read under mtx by the workers.
// NOTE(review): the test functions below set host_thread_stop WITHOUT
// taking mtx -- works in practice for a volatile int flag, but the
// store is unordered w.r.t. the workers' reads; consider locking.
int host_thread_started = 0;
volatile int host_thread_stop = 0;
boost::mutex mtx;

// ---------------------------------------------------------------------
// Host enqueuer: worker-thread body that pushes events into the queue
// until the driver raises host_thread_stop.  Signals 'cond' once it is
// up so the driver knows the handshake flags are valid.
// ---------------------------------------------------------------------
void host_enqueuer(queue_t *q, int cnt_start, boost::condition &cond) {

    // NOTE(review): 'cnt' is static and never incremented, so every
    // event carries 0 in its first word -- confirm whether this was
    // meant to be a running counter.
    static int cnt = 0;

    // Announce startup to the spawning thread.
    //printf("calling notify_all()\n"); fflush(stdout);
    {
        boost::mutex::scoped_lock lock(mtx);
        host_thread_started = 1;
        host_thread_stop = 0;
        cond.notify_all();
    }

    int k=cnt_start;
    while(true) {
        test_event_t e = {{cnt,10000,k,20202+k,0,0,0,0}};
        int retcode = q->timed_push(&e, 100/*timeout ms*/);
        // Check the stop flag BEFORE interpreting retcode: during
        // shutdown a WOULDBLOCK timeout is expected, not an error.
        {
            boost::mutex::scoped_lock lock(mtx);
            if(host_thread_stop) {
                host_thread_started = 0;
                cuquTrace("host_enqueuer: exiting due to host_thread_stop signal\n");
                break;
            }
        }
        if(retcode != cuqu::SUCCESS) {
            printf("host_enqueuer: error retcode %d for %d-th timed_push(100ms), aborting push attempt\n", retcode, k);
            break;
        } 
        ++k;
    }
}


// ---------------------------------------------------------------------
// Host dequeuer: worker-thread body that drains events from the queue
// until the driver raises host_thread_stop.  Signals 'cond' once it is
// up so the driver knows the handshake flags are valid.
// ---------------------------------------------------------------------
void host_dequeuer(queue_t *q, int cnt_start, boost::condition &cond) {

    // Announce startup to the spawning thread.
    {
        boost::mutex::scoped_lock lock(mtx);
        host_thread_started = 1;
        host_thread_stop = 0;
        cond.notify_all();
    }

    for(int fetched = cnt_start; ; ++fetched) {
        test_event_t ev;
        const int rc = q->timed_fetch(&ev, 100/*timeout ms*/);

        // Inspect the stop flag before interpreting rc: a WOULDBLOCK
        // timeout during shutdown is expected, not an error.
        {
            boost::mutex::scoped_lock lock(mtx);
            if(host_thread_stop) {
                host_thread_started = 0;
                cuquTrace("host_dequeuer: exiting due to host_thread_stop signal\n");
                break;
            }
        }

        if(rc != cuqu::SUCCESS) {
            printf("host_dequeuer: error retcode %d for %d-th timed_fetch(100ms), aborting fetch attempt\n", rc, fetched);
            break;
        }
    }
}



// ---------------------------------------------------------------------
// Test DEVICE to HOST queue direction.
//
// Spawns a host_dequeuer() worker thread, then launches the
// device_enqueuer() kernel n_repeats times, timing each launch with
// CUDA events.  Reports mean and population standard deviation of the
// total time (ms) and of the per-event time (us).
//
// BUGFIX vs. the original: the standard-deviation loops assigned the
// squared residual ('=') instead of accumulating it ('+='), so sigma
// reflected only the last repetition.
// ---------------------------------------------------------------------
void test_device_to_host(int n_threads, int n_blocks, int n_repeats, int queue_size, int n_events, bool verbose, bool partial_dump, bool dump, bool hist) {

#ifdef CUPRINTF_CU
    cudaPrintfInit();
#endif

    dim3 dimBlock(n_threads, 1, 1);
    dim3 dimGrid(n_blocks, 1, 1);
    cudaEvent_t startEvent, stopEvent;

    if(verbose) {   
        printf("# device --> host\n"); fflush(stdout);       
        printf("# kernel config: n_threads=%d n_blocks=%d n_repeats=%d queue_size=%d n_events=%d\n", n_threads, n_blocks, n_repeats, queue_size, n_events); fflush(stdout);
    }

    // Zero the device-side 'event' slot before the run.
    // NOTE: string-based symbol lookup is a CUDA 3.x API (removed in
    // CUDA 5.0); kept because this file targets CUDA 3.2.
    void *d_event;
    size_t event_size;
    cuquSafeCall(cudaGetSymbolAddress(&d_event, "event"));
    cuquSafeCall(cudaGetSymbolSize(&event_size, "event"));
    cuquSafeCall(cudaMemset(d_event, 0, event_size));

    cuquTrace("queue CTOR with %d slots\n", queue_size);
    queue_t queue(queue_size, n_threads);
    queue_t *q = &queue;
    cuda_queue_t cuda_q = queue.get_cuda_ref();

    cuquTrace("spawning dequeuer thread\n");
    boost::condition cond;
    boost::function0<void> fun = boost::bind(host_dequeuer, q, 0, boost::ref(cond));
    boost::thread host_thread(fun);

    // Wait until the dequeuer thread signals that it has started,
    // polling with a short timed wait so a missed notify cannot hang us.
    cuquTrace("waiting for the dequeuer thread to start up\n");
    {
        boost::mutex::scoped_lock lock(mtx);
        while(!host_thread_started) {
            boost::xtime tmout;
            boost::xtime_get(&tmout, boost::TIME_UTC);
            tmout.nsec += 100000; // 100 us; nsec overflow only shortens the wait
            bool retcode = cond.timed_wait(lock, tmout);
            if(!retcode) {
                printf("WARNING: cond.timed_wait() timeout reached, rechecking\n"); fflush(stdout);
            }
        }
    }

    cuquSafeCall(cudaEventCreate(&startEvent));
    cuquSafeCall(cudaEventCreate(&stopEvent));

    // Per-repetition timings (std::vector replaces non-standard VLAs).
    std::vector<float> rep_us(n_repeats);
    std::vector<float> rep_ms(n_repeats);

    if(verbose) printf("# n_threads\tn_blocks\ttot tm(ms)\tsigma\t\ttm per call(us)\tsigma\n");

    for(int nr=0; nr<n_repeats; ++nr) {
        // start timer
        cuquSafeCall(cudaEventRecord(startEvent, 0));
        
        // spawn GPU kernel
        cuquTrace("spawning devide_enqueuer() kernel\n");
        device_enqueuer<<<dimGrid, dimBlock>>>(cuda_q, n_events);
        cuquSafeCall(cudaGetLastError()); // catch launch-configuration errors
        
        // stop and sync timer
        cuquSafeCall(cudaEventRecord(stopEvent, 0));
        cuquSafeCall(cudaEventSynchronize(stopEvent));
        
        float milliseconds = 0.0f;
        cuquSafeCall(cudaEventElapsedTime(&milliseconds, startEvent, stopEvent));
        float uspe = milliseconds / n_events * 1000.0f;
        if(partial_dump) {
            printf("%d\t\t%d\t\t%f\t%f\t%f\t%f\n", n_threads, n_blocks, milliseconds, 0.0, uspe, 0.0);
            fflush(stdout);
        }

        rep_us[nr] = uspe;
        rep_ms[nr] = milliseconds;
    }

    // Mean over repetitions.
    double ms = 0, us = 0;
    for(int nr=0; nr < n_repeats; ++nr) {
        ms += rep_ms[nr];
        us += rep_us[nr];
    }
    ms /= n_repeats;
    us /= n_repeats;

    // Population standard deviation (BUGFIX: accumulate with +=).
    double sms = 0, sus = 0;
    for(int nr=0; nr < n_repeats; ++nr) {
        sms += (rep_ms[nr] - ms)*(rep_ms[nr] - ms);
        sus += (rep_us[nr] - us)*(rep_us[nr] - us);
    }
    sms = std::sqrt(sms/n_repeats);
    sus = std::sqrt(sus/n_repeats);

    if(dump) {        
        printf("%d\t\t%d\t\t%f\t%f\t%f\t%f\n", n_threads, n_blocks, ms, sms, us, sus);
        fflush(stdout);
        if (hist) {
            print_histogram(q);
            fflush(stdout);
        }
    }

    cuquSafeCall(cudaEventDestroy(startEvent));
    cuquSafeCall(cudaEventDestroy(stopEvent));

    // Signal the worker to stop -- under the same mutex the worker uses
    // to read the flag -- then wait for it to finish.
    cuquTrace("waiting end of dequeuer thread\n");
    {
        boost::mutex::scoped_lock lock(mtx);
        host_thread_stop = 1;
    }
    host_thread.join();

#ifdef CUPRINTF_CU
    cudaPrintfDisplay(stdout, true);
    cudaPrintfEnd();
#endif

    cuquTrace("destroying queue object\n");
}

// ---------------------------------------------------------------------
// Test HOST to DEVICE queue direction.
//
// Spawns a host_enqueuer() worker thread, then launches the
// device_dequeuer() kernel n_repeats times, timing each launch with
// CUDA events.  Reports mean and population standard deviation of the
// total time (ms) and of the per-event time (us).
//
// BUGFIX vs. the original: the standard-deviation loops assigned the
// squared residual ('=') instead of accumulating it ('+='), so sigma
// reflected only the last repetition.
// ---------------------------------------------------------------------
void test_host_to_device(int n_threads, int n_blocks, int n_repeats, int queue_size, int n_events, bool verbose, bool partial_dump, bool dump, bool hist) {
#ifdef CUPRINTF_CU
    cudaPrintfInit();
#endif
    dim3 dimBlock(n_threads, 1, 1);
    dim3 dimGrid(n_blocks, 1, 1);
    cudaEvent_t startEvent, stopEvent;

    if(verbose) {
        printf("# host --> device\n"); fflush(stdout);
        printf("# kernel config: n_threads=%d n_blocks=%d n_repeats=%d queue_size=%d n_events=%d\n", n_threads, n_blocks, n_repeats, queue_size, n_events);
        fflush(stdout);
    }

    // Zero the device-side 'event' slot before the run.
    // NOTE: string-based symbol lookup is a CUDA 3.x API (removed in
    // CUDA 5.0); kept because this file targets CUDA 3.2.
    void *d_event;
    size_t event_size;
    cuquSafeCall(cudaGetSymbolAddress(&d_event, "event"));
    cuquSafeCall(cudaGetSymbolSize(&event_size, "event"));
    cuquSafeCall(cudaMemset(d_event, 0, event_size));

    cuquTrace("queue CTOR with %d slots\n", queue_size);
    queue_t queue(queue_size, n_threads);
    queue_t *q = &queue;
    cuda_queue_t cuda_q = queue.get_cuda_ref();

    cuquTrace("spawning enqueuer thread\n");
    boost::condition cond;
    boost::function0<void> fun = boost::bind(host_enqueuer, q, 0, boost::ref(cond));
    boost::thread host_thread(fun);

    // Wait until the enqueuer thread signals that it has started,
    // polling with a short timed wait so a missed notify cannot hang us.
    cuquTrace("waiting for the enqueuer thread to start up\n");
    {
        boost::mutex::scoped_lock lock(mtx);
        while(!host_thread_started) {
            boost::xtime tmout;
            boost::xtime_get(&tmout, boost::TIME_UTC);
            tmout.nsec += 100000; // 100 us; nsec overflow only shortens the wait
            bool retcode = cond.timed_wait(lock, tmout);
            if(!retcode) {
                printf("WARNING: cond.timed_wait() timeout reached, rechecking\n"); fflush(stdout);
            }
        }
    }

    cuquSafeCall(cudaEventCreate(&startEvent));
    cuquSafeCall(cudaEventCreate(&stopEvent));

    // Per-repetition timings (std::vector replaces non-standard VLAs).
    std::vector<float> rep_us(n_repeats);
    std::vector<float> rep_ms(n_repeats);

    if(verbose) printf("# n_threads\tn_blocks\ttot tm(ms)\tsigma\t\ttm per call(us)\tsigma\n");

    for(int nr=0; nr<n_repeats; ++nr) {
        // start timer
        cuquSafeCall(cudaEventRecord(startEvent, 0));
        
        // spawn GPU kernel
        cuquTrace("spawning device_dequeuer() kernel\n");
        device_dequeuer<<<dimGrid, dimBlock>>>(cuda_q, n_events);
        cuquSafeCall(cudaGetLastError()); // catch launch-configuration errors
        
        // stop and sync timer
        cuquSafeCall(cudaEventRecord(stopEvent, 0));
        cuquSafeCall(cudaEventSynchronize(stopEvent));
        
        float milliseconds = 0.0f;
        cuquSafeCall(cudaEventElapsedTime(&milliseconds, startEvent, stopEvent));
        float uspe = milliseconds / n_events * 1000.0f;
        if(partial_dump) { 
            printf("%d\t\t%d\t\t%f\t%f\t%f\t%f\n", n_threads, n_blocks, milliseconds, 0.0, uspe, 0.0);
            fflush(stdout);
        }

        rep_us[nr] = uspe;
        rep_ms[nr] = milliseconds;
    }

    // Mean over repetitions.
    double ms = 0, us = 0;
    for(int nr=0; nr < n_repeats; ++nr) {
        ms += rep_ms[nr];
        us += rep_us[nr];
    }
    ms /= n_repeats;
    us /= n_repeats;

    // Population standard deviation (BUGFIX: accumulate with +=).
    double sms = 0, sus = 0;
    for(int nr=0; nr < n_repeats; ++nr) {
        sms += (rep_ms[nr] - ms)*(rep_ms[nr] - ms);
        sus += (rep_us[nr] - us)*(rep_us[nr] - us);
    }
    sms = std::sqrt(sms/n_repeats);
    sus = std::sqrt(sus/n_repeats);

    if(dump) {
        printf("%d\t\t%d\t\t%f\t%f\t%f\t%f\n", n_threads, n_blocks, ms, sms, us, sus);
        fflush(stdout);
        if (hist) {
            print_histogram(q);
            fflush(stdout);
        }
    }

    cuquSafeCall(cudaEventDestroy(startEvent));
    cuquSafeCall(cudaEventDestroy(stopEvent));

    // Signal the worker to stop -- under the same mutex the worker uses
    // to read the flag -- then wait for it to finish.
    cuquTrace("waiting end of enqueuer thread\n");
    {
        boost::mutex::scoped_lock lock(mtx);
        host_thread_stop = 1;
    }
    host_thread.join();

#ifdef CUPRINTF_CU
    cudaPrintfDisplay(stdout, true);
    cudaPrintfEnd();
#endif

    cuquTrace("destroying queue object\n");
}


// ---------------------------------------------------------------------
// Test switcher: forwards the configuration to the test routine that
// matches the requested queue direction.  Aborts on an invalid
// TestType value.
// ---------------------------------------------------------------------
void run_test(  int n_threads, 
                int n_blocks, 
                int n_repeats, 
                int queue_size, 
                int n_events, 
                bool verbose, 
                bool partial_dump, 
                bool dump,
                bool hist, 
                TestType t) 
{
    if(HOST_TO_DEVICE == t) {
        test_host_to_device(n_threads, n_blocks, n_repeats, queue_size, n_events, verbose, partial_dump, dump, hist);
    } else if(DEVICE_TO_HOST == t) {
        test_device_to_host(n_threads, n_blocks, n_repeats, queue_size, n_events, verbose, partial_dump, dump, hist);
    } else {
        abort(); // unreachable for a valid TestType
    }
}

// ---------------------------------------------------------------------
// Main: parse command-line options, initialize CUDA (the queue needs a
// device that can map host memory), run a short warm-up pass, then the
// measured test -- either a single configuration or a sweep over
// thread/block counts.
// ---------------------------------------------------------------------
int main(int argc, char* argv[]) {
    cudaDeviceProp deviceProp;
    int idev;
    TestType test_type = HOST_TO_DEVICE;

    // CUDA initialization.
    printf("# CUDA init\n"); fflush(stdout);
    idev = 0;
    cuquSafeCall(cudaSetDevice(idev));
    cuquSafeCall(cudaGetDeviceProperties(&deviceProp, idev));
    if(!deviceProp.canMapHostMemory) {
        fprintf(stderr, "Device %d cannot map host memory!\n", idev);
        exit(EXIT_FAILURE);
    }
    cuquSafeCall(cudaSetDeviceFlags(cudaDeviceMapHost));

    // Defaults, overridable from the command line below.
    int queue_size = 8;
    int n_events = 1000; //queue_size;
    int n_threads = 64;
    int n_blocks = 2*14; // default 14 = # SM on C2050
    bool sweep_test = false;
    int num_sm = deviceProp.multiProcessorCount;
    bool hist = false;

    // arg scan
    int opt;
    while ((opt = getopt(argc, argv, "Hhe:q:b:t:sr")) != -1) {
        switch (opt) {
        case 'r':
            test_type = DEVICE_TO_HOST;
            break;
        case 's':
            sweep_test = true;
            break;
        case 'b':
            n_blocks = atoi(optarg);
            break;
        case 't':
            n_threads = atoi(optarg);
            break;
        case 'e':
            n_events = atoi(optarg);
            break;
        case 'q':
            queue_size = atoi(optarg);
            break;
        case 'H':
            hist = true;
            break;
        case 'h':
        default:
            // BUGFIX: -b and -t appeared in the synopsis but had no
            // description lines.
            fprintf(stderr,
                    "Usage: %s [-r][-s][-H][-e num_events][-q queue_size][-b n_blocks][-t n_threads]\n"
                    "  -e N     set the desired number of events to N (default 1000)\n"
                    "  -H       dump histogram\n"
                    "  -q       set the size of the queue, in number of events\n"
                    "  -b       set the number of blocks of the kernel launch\n"
                    "  -t       set the number of threads per block\n"
                    "  -s       do a sweep test over thread number (32 to 512)\n"
                    "  -r       test device-to-host instead of the default host-to-device\n",
                    argv[0]);
            exit(EXIT_FAILURE);
        }
    }

    printf("# barrier implementation: %s\n", CUQU_BARRIER_STR);
    // BUGFIX: sizeof yields a size_t; cast for the %d conversion.
    printf("# queue item size: %d\n", (int)sizeof(test_event_t));

    int warmup_n_events = 100;
    int n_repeats = 4;

    // Warm-up pass: short run with all output disabled.
    printf("# WARMUP with %d events\n", warmup_n_events); fflush(stdout);
    run_test(n_threads, n_blocks, 2, queue_size, warmup_n_events, false, false, false, false, test_type);

    printf("# %d repetitions with %d events\n", n_repeats, n_events); fflush(stdout);
    if(sweep_test) {
        // Sweep: thread count 32..512 (doubling) x block count 1..4*14.
        bool verbose = true;
        for(int nt=32; nt<=512; nt *= 2)
            for(int nb=1; nb<=4*14; ++nb) {
                if(nt>256 && nb > 2*num_sm) {
                    // skip deadlocking combination
                    continue;
                }
                run_test(nt, nb, n_repeats, queue_size, n_events, verbose, false, true, hist, test_type);
                verbose = false; // print the column header only once
            }
    } else {
        run_test(n_threads, n_blocks, n_repeats, queue_size, n_events, true, true, true, hist, test_type);
    }

    // cudaThreadExit() is the CUDA 3.x-era name of cudaDeviceReset().
    cudaThreadExit();

    return EXIT_SUCCESS;
}

/*
 * Local variables:
 *  mode: c++
 *  c-indent-level: 4
 *  c-basic-offset: 4
 *  tab-width: 4
 *  indent-tabs-mode: nil
 * End:
 */
