/*
 *  Copyright 2010 INFN - APE group
 *
 *  Licensed under the Apache License, Version 2.0 (the "License");
 *  you may not use this file except in compliance with the License.
 *  You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 *  Unless required by applicable law or agreed to in writing, software
 *  distributed under the License is distributed on an "AS IS" BASIS,
 *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 *  See the License for the specific language governing permissions and
 *  limitations under the License.
 */

#pragma once

#define cuqu_barrier() __asm__ __volatile__(""      :::"memory")
#define cuqu_mb() 	   __asm__ __volatile__("mfence":::"memory")
#define cuqu_rmb()	   __asm__ __volatile__("lfence":::"memory")
#define cuqu_wmb()	   __asm__ __volatile__("sfence":::"memory")


namespace cuqu
{ 
    namespace host
    {

        template <typename T>
        static __inline__ __host__ void __queue_perf_fail(queue<T> *q, unsigned int idx)
        {
            // Account a failed queue operation in bucket idx of the host-side
            // performance histogram. Compiles to a no-op when histograms are
            // disabled.
#if CUQU_ENABLE_PERF_HIST
            q->get_host_hist_ptr()[idx]++;
#else
            // Silence unused-parameter warnings in the disabled configuration.
            (void)q;
            (void)idx;
#endif
        }

        template <typename T>
        static __inline__ __host__ void __queue_perf_success(queue<T> *q, unsigned int idx)
        {
            // Account a successful queue operation in bucket idx of the
            // host-side performance histogram. Compiles to a no-op when
            // histograms are disabled.
#if CUQU_ENABLE_PERF_HIST
            q->get_host_hist_ptr()[idx]++;
#else
            // Silence unused-parameter warnings in the disabled configuration.
            (void)q;
            (void)idx;
#endif
        }

        // Construct a host-side queue of num_events slots, serviced on the
        // device by thread blocks of block_size threads. All member pointers
        // start null; the real work (allocation + device descriptor setup)
        // happens in init().
        template <typename T>
        queue<T>::queue(unsigned int num_events, unsigned int block_size) :
            m_nevents(0), m_mask(0), m_block_size(block_size), m_h_raw_q(0), m_d_raw_q(0), m_d_gpu_q(0)
        {
            init(num_events);
        }

        // One-time setup: allocates the ring buffer (header + event slots)
        // in mapped, portable pinned host memory so the GPU can access it
        // directly, initializes its control words, and copies a device-side
        // descriptor (device::queue) into GPU global memory.
        //
        // num_events must be a non-zero power of two so the read/write
        // counters can be wrapped with a simple bit mask.
        template <typename T>
        void queue<T>::init(unsigned int num_events)
        {
            unsigned int nelem = num_events;
            unsigned int mask = nelem-1;

            assert(nelem > 0);
            assert(isPow2(nelem));
#if 1
            // NOTE(review): this check suggests the device side copies one
            // 32-bit word per thread, so the block must hold at least
            // event<T>::nwords threads — confirm against the device code.
            if(m_block_size < detail::event<T>::nwords) {
                cuquError("invalid thread block size=%d < event size in w32=%d\n", m_block_size, detail::event<T>::nwords);
                exit(EXIT_FAILURE);
            }
#endif
            // alloc raw_queue (size_t avoids signed overflow for large queues)
            size_t raw_queue_size = sizeof(raw_queue_t) + (size_t)nelem*sizeof(event_t);
            raw_queue_t *h_raw_q, *d_raw_q;
            cuquSafeCall(cudaHostAlloc(&h_raw_q, raw_queue_size, cudaHostAllocMapped|cudaHostAllocPortable));
            // WARN: need to call this one from each different thread invoking push()

            // see CUDA_C_Programming_Guide.pdf 3.2.6.3
            cuquSafeCall(cudaHostGetDevicePointer(&d_raw_q, h_raw_q, 0));

            // BUGFIX: arguments were swapped with respect to the format string
            cuquTrace("h_raw_q=%p d_raw_q=%p\n", h_raw_q, d_raw_q);

            // init raw_queue through a volatile pointer so the stores are not
            // elided or reordered by the compiler
            volatile raw_queue_t *raw_q = h_raw_q;
            raw_q->nr = 0;
            raw_q->nw = 0;
            raw_q->pad1 = 0;
            raw_q->pad2 = 0;
            //raw_q->nevents = nelem;
            //raw_q->mask = nelem-1;
            // pre-fill every event word with the magic pattern
            event_t *pe = (event_t *)(raw_q->events);
            for(unsigned int i=0; i<nelem; ++i) {
                for(int j=0; j<event_t::nwords; ++j)
                    pe[i].u.a.w[j] = detail::event_magic;
            }
            cuqu_mb();
            // compiler/memory sync needed here on x86/x86_64?

            // alloc gpu_queue descriptor in device global memory
            cuda_queue_t d_gpu_q = 0;
            cuquSafeCall(cudaMalloc(&d_gpu_q, sizeof(device::queue)));
            //printf("d_gpu_q=%p\n", d_gpu_q);

            // build the device-side descriptor on the host, then copy it over
            device::queue gpu_q;
            gpu_q.eq_nr = raw_q->nr;
            //gpu_q.eq_nw = raw_q->nw;
            gpu_q.nevents = nelem;
            gpu_q.mask = mask;
            // work around 32/64 difference between host and device
            gpu_q.raw_ptr = (CUdeviceptr)(size_t)d_raw_q;
            gpu_q.errors = (cuqu::detail::atomic_t)CUQU_INIT_ATOMIC;
            gpu_q.rc = cuqu::UNSET;
            gpu_q.cnt = (cuqu::detail::atomic_t)CUQU_INIT_ATOMIC;
            gpu_q.bar_head = (cuqu::detail::barrier_t)CUQU_INIT_BARRIER;
            gpu_q.bar_tail = (cuqu::detail::barrier_t)CUQU_INIT_BARRIER;


#if CUQU_ENABLE_PERF_HIST
            // Size of histograms: one bucket per possible queue depth plus
            // one extra for the full (push) / empty (fetch) corner case
            size_t hist_mem = sizeof(unsigned int) * (nelem + 1);

            // Init device perf histogram
            cuquSafeCall(cudaMalloc(&gpu_q.hist, hist_mem));
            cuquSafeCall(cudaMemset(gpu_q.hist, 0, hist_mem));
            m_d_hist = gpu_q.hist;

            // Init host perf histogram
            m_h_hist = (unsigned int *) malloc(hist_mem);
            assert(m_h_hist);   // a silent NULL here would crash in try_push/try_fetch
            memset(m_h_hist, 0, hist_mem);
#endif
            //printf("d_gpu_q=%p &gpu_q=%p sizeof(gpu_q)=%d\n", d_gpu_q, &gpu_q, sizeof(gpu_q)); fflush(stdout);
            cuquSafeCall(cudaMemcpy(d_gpu_q, &gpu_q, sizeof(gpu_q), cudaMemcpyHostToDevice));

            // compiler/memory sync needed here on x86/x86_64?

            // publish queue state in the member fields
            m_nevents = nelem;
            m_mask    = mask;
            m_h_raw_q = h_raw_q;
            m_d_raw_q = d_raw_q;
            m_d_gpu_q = d_gpu_q;

            cuquTrace("init() d_gpu_q=%p\n", m_d_gpu_q);
        }

        // Release all queue resources: the mapped pinned host buffer, the
        // device-side descriptor and (when enabled) both performance
        // histograms. Member pointers are nulled to make use-after-free
        // easier to spot.
        template <typename T>
        queue<T>::~queue()
        {
            cuquTrace("~queue(): this=%p\n", this);
            cuquTrace("before cudaFreeHost(m_h_raw_q=%p)\n", m_h_raw_q);
            cuquSafeCall(cudaFreeHost(m_h_raw_q));
            cuquTrace("before cudaFree(m_d_gpu_q=%p)\n", m_d_gpu_q);
            cuquSafeCall(cudaFree(m_d_gpu_q));
            m_d_gpu_q = 0;
            m_h_raw_q = 0;
            // device alias of m_h_raw_q; the cudaFreeHost above released it
            m_d_raw_q = 0;
#if CUQU_ENABLE_PERF_HIST
            cuquTrace("before cudaFree(m_d_hist=%p)\n", m_d_hist);
            cuquSafeCall(cudaFree(m_d_hist));
            free(m_h_hist);
#endif
        }

        // Non-blocking producer side of the ring buffer: copy *val into the
        // next free slot and publish it by advancing the write counter nw.
        // Returns SUCCESS, or WOULDBLOCK if the queue is full.
        //
        // Lock-free protocol: nw is written only here (host producer), nr
        // only by the consumer; counters grow monotonically and are masked
        // with m_mask (queue size is a power of two) to index the slot array.
        template <typename T>
        int queue<T>::try_push(queue::value_t *val)
        {
            int ret = WOULDBLOCK;
            assert(m_h_raw_q);
            // volatile reads: always fetch the shared counters from memory
            unsigned int nw = *(volatile unsigned int*)&m_h_raw_q->nw;
            unsigned int nr = *(volatile unsigned int*)&m_h_raw_q->nr;
            // unmasked difference = number of events currently queued
            int n_queued_events = (int)nw - (int)nr;
            //bool is_full = (__queued_events(nw,nr) == q->nevents);
            bool is_full = (n_queued_events == m_nevents);
            //bool is_empty = (nw == nr);

            if(!is_full) {
                unsigned int masked_nw = nw & m_mask;
                event_t *e = (event_t *)(m_h_raw_q->events)+masked_nw;
                assert(sizeof(queue::value_t) <= sizeof(event_t));
                // potentially leaving a hole in e if sizeof(queue::value_t) < sizeof(event_t)
                memcpy(e, val, sizeof(queue::value_t));
                // serialize memory access with write barrier
                // (the payload must be globally visible before nw advances,
                // otherwise the consumer could read a half-written event)
                cuqu_mb();

                // publish the new event by bumping the write counter
                *(volatile uint32_t*)&(m_h_raw_q->nw) = nw + 1;
                cuqu_mb();                

#if CUQU_DEBUG_VERBOSE
                //cuquTrace("memcopy events[%d]=%p nr=%d nw=%d e={%08x,%08x,...,%08x,%08x}\n", masked_nw, e, nr, nw, e->u.t.w[0], e->u.t.w[1], e->u.t.w[6], e->u.t.w[7]);
                cuquTrace("memcopy events[%d]=%p nr=%d nw=%d\n", masked_nw, e, nr, nw+1);
                for(int k=0; k<event_t::nwords; ++k)
                    cuquTrace("e[%d]=%08x\n", k, e->u.a.w[k]);
#endif

#if CUQU_ENABLE_PERF_HIST  
                // Update performance histogram (bucket = observed queue depth)
                m_h_hist[n_queued_events]++;
#endif
                ret = SUCCESS;
            } else {
                //cuquErrorC(10, "%s:%d queue is full, nw=%u nr=%u\n", __FILE__, __LINE__, nw, nr);
#if CUQU_ENABLE_PERF_HIST  
                // Update performance histogram (last bucket counts full-queue hits)
                m_h_hist[m_nevents]++;
#endif
                cuquTrace("%s:%d queue is full, nw=%u nr=%u\n", __FILE__, __LINE__, nw, nr);
            }
            return ret;
        }

        // Push that never gives up: spin on try_push() until the queue has
        // room, yielding the CPU for 1us after every max_cnt failed polls.
        template <typename T>
        int queue<T>::blocking_push(queue::value_t *val)
        {
            const int poll_budget = 1000;
            int polls_left = poll_budget;
            int rc;
            while((rc = try_push(val)) == WOULDBLOCK) {
                if(--polls_left == 0) {
                    cuquTrace("WARNING: exceeded polling cnt, sleeping a little bit\n");
                    usleep(1);
                    polls_left = poll_budget;
                }
            }
            return rc;
        }

        // Push with an absolute deadline: poll try_push() until it succeeds
        // or timeout_ms milliseconds have elapsed; the clock is consulted
        // only every max_cnt iterations to keep the hot path cheap.
        // Returns SUCCESS, or WOULDBLOCK on timeout.
        template <typename T>
        int queue<T>::waiting_push(queue::value_t *val, int timeout_ms)
        {
            int retcode;
            const int max_cnt = 1000;
            int cnt = max_cnt;
            assert(timeout_ms >= 0);
            boost::xtime tmout;
            boost::xtime_get(&tmout, boost::TIME_UTC);
            // BUGFIX: timeout_ms*1000000 overflowed a 32-bit int for
            // timeouts >= ~2.15s, and the single-subtraction carry below
            // only handled one second of nsec overflow. Split ms into whole
            // seconds and nanoseconds first.
            tmout.sec  += timeout_ms / 1000;
            tmout.nsec += (boost::xtime::xtime_nsec_t)(timeout_ms % 1000) * 1000000;
            if(tmout.nsec >= 1000000000) {
                tmout.nsec -= 1000000000;
                tmout.sec  += 1;
            }
            do {
                retcode = try_push(val);
                if(retcode != WOULDBLOCK)
                    break;
                if(! --cnt) {
                    cuquTraceC(10, "exceeded polling cnt, checking time\n");
                    cnt = max_cnt;
                    boost::xtime now;
                    boost::xtime_get(&now, boost::TIME_UTC);
                    if(xtime_cmp(tmout, now) < 0) {
                        //unsigned int nw = m_h_raw_q->nw;
                        //unsigned int nr = m_h_raw_q->nr;
                        //cuquTrace("ERROR: timeout of %dms detected in %s, nw=%u nr=%u\n", timeout_ms, __FUNCTION__, nw, nr);
                        cuquTrace("ERROR: timeout of %dms detected in %s()\n", timeout_ms, __FUNCTION__);
                        break;
                    }
                }
            } while(true);
            return retcode;
        }

        // Dispatch on the timeout: an infinite timeout degenerates to the
        // unbounded blocking variant, anything else to the deadline variant.
        template <typename T>
        int queue<T>::timed_push(queue::value_t *val, int timeout_ms)
        {
            return (timeout_ms == timeout_infinite)
                ? blocking_push(val)
                : waiting_push(val, timeout_ms);
        }

        // Non-blocking consumer side of the ring buffer: copy the oldest
        // queued event into *val and release its slot by advancing the read
        // counter nr. Returns SUCCESS, or WOULDBLOCK if the queue is empty.
        //
        // Mirror of try_push(): nr is written only here, nw only by the
        // producer; both counters are read with volatile loads.
        template <typename T>
        int queue<T>::try_fetch(queue::value_t *val)
        {
            int ret = WOULDBLOCK;
            assert(m_h_raw_q);
            // volatile reads: always fetch the shared counters from memory
            unsigned int nw = *(volatile unsigned int*)&m_h_raw_q->nw;
            unsigned int nr = *(volatile unsigned int*)&m_h_raw_q->nr;
            int n_queued_events = (int)nw - (int)nr;
            //bool is_full = (n_queued_events == m_nevents);
            bool is_empty = (nw == nr);

            if(!is_empty) {
                unsigned int masked_nr = nr & m_mask;
                event_t *e = (event_t *)(m_h_raw_q->events)+masked_nr;
                assert(n_queued_events > 0);
                // potentially leaving a hole in e if sizeof(queue::value_t) < sizeof(event_t)
                memcpy(val, e, sizeof(queue::value_t));
                //cuquTrace("fetched events[%d]=%p nr=%d nw=%d e={%08x,%08x,%08x,...,%08x,%08x}\n", masked_nr, e, nr, nw, e->u.t.w[0], e->u.t.w[1], e->u.t.w[2], e->u.t.w[6], e->u.t.w[7]);

                // serialize memory access with full barrier
                // (the payload copy must complete before the slot is
                // released to the producer by advancing nr)
                cuqu_mb();

                *(volatile uint32_t*)&(m_h_raw_q->nr) = nr + 1;
                // NOTE(review): try_push() uses a full cuqu_mb() after its
                // counter update, while only a compiler barrier is used here
                // — confirm the asymmetry is intentional.
                cuqu_barrier();

#if CUQU_ENABLE_PERF_HIST  
                // Update performance histogram (bucket = observed queue depth)
                m_h_hist[n_queued_events]++;
#endif
                //__queue_perf_success(&this, n_queued_events);     // better ??
                ret = SUCCESS;
            } else {
                //cuquErrorC(10, "%s:%d queue is full, nw=%u nr=%u\n", __FILE__, __LINE__, nw, nr);
#if CUQU_ENABLE_PERF_HIST  
                // Update performance histogram (bucket 0 counts empty-queue hits)
                m_h_hist[0]++;
#endif
                //cuquTrace("%s:%d queue is empty, nw=%u nr=%u\n", __FILE__, __LINE__, nw, nr);
            }
            return ret;
        }

        // Fetch that never gives up: spin on try_fetch() until an event is
        // available, yielding the CPU for 1us after every max_cnt failed polls.
        template <typename T>
        int queue<T>::blocking_fetch(queue::value_t *val)
        {
            const int poll_budget = 1000;
            int polls_left = poll_budget;
            int rc;
            while((rc = try_fetch(val)) == WOULDBLOCK) {
                if(--polls_left == 0) {
                    cuquTrace("exceeded polling cnt, sleeping a bit\n");
                    usleep(1);
                    polls_left = poll_budget;
                }
            }
            return rc;
        }

        static const boost::xtime::xtime_nsec_t NANOSECONDS_PER_SECOND = 1000000000;

        // Fetch with an absolute deadline: poll try_fetch() until it succeeds
        // or timeout_ms milliseconds have elapsed; the clock is consulted
        // only every max_cnt iterations to keep the hot path cheap.
        // Returns SUCCESS, or WOULDBLOCK on timeout.
        template <typename T>
        int queue<T>::waiting_fetch(queue::value_t *val, int timeout_ms)
        {
            int retcode = WOULDBLOCK;
            const int max_cnt = 1000;
            int cnt = max_cnt;
            assert(timeout_ms >= 0);
            boost::xtime tmout;
            boost::xtime_get(&tmout, boost::TIME_UTC);
            const boost::xtime::xtime_nsec_t ms_to_ns = 1000000;
            // BUGFIX: timeout_ms*ms_to_ns overflowed for large timeouts and
            // the single-subtraction carry only handled one second of nsec
            // overflow (wrong for any timeout >= 2s). Split ms into whole
            // seconds and nanoseconds first.
            tmout.sec  += timeout_ms / 1000;
            tmout.nsec += (boost::xtime::xtime_nsec_t)(timeout_ms % 1000) * ms_to_ns;
            if(tmout.nsec >= NANOSECONDS_PER_SECOND) {
                tmout.nsec -= NANOSECONDS_PER_SECOND;
                tmout.sec  += 1;
            }
            do {
                retcode = try_fetch(val);
                if(retcode != WOULDBLOCK)
                    break;
                if(! --cnt) {
                    //cuquTraceC(10, "exceeded polling cnt, checking time\n");
                    cnt = max_cnt;
                    boost::xtime now;
                    boost::xtime_get(&now, boost::TIME_UTC);
                    if(xtime_cmp(tmout, now) < 0) {
                        cuquTrace("ERROR: timeout of %dms detected in %s\n", timeout_ms, __FUNCTION__);
                        break;
                    }
                }            
            } while(true);
            return retcode;
        }

        // Dispatch on the timeout: an infinite timeout degenerates to the
        // unbounded blocking variant, anything else to the deadline variant.
        template <typename T>
        int queue<T>::timed_fetch(queue::value_t *val, int timeout_ms)
        {
            return (timeout_ms == timeout_infinite)
                ? blocking_fetch(val)
                : waiting_fetch(val, timeout_ms);
        }

        // Accessor: device-memory pointer to the GPU-side queue descriptor,
        // suitable for passing to kernels. Owned by this object (freed in
        // the destructor).
        template <typename T>
        device::queue *queue<T>::get_device_queue()
        {
            return m_d_gpu_q;
        }

        // Accessor: host pointer to the raw ring buffer (mapped pinned
        // memory). Owned by this object (freed in the destructor).
        // BUGFIX: `typename` is required by standard C++ for a dependent
        // return type in an out-of-class definition; some permissive
        // compilers accepted the old form.
        template <typename T>
        typename queue<T>::raw_queue_t *queue<T>::get_raw_ptr()
        {
            return m_h_raw_q;
        }


    }  // end namespace host
}  // end namespace cuqu


#undef cuqu_barrier
#undef cuqu_mb 
#undef cuqu_rmb
#undef cuqu_wmb

/*
 * Local variables:
 *  mode: c++
 *  c-indent-level: 4
 *  c-basic-offset: 4
 *  tab-width: 4
 *  indent-tabs-mode: nil
 * End:
 */
