/*
 *  Copyright 2010 INFN - APE group
 *
 *  Licensed under the Apache License, Version 2.0 (the "License");
 *  you may not use this file except in compliance with the License.
 *  You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 *  Unless required by applicable law or agreed to in writing, software
 *  distributed under the License is distributed on an "AS IS" BASIS,
 *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 *  See the License for the specific language governing permissions and
 *  limitations under the License.
 */

#pragma once

#include <cuqu/detail/atomic.h>
#include <cuqu/detail/barrier.h>
#include <cuqu/detail/types.h>
#include <cuqu/device_queue.h>

namespace cuqu
{
    namespace detail
    {    
        // Convert the opaque raw_ptr stored in the device-side queue handle
        // into a typed pointer to the host-shared raw queue structure.
        static __inline__ __device__ detail::raw_queue_t *__queue_get_raw_ptr(cuda_queue_t q)
        {
            return cuqu_cast_to_ptr(detail::raw_queue_t, q->raw_ptr);
        }

        // Return a pointer to the index-th slot of the raw queue's event
        // storage, viewed as detail::event<T>.  No bounds checking: callers
        // mask the free-running counters with q->mask before indexing.
        template <typename T>
        static __device__ __inline__ detail::event<T> *__rawqueue_get_event_ptr(detail::raw_queue_t *raw_q, unsigned int index)
        {
            detail::event<T> *base = (detail::event<T> *)(raw_q->events);
            return &base[index];
        }

        // Read the producer's write counter (nw) from the host-shared raw
        // queue through a volatile access, forcing a fresh load from memory
        // on every call (the host updates this field).
        static __inline__ __device__ unsigned int __rawqueue_read_nw(detail::raw_queue_t *raw_q)
        {
            volatile unsigned int *p = &raw_q->nw;
            return *p;
        }

        // Read the consumer's read counter (nr) from the host-shared raw
        // queue through a volatile access, forcing a fresh load from memory.
        static __inline__ __device__ unsigned int __rawqueue_read_nr(detail::raw_queue_t *raw_q)
        {
            volatile unsigned int *p = &raw_q->nr;
            return *p;
        }

        // Read the device-local mirror of the read counter (eq_nr) with a
        // volatile access so it is always re-loaded rather than cached in a
        // register.  The value is the raw counter, not masked by q->mask.
        static __inline__ __device__ unsigned int __queue_nr(cuda_queue_t q)
        {
            volatile unsigned int *p = &q->eq_nr;
            return *p;
        }

        // Record one failure (e.g. a failed barrier_wait) by atomically
        // incrementing the queue's error counter.
        static __inline__ __device__ void __queue_signal_failure(cuda_queue_t device_q)
        {
            atomic_inc(&device_q->errors);
        }

        // Return the number of failures recorded so far via
        // __queue_signal_failure().
        static __inline__ __device__ unsigned int __queue_n_failures(cuda_queue_t device_q)
        {
            return atomic_read(&device_q->errors);
        }

        // When CUQU_ENABLE_PERF_HIST is set, record a failed operation by
        // bumping histogram bucket idx; otherwise compiles to a no-op.
        // The increment is not atomic: the call sites in this file invoke it
        // from thread 0 only.
        static __inline__ __device__ void __queue_perf_fail(cuda_queue_t device_q, unsigned int idx)
        {
#if CUQU_ENABLE_PERF_HIST  
            device_q->hist[idx]++;
#endif
        }

        // When CUQU_ENABLE_PERF_HIST is set, record a successful operation by
        // bumping histogram bucket idx (the queue occupancy at the time);
        // otherwise compiles to a no-op.  Not atomic: called from thread 0 only.
        static __inline__ __device__ void __queue_perf_success(cuda_queue_t device_q, unsigned int idx)
        {
#if CUQU_ENABLE_PERF_HIST  
            device_q->hist[idx]++;
#endif
        }

        // Dump the queue's internal pointers and counters for debugging
        // (used e.g. to diagnose a livelock, see queue_waiting_push).
        // Every calling thread prints, so callers normally restrict this to
        // a single thread (tid==0 && bid==0).
        static __inline__ __device__ void __queue_dump_details(cuda_queue_t device_q)
        {
            const unsigned int tid = threadIdx.x + blockDim.x*threadIdx.y + blockDim.x*blockDim.y*threadIdx.z;
            const unsigned int bid = blockIdx.x + gridDim.x*blockIdx.y;
            cuqu::detail::raw_queue_t *raw_q = cuqu::detail::__queue_get_raw_ptr(device_q);
            unsigned int nw = __rawqueue_read_nw(raw_q);
            unsigned int nr = __rawqueue_read_nr(raw_q);
            unsigned int local_nr = __queue_nr(device_q);
            // fix: bid/tid/local_nr are unsigned; they were printed with %d,
            // which is undefined for values above INT_MAX and misleading for
            // the free-running counters once they wrap the signed range
            cuqu_error("[%u:%u] device_q=%p bar_head=%p bar_tail=%p raw_nw=%u raw_nr=%u local_nr=%u\n", bid, tid, device_q, &device_q->bar_head, &device_q->bar_tail, nw, nr, local_nr);
        }

        //__device__ void __queue_set_nr(gpu_queue_t *q, unsigned int nr)
        //{
        //        q->eq_nr = nr;
        //}

        // static __inline__ __device__ void __queue_inc_nr(gpu_queue_t *q)
        // {
        //         ++q->eq_nr;
        // }

        // Publish a new read counter after a fetch: mirror it both in device
        // memory (q->eq_nr, read back by __queue_nr) and in the host-pinned
        // raw queue (raw_q->nr, observed by the host-side producer).
        // need proper system memory fence
        static __inline__ __device__ void __queue_update_nr(cuda_queue_t q, unsigned int nr)
        {
            detail::raw_queue_t *raw_q = __queue_get_raw_ptr(q);
            // update device mem
            *(volatile unsigned int*)&q->eq_nr = nr;
            // update host pinned mem
            *(volatile unsigned int*)&raw_q->nr = nr;
            //raw_q->nr = __queue_nr(q);
        }

        // Publish a new write counter after a push, stored only in the
        // host-pinned raw queue (no device-side mirror is kept for nw,
        // unlike __queue_update_nr).
        // need proper system memory fence
        static __inline__ __device__ void __queue_update_nw(cuda_queue_t q, unsigned int nw)
        {
            detail::raw_queue_t *raw_q = __queue_get_raw_ptr(q);
            // update device mem
            //*(volatile unsigned int*)&q->eq_nr = nr;
            // update host pinned mem
            *(volatile unsigned int*)&raw_q->nw = nw;
            //raw_q->nr = __queue_nr(q);
        }

        // Intra-block barrier hook used by the copy macros below.  Currently
        // compiled out (no-op); the commented variant restores a real
        // __syncthreads().  NOTE(review): correctness presumably relies on
        // the unconditional __syncthreads() at the end of __queue_push /
        // __queue_fetch — confirm before enabling concurrent callers.
        static __inline__ __device__ void __queue_syncthreads() { }
        //static __inline__ __device__ void __queue_syncthreads() { __syncthreads(); }

#if CUQU_ENABLE_COPY_FENCE
        // Fence hooks applied per copied word by the copy macros:
        // device-wide and system-wide (host-visible) respectively.
        static __inline__ __device__ void __queue_threadfence() { __threadfence(); }
        static __inline__ __device__ void __queue_threadfence_system() { __threadfence_system(); }
#else
        // Per-word fencing disabled at compile time: both hooks are no-ops.
        static __inline__ __device__ void __queue_threadfence() { }
        static __inline__ __device__ void __queue_threadfence_system() { }
#endif
    
/* Cooperatively load one queue entry into shared memory: thread `tid` copies
 * word `tid` (requires block size >= event_t::nwords).  Relies on `tid` and
 * `event_t` being in scope at the expansion site. */
#define __queue_load_entry(Q, SHR_E, PTR_GLB_E)     \
        {                                           \
            if(tid < event_t::nwords)               \
                SHR_E.w[tid] = PTR_GLB_E->w[tid];   \
            __queue_syncthreads();                  \
        }

/* Block-wide validity test: true when no word of SHR_E still holds the
 * event_magic sentinel, i.e. the producer has fully written the entry.
 * Threads beyond nwords vote true so all() is not spuriously false. */
#define __queue_valid_event(SHR_E)                                      \
        all((tid < event_t::nwords) ? (SHR_E.w[tid] != detail::event_magic) : true)
        
/* Word-parallel event copy (one word per thread) through a volatile store,
 * with no memory fence.  Requires block size >= event_t::nwords. */
#define __queue_copy_event(DST_E, SRC_E)                                \
        {                                                               \
            if(tid < event_t::nwords) {                                 \
                *(volatile unsigned int *)&(DST_E->w[tid]) = SRC_E->w[tid]; \
            }                                                           \
            __queue_syncthreads();                                      \
        }

/* Word-parallel event copy whose destination lives in device global memory:
 * each written word is followed by a device-wide fence hook (a no-op unless
 * CUQU_ENABLE_COPY_FENCE is set). */
#define __queue_copy_event_to_global(DST_E, SRC_E)                      \
        {                                                               \
            if(tid < event_t::nwords) {                                 \
                *(volatile unsigned int *)&(DST_E->w[tid]) = SRC_E->w[tid]; \
                __queue_threadfence();                                  \
            }                                                           \
            __queue_syncthreads();                                      \
        }

/* Word-parallel event copy whose destination lives in host-pinned memory:
 * each written word is followed by a system-wide fence hook (a no-op unless
 * CUQU_ENABLE_COPY_FENCE is set) so the host can observe it. */
#define __queue_copy_event_to_pinned(DST_E, SRC_E)                      \
        {                                                               \
            if(tid < event_t::nwords) {                                 \
                *(volatile unsigned int *)&(DST_E->w[tid]) = SRC_E->w[tid]; \
                __queue_threadfence_system();                           \
            }                                                           \
            __queue_syncthreads();                                      \
        }

/* Overwrite every word of a global event slot with the event_magic sentinel
 * so __queue_valid_event() reports it empty.  Uses an unconditional
 * __threadfence_system() (not the compile-time hook) since invalidation must
 * always be visible to the host. */
#define __queue_invalidate_event(PTR_GLB_E)                             \
        {                                                               \
            if(tid < event_t::nwords) {                                 \
                *(volatile unsigned int *)&(PTR_GLB_E->w[tid]) = detail::event_magic; \
                __threadfence_system();                                 \
            }                                                           \
            __queue_syncthreads();                                      \
        }

/* Number of queued events: signed difference of the free-running write and
 * read counters, so unsigned wrap-around still yields the right count. */
#define __queue_num_queued_events(Q, NW, NR) ((int)(NW) - (int)(NR))

/* Queue is empty when the write and read counters coincide. */
#define __queue_is_empty(Q, NW, NR) ((NW) == (NR))

/* Queue is full when the queued-event count reaches the ring capacity. */
#define __queue_is_full(Q, NW, NR) (__queue_num_queued_events(Q, NW, NR) == (Q)->nevents)


        //----------------------------------------------------------------------

        /*! Single-block push of one event into the host-pinned ring buffer.
         *
         *  Must be executed by exactly one block (callers gate on bid==0);
         *  all threads of that block participate in the word-parallel copy.
         *  On exit q->rc holds SUCCESS, or WOULDBLOCK when the queue is full.
         *
         *  \param q            device-side queue handle
         *  \param global_event source event in device global memory
         *  \param bid          caller's block id (currently unused here)
         *  \param tid          caller's linear thread id within the block
         */
        template <class T>
        static __device__ inline void __queue_push(cuda_queue_t q, event<T> *global_event, const unsigned int bid, const unsigned int tid)
        {
            //cuqu_trace("I am First block\n");

            detail::raw_queue_t *raw_q = detail::__queue_get_raw_ptr(q);
            // all threads in block read the register? what about performance?
            unsigned int nw = __rawqueue_read_nw(raw_q);
            unsigned int nr = __rawqueue_read_nr(raw_q);
            // nw/nr are free-running; mask nw to get the ring slot index
            unsigned int masked_nw = nw & q->mask;
            //int n_queued_events = __queue_unread_events(nw, nr);
            typedef detail::event<T> event_t;
            typedef typename event_t::array_t array_t;
            event_t *ge = detail::__rawqueue_get_event_ptr<T>(raw_q, masked_nw); //&raw_q->events[masked_nr];
            array_t *glb_dst_e = &ge->u.a;
            array_t *glb_src_e = &global_event->u.a;
            unsigned int num_queued_events = (unsigned int) __queue_num_queued_events(q, nw, nr);

            //if(!__queue_is_full(q, nw, nr)) {
            if(num_queued_events != q->nevents) {
                //__queue_copy_event(glb_dst_e, glb_src_e);
                // copy into host-pinned memory (system fence hook per word)
                __queue_copy_event_to_pinned(glb_dst_e, glb_src_e);

                //cuqu_trace("fetched new event shr_e[%d]=%08x nr=%d\n", tid, shr_e.w[tid], nr);

                // only necessary if __queue_valid_event() is used
                //__queue_invalidate_event(glb_e);

                ++nw;
                if(0==tid) {
                    // publish the advanced write counter to the host
                    __queue_update_nw(q, nw);
                    __queue_perf_success(q, num_queued_events);
                    q->rc = SUCCESS;
#if CUQU_ENABLE_TAIL_FENCE
                    //__threadfence();
                    __threadfence_system();
#endif
                }
                //__syncthreads();
            } else {
                // queue full: report WOULDBLOCK without touching the storage
                if(0 == tid) {
                    __queue_perf_fail(q, (unsigned int)q->nevents);
                    q->rc = WOULDBLOCK;
#if CUQU_ENABLE_TAIL_FENCE
                    __threadfence();
#endif
                }
                //__syncthreads();
            }
            __syncthreads();
        }

        //----------------------------------------------------------------------

        /*! Non-blocking, grid-cooperative push of one event.
         *
         *  All blocks of the grid enter the head barrier; block 0 then
         *  performs the actual copy in __queue_push and stores the outcome
         *  in q->rc, which every block returns after the (optional) tail
         *  barrier.
         *
         *  \param q The pointer to the CUDA side of the queue.
         *  \param global_event Event to enqueue, located in global memory.
         *  \return SUCCESS, WOULDBLOCK (queue full), INVALID (block too
         *          small) or a barrier_wait error code.
         */
        template <typename T>
        static __device__ int queue_try_push(cuda_queue_t q, event<T>* global_event)
        {
            const unsigned int tid = threadIdx.x + blockDim.x*threadIdx.y + blockDim.x*blockDim.y*threadIdx.z;
            const unsigned int bid = blockIdx.x + gridDim.x*blockIdx.y;
            const unsigned int block_size = blockDim.x*blockDim.y*blockDim.z;
            const unsigned int grid_size = gridDim.y*gridDim.x;
            //const unsigned int gid = tid + bid*block_size;
            int retcode;

            // needed for coalesced read in __queue_load_entry()
            if(block_size < detail::event<T>::nwords) {
                cuqu_error("ERROR: queue_try_push block_size=%d is incompatible with coalesched memory access\n", block_size);
                return INVALID;
            }

            retcode = barrier_wait(&q->bar_head, grid_size);
            if(retcode != SUCCESS) {
                __queue_signal_failure(q);
                //cuqu_error("[%d:%d] ERROR: queue_try_push queue=%p retcode %d during head barrier_wait\n", bid, tid, q, retcode);
                return retcode;
            }

            // only the first block touches the queue storage
            if(0 == bid)
                __queue_push(q, global_event, bid, tid);

#if CUQU_ENABLE_TAIL_BARRIER
            retcode = barrier_wait(&q->bar_tail, grid_size);
            if(retcode != SUCCESS) {
                __queue_signal_failure(q);
                //cuqu_error("[%d:%d] ERROR: queue_try_push queue=%p retcode %d during tail barrier_wait\n", bid, tid, q, retcode);
                return retcode;
            }
#endif

            // outcome published by block 0 inside __queue_push
            retcode = q->rc;

            return retcode;
        }

        //----------------------------------------------------------------------

        /*! Blocking push: retries queue_try_push until the event is enqueued
         *  or a hard (non-WOULDBLOCK) error occurs.
         *
         *  \return SUCCESS or an error code from queue_try_push.
         */
        template <typename T>
        static __device__ int queue_blocking_push(cuda_queue_t q, event<T>* e)
        {
            int rc = WOULDBLOCK;
            while(WOULDBLOCK == rc)
                rc = queue_try_push(q, e);
            // SUCCESS or ERROR
            return rc;
        }

        //----------------------------------------------------------------------

        /*! Push with a millisecond timeout: keeps retrying queue_try_push
         *  until it succeeds, hits a hard error, or the (roughly estimated)
         *  elapsed time exceeds timeout_ms.
         *
         *  \return SUCCESS, WOULDBLOCK on timeout, or an error code.
         */
        template <typename T>
        static __device__ int queue_waiting_push(cuda_queue_t q, event<T>* e, int timeout_ms)
        {
            const int cost_us = 5/*try_push*/ + 1/*delay*/; // rough estimate on C2050
            const int budget_us = 1000*timeout_ms;
            int elapsed_us = 0;
            int rc;
            for(;;) {
                // here we pay for multiple barriers. it could be optimized
                rc = queue_try_push(q, e);
                if(WOULDBLOCK != rc) // SUCCESS or ERROR
                    break;
                elapsed_us += cost_us;
                cuqu::cuqu_delay(10);
                if(elapsed_us >= budget_us)
                    break;
            }
#if defined(CUQU_DEBUG)
            const unsigned int tid = threadIdx.x + blockDim.x*threadIdx.y + blockDim.x*blockDim.y*threadIdx.z;
            const unsigned int bid = blockIdx.x + gridDim.x*blockIdx.y;
            // to debug a livelock situation I had...
            if(WOULDBLOCK == rc && 0 == tid && 0 == bid)
                __queue_dump_details(q);
#endif
            return rc;
        }

        //----------------------------------------------------------------------

        /*! Dispatch a push with a timeout policy: an infinite timeout maps to
         *  the blocking variant, any other value to the waiting variant.
         */
        template <typename T>
        static __device__ int queue_timed_push(cuda_queue_t q, event<T>* global_event, int timeout_ms)
        {
            return (timeout_infinite == timeout_ms)
                ? queue_blocking_push(q, global_event)
                : queue_waiting_push(q, global_event, timeout_ms);
        }

        //----------------------------------------------------------------------

        /*! Blocking push that spins inside a single head/tail barrier pair:
         *  all blocks enter the head barrier once, block 0 retries
         *  __queue_push until the event fits, then all blocks meet at the
         *  tail barrier and return the outcome from q->rc.
         *
         *  \return SUCCESS, or the failing barrier_wait return code.
         */
        template <typename T>
        static __device__ int queue_blocking_push_nobar(cuda_queue_t q, event<T>* e)
        {
            const unsigned int tid = threadIdx.x + blockDim.x*threadIdx.y + blockDim.x*blockDim.y*threadIdx.z;
            const unsigned int bid = blockIdx.x + gridDim.x*blockIdx.y;
            const unsigned int grid_size = gridDim.y*gridDim.x;
            int retcode;

            retcode = barrier_wait(&q->bar_head, grid_size);
            if(retcode != SUCCESS) {
                __queue_signal_failure(q);
                // fix: message used to report "queue_try_push" (copy-paste)
                cuqu_error("[%d:%d] ERROR: queue_blocking_push_nobar queue=%p retcode %d during head barrier_wait\n", bid, tid, q, retcode);
                return retcode;
            }

            if(0 == bid) {
                // only the first block pushes; spin while the queue is full
                do {
                    __queue_push(q, e, bid, tid);
                } while(WOULDBLOCK == q->rc);
            }

            retcode = barrier_wait(&q->bar_tail, grid_size);
            if(retcode != SUCCESS) {
                __queue_signal_failure(q);
                // fix: message used to report "queue_try_push" (copy-paste)
                cuqu_error("[%d:%d] ERROR: queue_blocking_push_nobar queue=%p retcode %d during tail barrier_wait\n", bid, tid, q, retcode);
                return retcode;
            }

            retcode = q->rc;
            return retcode;
        }

        //----------------------------------------------------------------------

        /*! Timed push using the single-barrier-pair blocking variant for an
         *  infinite timeout, the standard waiting push otherwise.  Validates
         *  the block size needed by the word-parallel copy macros first.
         */
        template <typename T>
        static __device__ int queue_timed_push_nobar(cuda_queue_t q, event<T>* global_event, int timeout_ms)
        {
            const unsigned int block_size = blockDim.x*blockDim.y*blockDim.z;

            // needed for coalesced read in __queue_load_entry()
            if(block_size < detail::event<T>::nwords) {
                cuqu_error("ERROR: queue_timed_push_nobar block_size=%d is incompatible with coalesched memory access\n", block_size);
                return INVALID;
            }

            if(timeout_infinite == timeout_ms)
                return queue_blocking_push_nobar(q, global_event);
            return queue_waiting_push(q, global_event, timeout_ms);
        }

        //----------------------------------------------------------------------

        /*! Single-block fetch of one event from the host-pinned ring buffer
         *  into global_event (device global memory).
         *
         *  Must be executed by exactly one block (callers gate on bid==0);
         *  all threads of that block participate in the word-parallel copy.
         *  On exit q->rc holds SUCCESS, or WOULDBLOCK when the queue is empty.
         *
         *  \param q            device-side queue handle
         *  \param global_event destination event in device global memory
         *  \param bid          caller's block id (used for tracing only)
         *  \param tid          caller's linear thread id within the block
         */
        template <class T>
        static __device__ inline void __queue_fetch(cuda_queue_t q, event<T> *global_event, const unsigned int bid, const unsigned int tid)
        {
            //cuqu_trace("I am First block\n");

            detail::raw_queue_t *raw_q = detail::__queue_get_raw_ptr(q);
            // all threads in block read the register? what about performance?
            unsigned int nw = __rawqueue_read_nw(raw_q);
            unsigned int nr = __queue_nr(q);
            // nr is free-running; mask it to get the ring slot index
            unsigned int masked_nr = nr & q->mask;
            //int n_queued_events = __queue_unread_events(nw, nr);
            typedef detail::event<T> event_t;
            typedef typename event_t::array_t array_t;
            event_t *ge = detail::__rawqueue_get_event_ptr<T>(raw_q, masked_nr); //&raw_q->events[masked_nr];
            array_t *glb_e = &ge->u.a;
            array_t *glb_dest_e = &global_event->u.a;

            unsigned int num_queued_events = (unsigned int) __queue_num_queued_events(q, nw, nr);

            // 1) memory polling approach:
            // instead of reading & using the NW pointer (see 2), poll the
            // NR event memory area for a non magic value
            //
            // entry is valid if it is not invalid, i.e. it does
            // not contain the magic number in all its words
            //
            // WARN: problem on Fermi, it seems to stop after a full roll
            // over the queue
            //
            // __shared__ array_t shr_e;
            // __queue_load_entry(q, shr_e, glb_e);
            // if(__queue_valid_event(shr_e)) {
            //    __queue_copy_event(glb_dest_e, shr_e);
            //
            // 2) pointers checking
            // read both NR and NR, then calc signed difference,
            // considering that both are ever increasing pointer
            // 
            if(num_queued_events > 0) {
                // copy into device global memory (device fence hook per word)
                __queue_copy_event_to_global(glb_dest_e, glb_e);
#if CUQU_DEBUG_VERBOSE
                if(tid == 0)
                    cuqu_trace("[%d:%d] q=%p raw_q=%p glb_e=%p\n", bid, tid, q, raw_q, glb_e);
                if(tid < event_t::nwords)
                    cuqu_trace("[%d:%d] fetched new event glb_dest_e[%d]=%08x\n", bid, tid, tid, glb_dest_e->w[tid]);
#endif

#if CUQU_INVALIDATE_ENTRIES
                // strictly necessary when __queue_valid_event() method is used
                __queue_invalidate_event(glb_e);
#endif
                ++nr;
                if(0==tid) {
#if CUQU_DEBUG_VERBOSE
                    cuqu_trace("[%d:%d] nr=0x%08x nw=0x%08x masked_nr=0x%08x\n", bid, tid, nr, nw, masked_nr);
#endif
                    // publish the advanced read counter (device + host copies)
                    __queue_update_nr(q, nr);
                    __queue_perf_success(q, num_queued_events);
                    q->rc = SUCCESS;
#if CUQU_ENABLE_TAIL_FENCE
                    //__threadfence();
                    __threadfence_system();
#endif
                }
                //__syncthreads();
            } else {
                // queue empty: report WOULDBLOCK without touching the storage
                if(0 == tid) {
                    __queue_perf_fail(q, 0);
                    q->rc = WOULDBLOCK;
#if CUQU_ENABLE_TAIL_FENCE
                    __threadfence();
#endif
                }
                //__syncthreads();
            }
            __syncthreads();
        }

        //----------------------------------------------------------------------

        /*! \p queue_try_fetch is a non-blocking, grid-cooperative fetch from
         *  the queue: all blocks enter the head barrier, block 0 performs the
         *  copy in __queue_fetch and stores the outcome in q->rc, which every
         *  block returns after the (optional) tail barrier.
         *
         *  \param q The pointer to the CUDA side of the queue.
         *  \param global_event The pointer to a <tt>event</tt> instance located in global memory.
         *  \return SUCCESS, WOULDBLOCK (queue empty), INVALID (block too
         *          small) or a barrier_wait error code.
         *
         *  \tparam T The user contained type.
         */

        template <typename T>
        static __device__ int queue_try_fetch(cuda_queue_t q, event<T>* global_event)
        {
            const unsigned int tid = threadIdx.x + blockDim.x*threadIdx.y + blockDim.x*blockDim.y*threadIdx.z;
            const unsigned int bid = blockIdx.x + gridDim.x*blockIdx.y;
            const unsigned int block_size = blockDim.x*blockDim.y*blockDim.z;
            const unsigned int grid_size = gridDim.y*gridDim.x;
            //const unsigned int gid = tid + bid*block_size;
            int retcode;

            // needed for coalesced read in __queue_load_entry()
            if(block_size < detail::event<T>::nwords) {
                cuqu_error("ERROR: queue_try_fetch block_size=%d is incompatible with coalesched memory access\n", block_size);
                return INVALID;
            }

            retcode = barrier_wait(&q->bar_head, grid_size);
            if(retcode != SUCCESS) {
                __queue_signal_failure(q);
                cuqu_error("[%d:%d] ERROR: queue_try_fetch queue=%p retcode %d during head barrier_wait\n", bid, tid, q, retcode);
                return retcode;
            }

            // only the first block touches the queue storage
            if(0 == bid)
                __queue_fetch(q, global_event, bid, tid);

#if CUQU_ENABLE_TAIL_BARRIER
            retcode = barrier_wait(&q->bar_tail, grid_size);
            if(retcode != SUCCESS) {
                __queue_signal_failure(q);
                cuqu_error("[%d:%d] ERROR: queue_try_fetch queue=%p retcode %d during tail barrier_wait\n", bid, tid, q, retcode);
                return retcode;
            }
#endif

            // outcome published by block 0 inside __queue_fetch
            retcode = q->rc;

            return retcode;
        }

        //----------------------------------------------------------------------

        /*! Blocking fetch: retries queue_try_fetch until an event arrives or
         *  a hard (non-WOULDBLOCK) error occurs.
         *
         *  \return SUCCESS or an error code from queue_try_fetch.
         */
        template <typename T>
        static __device__ int queue_blocking_fetch(cuda_queue_t q, event<T>* e)
        {
            int rc = WOULDBLOCK;
            while(WOULDBLOCK == rc)
                rc = queue_try_fetch(q, e);
            // SUCCESS or ERROR
            return rc;
        }

        //----------------------------------------------------------------------

        /*! Fetch with a millisecond timeout: keeps retrying queue_try_fetch
         *  until it succeeds, hits a hard error, or the (roughly estimated)
         *  elapsed time exceeds timeout_ms.
         *
         *  \return SUCCESS, WOULDBLOCK on timeout, or an error code.
         */
        template <typename T>
        static __device__ int queue_waiting_fetch(cuda_queue_t q, event<T>* e, int timeout_ms)
        {
            const int cost_us = 5/*try_fetch*/ + 1/*delay*/; // rough estimate on C2050
            const int budget_us = 1000*timeout_ms;
            int elapsed_us = 0;
            int rc;
            for(;;) {
                // here we pay for multiple barriers. it could be optimized
                rc = queue_try_fetch(q, e);
                if(WOULDBLOCK != rc) // SUCCESS or ERROR
                    break;
                elapsed_us += cost_us;
                cuqu::cuqu_delay(10);
                if(elapsed_us >= budget_us)
                    break;
            }
            return rc;
        }

        //----------------------------------------------------------------------

        /*! Dispatch a fetch with a timeout policy: an infinite timeout maps
         *  to the blocking variant, any other value to the waiting variant.
         */
        template <typename T>
        static __device__ int queue_timed_fetch(cuda_queue_t q, event<T>* global_event, int timeout_ms)
        {
            return (timeout_infinite == timeout_ms)
                ? queue_blocking_fetch(q, global_event)
                : queue_waiting_fetch(q, global_event, timeout_ms);
        }

        //----------------------------------------------------------------------

    } // end namespace detail

} // end namespace cuqu

/*
 * Local variables:
 *  mode: c++
 *  c-indent-level: 4
 *  c-basic-offset: 4
 *  tab-width: 4
 *  indent-tabs-mode: nil
 * End:
 */
