/*
 *  Copyright 2010 INFN - APE group
 *
 *  Licensed under the Apache License, Version 2.0 (the "License");
 *  you may not use this file except in compliance with the License.
 *  You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 *  Unless required by applicable law or agreed to in writing, software
 *  distributed under the License is distributed on an "AS IS" BASIS,
 *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 *  See the License for the specific language governing permissions and
 *  limitations under the License.
 */

#include <boost/function.hpp>
#include <boost/bind.hpp>

//#define CUQU_DISABLE_CUPRINTF 1
//#define CUOS_ENABLE_TRACING 1

#include <cuos/cuos.h>
#include <cuos/detail/handler.h>

using namespace cuos::system_service;

//----------------------------------------------------------------------
//----------------------------------------------------------------------
//----------------------------------------------------------------------

// Construct the host-side runtime bound to CUDA device `device_id`.
// Queues are sized for 128 entries of `block_size` each; CUDA streams and
// the device-side sys_q descriptor are allocated lazily on first use.
cuos::host::runtime::runtime(int device_id, unsigned int block_size) : 
    m_device_id(device_id), 
    m_queue_d2h(128, block_size), 
    m_queue_h2d(128, block_size),
    m_io_stream(0),
    m_exec_stream(0),
    m_d_sys_q(0),
    m_host_thread_started(0),
    m_host_thread_stop(0)
{
    cuosTrace("runtime CTOR this=%p\n", this);
    // sizeof yields size_t; passing it unchanged through %d is undefined
    // behavior on LP64 platforms, so cast explicitly to int.
    cuosTrace("req event size: %d\n", (int)sizeof(sys_req_event_t));
    cuosTrace("rep event size: %d\n", (int)sizeof(sys_rep_event_t));
    // Device selection is deliberately NOT done here; callers are expected
    // to run cuos::host::cuda_init() beforehand (see make_runtime()).
    //printf("calling cuda_init\n");
    //cuda_init(device_id);
}

//----------------------------------------------------------------------

// Tear down the runtime: stop the service thread if it is still running,
// then release lazily-created CUDA streams and the device-side sys_q.
// Outstanding handlers are reported but not force-completed.
cuos::host::runtime::~runtime()
{
    cuosTrace("runtime DTOR this=%p\n", this);

    // The sys_server thread must not outlive this object: it dereferences us.
    if(m_host_thread_started) {
        cuosError("ERROR: sys_server still running, stopping it!\n");
        stop_sys_server();
    }

    // size() is size_t; cast to int to match the %d conversion (UB otherwise
    // on LP64 platforms).
    if(m_handler_queue.size() != 0) {
        cuosError("ERROR: still %d uncompleted handlers while destroying runtime!!\n", (int)m_handler_queue.size());
    }
    if(m_io_stream) {
        cuosTrace("destroying I/O CUDA stream\n");
        cuosSafeCall(cudaStreamDestroy(m_io_stream));
        m_io_stream = 0;
    }
    if(m_exec_stream) {
        cuosTrace("destroying exec CUDA stream\n");
        cuosSafeCall(cudaStreamDestroy(m_exec_stream));
        m_exec_stream = 0;
    }
    if(m_d_sys_q) {
        cuosTrace("deallocate device sys_q\n");
        cuosSafeCall(cudaFree(m_d_sys_q));
        m_d_sys_q = 0;
    }
}

//----------------------------------------------------------------------

// Return the CUDA stream used for I/O transfers, creating it on first use.
cudaStream_t cuos::host::runtime::io_stream()
{
    if(m_io_stream == 0) {
        // First call: create the stream lazily so construction stays cheap.
        cuosTrace("lazily allocating I/O CUDA stream\n");
        cuosSafeCall(cudaStreamCreate(&m_io_stream));
    }
    return m_io_stream;
}

//----------------------------------------------------------------------

// Return the CUDA stream used for kernel execution, creating it on first use.
cudaStream_t cuos::host::runtime::exec_stream()
{
    if(m_exec_stream == 0) {
        // First call: create the stream lazily so construction stays cheap.
        cuosTrace("lazily allocating exec CUDA stream\n");
        cuosSafeCall(cudaStreamCreate(&m_exec_stream));
    }
    return m_exec_stream;
}

//----------------------------------------------------------------------

/*
  Two commands from different streams cannot run concurrently if
  either one of the following operations is issued in-between them by
  the host thread:
  * a page-locked host memory allocation,
  * a device memory allocation,
  * a device memory set,
  * a device to device memory copy,
  * any CUDA command to stream 0 (including kernel launches and host
    to device memory copies that do not specify any stream parameter),
  * a switch between the L1/shared memory configurations described in Section G.4.1.

 */

// Allocate a temporary host buffer of `size` bytes. Depending on build flags
// the buffer comes from a pinned-memory pool, a direct cudaHostAlloc, or
// plain malloc. Returns 0 on failure (or when size == 0 in the pinned-buf
// build). Pair every call with rel_tmp_buf().
void *cuos::host::runtime::get_tmp_buf(size_t size)
{
#if CUOS_USE_PINNED_POOL
    // Pool build: the pool owns the pinned pages, alloc just carves a slice.
    return m_pool.alloc(size);
#elif CUOS_USE_PINNED_BUF
    void *h_buf = 0;
    if(size > 0) {
        // size_t does not match %d on LP64; cast to int for the trace.
        cuosTrace("%s: calling cudaHostAlloc(%d)\n", __FUNCTION__, (int)size);
        cudaError_t err = cudaHostAlloc(&h_buf, size, 
                                        cudaHostAllocPortable
                                        //|cudaHostAllocWriteCombined
                                        //|cudaHostAllocMapped
                                        );
        if(cudaSuccess != err) {
            cuosError("ERROR cudaHostAlloc() retcode=%d\n", err);
            h_buf = 0;
        }
    }
    return h_buf;
#else
    return malloc(size);
#endif
}

//----------------------------------------------------------------------

// Release a temporary host buffer previously obtained via get_tmp_buf().
// The release path must match the allocation path selected at build time.
void cuos::host::runtime::rel_tmp_buf(void *buf)
{
#if CUOS_USE_PINNED_POOL
    // Pool build: hand the slice back to the pinned-memory pool.
    cuosTrace("calling pool.release(%p)\n", buf);
    m_pool.release(buf);
#elif CUOS_USE_PINNED_BUF
    // Direct pinned allocation: free through the CUDA runtime.
    if(!buf) {
        cuosTrace("buf is NULL!!!\n");
    } else {
        cuosTrace("calling cudaFreeHost(%p)\n", buf);
        cudaError_t err = cudaFreeHost(buf);
        if(err != cudaSuccess) {
            cuosError("ERROR cudaFreeHost() buf=%p retcode=%d\n", buf, err);
        }
    }
#else
    // Plain pageable memory.
    free(buf);
#endif
}

//----------------------------------------------------------------------

// Dispatch a single device-originated system request to its handler.
// Only sys_mpi_send and sys_mpi_recv are implemented; every other request
// id (including the recognized but unimplemented ones) yields UNSUPPORTED.
// `rep` receives the reply payload for the handled requests.
int cuos::host::runtime::handle_req(sys_req_t *req, sys_rep_t *rep)
{
    assert(req);
    cuosTrace("handle_req req->id=%d\n", req->id);

    int ret;
    if(req->id == sys_mpi_send) {
        ret = handle_mpi_send(req, rep);
    } else if(req->id == sys_mpi_recv) {
        ret = handle_mpi_recv(req, rep);
    } else {
        // sys_null/open/close/write/read and the remaining MPI calls are
        // recognized but have no implementation yet.
        ret = UNSUPPORTED;
    }

    cuosTrace("handle_req done, ret=%d\n", ret);
    return ret;
}

//----------------------------------------------------------------------

// Debug helper: print a full hexadecimal dump of a system request, including
// all parameter slots (not just the n_params active ones), so stale data in
// unused slots is visible too.
__host__ void cuos::host::dump(const cuos::sys_req_t &req)
{
    // sizeof results are size_t; cast to int so they match the %d
    // conversions (passing size_t through %d is UB on LP64 platforms).
    cuosError("req: size=%d param_size=%d(%d,%d) id=%d serial=%08x n_params=%d flags=0x%08x\n", 
              (int)sizeof(req), (int)sizeof(req.params), (int)sizeof(req.params[0]), (int)sizeof(system_service::param), 
              req.id, req.serial, req.n_params, req.flags);
    for(int i=0; i<system_service::request::max_params /*req.n_params*/; ++i) {
        const param &p = req.params[i];
        cuosError(" param[%d]: tag=%08x count=%08x value=%08x\n", i, p.tag, p.count, p.ui);
    }
}

//----------------------------------------------------------------------

#if 0
// NOTE(review): disabled draft of a std::remove_if predicate intended to
// replace the explicit loop in progress_handlers() below. As written it
// would not compile: `this` is not available inside a free function,
// `*i->progress(...)` has the wrong precedence (should be (*i)->progress),
// and the cuosTrace call names a %d conversion with no matching argument.
// Kept for reference only.
namespace {
    bool progress_and_test(cuos::host::runtime::handler_queue_t::iterator i)
    {
        int retcode = *i->progress(this);
        if(retcode != SUCCESS) {
            cuosTrace("handler->progress() ret=%d!\n");
        }
        return *i->done();
    }
}
#endif

// Drive every queued handler one step and reap the completed ones.
// Returns SUCCESS, or the first non-SUCCESS code reported by a handler
// (iteration stops at the first failure, leaving later handlers untouched).
int cuos::host::runtime::progress_handlers()
{
    int ret = SUCCESS;

#if 1
    //BOOST_FOREACH(handler *h, m_handler_queue) {
    // No ++i in the for-header: advancement depends on whether the current
    // element is erased (see below).
    for(handler_queue_t::iterator i = m_handler_queue.begin(); i != m_handler_queue.end();) {
        handler *h = *i;
        assert(h);
        // progress each handler
        int retcode = h->progress(this);
        if(retcode != SUCCESS) {
            cuosError("ERROR %d in handler %p\n", retcode, h);
            ret = retcode;
            break;
        }
        // remove it from list if done
        if(h->done()) {
            // watch out!! this invalidates all iterators if not associative containers
            // erase(i++): the post-increment advances i BEFORE erase runs, so
            // we keep a valid iterator — correct for node-based containers
            // (list/set/map), NOT for vector/deque.
            // NOTE(review): presumably handler_queue_t is node-based — confirm.
            cuosTrace("handler %p is done, removing from handler_queue\n", h);
            delete h;
            *i = 0;
            m_handler_queue.erase(i++);
        } else
            ++i;
    }
#else
    // Disabled alternative using the (broken) progress_and_test predicate above.
    list.erase(
       std::remove_if(list.begin(), list.end(), progress_and_test),
       list_end()
    );
#endif

    return ret;
}

//----------------------------------------------------------------------

// One iteration of the system-service loop: wait up to `timeout_ms` for a
// request from the device, wrap it in a handler if one arrived, then give
// every queued handler a chance to make progress. A fetch timeout
// (WOULDBLOCK) is not an error — handlers are progressed anyway.
// Returns cuos::SUCCESS, cuos::RUNTIME (handler creation failed),
// cuos::INVALID (queue in a bad state), or a handler's error code.
int cuos::host::runtime::handle_system_service(int timeout_ms)
{
    sys_req_t req;

    //cuosTrace("before d2h.timed_fetch(timeout=%d)\n", timeout_ms);
    const int fetch_rc = m_queue_d2h.timed_fetch(&req, timeout_ms);

    if(cuqu::SUCCESS == fetch_rc) {
        // A request arrived: turn it into a handler and enqueue it.
        //dump(req);
        handler *h = make_handler(&req);
        if(!h) {
            cuosError("can't create handler!\n");
            return cuos::RUNTIME;
        }
        queue_handler(h);
    } else if(cuqu::WOULDBLOCK != fetch_rc) {
        // cuqu::INVALID / cuqu::UNSET — the queue itself is broken.
        return cuos::INVALID;
    }
    // WOULDBLOCK falls through: progress existing handlers even with no
    // new request.

    //cuosTrace("handle_system_service done!\n");
    return progress_handlers();
}

//----------------------------------------------------------------------

// Push a reply onto the host-to-device queue, waiting up to reply_timeout.
// Returns the queue's push result code.
int cuos::host::runtime::push_reply(sys_rep_t *rep)
{
    assert(rep);
    return m_queue_h2d.timed_push(rep, reply_timeout);
}

//----------------------------------------------------------------------

// Legacy synchronous service loop (superseded by the handler-based
// handle_system_service above): fetch one request, dispatch it inline via
// handle_req(), and push the reply back immediately if the request asks for
// one. The do{...}while(false) is an early-exit idiom — each `break` jumps
// to the final return.
int cuos::host::runtime::handle_system_service_old(int timeout_ms)
{
    /*
      while(!timeout && !kernel_ended) {
        handle_sys_req();
      }
     */

    int ret = cuos::SUCCESS;
    sys_req_t req;
    sys_rep_t rep;
    do {
        int retcode;

        cuosTrace("before d2h.timed_fetch(timeout=%d)\n", timeout_ms);
        retcode = m_queue_d2h.timed_fetch(&req, timeout_ms);

        // process request
        if(cuqu::SUCCESS == retcode) {
            // NOTE: unlike the new path, a handle_req() failure does NOT
            // skip the reply below — the (possibly stale) rep is still sent
            // if the request demands one.
            ret = handle_req(&req, &rep);
        } else if(cuqu::WOULDBLOCK == retcode) {
            ret = cuos::WOULDBLOCK;
            break;
        } else {
            //cuqu::INVALID:
            //cuqu::UNSET:
            ret = cuos::INVALID;
            break;
        }

        // send back reply if needed
        if(req.flags & system_service::request::flag_need_reply) {
            // echo the request serial so the device can match the reply
            rep.serial = req.serial;
            rep.flags = 0;
            cuosTrace("this system service needs a reply, sending it back\n");
            // infinite timeout: the device side is expected to drain replies
            retcode =  m_queue_h2d.timed_push(&rep, timeout_infinite);
            if(cuqu::SUCCESS == retcode) {
                // fine!
            } else if(cuqu::WOULDBLOCK == retcode) {
                // something is wrong !!!
                cuosError("got WOULDBLOCK while sending reply back\n");
                ret = cuos::RUNTIME;
                break;
            } else {
                //cuqu::INVALID:
                //cuqu::UNSET:
                cuosError("got retcode=%d while sending reply back\n", retcode);
                ret = cuos::INVALID;
                break;
            }
        } else {
            cuosTrace("no reply needed\n");
        }
    } while(false);

    cuosTrace("handle_system_service done! ret=%d\n", ret);

    return ret;
}

//----------------------------------------------------------------------
//----------------------------------------------------------------------
// dev_id == -1 means 'pick first suitable device'

// Initialize CUDA for this process: select device `dev_id`, or scan for the
// first device that can both map host memory and overlap copy with compute
// when dev_id == -1. Also enables cudaDeviceMapHost. Returns cuos::SUCCESS
// or cuos::INVALID if no suitable device can be selected.
int cuos::host::cuda_init(int dev_id)
{
    cudaDeviceProp deviceProp;
    int device_count = 0;
    bool dev_found = false;
    // CUDA initialization
    cuosSafeCall(cudaGetDeviceCount(&device_count));
    cuosTrace("device_count=%d\n", device_count);

    if(-1 == dev_id) {
        // pick first suitable device
        cuosTrace("selecting first suitable device\n");
        for(int dev=0; dev<device_count; ++dev) {
            cuosSafeCall(cudaGetDeviceProperties(&deviceProp, dev));
            if(!deviceProp.canMapHostMemory) {
                cuosTrace("ERROR Device %d cannot map host memory!\n", dev);
                continue;
            }
            if(!deviceProp.deviceOverlap) {
                cuosTrace("ERROR Device %d cannot concurrently copy memory and execute a kernel!\n", dev);
                continue;
            }

            cuosTrace("selecting device=%d\n", dev);
            cudaError err = cudaSetDevice(dev);
            if(cudaSuccess == err) {
                dev_found = true;
                dev_id = dev;
                break;
            }
            // BUGFIX: the original passed only `dev` for two %d conversions,
            // reading a garbage varargs slot; supply `err` as well.
            cuosTrace("WARNING error %d while selecting device=%d\n", err, dev);
        }

        if(!dev_found) {
            cuosError("ERROR can't find suitable device, exiting...\n");
            return cuos::INVALID;
        }

    } else {
        // pick that very device, but verify it meets the same requirements
        cuosTrace("selecting device %d\n", dev_id);
        cuosSafeCall(cudaGetDeviceProperties(&deviceProp, dev_id));
        if(!deviceProp.canMapHostMemory) {
            cuosError("ERROR Device %d cannot map host memory!\n", dev_id);
            return cuos::INVALID;
        }
        if(!deviceProp.deviceOverlap) {
            cuosError("ERROR Device %d cannot concurrently copy memory and execute a kernel!\n", dev_id);
            return cuos::INVALID;
        }

        cuosTrace("device %d passed check, calling cudaSetDevice()\n", dev_id);
        cudaError err = cudaSetDevice(dev_id);
        if(cudaSuccess != err) {
            cuosError("ERROR err=%d while selecting device=%d, exiting...\n", err, dev_id);
            return cuos::INVALID;
        }
    }

    // Required for zero-copy (mapped pinned memory) to work later on.
    cuosSafeCall(cudaSetDeviceFlags(cudaDeviceMapHost));

    // deviceProp still holds the properties of the device selected above.
    int num_sm = deviceProp.multiProcessorCount;
    cuosTrace("device=%s properties: tot_global_mem=%lu num_sm=%d ECC_enabled=%d compute_mode=%d\n", deviceProp.name, deviceProp.totalGlobalMem, num_sm, deviceProp.ECCEnabled, deviceProp.computeMode);
    cuosCheckMsg("in cuda_init()");

    return cuos::SUCCESS;
}

//----------------------------------------------------------------------

// Factory: build a runtime for `device_id` wrapped in a smart reference.
// The caller must have already selected the device (e.g. via cuda_init());
// a mismatch is reported but not treated as fatal.
cuos::host::runtime_ref_t cuos::host::make_runtime(int device_id, unsigned int block_size)
{
    int active_dev;
    cuosSafeCall(cudaGetDevice(&active_dev));
    if(active_dev != device_id)
        cuosError("ERROR currently selected device %d, supposed %d\n", active_dev, device_id);

    // prepare_device() is intentionally NOT called here; callers do it later.
    runtime *rt = new runtime(device_id, block_size);
    return runtime_ref_t(rt);
}

//----------------------------------------------------------------------

// Sanity-check kernel: the single global thread 0 verifies that the
// device-side sys_queue descriptor pointer is non-null and marked
// initialized, emitting a device-side error/trace accordingly. It performs
// no writes — it only validates what allocate_sys_q() copied over.
static __global__ void cuos_init_sys_queue(cuos::device::sys_queue_t *sys_q)
{
    // Flatten the (up to) 3D thread index and 2D block index into a global id.
    const unsigned int tid = threadIdx.x + blockDim.x*threadIdx.y + blockDim.x*blockDim.y*threadIdx.z;
    const unsigned int bid = blockIdx.x + gridDim.x*blockIdx.y;
    const unsigned int block_size = blockDim.x*blockDim.y*blockDim.z;
    //const unsigned int grid_size = gridDim.y*gridDim.x;
    const unsigned int gid = tid + bid*block_size;

    //cuos::device::sys_q = sys_q;
    // Only one thread in the whole grid does the check.
    if(gid == 0) {
#if 0
        // Disabled: earlier design stored the pointer in a device global.
        if(0 == cuos::device::sys_q()) {
            cuos_error("[%d:%d] setting sys_q to %p\n", bid, tid, sys_q_init);
            cuos::device::sys_q = sys_q_init;
        }
#endif
        if(!sys_q || ! sys_q->m_initialized) {
            cuos_error("[%d:%d] sys_q is not initialized\n", bid, tid);
        } else {
            cuos_trace("[%d:%d] sys_q IS initialized!!!\n", bid, tid);
        }
    }
    __syncthreads();
}

//----------------------------------------------------------------------

// Return the device-resident sys_queue descriptor, allocating and
// initializing it on first access.
cuos::device::sys_queue_t *cuos::host::runtime::sys_q()
{
    if(m_d_sys_q == 0)
        allocate_sys_q();
    return m_d_sys_q;
}

//----------------------------------------------------------------------

// Build the sys_queue descriptor on the host (wiring in the device-side
// views of both queues), then allocate device memory for it and copy it
// over. Sets m_d_sys_q as a side effect.
void cuos::host::runtime::allocate_sys_q()
{
    struct device::sys_queue init_sys_q;
    init_sys_q.m_req = m_queue_d2h.get_device_queue();
    init_sys_q.m_rep = m_queue_h2d.get_device_queue();
    init_sys_q.m_initialized = true;
    cuosTrace("init_sys_q: req=%p rep=%p initialized=%d\n", init_sys_q.m_req, init_sys_q.m_rep, (int)init_sys_q.m_initialized);

    // Mirror the host-side descriptor into freshly allocated device memory.
    const size_t q_bytes = sizeof(struct device::sys_queue);
    cuosSafeCall(cudaMalloc(&m_d_sys_q, q_bytes));
    cuosSafeCall(cudaMemcpy(m_d_sys_q, &init_sys_q, q_bytes, cudaMemcpyHostToDevice));
}

//----------------------------------------------------------------------

// Prime the device side: launch the cuos_init_sys_queue sanity kernel on the
// exec stream (triggering lazy creation of both the stream and the device
// sys_q) and block until it completes. Always returns cuos::SUCCESS; kernel
// failures would surface through cuosSafeCall on the synchronize.
int cuos::host::runtime::prepare_device()
{
    //cuosSafeCall(cudaMemcpyToSymbol(cuos::device::sys_q, &init_sys_q, sizeof(struct device::sys_queue)));

#if 1
#ifdef CUPRINTF_CU
    // Optional cuPrintf support for device-side tracing in the kernel below.
    cudaPrintfInit();
#endif
    cuosTrace("before launching cuos_init_sys_queue kernel\n");
    // 14 blocks x 32 threads; only global thread 0 does the actual check.
    // NOTE(review): 14 presumably matches the SM count of the target GPU — confirm.
    cuos_init_sys_queue<<<14,32, 0, exec_stream()>>>(sys_q());
    cuosSafeCall(cudaStreamSynchronize(exec_stream()));
    cuosTrace("cuos_init_sys_queue() passed\n");
#ifdef CUPRINTF_CU
    cudaPrintfDisplay(stdout, true);
    cudaPrintfEnd();
#endif
#endif

    return cuos::SUCCESS;
}

//----------------------------------------------------------------------

// Service-thread entry point (static; runs in the thread spawned by
// start_sys_server). Signals startup through m_cond, then polls
// handle_system_service with a 1 ms timeout until either an error occurs
// or stop_sys_server() raises m_host_thread_stop.
void cuos::host::runtime::sys_server(cuos::host::runtime &rtm)
{
    // Tell start_sys_server() we are alive before entering the loop.
    {
        boost::mutex::scoped_lock lock(rtm.m_mutex);
        rtm.m_host_thread_started = 1;
        rtm.m_host_thread_stop = 0;
        rtm.m_cond.notify_all();
    }

    for(;;) {
        int ret = rtm.handle_system_service(1 /*ms*/);
        if(cuos::SUCCESS != ret) {
            cuquError("sys_server: ERROR %d\n", ret);
            break;
        }

        // Snapshot the stop flag under the mutex; clear "started" on exit so
        // the destructor knows the thread is gone.
        bool stop_now = false;
        {
            boost::mutex::scoped_lock lock(rtm.m_mutex);
            if(rtm.m_host_thread_stop) {
                rtm.m_host_thread_started = 0;
                stop_now = true;
            }
        }
        if(stop_now) {
            cuquTrace("sys_server: exiting due to m_host_thread_stop signal\n");
            break;
        }
    }
}

//----------------------------------------------------------------------

// Spawn the sys_server service thread and block until it has signalled
// startup via m_cond (re-checking every ~100us to guard against a missed
// notification).
void cuos::host::runtime::start_sys_server()
{
    cuquTrace("spawning sys_server thread\n");

    m_host_thread_started = 0;
    m_host_thread_stop = 0;

    boost::function0<void> fun = boost::bind(sys_server, boost::ref(*this));
    m_host_thread = new boost::thread(fun);

    // leave some time to the thread to startup
    cuquTrace("waiting for the poppush_io thread to start up\n");
    {
        boost::mutex::scoped_lock lock(m_mutex);
        while(!m_host_thread_started) {
            boost::xtime tmout;
            boost::xtime_get(&tmout, boost::TIME_UTC);
            // Wait ~100us. BUGFIX: carry the nanosecond overflow into the
            // seconds field — the original could leave nsec >= 10^9, which
            // is an invalid boost::xtime.
            tmout.nsec += 100000;
            if(tmout.nsec >= 1000000000) {
                tmout.sec += 1;
                tmout.nsec -= 1000000000;
            }
            bool retcode = m_cond.timed_wait(lock, tmout);
            if(!retcode) {
                cuquTrace("WARNING: cond.timed_wait() timeout reached, rechecking\n"); fflush(stdout);
            }
        }
    }
}

//----------------------------------------------------------------------

// Request the service thread to stop, join it, and release the thread
// object. Safe to call only while the thread is running (see destructor).
void cuos::host::runtime::stop_sys_server()
{
    cuquTrace("waiting for end of sys_server thread\n");
    // Raise the flag under the mutex so sys_server's next check sees it.
    {
        boost::mutex::scoped_lock lock(m_mutex);
        m_host_thread_stop = 1;
    }
    // Detach our reference first, then join and destroy the thread object.
    boost::thread *t = m_host_thread;
    m_host_thread = 0;
    t->join();
    delete t;
    cuquTrace("sys_server thread ended\n");
}

//----------------------------------------------------------------------

#if 0
// NOTE(review): disabled draft of a kernel-spawn helper built on the legacy
// cudaConfigureCall/cudaSetupArgument/cudaLaunch driver API. It does not
// compile as written: `suze_t` is a typo for size_t, `params...` is not a
// valid parameter declaration, and d_A/d_B/d_C/N/threads_per_block/n_blocks
// are undeclared. The residency check (n_blocks <= num_sm * blocks_per_sm)
// documents the intended constraint for global GPU synchronization.
// Kept for reference only.

#define ALIGN_UP(OFF, SIZE) do { int tmp = (OFF); tmp += SIZE; tmp -= tmp & 0x3; (OFF) = tmp; } while(0)
#define ALIGN_OF(VAR) (sizeof(VAR)<4?4:sizeof(VAR))

int cuos::host::runtime::spawn(const char *kernel_name, dim3 grid_dim, dim3 block_dim, suze_t dynSharedMem, params...)
{
        cudaFuncAttributes attr;
        cuosSafeCall(cudaFuncGetAttributes(&attr, kernel_name));
        cuosTrace("kernel %s: sharedSizeBytes=%u constSizeBytes=%u maxThreadsPerBlock=%d numRegs=%d\n", kernel_name, attr.sharedSizeBytes, attr.constSizeBytes, attr.maxThreadsPerBlock, attr.numRegs);

        threads_per_block = block_dim.x * block_dim.y * block_dim.z;
        n_blocks = grid_dim.x * grid_dim.y * grid_dim.z;

        int num_sm = os->device_num_sm();
        if(threads_per_block > attr.maxThreadsPerBlock)
            cuosError("too many threads per block\n");
        int limit_1 = threads_per_block / attr.maxThreadsPerBlock;
        int req_shared = dynSharedMem + attr.sharedSizeBytes;
        int limit_2 = os->device_shared_per_sm() / req_shared;
        int blocks_per_sm = std::min(limit_1, limit_2);
        if(n_blocks > num_sm * blocks_per_sm)
            cuosError("blocks have to be 'resident' to avoid deadlocks in global GPU synchronization\n");

        cudaConfigureCall(grid_dim, block_dim, dynSharedMem, exec_stream);
        int offset = 0;
        void* ptr;
        ptr = (void*)(size_t)d_A;
        ALIGN_UP(offset, ALIGN_OF(ptr));
        cudaSetupArgument(&ptr, sizeof(ptr), offset);
        offset += sizeof(ptr);
        ptr = (void*)(size_t)d_B;
        ALIGN_UP(offset, ALIGN_OF(ptr));
        cudaSetupArgument(&ptr, sizeof(ptr), offset);
        offset += sizeof(ptr);
        ptr = (void*)(size_t)d_C;
        ALIGN_UP(offset, ALIGN_OF(ptr));
        cudaSetupArgument(&ptr, sizeof(ptr), offset);
        offset += sizeof(ptr);
        ALIGN_UP(offset, ALIGN_OF(N));
        int ip = N;
        cudaSetupArgument(&ip, sizeof(int), offset);
        offset += sizeof(N);
        cudaLaunch(kernel_name);
}

#endif

//----------------------------------------------------------------------

/*
 * Local variables:
 *  mode: c++
 *  c-indent-level: 4
 *  c-basic-offset: 4
 *  tab-width: 4
 *  indent-tabs-mode: nil
 * End:
 */
