/*
 *  Copyright 2010 INFN - APE group
 *
 *  Licensed under the Apache License, Version 2.0 (the "License");
 *  you may not use this file except in compliance with the License.
 *  You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 *  Unless required by applicable law or agreed to in writing, software
 *  distributed under the License is distributed on an "AS IS" BASIS,
 *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 *  See the License for the specific language governing permissions and
 *  limitations under the License.
 */

#include <sys/time.h>

//#define CUQU_DISABLE_CUPRINTF 1
//#define CUOS_ENABLE_TRACING 1

#include <cuos/cuos.h>
#include <cuos/mpi.h>

// standard MPI include
#include <mpi.h>

using namespace cuos::system_service;

//----------------------------------------------------------------------
//----------------------------------------------------------------------

//
// Return the payload size in bytes of a system-service parameter,
// selected by its type tag.
//
static size_t param_size(cuos::system_service::param &p)
{
    // One entry per param::tag_* value; slot 0 is the "no value" tag.
    // (Renamed from "param_size" to avoid shadowing the function name.)
    static size_t tag_sizes[param::num_tags] = {
        0,
        sizeof(int),
        sizeof(unsigned int),
        sizeof(long),
        sizeof(unsigned long),
        sizeof(float),
        sizeof(double),
        sizeof(void*)
    };
    assert(p.tag < param::num_tags);
    return tag_sizes[p.tag];
}

//----------------------------------------------------------------------

//
// Map a cuMPI datatype tag to its element size in bytes on the host.
//
static size_t cumpi_type_size(cuMPI_Datatype t)
{
    // Indexed by datatype tag; order mirrors the table in cumpi_trans_type().
    static size_t type_sizes[cuMPI_NUM_TYPES] = {
        0,                      // DATATYPE_NULL
        sizeof(unsigned char),  // BYTE
        sizeof(unsigned char),  // PACKED
        sizeof(char),           // CHAR
        sizeof(short),          // SHORT
        sizeof(int),            // INT
        sizeof(long),           // LONG
        sizeof(float),          // FLOAT
        sizeof(double)          // DOUBLE
    };
    assert(t >= 0 && t < cuMPI_NUM_TYPES);
    return type_sizes[t];
}

//
// Translate a cuMPI datatype tag into the corresponding MPI datatype.
//
static MPI_Datatype cumpi_trans_type(cuMPI_Datatype t)
{
    // Indexed by datatype tag; order mirrors the table in cumpi_type_size().
    static MPI_Datatype mpi_types[cuMPI_NUM_TYPES] = {
        MPI_DATATYPE_NULL,
        MPI_BYTE,
        MPI_PACKED,
        MPI_CHAR,
        MPI_SHORT,
        MPI_INT,
        MPI_LONG,
        MPI_FLOAT,
        MPI_DOUBLE
    };
    assert(t >= 0 && t < cuMPI_NUM_TYPES);
    return mpi_types[t];
}

//
// Translate a cuMPI communicator tag into the corresponding MPI communicator.
//
static MPI_Comm cumpi_trans_comm(cuMPI_Comm c)
{
    static MPI_Comm mpi_comms[cuMPI_NUM_COMMS] = {
        MPI_COMM_NULL,
        MPI_COMM_WORLD,
        MPI_COMM_SELF
    };
    assert(c >= 0 && c < cuMPI_NUM_COMMS);
    return mpi_comms[c];
}

//----------------------------------------------------------------------
// old implementation
//----------------------------------------------------------------------

//
// Blocking cuMPI_Send service handler (old, synchronous implementation).
//
// Decodes the request parameters, stages the device buffer into a host
// temporary buffer on the I/O stream and performs a blocking MPI_Send.
// The MPI error code is reported back to the kernel via rep->retval;
// the function return value only reports runtime (CUDA/pool) failures.
//
int cuos::host::runtime::handle_mpi_send(sys_req_t *req, sys_rep_t *rep)
{
    int ret = SUCCESS;
    void *h_buf = 0;            // device-side source buffer from the kernel
    int count = 0;
    cuMPI_Datatype datatype;
    int dest;
    int tag;
    cuMPI_Comm comm;
    size_t buf_size = 0;

    // param layout: (ptr buf, int count, int datatype, int dest, int tag, int comm)
    h_buf    = req->params[0].ptr; assert(req->params[0].tag == param::tag_ptr);
    count    = req->params[1].si;
    datatype = req->params[2].si;
    dest     = req->params[3].si;
    tag      = req->params[4].si;
    comm     = req->params[5].si;

    buf_size = cumpi_type_size(datatype)*count;
    void *tmp_buf = get_tmp_buf(buf_size);
    if(!tmp_buf) {
        // staging-buffer pool exhausted: bail out before touching the buffer
        cuosError("ERROR: can't get tmp buf\n");
        return RUNTIME;
    }

    cuosTrace("invoking cuMPI_Send(device_buf=%p, count=%d, type=%d, dest_rank=%d)\n", h_buf, count, datatype, dest);

    do {
        cudaError_t retcode;

        // stage device data into the host buffer on the dedicated I/O stream
        retcode= cudaMemcpyAsync(tmp_buf, h_buf, buf_size, cudaMemcpyDeviceToHost, io_stream());
        if(retcode != cudaSuccess) {
            cuosError("ERROR cudaMemcpyAsync() retcode=%d\n", retcode);
            ret = RUNTIME;
            break;
        }

        // wait for the copy to land before handing the buffer to MPI
        retcode = cudaStreamSynchronize(io_stream());
        if(retcode != cudaSuccess) {
            // fixed: this message used to claim the failing call was cudaMemcpyAsync
            cuosError("ERROR cudaStreamSynchronize() retcode=%d\n", retcode);
            ret = RUNTIME;
            break;
        }

        // MPI result (success or not) always goes back to the kernel
        int err = MPI_Send(tmp_buf, count, cumpi_trans_type(datatype), dest, tag, cumpi_trans_comm(comm));
        if(err != MPI_SUCCESS) {
            cuosError("ERROR MPI_Send() err=%d\n", err);
        }        
        rep->retval = cuos_make_int_param(err);

    }  while(false);

    rel_tmp_buf(tmp_buf);
    return ret;
}

//----------------------------------------------------------------------

//
// Blocking cuMPI_Recv service handler (old, synchronous implementation).
//
// Decodes the request parameters, performs a blocking MPI_Recv into a
// host temporary buffer, then copies the payload to the device buffer
// on the I/O stream. The MPI error code is reported back to the kernel
// via rep->retval; the function return value only reports runtime
// (CUDA/pool) failures.
//
int cuos::host::runtime::handle_mpi_recv(sys_req_t *req, sys_rep_t *rep)
{
    int ret = SUCCESS;
    void *h_buf = 0;            // device-side destination buffer from the kernel
    int count = 0;
    cuMPI_Datatype datatype;
    int src;
    int tag;
    cuMPI_Comm comm;
    size_t buf_size = 0;

    // param layout: (ptr buf, int count, int datatype, int src, int tag, int comm)
    h_buf    = req->params[0].ptr; assert(req->params[0].tag == param::tag_ptr);
    count    = req->params[1].si;
    datatype = req->params[2].si;
    src      = req->params[3].si;
    tag      = req->params[4].si;
    comm     = req->params[5].si;

    buf_size = cumpi_type_size(datatype)*count;
    void *tmp_buf = get_tmp_buf(buf_size);
    if(!tmp_buf) {
        // staging-buffer pool exhausted: MPI_Recv must not write through NULL
        cuosError("ERROR: can't get tmp buf\n");
        return RUNTIME;
    }

    cuosTrace("invoking cuMPI_Recv(device_buf=%p, count=%d, type=%d, src_rank=%d)\n", h_buf, count, datatype, src);

    do {
        cudaError_t retcode;
        MPI_Status stat;

        int err = MPI_Recv(tmp_buf, count, cumpi_trans_type(datatype), src, tag, cumpi_trans_comm(comm), &stat);
        rep->retval = cuos_make_int_param(err);
        if(err != MPI_SUCCESS) {
            // nothing was received: do not push garbage to the device
            cuosError("ERROR MPI_Recv() err=%d, skipping cudaMemcpy\n", err);
            break;
        }
        cuosTrace("after MPI_Recv() before cudaMemcpyAsync\n");
        // push the received payload to the device buffer on the I/O stream
        retcode = cudaMemcpyAsync(h_buf, tmp_buf, buf_size, cudaMemcpyHostToDevice, io_stream());
        if(retcode != cudaSuccess) {
            cuosError("ERROR cudaMemcpyAsync() retcode=%d\n", retcode);
            ret = RUNTIME;
            break;
        }
        cuosTrace("before cudaStreamSynchronize\n");
        // the staging buffer is released below, so wait for the copy to finish
        retcode = cudaStreamSynchronize(io_stream());
        if(retcode != cudaSuccess) {
            cuosError("ERROR cudaStreamSynchronize() retcode=%d\n", retcode);
            ret = RUNTIME;
            break;
        }
    }  while(false);

    rel_tmp_buf(tmp_buf);

    return ret;
}

//----------------------------------------------------------------------
//----------------------------------------------------------------------

#include "cuos/detail/handler.h"

namespace cuos {
    namespace host {
        // Asynchronous state-machine handler servicing cuMPI_Send /
        // cuMPI_Isend requests issued from device code.  The runtime
        // repeatedly calls progress() until done() returns true, then
        // removes and deletes the handler.
        class mpi_send: public handler
        {
        public:
            // Lifecycle states; see progress() for the transitions.
            enum state_t {
                NONE = 0,     // invalid / post-destruction sentinel
                INIT,         // decode request params, grab staging buffer
                READY,        // issue device->host cudaMemcpyAsync
                WAIT_MEMCPY,  // poll the I/O stream until the copy lands
                WAIT_MPI,     // MPI_Isend issued, polling with MPI_Test
                REPLY,        // push the reply back to the device queue
                FLAGREQ,      // flag completion in the device-side cuMPI request
                RELEASE,      // release the staging buffer
                DONE          // finished; runtime may delete the handler
            };
            state_t     m_state;      // current state machine position
            void       *m_d_buf;      // device-side source buffer
            void       *m_tmp_buf;    // host staging buffer (from runtime pool)
            int         m_count;      // element count
            cuMPI_Datatype m_datatype;
            int         m_dest;       // destination rank
            int         m_tag;
            cuMPI_Comm  m_comm;
            size_t      m_buf_size;   // staging buffer size in bytes
            MPI_Request m_mpi_req;    // host-side request from MPI_Isend
            void       *m_d_mpireq;   // device-side cumpi_request (Isend only)

        public:
            mpi_send(sys_req_t *req);
            ~mpi_send();
            bool done() { return m_state == DONE; }
            int progress(runtime *rtm);
        };

        // Factory used by the runtime dispatch table.
        handler *make_mpi_send_handler(sys_req_t *req)
        {
            return new mpi_send(req);
        }

    }
}


//----------------------------------------------------------------------

//
// Construct a send handler in the INIT state; the request parameters are
// decoded lazily in progress().  The initializer list is ordered to match
// the member declaration order (members are initialized in declaration
// order regardless of list order — the old list triggered -Wreorder).
//
cuos::host::mpi_send::mpi_send(sys_req_t *req) : handler(req), m_state(INIT), m_d_buf(0), m_tmp_buf(0), m_count(-1), m_datatype(-1), m_dest(-1), m_tag(-1), m_comm(-1), m_buf_size(0), m_d_mpireq(0)
{
    cuosTrace("mpi_send CTOR this=%p req.id=%d\n", this, m_req.id);
}

//----------------------------------------------------------------------

//
// Destructor: sanity-check that the handler has fully completed, then
// scrub every member back to its "empty" value so stale state is easy
// to spot in a debugger.
//
cuos::host::mpi_send::~mpi_send()
{
    cuosTrace("mpi_send DTOR this=%p\n", this);

    // The staging buffer must have been returned to the pool in RELEASE.
    if(m_tmp_buf) {
        cuosError("ERROR: tmp_buf is still allocated!!!\n");
    }

    // Destruction is only expected before INIT or after DONE.
    if(m_state != NONE && m_state != DONE) {
        cuosError("ERROR: handler DTOR invoked while in state=%d!!!!\n", m_state);
    }

    m_state    = NONE;
    m_d_buf    = 0;
    m_tmp_buf  = 0;
    m_count    = -1;
    m_datatype = -1;
    m_dest     = -1;
    m_tag      = -1;
    m_comm     = -1;
    m_buf_size = 0;
}

//----------------------------------------------------------------------

//
// Advance the mpi_send state machine.
//
// Called repeatedly by the runtime until done() is true.  States that
// complete immediately FALL THROUGH to the next case (note the commented
// out break statements); states that wait on an external event (CUDA
// stream, MPI request) break out and are re-entered on the next call.
//
// Normal flow:
//   INIT -> READY -> WAIT_MEMCPY -> WAIT_MPI -> {REPLY | FLAGREQ | -} -> RELEASE -> DONE
//
// Returns SUCCESS, or RUNTIME/INVALID on unrecoverable runtime errors;
// MPI-level errors are instead reported back to the kernel via
// m_rep.retval.
//
int cuos::host::mpi_send::progress(runtime *rtm)
{
    int ret = SUCCESS;
    int err;
    //MPI_Status stat;
    int flag;
    cudaError_t retcode;
    struct timeval tm;
    static int cnt=0;          // reply serial counter, shared across handlers (see #warning below)
    gettimeofday(&tm, NULL);   // timestamp refreshed after each fall-through for tracing/timing
    switch(m_state) {
    case NONE:
        // NONE only exists as a pre-construction/post-destruction sentinel.
        cuosError("NONE is not valid!!\n");
        ret = RUNTIME;
        break;

    case INIT:
        cuosTrace("%d:%ld this=%p state=INIT\n", tm.tv_sec, tm.tv_usec, this);
        {
            m_init_tm = tm;
            if(m_req.id == sys_mpi_send) {
                // blocking send: 6 params (ptr buf, count, datatype, dest, tag, comm)
                if(m_req.n_params      != 6               ||
                   m_req.params[0].tag != param::tag_ptr  ||
                   m_req.params[5].tag != param::tag_int) {
                    cuosError("ERROR: invalid req params: n_params=%d param[0].tag=0x%x param[5].tag=0x%x\n",
                              m_req.n_params, m_req.params[0].tag, m_req.params[5].tag);
                    dump(m_req);
                    if(m_req.flags & system_service::request::flag_need_reply) {
                        // send back reply if needed
                        m_rep.serial = m_req.serial;
                        m_rep.flags = 0;
                        m_rep.retval = cuos_make_int_param(cuMPI_ERR_UNKNOWN);
                        m_state = REPLY;
                    } else {
                        ret = RUNTIME;
                        m_state = RELEASE;
                    }
                    break;
                }
                m_d_buf    = m_req.params[0].ptr;
                m_count    = m_req.params[1].si;
                m_datatype = m_req.params[2].si;
                m_dest     = m_req.params[3].si;
                m_tag      = m_req.params[4].si;
                m_comm     = m_req.params[5].si; assert(m_req.params[5].tag == param::tag_int);
                m_d_mpireq = 0;

                cuosTrace("decoded cuMpi_Send(device_buf=%p, count=%d, type=%d, dest_rank=%d, comm=%d)\n", m_d_buf, m_count, m_datatype, m_dest, m_comm);

            } else if(m_req.id == sys_mpi_isend) {

                // non-blocking send: 7th param is the device-side cumpi_request ptr
                assert(m_req.n_params == 7);
                assert(m_req.params[0].tag == param::tag_ptr);
                m_d_buf    = m_req.params[0].ptr;
                m_count    = m_req.params[1].si;
                m_datatype = m_req.params[2].si;
                m_dest     = m_req.params[3].si;
                m_tag      = m_req.params[4].si;
                m_comm     = m_req.params[5].si;
                m_d_mpireq = m_req.params[6].ptr; assert(m_req.params[6].tag == param::tag_ptr);
                
                cuosTrace("decoded cuMPI_Isend(device_buf=%p, count=%d, type=%d, dest_rank=%d, comm=%d, mpi_req=%p)\n", m_d_buf, m_count, m_datatype, m_dest, m_comm,  m_d_mpireq);

            } else {
                cuosError("ERROR: invalid req.id=%d\n", m_req.id);
                ret = INVALID;
                m_state = RELEASE;
                break;
            }
                
            // grab a host staging buffer big enough for the whole payload
            m_buf_size = cumpi_type_size(m_datatype)*m_count;
            m_tmp_buf  = rtm->get_tmp_buf(m_buf_size);

            if(!m_tmp_buf) {
                cuosTrace("ERROR: can't get tmp buf\n");
                ret = RUNTIME;
                m_state = RELEASE;
                break;
            }

            m_state = READY;
        }
        // break; fall-through
        gettimeofday(&tm, NULL);

    case READY:
        cuosTrace("%d:%ld this=%p state=READY\n", tm.tv_sec, tm.tv_usec, this);
        cuosTrace("issuing cudaMemcpyAsync(dest=%p, src=%p, size=%u)\n", m_tmp_buf, m_d_buf, m_buf_size);
        // stage device data into the host buffer on the runtime's I/O stream
        retcode = cudaMemcpyAsync(m_tmp_buf, m_d_buf, m_buf_size, cudaMemcpyDeviceToHost, rtm->io_stream());
        if(retcode != cudaSuccess) {
            cuosError("ERROR cudaMemcpyAsync() retcode=%d\n", retcode);
            ret = RUNTIME;
            m_state = RELEASE;
            break;
        }
        m_state = WAIT_MEMCPY;
        //break; fall-through
        gettimeofday(&tm, NULL);

    case WAIT_MEMCPY:
        //cuosTrace("%d:%ld this=%p state=WAIT_MEMCPY\n", tm.tv_sec, tm.tv_usec, this);
        // non-blocking poll of the I/O stream so other handlers can progress
        retcode = cudaStreamQuery(rtm->io_stream());
        if(retcode == cudaSuccess) {
            //cuosTrace("streamQuery returned success!\n");
        } else if(retcode == cudaErrorNotReady) {
            // keep on querying
            break;
        } else {
            cuosError("ERROR streamQuery ret=%d\n", retcode);
            ret = RUNTIME;
            m_state = RELEASE;
            break;
        }
        // success!!
        cuosTrace("%d:%ld calling MPI_Isend()\n", tm.tv_sec, tm.tv_usec);
        // the copy has landed: hand the staged buffer to MPI (non-blocking)
        err = MPI_Isend(m_tmp_buf, m_count, cumpi_trans_type(m_datatype), m_dest, m_tag, cumpi_trans_comm(m_comm), &m_mpi_req);
        cuosTrace("MPI_Isend() err=%d\n", err);
        // prepare reply
#warning "non thread-safe code here"
        // NOTE(review): serial comes from a shared static counter, not from
        // m_req.serial (commented out below) — presumably deliberate, but
        // flagged non-thread-safe by the #warning above.
        m_rep.serial = cnt++;
        //m_rep.serial = m_req.serial;
        m_rep.flags  = 0;
        m_rep.retval = cuos_make_int_param(err);
#if CUOS_REPLY_DUMMY_WORDS
        m_rep.dummy[0]  = 0xc1a0c1a9;
        m_rep.dummy[1]  = 0xc1a0c1a8;
        m_rep.dummy[2]  = 0xc1a0c1a7;
#endif
        if(err != MPI_SUCCESS) {
            cuosError("ERROR Mpi_Send() err=%d\n", err);
            m_state = RELEASE;
            break;
        }
        m_state = WAIT_MPI;
        // break; fall-through
        gettimeofday(&tm, NULL);

    case WAIT_MPI:
        //cuosTrace("%d:%ld this=%p state=WAIT_MPI\n", tm.tv_sec, tm.tv_usec, this);
        // non-blocking poll of the outstanding MPI_Isend
        err = MPI_Test(&m_mpi_req, &flag, MPI_STATUS_IGNORE);
        if(!flag) {
            if(err != MPI_SUCCESS) {
                cuosError("ERROR MPI_Test() err=%d, returning it to CUDA kernel\n", err);
                m_rep.retval = cuos_make_int_param(err);
                m_state = RELEASE;
            }
            break;
        }
        assert(err == MPI_SUCCESS);
        cuosTrace("%d:%ld MPI_Test() success\n", tm.tv_sec, tm.tv_usec);

        // completion path depends on how the kernel wants to be notified
        if(m_req.flags & system_service::request::flag_need_reply) {
            // send back reply if needed
            m_state = REPLY;
        } else if(m_req.flags & system_service::request::flag_request) {
            // Isend path: completion is flagged in the device-side request
            if(!m_d_mpireq) {
                cuosError("ERROR invalid mpirequest device ptr %p\n", m_d_mpireq);
                ret = RUNTIME;
                m_state = RELEASE;
            } else {
                m_state = FLAGREQ;
            }
            break;
        } else {
            // fire-and-forget: no notification requested
            m_state = RELEASE;
            break;
        }
        // break; fall-through
        gettimeofday(&tm, NULL);

    case REPLY:
        cuosTrace("%d:%ld this=%p state=REPLY\n", tm.tv_sec, tm.tv_usec, this);
        {
            int retval;

            // set current tm as reply time
            m_reply_tm = tm;

            //cuosTrace("this system service needs a reply, sending it back\n");
            retval = rtm->push_reply(&m_rep); // m_queue_h2d.timed_push(&i->m_rep, timeout_infinite);
            if(cuqu::SUCCESS == retval) {
                // fine!
                // break;
            } else if(cuqu::WOULDBLOCK == retval) {
                // something is wrong !!! stay in this state and try again
                cuosError("got WOULDBLOCK while sending reply back, trying again\n");
                break;
            } else {
                // really wrong !!! go to next state but report error
                //cuqu::INVALID:
                //cuqu::UNSET:
                cuosError("got retcode=%d while sending reply back\n", retval);
                ret = INVALID;
                //break;
            }
        }
        m_state = RELEASE;
        // break; fall-through
        gettimeofday(&tm, NULL);

    case RELEASE:
        cuosTrace("%d:%ld this=%p state=RELEASE\n", tm.tv_sec, tm.tv_usec, this);
        // return the staging buffer to the pool (reached on every path)
        if(m_tmp_buf) {
            cuosTrace("freeing tmp_buf\n");
            rtm->rel_tmp_buf(m_tmp_buf);
            m_tmp_buf = 0;
        }
        m_state = DONE;
        // break; fall-through
        gettimeofday(&tm, NULL);

    case DONE:
        cuosTrace("%d:%ld this=%p state=DONE\n", tm.tv_sec, tm.tv_usec, this);
        // stay here until runtime does:
        // remove it from queue
        // delete it
        m_done_tm = tm;
        ret = SUCCESS;
        break;

    case FLAGREQ:
        cuosTrace("this=%p state=FLAGREQ\n", this);
        {
            assert(m_d_mpireq);
            cuosTrace("issuing cudaMemcpyAsync(d_mpireq=%p, size=%u)\n", m_d_mpireq, sizeof(struct cumpi_request));
            //struct cumpi_request mpireq = { 1, cuMPI_SUCCESS };
            // reuse tmp_buf to set the MPI_Request flag
            cumpi_request *preq = (cumpi_request*)m_tmp_buf;
            *preq = (cumpi_request)INIT_CUMPIREQUEST(1);
            retcode = cudaMemcpyAsync(m_d_mpireq, m_tmp_buf, sizeof(struct cumpi_request), cudaMemcpyHostToDevice, rtm->io_stream());
            if(retcode != cudaSuccess) {
                cuosError("ERROR cudaMemcpyAsync() retcode=%d\n", retcode);
                m_state = RELEASE;
                break;
            }
            cuosTrace("invoking cudaStreamSynchronize()\n");
            // NOTE(review): unlike WAIT_MEMCPY, this path blocks the host
            // thread; tmp_buf must stay alive until the copy completes.
            retcode = cudaStreamSynchronize(rtm->io_stream());
            if(retcode != cudaSuccess) {
                cuosError("ERROR cudaStreamSynchronize() retcode=%d\n", retcode);
                m_state = RELEASE;
                break;
            }

            m_state = RELEASE;
        }
        break;

    default:
        cuosError("ERROR this=%p invalid state=%d\n", this, (int)m_state);
        ret = RUNTIME;
    }

    cuquTrace("returning %d\n", ret);
    return ret;
}

//----------------------------------------------------------------------
//----------------------------------------------------------------------

namespace cuos {
    namespace host {
        // Asynchronous state-machine handler servicing cuMPI_Recv /
        // cuMPI_Irecv requests issued from device code.  The runtime
        // repeatedly calls progress() until done() returns true, then
        // removes and deletes the handler.
        class mpi_recv: public handler
        {
        public:
            // Lifecycle states; see progress() for the transitions.
            // (Note: order differs from mpi_send — here the MPI wait
            // precedes the memcpy wait.)
            enum state_t {
                NONE = 0,     // invalid / post-destruction sentinel
                INIT,         // decode request params, grab staging buffer
                READY,        // issue MPI_Irecv into the staging buffer
                WAIT_MPI,     // polling the MPI_Irecv with MPI_Test
                WAIT_MEMCPY,  // poll host->device cudaMemcpyAsync completion
                REPLY,        // push the reply back to the device queue
                FLAGREQ,      // flag completion in the device-side cuMPI request
                RELEASE,      // release the staging buffer
                DONE          // finished; runtime may delete the handler
            };
            state_t     m_state;      // current state machine position
            void       *m_d_buf;      // device-side destination buffer
            void       *m_tmp_buf;    // host staging buffer (from runtime pool)
            int         m_count;      // element count
            cuMPI_Datatype m_datatype;
            int         m_src;        // source rank
            int         m_tag;
            cuMPI_Comm  m_comm;
            size_t      m_buf_size;   // staging buffer size in bytes
            MPI_Request m_mpi_req;    // host-side request from MPI_Irecv
            void       *m_d_mpireq;   // device-side cumpi_request (Irecv only)

        public:
            mpi_recv(sys_req_t *req);
            ~mpi_recv();
            bool done() { return m_state == DONE; }
            int progress(runtime *rtm);
        };

        // Factory used by the runtime dispatch table.
        handler *make_mpi_recv_handler(sys_req_t *req)
        {
            handler *ret = new mpi_recv(req);
            cuosTrace("req=%p handler=%p\n", req, ret);
            return ret;
        }
    }
}

//----------------------------------------------------------------------

//
// Construct a receive handler in the INIT state; the request parameters
// are decoded lazily in progress().  All scalar members are now reset to
// sentinel values (the old list left m_count/m_datatype/m_src/m_tag/
// m_comm/m_buf_size uninitialized, unlike the mpi_send constructor);
// the initializer list follows the member declaration order.
//
cuos::host::mpi_recv::mpi_recv(sys_req_t *req) : handler(req), m_state(INIT), m_d_buf(0), m_tmp_buf(0), m_count(-1), m_datatype(-1), m_src(-1), m_tag(-1), m_comm(-1), m_buf_size(0), m_d_mpireq(0)
{
    cuosTrace("mpi_recv CTOR this=%p req.id=%d\n", this, m_req.id);
}

//----------------------------------------------------------------------

//
// Destructor: sanity-check that the handler terminated cleanly.
//
cuos::host::mpi_recv::~mpi_recv()
{
    // fixed: the "%p" conversion was missing its 'this' argument (UB)
    cuosTrace("mpi_recv DTOR this=%p\n", this);

    // The staging buffer must have been returned to the pool in RELEASE.
    if(m_tmp_buf)
        cuosError("ERROR: tmp_buf is still allocated!!!\n");

    // Destruction is only expected before INIT or after DONE.
    if(m_state != NONE && m_state != DONE)
        cuosError("ERROR: handler DTOR invoked while in state=%d!!!!\n", m_state);
}

//----------------------------------------------------------------------

//
// Advance the mpi_recv state machine.
//
// Called repeatedly by the runtime until done() is true.  States that
// complete immediately FALL THROUGH to the next case (note the commented
// out break statements); states that wait on an external event (MPI
// request, CUDA stream) break out and are re-entered on the next call.
//
// Normal flow (MPI wait precedes the memcpy wait, mirror of mpi_send):
//   INIT -> READY -> WAIT_MPI -> WAIT_MEMCPY -> {REPLY | FLAGREQ | -} -> RELEASE -> DONE
//
// Returns SUCCESS, or RUNTIME/INVALID on unrecoverable runtime errors;
// MPI-level errors are instead reported back to the kernel via
// m_rep.retval.
//
int cuos::host::mpi_recv::progress(runtime *rtm)
{
    int ret = SUCCESS;
    int err;
    //MPI_Status stat;
    int flag;
    cudaError_t retcode;

    switch(m_state) {
    case NONE:
        // NONE only exists as a pre-construction/post-destruction sentinel.
        cuosError("NONE is not valid!!\n");
        ret = RUNTIME;
        break;

    case INIT:
        cuosTrace("this=%p state=INIT\n", this);
        {
            if(m_req.id == sys_mpi_recv) {
                // blocking recv: 6 params (ptr buf, count, datatype, src, tag, comm)
                assert(m_req.n_params == 6);
                assert(m_req.params[0].tag == param::tag_ptr);
                m_d_buf    = m_req.params[0].ptr;
                m_count    = m_req.params[1].si;
                m_datatype = m_req.params[2].si;
                m_src      = m_req.params[3].si;
                m_tag      = m_req.params[4].si;
                m_comm     = m_req.params[5].si;
                m_d_mpireq = 0;

                cuosTrace("decoded cuMPI_Recv(device_buf=%p, count=%d, type=%d, src_rank=%d)\n", m_d_buf, m_count, m_datatype, m_src);

            } else if(m_req.id == sys_mpi_irecv) {

                // non-blocking recv: 7th param is the device-side cumpi_request ptr
                assert(m_req.n_params == 7);
                assert(m_req.params[0].tag == param::tag_ptr);
                m_d_buf    = m_req.params[0].ptr;
                m_count    = m_req.params[1].si;
                m_datatype = m_req.params[2].si;
                m_src      = m_req.params[3].si;
                m_tag      = m_req.params[4].si;
                m_comm     = m_req.params[5].si;
                assert(m_req.params[6].tag == param::tag_ptr);
                m_d_mpireq = m_req.params[6].ptr;
                
                cuosTrace("decoded cuMPI_Irecv(device_buf=%p, count=%d, type=%d, src_rank=%d, mpi_req=%p)\n", m_d_buf, m_count, m_datatype, m_src, m_d_mpireq);

            } else {
                cuosError("ERROR: invalid req.id=%d\n", m_req.id);
                ret = INVALID;
                m_state = RELEASE;
                break;
            }
                
            // grab a host staging buffer big enough for the whole payload
            m_buf_size = cumpi_type_size(m_datatype)*m_count;
            m_tmp_buf  = rtm->get_tmp_buf(m_buf_size);

            if(!m_tmp_buf) {
                cuosTrace("ERROR: can't get tmp buf\n");
                ret = RUNTIME;
                m_state = RELEASE;
                break;
            }

            m_state = READY;
        }
        // break; fall-through

    case READY:
        cuosTrace("this=%p state=READY\n", this);
        // post the non-blocking receive into the host staging buffer
        err = MPI_Irecv(m_tmp_buf, m_count, cumpi_trans_type(m_datatype), m_src, m_tag, cumpi_trans_comm(m_comm), &m_mpi_req);
        // prepare reply
        m_rep.serial = m_req.serial;
        m_rep.flags = 0;
        m_rep.retval = cuos_make_int_param(err);
        if(err != MPI_SUCCESS) {
            cuosError("ERROR MPI_Recv() err=%d\n", err);
            m_state = RELEASE;
            break;
        }

        m_state = WAIT_MPI;
        // break; fall-through

    case WAIT_MPI:
        cuosTrace("this=%p state=WAIT_MPI\n", this);
        // non-blocking poll of the outstanding MPI_Irecv
        err = MPI_Test(&m_mpi_req, &flag, MPI_STATUS_IGNORE);
        if(!flag) {
            if(err != MPI_SUCCESS) {
                cuosError("ERROR MPI_Test() err=%d, returning it to CUDA kernel\n", err);
                m_rep.retval = cuos_make_int_param(err);
                m_state = RELEASE;
            }
            break;
        }
        cuosTrace("MPI_Test() success\n");
        // success, start cudaMemcpyAsync
        cuosTrace("issuing cudaMemcpyAsync(dest=%p, src=%p, size=%u)\n", m_d_buf, m_tmp_buf, m_buf_size);
        // push the received payload to the device buffer on the I/O stream
        retcode = cudaMemcpyAsync(m_d_buf, m_tmp_buf, m_buf_size, cudaMemcpyHostToDevice, rtm->io_stream());
        if(retcode != cudaSuccess) {
            cuosError("ERROR cudaMemcpyAsync() retcode=%d\n", retcode);
            ret = RUNTIME;
            m_state = RELEASE;
            break;
        }
        m_state = WAIT_MEMCPY;
        break;

    case WAIT_MEMCPY:
        cuosTrace("this=%p state=WAIT_MEMCPY\n", this);
        // non-blocking poll of the I/O stream so other handlers can progress
        retcode = cudaStreamQuery(rtm->io_stream());
        if(retcode == cudaSuccess) {
            cuosTrace("streamQuery returned success!\n");
        } else if(retcode == cudaErrorNotReady) {
            // keep on querying
            break;
        } else {
            cuosError("ERROR streamQuery ret=%d\n", retcode);
            ret = RUNTIME;
            m_state = RELEASE;
            break;
        }
        // success!!
        // completion path depends on how the kernel wants to be notified
        if(m_req.flags & system_service::request::flag_need_reply) {
            // send back reply if needed
            m_state = REPLY;
        } else if(m_req.flags & system_service::request::flag_request) {
            // Irecv path: completion is flagged in the device-side request
            if(!m_d_mpireq) {
                cuosError("ERROR invalid mpirequest device ptr %p\n", m_d_mpireq);
                ret = RUNTIME;
                m_state = RELEASE;
            } else {
                m_state = FLAGREQ;
            }
            break;
        } else {
            // fire-and-forget: no notification requested
            m_state = RELEASE;
            break;
        }
        // break; fall-through

    case REPLY:
        cuosTrace("this=%p state=REPLY\n", this);
        {
            int retval;
            cuosTrace("this system service needs a reply, sending it back\n");
            retval = rtm->push_reply(&m_rep); // m_queue_h2d.timed_push(&i->m_rep, timeout_infinite);
            if(cuqu::SUCCESS == retval) {
                // fine!
                // break;
            } else if(cuqu::WOULDBLOCK == retval) {
                // something is wrong !!! stay in this state and try again
                cuosError("got WOULDBLOCK while sending reply back, trying again\n");
                break;
            } else {
                // really wrong !!! go to next state but report error
                //cuqu::INVALID:
                //cuqu::UNSET:
                cuosError("got retcode=%d while sending reply back\n", retval);
                ret = INVALID;
                //break;
            }
        }
        m_state = RELEASE;
        // break; fall-through

    case RELEASE:
        cuosTrace("this=%p state=RELEASE\n", this);
        // return the staging buffer to the pool (reached on every path)
        if(m_tmp_buf) {
            cuosTrace("freeing tmp_buf\n");
            rtm->rel_tmp_buf(m_tmp_buf);
            m_tmp_buf = 0;
        }
        m_state = DONE;
        // break; fall-through

    case DONE:
        cuosTrace("this=%p state=DONE\n", this);
        // stay here until runtime does:
        // remove it from queue
        // delete it
        ret = SUCCESS;
        break;

    case FLAGREQ:
        cuosTrace("this=%p state=FLAGREQ\n", this);
        {
            assert(m_d_mpireq);
            cuosTrace("issuing cudaMemcpyAsync(d_mpireq=%p, size=%u)\n", m_d_mpireq, sizeof(struct cumpi_request));
            //struct cumpi_request mpireq = { 1, cuMPI_SUCCESS };
            // reuse tmp_buf to set the MPI_Request flag
            cumpi_request *preq = (cumpi_request*)m_tmp_buf;
            // NOTE(review): GNU/C99 compound literal in C++; mpi_send::progress
            // uses INIT_CUMPIREQUEST(1) here instead — presumably equivalent,
            // consider unifying.
            *preq = (cumpi_request){1,cuMPI_SUCCESS};
            retcode = cudaMemcpyAsync(m_d_mpireq, m_tmp_buf, sizeof(struct cumpi_request), cudaMemcpyHostToDevice, rtm->io_stream());
            if(retcode != cudaSuccess) {
                cuosError("ERROR cudaMemcpyAsync() retcode=%d\n", retcode);
                m_state = RELEASE;
                break;
            }
            cuosTrace("invoking cudaStreamSynchronize()\n");
            // NOTE(review): unlike WAIT_MEMCPY, this path blocks the host
            // thread; tmp_buf must stay alive until the copy completes.
            retcode = cudaStreamSynchronize(rtm->io_stream());
            if(retcode != cudaSuccess) {
                cuosError("ERROR cudaStreamSynchronize() retcode=%d\n", retcode);
                m_state = RELEASE;
                break;
            }

            m_state = RELEASE;
        }
        break;

    default:
        cuosError("ERROR this=%p invalid state=%d\n", this, (int)m_state);
        ret = RUNTIME;
    }

    cuquTrace("returning %d\n", ret);
    return ret;
}

//----------------------------------------------------------------------
//----------------------------------------------------------------------

/*
 * Local variables:
 *  mode: c++
 *  c-indent-level: 4
 *  c-basic-offset: 4
 *  tab-width: 4
 *  indent-tabs-mode: nil
 * End:
 */
