/**
 * Copyright (c) 2013-2022 UChicago Argonne, LLC and The HDF Group.
 * Copyright (c) 2022-2024 Intel Corporation.
 * Copyright (c) 2024-2025 Hewlett Packard Enterprise Development LP.
 * Copyright (c) 2021 Carnegie Mellon University.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

/**
 * mercury NA driver for the qlogic/intel PSM interface
 */

#include "na_plugin.h"

#include "mercury_inet.h"
#include "mercury_thread.h"

/*
 * this plugin uses the PSM API, but supports PSM2 (intel omnipath)
 * as well using the wrappers in na_psm2.h (PSM2 can emulate PSM).
 */
#ifdef PSM2

#    include <psm2.h>
#    include <psm2_mq.h>

#    include "na_psm2.h" /* wrappers */

#    define NA_PSM_NAME            "psm2"
#    define NA_PSM_PLUGIN_VARIABLE NA_PLUGIN_OPS(psm2)

#else

#    include <psm.h>
#    include <psm_mq.h>

/* locally defined struct aliases */
typedef struct psm_optkey psm_optkey_t;
typedef struct psm_ep_open_opts psm_ep_open_opts_t;

#    define NA_PSM_NAME            "psm"
#    define NA_PSM_PLUGIN_VARIABLE NA_PLUGIN_OPS(psm)

#endif /* PSM2 */

#include <stdlib.h>
#include <string.h>
#include <unistd.h>

/******************************************************************************
 * compile time defs and configuration
 */

/*
 * default uuid is used to generate the name of PSM's shared memory segment
 * in /dev/shm.   processes on the same node use this file to communicate
 * with each other.  the idea is that if you've got multiple unrelated jobs
 * running on one node, they have different shm files.   so, in theory,
 * the MPI job ID would be used to generate the uuid (assuming you are
 * running under MPI).   the psmlib uses filenames like:
 *    /dev/shm/psm_shm.30464646-3046-4646-2d30-3030302d3030
 * for shared memory communications.
 *
 *  XXX: for now, just lock it at this default.
 */
#define NA_PSM_DEFAULT_UUID ((uint8_t *) "0FFF0FFF-0000-0000-0000-0FFF0FFF0FFF")

/*
 * psm address serialize size.  we encode the psm_epid_t (a uint64_t).
 */
#define NA_PSM_ADDR_SERSIZE (sizeof(psm_epid_t))

/*
 * size params.  the psm irecv/send/isend APIs do not have any limits
 * (but internally they use the size param to choose what mode to use).
 * XXXCDC: what values make sense here?
 */
#define NA_PSM_MAX_EXPECTED   4096
#define NA_PSM_MAX_UNEXPECTED 4096

/*
 * tag management.  data sent with psm_isend() is tagged with a uint64_t.
 * when recv buffers are posted to psm with psm_irecv(), you specify
 * which bits in the tag you want to filter on and what their values
 * should be.   bits that are not filtered on can be used to convey
 * additional user-level data.
 *
 * for the mercury na psm, we reserve the top NA_PSM_TAG_NINTBITS bits
 * for internal use.   These bits are used to add a message type value
 * to each message.  We put bits from the sender's epid in the remaining
 * free bits of the top half of the PSM tag to help make the tags unique
 * across hosts.  Mercury itself uses a uint32_t (na_tag_t) for tags,
 * so that can be placed in the lower half of the PSM tag for messages
 * generated by Mercury.  For internally generated messages (i.e.
 * "control" messages used to manage RMA), we use the lower 32 bits
 * of the PSM tag to convey an RMA sequence number and status info.
 */

#define NA_PSM_TAG_NINTBITS 2 /* bits reserved for internal use */
#define NA_PSM_MAX_TAG      (((uint64_t) ~0) >> NA_PSM_TAG_NINTBITS)
#define NA_PSM_INTBIT_MASK  (~NA_PSM_MAX_TAG)

#define NA_PSM_USRTAG_MASK 0xffffffff /* to get lower 32 bits of psm tag */

/*
 * unexpected messages are sent with the NA_PSM_TAG_UNEXPECTED tag bit
 * set.  unexpected irecv()'s only filter on the internal tag bits.
 *
 * to receive unexpected messages, you prepost a pool of unexpected
 * receive buffers to PSM with irecv() so that PSM has a place to
 * put unexpected data.
 */
#define NA_PSM_TAG_UNEXPECTED (0x1ULL << 63)

/*
 * control messages are internally generated messages used for control
 * operations like RMA get/put.  control messages can be unexpected
 * or expected.   we post a pool of internal buffers for receiving
 * unexpected control messages.
 */
#define NA_PSM_TAG_CONTROL (0x1ULL << 62)

/*
 * for expected control messages, we use 2 of the lower 32 bit of
 * the PSM tag for an opcode or error status code.  the remaining
 * 30 bits are used as a sequence number.  the 2 bits contain the
 * opcode (i.e. "put" or "get").  the target strips the opcode from
 * the tag and replaces it with the error status code when it responds
 * to the request.  using the reserved bits to pass the opcode saves
 * the initiator from having to add another arg to the payload of an
 * unexpected control message (see below).  expected control messages
 * carry user data as a payload.
 */
#define NA_PSM_TAG_ECRMASK   0xc0000000 /* to get the reserved bits */
#define NA_PSM_TAG_ECSEQMASK 0x3fffffff /* to get sequence number */

/* ECSELECT is the tag select bits for an expected control irecv */
#define NA_PSM_TAG_ECSELECT ~((uint64_t) NA_PSM_TAG_ECRMASK)

/* rma_tag in payload of unexpected control message has an opcode */
#define NA_PSM_TAG_OP_PUT 0x00000000 /* a 'put' operation */
#define NA_PSM_TAG_OP_GET 0x40000000 /* a 'get' operation */

/* tag in an expected ctrl msg sent by target has status/error code */
#define NA_PSM_TAG_ST_OK     0x00000000 /* no error */
#define NA_PSM_TAG_ST_EPERM  0x40000000 /* permission error */
#define NA_PSM_TAG_ST_ENOSPC 0x80000000 /* no space (i/o doesn't fit) */
#define NA_PSM_TAG_ST_EINVAL 0xc0000000 /* invalid args */

/*
 * unexpected control (UC) message are fixed in length.   the NA must
 * post a pool of UC buffers with irecv at startup so we are ready to
 * receive them.
 */
#define NA_PSM_UCMSG_COUNT 256 /* size of pool of posted ucmsg buffers */
#define NA_PSM_UCMSG_NARGS 4   /* number of 64 bit arg vals in a ucmsg */

/*
 * args in a UCMSG are assigned as follows...
 */
#define NA_PSM_UCARG_SENDER 0 /* sender's epid */
#define NA_PSM_UCARG_HANDLE 1 /* target memory handle */
#define NA_PSM_UCARG_OFFSET 2 /* offset from base of target memory handle */
#define NA_PSM_UCARG_LENGTH 3 /* number of data bytes in RMA op */

/*
 * NA_PSM_OP code values.   these are used in both the na_psm op_id
 * and in the subop.  note that the target side of a GET RMA operation
 * does not need a opcode as it can be directly resolved without having
 * to save any state in an op_id.
 */
#define NA_PSM_OP_SEND       1  /* send (unexpected or expected) */
#define NA_PSM_OP_RECV       2  /* recv (unexpected or expected) */
#define NA_PSM_OP_PUT        3  /* RMA put op (local side) */
#define NA_PSM_OP_PUT_TARGET 4  /* RMA put op (target side) */
#define NA_PSM_OP_GET        5  /* RMA get op (local side) */
#define NA_PSM_OP_PUTSNDCTL  6  /* RMA put send control message subop */
#define NA_PSM_OP_PUTSNDDATA 7  /* RMA put send data subop */
#define NA_PSM_OP_PUTRCVSTS  8  /* RMA put recv status from target subop */
#define NA_PSM_OP_PUTRCVDATA 9  /* RMA put recv data (on target) subop */
#define NA_PSM_OP_GETSNDCTL  10 /* RMA get send control message subop */
#define NA_PSM_OP_GETRCV     11 /* RMA get recv target response subop */

/******************************************************************************
 * psm na's private data structures
 */

/*
 * na_psm_addr: a psm address.  psm has a global epid number and an
 * epaddr address handle (an opaque psm lib pointer) that we group
 * together and track with this.  we keep a list of all addresses
 * currently allocated in the na_psm_class structure (alist).
 * other than alist linkage, the values in na_psm_addr are set
 * when the addr is allocated and not changed after (no need for
 * locking).
 */
struct na_psm_addr {
    struct na_psm_class *pcls; /* class we belong to */

    int origin;          /* who created us?  just for reference/debugging */
#define PSM_ORG_SELF   0 /* address is us, created at init */
#define PSM_ORG_LOOKUP 1 /* address created by lookup */
#define PSM_ORG_DESER  2 /* address created via deserialize */
#define PSM_ORG_RECV   3 /* address created by unexpected recv */

    psm_epid_t epid;           /* epid number for this address */
    psm_epaddr_t epaddr;       /* opaque epaddr pointer for this address */
    hg_atomic_int32_t nrefs;   /* atomic ref count (incremented on lookup) */
    LIST_ENTRY(na_psm_addr) q; /* linkage off of na_psm_class alist */
};

/*
 * na_psm_ucmsg: an unexpected control message.  we don't track psm_mq_req_t's
 * for posted ucmsgs, since we never cancel them.  at shutdown we kill the
 * endpoint (this will terminate any pending recv's without having to bother
 * to cancel them).  once the endpoint is shut down, the memory used for
 * ucmsgs is safe to be freed.   ucmsgs are used to pass RMA put/get
 * management info between endpoints.   ucmsgs are fixed in size.
 */
struct na_psm_ucmsg {
    /* fixed-size arg slots; see NA_PSM_UCARG_* for slot assignments */
    uint64_t args[NA_PSM_UCMSG_NARGS];
};

/*
 * na_psm_subop: a low-level PSM operation that is part of a NA
 * op_id.  simple NA op_id operations like "send" or "recv" only have one
 * subop.  more complex operations like RMA "put" and "get" require
 * multiple subops to complete.
 */
struct na_psm_subop {
    psm_mq_req_t psm_handle;    /* handle to underlying psm op (NULL if idle) */
    psm_mq_status_t psm_status; /* valid @end, after psm_mq_test call */
    int subop;                  /* type of suboperation (NA_PSM_OP_* value) */
    struct na_psm_op_id *owner; /* op_id that owns us (back pointer) */
};

/*
 * na_psm_op_id: tracks the state for a NA operation.  we return an
 * opaque pointer to this structure up to mercury as a handle to our
 * operation.  each op_id has one or more subops (counted by "busy").
 * must be holding busy_lock to change busy and busyops queue.
 * RMA operations require multiple subops.  currently we allocate
 * them here, in line (could be moved to their own allocation to
 * save a bit of space, if necessary).
 *
 * we save a pointer to the buffer used with datasub in datasub_buf
 * so that we can recover it later (the ipeek/test interface doesn't
 * return it).  we need this in order to gain access to the header
 * added by na_psm_msg_init_unexpected() for non-control unexpected
 * messages (but we set datasub_buf for all op_id's, not just unexpected
 * messages -- could be useful for debugging).
 */
struct na_psm_op_id {
    int op;                    /* type of operation (NA_PSM_OP_* value) */
    struct na_psm_class *pcls; /* psm class we belong to */
    na_context_t *context;     /* na_context_t we belong to */
    int busy;                  /* +1 per active subop (lock/w busy_lock) */
    int completed;             /* op_id added to completion queue */
    int cancel;                /* op canceled */
    struct na_cb_completion_data completion_data; /* callback info here */
    LIST_ENTRY(na_psm_op_id) q;  /* class busyops list (lock/w busy_lock) */
    struct na_psm_subop datasub; /* data transfer subop */
    const void *datasub_buf;     /* buffer associated w/datasub (see above) */

    /* the following are only used for RMA put/get operations */
    hg_atomic_int32_t rma_refs;           /* rma complete when drop to 0 */
    struct na_psm_addr *initiator;        /* 'put' target only, for status */
    struct na_psm_subop ucsendsub;        /* ucmsg send subop */
    uint64_t ucsargs[NA_PSM_UCMSG_NARGS]; /* args buffer for ucsendsub */
    struct na_psm_subop putrcvsub;        /* put-only: status recv at end */
};

/*
 * na_psm_mem_handle: psm memory handle.  contains a unique token that
 * references a block of user memory.  the memory may be on the local
 * or remote system.   remote memory is accessed using put()/get().
 */
struct na_psm_mem_handle {
    int is_local;   /* is our memory on the local system? */
    uint64_t token; /* unique token for remote put()/get() ops */
};

/*
 * na_psm_local_mem_handle: psm memory handle wrapper structure for
 * a block of memory that is on the local system.  these are always
 * created with mem_handle_create() and include additional metadata
 * that is not sent to remote systems (e.g. when serializing and
 * sending bulk handles over the network).
 */
struct na_psm_local_mem_handle {
    struct na_psm_mem_handle handle;       /* embedded struct: must be first */
    void *base;                            /* pointer to our memory */
    size_t size;                           /* size of our memory */
    uint8_t attr;                          /* protection */
    LIST_ENTRY(na_psm_local_mem_handle) q; /* linkage (off class lhands) */
};

/*
 * na_psm_class: the top-level structure that contains the state for psm
 *
 * note: ipeek_lock is used to protect our progress routine from a threading
 * issue with the PSM API.  suppose you have 2 threads concurrently calling
 * progress:
 *
 *   thread1                        thread2
 *   ipeek(mq,&req,NULL)
 *                                  ipeek(mq,&req,NULL) << same req as thread1
 *   test(&req,&status)
 *                                  ERROR: thread1 already retired "req"
 *                                  test(&req,&status) << already freed req
 *
 * to avoid this problem we need the ipeek()/test() ops to atomically
 * remove "req" from the mq.   we use "ipeek_lock" to achieve this.
 * (lock ordering note: take ipeek_lock first, then busy_lock)
 */
struct na_psm_class {
    /* set once at init time, read-only afterwards */
    bool listen;             /* cached copy from init, not relevant to psm */
    psm_ep_t psm_ep;         /* my endpoint */
    psm_mq_t psm_mq;         /* matched queue for psm_ep */
    struct na_psm_addr self; /* my addressing information */
    uint64_t ext_tagbits;    /* extra epid-ish tag bits we set in send tags */
    /* progress params */
    int prog_peeks_per_try; /* #ipeeks to try before yield/sleep */
    int prog_just_yield;    /* don't sleep, just yield */

    hg_thread_mutex_t alist_lock;   /* address list lock */
    LIST_HEAD(, na_psm_addr) alist; /* address list (locked by above) */

    hg_thread_mutex_t busy_lock;       /* busy op list lock */
    LIST_HEAD(, na_psm_op_id) busyops; /* busy ops list (locked by above) */

    /* see lock ordering note above: ipeek_lock first, then busy_lock */
    hg_thread_mutex_t ipeek_lock; /* lock mq ipeek/test calls */

    hg_thread_mutex_t lhand_lock;                /* local handle lock */
    LIST_HEAD(, na_psm_local_mem_handle) lhands; /* local handles */
    uint32_t lhand_seq; /* sequence number for local handles */

    struct na_psm_ucmsg ucmsgs[NA_PSM_UCMSG_COUNT]; /* fixed pool of ucmsgs */
    hg_atomic_int32_t rma_seqno; /* used to generate psm rma_tags */
};

/******************************************************************************
 * local/static variables
 */

/* serialize access to psm_init() to be safe; psm_init_done flags one-time
 * library init so we only call psm_init() once per process */
static hg_thread_mutex_t psm_init_lock = HG_THREAD_MUTEX_INITIALIZER;
static int psm_init_done = 0;

/******************************************************************************
 * required forward declared prototypes
 */
static void
na_psm_addr_free(na_class_t *na_class, na_addr_t *addr);
static na_op_id_t *
na_psm_op_create(na_class_t *na_class, unsigned long flags);
static void
na_psm_op_destroy(na_class_t *na_class, na_op_id_t *op_id);

/******************************************************************************
 * helpful macros
 */

/*
 * convert sec to nsec.  some psm APIs take timeouts in nsec.
 * psm timeouts control how long the psm lib spins on something
 * before giving up.
 *
 * note: the psm library timeouts do not sleep, they spin in a poll loop
 */
#define SEC_TO_NSEC(X) ((X) * 1000000000LL)

/*
 * psm_enc64: break a uint64 up into two uint32s and encode each in network
 * byte order using htonl().  a work around the lack of a standard API for
 * encoding uint64s for portable network transport.
 */
#define psm_enc64(BUF, IN)                                                     \
    do {                                                                       \
        uint64_t _v = (uint64_t) (IN); /* evaluate IN exactly once */          \
        uint32_t _tmp[2];                                                      \
        _tmp[0] = htonl((uint32_t) (_v >> 32));                                \
        _tmp[1] = htonl((uint32_t) (_v & (0xffffffff)));                       \
        memcpy((BUF), _tmp, sizeof(_tmp));                                     \
    } while (0)

/*
 * psm_dec64: decode buffer encoded by psm_enc64() back into a uint64_t
 */
/* note: BUF and OUT are each evaluated exactly once */
#define psm_dec64(BUF, OUT)                                                    \
    do {                                                                       \
        uint32_t _w[2]; /* network-order halves as written by psm_enc64 */     \
        uint64_t _hi, _lo;                                                     \
        memcpy(_w, (BUF), sizeof(_w));                                         \
        _hi = ntohl(_w[0]);                                                    \
        _lo = ntohl(_w[1]);                                                    \
        (OUT) = (_hi << 32) | _lo;                                             \
    } while (0)

/******************************************************************************
 * helper functions
 */

/*
 * na_psm_psmlib_finalize: finalize the psmlib (done via atexit(3) interface)
 */
static void
na_psm_psmlib_finalize(void) /* (void): atexit handlers are void(*)(void) */
{
    psm_error_t perr;

    perr = psm_finalize();
    if (perr != PSM_OK) {
        NA_LOG_WARNING("psm finalize: %s", psm_error_get_string(perr));
    } else {
        NA_LOG_DEBUG("psm finalize: %s", psm_error_get_string(perr));
    }
}

/*
 * na_psm_psmlib_init: init underlying PSM library, honor psm_init_lock.
 * return true if we are ok.  we arrange to call psm_finalize()
 * at exit time, since PSM says you can only shutdown psm one time per proc.
 */
static bool
na_psm_psmlib_init(void) /* (void): empty parens are an old-style decl in C */
{
    int ver_major, ver_minor;
    psm_error_t perr;
    bool ret = true;

    hg_thread_mutex_lock(&psm_init_lock);
    if (psm_init_done == 0) {
        ver_major = PSM_VERNO_MAJOR;
        ver_minor = PSM_VERNO_MINOR;
        perr = psm_init(&ver_major, &ver_minor);
        if (perr != PSM_OK) {
            ret = false;
            NA_LOG_ERROR("psm init failed (%s)", psm_error_get_string(perr));
        } else {
            /* psm_finalize() may only be called once per process, so we
             * defer it to process exit rather than doing it at NA finalize */
            atexit(na_psm_psmlib_finalize);
            psm_init_done = 1;
            NA_LOG_DEBUG("psm_init version %d.%d", ver_major, ver_minor);
        }
    } else {
        /*
         * XXX: sigh, psm.h says:
         *
         *     [warning] Currently, PSM limits the user to calling psm_ep_open
         *     only once per process and subsequent calls will fail.  Multiple
         *     endpoints per process  will be enabled in a future release.
         *
         * so warn when this is attempted.
         */

        NA_LOG_WARNING("psm: multiple inits detected.  may not work");
    }
    hg_thread_mutex_unlock(&psm_init_lock);

    return ret;
}

/*
 * na_psm_release: plugin_callback function for psm op_id.  called
 * when we are done with an op_id to reset it.  this is a sanity check
 * that clears the completed flag.
 */
static void
na_psm_release(void *arg)
{
    struct na_psm_op_id *pop = (struct na_psm_op_id *) arg;

    if (pop) {
        if (pop->completed == 0) {
            NA_LOG_WARNING("op not done?");
        }
        pop->completed = 0;
        pop->cancel = 0;
        /* NA_LOG_DEBUG("op_id=%p", pop); */
    }
}

/*
 * na_psm_addr_lookup_epid: lookup an epid's address structure.  if
 * the epid currently does not have an address structure allocated for
 * it, then we attempt to add one using psm_ep_connect().  on success
 * we return an addr with an active reference that must be dropped when done.
 *
 * XXX: in an ideal world, we'd start an async connection request here
 * and queue anything waiting for it to complete for later processing.
 * unfortunately, PSM has no async connect API, so when we call
 * psm_ep_connect() we will spin in that call until the connection request
 * completes or fails... there is no way to avoid that.  we are going
 * to assume that we are on a local infiniband network that will quickly
 * resolve the sync connect call and hope that it does not hurt us too
 * much.
 */
static na_return_t
na_psm_addr_lookup_epid(struct na_psm_class *pc, psm_epid_t epid, int origin,
    struct na_psm_addr **addr)
{
    struct na_psm_addr *toadd, *naddr;
    int found;
    psm_error_t perr, perr2;
    psm_epaddr_t epaddr;

    /* maybe we already have looked this one up?  check the cache */
    toadd = NULL; /* only non-null if we malloc */
    found = 0;
    hg_thread_mutex_lock(&pc->alist_lock);
    LIST_FOREACH (naddr, &pc->alist, q) {
        if (naddr->epid == epid) {
            found++;
            /* take the caller's reference while still under alist_lock */
            hg_atomic_incr32(&naddr->nrefs);
            break;
        }
    }
    hg_thread_mutex_unlock(&pc->alist_lock);
    if (found)
        goto done;

    /*
     * not in cache.  we need to create a new na_psm_addr for this epid.
     * must make sync psm_ep_connect() call to get the epaddr.
     * note: connect is done without holding alist_lock, so another
     * thread can race us to add the same epid (handled below).
     */
    perr = psm_ep_connect(
        pc->psm_ep, 1, &epid, NULL, &perr2, &epaddr, SEC_TO_NSEC(5));
    if (perr != PSM_OK) {
        /* NOTE(review): all connect failures are mapped to NA_TIMEOUT
         * here, regardless of the underlying psm error code */
        NA_LOG_ERROR("connect %" PRIx64 " failed (%s)", epid,
            psm_error_get_string(perr));
        return NA_TIMEOUT;
    }

    /* malloc new na_psm_addr, fill it in, add to cache and return it */
    toadd = malloc(sizeof(*toadd));
    if (!toadd) {
        NA_LOG_ERROR("malloc fail");
        return NA_NOMEM;
    }
    toadd->pcls = pc;
    toadd->origin = origin;
    toadd->epid = epid;
    toadd->epaddr = epaddr;
    hg_atomic_set32(&toadd->nrefs, 1); /* caller's reference */

    /*
     * be careful, some other thread could have raced us and added
     * it to the alist already.  double check.
     */
    hg_thread_mutex_lock(&pc->alist_lock);
    found = 0;
    LIST_FOREACH (naddr, &pc->alist, q) {
        if (naddr->epid == epid) {
            found++;
            break;
        }
    }
    if (found) { /* lost the race: discard ours, ref the winner's entry */
        free(toadd);
        toadd = NULL;
        hg_atomic_incr32(&naddr->nrefs);
    } else {                               /* safe to add to alist */
        psm_epaddr_setctxt(epaddr, toadd); /* set psm bck ptr */
        naddr = toadd;
        LIST_INSERT_HEAD(&pc->alist, naddr, q);
    }
    hg_thread_mutex_unlock(&pc->alist_lock);

done:
    *addr = naddr; /* return addr pointer to caller */
    if (toadd)
        NA_LOG_DEBUG("alist added epid=%" PRIx64 " origin=%d", epid, origin);
    else
        NA_LOG_DEBUG("alist found epid=%" PRIx64, epid);
    return NA_SUCCESS;
}

/*
 * na_psm_opid_setbusy: set/adjust the busy value of an op_id.  if
 * v is less than 0, we add it to the current value.  if it is
 * >=0 then we set it to that value.  if busy transitions from 0
 * to non-zero, we add to busy list.  if busy drops to zero, we
 * remove from busy list.  returns new busy value.
 */
static int
na_psm_opid_setbusy(struct na_psm_class *pc, struct na_psm_op_id *pop, int v)
{
    int updated;

    hg_thread_mutex_lock(&pc->busy_lock);

    /* v >= 0 sets the value directly; v < 0 is a relative adjustment
     * that is clamped so busy never goes negative */
    if (v >= 0) {
        updated = v;
    } else {
        updated = pop->busy + v;
        if (updated < 0)
            updated = 0;
    }

    /* maintain busyops list membership on 0 <-> nonzero transitions */
    if (pop->busy == 0 && updated != 0)
        LIST_INSERT_HEAD(&pc->busyops, pop, q);
    else if (pop->busy != 0 && updated == 0)
        LIST_REMOVE(pop, q);

    pop->busy = updated;

    hg_thread_mutex_unlock(&pc->busy_lock);

    return updated;
}

/*
 * na_psm_msg_send: send a message.  this function combines the
 * unexpected and expected msg send paths into a single helper function
 * to reduce duplicate code.   you can tell the difference between expected
 * and unexpected sends by looking at the internal unexpected tag bit.
 * the caller must make sure we are the only one using or operating on
 * op_id until this function returns (so no need for a lock at this level).
 * (this means some other higher-level thread cannot cancel the op_id
 * while we are starting it... they have to wait until we complete.)
 */
static na_return_t
na_psm_msg_send(na_class_t *na_class, na_context_t *context, na_cb_t callback,
    void *arg, const void *buf, size_t buf_size, na_addr_t *dest_addr,
    uint64_t psmtag, na_op_id_t *op_id)
{
    struct na_psm_class *pc = na_class->plugin_class;
    struct na_psm_addr *dst = (struct na_psm_addr *) dest_addr;
    struct na_psm_op_id *op = (struct na_psm_op_id *) op_id;
    psm_error_t err;

    /* guard against a bad or still-in-flight op_id (should never fire) */
    if (op == NULL || op->busy || op->completed) {
        NA_LOG_ERROR("invalid op_id!?");
        return NA_INVALID_ARG;
    }
    /* caller has already sanity checked psmtag */

    /*
     * fill in the op_id.  pcls, plugin_callback, plugin_callback_args,
     * owner were already set by na_psm_op_create().  the unexpected tag
     * bit in psmtag tells us which callback type to report.
     */
    op->op = NA_PSM_OP_SEND;
    op->context = context;
    op->cancel = 0;
    op->completion_data.callback = callback;
    op->completion_data.callback_info.arg = arg;
    op->completion_data.callback_info.ret = NA_SUCCESS; /* to start */
    op->completion_data.callback_info.type =
        (psmtag & NA_PSM_TAG_UNEXPECTED) ? NA_CB_SEND_UNEXPECTED
                                         : NA_CB_SEND_EXPECTED;

    /* a plain send needs exactly one subop */
    /* note: psmtag not saved in op (currently not needed after send op) */
    op->datasub.subop = NA_PSM_OP_SEND;
    op->datasub.psm_handle = NULL;              /* to be safe */
    op->datasub.psm_status.error_code = PSM_OK; /* init this, to be safe */
    op->datasub_buf = buf;

    /* mark busy before handing the message to psm */
    na_psm_opid_setbusy(pc, op, 1);

    err = psm_mq_isend(pc->psm_mq, dst->epaddr, 0 /*flags*/, psmtag, buf,
        (uint32_t) buf_size, &op->datasub, &op->datasub.psm_handle);
    if (err != PSM_OK) {
        /* failed to send: unbusy and error out */
        na_psm_opid_setbusy(pc, op, 0);
        NA_LOG_ERROR("send failed: %s", psm_error_get_string(err));
        return NA_PROTOCOL_ERROR;
    }

    NA_LOG_DEBUG("sent tag=%" PRIx64 ", op_id=%p, h=%p, buf=%p, len=%d", psmtag,
        (void *) op, (void *) op->datasub.psm_handle, buf, (int) buf_size);

    return NA_SUCCESS;
}

/*
 * na_psm_msg_recv: post a buffer for receiving a message.  this function
 * combines the unexpected and expected msg recv paths into a single
 * helper function to reduce duplicate code.   we can tell the difference
 * between expected and unexpected recvs by looking at the internal
 * unexpected tag bit.  the caller must make sure we are the only one using
 * or operating on op_id until this function returns (so no need for a
 * lock at this level).  (this means some other higher-level thread
 * cannot cancel the op_id while we are starting it... they have to wait
 * until we complete.)
 */
static na_return_t
na_psm_msg_recv(na_class_t *na_class, na_context_t *context, na_cb_t callback,
    void *arg, void *buf, size_t buf_size, na_op_id_t *op_id, uint64_t psmtag,
    uint64_t psmtagsel)
{
    struct na_psm_class *pc = na_class->plugin_class;
    struct na_psm_op_id *op = (struct na_psm_op_id *) op_id;
    psm_error_t err;

    /* guard against a bad or still-in-flight op_id (should never fire) */
    if (op == NULL || op->busy || op->completed) {
        NA_LOG_ERROR("invalid op_id!?");
        return NA_INVALID_ARG;
    }

    /*
     * fill in the op_id.  pcls, plugin_callback, plugin_callback_args,
     * owner were already set by na_psm_op_create().  the unexpected tag
     * bit in psmtag tells us which callback type to report.
     */
    op->op = NA_PSM_OP_RECV;
    op->context = context;
    op->cancel = 0;
    op->completion_data.callback = callback;
    op->completion_data.callback_info.arg = arg;
    op->completion_data.callback_info.ret = NA_SUCCESS; /* to start */
    op->completion_data.callback_info.type =
        (psmtag & NA_PSM_TAG_UNEXPECTED) ? NA_CB_RECV_UNEXPECTED
                                         : NA_CB_RECV_EXPECTED;

    /* a plain recv needs exactly one subop */
    op->datasub.subop = NA_PSM_OP_RECV;
    op->datasub.psm_handle = NULL;              /* to be safe */
    op->datasub.psm_status.error_code = PSM_OK; /* init this, to be safe */
    op->datasub_buf = buf;

    /* mark busy before posting the buffer */
    na_psm_opid_setbusy(pc, op, 1);

    /*
     * call irecv to post the buffer for a future recv
     *
     * note that with psm we only receive messages with the tags we
     * ask for, so it isn't possible for the msg to have been pulled
     * out by progress with psm_mq_ipeek()/pmsg_mq_wait() prior to
     * us posting a buffer for it (socket-based NAs have to deal with
     * data that arrives before a recv is posted).
     */
    err = psm_mq_irecv(pc->psm_mq, psmtag, psmtagsel, 0 /*flags*/, buf,
        (uint32_t) buf_size, &op->datasub, &op->datasub.psm_handle);
    if (err != PSM_OK) {
        /* failed to irecv: unbusy and error out */
        na_psm_opid_setbusy(pc, op, 0);
        NA_LOG_ERROR("irecv failed: %s", psm_error_get_string(err));
        return NA_PROTOCOL_ERROR;
    }

    NA_LOG_DEBUG("tag=%" PRIx64 ", sel=%" PRIx64
                 ", op_id=%p, h=%p, buf=%p, len=%d",
        psmtag, psmtagsel, (void *) op, (void *) op->datasub.psm_handle, buf,
        (int) buf_size);

    return NA_SUCCESS;
}

/*
 * na_psm_epid_ext_tagbits: generate the extra epid-ish tag bits we
 * set for epid when it sends.  this bits are in the top half of the
 * 64 bit PSM tag.
 *
 * for isend/irecv's we provide 32 bits of tag for mercury itself.  to
 * reduce the chance that two different hosts happen to use the same tag
 * value, we "or" in some of the bits from the senders epid into the 64 bit
 * PSM tag.  the internal bit format of the epid is:
 *
 *  shmidx (8) | not used (24) | LID (16) | subctx (2) | ctx (6) | sl/type (8)
 *
 * the type is the hca hardware type and sl (so not unique).
 * we discard the sl/type and "not used" bits.   this will give us
 * the top 32 bits of the 64 bit PSM tag.  for non-control messages
 * we use the value from mercury for the lower 32 bits.  control messages
 * use the lower 32 bits for RMA status and RMA sequence number.
 * note that the shmidx does not get large enough to conflict with our
 * internal reserved bits in the tag.
 */
static uint64_t
na_psm_epid_ext_tagbits(psm_epid_t epid)
{
    uint64_t out;

    /* keep the shmidx (top 8 bits of the epid) in place */
    out = epid & 0xff00000000000000ULL;
    /* move lid/subctx/ctx (epid bits 8..31, skipping sl/type) up so they
     * land in bits 32..55 of the 64 bit PSM tag */
    out |= (epid & 0xffffff00ULL) << 24;

    return out;
}

/*
 * na_psm_msgtag_err: get error code from a control status msg_tag
 */
static na_return_t
na_psm_msgtag_err(uint64_t msgtag)
{
    uint64_t code = msgtag & NA_PSM_TAG_ECRMASK;

    if (code == NA_PSM_TAG_ST_EPERM)
        return NA_PERMISSION;
    if (code == NA_PSM_TAG_ST_ENOSPC)
        return NA_OVERFLOW;
    if (code == NA_PSM_TAG_ST_EINVAL)
        return NA_INVALID_ARG;

    return NA_SUCCESS; /* must be NA_PSM_TAG_ST_OK */
}

/*
 * na_psm_progress_op: progress an op_id given that one of its subops
 * just completed.  if op_idled is set, the last pending subop has
 * completed and the op has been removed from the busy list (we need
 * to add it to the completion queue).   RMA op_id's with multiple
 * subops use "rma_refs" to determine when the last one finishes.
 * returns 1 if we completed an op_id (i.e. called na_cb_completion_add()
 * on an op_id).
 */
static int
na_psm_progress_op(
    struct na_psm_op_id *pop, struct na_psm_subop *subop, int op_idled)
{
    na_return_t ret;
    uint64_t i64epid, tag, wanted;
    struct na_cb_info *cbi = &pop->completion_data.callback_info; /*shorthand*/
    int completed = 0;
    psm_error_t perr;
    psm_mq_req_t tmp_psmhand;

    switch (subop->subop) {
        /*
         * basic send subop completed.  since this op only has one subop,
         * we expect op_idled to be true.   we need to look for errors and
         * complete the request.
         */
        case NA_PSM_OP_SEND:
            if (!op_idled) {
                NA_LOG_ERROR("completed send didn't idle req");
                break; /* what else can we do if it isn't idle? */
            }
            if (subop->psm_status.error_code != PSM_OK) {
                NA_LOG_ERROR("send error: %s",
                    psm_error_get_string(subop->psm_status.error_code));
                if (cbi->ret == NA_SUCCESS)
                    cbi->ret = NA_PROTOCOL_ERROR;
            }
            pop->completed = 1;
            NA_LOG_DEBUG("done SEND op_id=%p, ret=%d", (void *) pop, cbi->ret);
            na_cb_completion_add(pop->context, &pop->completion_data);
            completed = 1;

            break;

        /*
         * basic recv subop completed.  since this op only has one subop,
         * we expect op_idled to be true.   we check for errors.  for
         * unexpected receives, we may need to create an address structure
         * for the remote end if we don't already have one.   then we can
         * complete the request.
         */
        case NA_PSM_OP_RECV:
            if (!op_idled) {
                NA_LOG_ERROR("completed recv didn't idle req");
                break; /* what else can we do if it isn't idle? */
            }
            if (subop->psm_status.error_code != PSM_OK) {

                /* got a PSM-level error? */
                NA_LOG_ERROR("recv error: %s",
                    psm_error_get_string(subop->psm_status.error_code));
                if (cbi->ret == NA_SUCCESS)
                    cbi->ret = NA_PROTOCOL_ERROR;

            } else if (pop->cancel) {

                /* user canceled it, so stop now */
                if (cbi->ret == NA_SUCCESS)
                    cbi->ret = NA_CANCELED;

            } else if (cbi->type == NA_CB_RECV_UNEXPECTED) {

                /* additional processing for unexpected recv */

                /* parse unexpected header to get address */
                if (subop->psm_status.nbytes < NA_PSM_ADDR_SERSIZE) {

                    NA_LOG_ERROR(
                        "short recv (%d), no epid", subop->psm_status.nbytes);
                    if (cbi->ret == NA_SUCCESS)
                        cbi->ret = NA_PROTOCOL_ERROR;

                } else {

                    /* if successful, caller must free addr when done with it */
                    psm_dec64(pop->datasub_buf, i64epid);
                    ret = na_psm_addr_lookup_epid(pop->pcls, i64epid,
                        PSM_ORG_RECV,
                        (struct na_psm_addr **) &cbi->info.recv_unexpected
                            .source);

                    if (ret != NA_SUCCESS && cbi->ret == NA_SUCCESS)
                        cbi->ret = ret;

                    cbi->info.recv_unexpected.actual_buf_size =
                        subop->psm_status.nbytes;
                    cbi->info.recv_unexpected.tag =
                        subop->psm_status.msg_tag & NA_PSM_USRTAG_MASK;
                }
            }

            /* sanity check lengths if no errors, shouldn't ever fire */
            if (cbi->ret == NA_SUCCESS &&
                subop->psm_status.nbytes < subop->psm_status.msg_length) {
                NA_LOG_WARNING("truncated recv %d<%d", subop->psm_status.nbytes,
                    subop->psm_status.msg_length);
            }

            pop->completed = 1;
            NA_LOG_DEBUG("done RECV op_id=%p, ret=%d", (void *) pop, cbi->ret);
            na_cb_completion_add(pop->context, &pop->completion_data);
            completed = 1;

            break;

        /*
         * "put" RMA op: initiator side sends
         *     - unexpected control msg send completed.
         *     - data send completed.
         */
        case NA_PSM_OP_PUTSNDCTL:
        case NA_PSM_OP_PUTSNDDATA:
            if (subop->psm_status.error_code != PSM_OK) {
                NA_LOG_ERROR("put %s error: %s",
                    (subop->subop == NA_PSM_OP_PUTSNDCTL) ? "sndctl"
                                                          : "snddata",
                    psm_error_get_string(subop->psm_status.error_code));
                if (cbi->ret == NA_SUCCESS)
                    cbi->ret = NA_PROTOCOL_ERROR;

                /* attempt to cancel the irecv if it is running */
                hg_thread_mutex_lock(&pop->pcls->ipeek_lock);
                if (pop->putrcvsub.psm_handle)
                    (void) psm_mq_cancel(&pop->putrcvsub.psm_handle);
                hg_thread_mutex_unlock(&pop->pcls->ipeek_lock);
            }

            if (hg_atomic_decr32(&pop->rma_refs) == 0) { /* !busy if true */
                pop->completed = 1;
                NA_LOG_DEBUG("done PUTSND%s op_id=%p, ret=%d",
                    (subop->subop == NA_PSM_OP_PUTSNDCTL) ? "CTL" : "DATA",
                    (void *) pop, cbi->ret);
                na_cb_completion_add(pop->context, &pop->completion_data);
                completed = 1;
            } else {
                NA_LOG_DEBUG("advance PUTSND%s op_id=%p, ret=%d",
                    (subop->subop == NA_PSM_OP_PUTSNDCTL) ? "CTL" : "DATA",
                    (void *) pop, cbi->ret);
            }
            break;

        /*
         * "put" RMA op: initiator side status receive
         */
        case NA_PSM_OP_PUTRCVSTS:
            if (subop->psm_status.error_code != PSM_OK) {
                NA_LOG_ERROR("put rcvsts error: %s",
                    psm_error_get_string(subop->psm_status.error_code));
                if (cbi->ret == NA_SUCCESS)
                    cbi->ret = NA_PROTOCOL_ERROR;
            } else if (cbi->ret == NA_SUCCESS) {

                /* extract user-level error code */
                cbi->ret = na_psm_msgtag_err(subop->psm_status.msg_tag);
            }

            if (hg_atomic_decr32(&pop->rma_refs) == 0) { /* !busy if true */
                pop->completed = 1;
                NA_LOG_DEBUG(
                    "done PUTRCVSTS op_id=%p, ret=%d", (void *) pop, cbi->ret);
                na_cb_completion_add(pop->context, &pop->completion_data);
                completed = 1;
            } else {
                NA_LOG_DEBUG("advance PUTRCVSTS op_id=%p, ret=%d", (void *) pop,
                    cbi->ret);
            }
            break;

        /*
         * "put" RMA op: target side data receive.  this happens if we
         * approved the put and issued the irecv() for it into our (the
         * target) memory.  since we are the target, there is no local
         * higher-level operation to complete... the op_id is internally
         * allocated.  we send an untracked status reply to complete.
         *
         *   note: PUTRCVDATA does not need or use pop->rma_refs, since
         *         there is only 1 subop (no races to worry about).  this
         *         is the only rma-related case that does not use rma_refs.
         */
        case NA_PSM_OP_PUTRCVDATA:

            tag = subop->psm_status.msg_tag & NA_PSM_TAG_ECSELECT;

            if (subop->psm_status.error_code != PSM_OK) {
                /* send back some sort of error... unlikely to ever happen? */
                tag |= NA_PSM_TAG_ST_EINVAL;

                NA_LOG_ERROR("put rcvdata error: %s",
                    psm_error_get_string(subop->psm_status.error_code));
            } else {
                tag |= NA_PSM_TAG_ST_OK;
            }

            /* send back status to initiator (untracked, no payload) */
            perr = psm_mq_isend(pop->pcls->psm_mq, pop->initiator->epaddr,
                0 /*flags*/, tag, NULL, 0, NULL, &tmp_psmhand);
            if (perr != PSM_OK) {
                /* nothing else we can do if status send failed... */
                /* report the isend error itself, not the recv status code */
                NA_LOG_ERROR("put rcvdata isend error: %s",
                    psm_error_get_string(perr));
            }

            /* drop ref to initiator address and clear it */
            na_psm_addr_free(
                (na_class_t *) pop->pcls, (na_addr_t *) pop->initiator);
            pop->initiator = NULL;

            /* op_id is done!  it is internal, so no na_cb_completion_add() call
             */
            if (!op_idled) { /* shouldn't happen */
                NA_LOG_ERROR("put rcvdata req wasn't idled");
            } else {
                NA_LOG_DEBUG("done PUTRCVDATA op_id=%p", (void *) pop);
                na_psm_op_destroy((na_class_t *) pop->pcls, (na_op_id_t *) pop);
            }
            break;

        /*
         * "get" RMA op: initiator side sends
         *     - unexpected control msg send completed.
         */
        case NA_PSM_OP_GETSNDCTL:
            if (subop->psm_status.error_code != PSM_OK) {
                NA_LOG_ERROR("get sndctl error: %s",
                    psm_error_get_string(subop->psm_status.error_code));
                if (cbi->ret == NA_SUCCESS)
                    cbi->ret = NA_PROTOCOL_ERROR;

                /* attempt to cancel the irecv if it is running */
                hg_thread_mutex_lock(&pop->pcls->ipeek_lock);
                if (pop->datasub.psm_handle)
                    (void) psm_mq_cancel(&pop->datasub.psm_handle);
                hg_thread_mutex_unlock(&pop->pcls->ipeek_lock);
            }

            if (hg_atomic_decr32(&pop->rma_refs) == 0) { /* !busy if true */
                pop->completed = 1;
                NA_LOG_DEBUG(
                    "done GETSNDCTL op_id=%p, ret=%d", (void *) pop, cbi->ret);
                na_cb_completion_add(pop->context, &pop->completion_data);
                completed = 1;
            } else {
                NA_LOG_DEBUG("advance GETSNDCTL op_id=%p, ret=%d", (void *) pop,
                    cbi->ret);
            }
            break;

        /*
         * "get" RMA op: initiator side recv
         *     - we either get back the data or a payloadless msg w/error code
         */
        case NA_PSM_OP_GETRCV:
            if (subop->psm_status.error_code != PSM_OK) {
                NA_LOG_ERROR("get getrcv error: %s",
                    psm_error_get_string(subop->psm_status.error_code));
                if (cbi->ret == NA_SUCCESS)
                    cbi->ret = NA_PROTOCOL_ERROR;
            } else if (cbi->ret == NA_SUCCESS) {

                /* extract user-level error code */
                cbi->ret = na_psm_msgtag_err(subop->psm_status.msg_tag);
                if (cbi->ret == NA_SUCCESS) {
                    psm_dec64(&pop->ucsargs[NA_PSM_UCARG_LENGTH], wanted);
                    if (wanted != subop->psm_status.nbytes) {
                        NA_LOG_WARNING("get mismatch "
                                       "want=%" PRId64 ", got=%" PRId32 "!",
                            wanted, subop->psm_status.nbytes);
                    }
                }
            }

            if (hg_atomic_decr32(&pop->rma_refs) == 0) { /* !busy if true */
                pop->completed = 1;
                NA_LOG_DEBUG(
                    "done GETRCV op_id=%p, ret=%d", (void *) pop, cbi->ret);
                na_cb_completion_add(pop->context, &pop->completion_data);
                completed = 1;
            } else {
                NA_LOG_DEBUG(
                    "advance GETRCV op_id=%p, ret=%d", (void *) pop, cbi->ret);
            }
            break;

        default:
            /* this should never happen */
            NA_LOG_ERROR("invalid subop %d!", subop->subop);
            break;
    }

    return completed;
}

/*
 * na_psm_progress_ucmsg: progress an unexpected control message we
 * just received.   currently this happens when we are a target of
 * an RMA put/get operation.
 */
static void
na_psm_progress_ucmsg(struct na_psm_class *pc, psm_mq_status_t *psmstat,
    struct na_psm_ucmsg *ucmsg)
{
    psm_error_t perr;
    psm_mq_req_t tmp; /* not needed */
    uint64_t op, rma_tag, status, initiator, token, offset, length;
    struct na_psm_addr *naddr;
    struct na_psm_local_mem_handle *lh;
    void *lh_base;
    size_t lh_size;
    int lh_attr;
    psm_mq_req_t tmp_psmhand;
    void *userdata;
    struct na_psm_op_id *pop;
    static int discard;

    /* validate the incoming control message before touching its fields */
    if (psmstat->error_code != PSM_OK) {
        NA_LOG_ERROR("rcv=%s", psm_error_get_string(psmstat->error_code));
        goto finish;
    }
    if (psmstat->nbytes < sizeof(*ucmsg)) { /* initiator goofed? */
        NA_LOG_ERROR("short rcv (%d < %zu)", psmstat->nbytes, sizeof(*ucmsg));
        goto finish;
    }
    op = psmstat->msg_tag & NA_PSM_TAG_ECRMASK;
    if (op != NA_PSM_TAG_OP_PUT && op != NA_PSM_TAG_OP_GET) {
        NA_LOG_ERROR("bad opcode %" PRIx64, op);
        goto finish;
    }
    rma_tag = psmstat->msg_tag & NA_PSM_TAG_ECSELECT;
    rma_tag = rma_tag & ~NA_PSM_TAG_UNEXPECTED; /* clear unexpected bit */
    status = NA_PSM_TAG_ST_OK;                  /* so far, so good... */

    /* decode the header */
    psm_dec64(&ucmsg->args[NA_PSM_UCARG_SENDER], initiator);
    memcpy(&token, &ucmsg->args[NA_PSM_UCARG_HANDLE], sizeof(token));
    psm_dec64(&ucmsg->args[NA_PSM_UCARG_OFFSET], offset);
    psm_dec64(&ucmsg->args[NA_PSM_UCARG_LENGTH], length);

    /* get the initiator's address structure (gains a reference) */
    if (na_psm_addr_lookup_epid(pc, initiator, PSM_ORG_RECV, &naddr) !=
        NA_SUCCESS) {
        NA_LOG_ERROR("lookup fail epid=%" PRIx64, initiator);
        goto finish;
    }

    /* use the token to find the target local memory handle being used */
    lh_base = NULL;
    hg_thread_mutex_lock(&pc->lhand_lock);
    /*
     * NOTE(review): this loop does not break on a match, so it always
     * scans the full list and the last matching entry wins.  presumably
     * tokens are unique so this only costs time -- confirm and add a
     * break if so.
     */
    LIST_FOREACH (lh, &pc->lhands, q) {
        if (lh->handle.token == token) {
            /* copy key bits out so we can drop lhand_lock */
            lh_base = lh->base;
            lh_size = lh->size;
            lh_attr = lh->attr;
        }
    }
    hg_thread_mutex_unlock(&pc->lhand_lock);

    /* lh_size/lh_attr are only read below when lh_base was set above */
    if (lh_base == NULL) {
        status = NA_PSM_TAG_ST_EINVAL;
        NA_LOG_WARNING("got invalid token %" PRIx64, token);
        goto send_error_now;
    }

    if (offset > lh_size || length > lh_size - offset) {
        status = NA_PSM_TAG_ST_ENOSPC;
        NA_LOG_WARNING("RMA offset overflow");
        goto send_error_now;
    }

    if ((op == NA_PSM_TAG_OP_PUT && lh_attr == NA_MEM_READ_ONLY) ||
        (op == NA_PSM_TAG_OP_GET && lh_attr == NA_MEM_WRITE_ONLY)) {
        status = NA_PSM_TAG_ST_EPERM;
        NA_LOG_WARNING("RMA permission error");
        goto send_error_now;
    }

    /* valid request, now we can generate the local pointer */
    userdata = (char *) lh_base + offset;

    /* 'get' can be completely handled here */
    if (op == NA_PSM_TAG_OP_GET) {

        /* this untracked send can complete the op */
        /* NOTE(review): length is truncated to 32 bits here -- assumes
         * RMA transfers never exceed 4GiB; confirm with callers */
        perr = psm_mq_isend(pc->psm_mq, naddr->epaddr, 0 /*flags*/, rma_tag,
            userdata, (uint32_t) length, NULL, &tmp_psmhand);

        if (perr != PSM_OK) {
            /* unlikely.. try sending an error */
            NA_LOG_ERROR("snd get data (%s)", psm_error_get_string(perr));
            status = NA_PSM_TAG_ST_EINVAL;
            goto send_error_now;
        } else {
            NA_LOG_DEBUG("get-target:epid=%" PRIx64 " tag=%" PRIx64
                         " p=%p len=%d handled",
                naddr->epid, rma_tag, userdata, (int) length);
        }

        /* drop address reference now that we've sent the data */
        na_psm_addr_free((na_class_t *) pc, (na_addr_t *) naddr);

        goto finish;
    }

    /*
     * 'put' is more complicated... we need to create an internal
     * op_id to wait for the inititator to send us the data.
     */
    pop = (struct na_psm_op_id *) na_psm_op_create((na_class_t *) pc, 0);
    if (!pop) {
        /* unlikely.. try sending an error */
        NA_LOG_ERROR("pop alloc failed");
        status = NA_PSM_TAG_ST_EINVAL;
        goto send_error_now;
    }
    pop->op = NA_PSM_OP_PUT_TARGET;
    pop->datasub.subop = NA_PSM_OP_PUTRCVDATA;
    pop->datasub_buf = userdata;
    pop->initiator = naddr; /* so we can send status later, holding ref */

    /* add to busyops and start the operation */
    na_psm_opid_setbusy(pc, pop, 1); /* the only subop for put recv */

    perr = psm_mq_irecv(pc->psm_mq, rma_tag, NA_PSM_TAG_ECSELECT, 0 /*flags*/,
        userdata, (uint32_t) length, &pop->datasub, &pop->datasub.psm_handle);

    if (perr != PSM_OK) {
        /* unlikely.. dump pop and try sending an error */
        na_psm_opid_setbusy(pc, pop, 0);
        na_psm_op_destroy((na_class_t *) pc, (na_op_id_t *) pop);
        NA_LOG_ERROR("pop irecv failed: %s", psm_error_get_string(perr));
        status = NA_PSM_TAG_ST_EINVAL;
        goto send_error_now;
    }

    NA_LOG_DEBUG("put-target:epid=%" PRIx64 " tag=%" PRIx64
                 " h=%p p=%p len=%d op_id=%p start",
        naddr->epid, rma_tag, (void *) pop->datasub.psm_handle, userdata,
        (int) length, (void *) pop);
    /*
     * done for now.   processing will resume in the NA_PSM_OP_PUTRCVDATA
     * case of na_psm_progress_op().  note that pop is holding the address
     * ref to initiator so we can send a reply later.
     */
    goto finish;

send_error_now:

    /*
     * for a put the initiator has already posted the data to PSM
     * and you can't cancel a send.   try and retire the initiator's
     * data send by posting a short receive to a discard buffer.
     * hopefully this will clear out the initiator op_id?
     */
    if (op == NA_PSM_TAG_OP_PUT) {

        /* this irecv is untracked */
        perr = psm_mq_irecv(pc->psm_mq, rma_tag, NA_PSM_TAG_ECSELECT,
            0 /*flags*/, &discard, sizeof(discard), NULL, &tmp_psmhand);

        if (perr != PSM_OK) {
            NA_LOG_WARNING("put discard err (%s)", psm_error_get_string(perr));
            /* keep going... */
        }
    }

    /* send back status to initiator now (untracked, no payload) */
    perr = psm_mq_isend(pc->psm_mq, naddr->epaddr, 0 /*flags*/,
        rma_tag | status, NULL, 0, NULL, &tmp_psmhand);

    if (perr != PSM_OK) {
        NA_LOG_ERROR("snd err (%s)", psm_error_get_string(perr));
        /* nothing else we can do... */
    }

    /* drop address reference now as well... */
    na_psm_addr_free((na_class_t *) pc, (na_addr_t *) naddr);

    /* fall through */

finish: /* repost the buffer for future ucmsgs */

    perr = psm_mq_irecv(pc->psm_mq, NA_PSM_TAG_UNEXPECTED | NA_PSM_TAG_CONTROL,
        NA_PSM_INTBIT_MASK, 0 /*flags*/, (void *) ucmsg, sizeof(*ucmsg),
        ucmsg /*context*/, &tmp);
    if (perr != PSM_OK) {
        /* should never happen ... */
        NA_LOG_ERROR("repost failed (%s)", psm_error_get_string(perr));
    }
}

/*
 * na_psm_progress: progress network communication.
 * note that PSM only provides the
 * polling-style psm_mq_ipeek() interface to check for progress, so if
 * we have other useful work we could be doing, we may want to yield or
 * sleep some between polls to avoid burning off CPU cycles in the
 * progress function that could be used elsewhere.
 */
static void
na_psm_progress(struct na_psm_class *pc, unsigned int *count_p)
{
    int completed, lcv, op_idled;
    struct na_psm_subop *subop;
    struct na_psm_ucmsg *ucmsg;
    psm_error_t perr;
    psm_mq_req_t psmreq, orig_psmreq;
    psm_mq_status_t psmstatus;
    struct na_psm_op_id *pop;

    /*
     * we may collect a subop, a ucmsg, or a NULL context (if we
     * complete an untracked PSM op).
     */
    completed = 0;
    subop = NULL;
    ucmsg = NULL;

    /*
     * hold ipeek lock to avoid ipeek/wait concurrency issue and
     * to prevent ops to be canceled while we are collecting them.
     */
    hg_thread_mutex_lock(&pc->ipeek_lock);
    for (lcv = 0; lcv < pc->prog_peeks_per_try; lcv++) {

        perr = psm_mq_ipeek(pc->psm_mq, &psmreq, NULL); /* << poll here */
        if (perr != PSM_OK) /* no psmreq available? */
            continue;

        /* test will NULL out psmreq, save a copy for handle sanity check */
        orig_psmreq = psmreq;

        /* collect req's status and retire it */
        perr = psm_mq_test(&psmreq, &psmstatus);
        if (perr != PSM_OK)
            continue; /* shouldn't happen: ipeek returns done req */

        if (psmstatus.context == NULL) /* untracked operation? */
            break;

        /* unexpected control message buffer? */
        /* context pointing into pc->ucmsgs[] identifies a control buffer */
        if (psmstatus.context >= (void *) &pc->ucmsgs[0] &&
            psmstatus.context <= (void *) &pc->ucmsgs[NA_PSM_UCMSG_COUNT - 1]) {
            ucmsg = psmstatus.context;
            break;
        }

        /* must be a subop! */
        subop = psmstatus.context;
        if (subop->psm_handle != orig_psmreq) {
            NA_LOG_ERROR("fail handle sanity check!");
            /* keep going w/this subop, what else can we do? */
        }
        subop->psm_handle = NULL; /* want this w/ipeek lock held */
        subop->psm_status = psmstatus;
        /* pop and op_idled are only read below when subop != NULL */
        pop = subop->owner;
        op_idled = (na_psm_opid_setbusy(pc, pop, -1) == 0);
        break;
    }
    hg_thread_mutex_unlock(&pc->ipeek_lock);

    /* dispatch outside the lock: ucmsg/subop handlers may call PSM again */
    if (ucmsg)
        na_psm_progress_ucmsg(pc, &psmstatus, ucmsg);
    else if (subop)
        completed = na_psm_progress_op(pop, subop, op_idled);

    if (!completed && pc->prog_just_yield)
        hg_thread_yield();

    if (count_p != NULL)
        *count_p = (unsigned int) completed;
}

/******************************************************************************
 * na_class_ops functions: exported to and called from the main mercury code
 */

/*
 * check_protocol: the "psm" class supports the "psm" protocol.
 * we can be HG_Init()'d with "psm+psm" ... plain "psm" may also
 * work (it wildcards the class name in the list of plugins, so
 * it picks the first class that supports "psm" protocol).
 */
static bool
na_psm_check_protocol(const char NA_UNUSED *protocol_name)
{
    /* only an exact match on our protocol name is accepted */
    return protocol_name != NULL && strcmp(protocol_name, NA_PSM_NAME) == 0;
}

/*
 * initialize: bring up underlying PSM lib, allocate and init
 * our state structure.
 */
static na_return_t
na_psm_initialize(
    na_class_t *na_class, const struct na_info NA_UNUSED *na_info, bool listen)
{
    bool bret;
    struct na_psm_class *pc;
    psm_ep_open_opts_t o_opts;
    psm_error_t perr, perr2;
    int lcv;

    bret = na_psm_psmlib_init(); /* bring up underlying PSM lib */
    if (bret != true)
        return NA_NOENTRY;

    /* allocate our state structure and hook it into the na_class */
    pc = malloc(sizeof(*pc));
    if (!pc) {
        NA_LOG_ERROR("na_psm_class malloc failed");
        return NA_NOMEM;
    }

    /* pc now allocated, must free it if we hit an error */
    memset(pc, 0, sizeof(*pc));

    pc->listen = listen; /* save a copy, psm doesn't use it though */

    /* open psm_ep_t endpoint */
    psm_ep_open_opts_get_defaults(&o_opts);
    perr =
        psm_ep_open(NA_PSM_DEFAULT_UUID, &o_opts, &pc->psm_ep, &pc->self.epid);
    if (perr != PSM_OK) {
        NA_LOG_ERROR("psm open failed (%s)", psm_error_get_string(perr));
        goto error;
    }

    /* open psm_mq_t matched queue */
    perr = psm_mq_init(pc->psm_ep, PSM_MQ_ORDERMASK_ALL, NULL, 0, &pc->psm_mq);
    if (perr != PSM_OK) {
        NA_LOG_ERROR("psm mqinit failed (%s)", psm_error_get_string(perr));
        goto error;
    }

    /* connect to ourself so that we have our epaddr struct pointer */
    perr = psm_ep_connect(pc->psm_ep, 1, &pc->self.epid, NULL, &perr2,
        &pc->self.epaddr, SEC_TO_NSEC(5));
    if (perr != PSM_OK) {
        NA_LOG_ERROR(
            "psm self ep_connect failed (%s)", psm_error_get_string(perr));
        goto error;
    }
    NA_LOG_DEBUG("up, my epid=%" PRIx64 ", my epaddr=%p", pc->self.epid,
        (void *) pc->self.epaddr);

    /* fill out the rest of our self address info and init address list */
    pc->self.pcls = pc;
    pc->self.origin = PSM_ORG_SELF;
    hg_atomic_set32(&pc->self.nrefs, 1);            /* ref ourself */
    psm_epaddr_setctxt(pc->self.epaddr, &pc->self); /* bckptr stored in PSM */

    /*
     * setup ext_tagbits (sanity check that our internal bits don't
     * conflict with this).
     */
    if (pc->self.epid & NA_PSM_INTBIT_MASK) {
        NA_LOG_ERROR("reserved bits set in epid %#" PRIx64,
            pc->self.epid & NA_PSM_INTBIT_MASK);
        goto error;
    }
    pc->ext_tagbits = na_psm_epid_ext_tagbits(pc->self.epid);

    /*
     * default progress params: XXX get config from where?
     * XXX: just randomly picking some values we can start with.
     */
    pc->prog_peeks_per_try = 1;
    pc->prog_just_yield = 1;

    hg_thread_mutex_init(&pc->alist_lock); /* XXX: ignoring ret val */
    LIST_INIT(&pc->alist);

    hg_thread_mutex_lock(&pc->alist_lock);
    LIST_INSERT_HEAD(&pc->alist, &pc->self, q);
    hg_thread_mutex_unlock(&pc->alist_lock);

    hg_thread_mutex_init(&pc->busy_lock); /* XXX: ignoring ret val */
    LIST_INIT(&pc->busyops);

    hg_thread_mutex_init(&pc->ipeek_lock); /* XXX: ignoring ret val */

    hg_thread_mutex_init(&pc->lhand_lock); /* XXX: ignoring ret val */
    LIST_INIT(&pc->lhands);
    pc->lhand_seq = (uint32_t) random();

    /* post unexpected control recv buffers */
    for (lcv = 0; lcv < NA_PSM_UCMSG_COUNT; lcv++) {
        psm_mq_req_t tmp; /* not needed */
        perr =
            psm_mq_irecv(pc->psm_mq, NA_PSM_TAG_UNEXPECTED | NA_PSM_TAG_CONTROL,
                NA_PSM_INTBIT_MASK, 0 /*flags*/, (void *) &pc->ucmsgs[lcv],
                sizeof(pc->ucmsgs[lcv]), &pc->ucmsgs[lcv] /*context*/, &tmp);
        if (perr != PSM_OK) {
            NA_LOG_ERROR("irecv ucmsg %d failed: %s", lcv,
                psm_error_get_string(perr)); /* shouldn't happen */
            goto error_locks; /* all four locks are live at this point */
        }
    }

    na_class->plugin_class = pc;

    /*
     * done!
     */
    return NA_SUCCESS;

    /*
     * staged error exits: only destroy mutexes that were actually
     * initialized (destroying an uninitialized mutex is undefined
     * behavior), and tear down in reverse order of setup.
     */
error_locks: /* reached only after all four mutex_init calls succeeded */
    hg_thread_mutex_destroy(&pc->lhand_lock);
    hg_thread_mutex_destroy(&pc->ipeek_lock);
    hg_thread_mutex_destroy(&pc->busy_lock);
    hg_thread_mutex_destroy(&pc->alist_lock);

error: /* already printed error */
    if (pc->psm_mq) {
        (void) psm_mq_finalize(pc->psm_mq);
    }
    if (pc->psm_ep) {
        (void) psm_ep_close(pc->psm_ep, PSM_EP_CLOSE_GRACEFUL, SEC_TO_NSEC(5));
    }
    free(pc);
    return NA_PROTONOSUPPORT;
}

/*
 * finalize: clear out our state structure and free it.   the
 * higher level HG_Context_destroy() should have cleared everything
 * out for us before calling (e.g. all op_ids should have been
 * released).   we return NA_BUSY if any op_ids are still active.
 * we assume that the user is not going to call progress on a NA
 * it is trying to finalize.
 */
static na_return_t
na_psm_finalize(na_class_t *na_class)
{
    struct na_psm_class *pc;
    struct na_psm_op_id *pop;
    int cnt;
    struct na_psm_addr *ap;
    struct na_psm_local_mem_handle *lp;
    psm_error_t perr;

    pc = na_class->plugin_class;
    if (pc == NULL) /* just to be safe, unlikely */
        return NA_SUCCESS;

    /* verify that higher-level code has cleared out the busyops */
    hg_thread_mutex_lock(&pc->busy_lock);
    cnt = 0;
    LIST_FOREACH (pop, &pc->busyops, q) {
        cnt++;
    }
    hg_thread_mutex_unlock(&pc->busy_lock);
    if (cnt) {
        /* bail without freeing anything -- caller may retry later */
        NA_LOG_ERROR("still have %d busy ops", cnt);
        return NA_BUSY;
    }
    hg_thread_mutex_destroy(&pc->busy_lock);

    /* dispose of address list */
    hg_thread_mutex_lock(&pc->alist_lock);
    while (!LIST_EMPTY(&pc->alist)) {
        ap = LIST_FIRST(&pc->alist);
        LIST_REMOVE(ap, q);
        /* self is not malloc()ed, do not try and free it */
        if (ap->origin != PSM_ORG_SELF) {
            NA_LOG_DEBUG("free addr epid=%" PRIx64, ap->epid);
            psm_epaddr_setctxt(ap->epaddr, NULL); /* clear psm bck ptr */
            free(ap);
        }
    }
    hg_thread_mutex_unlock(&pc->alist_lock);
    hg_thread_mutex_destroy(&pc->alist_lock);

    /* dispose of all local memory handles */
    /*
     * NOTE(review): entries are unlinked but not free()d here.  if
     * local mem handles are heap-allocated at creation time this
     * leaks them -- confirm ownership with mem_handle_create/free.
     */
    hg_thread_mutex_lock(&pc->lhand_lock);
    while (!LIST_EMPTY(&pc->lhands)) {
        lp = LIST_FIRST(&pc->lhands);
        LIST_REMOVE(lp, q);
    }
    hg_thread_mutex_unlock(&pc->lhand_lock);
    hg_thread_mutex_destroy(&pc->lhand_lock);

    /* shutdown our mq and ep. */
    perr = psm_mq_finalize(pc->psm_mq);
    if (perr != PSM_OK)
        NA_LOG_DEBUG("psm_mq_finalize: %s", psm_error_get_string(perr));
    perr = psm_ep_close(pc->psm_ep, PSM_EP_CLOSE_GRACEFUL, SEC_TO_NSEC(5));
    if (perr != PSM_OK) {
        NA_LOG_DEBUG("psm_ep_close: %s", psm_error_get_string(perr));
        if (perr == PSM_EP_CLOSE_TIMEOUT) /* force it if needed */
            perr = psm_ep_close(pc->psm_ep, PSM_EP_CLOSE_FORCE, 0);
    }

    /*
     * we leave the psmlib up, since we'll clear it at exit time.
     */
    free(pc);
    na_class->plugin_class = NULL;
    return NA_SUCCESS;
}

/*
 * op_create: allocate an op structure to track an op we are starting.
 * most of this will get filled out later.  caller owns the returned
 * op_id and releases it with na_psm_op_destroy().  returns NULL on
 * allocation failure.
 */
static na_op_id_t *
na_psm_op_create(na_class_t *na_class, unsigned long NA_UNUSED flags)
{
    struct na_psm_op_id *pop;

    pop = (struct na_psm_op_id *) malloc(sizeof(*pop)); /* XXX: pool? */
    if (!pop) {
        NA_LOG_ERROR("malloc fail");
        return NULL;
    }
    memset(pop, 0, sizeof(*pop));

    /* we can set these once here, they never change */
    /* note: na_class IS used here, so it must not be tagged NA_UNUSED */
    pop->pcls = na_class->plugin_class;
    pop->completion_data.plugin_callback = na_psm_release;
    pop->completion_data.plugin_callback_args = pop;
    pop->datasub.owner = pop->ucsendsub.owner = pop->putrcvsub.owner = pop;
    hg_atomic_set32(&pop->rma_refs, 0);
    return (na_op_id_t *) pop; /* cast to opaque struct ptr */
}

/*
 * op_destroy: free an idle op_id.  an op that is still busy, already
 * queued for completion, or holding rma references is left alone
 * (that would be a caller bug).
 */
static void
na_psm_op_destroy(na_class_t NA_UNUSED *na_class, na_op_id_t *op_id)
{
    struct na_psm_op_id *pop = (struct na_psm_op_id *) op_id;
    int refs = hg_atomic_get32(&pop->rma_refs);

    if (!pop->busy && !pop->completed && refs == 0) {
        free(pop); /* XXX:pool? */
        return;
    }

    /* this should never happen.  raise an error and don't free */
    NA_LOG_ERROR("op active (busy=%d,comp=%d,rma=%d)", pop->busy,
        pop->completed, refs);
}

/*
 * addr_lookup: look up an address string and return an address structure.
 */
static na_return_t
na_psm_addr_lookup(na_class_t *na_class, const char *name, na_addr_t **addr)
{
    struct na_psm_class *pc = na_class->plugin_class;
    uint64_t i64epid;

    /* address strings look like "<class>://<epid-in-hex>" */
    if (sscanf(name, "%*[^:]://%" SCNx64, &i64epid) != 1) {
        NA_LOG_ERROR("fail: %s", name);
        return NA_INVALID_ARG;
    }

    return na_psm_addr_lookup_epid(pc, (psm_epid_t) i64epid, PSM_ORG_LOOKUP,
        (struct na_psm_addr **) addr);
}

/*
 * addr_free: drop reference on an address
 */
static void
na_psm_addr_free(na_class_t NA_UNUSED *na_class, na_addr_t *addr)
{
    struct na_psm_addr *naddr = (struct na_psm_addr *) addr;

    /*
     * psm does not have a disconnect API to undo the psm_ep_connect(),
     * so the psm-level epaddr structures never go away.  there is not
     * much point freeing stuff off the alist.   we just drop the
     * reference; even at zero refs the naddr structure stays around.
     */
    (void) hg_atomic_decr32(&naddr->nrefs);
}

/*
 * addr_self: return our local address.
 */
static na_return_t
na_psm_addr_self(na_class_t *na_class, na_addr_t **addr)
{
    struct na_psm_class *pc = na_class->plugin_class;

    /*
     * the self address is embedded in the class structure, so we
     * simply take a new reference on it and hand it back.
     */
    hg_atomic_incr32(&pc->self.nrefs);
    *addr = (na_addr_t *) &pc->self;
    return NA_SUCCESS;
}

/*
 * addr_dup: duplicate an address by taking another reference on it.
 */
static na_return_t
na_psm_addr_dup(
    na_class_t NA_UNUSED *na_class, na_addr_t *addr, na_addr_t **new_addr)
{
    struct na_psm_addr *src = (struct na_psm_addr *) addr;

    /* caller already holds a ref to src, so bumping the count is safe */
    hg_atomic_incr32(&src->nrefs);
    *new_addr = (na_addr_t *) src;
    return NA_SUCCESS;
}

/*
 * addr_cmp: two addresses are equal iff their psm endpoint ids match.
 */
static bool
na_psm_addr_cmp(
    na_class_t NA_UNUSED *na_class, na_addr_t *addr1, na_addr_t *addr2)
{
    return ((struct na_psm_addr *) addr1)->epid ==
           ((struct na_psm_addr *) addr2)->epid;
}

/*
 * is_self: is this address us?  our own address is tagged with
 * origin PSM_ORG_SELF at init time, so just check that field.
 */
static bool
na_psm_addr_is_self(na_class_t NA_UNUSED *na_class, na_addr_t *addr)
{
    struct na_psm_addr *naddr = (struct na_psm_addr *) addr;

    return naddr->origin == PSM_ORG_SELF;
}

/*
 * addr_to_string: render the address as "<proto>://<hex-epid>".
 * if buf is NULL only *buf_size is updated (query mode).  on return
 * *buf_size holds the number of bytes required, including the NUL.
 */
static na_return_t
na_psm_addr_to_string(na_class_t NA_UNUSED *na_class, char *buf,
    size_t *buf_size, na_addr_t *addr)
{
    struct na_psm_addr *naddr = (struct na_psm_addr *) addr;
    char epidstr[32];
    size_t need;

    snprintf(epidstr, sizeof(epidstr), NA_PSM_NAME "://%" PRIx64, naddr->epid);
    need = strlen(epidstr);

    if (buf != NULL) {
        if (need >= *buf_size)
            return NA_MSGSIZE; /* caller's buffer too small */
        memcpy(buf, epidstr, need + 1); /* include trailing NUL */
    }
    *buf_size = need + 1;

    return NA_SUCCESS;
}

/*
 * addr_get_serialize_size: get number of bytes needed to serialize an addr.
 * this is used when attaching an hg_addr_t to an HG bulk descriptor using
 * HG_Bulk_bind(). (e.g. when sending a bulk descriptor to a server
 * using an intermediate server in the middle of the chain.)
 */
static size_t
na_psm_addr_get_serialize_size(
    na_class_t NA_UNUSED *na_class, na_addr_t NA_UNUSED *addr)
{
    /* fixed size: the serialized form is just the 64-bit epid */
    return NA_PSM_ADDR_SERSIZE;
}

/*
 * addr_serialize: serialize the address (epid) into a caller-provided
 * buffer of at least NA_PSM_ADDR_SERSIZE bytes.  returns NA_OVERFLOW
 * if the buffer is too small.
 */
static na_return_t
na_psm_addr_serialize(
    na_class_t NA_UNUSED *na_class, void *buf, size_t buf_size, na_addr_t *addr)
{
    struct na_psm_addr *naddr = (struct na_psm_addr *) addr;

    if (buf_size < NA_PSM_ADDR_SERSIZE) {
        /* %zu: buf_size is a size_t (PRId64 was a type mismatch) */
        NA_LOG_ERROR("bufsiz err %zu", buf_size);
        return (NA_OVERFLOW);
    }

    /* fixed byte order so the epid survives cross-host transport */
    psm_enc64(buf, naddr->epid);
    return NA_SUCCESS;
}

/*
 * addr_deserialize: undo epid serialization.  on success caller
 * is holding a reference to the address.
 */
static na_return_t
na_psm_addr_deserialize(na_class_t *na_class, na_addr_t **addr, const void *buf,
    size_t buf_size, uint64_t NA_UNUSED flags)
{
    uint64_t i64epid;
    na_return_t ret;

    if (buf_size < NA_PSM_ADDR_SERSIZE) {
        /* %zu: buf_size is a size_t (PRId64 was a type mismatch) */
        NA_LOG_ERROR("bufsiz err %zu", buf_size);
        return (NA_OVERFLOW);
    }
    psm_dec64(buf, i64epid);

    /* lookup bumps the refcount on the returned address */
    ret = na_psm_addr_lookup_epid((struct na_psm_class *) na_class, i64epid,
        PSM_ORG_DESER, (struct na_psm_addr **) addr);

    return ret;
}

/*
 * get_max_unexpected_size: compile-time limit for unexpected messages.
 */
static size_t
na_psm_msg_get_max_unexpected_size(const na_class_t NA_UNUSED *na_class)
{
    return NA_PSM_MAX_UNEXPECTED;
}

/*
 * get_max_expected_size: compile-time limit for expected messages.
 */
static size_t
na_psm_msg_get_max_expected_size(const na_class_t NA_UNUSED *na_class)
{
    return NA_PSM_MAX_EXPECTED;
}

/*
 * msg_get_unexpected_header_size: upper-level mercury code reserves
 * this much space in the buffer for unexpected send operations that
 * it generates (the default value is 0).  as the psm_mq_ipeek()/psm_mq_test()
 * interface does not tell us what epid recv'd data comes in on, we
 * add it to the message using this header in na_psm_msg_init_unexpected().
 *
 * note: unexpected NA_PSM_TAG_CONTROL messages also include the epid
 * in their data, but that is done via ucsargs[] rather than this
 * mechanism.
 */
static size_t
na_psm_msg_get_unexpected_header_size(const na_class_t NA_UNUSED *na_class)
{
    /* just enough room for the sender's epid */
    return sizeof(psm_epid_t);
}

/*
 * msg_get_max_tag: largest tag value mercury uses (then it loops back
 * around).  we reserve the top 32 bits of the PSM tag for internal use.
 * we allow mercury to use all of the 32 lower bits of the PSM tag
 * (unless NA_TAG_MAX limits it).
 * note: mercury na_tag_t is currently 32 bits, where psm tag is 64 bits.
 */
static na_tag_t
na_psm_msg_get_max_tag(const na_class_t NA_UNUSED *na_class)
{
    na_tag_t limit = NA_TAG_MAX;

    /* clamp to the user-tag bits we leave available to mercury */
    if (limit >= NA_PSM_USRTAG_MASK)
        limit = NA_PSM_USRTAG_MASK;
    return limit;
}

/*
 * msg_init_unexpected: this adds the source epid as a header to an
 * unexpected message (pairs with na_psm_msg_get_unexpected_header_size()).
 * we will pull this out of arriving unexpected messages in the progress
 * path.  higher-level mercury receive code knows the header's size
 * and will skip over it.   (note: this is only for normal unexpected
 * messages, not messages with NA_PSM_TAG_CONTROL set.)
 */
static na_return_t
na_psm_msg_init_unexpected(na_class_t *na_class, void *buf, size_t buf_size)
{
    struct na_psm_class *pc;

    pc = na_class->plugin_class;

    if (buf_size < sizeof(pc->self.epid)) {
        /* %zu: buf_size is a size_t (PRId64 was a type mismatch) */
        NA_LOG_ERROR("buf_size too small %zu", buf_size);
        return NA_OVERFLOW;
    }

    /* fixed byte order so the epid survives cross-host transport */
    psm_enc64(buf, pc->self.epid);

    return NA_SUCCESS;
}

/*
 * msg_send_unexpected: send an unexpected message over psm.
 * this is a wrapper for the internal na_psm_msg_send() call.
 */
static na_return_t
na_psm_msg_send_unexpected(na_class_t *na_class, na_context_t *context,
    na_cb_t callback, void *arg, const void *buf, size_t buf_size,
    void NA_UNUSED *plugin_data, na_addr_t *dest_addr,
    uint8_t NA_UNUSED dest_id, na_tag_t tag, na_op_id_t *op_id)
{
    struct na_psm_class *pc = na_class->plugin_class;
    uint64_t psmtag;

    /*
     * "tag" is already bounded by na_psm_msg_get_max_tag(), so OR-ing
     * in the internal bits cannot clobber the user bits.
     */
    psmtag = NA_PSM_TAG_UNEXPECTED | pc->ext_tagbits | tag;

    return na_psm_msg_send(na_class, context, callback, arg, buf, buf_size,
        dest_addr, psmtag, op_id);
}

/*
 * msg_recv_unexpected: post an unexpected recv buffer.  any msg
 * with 'unexpected' set and 'control' clear is a match.
 * wrapper for na_psm_msg_recv().
 */
static na_return_t
na_psm_msg_recv_unexpected(na_class_t *na_class, na_context_t *context,
    na_cb_t callback, void *arg, void *buf, size_t buf_size,
    void NA_UNUSED *plugin_data, na_op_id_t *op_id)
{
    /* match on the unexpected bit, filtering only the internal bits */
    uint64_t psmtag = NA_PSM_TAG_UNEXPECTED;
    uint64_t psmtagsel = NA_PSM_INTBIT_MASK;

    return na_psm_msg_recv(na_class, context, callback, arg, buf, buf_size,
        op_id, psmtag, psmtagsel);
}

/*
 * msg_send_expected: send an expected message over psm.
 * this is a wrapper for the internal na_psm_msg_send() call.
 */
static na_return_t
na_psm_msg_send_expected(na_class_t *na_class, na_context_t *context,
    na_cb_t callback, void *arg, const void *buf, size_t buf_size,
    void NA_UNUSED *plugin_data, na_addr_t *dest_addr,
    uint8_t NA_UNUSED dest_id, na_tag_t tag, na_op_id_t *op_id)
{
    struct na_psm_class *pc = na_class->plugin_class;
    uint64_t psmtag;

    /*
     * "tag" is already bounded by na_psm_msg_get_max_tag(), so OR-ing
     * in the internal bits cannot clobber the user bits.  no
     * NA_PSM_TAG_UNEXPECTED bit here (this is an expected msg).
     */
    psmtag = pc->ext_tagbits | tag;

    return na_psm_msg_send(na_class, context, callback, arg, buf, buf_size,
        dest_addr, psmtag, op_id);
}

/*
 * msg_recv_expected: post a recv buffer for an expected message from
 * a known source.  wrapper for na_psm_msg_recv().
 */
static na_return_t
na_psm_msg_recv_expected(na_class_t *na_class, na_context_t *context,
    na_cb_t callback, void *arg, void *buf, size_t buf_size,
    void NA_UNUSED *plugin_data, na_addr_t *source_addr,
    uint8_t NA_UNUSED source_id, na_tag_t tag, na_op_id_t *op_id)
{
    struct na_psm_addr *src = (struct na_psm_addr *) source_addr;
    uint64_t psmtag;

    /*
     * "tag" is already bounded by na_psm_msg_get_max_tag(), so OR-ing
     * in the internal bits (derived from the sender's epid) cannot
     * clobber the user bits.
     */
    psmtag = na_psm_epid_ext_tagbits(src->epid) | tag;

    /* use every tag bit for filtering */
    return na_psm_msg_recv(na_class, context, callback, arg, buf, buf_size,
        op_id, psmtag, ~((uint64_t) 0));
}

/*
 * mem_handle_create: create a local memory handle (called from
 * HG_Bulk_create).  the handle is tracked on the class lhands list
 * and identified by a token that is opaque to remote peers.
 */
static na_return_t
na_psm_mem_handle_create(na_class_t *na_class, void *buf, size_t buf_size,
    unsigned long flags, na_mem_handle_t **mem_handle)
{
    struct na_psm_class *pc = na_class->plugin_class;
    struct na_psm_local_mem_handle *lmh;
    uint64_t seq;

    lmh = malloc(sizeof(*lmh));
    if (lmh == NULL) {
        NA_LOG_ERROR("malloc failed");
        return NA_NOMEM;
    }

    /* the generic handle is embedded at the front of the local handle */
    lmh->handle.is_local = 1;
    /* handle.token assigned below once we hold the lhand_lock */
    lmh->base = buf;
    lmh->size = buf_size;
    lmh->attr = flags & 0xff;

    /* token = <sequence number> : <random>, all under the lhand_lock */
    hg_thread_mutex_lock(&pc->lhand_lock);
    seq = pc->lhand_seq++;
    lmh->handle.token = (seq << 32) | (random() & 0xffffffff);
    LIST_INSERT_HEAD(&pc->lhands, lmh, q);
    hg_thread_mutex_unlock(&pc->lhand_lock);

    NA_LOG_DEBUG("mh=%p (buf=%p,sz=%d,at=%lu,tok=%" PRIx64 ")", (void *) lmh,
        buf, (int) buf_size, flags, lmh->handle.token);
    *mem_handle = (na_mem_handle_t *) lmh;
    return NA_SUCCESS;
}

/*
 * mem_handle_free: free a memory handle (may or may not be local).
 */
static void
na_psm_mem_handle_free(na_class_t *na_class, na_mem_handle_t *mem_handle)
{
    struct na_psm_class *pc = na_class->plugin_class;
    struct na_psm_mem_handle *mh = (struct na_psm_mem_handle *) mem_handle;

    /* local handles must come off the class lhands list before the free */
    if (mh->is_local) {
        struct na_psm_local_mem_handle *lmh =
            (struct na_psm_local_mem_handle *) mh; /* embedded, so safe */
        hg_thread_mutex_lock(&pc->lhand_lock);
        LIST_REMOVE(lmh, q);
        hg_thread_mutex_unlock(&pc->lhand_lock);
    }

    NA_LOG_DEBUG("token=%" PRIx64 ", loc=%d", mh->token, mh->is_local);
    free(mh);
}

/*
 * mem_handle_get_serialize_size: get size needed to serialize the handle.
 * only the opaque token travels over the wire.
 */
static size_t
na_psm_mem_handle_get_serialize_size(
    na_class_t NA_UNUSED *na_class, na_mem_handle_t NA_UNUSED *mem_handle)
{
    return sizeof(((struct na_psm_mem_handle *) 0)->token);
}

/*
 * mem_handle_serialize: serialize a memory handle for sending to a remote.
 * only the token is sent; the remote hands it back in RMA control msgs.
 */
static na_return_t
na_psm_mem_handle_serialize(na_class_t NA_UNUSED *na_class, void *buf,
    size_t buf_size, na_mem_handle_t *mem_handle)
{
    struct na_psm_mem_handle *mh;
    mh = (struct na_psm_mem_handle *) mem_handle;

    if (buf_size < sizeof(mh->token)) {
        /* %zu: buf_size is a size_t (PRId64 was a type mismatch) */
        NA_LOG_ERROR("bad size %zu", buf_size);
        return NA_MSGSIZE;
    }

    /* no byte order issue, remote treats token as an opaque value */
    memcpy(buf, &mh->token, sizeof(mh->token));
    return NA_SUCCESS;
}

/*
 * mem_handle_deserialize: deserialize a non-local memory handle.
 * note: local memory handles are created by na_psm_mem_handle_create(),
 * not this function.  caller owns the returned handle and frees it
 * with na_psm_mem_handle_free().
 */
static na_return_t
na_psm_mem_handle_deserialize(na_class_t NA_UNUSED *na_class,
    na_mem_handle_t **mem_handle, const void *buf, size_t buf_size)
{
    struct na_psm_mem_handle *mh;

    if (buf_size < sizeof(mh->token)) {
        /* %zu: buf_size is a size_t (PRId64 was a type mismatch) */
        NA_LOG_ERROR("bad size %zu", buf_size);
        return NA_MSGSIZE;
    }

    mh = (struct na_psm_mem_handle *) malloc(sizeof(*mh));
    if (!mh) {
        NA_LOG_ERROR("malloc failed");
        return NA_NOMEM;
    }
    mh->is_local = 0;
    /* token stays opaque here; it only has meaning back on its owner */
    memcpy(&mh->token, buf, sizeof(mh->token));

    NA_LOG_DEBUG("rawtoken=%" PRIx64, mh->token);
    *mem_handle = (na_mem_handle_t *) mh;

    return NA_SUCCESS;
}

/*
 * na_psm_put: RMA "put" operation.   PSM does not give us an RMA API.
 * instead, we build RMA on top of the PSM isend/irecv operations
 * using internal control messages.   we assume that an isend operation
 * with a sufficiently large buffer will cause PSM to internally use
 * RMA (e.g. see ips_proto_mq_isend() in PSM ptl_ips/ips_proto_mq.c and
 * note how it chooses different transfer strategies based on the value
 * of "len").
 *
 * a put is built from 3 subops: (1) an unexpected control msg telling
 * the target what we want to write, (2) an expected send carrying the
 * data, and (3) an expected 0-byte recv for the target's status reply.
 * the op only completes when all 3 subops have finished.
 */
static na_return_t
na_psm_put(na_class_t *na_class, na_context_t *context, na_cb_t callback,
    void *arg, na_mem_handle_t *local_mem_handle, na_offset_t local_offset,
    na_mem_handle_t *remote_mem_handle, na_offset_t remote_offset,
    size_t length, na_addr_t *remote_addr, uint8_t NA_UNUSED remote_id,
    na_op_id_t *op_id)
{
    struct na_psm_class *pc;
    struct na_psm_addr *naddr;
    struct na_psm_op_id *pop;
    struct na_psm_local_mem_handle *lochand;
    struct na_psm_mem_handle *remhand;
    void *userdata;
    uint64_t rma_tag;
    psm_error_t perr;
    static uint32_t dummy = 0; /* shared dummy buffer for irecv */

    /* extract psm-specific struct pointers from args */
    pc = na_class->plugin_class;
    naddr = (struct na_psm_addr *) remote_addr; /* i.e. the target */
    pop = (struct na_psm_op_id *) op_id; /* caller did na_psm_op_create() */
    lochand = (struct na_psm_local_mem_handle *) local_mem_handle;
    remhand = (struct na_psm_mem_handle *) remote_mem_handle;

    /* sanity checks, should never fire */
    if (pop == NULL || pop->busy || pop->completed ||
        hg_atomic_get32(&pop->rma_refs) != 0) {
        NA_LOG_ERROR("invalid op_id!?");
        return NA_INVALID_ARG;
    }
    if (!lochand->handle.is_local) {
        NA_LOG_ERROR("provided local_mem_handle isn't local");
        return NA_INVALID_ARG;
    }
    /* psm_mq_isend takes a uint32_t length */
    if (length > UINT32_MAX) {
        NA_LOG_ERROR("length cannot exceed %" PRIu32 " bytes", UINT32_MAX);
        return NA_MSGSIZE;
    }
    /* we can always read our local memory, so lochand->attr is ok */
    if (local_offset >= lochand->size ||
        length > lochand->size - local_offset) {
        NA_LOG_ERROR("local range err.  size=%" PRId64 ", off=%" PRId64
                     ",len=%" PRId64,
            lochand->size, local_offset, length);
        return NA_OVERFLOW;
    }

    /* userdata is starting point of the local buffer */
    userdata = (char *) lochand->base + local_offset;

    /* rma_tag for this put operation (per-op sequence number in low bits) */
    rma_tag = NA_PSM_TAG_CONTROL | pc->ext_tagbits |
              (hg_atomic_incr32(&pc->rma_seqno) & NA_PSM_TAG_ECSEQMASK);

    /*
     * fill in the pop.  pcls, plugin_callback, plugin_callback_args, owner
     * have already been set by the psm op create function.
     */
    pop->op = NA_PSM_OP_PUT;
    pop->context = context;
    pop->cancel = 0;
    pop->completion_data.callback_info.arg = arg;
    pop->completion_data.callback_info.type = NA_CB_PUT;
    pop->completion_data.callback_info.ret = NA_SUCCESS; /* to start */
    pop->completion_data.callback = callback;

    /* we'll need all 3 subops for a put, init err and handle to be safe */

    /*
     * put/get RMA ops have more than one subop.  we use rma_refs to
     * track the subops and only complete the op when rma_refs drops
     * to zero.  this is different than pop->busy, since that will
     * drop to zero when we remove the op_id from the busy list.
     * busy list removal happens before we finish processing (so
     * if we have concurrent progress calls running, we could have
     * a race if we used pop->busy to decide when to complete).
     * simple send/recv op_id's don't need this, since they only
     * have one subop (you are done when that subop is done).
     */
    hg_atomic_set32(&pop->rma_refs, 3);

    /* unexpected control msg send: tell target we want to 'put' */
    pop->ucsendsub.psm_status.error_code = PSM_OK;
    pop->ucsendsub.subop = NA_PSM_OP_PUTSNDCTL;
    pop->ucsendsub.psm_handle = NULL;

    /* need to setup control msg args too... */
    psm_enc64(&pop->ucsargs[NA_PSM_UCARG_SENDER], pc->self.epid);
    memcpy(&pop->ucsargs[NA_PSM_UCARG_HANDLE], &remhand->token,
        sizeof(remhand->token)); /* opaque to target */
    psm_enc64(&pop->ucsargs[NA_PSM_UCARG_OFFSET], remote_offset);
    psm_enc64(&pop->ucsargs[NA_PSM_UCARG_LENGTH], length);

    /* expected control msg send: the data for 'put' */
    pop->datasub.psm_status.error_code = PSM_OK;
    pop->datasub.subop = NA_PSM_OP_PUTSNDDATA;
    pop->datasub.psm_handle = NULL;

    /* expected control msg recv; return status for put (no payload) */
    pop->putrcvsub.psm_status.error_code = PSM_OK;
    pop->putrcvsub.subop = NA_PSM_OP_PUTRCVSTS;
    pop->putrcvsub.psm_handle = NULL;
    pop->datasub_buf = userdata;

    /* put it on the busy list, then we can start the subops */
    na_psm_opid_setbusy(pc, pop, 3); /* 3 subops for a put */

    /* subop 1: queue unexpected control message for sending */
    perr = psm_mq_isend(pc->psm_mq, naddr->epaddr, 0 /*flags*/,
        NA_PSM_TAG_UNEXPECTED | NA_PSM_TAG_OP_PUT | rma_tag, pop->ucsargs,
        sizeof(pop->ucsargs), &pop->ucsendsub, &pop->ucsendsub.psm_handle);

    if (perr != PSM_OK) {
        /* nothing was started, so we can fully unwind and fail here */
        na_psm_opid_setbusy(pc, pop, 0);    /* unbusy */
        hg_atomic_set32(&pop->rma_refs, 0); /* failed to start any */

        NA_LOG_ERROR("uc send failed: %s", psm_error_get_string(perr));
        return NA_PROTOCOL_ERROR;
    }

    /* subop 2: queue sending the put data */
    perr = psm_mq_isend(pc->psm_mq, naddr->epaddr, 0 /*flags*/,
        rma_tag | NA_PSM_TAG_OP_PUT, userdata, (uint32_t) length, &pop->datasub,
        &pop->datasub.psm_handle);

    if (perr != PSM_OK) {
        /*
         * leave an error in callback_info, have to wait for the
         * busy unexpected control send to finish (PSM won't let
         * us cancel send operations).
         */
        pop->completion_data.callback_info.ret = NA_PROTOCOL_ERROR;

        na_psm_opid_setbusy(pc, pop, -2); /* remove this one and the next */

        NA_LOG_ERROR("2nd send failed: %s", psm_error_get_string(perr));

        /*
         * drop both subops, fail now if the first one already finished.
         * each decr may race the progress thread finishing subop 1,
         * hence the double check against zero.
         */
        if (hg_atomic_decr32(&pop->rma_refs) == 0 ||
            hg_atomic_decr32(&pop->rma_refs) == 0) {
            return NA_PROTOCOL_ERROR;
        }

        /* return, but delay error until the send above completes */
        return NA_SUCCESS;
    }

    /* subop 3: queue irecv for 0 byte status reply (dummy not touched) */
    perr = psm_mq_irecv(pc->psm_mq, rma_tag, NA_PSM_TAG_ECSELECT, 0 /*flags*/,
        &dummy, sizeof(dummy), &pop->putrcvsub, &pop->putrcvsub.psm_handle);

    if (perr != PSM_OK) {
        /* must wait for both isends to finish */
        pop->completion_data.callback_info.ret = NA_PROTOCOL_ERROR;

        na_psm_opid_setbusy(pc, pop, -1); /* remove this subop */

        NA_LOG_ERROR("irecv failed: %s", psm_error_get_string(perr));

        /* drop our subop, fail now if the first two already finished */
        if (hg_atomic_decr32(&pop->rma_refs) == 0) {
            return NA_PROTOCOL_ERROR;
        }

        /* return, but delay error until the sends above completes */
        return NA_SUCCESS;
    }

    /* success! */
    NA_LOG_DEBUG("rma_tag=%" PRIx64 ", op_id=%p, h=%p", rma_tag, (void *) pop,
        (void *) pop->datasub.psm_handle);

    return NA_SUCCESS;
}

/*
 * na_psm_get: RMA "get" operation.   PSM does not give us an RMA API,
 * so we build "get" with isend/irecv in the same way we built "put" above.
 *
 * a get is built from 2 subops: (1) an expected irecv for the data the
 * target will send back (posted first so it is ready before the request
 * goes out), and (2) an unexpected control msg asking the target to
 * send the data.  the op only completes when both subops have finished.
 */
static na_return_t
na_psm_get(na_class_t *na_class, na_context_t *context, na_cb_t callback,
    void *arg, na_mem_handle_t *local_mem_handle, na_offset_t local_offset,
    na_mem_handle_t *remote_mem_handle, na_offset_t remote_offset,
    size_t length, na_addr_t *remote_addr, uint8_t NA_UNUSED remote_id,
    na_op_id_t *op_id)
{
    struct na_psm_class *pc;
    struct na_psm_addr *naddr;
    struct na_psm_op_id *pop;
    struct na_psm_local_mem_handle *lochand;
    struct na_psm_mem_handle *remhand;
    void *userdata;
    uint64_t rma_tag;
    psm_error_t perr;

    /* extract psm-specific struct pointers from args */
    pc = na_class->plugin_class;
    naddr = (struct na_psm_addr *) remote_addr; /* i.e. the target */
    pop = (struct na_psm_op_id *) op_id; /* caller did na_psm_op_create() */
    lochand = (struct na_psm_local_mem_handle *) local_mem_handle;
    remhand = (struct na_psm_mem_handle *) remote_mem_handle;

    /* sanity checks, should never fire (rma_refs check matches put) */
    if (pop == NULL || pop->busy || pop->completed ||
        hg_atomic_get32(&pop->rma_refs) != 0) {
        NA_LOG_ERROR("invalid op_id!?");
        return NA_INVALID_ARG;
    }
    if (!lochand->handle.is_local) {
        NA_LOG_ERROR("provided local_mem_handle isn't local");
        return NA_INVALID_ARG;
    }
    /* psm_mq_irecv takes a uint32_t length */
    if (length > UINT32_MAX) {
        NA_LOG_ERROR("length cannot exceed %" PRIu32 " bytes", UINT32_MAX);
        return NA_MSGSIZE;
    }
    if (local_offset >= lochand->size ||
        length > lochand->size - local_offset) {
        NA_LOG_ERROR("local range err.  size=%" PRId64 ", off=%" PRId64
                     ", len=%" PRId64,
            lochand->size, local_offset, length);
        return NA_OVERFLOW;
    }
    /* get writes to local memory, so the handle must allow that */
    if (lochand->attr == NA_MEM_READ_ONLY) {
        NA_LOG_ERROR("provided local_mem_handle isn't writable");
        return NA_PERMISSION;
    }

    /* userdata is starting point of the local buffer */
    userdata = (char *) lochand->base + local_offset;

    /* rma_tag for this get operation (per-op sequence number in low bits) */
    rma_tag = NA_PSM_TAG_CONTROL | pc->ext_tagbits |
              (hg_atomic_incr32(&pc->rma_seqno) & NA_PSM_TAG_ECSEQMASK);

    /*
     * fill in the pop.  pcls, plugin_callback, plugin_callback_args, owner
     * have already been set by the psm op create function.
     */
    pop->op = NA_PSM_OP_GET;
    pop->context = context;
    pop->cancel = 0;
    pop->completion_data.callback_info.arg = arg;
    pop->completion_data.callback_info.type = NA_CB_GET;
    pop->completion_data.callback_info.ret = NA_SUCCESS; /* to start */
    pop->completion_data.callback = callback;

    /* we'll need 2 subops for a get, init err and handle to be safe */

    /*
     * put/get RMA ops have more than one subop.  we use rma_refs to
     * track the subops and only complete the op when rma_refs drops
     * to zero, as noted in "put" above.
     */
    hg_atomic_set32(&pop->rma_refs, 2);

    /* unexpected control msg send: tell target we want to 'get' */
    pop->ucsendsub.psm_status.error_code = PSM_OK;
    pop->ucsendsub.subop = NA_PSM_OP_GETSNDCTL;
    pop->ucsendsub.psm_handle = NULL;

    /* need to setup control msg args too... */
    psm_enc64(&pop->ucsargs[NA_PSM_UCARG_SENDER], pc->self.epid);
    memcpy(&pop->ucsargs[NA_PSM_UCARG_HANDLE], &remhand->token,
        sizeof(remhand->token)); /* opaque to target */
    psm_enc64(&pop->ucsargs[NA_PSM_UCARG_OFFSET], remote_offset);
    psm_enc64(&pop->ucsargs[NA_PSM_UCARG_LENGTH], length);

    /* expected control msg recv: the data for 'get' (or an error) */
    pop->datasub.psm_status.error_code = PSM_OK;
    pop->datasub.subop = NA_PSM_OP_GETRCV;
    pop->datasub.psm_handle = NULL;
    pop->datasub_buf = userdata;

    /* put it on the busy list, then we can start the subops */
    na_psm_opid_setbusy(pc, pop, 2); /* 2 subops for a get */

    /* subop 1: queue irecv for get data recv (or error) */
    perr = psm_mq_irecv(pc->psm_mq, rma_tag, NA_PSM_TAG_ECSELECT, 0 /*flags*/,
        userdata, (uint32_t) length, &pop->datasub, &pop->datasub.psm_handle);

    if (perr != PSM_OK) {
        /* nothing was started, so we can fully unwind and fail here */
        na_psm_opid_setbusy(pc, pop, 0);    /* unbusy */
        hg_atomic_set32(&pop->rma_refs, 0); /* failed to start any */

        /* was mislabeled "uc send failed" — this is the irecv path */
        NA_LOG_ERROR("irecv failed: %s", psm_error_get_string(perr));
        return NA_PROTOCOL_ERROR;
    }

    /* subop 2: queue unexpected control message for sending */
    perr = psm_mq_isend(pc->psm_mq, naddr->epaddr, 0 /*flags*/,
        NA_PSM_TAG_UNEXPECTED | NA_PSM_TAG_OP_GET | rma_tag, pop->ucsargs,
        sizeof(pop->ucsargs), &pop->ucsendsub, &pop->ucsendsub.psm_handle);

    if (perr != PSM_OK) {
        /*
         * leave an error in callback_info, have to wait for the
         * busy unexpected control send to finish (PSM won't let
         * us cancel send operations).
         */
        pop->completion_data.callback_info.ret = NA_PROTOCOL_ERROR;

        na_psm_opid_setbusy(pc, pop, -1); /* remove this subop */

        /*
         * we can try and cancel the irecv, ignore failures.  this
         * will cause the req handle to successfully complete with
         * 0 bytes rcv'd.  we'll know it is an error since we set
         * callback_info.ret to NA_PROTOCOL_ERROR above.   grab
         * the ipeek lock so we don't race the progress thread.
         */
        hg_thread_mutex_lock(&pc->ipeek_lock);
        if (pop->datasub.psm_handle)
            (void) psm_mq_cancel(&pop->datasub.psm_handle);
        hg_thread_mutex_unlock(&pc->ipeek_lock);

        NA_LOG_ERROR("isend failed: %s", psm_error_get_string(perr));

        /* drop our subop, fail now if the first subop already finished */
        if (hg_atomic_decr32(&pop->rma_refs) == 0) {
            return NA_PROTOCOL_ERROR;
        }

        /* return, but delay error until the irecv above completes */
        return NA_SUCCESS;
    }

    /* success! */
    NA_LOG_DEBUG("rma_tag=%" PRIx64 ", op_id=%p, h=%p", rma_tag, (void *) pop,
        (void *) pop->datasub.psm_handle);

    return NA_SUCCESS;
}

/*
 * na_psm_poll: push psm progress and report completion count to caller.
 */
static na_return_t
na_psm_poll(na_class_t *na_class, na_context_t NA_UNUSED *context,
    unsigned int *count_p)
{
    struct na_psm_class *pc = na_class->plugin_class;

    /* all the work happens in the internal progress routine */
    na_psm_progress(pc, count_p);

    return NA_SUCCESS;
}

/*
 * na_psm_cancel: cancel a psm op_id.  we are limited in what we can
 * do because psm only allows irecv to be canceled.   canceling
 * an irecv will cause it to successfully complete (but ignore the
 * status nbytes and msg_length values, as they will not be updated
 * and may contain garbage).
 */
static na_return_t
na_psm_cancel(
    na_class_t *na_class, na_context_t NA_UNUSED *context, na_op_id_t *op_id)
{
    struct na_psm_class *pc;
    struct na_psm_op_id *pop;
    struct na_cb_info *cbi;

    /* recover psm class and op_id */
    pc = na_class->plugin_class;
    pop = (struct na_psm_op_id *) op_id;
    cbi = &pop->completion_data.callback_info; /* shorthand */

    /*
     * hold the ipeek lock to block progress from handling op_ids
     * while we figure out what we can cancel.
     */
    hg_thread_mutex_lock(&pc->ipeek_lock);

    /* skip out if idle, already done, or already being canceled */
    if (pop->busy == 0 || pop->completed || pop->cancel)
        goto unlock_finish;

    switch (pop->op) {
        case NA_PSM_OP_SEND:
            /* PSM will not cancel sends, nothing we can do */
            break;

        case NA_PSM_OP_RECV:
        case NA_PSM_OP_GET:
            /* both ops have a cancelable irecv in datasub */
            if (pop->datasub.psm_handle) {
                /* best effort, don't worry if cancel fails */
                psm_mq_cancel(&pop->datasub.psm_handle);
                pop->cancel = 1;
                /* flag cancel only if no error already recorded */
                if (cbi->ret == NA_SUCCESS)
                    cbi->ret = NA_CANCELED;
            }
            break;

        case NA_PSM_OP_PUT:
            /* put's only cancelable subop is the status-reply irecv */
            if (pop->putrcvsub.psm_handle) {
                /* best effort, don't worry if cancel fails */
                psm_mq_cancel(&pop->putrcvsub.psm_handle);
                pop->cancel = 1;
                /* flag cancel only if no error already recorded */
                if (cbi->ret == NA_SUCCESS)
                    cbi->ret = NA_CANCELED;
            }
            break;

        case NA_PSM_OP_PUT_TARGET:
            /* internal target-side op; callers never hold one of these */
            NA_LOG_ERROR("impossible on a PUT_TARGET");
            break;

        default:
            NA_LOG_ERROR("invalid op value %d", pop->op);
            break;
    }

unlock_finish:
    hg_thread_mutex_unlock(&pc->ipeek_lock);

    return NA_SUCCESS;
}

/*
 * the set of psm ops we export to the world.
 *
 * optional functions not provided:
 *  - cleanup: nothing to cleanup at this level
 *  - context_create,context_destroy: not using context (XXXCDC: convert to it?)
 *  - addr_set_remove: a hint addr has gone bad (nothing psm can do with this)
 *  - msg_get_expected_header_size: do not need a header in expected msgs
 *  - msg_buf_alloc,msg_buf_free: just use malloc (XXX: pool helpful?)
 *  - msg_init_expected: not adding any additional data to expected msg
 *  - mem_handle_create_segment: an optimization not relevant to psm
 *  - mem_handle_get_max_segments: only need if using mem_handle_create_segment
 *  - mem_register,mem_deregister: mem register not needed for psm I/O
 *  - poll_get_fd: psm does not expose a filedescriptor to poll on
 *  - poll_try_wait: always ok to push psm progress
 */
/*
 * NA plugin operations table for this driver.
 *
 * NOTE: this uses positional (not designated) initializers, so the order
 * of entries below must exactly match the declaration order of the fields
 * in struct na_class_ops (declared in na_plugin.h) — the trailing comment
 * on each line names the intended member.  When adding or reordering
 * fields in na_plugin.h, this table must be updated in lockstep.
 * NULL entries are optional callbacks this plugin does not provide
 * (see the comment block above for the rationale for each).
 */
const struct na_class_ops NA_PSM_PLUGIN_VARIABLE = {
    NA_PSM_NAME,                           /* name */
    NULL,                                  /* get_protocol_info */
    na_psm_check_protocol,                 /* check_protocol */
    na_psm_initialize,                     /* initialize */
    na_psm_finalize,                       /* finalize */
    NULL,                                  /* cleanup */
    NULL,                                  /* has_opt_feature */
    NULL,                                  /* context_create */
    NULL,                                  /* context_destroy */
    na_psm_op_create,                      /* op_create */
    na_psm_op_destroy,                     /* op_destroy */
    na_psm_addr_lookup,                    /* addr_lookup */
    na_psm_addr_free,                      /* addr_free */
    NULL,                                  /* addr_set_remove */
    na_psm_addr_self,                      /* addr_self */
    na_psm_addr_dup,                       /* addr_dup */
    na_psm_addr_cmp,                       /* addr_cmp */
    na_psm_addr_is_self,                   /* addr_is_self */
    na_psm_addr_to_string,                 /* addr_to_string */
    na_psm_addr_get_serialize_size,        /* addr_get_serialize_size */
    na_psm_addr_serialize,                 /* addr_serialize */
    na_psm_addr_deserialize,               /* addr_deserialize */
    na_psm_msg_get_max_unexpected_size,    /* msg_get_max_unexpected_size */
    na_psm_msg_get_max_expected_size,      /* msg_get_max_expected_size */
    na_psm_msg_get_unexpected_header_size, /* msg_get_unexpected_header_size */
    NULL,                                  /* msg_get_expected_header_size */
    na_psm_msg_get_max_tag,                /* msg_get_max_tag */
    NULL,                                  /* msg_buf_alloc */
    NULL,                                  /* msg_buf_free */
    na_psm_msg_init_unexpected,            /* msg_init_unexpected */
    na_psm_msg_send_unexpected,            /* msg_send_unexpected */
    na_psm_msg_recv_unexpected,            /* msg_recv_unexpected */
    NULL,                                  /* msg_multi_recv_unexpected */
    NULL,                                  /* msg_init_expected */
    na_psm_msg_send_expected,              /* msg_send_expected */
    na_psm_msg_recv_expected,              /* msg_recv_expected */
    na_psm_mem_handle_create,              /* mem_handle_create */
    NULL,                                  /* mem_handle_create_segment */
    na_psm_mem_handle_free,                /* mem_handle_free */
    NULL,                                  /* mem_handle_get_max_segments */
    NULL,                                  /* mem_register */
    NULL,                                  /* mem_deregister */
    na_psm_mem_handle_get_serialize_size,  /* mem_handle_get_serialize_size */
    na_psm_mem_handle_serialize,           /* mem_handle_serialize */
    na_psm_mem_handle_deserialize,         /* mem_handle_deserialize */
    na_psm_put,                            /* put */
    na_psm_get,                            /* get */
    NULL,                                  /* poll_get_fd */
    NULL,                                  /* poll_try_wait */
    na_psm_poll,                           /* poll */
    NULL,                                  /* poll_wait */
    na_psm_cancel                          /* cancel */
};
