
#ifndef _NVME_INFO_
#include "configure.h"
#else
#define IO_FUNC
#endif
#include "nvme_internal.h"

/*
 * Allocate a request descriptor from the queue pair free list.
 */
/*
 * Pop a request descriptor off the queue pair free list and clear its
 * command structure for reuse.  Returns NULL when the pool is exhausted.
 */
static struct nvme_request *IO_FUNC nvme_alloc_request(struct nvme_qpair *qpair)
{
        struct nvme_request *req = STAILQ_FIRST(&qpair->free_req);

        if (req != NULL) {
                STAILQ_REMOVE_HEAD(&qpair->free_req, stailq);
                memset(&req->cmd, 0, sizeof(req->cmd));
        }

        return req;
}

/*
 * Completion callback installed on each child of a split request.
 *
 * Detaches the finished child from its parent, copies an error status
 * (if any) into the parent's aggregate status, and — once no children
 * remain outstanding — completes the parent and returns it to the pool.
 */
static void nvme_request_cb_complete_child(void *child_arg, const struct nvme_cpl *cpl)
{
        struct nvme_request *child = child_arg;
        struct nvme_request *parent = child->parent;

        nvme_request_remove_child(parent, child);

        /* A failing child marks the whole parent request as failed. */
        if (nvme_cpl_is_error(cpl))
                memcpy(&parent->parent_status, cpl, sizeof(*cpl));

        if (parent->child_reqs != 0)
                return;

        /* Last child just completed: finish the parent itself. */
        if (parent->cb_fn)
                parent->cb_fn(parent->cb_arg, &parent->parent_status);
        nvme_request_free(parent);
}

void nvme_request_completion_poll_cb(void *arg, const struct nvme_cpl *cpl)
{
        struct nvme_completion_poll_status *status = arg;

        memcpy(&status->cpl, cpl, sizeof(*cpl));
        status->done = true;
}

/*
 * Allocate the queue pair request pool and seed the free list.
 *
 * The pool is sized as a fixed multiple of the number of hardware
 * trackers so that requests can be queued ahead of available slots.
 *
 * Returns 0 on success or -ENOMEM if the pool cannot be allocated.
 */
int nvme_request_pool_construct(struct nvme_qpair *qpair)
{
        struct nvme_request *req;
        unsigned int i;

        qpair->num_reqs = qpair->trackers * NVME_IO_ENTRIES_VS_TRACKERS_RATIO;
        qpair->reqs = calloc(qpair->num_reqs, sizeof(struct nvme_request));
        if (!qpair->reqs) {
                nvme_err("QPair %d: allocate %u requests failed\n", (int)qpair->id, qpair->num_reqs);
                return -ENOMEM;
        }

        nvme_info("QPair %d: %d requests in pool\n", (int)qpair->id, (int)qpair->num_reqs);

        /* Put every request of the pool on the free list. */
        for (i = 0; i < qpair->num_reqs; i++) {
                req = &qpair->reqs[i];
                req->qpair = qpair;
                STAILQ_INSERT_TAIL(&qpair->free_req, req, stailq);
                /* Note: the original trailing req++ was dead code — req is
                 * reassigned from qpair->reqs[i] at the top of each pass. */
        }

        return 0;
}

/*
 * Tear down the queue pair request pool.
 *
 * Drains the free list, warns if any requests never made it back onto
 * the free list (i.e. are still outstanding), and releases the pool
 * memory.  The reqs pointer is cleared so a double destroy is harmless.
 */
void nvme_request_pool_destroy(struct nvme_qpair *qpair)
{
        struct nvme_request *req;
        unsigned int n = 0;

        while ((req = STAILQ_FIRST(&qpair->free_req))) {
                STAILQ_REMOVE_HEAD(&qpair->free_req, stailq);
                n++;
        }

        /* %u matches the unsigned counters (the old %d was a mismatch). */
        if (n != qpair->num_reqs)
                nvme_err("QPair %d: Freed %u/%u requests\n", (int)qpair->id, n, (unsigned int)qpair->num_reqs);

        free(qpair->reqs);
        qpair->reqs = NULL;
}

/*
 * Allocate a request from the queue pair pool and bind the caller's
 * payload and completion callback to it.  Returns NULL when the pool
 * has no free requests.
 */
struct nvme_request *IO_FUNC nvme_request_allocate(struct nvme_qpair *qpair, const struct nvme_payload *payload, uint32_t payload_size, nvme_cmd_cb cb_fn,
                                                   void *cb_arg)
{
        struct nvme_request *req = nvme_alloc_request(qpair);

        if (unlikely(!req))
                return NULL;

        /*
         * Clear only the fields that precede the children TAILQ_ENTRY.
         * The splitting-related members that follow it are written solely
         * by nvme_request_add_child() when a request is actually split,
         * so the common (unsplit) path never touches that cacheline.
         */
        memset(req, 0, offsetof(struct nvme_request, children));
        req->payload = *payload;
        req->payload_size = payload_size;
        req->cb_fn = cb_fn;
        req->cb_arg = cb_arg;

        return req;
}

/*
 * Allocate a request whose payload is a single contiguous buffer
 * (no metadata).  Thin convenience wrapper over nvme_request_allocate().
 */
struct nvme_request *nvme_request_allocate_contig(struct nvme_qpair *qpair, void *buffer, uint32_t payload_size, nvme_cmd_cb cb_fn, void *cb_arg)
{
        struct nvme_payload payload = {
                .type = NVME_PAYLOAD_TYPE_CONTIG,
                .u.contig = buffer,
                .md = NULL,
        };

        return nvme_request_allocate(qpair, &payload, payload_size, cb_fn, cb_arg);
}

/*
 * Allocate a request that carries no data payload, e.g. for commands
 * that transfer nothing.
 */
struct nvme_request *nvme_request_allocate_null(struct nvme_qpair *qpair, nvme_cmd_cb cb_fn, void *cb_arg)
{
        return nvme_request_allocate_contig(qpair, NULL, 0, cb_fn, cb_arg);
}

/*
 * Return a request descriptor to its owning queue pair's free list.
 * The request must have no outstanding child requests.
 */
void nvme_request_free(struct nvme_request *req)
{
        nvme_assert(req->child_reqs == 0, "Number of child request not 0\n");

        STAILQ_INSERT_HEAD(&req->qpair->free_req, req, stailq);
}

/*
 * Attach a child request to a parent as part of I/O splitting and route
 * the child's completion through the split-completion handler.
 */
void nvme_request_add_child(struct nvme_request *parent, struct nvme_request *child)
{
        if (parent->child_reqs == 0) {
                /*
                 * First child: lazily set up the split-tracking state.
                 * The children TAILQ lives on a separate cacheline, so it
                 * is initialized here rather than at allocation time to
                 * keep unsplit requests (the common case) off that line.
                 */
                TAILQ_INIT(&parent->children);
                parent->parent = NULL;
                memset(&parent->parent_status, 0, sizeof(parent->parent_status));
        }

        parent->child_reqs++;
        TAILQ_INSERT_TAIL(&parent->children, child, child_tailq);

        /* Completion of the child is reported to the parent's handler. */
        child->parent = parent;
        child->cb_fn = nvme_request_cb_complete_child;
        child->cb_arg = child;
}

/*
 * Detach a child request from its parent's children list and drop the
 * parent's outstanding-child count.
 */
void nvme_request_remove_child(struct nvme_request *parent, struct nvme_request *child)
{
        nvme_assert(child->parent == parent, "child->parent != parent\n");
        nvme_assert(parent->child_reqs != 0, "child_reqs is 0\n");

        TAILQ_REMOVE(&parent->children, child, child_tailq);
        parent->child_reqs--;
}
