#include "config.h"

#define DBG_SUBSYS S_LIBINTERFACE

#include <string.h>
#include <errno.h>
#include <stdlib.h>

#include "iscsi.h"
#include "job_dock.h"
#include "volume.h"
#include "volume_ctl.h"
#include "dbg.h"
#include "net_global.h"
#include "lichstor.h"
#include "schedule.h"
#include "configure.h"

#include "subsystem.h"
#include "session.h"
#include "request.h"
#include "nvmf.h"
#include "nvmf_spec.h"
#include "nvme_spec.h"


#define MODEL_NUMBER "LICH Volume Controller"
#define FW_VERSION "FFFFFFFF"

/*
 * Read @size bytes at byte offset @off from the volume behind the
 * request's namespace.
 *
 * Fast path: use the mcache entry cached in the namespace slot
 * (populating it via volume_ctl_get() on a miss) and read through the
 * local volume controller.  If the direct read reports the chunk was
 * moved or is stale (EREMCHG/ESTALE/ENOENT), drop the cached entry and
 * retry from scratch.  When no controller entry can be obtained at all,
 * fall back to the generic stor_read() path.
 *
 * Returns 0 on success, a positive error code otherwise.
 */
static int IO_FUNC __lich_io_read__(struct lich_nvmf_request *req, uint64_t off, uint64_t size)
{
        int ret;
        io_t io;
        mcache_entry_t *entry = NULL;
        struct lich_nvmf_subsystem *subsystem = req->conn->sess->subsys;
        struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;

retry:
        /* nsid is 1-based; validated against dev.ns_count by the IO dispatcher */
        entry = subsystem->dev.ns_list[cmd->nsid - 1].entry;
        if (unlikely(!entry)) {
                ret = volume_ctl_get(&req->vol->id, &entry);
                if (ret) {
                        entry = NULL;   /* no controller entry: use stor_read() below */
                } else
                        subsystem->dev.ns_list[cmd->nsid - 1].entry = entry;
        }

        if (likely(entry)) {
                io_init(&io, &req->vol->id, NULL, off, size, 0);
                ret = volume_ctl_read_direct(entry, &io, req->buf, 1);
                if (unlikely(ret)) {
                        if (ret == EREMCHG || ret == ESTALE || ret == ENOENT) {
                                /* chunk relocated: invalidate the cached entry and retry */
                                mcache_release(entry);
                                subsystem->dev.ns_list[cmd->nsid - 1].entry = NULL;
                                DWARN(CHKID_FORMAT" moved\n", CHKID_ARG(&req->vol->id));

                                goto retry;
                        } else
                                GOTO(err_ret, ret);
                }
        } else {
                /* fallback: generic (non-direct) storage read */
                ret = stor_read(req->vol->pool, &req->vol->id, req->buf, size, off);
                if (unlikely(ret))
                        GOTO(err_ret, ret);
        }

        return 0;
err_ret:
        return ret;
}

/*
 * Bounds-checked read wrapper around __lich_io_read__().
 *
 * Rejects reads past the end of the volume, retries transient
 * EAGAIN/ENOSPC results, and maps any other failure to EIO.
 *
 * Fix: the original set ret = EIO on hard failure but then fell
 * through to "return 0", silently reporting success to the caller
 * (so the NVMe completion never carried an error status).
 */
static int __lich_io_read(struct lich_nvmf_request *req, uint64_t off, uint64_t size)
{
        int ret;

        /* reject I/O that extends past the end of the volume */
        if (unlikely(off + size > req->vol->size)) {
                ret = EIO;
                GOTO(err_ret, ret);
        }

        schedule_task_setname("nvmf_io_read");
retry:
        ret = __lich_io_read__(req, off, size);
        if (unlikely(ret)) {
                ret = _errno(ret);
                if (ret == EAGAIN || ret == ENOSPC)
                        goto retry;

                /* hard failure: report EIO instead of silently succeeding */
                ret = EIO;
                GOTO(err_ret, ret);
        }

        return 0;
err_ret:
        return ret;
}

/*
 * Write @size bytes at byte offset @off to the volume behind the
 * request's namespace.
 *
 * Mirrors __lich_io_read__(): prefer the cached mcache entry for the
 * namespace (fetching it via volume_ctl_get() on a miss) and write
 * through the local volume controller; on EREMCHG/ESTALE/ENOENT drop
 * the cached entry and retry.  Without a controller entry, fall back
 * to the generic stor_write() path.
 *
 * Returns 0 on success, a positive error code otherwise.
 */
static int __lich_io_write__(struct lich_nvmf_request *req, uint64_t off, uint64_t size)
{
        int ret;
        io_t io;
        struct lich_nvmf_subsystem *subsystem = req->conn->sess->subsys;
        mcache_entry_t *entry = NULL;
        struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;

retry:
        /* nsid is 1-based; validated against dev.ns_count by the IO dispatcher */
        entry = subsystem->dev.ns_list[cmd->nsid - 1].entry;
        if (!entry) {

                DWARN("lookup entry \n");
                ret = volume_ctl_get(&req->vol->id, &entry);
                if (ret) {
                        entry = NULL;   /* no controller entry: use stor_write() below */
                } else
                        subsystem->dev.ns_list[cmd->nsid - 1].entry = entry;
        }

        if (likely(entry)) {
                io_init(&io, &req->vol->id, NULL, off, size, 0);
                ret = volume_ctl_write_direct(entry, &io, req->buf, 1);
                if (unlikely(ret)) {
                        if (ret == EREMCHG || ret == ESTALE || ret == ENOENT) {
                                /* chunk relocated: invalidate the cached entry and retry */
                                mcache_release(entry);
                                subsystem->dev.ns_list[cmd->nsid - 1].entry = NULL;
                                DWARN(CHKID_FORMAT" moved\n", CHKID_ARG(&req->vol->id));

                                goto retry;
                        } else
                                GOTO(err_ret, ret);
                }
        } else {
                /* fallback: generic (non-direct) storage write */
                ret = stor_write(req->vol->pool, &req->vol->id, req->buf, off, size);
                if (unlikely(ret))
                        GOTO(err_ret, ret);
        }

        return 0;
err_ret:
        return ret;
}

/*
 * Bounds-checked write wrapper around __lich_io_write__().
 *
 * Rejects writes past the end of the volume, asserts the request
 * buffer matches the transfer size, retries transient EAGAIN/ENOSPC
 * results, and maps any other failure to EIO.
 *
 * Fix: the original set ret = EIO on hard failure but then fell
 * through to "return 0", silently reporting success to the caller.
 */
static int __lich_io_write(struct lich_nvmf_request *req, uint64_t off, uint64_t size)
{
        int ret;

        schedule_task_setname("nvmf_io_write");

        /* reject I/O that extends past the end of the volume */
        if (unlikely(off + size > req->vol->size)) {
                ret = EIO;
                GOTO(err_ret, ret);
        }

        YASSERT(req->buf->len == (uint32_t)size);

retry:
        ret = __lich_io_write__(req, off, size);
        if (unlikely(ret)) {
                ret = _errno(ret);
                if (ret == EAGAIN || ret == ENOSPC)
                        goto retry;

                /* hard failure: report EIO instead of silently succeeding */
                ret = EIO;
                GOTO(err_ret, ret);
        }

        return 0;
err_ret:
        return ret;
}


/*
 * NVMe Read/Write command dword 12: NLB plus control bits.
 * NOTE(review): a packed bit-field with mixed uint16_t/uint8_t members
 * has implementation-defined layout; this matches the on-wire dword on
 * the compilers this project targets — verify before porting.
 */
struct __attribute__((packed)) nvme_read_cdw12 {
        uint16_t	nlb;		/* number of logical blocks, 0's based (see llen = nlb + 1) */
        uint16_t	rsvd	: 10;
        uint8_t		prinfo	: 4;	/* protection information field */
        uint8_t		fua	: 1;	/* force unit access */
        uint8_t		lr	: 1;	/* limited retry */
};

/*
 * The backing namespaces do not support unmap, so Dataset Management
 * (DSM) is left disabled in the advertised ONCS capabilities.
 */
static void nvmf_set_dsm(struct lich_nvmf_session *session)
{
        DWARN("Subsystem%d Namespace  does not support unmap - not enabling DSM\n", session->subsys->lcore);

        session->vcdata.oncs.dsm = 0;
}

/*
 * Copy @src into the fixed-width field @dst of @size bytes.
 * The result is NOT NUL-terminated: if @src is shorter than @size the
 * remainder is filled with @pad; if longer, the copy is truncated to
 * @size.  Intended for padded NVMe identify fields (sn/mn/fr).
 */
void lich_strcpy_pad(void *dst, const char *src, size_t size, int pad)
{
        size_t n = strlen(src);

        memset(dst, pad, size);
        memcpy(dst, src, n < size ? n : size);
}

/*
 * Populate the session's Identify Controller data (vcdata) with the
 * static capabilities of the virtual controller: firmware/model/serial
 * strings, NVMe version 1.2.1, queue entry sizes, namespace count and
 * the subsystem NQN.
 *
 * Fix: strncpy() could leave subnqn without a NUL terminator when the
 * source string fills the field; copy at most sizeof - 1 bytes and
 * force termination (strncpy still zero-fills the remainder).
 */
static void nvmf_ctrlr_get_data(struct lich_nvmf_session *session)
{
        struct lich_nvmf_subsystem *subsys = session->subsys;

        memset(&session->vcdata, 0, sizeof(struct spdk_nvme_ctrlr_data));
        /* fixed-width, space-padded identify strings (not NUL-terminated) */
        lich_strcpy_pad(session->vcdata.fr, FW_VERSION, sizeof(session->vcdata.fr), ' ');
        lich_strcpy_pad(session->vcdata.mn, MODEL_NUMBER, sizeof(session->vcdata.mn), ' ');
        lich_strcpy_pad(session->vcdata.sn, subsys->dev.sn, sizeof(session->vcdata.sn), ' ');
        session->vcdata.rab = 6;
        /* advertised NVMe version: 1.2.1 */
        session->vcdata.ver.bits.mjr = 1;
        session->vcdata.ver.bits.mnr = 2;
        session->vcdata.ver.bits.ter = 1;
        session->vcdata.ctratt.host_id_exhid_supported = 1;
        session->vcdata.aerl = 0;
        session->vcdata.frmw.slot1_ro = 1;
        session->vcdata.frmw.num_slots = 1;
        session->vcdata.lpa.edlp = 1;
        session->vcdata.elpe = 127;
        /* SQ entries 64 bytes (2^6), CQ entries 16 bytes (2^4) */
        session->vcdata.sqes.min = 0x06;
        session->vcdata.sqes.max = 0x06;
        session->vcdata.cqes.min = 0x04;
        session->vcdata.cqes.max = 0x04;
        session->vcdata.maxcmd = 1024;
        session->vcdata.nn = subsys->dev.ns_count;
        session->vcdata.vwc.present = 1;
        session->vcdata.sgls.supported = 1;
        strncpy((char *)session->vcdata.subnqn, (char *)session->subsys->subnqn,
                sizeof(session->vcdata.subnqn) - 1);
        session->vcdata.subnqn[sizeof(session->vcdata.subnqn) - 1] = '\0';
        nvmf_set_dsm(session);
}

/*
 * Get Log Page admin command.  Only the mandatory pages (Error,
 * Health/SMART, Firmware Slot) are accepted, and their content is the
 * zero-filled response buffer; anything else completes with Invalid
 * Log Page status.
 */
static int nvmf_virtual_ctrlr_get_log_page(struct lich_nvmf_request *req)
{
        struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
        struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
        void *data = ((seg_t *)req->buf->list.next)->handler.ptr;
        uint64_t log_page_offset;
        uint8_t lid;

        if (data == NULL) {
                DERROR("get log command with no buffer\n");
                response->status.sc = SPDK_NVME_SC_INVALID_FIELD;
                return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
        }

        memset(data, 0, req->length);

        /* LPOL (cdw12) / LPOU (cdw13): offset must be dword-aligned */
        log_page_offset = ((uint64_t)cmd->cdw13 << 32) | (uint64_t)cmd->cdw12;
        if (log_page_offset & 3) {
                DERROR("Invalid log page offset 0x%" PRIx64 "\n", log_page_offset);
                response->status.sc = SPDK_NVME_SC_INVALID_FIELD;
                return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
        }

        lid = cmd->cdw10 & 0xFF;
        if (lid == SPDK_NVME_LOG_ERROR ||
            lid == SPDK_NVME_LOG_HEALTH_INFORMATION ||
            lid == SPDK_NVME_LOG_FIRMWARE_SLOT)
                return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;

        DERROR("Unsupported Get Log Page 0x%02X\n", lid);
        response->status.sct = SPDK_NVME_SCT_COMMAND_SPECIFIC;
        response->status.sc = SPDK_NVME_SC_INVALID_LOG_PAGE;
        return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}

/*
 * Identify Namespace: report the namespace size in logical blocks and
 * a single LBA format (LBAF0 with block size LICH_BLOCK_SIZE).
 * An out-of-range or zero NSID completes with Invalid Namespace.
 */
static int identify_ns(struct lich_nvmf_subsystem *subsystem,
                struct spdk_nvme_cmd *cmd,
                struct spdk_nvme_cpl *rsp,
                struct spdk_nvme_ns_data *nsdata)
{
        ns_info_t *ns;
        uint64_t nblocks;

        if (cmd->nsid == 0 || cmd->nsid > subsystem->dev.ns_count) {
                DERROR("Identify Namespace for invalid NSID %u\n", cmd->nsid);
                rsp->status.sc = SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT;
                return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
        }

        ns = &subsystem->dev.ns_list[cmd->nsid - 1];
        nblocks = ns->size / LICH_BLOCK_SIZE;

        nsdata->nsze = nblocks;         /* namespace size */
        nsdata->ncap = nblocks;         /* capacity (== size) */
        nsdata->nuse = nblocks;         /* utilization (reported == size) */
        nsdata->nlbaf = 0;              /* one LBA format, 0's based */
        nsdata->flbas.format = 0;
        nsdata->lbaf[0].lbads = nvmf_u32log2(LICH_BLOCK_SIZE);

        return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}

static int identify_ctrlr(struct lich_nvmf_session *session, struct spdk_nvme_ctrlr_data *cdata)
{
        *cdata = session->vcdata;
        return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}

/*
 * Identify Active Namespace List: report NSIDs strictly greater than
 * cmd->nsid in ascending order, up to the capacity of the ns_list
 * page.  Every namespace in [1, ns_count] is treated as active.
 */
static int identify_active_ns_list(struct lich_nvmf_subsystem *subsystem,
                struct spdk_nvme_cmd *cmd,
                struct spdk_nvme_cpl *rsp,
                struct spdk_nvme_ns_list *ns_list)
{
        const uint32_t capacity = sizeof(*ns_list) / sizeof(uint32_t);
        uint32_t nsid, filled = 0;

        /* starting NSIDs 0xfffffffe and 0xffffffff are reserved/invalid */
        if (cmd->nsid >= 0xfffffffeUL) {
                DERROR("Identify Active Namespace List with invalid NSID %u\n", cmd->nsid);
                rsp->status.sc = SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT;
                return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
        }

        for (nsid = cmd->nsid + 1; nsid <= subsystem->dev.ns_count && filled < capacity; nsid++)
                ns_list->ns_list[filled++] = nsid;

        return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}

/*
 * Identify admin command: zero the response buffer, then dispatch on
 * the CNS field in cdw10[7:0].  Requires a buffer of at least one
 * identify page (4096 bytes).
 */
static int nvmf_virtual_ctrlr_identify(struct lich_nvmf_request *req)
{
        struct lich_nvmf_session *session = req->conn->sess;
        struct lich_nvmf_subsystem *subsystem = session->subsys;
        struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
        struct spdk_nvme_cpl *rsp = &req->rsp->nvme_cpl;
        void *data = ((seg_t *)req->buf->list.next)->handler.ptr;
        uint8_t cns;

        if (data == NULL || req->length < 4096) {
                DERROR("identify command with invalid buffer\n");
                rsp->status.sc = SPDK_NVME_SC_INVALID_FIELD;
                return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
        }

        memset(data, 0, req->length);

        cns = cmd->cdw10 & 0xFF;
        if (cns == SPDK_NVME_IDENTIFY_NS)
                return identify_ns(subsystem, cmd, rsp, data);
        if (cns == SPDK_NVME_IDENTIFY_CTRLR)
                return identify_ctrlr(session, data);
        if (cns == SPDK_NVME_IDENTIFY_ACTIVE_NS_LIST)
                return identify_active_ns_list(subsystem, cmd, rsp, data);

        DERROR("Identify command with unsupported CNS 0x%02x\n", cns);
        rsp->status.sc = SPDK_NVME_SC_INVALID_FIELD;
        return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}

/*
 * Get Features admin command: dispatch on the feature identifier in
 * cdw10[7:0].  Unsupported features complete with Invalid Field.
 */
static int nvmf_virtual_ctrlr_get_features(struct lich_nvmf_request *req)
{
        struct lich_nvmf_session *session = req->conn->sess;
        struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
        struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
        uint8_t fid = cmd->cdw10 & 0xff;        /* feature identifier */

        if (fid == SPDK_NVME_FEAT_NUMBER_OF_QUEUES)
                return lich_nvmf_session_get_features_number_of_queues(req);

        if (fid == SPDK_NVME_FEAT_VOLATILE_WRITE_CACHE) {
                response->cdw0 = 1;     /* write cache reported enabled (matches vcdata.vwc.present) */
                return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
        }

        if (fid == SPDK_NVME_FEAT_KEEP_ALIVE_TIMER)
                return lich_nvmf_session_get_features_keep_alive_timer(req);

        if (fid == SPDK_NVME_FEAT_ASYNC_EVENT_CONFIGURATION) {
                DBUG("Get Features - Async Event Configuration\n");
                response->cdw0 = session->async_event_config.raw;
                return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
        }

        if (fid == SPDK_NVME_FEAT_HOST_IDENTIFIER)
                return lich_nvmf_session_get_features_host_identifier(req);

        DERROR("Get Features command with unsupported feature ID 0x%02x\n", fid);
        response->status.sc = SPDK_NVME_SC_INVALID_FIELD;
        return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}

/*
 * Set Features admin command: dispatch on the feature identifier in
 * cdw10[7:0].  Unsupported features complete with Invalid Field.
 */
static int nvmf_virtual_ctrlr_set_features(struct lich_nvmf_request *req)
{
        struct lich_nvmf_session *session = req->conn->sess;
        struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
        struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
        uint8_t fid = cmd->cdw10 & 0xff;        /* feature identifier */

        if (fid == SPDK_NVME_FEAT_NUMBER_OF_QUEUES)
                return lich_nvmf_session_set_features_number_of_queues(req);

        if (fid == SPDK_NVME_FEAT_KEEP_ALIVE_TIMER)
                return lich_nvmf_session_set_features_keep_alive_timer(req);

        if (fid == SPDK_NVME_FEAT_ASYNC_EVENT_CONFIGURATION) {
                /* new event configuration is carried verbatim in cdw11 */
                session->async_event_config.raw = cmd->cdw11;
                return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
        }

        if (fid == SPDK_NVME_FEAT_HOST_IDENTIFIER)
                return lich_nvmf_session_set_features_host_identifier(req);

        DERROR("Set Features command with unsupported feature ID 0x%02x\n", fid);
        response->status.sc = SPDK_NVME_SC_INVALID_FIELD;
        return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}

/*
 * Dispatch an admin-queue command by opcode.
 *
 * Queue-management opcodes (create/delete SQ/CQ) are disallowed over
 * fabrics and complete with Invalid Opcode.
 *
 * Fix: the default (unknown opcode) branch previously left the
 * pre-set SUCCESS status in place, so unsupported admin commands were
 * reported as successful; they now complete with Invalid Opcode.
 */
static int nvmf_virtual_ctrlr_process_admin_cmd(struct lich_nvmf_request *req)
{
        struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
        struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;

        /* pre-set response details for this command */
        response->status.sc = SPDK_NVME_SC_SUCCESS;

        switch (cmd->opc) {
        case SPDK_NVME_OPC_GET_LOG_PAGE:
                return nvmf_virtual_ctrlr_get_log_page(req);
        case SPDK_NVME_OPC_IDENTIFY:
                return nvmf_virtual_ctrlr_identify(req);
        case SPDK_NVME_OPC_GET_FEATURES:
                return nvmf_virtual_ctrlr_get_features(req);
        case SPDK_NVME_OPC_SET_FEATURES:
                return nvmf_virtual_ctrlr_set_features(req);
        case SPDK_NVME_OPC_ASYNC_EVENT_REQUEST:
                /* TODO: Just release the request as consumed. AER events will never
                 * be triggered. */
                return SPDK_NVMF_REQUEST_EXEC_STATUS_RELEASE;
        case SPDK_NVME_OPC_KEEP_ALIVE:
                /*
                 * To handle keep alive just clear or reset the session based
                 * keep alive duration counter.  When added, a separate timer
                 * based process will monitor if the time since last recorded
                 * keep alive has exceeded the max duration and take
                 * appropriate action.
                 */
                //session->keep_alive_timestamp = ;
                return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;

        case SPDK_NVME_OPC_CREATE_IO_SQ:
        case SPDK_NVME_OPC_CREATE_IO_CQ:
        case SPDK_NVME_OPC_DELETE_IO_SQ:
        case SPDK_NVME_OPC_DELETE_IO_CQ:
                DERROR("Admin opc 0x%02X not allowed in NVMf\n", cmd->opc);
                response->status.sc = SPDK_NVME_SC_INVALID_OPCODE;
                return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
        default:
                DERROR("Unsupported admin command\n");
                /* fix: report Invalid Opcode instead of the pre-set SUCCESS */
                response->status.sc = SPDK_NVME_SC_INVALID_OPCODE;
                return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
        }
}

/*
 * Execute an NVMe Read or Write command against the namespace volume.
 *
 * cdw10/cdw11 carry the 64-bit starting LBA; cdw12 carries NLB (0's
 * based) and control bits.  The byte count derived from NLB must fit
 * inside the request's SGL length (req->length).
 *
 * Returns an SPDK_NVMF_REQUEST_EXEC_STATUS_* code; the NVMe completion
 * status is written into req->rsp.
 */
static int nvmf_lichio_ctrlr_rw_cmd(struct lich_nvmf_request *req)
{
        int ret;
        uint64_t lba_address;
        uint64_t io_bytes;
        uint64_t offset;
        uint64_t llen;
        struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
        struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
        struct nvme_read_cdw12 *cdw12 = (struct nvme_read_cdw12 *)&cmd->cdw12;

        /* starting LBA: cdw11 is the high dword, cdw10 the low dword */
        lba_address = cmd->cdw11;
        lba_address = (lba_address << 32) + cmd->cdw10;
        offset = lba_address * LICH_BLOCK_SIZE;
        llen = cdw12->nlb + 1;          /* NLB is 0's based */

        /*if (lba_address >= blockcnt || llen > blockcnt || lba_address > (blockcnt - llen)) {
          DERROR("end of media\n");
          response->status.sc = SPDK_NVME_SC_LBA_OUT_OF_RANGE;
          return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
          } */

        io_bytes = llen * LICH_BLOCK_SIZE;
        if (io_bytes > req->length) {
                DERROR("Read/Write NLB > SGL length\n");
                response->status.sc = SPDK_NVME_SC_DATA_SGL_LENGTH_INVALID;
                return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
        }

        if (cmd->opc == SPDK_NVME_OPC_READ) {
                /* NOTE(review): presumably releases the staging buffer so the
                 * read path can populate req->buf itself — confirm mbuffer
                 * ownership semantics before changing this. */
                mbuffer_free(req->buf);
                ret = __lich_io_read(req, offset, io_bytes);
                if (ret) {
                        response->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
                        ret = SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
                        GOTO(err_ret, ret);
                }
        } else {
                ret = __lich_io_write(req, offset, io_bytes);
                if (ret) {
                        response->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
                        ret = SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
                        GOTO(err_ret, ret);
                }

        }

        response->status.sc = SPDK_NVME_SC_SUCCESS;

        return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
err_ret:
        return ret;
}

/* Flush: no flush is forwarded to the backing volume; complete with success. */
static int nvmf_lichio_ctrlr_flush_cmd(struct lich_nvmf_request *req)
{
        struct spdk_nvme_cpl *cpl = &req->rsp->nvme_cpl;

        cpl->status.sc = SPDK_NVME_SC_SUCCESS;
        return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}

/* Dataset Management (unmap) is unsupported; fail with a device error. */
static int nvmf_lichio_ctrlr_dsm_cmd(struct lich_nvmf_request *req)
{
        struct spdk_nvme_cpl *cpl = &req->rsp->nvme_cpl;

        DERROR("do not support dsm now, return error\n");
        cpl->status.sc = SPDK_NVME_SC_INTERNAL_DEVICE_ERROR;
        return SPDK_NVMF_REQUEST_EXEC_STATUS_COMPLETE;
}

/*
 * Task body that executes one NVMe I/O command (read/write/flush/dsm):
 * validates the namespace id, dispatches by opcode, then completes the
 * request.  Runs on the scheduler task created by
 * nvmf_lichio_ctrlr_process_io_cmd().
 */
static void __nvmf_lichio_ctrlr_process_io_cmd(void *arg)
{
        struct lich_nvmf_request *req = arg;
        struct lich_nvmf_subsystem *subsystem = req->conn->sess->subsys;
        struct spdk_nvme_cmd *cmd = &req->cmd->nvme_cmd;
        struct spdk_nvme_cpl *response = &req->rsp->nvme_cpl;
        uint32_t nsid = cmd->nsid;

        /* assume success unless a handler overrides the status below */
        response->status.sc = SPDK_NVME_SC_SUCCESS;

        if (nsid == 0 || nsid > subsystem->dev.ns_count) {
                DERROR("Unsuccessful query for nsid %u, ns_count: %u\n", cmd->nsid, subsystem->dev.ns_count);
                response->status.sc = SPDK_NVME_SC_INVALID_NAMESPACE_OR_FORMAT;
        } else {
                req->vol = &subsystem->dev.ns_list[nsid - 1];

                switch (cmd->opc) {
                case SPDK_NVME_OPC_READ:
                case SPDK_NVME_OPC_WRITE:
                        nvmf_lichio_ctrlr_rw_cmd(req);
                        break;
                case SPDK_NVME_OPC_FLUSH:
                        nvmf_lichio_ctrlr_flush_cmd(req);
                        break;
                case SPDK_NVME_OPC_DATASET_MANAGEMENT:
                        nvmf_lichio_ctrlr_dsm_cmd(req);
                        break;
                default:
                        DERROR("Unsupported IO command opc: %x\n", cmd->opc);
                        response->status.sc = SPDK_NVME_SC_INVALID_OPCODE;
                        break;
                }
        }

        lich_nvmf_request_complete(req);
}

/*
 * I/O command entry point: hand the request to a new scheduler task so
 * the blocking volume I/O runs outside the caller's context, and tell
 * the session layer the request completes asynchronously.
 * (Last argument -1: group/core selection — see schedule_task_new().)
 */
static int nvmf_lichio_ctrlr_process_io_cmd(struct lich_nvmf_request *req)
{
        schedule_task_new("nvmf_io_exec",
                          __nvmf_lichio_ctrlr_process_io_cmd, req, -1);
        return SPDK_NVMF_REQUEST_EXEC_STATUS_ASYNCHRONOUS;
}

/* Detach hook: intentionally a no-op for the virtual controller. */
static void nvmf_ctrlr_detach(struct lich_nvmf_subsystem *subsystem)
{
        (void)subsystem;
}

/* Completion-poll hook: intentionally a no-op — requests are completed
 * elsewhere (see the scheduler task in the I/O path). */
static void nvmf_virtual_ctrlr_poll_for_completions(struct lich_nvmf_session *session)
{
        (void)session;
}

/*
 * Ops table for the "virtual" (LICH volume-backed) NVMf controller,
 * consumed by the generic session layer.  poll_for_completions and
 * detach are no-ops for this backend.
 */
const struct lich_nvmf_ctrlr_ops lich_nvmf_virtual_ctrlr_ops = {
        .ctrlr_get_data			= nvmf_ctrlr_get_data,
        .process_admin_cmd		= nvmf_virtual_ctrlr_process_admin_cmd,
        .process_io_cmd			= nvmf_lichio_ctrlr_process_io_cmd,
        .poll_for_completions           = nvmf_virtual_ctrlr_poll_for_completions,
        .detach				= nvmf_ctrlr_detach,
};
