/*
 * Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <linux/virtio_net.h>
#include "virtnet.h"
#include "virtnet_vq.h"

/*
 * Dump a descriptor tunnel request received on the control vq.
 *
 * Dumping is disabled by default (too verbose for the datapath); flip
 * the guard below to re-enable it while debugging.  A constant guard is
 * used instead of a bare early `return;` so the dump code stays
 * reachable for the compiler and does not trigger unreachable-code
 * warnings.
 *
 * @param[in] data
 * 	Address of the request (union virtnet_desc_tunnel_request)
 */
static inline void
virtnet_desc_tunnel_request_dump(void *data)
{
	/* Don't print the full descs for now */
	static const int dump_enabled = 0;
	union virtnet_desc_tunnel_request *req = data;
	int i;

	if (!dump_enabled)
		return;

	log_debug("got request: avail_index[%hu], num_desc[%hu]",
		  req->split.avail_index, req->split.num_desc);

	/* Original code dumped the first 6 descriptors; keep that count */
	for (i = 0; i < 6; i++)
		log_debug("%d -> addr:0x%lx,len:0x%x,flags:0x%x,next:0x%x",
			  i, req->split.desc[i].addr, req->split.desc[i].len,
			  req->split.desc[i].flags, req->split.desc[i].next);
}

/*
 * Callback function to handle descriptor tunnel request from control vq.
 *
 * Optionally dumps the raw request, then hands it to the control-vq
 * command parser.
 *
 * NOTE(review): "virtent" in the function name looks like a typo for
 * "virtnet"; renaming it requires updating the .rx_cb assignment in
 * virtnet_vq_tunnel_create() at the same time.
 *
 * @param[in] q
 * 	The sw QP that receives the request
 * @param[in] data
 * 	Address of request data
 * @param[in] data_len
 * 	Length of request data (unused here)
 * @param[in] imm_data
 * 	Immediate number associated with RDMA send request (unused here)
 */
static void
virtent_vq_tunnel_handle_req(struct snap_dma_q *q, void *data,
			     uint32_t data_len, uint32_t imm_data)
{
	struct virtnet_device *dev = q->uctx;
	struct virtnet_qp *qp = &dev->ctrl.sw;
	union virtnet_desc_tunnel_request *req = data;

	/* Dump is a no-op unless explicitly enabled for debugging */
	virtnet_desc_tunnel_request_dump(data);
	virtnet_vq_ctrl_cmds_parse(dev, qp, req);
}

/**
 * Callback function of descriptor tunnel event triggered by QP event channel.
 *
 * Progresses the control DMA queue, which polls and processes pending
 * completions (rx callbacks fire from within snap_dma_q_progress()).
 *
 * @param[in] arg
 * 	callback argument; the owning struct virtnet_device
 */
static void
virtnet_vq_tunnel_epoll_cb(void *arg)
{
	struct virtnet_device *dev = arg;
	int ret;

	ret = snap_dma_q_progress(dev->ctrl.dma);
	if (ret < 0)
		/* Old message ("failed to get dma") was misleading */
		log_error("failed to progress control dma queue, err=%d", ret);
}

/**
 * Initialize descriptor tunnel sw QP and fw QP resources.
 * SW QP: N requests for rx, 1 response for tx and 1 RDMA buffer.
 * FW QP: 1 request for tx, N responses for rx and 1 RDMA buffer.
 *
 * All buffers are page-aligned (memalign 4096) and registered as MRs on
 * the SF protection domain where the peer must access them.
 *
 * @param[in] dev
 * 	Pointer to device
 *
 * @return 0 on success, -1 on failure.  On failure every resource
 * 	acquired so far is released (see the goto chain below, which
 * 	unwinds in reverse acquisition order).
 */
static int
virtnet_vq_tunnel_init(struct virtnet_device *dev)
{
	struct ibv_pd *pd = dev->sf_verbs.pd;
	struct virtnet_qp *qp;
	size_t size;

	/* SW QP */
	qp = &dev->ctrl.sw;
	/* SW rx buffer allocated by dma helper */
	qp->reqs = (void *)dev->ctrl.dma->sw_qp.rx_buf;

	size = sizeof(*qp->resp);
	qp->resp = memalign(4096, size);
	if (!qp->resp) {
		log_error("Failed to allocate sw response buffer");
		/* was `return ENOMEM` (positive); use -1 like every
		 * other error path in this function */
		return -1;
	}
	qp->tx_mr = ibv_reg_mr(pd, qp->resp, size, IBV_ACCESS_LOCAL_WRITE);
	if (!qp->tx_mr) {
		log_error("Failed to register sw response mr");
		goto err_sw_tx;
	}

	qp->rdma_buf = memalign(4096, qp->rdma_buf_len);
	if (!qp->rdma_buf) {
		log_error("Failed to allocate sw rdma buffer");
		goto err_sw_rdma;
	}
	qp->rdma_mr = ibv_reg_mr(pd, qp->rdma_buf, qp->rdma_buf_len,
				IBV_ACCESS_LOCAL_WRITE |
				IBV_ACCESS_REMOTE_READ |
				IBV_ACCESS_REMOTE_WRITE);
	if (!qp->rdma_mr) {
		log_error("Failed to register sw rdma buffer mr");
		goto err_sw_rdma_mr;
	}

	/* FW QP */
	qp = &dev->ctrl.fw;

	/* fw post one response to rx */
	size = sizeof(*qp->resp);
	qp->dma->rx_buf = memalign(4096, size);
	if (!qp->dma->rx_buf) {
		log_error("Failed to allocate response buffer");
		goto err_fw_rx;
	}
	qp->resp = (void *)qp->dma->rx_buf;

	/* allocate one request for tx */
	size = sizeof(*qp->reqs);
	qp->reqs = memalign(4096, size);
	if (!qp->reqs) {
		log_error("Failed to allocate fw request buffer");
		goto err_fw_reqs;
	}
	qp->tx_mr = ibv_reg_mr(pd, qp->reqs, size, IBV_ACCESS_LOCAL_WRITE);
	if (!qp->tx_mr) {
		log_error("Failed to register fw request mr");
		goto err_fw_tx;
	}

	return 0;
err_fw_tx:
	free(qp->reqs);
err_fw_reqs:
	free(qp->resp);	/* fw resp aliases qp->dma->rx_buf */
err_fw_rx:
	/* fall through into sw QP teardown */
	qp = &dev->ctrl.sw;
	ibv_dereg_mr(qp->rdma_mr);
err_sw_rdma_mr:
	free(qp->rdma_buf);
err_sw_rdma:
	ibv_dereg_mr(qp->tx_mr);
err_sw_tx:
	free(qp->resp);
	return -1;
}

/*
 * Release the sw/fw descriptor tunnel QP resources created by
 * virtnet_vq_tunnel_init().  MRs are deregistered before their backing
 * buffers are freed; both QP structs are zeroed afterwards so stale
 * pointers cannot be reused.
 */
static void virtnet_vq_tunnel_deinit(struct virtnet_device *dev)
{
	struct virtnet_qp *fw = &dev->ctrl.fw;
	struct virtnet_qp *sw = &dev->ctrl.sw;

	/* FW QP: tx request buffer and rx response buffer */
	ibv_dereg_mr(fw->tx_mr);
	free(fw->reqs);
	free(fw->resp);
	memset(fw, 0, sizeof(*fw));

	/* SW QP: rdma buffer then tx response buffer */
	ibv_dereg_mr(sw->rdma_mr);
	free(sw->rdma_buf);
	ibv_dereg_mr(sw->tx_mr);
	free(sw->resp);
	memset(sw, 0, sizeof(*sw));
}

/**
 * Enroll device to global epoll and enable device desc tunnel event.
 *
 * Registers the SF completion channel fd with the global epoll loop
 * (events dispatch to virtnet_vq_tunnel_epoll_cb), then arms the sw rx
 * CQ so the first completion generates a channel event.
 *
 * NOTE(review): err_if() is assumed to log and jump to free_exit on a
 * non-zero ret -- TODO confirm against its definition; nothing is
 * actually freed at that label despite the name.
 *
 * @param[in] dev
 * 	Pointer to device
 * return
 * 	0 on success, error otherwise
 */
int
virtnet_vq_tunnel_epoll(struct virtnet_device *dev)
{
	dev->ctrl.sw.cb_data.cb = virtnet_vq_tunnel_epoll_cb;
	ret = virtnet_epoll_add(dev->sf_verbs.channel->fd,
				&dev->ctrl.sw.cb_data);
	err_if(ret, "failed add epoll source");

	/* Arm the CQ: request notification for the next completion */
	ret = ibv_req_notify_cq(dev->ctrl.sw.dma->rx_cq, 0);
	err_if(ret, "failed to arm cq");

free_exit:
	return ret;
}

/**
 * Destroy desc tunnel DMA helper and sw/fw resoruces.
 *
 * Teardown is the reverse of virtnet_vq_tunnel_create(): the snap vq
 * object first (stops fw traffic into the tunnel QP), then the sw/fw
 * buffers and MRs, then the DMA helper itself.
 */
static void
virtnet_vq_tunnel_destroy(struct virtnet_device *dev)
{
	snap_virtio_net_destroy_queue(dev->ctrl_vq.snap_q);
	virtnet_vq_tunnel_deinit(dev);
	snap_dma_q_destroy(dev->ctrl.dma);
	/* Clear the slot so virtnet_vqs_destroy() sees no ctrl vq */
	memset(&dev->ctrl_vq, 0, sizeof(dev->ctrl_vq));
}

/**
 * Prepare vq initial parameters and create vq.
 *
 * Fills the common vattr fields from the driver-negotiated features
 * (ring format, virtio version, event mode), creates the snap queue
 * object, queries it back and moves it to the RDY state.
 *
 * @param[in] dev
 * 	Pointer to device
 * @param[in,out] attr
 * 	Queue attributes; caller pre-sets offload type / tisn_or_qpn,
 * 	this function fills the rest and updates it via the query
 * @param[in] idx
 * 	Virtio queue index
 * @return
 * 	The ready queue object, or NULL on failure (the queue object is
 * 	destroyed on any post-creation error)
 */
static struct snap_virtio_net_queue *
virtnet_vq_snap_create(struct virtnet_device *dev,
		       struct snap_virtio_net_queue_attr *attr, int idx)
{
	struct snap_virtio_net_queue *q;
	int ret;

	/* Ring layout follows the driver-negotiated feature bits */
	if (dev->registers->vattr.driver_feature &
	    (1ULL << VIRTIO_F_RING_PACKED))
		attr->vattr.type = SNAP_VIRTQ_PACKED_MODE;
	else
		attr->vattr.type = SNAP_VIRTQ_SPLIT_MODE;
	if (dev->registers->vattr.driver_feature &
	    (1ULL << VIRTIO_F_VERSION_1))
		attr->vattr.virtio_version_1_0 = true;
	attr->vattr.full_emulation = true;
	attr->vattr.ev_mode =
		attr->vattr.msix_vector == VIRTIO_MSI_NO_VECTOR ?
		SNAP_VIRTQ_NO_MSIX_MODE : SNAP_VIRTQ_MSIX_MODE;
	attr->vattr.event_qpn_or_msix = attr->vattr.msix_vector;
	attr->vhca_id = dev->sf_verbs.vhca_id;
	attr->vattr.idx = idx;
	attr->vattr.pd = dev->sf_verbs.pd;
	/* TODO handling VIRTIO_MSI_NO_VECTOR */

	q = snap_virtio_net_create_queue(dev->snap_dev, attr);
	if (!q) {
		log_error("Failed to create virtio-net virtq object");
		return NULL;
	}

	/* Query refreshes attr with device-assigned values */
	ret = snap_virtio_net_query_queue(q, attr);
	if (ret) {
		log_error("Failed to query virtq object");
		goto err_q;
	}

	attr->vattr.state = SNAP_VIRTQ_STATE_RDY;
	ret = snap_virtio_net_modify_queue(q,
					   SNAP_VIRTIO_NET_QUEUE_MOD_STATE,
					   attr);
	if (ret) {
		log_error("Failed to modify virtq state to ready");
		goto err_q;
	}

	return q;

err_q:
	snap_virtio_net_destroy_queue(q);
	return NULL;
}

/**
 * Create control vq object.
 *
 * Creates the DMA helper QP pair, initializes the sw/fw descriptor
 * tunnel resources, then creates the snap control vq in descriptor
 * tunnel offload mode, tunneled over the fw QP.
 *
 * @param[in] dev
 * 	Pointer to device
 * @param[in] idx
 * 	Virtio queue index
 * @return
 * 	0 on success, negative value otherwise
 */
int
virtnet_vq_tunnel_create(struct virtnet_device *dev, int idx)
{
	struct snap_virtio_net_queue_attr *attr = &dev->reg_vqs[idx];
	struct snap_context *sctx = dev->ctx->sctx;
	struct snap_dma_q_create_attr q_attr = {
		.tx_qsize = attr->vattr.size,
		.rx_qsize = attr->vattr.size,
		.tx_elem_size = sizeof(*dev->ctrl.sw.resp),
		.rx_elem_size = sizeof(*dev->ctrl.sw.reqs),
		.rx_cb = virtent_vq_tunnel_handle_req,
		.uctx = dev,
		.comp_vector = 0,
		.comp_context = dev,
	};
	struct snap_virtio_net_queue *q;
	int ret;

	dev->ctrl.sw.rdma_buf_len = 4096;
	dev->ctrl.fw.rdma_buf_len = 4096;

	/* was `... = dev->sf_verbs.channel,` -- comma-operator typo */
	q_attr.comp_channel = dev->sf_verbs.channel;
	dev->ctrl.dma = snap_dma_q_create(dev->sf_verbs.pd, &q_attr);
	if (!dev->ctrl.dma) {
		log_error("Failed to create dma control qp");
		return -1;
	}

	dev->ctrl.sw.dma = &dev->ctrl.dma->sw_qp;
	dev->ctrl.fw.dma = &dev->ctrl.dma->fw_qp;
	dev->ctrl.sw.cb_data.ctx = dev;

	ret = virtnet_vq_tunnel_init(dev);
	if (ret) {
		log_error("Failed to init control vq");
		goto err_init;
	}

	attr->vattr.max_tunnel_desc = sctx->virtio_net_caps.max_tunnel_desc;
	attr->vattr.ev_mode = SNAP_VIRTQ_MSIX_MODE;
	attr->tisn_or_qpn = dev->ctrl.dma->fw_qp.qp->qp_num;
	attr->vattr.offload_type = SNAP_VIRTQ_OFFLOAD_DESC_TUNNEL;
	q = virtnet_vq_snap_create(dev, attr, idx);
	if (!q) {
		log_error("Failed to create control snap vq");
		/* BUG FIX: ret was still 0 from virtnet_vq_tunnel_init()
		 * here, so this failure path used to return success */
		ret = -1;
		goto err_snap;
	}

	dev->ctrl_vq.snap_q = q;
	return 0;
err_snap:
	virtnet_vq_tunnel_deinit(dev);
err_init:
	snap_dma_q_destroy(dev->ctrl.dma);
	return ret;
}

/**
 * Create virtio-net data path ethernet vq.
 *
 * Configures offload flags from the driver-negotiated feature bits and
 * creates the queue in ETH frame offload mode.
 *
 * @param[in] dev
 * 	Pointer to device
 * @param[in] idx
 * 	Virtio queue index
 * @return
 * 	0 on success, -1 otherwise
 */
int
virtnet_vq_eth_create(struct virtnet_device *dev, int idx)
{
	struct snap_virtio_net_queue_attr *attr = &dev->reg_vqs[idx];
	uint64_t feats = dev->registers->vattr.driver_feature;
	struct snap_virtio_net_queue *q;

	/* Odd queue indices carry the TIS number */
	if (idx & 1)
		attr->tisn_or_qpn = dev->sf_verbs.tis_num;
	attr->vattr.offload_type = SNAP_VIRTQ_OFFLOAD_ETH_FRAME;

	/* Enable only the offloads the driver negotiated; flags that
	 * are not negotiated are left untouched */
	if (feats & (1ULL << VIRTIO_NET_F_HOST_TSO4))
		attr->tso_ipv4 = true;
	if (feats & (1ULL << VIRTIO_NET_F_HOST_TSO6))
		attr->tso_ipv6 = true;
	if (feats & (1ULL << VIRTIO_NET_F_CSUM))
		attr->tx_csum = true;
	if (feats & (1ULL << VIRTIO_NET_F_GUEST_CSUM))
		attr->rx_csum = true;

	q = virtnet_vq_snap_create(dev, attr, idx);
	if (!q) {
		log_error("Failed to create eth vq(%d)", idx);
		return -1;
	}
	dev->eth_vqs[idx].snap_q = q;
	return 0;
}

/* Tear down one ethernet vq and clear its slot in eth_vqs[]. */
static void
virtnet_vq_eth_destroy(struct virtnet_device *dev, int idx)
{
	snap_virtio_net_destroy_queue(dev->eth_vqs[idx].snap_q);
	memset(&dev->eth_vqs[idx], 0, sizeof(dev->eth_vqs[idx]));
}

/*
 * Destroy every vq owned by the device: the control vq first (after
 * unhooking its fd from the epoll loop so no more events fire), then
 * the ethernet vqs in reverse creation order.
 */
void virtnet_vqs_destroy(struct virtnet_device *dev)
{
	bool has_ctrl = !!dev->ctrl_vq.snap_q;
	int i;

	if (has_ctrl) {
		virtnet_epoll_del(dev->sf_verbs.channel->fd);
		virtnet_vq_tunnel_destroy(dev);
	}

	for (i = dev->num_eth_queues - 1; i >= 0; i--)
		virtnet_vq_eth_destroy(dev, i);

	log_debug("Destroy %d ETH VQs, %d Ctrl VQ",
		  dev->num_eth_queues, has_ctrl);
}

/*
 * Create all vqs for the device.
 *
 * Queue layout: N pairs of tx/rx ethernet queues, optionally followed
 * by one control queue.  With exactly 2 enabled queues (PXE boot) only
 * one tx/rx pair is created and there is no control queue.
 *
 * @param[in] dev
 * 	Pointer to device
 * @return
 * 	0 on success, non-zero otherwise; on failure all queues created
 * 	so far are destroyed.
 */
int virtnet_vqs_create(struct virtnet_device *dev)
{
	struct snap_virtio_net_queue_attr *q_attrs;
	/* BUG FIX: was uninitialized; it was read at the recovery call
	 * below even when no control queue exists (enabled_queues == 2),
	 * passing indeterminate garbage.  0 is the "no ctrl vq" value
	 * paired with ctrl_queue == false. */
	uint16_t ctrl_queue_idx = 0;
	int enabled_queues;
	int num_eth_queues;
	bool ctrl_queue;
	int i, ret;

	q_attrs = dev->reg_vqs;
	enabled_queues = dev->snap_ctrl->common.enabled_queues;

	if (enabled_queues < 2) {
		log_error("At least 2 queues(tx/rx) are needed, current %d",
			  enabled_queues);
		return EINVAL;
	}

	if (enabled_queues == 2) {
		/* In PXE boot, only one pair of ETH queue is needed */
		num_eth_queues = enabled_queues;
		ctrl_queue = false;
	} else {
		/* N pairs of tx/rx queues, 1 ctrl queue*/
		num_eth_queues = enabled_queues % 2 ?
			enabled_queues - 1 :
			enabled_queues - 2;
		ctrl_queue = true;
		ctrl_queue_idx = num_eth_queues;
	}

	if (virtnet_device_is_recovering(dev)) {
		ret = virtnet_device_queue_index_recover(dev, num_eth_queues,
						ctrl_queue, ctrl_queue_idx);
		if (ret)
			return ret;
	}

	for (i = 0; i < num_eth_queues; i++) {
		if (!q_attrs[i].vattr.enable)
			continue;
		ret = virtnet_vq_eth_create(dev, i);
		if (ret) {
			log_error("Failed to create data vq %d", i);
			goto err_eth;
		}
	}

	if (ctrl_queue) {
		if (!q_attrs[ctrl_queue_idx].vattr.enable) {
			log_error("Control VQ isn't enabled");
			ret = EINVAL;
			goto err_eth;
		}

		ret = virtnet_vq_tunnel_create(dev, ctrl_queue_idx);
		if (ret) {
			log_error("Failed to create ctrl vq");
			goto err_eth;
		}

		ret = virtnet_vq_tunnel_epoll(dev);
		if (ret) {
			log_error("Failed to create ctrl vq epoll");
			goto err_ctrl;
		}
	}

	log_debug("Created %d ETH VQs, %d Ctrl VQ",
		  num_eth_queues, ctrl_queue);
	dev->num_eth_queues = num_eth_queues;

	return 0;
err_ctrl:
	virtnet_vq_tunnel_destroy(dev);
err_eth:
	/* NOTE(review): this also "destroys" queues that were skipped by
	 * the enable check above (snap_q == NULL); assumed safe since
	 * virtnet_vqs_destroy() does the same -- TODO confirm
	 * snap_virtio_net_destroy_queue() tolerates NULL */
	for (--i; i >= 0; i--)
		virtnet_vq_eth_destroy(dev, i);
	return ret;
}

/*
 * Update the moderation period of a single queue.
 *
 * Queries the queue, validates that the requested period mode is
 * advertised by the device caps, and applies the new period settings.
 * Any failure is logged once at the shared exit point.
 */
static int virtnet_vq_update_period(struct virtnet_device *dev,
				    struct snap_virtio_net_queue *vnq)
{
	struct snap_virtio_net_queue_attr attr;
	int ret;

	/* Device must be open before its queues can be modified */
	if (!dev->snap_dev) {
		log_info("dev %d not open", dev->id);
		ret = -EINVAL;
		goto out;
	}

	ret = snap_virtio_net_query_queue(vnq, &attr);
	if (ret)
		goto out;

	/* Reject period modes the device does not advertise */
	ret = -EINVAL;
	if ((dev->period.mode == VIRTQ_PERIOD_UPON_EVENT) &&
	    !dev->ctx->sctx->virtio_net_caps.queue_period_upon_event) {
		log_error("queue period upon event is not supported");
		goto out;
	}
	if ((dev->period.mode == VIRTQ_PERIOD_UPON_CQE) &&
	    !dev->ctx->sctx->virtio_net_caps.queue_period_upon_cqe) {
		log_error("queue period upon cqe is not supported");
		goto out;
	}

	attr.vattr.queue_period_mode = dev->period.mode;
	attr.vattr.queue_period = dev->period.period;
	attr.vattr.queue_max_count = dev->period.max_count;

	ret = snap_virtio_net_modify_queue(vnq,
					   SNAP_VIRTIO_NET_QUEUE_PERIOD,
					   &attr);

out:
	if (ret)
		log_error("failed to modify device %d queue %d period",
			  dev->id, vnq->virtq.idx);

	return ret;
}

/*
 * Apply the device period settings to every RX queue.
 *
 * RX queues sit at even indices in eth_vqs[]; the walk stops at the
 * first missing queue or the first modification failure.
 */
int virtnet_vqs_update_period(struct virtnet_device *dev)
{
	struct snap_virtio_net_queue *vnq;
	int idx, ret = 0;

	/* Only modify RX queue */
	for (idx = 0; idx < dev->num_eth_queues; idx += 2) {
		vnq = dev->eth_vqs[idx].snap_q;
		if (!vnq)
			break;
		ret = virtnet_vq_update_period(dev, vnq);
		if (ret)
			break;
	}

	return ret;
}

