/*
 * Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "virtnet_vq.h"
#include "virtnet.h"

static void *virtnet_ctrl_vq_pop(struct virtnet_device *dev,
			struct virtnet_qp *qp,
			union virtnet_desc_tunnel_request *req);
static int virtnet_ctrl_vq_push(struct virtnet_device *dev,
				struct virtnet_vq_elem *elem,
				union virtnet_desc_tunnel_request *req);
static inline void * virtnet_memdup(const void *ptr, unsigned int size);

/**
 * Copy from a single contiguous buffer to a local rdma buf which is
 * logically (or physically) divided into several scatter-gather vectors,
 * and back, like memcpy() between two contiguous memory regions.
 * Data in single continuous buffer starting at address `buf' and
 * `bytes' bytes long will be copied to/from an iovec `iov' with
 * `iov_cnt' number of elements, starting at byte position `offset'
 * within the iovec.  If the iovec does not contain enough space,
 * only part of data will be copied, up to the end of the iovec.
 * Number of bytes actually copied will be returned, which is
 *  min(bytes, iov_size(iov)-offset)
 * `Offset' must point to the inside of iovec.
 */
static size_t
virtnet_iov_from_buf_full(const struct virtnet_iovec *iov,
			  unsigned int iov_cnt,
			  size_t offset, const void *buf, size_t bytes);
static size_t
virtnet_iov_to_buf_full(const struct virtnet_iovec *iov,
			const unsigned int iov_cnt,
			size_t offset, void *buf, size_t bytes);
static size_t virtnet_iov_size(const struct virtnet_iovec *iov,
			       const unsigned int iov_cnt);

/* cmds. */
static int
virtnet_handle_mac(struct virtnet_device *dev, uint8_t cmd,
		   struct virtnet_iovec *iov, unsigned int iov_cnt);

static int virtnet_handle_rx_mode(struct virtnet_device *dev,
				  uint8_t cmd,
				  struct virtnet_iovec *iov,
				  unsigned int iov_cnt);
static int virtnet_handle_vlan_table(struct virtnet_device *dev,
				     uint8_t cmd,
				     struct virtnet_iovec *iov,
				     unsigned int iov_cnt);
static int virtnet_handle_announce(struct virtnet_device *dev,
				   uint8_t cmd,
				   struct virtnet_iovec *iov,
				   unsigned int iov_cnt);
static int virtnet_handle_mq(struct virtnet_device *dev, uint8_t cmd,
			     struct virtnet_iovec *iov, unsigned int iov_cnt);

static size_t
virtnet_iov_to_buf_full(const struct virtnet_iovec *iov,
			const unsigned int iov_cnt,
			size_t offset, void *buf, size_t bytes)
{
	size_t copied = 0;
	size_t chunk;
	unsigned int idx = 0;

	/* Walk the vectors: skip `offset' bytes, then gather into buf. */
	while (idx < iov_cnt && (offset || copied < bytes)) {
		if (offset >= iov[idx].iov_len) {
			/* This whole vector lies before the start offset. */
			offset -= iov[idx].iov_len;
		} else {
			chunk = MIN(iov[idx].iov_len - offset, bytes - copied);
			memcpy(buf + copied, iov[idx].iov_base + offset, chunk);
			copied += chunk;
			offset = 0;
		}
		idx++;
	}
	/* copied == min(bytes, iov_size(iov) - original offset). */
	return copied;
}

static size_t
virtnet_iov_from_buf_full(const struct virtnet_iovec *iov,
			  unsigned int iov_cnt,
			  size_t offset, const void *buf, size_t bytes)
{
	size_t copied = 0;
	size_t chunk;
	unsigned int idx = 0;

	/* Walk the vectors: skip `offset' bytes, then scatter from buf. */
	while (idx < iov_cnt && (offset || copied < bytes)) {
		if (offset >= iov[idx].iov_len) {
			/* This whole vector lies before the start offset. */
			offset -= iov[idx].iov_len;
		} else {
			chunk = MIN(iov[idx].iov_len - offset, bytes - copied);
			memcpy(iov[idx].iov_base + offset, buf + copied, chunk);
			copied += chunk;
			offset = 0;
		}
		idx++;
	}
	/* copied == min(bytes, iov_size(iov) - original offset). */
	return copied;
}

/*
 * Drop `bytes' bytes from the front of the iovec, adjusting *iov and
 * *iov_cnt in place.  Returns the number of bytes actually discarded
 * (may be less than `bytes' if the iovec is shorter).
 */
static size_t
virtnet_iov_discard_front(struct virtnet_iovec **iov, unsigned int *iov_cnt,
                         size_t bytes)
{
	struct virtnet_iovec *vec = *iov;
	size_t dropped = 0;

	while (*iov_cnt > 0) {
		if (vec->iov_len > bytes) {
			/* Partially consume this vector and stop on it. */
			vec->iov_base += bytes;
			vec->iov_len -= bytes;
			dropped += bytes;
			break;
		}
		/* Fully consume this vector and move to the next one. */
		bytes -= vec->iov_len;
		dropped += vec->iov_len;
		*iov_cnt -= 1;
		vec++;
	}
	*iov = vec;
	return dropped;
}

/* Scatter `bytes' from buf into the iovec starting at `offset'. */
static inline size_t
virtnet_iov_from_buf(const struct virtnet_iovec *iov, unsigned int iov_cnt,
		     size_t offset, const void *buf, size_t bytes)
{
	/* Fast path: a compile-time size that fits in the first vector. */
	if (__builtin_constant_p(bytes) && iov_cnt &&
	    offset <= iov[0].iov_len && bytes <= iov[0].iov_len - offset) {
		memcpy(iov[0].iov_base + offset, buf, bytes);
		return bytes;
	}
	return virtnet_iov_from_buf_full(iov, iov_cnt, offset, buf, bytes);
}

/* Gather `bytes' from the iovec starting at `offset' into buf. */
static inline size_t
virtnet_iov_to_buf(const struct virtnet_iovec *iov,
		   const unsigned int iov_cnt,
		   size_t offset, void *buf, size_t bytes)
{
	/* Fast path: a compile-time size that fits in the first vector. */
	if (__builtin_constant_p(bytes) && iov_cnt &&
	    offset <= iov[0].iov_len && bytes <= iov[0].iov_len - offset) {
		memcpy(buf, iov[0].iov_base + offset, bytes);
		return bytes;
	}
	return virtnet_iov_to_buf_full(iov, iov_cnt, offset, buf, bytes);
}

/* Push a ctrl vq response for a packed ring — not implemented yet. */
static int
virtnet_ctrl_vq_packed_push(struct virtnet_device *dev,
			    struct virtnet_vq_elem *elem,
			    struct virtnet_desc_tunnel_request_packed *req)
{
	/* TODO: to support packed virtqueue. */
	(void)req;
	(void)elem;
	(void)dev;
	return 0;
}

/*
 * Push the completed ctrl vq element back to the host for a split ring:
 * DMA-write each device-writable buffer to its guest address, then send
 * a completion carrying the consumed avail index.
 * Returns 0 on success, non-zero on failure.
 * NOTE(review): err_if appears to log and `goto free_exit' on a true
 * condition (used with a free_exit label throughout this file) — confirm
 * against its definition in the headers.
 */
static int
virtnet_ctrl_vq_split_push(struct virtnet_device *dev,
			   struct virtnet_vq_elem *elem,
			   struct virtnet_desc_tunnel_request_split *req)
{
	int i;
	uint32_t rkey = 0;
	struct virtnet_qp *qp = &dev->ctrl.sw;
	struct snap_virtio_net_queue_attr ctrl_vq;
	int ret;

	/* The ctrl vq is the last enabled queue; use its DMA mkey. */
	ctrl_vq = dev->reg_vqs[dev->snap_ctrl->common.enabled_queues -1];
	rkey = ctrl_vq.vattr.dma_mkey;
	/* Write every device-writable buffer back to host memory. */
	for (i = 0; i < elem->in_num; i++ ) {
		ret = snap_dma_q_write(dev->ctrl.dma,
				       elem->in_lbuf[i].iov_base,
				       elem->in_lbuf[i].iov_len,
				       dev->ctrl.sw.rdma_mr->lkey,
				       elem->in_addr[i], rkey, NULL);
		err_if(ret, "failed to write control vq send response %d", i);
	}
	/* Build the split-ring response and notify the other side. */
	qp->resp->split.avail_index = req->avail_index;
	qp->resp->split.len = 1;
	qp->resp->split.reserved = 0;
	ret = snap_dma_q_send_completion(dev->ctrl.dma, dev->ctrl.sw.resp,
					 sizeof(*dev->ctrl.sw.resp));
	err_if(ret, "failed to send control vq response");
	/* Flush to make sure the writes and completion went out. */
	ret = snap_dma_q_flush(dev->ctrl.dma) > 0 ? 0 : -1;
	err_if(ret, "failed to get control vq send response");
free_exit:
	return ret;
}

/* Dispatch the ctrl vq response push based on the negotiated ring layout. */
static int
virtnet_ctrl_vq_push(struct virtnet_device *dev, struct virtnet_vq_elem *elem,
		     union virtnet_desc_tunnel_request *req)
{
	uint64_t features = dev->registers->vattr.driver_feature;

	if (features & (1ULL << VIRTIO_F_RING_PACKED))
		return virtnet_ctrl_vq_packed_push(dev, elem, &req->packed);
	return virtnet_ctrl_vq_split_push(dev, elem, &req->split);
}

/*
 * Duplicate `size' bytes from `ptr' into a freshly malloc'ed buffer.
 * Returns NULL if `ptr' is NULL or the allocation fails; the caller
 * owns (and must free()) the returned buffer.
 */
static inline void *
virtnet_memdup(const void *ptr, unsigned int size)
{
	void *dup;

	if (!ptr)
		return NULL;
	dup = malloc(size);
	/* The original dereferenced an unchecked malloc() result. */
	if (!dup)
		return NULL;
	memcpy(dup, ptr, size);
	return dup;
}

/*
 * Allocate a virtq element plus its trailing in/out address and iovec
 * arrays in a single malloc, and point the array members into that
 * storage.  Returns NULL on allocation failure (the original
 * dereferenced an unchecked malloc() result); the caller frees the
 * whole element with a single free().
 */
static void *
virtnet_alloc_element(unsigned int out_num, unsigned int in_num)
{
	struct virtnet_vq_elem *elem = NULL;
	/* Layout: elem | in_addr[] | out_addr[] | in_lbuf[] | out_lbuf[] */
	size_t in_addr_ofs = sizeof(*elem);
	size_t out_addr_ofs = in_addr_ofs + in_num * sizeof(elem->in_addr[0]);
	size_t out_addr_end =
		out_addr_ofs + out_num * sizeof(elem->out_addr[0]);
	size_t in_lbuf_ofs = out_addr_end;
	size_t out_lbuf_ofs = in_lbuf_ofs + in_num * sizeof(elem->in_lbuf[0]);
	size_t out_lbuf_end =
		out_lbuf_ofs + out_num * sizeof(elem->out_lbuf[0]);

	elem = malloc(out_lbuf_end);
	if (!elem)
		return NULL;
	elem->out_num = out_num;
	elem->in_num = in_num;
	elem->in_addr = (void *)elem + in_addr_ofs;
	elem->out_addr = (void *)elem + out_addr_ofs;
	elem->in_lbuf = (void *)elem + in_lbuf_ofs;
	elem->out_lbuf = (void *)elem + out_lbuf_ofs;
	return elem;
}

/*
 * Record one descriptor at slot *p_num_sg of the addr[]/lbuf[] arrays,
 * pointing its local buffer at `rdma_buf', and — for host-readable
 * (i.e. not WRITE-flagged) descriptors — DMA-read the guest data into
 * that local buffer.  *p_num_sg is incremented on success.
 * Returns 0 on success, non-zero on DMA failure.
 * NOTE(review): err_if appears to log and `goto free_exit' on a true
 * condition — confirm against its definition in the headers.
 */
static int
virtnet_vq_dma_desc(struct virtnet_device *dev, unsigned int *p_num_sg,
		    hwaddr *addr, struct virtnet_iovec *lbuf,
		    hwaddr pa, size_t sz,
		    uint16_t flags, uint8_t *rdma_buf)
{
	unsigned int num_sg = *p_num_sg;
	int ret = 0;
	uint64_t rkey = 0;
	struct snap_virtio_net_queue_attr ctrl_vq;

	/* The ctrl vq is the last enabled queue; use its DMA mkey. */
	ctrl_vq = dev->reg_vqs[dev->snap_ctrl->common.enabled_queues -1];
	rkey = ctrl_vq.vattr.dma_mkey;
	lbuf[num_sg].iov_base = rdma_buf;
	lbuf[num_sg].iov_len = sz;
	addr[num_sg] = pa;
	if (!(flags & VIRTQ_DESC_F_WRITE)) {
		/* Host-readable descriptor: pull its contents locally. */
		ret = snap_dma_q_read(dev->ctrl.dma,
				      lbuf[num_sg].iov_base, sz,
				      dev->ctrl.sw.rdma_mr->lkey,
				      addr[num_sg], rkey, NULL);
		err_if(ret, "tunnel rdma read failed");
		/* Flush and wait so the data is valid before parsing. */
		ret = snap_dma_q_flush(dev->ctrl.dma);
		ret = ret > 0 ? 0 : -1;
		err_if(ret, "tunnel rdma failed to read result");
	}
	num_sg++;
	*p_num_sg = num_sg;
free_exit:
	return ret;
}

/* Pop a ctrl vq element from a packed ring — not implemented yet. */
static void *
virtnet_ctrl_vq_packed_pop(struct virtnet_device *dev, struct virtnet_qp *qp,
			   struct virtnet_desc_tunnel_request_packed *req)
{
	/* TODO: to support packed virtqueue. */
	(void)dev;
	(void)qp;
	(void)req;
	return NULL;
}

/*
 * Pop one request from the split ctrl vq: DMA every descriptor's data
 * into the local rdma buffer, then package the collected addresses and
 * local buffers into a freshly allocated virtq element (readable
 * descriptors first, writable ones after).
 * Returns the element (caller frees it) or NULL on error.
 */
static void *
virtnet_ctrl_vq_split_pop(struct virtnet_device *dev, struct virtnet_qp *qp,
			  struct virtnet_desc_tunnel_request_split *req)
{
	unsigned out_num, in_num, elem_entries;
	int i, ret = 0;
	hwaddr addr[VIRTIO_DESC_REQ_MAX_DESC];
	struct virtnet_iovec lbuf[VIRTIO_DESC_REQ_MAX_DESC];
	struct virtnet_vq_desc *desc = NULL;
	uint8_t *rdma_buf = NULL;
	struct virtnet_vq_elem *elem = NULL;

	out_num = in_num = 0;
	rdma_buf = dev->ctrl.sw.rdma_buf;
	/*
	 * addr[]/lbuf[] are fixed-size; num_desc comes from the other side
	 * and must not be trusted, so bound it before the collection loop
	 * can overrun them.
	 */
	err_if(req->num_desc > VIRTIO_DESC_REQ_MAX_DESC,
	       "too many descriptors in request");
	/* At least there is one desc. */
	desc = &req->desc[0];
	if (desc->flags & VIRTQ_DESC_F_INDIRECT) {
		/* TODO: loop over the indirect descriptor table. */
		log_debug("Get indirect descriptor table");
	}
	/* Collect all the descriptors. */
	for (elem_entries = 0; elem_entries < req->num_desc; elem_entries++) {
		desc = &req->desc[elem_entries];
		/*
		 * Bounds-check BEFORE the DMA: the previous code only
		 * checked after the transfer had already written past the
		 * 4KB rdma buffer.
		 */
		err_if((rdma_buf - dev->ctrl.sw.rdma_buf) + desc->len > 4096,
		       "rdma buf overflow");
		if (desc->flags & VIRTQ_DESC_F_WRITE) {
			/* Writable descriptors land after the readable ones. */
			ret = virtnet_vq_dma_desc(dev, &in_num, addr + out_num,
					lbuf + out_num, desc->addr, desc->len,
					desc->flags, rdma_buf);
		} else {
			err_if(in_num, "Incorrect order of descriptors");
			ret = virtnet_vq_dma_desc(dev, &out_num, addr, lbuf,
					desc->addr, desc->len, desc->flags, rdma_buf);
		}
		err_if(ret, "dma descriptor error");
		rdma_buf += desc->len;
	}
	/* Check if the last descriptor of this request is the end of chain. */
	if (desc->flags & VIRTQ_DESC_F_NEXT) {
		/* TODO: read more descriptor by ourself. */
		log_debug("More descriptor available");
	}
	/* Now copy what we have collected and DMAed. */
	elem = virtnet_alloc_element(out_num, in_num);
	err_if(!elem, "cannot allocate virtq element");
	elem->index = req->avail_index;
	elem->ndescs = 1; /* split vq always 1. */
	/* read-only descriptors. */
	for (i = 0; i < out_num; i++) {
		elem->out_addr[i] = addr[i];
		elem->out_lbuf[i] = lbuf[i];
	}
	/* writable descriptors. */
	for (i = 0; i < in_num; i++) {
		elem->in_addr[i] = addr[out_num + i];
		elem->in_lbuf[i] = lbuf[out_num + i];
	}
	return elem;
free_exit:
	return NULL;
}

/* Dispatch the ctrl vq element pop based on the negotiated ring layout. */
static void *
virtnet_ctrl_vq_pop(struct virtnet_device *dev, struct virtnet_qp *qp,
		    union virtnet_desc_tunnel_request *req)
{
	uint64_t features = dev->registers->vattr.driver_feature;

	if (features & (1ULL << VIRTIO_F_RING_PACKED))
		return virtnet_ctrl_vq_packed_pop(dev, qp, &req->packed);
	return virtnet_ctrl_vq_split_pop(dev, qp, &req->split);
}

/*
 * Parse and execute one control-virtqueue command: pop the element,
 * read the virtio_net_ctrl_hdr, dispatch to the per-class handler,
 * write the status byte into the writable buffer and push the element
 * back to the host.
 */
void
virtnet_vq_ctrl_cmds_parse(struct virtnet_device *dev, struct virtnet_qp *qp,
			   union virtnet_desc_tunnel_request *req)
{
	struct virtnet_vq_elem *elem = NULL;
	int ret = 0;
	struct virtnet_iovec *iov = NULL, *iov2 = NULL;
	unsigned int iov_cnt;
	size_t s;
	struct virtio_net_ctrl_hdr ctrl;
	uint8_t status = 0xFF;	/* reported if no handler ran. */

	elem = virtnet_ctrl_vq_pop(dev, qp, req);
	err_if(!elem, "No valid virtq elem found!!!");
	iov_cnt = elem->out_num;
	/* Work on a copy: discard_front below modifies the iovec array. */
	iov2 = iov = virtnet_memdup (elem->out_lbuf,
				     sizeof(struct virtnet_iovec) *
				     elem->out_num);
	err_if(!iov2 && elem->out_num, "cannot duplicate out iovec");
	s = virtnet_iov_to_buf(iov, iov_cnt, 0, &ctrl, sizeof(ctrl));
	virtnet_iov_discard_front(&iov, &iov_cnt, sizeof(ctrl));
	err_if(s != sizeof(ctrl),
	       "iov_to_buf didn't get correct ctrl header");
	log_debug("%s(%i): ctrl vq class: %hhu",
		  dev->flag & VIRTNET_DEV_PF ? "PF" : "VF", dev->id,
		  ctrl.class);
	if (ctrl.class == VIRTIO_NET_CTRL_RX) {
		status = virtnet_handle_rx_mode(dev, ctrl.cmd, iov, iov_cnt);
	} else if (ctrl.class == VIRTIO_NET_CTRL_MAC) {
		status = virtnet_handle_mac(dev, ctrl.cmd, iov, iov_cnt);
	} else if (ctrl.class == VIRTIO_NET_CTRL_VLAN) {
		status = virtnet_handle_vlan_table(dev, ctrl.cmd,
						   iov, iov_cnt);
	} else if (ctrl.class == VIRTIO_NET_CTRL_ANNOUNCE) {
		status = virtnet_handle_announce(dev, ctrl.cmd, iov, iov_cnt);
	} else if (ctrl.class == VIRTIO_NET_CTRL_MQ) {
		status = virtnet_handle_mq(dev, ctrl.cmd, iov, iov_cnt);
	} else {
		log_error("unknown ctrl vq class: %hhu", ctrl.class);
	}
	/* write back to local rdma buf which mapped to writable desc. */
	s = virtnet_iov_from_buf
		(elem->in_lbuf, elem->in_num, 0, &status, sizeof(status));
	err_if(s != sizeof(status), "Buf size miss match");
	/* dma back to remote(host) memory - the writable desc. */
	ret = virtnet_ctrl_vq_push(dev, elem, req);
	err_if(ret, "ctrl_vq_push error!!!");
free_exit:
	/*
	 * Single cleanup path: the err_if jumps previously returned
	 * without freeing iov2/elem and leaked them on every error.
	 * free(NULL) is a no-op, so no guards are needed.
	 */
	free(iov2);
	free(elem);
}

/* ctrl cmds handler. */
/*
 * Handle VIRTIO_NET_CTRL_VLAN commands: install (ADD) or remove (DEL)
 * the HW flow for the 16-bit VLAN id carried in the request.
 * Returns VIRTIO_NET_OK on success, VIRTIO_NET_ERR otherwise.
 */
static int
virtnet_handle_vlan_table(struct virtnet_device *dev, uint8_t cmd,
			  struct virtnet_iovec *iov, unsigned int iov_cnt)
{
	uint16_t vid;
	size_t s;
	int ret;
	void *dev_flow = NULL;

	s = virtnet_iov_to_buf(iov, iov_cnt, 0, &vid, sizeof(vid));
	err_if(s != sizeof(vid), "cannot get vid");
	err_if(vid >= VIRTNET_MAX_VLAN, "vid[%u] is greater than max vid[%u]",
	       vid, VIRTNET_MAX_VLAN);
	if (cmd == VIRTIO_NET_CTRL_VLAN_ADD) {
		if (!dev->vlan_table.entries[vid].dev_flow) {
			ret = virtnet_sf_vlan_flow_apply(dev, vid, &dev_flow);
			err_if(ret, "cannot apply vlan[vid=%u] flow", vid);
			/*
			 * Record the flow only after a successful apply;
			 * the original stored it before checking ret.
			 */
			dev->vlan_table.entries[vid].dev_flow = dev_flow;
		}
	} else if (cmd == VIRTIO_NET_CTRL_VLAN_DEL) {
		if (dev->vlan_table.entries[vid].dev_flow) {
			ret = virtnet_sf_vlan_flow_destroy
				(dev, &dev->vlan_table.entries[vid].dev_flow);
			err_if(ret, "cannot destroy vlan[vid=%u] flow",
			       vid);
			dev->vlan_table.entries[vid].dev_flow = NULL;
		}
	} else {
		log_error("unknown cmd[%u]", cmd);
		return VIRTIO_NET_ERR;
	}
	return VIRTIO_NET_OK;
free_exit:
	return VIRTIO_NET_ERR;
}

/* Handle VIRTIO_NET_CTRL_ANNOUNCE — currently a no-op, always succeeds. */
static int
virtnet_handle_announce(struct virtnet_device *dev, uint8_t cmd,
			struct virtnet_iovec *iov, unsigned int iov_cnt)
{
	(void)iov_cnt;
	(void)iov;
	(void)cmd;
	(void)dev;
	return 0;
}

/*
 * Handle VIRTIO_NET_CTRL_MQ: read the 16-bit queue-pair count from the
 * request, program it into the SF and persist it for recovery.
 * Returns VIRTIO_NET_OK on success, VIRTIO_NET_ERR otherwise.
 */
static int
virtnet_handle_mq(struct virtnet_device *dev, uint8_t cmd,
		  struct virtnet_iovec *iov, unsigned int iov_cnt)
{
	int ret;
	size_t copied;
	uint16_t n_mq;

	copied = virtnet_iov_to_buf(iov, iov_cnt, 0, &n_mq, sizeof(n_mq));
	err_if(copied != sizeof(n_mq), "cannot get mq from request");
	/* Program the data path first, then persist the setting. */
	ret = virtnet_sf_mq_update(dev, n_mq);
	err_if(ret, "failed to set mq %hu", n_mq);
	ret = virtnet_device_mq_save(dev, n_mq);
	err_if(ret, "cannot save mq to recovery file");
	log_debug("%s(%i): mq changed to %hu",
		  dev->flag & VIRTNET_DEV_PF ? "PF" : "VF",
		  dev->id, n_mq);
	return VIRTIO_NET_OK;
free_exit:
	return VIRTIO_NET_ERR;
}

static int
virtnet_handle_mac(struct virtnet_device *dev, uint8_t cmd,
		   struct virtnet_iovec *iov, unsigned int iov_cnt)
{
	size_t s = 0;
	uint8_t *macs = NULL;
	void *dev_flow = NULL;
	struct virtnet_mac_table mac_table = {0};
	struct virtio_net_ctrl_mac mac_data = {0};
	int ret = 0, i, j;

	if (cmd == VIRTIO_NET_CTRL_MAC_ADDR_SET) {
		if (virtnet_iov_size(iov, iov_cnt) != ETH_ALEN)
			return VIRTIO_NET_ERR;
		s = virtnet_iov_to_buf
			(iov, iov_cnt, 0, dev->mac.addr, ETH_ALEN);
		ret = virtnet_sf_mac_flow_apply(dev, dev->mac.addr,
						&dev_flow);
		err_if(ret, "cannot apply device's mac flow rule");
		ret = virtnet_device_mac_save(dev, dev->mac.addr);
		err_if(ret, "cannot save mac to recovery file");
		return VIRTIO_NET_OK;
	}
	if (cmd != VIRTIO_NET_CTRL_MAC_TABLE_SET) {
		return VIRTIO_NET_ERR;
	}
	mac_table.entries =
		calloc(VIRTNET_MAC_TABLE_ENTRIES, sizeof(*mac_table.entries));
	/* first part is uni-cast. */
	macs = calloc(VIRTNET_MAC_TABLE_ENTRIES, ETH_ALEN);
	s = virtnet_iov_to_buf(iov, iov_cnt, 0, &mac_data.entries,
			       sizeof(mac_data.entries));
	err_if(s != sizeof(mac_data.entries), "cannot get mac entries");
	virtnet_iov_discard_front(&iov, &iov_cnt, s);
	err_if((mac_data.entries * ETH_ALEN > virtnet_iov_size(iov, iov_cnt)),
		"uni-cast mac table is not complete: got entries[%d] "
		"but table is smaller than it", mac_data.entries);
	if (mac_data.entries <= VIRTNET_MAC_TABLE_ENTRIES) {
		s = virtnet_iov_to_buf(iov, iov_cnt, 0, macs,
				       mac_data.entries * ETH_ALEN);
		err_if(s != mac_data.entries * ETH_ALEN,
		       "number of valid uni-cast mac addresses is "
		       "smaller then entries[%d]", mac_data.entries);
		mac_table.in_use += mac_data.entries;
	} else {
		mac_table.uni_overflow = 1;
	}
	/* second part is multi-cast. */
	virtnet_iov_discard_front(&iov, &iov_cnt,
				  mac_data.entries * ETH_ALEN);
	mac_table.first_multi = mac_table.in_use;
	s = virtnet_iov_to_buf(iov, iov_cnt, 0, &mac_data.entries,
			       sizeof(mac_data.entries));
	err_if(s != sizeof(mac_data.entries), "cannot get mac entries");
	virtnet_iov_discard_front(&iov, &iov_cnt, s);
	err_if((mac_data.entries * ETH_ALEN > virtnet_iov_size(iov, iov_cnt)),
		"multi-cast mac table is not complete: got entries[%d] "
		"but table is smaller than it", mac_data.entries);
	if (mac_data.entries <= VIRTNET_MAC_TABLE_ENTRIES - mac_table.in_use) {
		s = virtnet_iov_to_buf(iov, iov_cnt, 0,
				       &macs[mac_table.in_use * ETH_ALEN],
				       mac_data.entries * ETH_ALEN);
		err_if(s != mac_data.entries * ETH_ALEN,
		       "number of valid multi-cast mac addresses is "
		       "smaller then entries[%d]", mac_data.entries);
		mac_table.in_use += mac_data.entries;
	} else {
		mac_table.multi_overflow = 1;
	}
	/*
	 * For each mac address, try to find if it already exists.
	 * If yes and corresponding flow exists in HW,
	 * reuse this flow by store flow pointer.
	 */
	for (i = 0; i < mac_table.in_use; i++) {
		dev_flow = NULL;
		for (j = 0; j < dev->mac_table.in_use; j++) {
			if (!memcmp(&macs[i * ETH_ALEN],
				    dev->mac_table.entries[j].addr,
				    ETH_ALEN)) {
				dev_flow = dev->mac_table.entries[j].dev_flow;
				if (dev_flow)
					mac_table.entries[i].dev_flow =
								dev_flow;
				break;
			}
		}
		if (!dev_flow) {
			ret = virtnet_sf_mac_flow_apply
					(dev, &macs[i * ETH_ALEN], &dev_flow);
			err_if(!dev_flow, "cannot apply mac flow rule");
		}
		memcpy(mac_table.entries[i].addr, &macs[i * ETH_ALEN],
			ETH_ALEN);
		mac_table.entries[i].dev_flow = dev_flow;
	}
	/* destroy old, not used flows. */
	for (j = 0; j < dev->mac_table.in_use; j++) {
		for (i = 0; i < mac_table.in_use; i++) {
			if (mac_table.entries[i].dev_flow ==
					dev->mac_table.entries[j].dev_flow)
				break;
		}
		if (i == mac_table.in_use) {
			ret = virtnet_sf_mac_flow_destroy
				(dev, &dev->mac_table.entries[j].dev_flow);
			err_if(ret, "failed to destroy mac flow[%p], "
				"mac addr[%X:%X:%X:%X:%X:%X]",
				dev->mac_table.entries[j].dev_flow,
				dev->mac_table.entries[j].addr[0],
				dev->mac_table.entries[j].addr[1],
				dev->mac_table.entries[j].addr[2],
				dev->mac_table.entries[j].addr[3],
				dev->mac_table.entries[j].addr[4],
				dev->mac_table.entries[j].addr[5]);
		}
	}
	dev->mac_table.in_use = mac_table.in_use;
	dev->mac_table.first_multi = mac_table.first_multi;
	dev->mac_table.uni_overflow = mac_table.uni_overflow;
	dev->mac_table.multi_overflow = mac_table.multi_overflow;
	memset(dev->mac_table.entries, 0,
	       VIRTNET_MAC_TABLE_ENTRIES * sizeof(*dev->mac_table.entries));
	memcpy(dev->mac_table.entries, mac_table.entries,
	       mac_table.in_use * sizeof(*mac_table.entries));
	ret = 0;
free_exit:
	if (mac_table.entries)
		free(mac_table.entries);
	if (macs)
		free(macs);
	return (ret ? VIRTIO_NET_ERR:VIRTIO_NET_OK);
}

/*
 * Handle VIRTIO_NET_CTRL_RX commands: each command toggles one rx-mode
 * bit using the single on/off byte in the request, then the combined
 * mode is programmed into the SF and persisted for recovery.
 * Returns VIRTIO_NET_OK on success, VIRTIO_NET_ERR otherwise.
 */
static int
virtnet_handle_rx_mode(struct virtnet_device *dev, uint8_t cmd,
		       struct virtnet_iovec *iov, unsigned int iov_cnt)
{
	uint8_t on;
	size_t s;
	int ret;

	s = virtnet_iov_to_buf(iov, iov_cnt, 0, &on, sizeof(on));
	err_if(s != sizeof(on), "cannot get byte on");
	switch (cmd) {
	case VIRTIO_NET_CTRL_RX_PROMISC:
		dev->rx_mode.promisc = on;
		break;
	case VIRTIO_NET_CTRL_RX_ALLMULTI:
		dev->rx_mode.allmulti = on;
		break;
	case VIRTIO_NET_CTRL_RX_ALLUNI:
		dev->rx_mode.alluni = on;
		break;
	case VIRTIO_NET_CTRL_RX_NOMULTI:
		dev->rx_mode.nomulti = on;
		break;
	case VIRTIO_NET_CTRL_RX_NOUNI:
		dev->rx_mode.nouni = on;
		break;
	case VIRTIO_NET_CTRL_RX_NOBCAST:
		dev->rx_mode.nobcast = on;
		break;
	default:
		/* was "unknow command" with no detail. */
		log_error("unknown command[%u]", cmd);
		return VIRTIO_NET_ERR;
	}
	ret = virtnet_sf_set_rx_mode(dev);
	err_if(ret, "failed to set rx mode - promisc:%u | allmuti:%u | "
	       "alluni:%u | nomulti:%u | nouni:%u | nobcast:%u",
	       dev->rx_mode.promisc, dev->rx_mode.allmulti,
	       dev->rx_mode.alluni, dev->rx_mode.nomulti,
	       dev->rx_mode.nouni, dev->rx_mode.nobcast);
	ret = virtnet_device_rxmode_save(dev, dev->rx_mode.val);
	err_if(ret, "cannot save rx_mode to recovery file");
	return VIRTIO_NET_OK;
free_exit:
	return VIRTIO_NET_ERR;
}

/* Total number of payload bytes described by the iovec. */
static size_t
virtnet_iov_size(const struct virtnet_iovec *iov, const unsigned int iov_cnt)
{
	size_t total = 0;
	unsigned int idx;

	for (idx = 0; idx < iov_cnt; idx++)
		total += iov[idx].iov_len;
	return total;
}
