/*
 * Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <glob.h>
#include <libudev.h>
#include "virtnet.h"

/* IB device names for the two physical uplink ports and the LAG device,
 * indexed by the IB_DEV_* enum (see IB_NUM_PORTS).
 */
const char *ib_dev_names[IB_NUM_PORTS] = {
	[IB_DEV_P0]	= "mlx5_0",
	[IB_DEV_P1]	= "mlx5_1",
	[IB_DEV_LAG]	= "mlx5_bond_0",
};

/*
 * Propagate a physical port link change to the emulated device (PF or VF)
 * whose SF has the given vhca_id.  Only devices whose cached link status
 * differs from the new state are modified, so duplicate events are no-ops.
 */
static void virtnet_devices_update_link(struct virtnet_port_ctx *ctx,
					uint16_t vhca_id, bool up)
{
	struct virtnet_device_modify_fields val = {};
	struct virtnet_device *dev, *vf_dev;
	struct virtnet_context *vctx;
	bool cur_link;
	int i, j;

	val.link_up = up;
	vctx = ctx->virtnet_ctx;

	for (i = 0; i < vctx->sctx->virtio_net_pfs.max_pfs; i++) {
		dev = &vctx->dev_list[i];
		/* Skip PF slots that were never opened. */
		if (!dev->snap_dev)
			continue;

		if (dev->sf_verbs.vhca_id == vhca_id) {
			cur_link = dev->registers->status &
				VIRTIO_NET_S_LINK_UP;
			if (cur_link != up) {
				/* Serialize against the device event path
				 * which also modifies the registers. */
				pthread_mutex_lock(&dev->lock);
				virtnet_device_modify(dev, &val,
						      VIRTNET_MODIFY_LINK);
				pthread_mutex_unlock(&dev->lock);
				log_info("PF(%u): link %s", dev->id,
					 up ? "up": "down");
			}
			break;
		}

		/* VF updates only work after all VFs created */
		for (j = 0; j < dev->num_of_vfs; j++) {
			vf_dev = &dev->vf_dev[j];

			if (vf_dev->sf_verbs.vhca_id != vhca_id)
				continue;

			cur_link = vf_dev->registers->status &
				VIRTIO_NET_S_LINK_UP;
			if (cur_link != up) {
				/* VFs share the parent PF's lock. */
				pthread_mutex_lock(&dev->lock);
				virtnet_device_modify(vf_dev, &val,
						      VIRTNET_MODIFY_LINK);
				pthread_mutex_unlock(&dev->lock);
				log_info("PF(%u) VF (%u): link %s",
					 dev->id, vf_dev->id,
					 up ? "up": "down");
			}
			/* NOTE(review): this only breaks the VF loop; the
			 * outer PF scan continues.  Presumably vhca_ids are
			 * unique so the extra iterations are harmless —
			 * confirm. */
			break;
		}
	}
}

/*
 * Epoll callback for the IB async event fd of a physical port.  Drains all
 * pending async events and, for port up/down events, propagates the new
 * link state to the emulated device matching the port's vport vhca_id.
 */
static void virtnet_dev_interrupt_handler(void *cb_arg)
{
	struct mlx5dv_port devx_port = {};
	struct ibv_async_event event;
	struct virtnet_port_ctx *ctx;
	uint32_t port_num;

	ctx = (struct virtnet_port_ctx *)cb_arg;

	if (!ctx) {
		log_info("Invalid ISR context");
		return;
	}

	/* Read all message from the IB device and acknowledge them. */
	while (1) {

		/* ibv_get_async_event() returns non-zero when no further
		 * events are pending on the (non-blocking) async fd. */
		if (ibv_get_async_event(ctx->ib_ctx, &event))
			break;
		ibv_ack_async_event(&event);

		port_num = event.element.port_num;
		/* NOTE(review): mlx5dv_query_port() return value is not
		 * checked; on failure devx_port.flags stays 0 and the
		 * event is dropped below — confirm this is intended. */
		mlx5dv_query_port(ctx->ib_ctx, port_num, &devx_port);
		if (!(devx_port.flags & MLX5DV_QUERY_PORT_VPORT))
			break;

		switch (event.event_type) {
		case IBV_EVENT_PORT_ACTIVE:
			virtnet_devices_update_link(ctx,
						    devx_port.vport_vhca_id,
						    true);
			break;
		case IBV_EVENT_PORT_ERR:
			virtnet_devices_update_link(ctx,
						    devx_port.vport_vhca_id,
						    false);
			break;
		default:
			break;
		}
	}
}

/*
 * Open a DEVX-enabled verbs context on the port's IB device and register
 * its async fd with the epoll loop so link events reach
 * virtnet_dev_interrupt_handler().
 *
 * The ib_ctx/epoll_data pointers are published into port_ctx only after
 * every step succeeded; on any failure they are left NULL so the
 * unregister path can safely skip this port.  (The original code left
 * them pointing at freed/closed resources on the epoll-add failure path,
 * causing a double free in virtnet_device_isr_unregister().)
 *
 * Returns 0 on success, a positive errno-style code otherwise.
 */
static int _virtnet_device_isr_register(struct virtnet_port_ctx *port_ctx)
{
	struct virtnet_epoll_data *epoll_data;
	struct mlx5dv_context_attr attr = {};
	struct ibv_context *ib_ctx;
	struct ibv_device *dev;
	int ret;

	dev = port_ctx->dev;

	/* DEVX access is needed for the vport queries done in the ISR. */
	attr.flags = MLX5DV_CONTEXT_FLAGS_DEVX;
	ib_ctx = mlx5dv_open_device(dev, &attr);
	if (!ib_ctx) {
		log_error("Failed to open IB device: %s\n", dev->name);
		return ENODEV;
	}

	epoll_data = calloc(1, sizeof(*epoll_data));
	if (!epoll_data) {
		log_error("Failed to allocate epoll data for %s\n", dev->name);
		ret = EAGAIN;
		goto err_epoll_data;
	}

	epoll_data->cb = virtnet_dev_interrupt_handler;
	epoll_data->ctx = (void *)port_ctx;

	/* Register link status async event. */
	ret = virtnet_epoll_add(ib_ctx->async_fd, epoll_data);
	if (ret)
		goto err_epoll_add;

	/* Publish only after full success. */
	port_ctx->ib_ctx = ib_ctx;
	port_ctx->epoll_data = epoll_data;

	return 0;

err_epoll_add:
	free(epoll_data);
err_epoll_data:
	ibv_close_device(ib_ctx);
	return ret;
}

/*
 * Undo _virtnet_device_isr_register() for every port: remove the async fd
 * from epoll, free the epoll bookkeeping and close the verbs context.
 *
 * Ports whose registration never ran (or failed) have a NULL ib_ctx and
 * are skipped — the original code dereferenced port_ctx->ib_ctx
 * unconditionally, crashing when registration failed partway through the
 * port list.  Pointers are cleared after release so a repeated call is a
 * harmless no-op.
 */
static void
virtnet_device_isr_unregister(struct virtnet_context *ctx)
{
	struct virtnet_port_ctx *port_ctx;
	int i;

	for (i = 0; i < IB_NUM_PORTS; i++) {
		port_ctx = ctx->port_ctx[i];

		if (!port_ctx || !port_ctx->ib_ctx)
			continue;

		virtnet_epoll_del(port_ctx->ib_ctx->async_fd);
		free(port_ctx->epoll_data);
		ibv_close_device(port_ctx->ib_ctx);
		port_ctx->epoll_data = NULL;
		port_ctx->ib_ctx = NULL;
	}
}

/*
 * Register the async-event handler on every discovered IB port.
 * On the first failure, already-registered ports are rolled back.
 * Returns 0 on success, the failing port's error code otherwise.
 */
static int virtnet_device_isr_register(struct virtnet_context *ctx)
{
	int port, err;

	for (port = 0; port < IB_NUM_PORTS; port++) {
		struct virtnet_port_ctx *pctx = ctx->port_ctx[port];

		if (!pctx)
			continue;

		err = _virtnet_device_isr_register(pctx);
		if (err) {
			virtnet_device_isr_unregister(ctx);
			return err;
		}
	}

	return 0;
}

/*
 * Discover the VFs of a pre-configured PF and bring each one up: create
 * its SF, open the emulated device and initialize/restore its recovery
 * state.  On failure, all VFs opened so far are torn down and
 * pf_dev->num_of_vfs stays 0.
 */
static void virtnet_device_vf_scan(struct virtnet_context *ctx,
				   struct virtnet_device *pf_dev)
{
	struct snap_virtio_net_device_attr attr = {};
	struct virtnet_device *vf_dev;
	int ret, i;

	snap_virtio_net_query_device(pf_dev->snap_dev, &attr);
	if (attr.vattr.num_of_vfs)
		log_info("Found (%d) VFs", attr.vattr.num_of_vfs);
	else
		return;

	pf_dev->vf_dev = calloc(attr.vattr.num_of_vfs,
				sizeof(struct virtnet_device));
	/* Original code dereferenced the array without checking. */
	if (!pf_dev->vf_dev) {
		log_error("Failed to allocate %d VF devices",
			  attr.vattr.num_of_vfs);
		return;
	}

	for (i = 0; i < attr.vattr.num_of_vfs; i++) {
		vf_dev = &pf_dev->vf_dev[i];
		vf_dev->id = i;
		vf_dev->ctx = ctx;
		vf_dev->port_ctx = pf_dev->port_ctx;
		vf_dev->snap_pci = &pf_dev->snap_pci->vfs[i];
		vf_dev->flag = VIRTNET_DEV_VF;

		virtnet_device_check_recover(vf_dev);

		ret = virtnet_sf_create(vf_dev);
		if (ret) {
			log_error("Failed to create SF for VF %i", i);
			goto err_sf;
		}

		ret = virtnet_device_open(vf_dev);
		if (ret) {
			log_error("Failed to open VF %i", i);
			goto err_open;
		}

		virtnet_device_mark_rec_file(vf_dev);

		if (!virtnet_device_is_recovering(vf_dev))
			if (virtnet_device_rec_init(vf_dev))
				goto err_open;
	}

	pf_dev->num_of_vfs = attr.vattr.num_of_vfs;

	return;

err_open:
	virtnet_sf_destroy(vf_dev);
err_sf:
	/* Unwind the VFs that were fully brought up before the failure. */
	for (--i; i >= 0; i--) {
		vf_dev = &pf_dev->vf_dev[i];
		virtnet_device_close(vf_dev);
		virtnet_sf_destroy(vf_dev);
	}
	/* Original code leaked the array here. */
	free(pf_dev->vf_dev);
	pf_dev->vf_dev = NULL;
}

/*
 * Enumerate all plugged virtio-net PFs on the emulation manager, open
 * each (SF + emulated device + recovery state), invoke the user's
 * pre-config callback and scan its VFs.  On failure, PFs opened so far
 * are torn down.
 */
static void virtnet_device_scan(struct virtnet_context *ctx,
				struct virtnet_device *dev_list)
{
	struct snap_context *sctx = ctx->sctx;
	struct virtnet_port_ctx *port_ctx;
	struct virtnet_device *dev;
	struct snap_pci **pfs;
	int ret, i;

	pfs = calloc(sctx->virtio_net_pfs.max_pfs,
		     sizeof(struct snap_pci *));
	/* Original code passed a NULL list to snap_get_pf_list(). */
	if (!pfs) {
		log_error("Failed to allocate PF list");
		return;
	}

	ret = snap_get_pf_list(sctx, SNAP_VIRTIO_NET, pfs);
	if (ret < 0) {
		log_error("Failed to get PF list");
		goto out;
	}

	port_ctx = virtnet_ibdev_to_ibport(ctx, ctx->ib_dev);
	if (!port_ctx) {
		log_error("Failed to locate port context for %s",
			  ctx->ib_dev->name);
		goto out;
	}

	ret = virtnet_rec_sf_cleanup(port_ctx);
	if (ret)
		goto out;

	virtnet_device_dump_all_rec_files();

	for (i = 0; i < sctx->virtio_net_pfs.max_pfs; i++) {
		dev = &dev_list[i];
		if (!pfs[i]->plugged)
			continue;

		dev->id = i;
		dev->ctx = ctx;
		dev->snap_pci = pfs[i];
		dev->port_ctx = port_ctx;
		dev->flag = VIRTNET_DEV_PF |
			    VIRTNET_DEV_PRE_CONFIG;

		virtnet_device_check_recover(dev);

		ret = virtnet_sf_create(dev);
		if (ret) {
			log_error("Failed to create SF for PF %i", i);
			goto err_sf;
		}

		ret = virtnet_device_open(dev);
		if (ret) {
			log_error("Failed to open PF %i", i);
			goto err_open;
		}
		log_info("found pre-configured PF ID(%i)", i);

		virtnet_device_mark_rec_file(dev);

		if (!virtnet_device_is_recovering(dev))
			if (virtnet_device_rec_init(dev))
				goto err_open;

		/* Wait for PF ready */
		sleep(1);

		/* User defined callback to handle pre config device */
		if (ctx->handler)
			ctx->handler(dev);

		virtnet_device_vf_scan(ctx, dev);
	}

	virtnet_device_cleanup_unused_rec_files();

	goto out;

err_open:
	virtnet_sf_destroy(dev);
err_sf:
	/* Unwind fully-opened PFs (and, via close, their VFs). */
	for (--i; i >= 0; i--) {
		dev = &dev_list[i];
		virtnet_device_close(dev);
		virtnet_sf_destroy(dev);
	}
out:
	free(pfs);
	return;
}

/*
 * React to a change of num_of_vfs in the PF's bar (called from the
 * workqueue thread): tear everything down when the count drops to zero,
 * otherwise (re)create the VF array.
 *
 * NOTE(review): when both the old and the new count are non-zero, the old
 * array is overwritten without closing/freeing the old VFs — presumably
 * the host driver always goes through 0 in between; confirm.
 */
static void virtnet_device_update_vfs(struct virtnet_device *dev)
{
	struct virtnet_device *vf_dev;
	int num_of_vfs;
	int ret, i;

	num_of_vfs = dev->snap_ctrl->common.bar_curr->num_of_vfs;
	if (num_of_vfs == dev->num_of_vfs)
		return;

	log_info("PF(%d) num of vfs: new (%d), old (%d)",
		 dev->id, num_of_vfs, dev->num_of_vfs);

	if (dev->num_of_vfs && !num_of_vfs) {
		log_info("PF(%d) close vf %i", dev->id, dev->num_of_vfs);
		for (i = dev->num_of_vfs - 1; i >= 0; i--) {
			vf_dev = &dev->vf_dev[i];
			virtnet_device_rec_destroy(vf_dev);
			virtnet_device_close(vf_dev);
			virtnet_sf_destroy(vf_dev);
		}
		free(dev->vf_dev);
		dev->vf_dev = NULL;
		dev->num_of_vfs = 0;
		return;
	}

	dev->vf_dev = calloc(num_of_vfs,
			     sizeof(struct virtnet_device));
	/* Original code dereferenced the array without checking. */
	if (!dev->vf_dev) {
		log_error("PF(%d) failed to allocate %d VF devices",
			  dev->id, num_of_vfs);
		return;
	}

	for (i = 0; i < num_of_vfs; i++) {
		log_info("PF(%d) open vf %i", dev->id, i);
		vf_dev = &dev->vf_dev[i];
		vf_dev->id = i;
		vf_dev->ctx = dev->ctx;
		vf_dev->port_ctx = dev->port_ctx;
		vf_dev->snap_pci = &dev->snap_pci->vfs[i];
		vf_dev->flag = VIRTNET_DEV_VF;

		ret = virtnet_sf_create(vf_dev);
		if (ret) {
			log_error("Failed to create SF for PF(%i) VF %i",
				 dev->id, i);
			goto err_sf;
		}

		ret = virtnet_device_open(vf_dev);
		if (ret) {
			log_info("PF(%d) failed to open vf %i",
				 dev->id, i);
			goto err_vf;
		}

		ret = virtnet_device_rec_init(vf_dev);
		if (ret)
			goto err_vf;
	}

	dev->num_of_vfs = num_of_vfs;

	return;

err_vf:
	virtnet_sf_destroy(vf_dev);
err_sf:
	for (--i; i >= 0; i--) {
		vf_dev = &dev->vf_dev[i];
		virtnet_device_close(vf_dev);
		virtnet_sf_destroy(vf_dev);
	}
	free(dev->vf_dev);
	/* Don't leave a dangling pointer behind. */
	dev->vf_dev = NULL;
}

/*
 * Workqueue thread body.  Sleeps on wq_sem; each post (from the device
 * event callback) triggers a pass over all opened PFs to pick up
 * num_of_vfs changes.
 */
static void *virtnet_device_wq(void *ctx)
{
	struct virtnet_context *vctx = (struct virtnet_context *)ctx;
	struct virtnet_device *devs;
	struct snap_context *sctx;
	int idx;

	for (;;) {
		sem_wait(&vctx->wq_sem);
		devs = vctx->dev_list;
		sctx = vctx->sctx;

		log_debug("Workqueue triggered");
		for (idx = 0; idx < sctx->virtio_net_pfs.max_pfs; idx++) {
			struct virtnet_device *pf = &devs[idx];

			if (pf->flag & VIRTNET_DEV_OPENED)
				virtnet_device_update_vfs(pf);
		}
	}

	return ctx;
}

/*
 * Initialize the VF-update workqueue: semaphore + worker thread.
 *
 * The semaphore is initialized *before* the thread is created — the
 * original code did it after, racing the worker's first sem_wait() on an
 * uninitialized semaphore.  On thread-creation failure the semaphore is
 * destroyed again so no resource is left behind.
 *
 * Returns 0 on success, the pthread_create() error code otherwise.
 */
static int virtnet_device_wq_init(struct virtnet_context *ctx)
{
	int ret;

	sem_init(&ctx->wq_sem, 0, 0);

	ret = pthread_create(&ctx->wq_tid, NULL, virtnet_device_wq, ctx);
	if (ret) {
		log_error("Failed to create device workqueue thread");
		sem_destroy(&ctx->wq_sem);
	}

	return ret;
}

/*
 * Stop the workqueue thread and release its semaphore.
 *
 * The thread is cancelled and joined *before* sem_destroy() — the
 * original order destroyed a semaphore the worker might still be blocked
 * on (undefined behavior per POSIX), and never joined the thread.
 */
static void virtnet_device_wq_destroy(struct virtnet_context *ctx)
{
	pthread_cancel(ctx->wq_tid);
	pthread_join(ctx->wq_tid, NULL);
	sem_destroy(&ctx->wq_sem);
}

/*
 * Release every port context: close its mlxdevm socket and free it.
 * Slots are NULLed after free so the function is idempotent — it is
 * reached from several error-unwind paths and must be safe to call more
 * than once.
 */
static void virtnet_ib_port_destroy(struct virtnet_context *ctx)
{
	int i;

	for (i = 0; i < IB_NUM_PORTS; i++) {
		if (!ctx->port_ctx[i])
			continue;

		mlxdevm_close(ctx->port_ctx[i]->devm);
		free(ctx->port_ctx[i]);
		ctx->port_ctx[i] = NULL;
	}
}

/*
 * Read the MTU of the physical uplink netdev that belongs to the given
 * PCI device/function: find the netdev under
 * /sys/bus/pci/devices/<addr>/net/ whose phys_port_name is "p<func>",
 * then read its sysfs mtu attribute.
 *
 * Returns the MTU, or 0 when it cannot be determined.
 *
 * Fixes vs the original: bounded fscanf (the unbounded "%s" could
 * overflow the 32-byte buffer), checked fscanf return values, snprintf
 * instead of sprintf, and a strrchr() NULL guard.
 */
static uint16_t virtnet_get_port_mtu(char *pci_addr, int pci_func_id)
{
	const char *glob_pattern = "/sys/bus/pci/devices/%s/net/*/phys_port_name";
	char phy_if_name[32], if_name[32];
	char tmp_path[PATH_MAX];
	uint16_t mtu = 0;
	glob_t glob_dat;
	char *cloc;
	size_t i;
	FILE *fp;

	snprintf(phy_if_name, sizeof(phy_if_name), "p%d", pci_func_id);
	snprintf(tmp_path, sizeof(tmp_path), glob_pattern, pci_addr);
	if (glob(tmp_path, 0, NULL, &glob_dat) != 0) {
		log_error("failed to glob: %s", tmp_path);
		return 0;
	}
	for (i = 0; i < glob_dat.gl_pathc; i++) {
		fp = fopen(glob_dat.gl_pathv[i], "r");
		if (fp == NULL)
			continue;
		if (fscanf(fp, "%31s", if_name) != 1) {
			fclose(fp);
			continue;
		}
		fclose(fp);
		if (strcmp(if_name, phy_if_name))
			continue;
		/* Replace ".../phys_port_name" with ".../mtu". */
		strcpy(tmp_path, glob_dat.gl_pathv[i]);
		cloc = strrchr(tmp_path, '/');
		if (cloc == NULL)
			break;
		*cloc = '\0';
		strcat(tmp_path, "/mtu");
		fp = fopen(tmp_path, "r");
		if (fp == NULL)
			break;
		if (fscanf(fp, "%hu", &mtu) != 1)
			mtu = 0;
		fclose(fp);
		break;
	}
	globfree(&glob_dat);

	return mtu;
}

/*
 * Build a port context for every known IB uplink device: resolve its PCI
 * address and function, read the physical MTU and open an mlxdevm
 * socket.  Missing devices (e.g. no LAG configured) are skipped.
 *
 * On failure the not-yet-published port_ctx is freed explicitly (the
 * original code leaked it — virtnet_ib_port_destroy() only sees contexts
 * already stored in ctx->port_ctx[]) and all previously initialized
 * ports are torn down.
 *
 * Returns 0 on success, a non-zero error code otherwise.
 */
static int virtnet_ib_port_init(struct virtnet_context *ctx)
{
	struct virtnet_port_ctx *port_ctx;
	struct mlxdevm *mlxdevm;
	struct ibv_device *dev;
	uint16_t mtu;
	int i, ret;

	for (i = 0; i < IB_NUM_PORTS; i++) {
		dev = virtnet_get_ib_dev(ib_dev_names[i]);
		if (!dev)
			continue;

		port_ctx = calloc(1, sizeof(*port_ctx));
		if (!port_ctx) {
			log_error("Failed to allocate port_ctx of %s\n",
				  ib_dev_names[i]);
			ret = ENOMEM;
			goto destroy;
		}

		ret = virtnet_ibdev_to_pci_addr(dev,
						port_ctx->pci_addr_str);
		if (ret)
			goto err_port;

		ret = virtnet_get_pci_function(port_ctx->pci_addr_str,
					       &port_ctx->pci_function);
		if (ret)
			goto err_port;

		mtu = virtnet_get_port_mtu(port_ctx->pci_addr_str, port_ctx->pci_function);
		if (mtu == 0) {
			ret = -EFAULT;
			goto err_port;
		}

		mlxdevm = mlxdevm_open("mlxdevm", "pci",
				       port_ctx->pci_addr_str);
		if (!mlxdevm) {
			log_error("Failed to open devm socket, %s %s\n",
				  ib_dev_names[i],
				  port_ctx->pci_addr_str);
			ret = EAGAIN;
			goto err_port;
		}

		port_ctx->dev = dev;
		port_ctx->mtu = mtu;
		port_ctx->devm = mlxdevm;
		port_ctx->virtnet_ctx = ctx;
		ctx->port_ctx[i] = port_ctx;
		port_ctx->max_sf_num = VIRTNET_SF_BASE_ID;
	}

	return 0;

err_port:
	/* This context was never stored in ctx->port_ctx[]. */
	free(port_ctx);
destroy:
	virtnet_ib_port_destroy(ctx);
	return ret;
}

/* Release the udev monitor and then its owning udev context. */
static void virtnet_udev_destroy(struct virtnet_context *virtnet_ctx)
{
	struct udev_monitor *mon = virtnet_ctx->monitor;
	struct udev *ud = virtnet_ctx->udev;

	udev_monitor_unref(mon);
	udev_unref(ud);
}

/*
 * Create a udev context and a netlink monitor filtered to the
 * "auxiliary" subsystem, enable receiving and store both in the context.
 * Returns 0 on success, -1 on any failure (everything unwound).
 */
static int virtnet_udev_init(struct virtnet_context *virtnet_ctx)
{
	struct udev *udev;
	struct udev_monitor *monitor = NULL;

	udev = udev_new();
	if (!udev) {
		log_error("Can't create udev");
		return -1;
	}

	monitor = udev_monitor_new_from_netlink(udev, "udev");
	if (!monitor) {
		log_error("Can't create udev monitor");
		goto fail;
	}

	if (udev_monitor_filter_add_match_subsystem_devtype(monitor,
							    "auxiliary",
							    NULL)) {
		log_error("Can't create udev monitor filter");
		goto fail;
	}

	if (udev_monitor_enable_receiving(monitor)) {
		log_error("Can't enable udev monitor receiving");
		goto fail;
	}

	virtnet_ctx->monitor = monitor;
	virtnet_ctx->udev = udev;

	return 0;

fail:
	if (monitor)
		udev_monitor_unref(monitor);
	udev_unref(udev);
	return -1;
}

/**
 * Initialize the virtnet library.
 *
 * Resolves the emulation-manager and uplink IB devices, builds the
 * library context (ports, udev monitor, snap context, device list,
 * workqueue, epoll, async ISRs) and scans pre-configured PFs.
 *
 * @attr:    initialization attributes (LAG mode, uplink name, PF MAC base).
 * @handler: optional user callback invoked for each pre-configured PF.
 *
 * Returns the new context, or NULL with errno set on failure.
 *
 * The error unwinding is a full reverse-order chain — the original code
 * leaked virtnet_ctx (and ports/udev/snap) on several early failure
 * paths because they all jumped straight to a bare `return NULL` label.
 */
struct virtnet_context *virtnet_init(struct virtnet_init_attr *attr,
				     virtnet_device_handler_cb_t handler)
{
	char ib_dev_name[IBV_SYSFS_NAME_MAX] = {};
	struct virtnet_context *virtnet_ctx;
	struct virtnet_device *dev_list;
	struct ibv_device *emu_ib_dev;
	struct ibv_device *ib_dev;
	struct snap_context *sctx;
	int ret, ib_dev_idx;

	ib_dev_idx = attr->is_lag ?
		IB_DEV_EMU_MANAGER_LAG : IB_DEV_EMU_MANAGER;

	strcpy(ib_dev_name, ib_dev_names[ib_dev_idx]);
	emu_ib_dev = virtnet_get_ib_dev(ib_dev_name);
	if (!emu_ib_dev) {
		log_error("No emulation manager device(%s) found",
			  ib_dev_name);
		return NULL;
	}

	ib_dev = virtnet_get_ib_dev(attr->ib_dev_name);
	if (!ib_dev) {
		log_error("No IB device(%s) found", attr->ib_dev_name);
		return NULL;
	}

	virtnet_ctx = calloc(1, sizeof(*virtnet_ctx));
	if (!virtnet_ctx) {
		errno = ENOMEM;
		return NULL;
	}

	if (virtnet_ib_port_init(virtnet_ctx))
		goto err_ctx;

	if (virtnet_udev_init(virtnet_ctx))
		goto err_port;

	sctx = snap_open(emu_ib_dev);
	if (!sctx) {
		log_info("Can't open snap dev at %s, err(%d)",
			 emu_ib_dev->name, errno);
		goto err_udev;
	}

	if (!sctx->hotplug_supported ||
	    !(sctx->hotplug.supported_types & SNAP_VIRTIO_NET) ||
	    !sctx->hotplug.max_devices) {
		errno = 0;
		log_info("%s doesn't support hotplug, "
			 "hotplug cap: %d, "
			 "hotplug net cap: %d, "
			 "hotplug max dev: %d. "
			 "Scanning preconfigured device",
			 emu_ib_dev->name, sctx->hotplug_supported,
			 sctx->hotplug.supported_types & SNAP_VIRTIO_NET,
			 sctx->hotplug.max_devices);
	}

	/* CTRL_VQ implies the controls it carries (MQ, RX, VLAN). */
	virtnet_ctx->max_features = MAX_FEATURES |
		sctx->virtio_net_caps.features;
	if (virtnet_ctx->max_features & (1ULL << VIRTIO_NET_F_CTRL_VQ))
		virtnet_ctx->max_features |=
			(1ULL << VIRTIO_NET_F_MQ) |
			(1ULL << VIRTIO_NET_F_CTRL_RX) |
			(1ULL << VIRTIO_NET_F_CTRL_VLAN);

	dev_list = calloc(sctx->virtio_net_pfs.max_pfs, sizeof(*dev_list));
	if (!dev_list) {
		errno = ENOMEM;
		goto err_sctx;
	}

	ret = virtnet_device_wq_init(virtnet_ctx);
	if (ret) {
		errno = ret;
		goto err_dev_list;
	}

	/* Link status (Register event + Desc tunnel event) * N */
	ret = virtnet_epoll_init(sctx->virtio_net_pfs.max_pfs * 2 + 2);
	if (ret) {
		errno = ret;
		goto err_wq;
	}

	/* Register link status async event */
	ret = virtnet_device_isr_register(virtnet_ctx);
	if (ret) {
		errno = ret;
		goto err_epoll;
	}

	sctx->virtio_net_pfs.pf_mac = attr->pf_mac;

	virtnet_ctx->emu_ib_dev = emu_ib_dev;
	virtnet_ctx->sctx = sctx;
	virtnet_ctx->ib_dev = ib_dev;
	virtnet_ctx->dev_list = dev_list;
	virtnet_ctx->handler = handler;

	virtnet_device_scan(virtnet_ctx, dev_list);

	return virtnet_ctx;

err_epoll:
	virtnet_epoll_close();
err_wq:
	virtnet_device_wq_destroy(virtnet_ctx);
err_dev_list:
	free(dev_list);
err_sctx:
	snap_close(sctx);
err_udev:
	virtnet_udev_destroy(virtnet_ctx);
err_port:
	virtnet_ib_port_destroy(virtnet_ctx);
err_ctx:
	free(virtnet_ctx);
	return NULL;
}

/*
 * Tear down the whole library context, reverse of virtnet_init():
 * async ISRs, epoll loop, workqueue, device list, udev, ports, snap.
 * Teardown order matters — event sources go first so nothing fires
 * into freed state.
 */
void virtnet_destroy(struct virtnet_context *ctx)
{
	virtnet_device_isr_unregister(ctx);
	virtnet_epoll_close();
	virtnet_device_wq_destroy(ctx);
	free(ctx->dev_list);
	virtnet_udev_destroy(ctx);
	virtnet_ib_port_destroy(ctx);
	snap_close(ctx->sctx);
	free(ctx);
}

/**
 * Destroy default flow and VQ resources and shutdown device.
 */
/**
 * Destroy default flow and VQ resources and shutdown device.
 *
 * Bar callback (void *ctx is the virtnet_device).  Drops the link, stops
 * the SF and destroys the VQs, then resets the MAC/VLAN filter tables
 * while keeping their allocated entry arrays (zeroed in place, pointer
 * restored) so a subsequent start can reuse them.  Always returns 0.
 */
static int virtnet_device_stop(void *ctx)
{
	struct virtnet_device_modify_fields val = {};
	struct virtnet_device *dev = ctx;
	void *tmp;

	/* Idempotent: nothing to do if never started. */
	if (!(dev->flag & VIRTNET_DEV_STARTED))
		return 0;

	val.link_up = false;
	virtnet_device_modify(dev, &val, VIRTNET_MODIFY_LINK);

	virtnet_sf_stop(dev);
	virtnet_vqs_destroy(dev);

	/* mac table. */
	tmp = dev->mac_table.entries;
	if (dev->mac_table.entries)
		memset(dev->mac_table.entries, 0,
		       VIRTNET_MAC_TABLE_ENTRIES *
		       sizeof(*dev->mac_table.entries));
	/* Zero the table struct but keep the entries allocation alive. */
	memset(&dev->mac_table, 0 , sizeof(dev->mac_table));
	dev->mac_table.entries = tmp;

	/* VLan table. */
	tmp = dev->vlan_table.entries;
	if (dev->vlan_table.entries)
		memset(dev->vlan_table.entries, 0,
		       VIRTNET_MAX_VLAN * sizeof(*dev->vlan_table.entries));
	memset(&dev->vlan_table, 0, sizeof(dev->vlan_table));
	dev->vlan_table.entries = tmp;

	dev->flag &= ~VIRTNET_DEV_STARTED;
	log_info("%s(%i): device stopped",
		 dev->flag & VIRTNET_DEV_PF ? "PF" : "VF", dev->id);

	return 0;
}

/**
 * Start a device after host driver finished virtio register configuration.
 * Create VQ resources and apply default steering flow.
 */
/**
 * Start a device after host driver finished virtio register configuration.
 * Create VQ resources and apply default steering flow.
 *
 * Bar callback (void *ctx is the virtnet_device).  Returns 0 on success,
 * a non-zero error otherwise.
 *
 * Fix vs the original: the recovery paths discarded the recover return
 * value and fell through to `return ret` with ret still 0 from
 * virtnet_sf_start(), reporting success after cleanup.  The recover
 * results are now captured in ret.
 */
static int virtnet_device_start(void *ctx)
{
	struct virtnet_device_modify_fields val = {};
	struct virtnet_device *dev = ctx;
	uint64_t mac = 0;
	int ret;

	if (dev->flag & VIRTNET_DEV_STARTED) {
		log_info("started device again without reset");
		virtnet_device_stop(ctx);
	}

	dev->vq_pair_n = 1; /* enable one pair by default */

	/* The 6-byte MAC lives in the low bytes of the big-endian
	 * register; skip the 2 leading zero bytes after byte-swap. */
	mac = be64toh(dev->registers->mac);
	if (virtnet_ether_addr_is_zero(dev->mac.addr))
		memcpy(dev->mac.addr, (void *)((uintptr_t)&mac + 2), 6);

	ret = virtnet_vqs_create(dev);
	if (ret) {
		log_error("Failed to create VQs, err(%d)", ret);
		return ret;
	}

	ret = virtnet_sf_start(dev);
	if (ret) {
		log_error("Failed to start SF, err(%d)", ret);
		goto out;
	}

	if (virtnet_device_is_recovering(dev)) {
		ret = virtnet_device_rxmode_recover(dev);
		if (ret)
			goto recover_out;

		ret = virtnet_device_mq_recover(dev);
		if (ret)
			goto recover_out;
	}

	val.link_up = true;
	virtnet_device_modify(dev, &val, VIRTNET_MODIFY_LINK);

	dev->flag |= VIRTNET_DEV_STARTED;
	log_info("%s(%i): device started",
		 dev->flag & VIRTNET_DEV_PF ? "PF" : "VF",
		 dev->id);

	return 0;

recover_out:
	virtnet_sf_stop(dev);
out:
	virtnet_vqs_destroy(dev);
	return ret;
}

/**
 * Validate the driver-requested feature bits and the resulting queue
 * configuration against device and snap-context capabilities.
 *
 * @dev:      device whose current bar/registers are checked.
 * @features: driver feature bits to validate.
 *
 * Returns 0 when acceptable, -EINVAL otherwise (err_if() logs the reason
 * and jumps to free_exit).
 *
 * Fixes vs the original: the first two checks logged
 * reg->vattr.driver_feature instead of the @features argument actually
 * being validated, and the CTRL_VLAN / GUEST_ANNOUNCE / CTRL_MAC_ADDR
 * messages were copy-pasted from the CTRL_RX check and all claimed
 * "CTRL_RX must be enabled with MQ" although the dependency tested is on
 * CTRL_VQ.
 */
int virtnet_device_feature_validate(struct virtnet_device *dev,
				    uint64_t features)
{
	struct snap_virtio_net_device_attr *reg =
		to_net_device_attr(dev->snap_ctrl->common.bar_curr);
	struct snap_context *sctx = dev->ctx->sctx;
	int i;

	/* requested features have to be subset of allowed features. */
	err_if(features & ~reg->vattr.device_feature,
		"invalid driver feature 0x%lx requested, "
		"device feature 0x%lx",
		features, reg->vattr.device_feature);

	err_if(features & ~dev->ctx->max_features,
		"invalid driver feature 0x%lx requested, max 0x%lx",
		features, dev->ctx->max_features);

	/* TSO features: */
	err_if((features & 1ULL << VIRTIO_NET_F_GUEST_TSO4) &&
	       !(features & 1ULL << VIRTIO_NET_F_GUEST_CSUM),
		"invalid feature 0x%lx requested, "
		"GUEST_TSO4 must be enabled with GUEST_CSUM", features);

	err_if((features & 1ULL << VIRTIO_NET_F_GUEST_TSO6) &&
	       !(features & 1ULL << VIRTIO_NET_F_GUEST_CSUM),
		"invalid feature 0x%lx requested, "
		"GUEST_TSO6 must be enabled with GUEST_CSUM", features);

	err_if((features & 1ULL << VIRTIO_NET_F_HOST_TSO4) &&
	       !(features & 1ULL << VIRTIO_NET_F_CSUM),
		"invalid feature 0x%lx requested, "
		"HOST_TSO4 must be enabled with CSUM", features);

	err_if((features & 1ULL << VIRTIO_NET_F_HOST_TSO6) &&
	       !(features & 1ULL << VIRTIO_NET_F_CSUM),
		"invalid feature 0x%lx requested, "
		"HOST_TSO6 must be enabled with CSUM", features);

	/* Control Queue features: */
	err_if((features & 1ULL << VIRTIO_NET_F_CTRL_RX) &&
	       !(features & 1ULL << VIRTIO_NET_F_CTRL_VQ),
		"invalid feature 0x%lx requested, "
		"CTRL_RX must be enabled with CTRL_VQ", features);

	err_if((features & 1ULL << VIRTIO_NET_F_CTRL_VLAN) &&
	       !(features & 1ULL << VIRTIO_NET_F_CTRL_VQ),
		"invalid feature 0x%lx requested, "
		"CTRL_VLAN must be enabled with CTRL_VQ", features);

	err_if((features & 1ULL << VIRTIO_NET_F_GUEST_ANNOUNCE) &&
	       !(features & 1ULL << VIRTIO_NET_F_CTRL_VQ),
		"invalid feature 0x%lx requested, "
		"GUEST_ANNOUNCE must be enabled with CTRL_VQ", features);

	err_if((features & 1ULL << VIRTIO_NET_F_CTRL_MAC_ADDR) &&
	       !(features & 1ULL << VIRTIO_NET_F_CTRL_VQ),
		"invalid feature 0x%lx requested, "
		"CTRL_MAC_ADDR must be enabled with CTRL_VQ", features);

	if (features & 1ULL << VIRTIO_NET_F_MQ) {
		/* With MQ: rx/tx per pair plus the control queue. */
		err_if(dev->snap_ctrl->common.enabled_queues >
		       reg->max_queue_pairs * 2 + 1,
			"invalid queue number %lu requested, supported 0x%hu",
			dev->snap_ctrl->common.enabled_queues,
			reg->max_queue_pairs * 2 + 1);
	} else {
		/* Without MQ: one rx/tx pair, plus ctrl VQ if negotiated. */
		int max = (features & 1ULL << VIRTIO_NET_F_CTRL_VQ) ? 3 : 2;
		err_if(dev->snap_ctrl->common.enabled_queues > max,
			"invalid queue number %lu requested, supported 0x%hu",
			dev->snap_ctrl->common.enabled_queues, max);
	}

	/* MSI-X: */
	err_if(reg->vattr.msix_config > 0x7ff &&
	       reg->vattr.msix_config != VIRTIO_MSI_NO_VECTOR,
		"invalid MSI-X %hu requested", reg->vattr.msix_config);

	err_if(reg->vattr.msix_config != VIRTIO_MSI_NO_VECTOR &&
		!(sctx->virtio_net_caps.event_modes & SNAP_VIRTQ_MSIX_MODE),
		"MSI-X mode not supported");

	/* Control queue check */
	err_if((dev->snap_ctrl->common.enabled_queues & 1) &&
	       !(dev->ctx->max_features & (1ULL << VIRTIO_NET_F_CTRL_VQ)),
		"Control queue not supported");

	/* Queue parameters: */
	for (i = 0; i < dev->snap_ctrl->common.enabled_queues; i++) {
		if (!reg->q_attrs[i].vattr.enable)
			continue;

		/* VQ index: */
		err_if(reg->q_attrs[i].vattr.idx >
		       sctx->virtio_net_caps.max_emulated_virtqs,
			"Invalid virtio queue index %hu, max %u",
			reg->q_attrs[i].vattr.idx,
			sctx->virtio_net_caps.max_emulated_virtqs);

		/* Event mode: */
		err_if((reg->q_attrs[i].vattr.ev_mode &
			SNAP_VIRTQ_MSIX_MODE) &&
			!(sctx->virtio_net_caps.event_modes &
			  SNAP_VIRTQ_MSIX_MODE),
			"MSI-X mode not supported");

		/* vq size: */
		err_if(!reg->q_attrs[i].vattr.size,
		       "Virtio queue size not specified");

		/* Packed queue size: */
		if (features & 1ULL << VIRTIO_F_RING_PACKED) {
			err_if(!(sctx->virtio_net_caps.supported_types &
				 SNAP_VIRTQ_PACKED_MODE),
				"Packed mode not supported by hw");

			err_if(reg->q_attrs[i].vattr.size &
			       (reg->q_attrs[i].vattr.size - 1),
				"invalid queue size %u of packed queue %d, "
				"not power of 2",
				reg->q_attrs[i].vattr.size, i);
		}

		/* Vector range: */
		err_if(reg->q_attrs[i].vattr.msix_vector > 0x7ff &&
			reg->q_attrs[i].vattr.msix_vector !=
			VIRTIO_MSI_NO_VECTOR,
			"invalid MSI-X %hu requested for vq %d",
			reg->q_attrs[i].vattr.msix_vector, i);
	}

	return 0;
free_exit:
	return -EINVAL;
}

/**
 * Verify new virtio device registers configured by host driver.
 */
/**
 * Verify new virtio device registers configured by host driver.
 * Bar callback: validates the driver-negotiated feature bits.
 */
static int virtnet_register_verify(void *ctx)
{
	struct virtnet_device *dev = ctx;
	struct snap_virtio_net_device_attr *bar =
		to_net_device_attr(dev->snap_ctrl->common.bar_curr);

	return virtnet_device_feature_validate(dev,
					       bar->vattr.driver_feature);
}

/* Bar callback run before a Function Level Reset: drop the stale device
 * fd from the epoll loop (the fd changes across FLR).
 */
static int virtnet_pre_flr(void *ctx)
{
	struct virtnet_device *dev = ctx;
	int fd;

	log_info("Pre FLR operation: remove previous epoll fd");
	fd = snap_device_get_fd(dev->snap_ctrl->common.sdev);

	return virtnet_epoll_del(fd);
}

/* Bar callback run after a Function Level Reset: register the new device
 * fd with epoll and refresh the cached snap device pointer.
 */
static int virtnet_post_flr(void *ctx)
{
	struct virtnet_device *dev = ctx;
	struct snap_device *sdev = dev->snap_ctrl->common.sdev;
	int ret;

	log_info("Post FLR operation: add new epoll fd");
	ret = virtnet_epoll_add(snap_device_get_fd(sdev), &dev->epoll);
	dev->snap_dev = sdev;

	return ret;
}

/* Bar-change callbacks handed to the snap controller for every device. */
static struct snap_virtio_ctrl_bar_cbs bar_cbs = {
	.start = virtnet_device_start,
	.stop = virtnet_device_stop,
	.validate = virtnet_register_verify,
	.pre_flr = virtnet_pre_flr,
	.post_flr = virtnet_post_flr,
};

/* Live-migration callbacks — not implemented, all NULL. */
static struct snap_virtio_net_ctrl_lm_cbs lm_cbs = {
	.get_internal_state_size = NULL,
	.get_internal_state      = NULL,
	.dump_internal_state     = NULL,
	.set_internal_state      = NULL,
};

/*
 * Epoll callback for a device's snap event fd.  Drains pending events,
 * drives the controller's progress loop under the device lock until no
 * critical bar change remains, and — for PFs — kicks the workqueue when
 * the host changed num_of_vfs.
 */
static void virtnet_device_event_cb(void *ctx)
{
	int num_of_vfs_cur, num_of_vfs_pre;
	struct virtnet_device *dev = ctx;
	struct snap_event event[4] = {};
	struct snap_virtio_ctrl *vctrl;
	int ret, i = 0;

	vctrl = &dev->snap_ctrl->common;
	/* Fetch up to 4 queued events in one call. */
	ret = snap_device_get_events(dev->snap_dev, 4, event);
	if (ret < 0)
		log_error("failed to get device event");
	else if (ret == 0)
		log_info("no event in callback?");
	else {
		if (ret > 1)
			log_info("Got more events in one event");

		/* Snapshot num_of_vfs to detect a change across progress. */
		num_of_vfs_pre = dev->snap_ctrl->common.bar_curr->num_of_vfs;
		do {
			i++;
			pthread_mutex_lock(&dev->lock);
			snap_virtio_net_ctrl_progress(dev->snap_ctrl);
			pthread_mutex_unlock(&dev->lock);

			log_info("%s(%i): status 0x%x -> 0x%x, "
				 "queues %ld, enabled %d, gvmi: 0x%x, "
				 "retry 0x%x, total events 0x%x",
				 dev->flag & VIRTNET_DEV_PF ? "PF" : "VF",
				 dev->id,
				 dev->snap_ctrl->common.bar_prev->status,
				 dev->registers->vattr.status,
				 dev->snap_ctrl->common.enabled_queues,
				 dev->snap_ctrl->common.bar_curr->enabled,
				 dev->snap_pci->mpci.vhca_id, i, ret);
		} while (snap_virtio_ctrl_critical_bar_change_detected(vctrl));

		num_of_vfs_cur = dev->snap_ctrl->common.bar_curr->num_of_vfs;
		/* Check VFs in another thread */
		if ((dev->flag & VIRTNET_DEV_PF) &&
		    (num_of_vfs_cur != num_of_vfs_pre))
			sem_post(&dev->ctx->wq_sem);
	}
}

/*
 * Assign an initial MAC address to a non-hotplugged device: PFs take the
 * next address from the configured pf_mac pool; everything else gets a
 * random locally-administered address.  Hotplugged devices keep the MAC
 * set at plug time.  Returns 0 on success or a snap error code.
 */
static int virtnet_device_mac_init(struct snap_virtio_net_ctrl *ctrl)
{
	struct snap_virtio_net_device_attr nattr = {};
	struct snap_device *sdev = ctrl->common.sdev;
	uint8_t *mac;
	int ret;

	/* Assign random mac to non-hotplug virtio net devices */
	if (sdev->pci->hotplugged)
		return 0;

	ret = snap_virtio_net_query_device(sdev, &nattr);
	if (ret)
		return ret;

	if (sdev->pci->type == SNAP_VIRTIO_NET_PF &&
	    sdev->sctx->virtio_net_pfs.pf_mac) {
		/* Consume one address from the pool (post-increment). */
		nattr.mac = sdev->sctx->virtio_net_pfs.pf_mac++;
	} else {
		/* Randomize the low 6 bytes of the u64, then byte-swap so
		 * the octets land where the device expects them.
		 * NOTE(review): assumes a little-endian host, where
		 * &mac[2] addresses bytes 2..7 of nattr.mac — confirm. */
		mac = (uint8_t *)&nattr.mac;
		eth_random_addr(&mac[2]);
		nattr.mac = be64toh(nattr.mac);
	}

	ret = snap_virtio_net_modify_device(sdev, SNAP_VIRTIO_MOD_DEV_CFG,
					    &nattr);

	return ret;
}

int virtnet_device_open(struct virtnet_device *dev)
{
	struct snap_virtio_net_ctrl_attr attr = {
		.common = {
			.type = SNAP_VIRTIO_NET_CTRL,
			.pf_id = dev->snap_pci->id,
			.event = true,
			.cb_ctx = dev,
			.bar_cbs = &bar_cbs,
		},
		.lm_cbs = &lm_cbs
	};
	struct snap_context *sctx = dev->ctx->sctx;
	int ret;

	if (dev->flag & VIRTNET_DEV_PF) {
		attr.common.pci_type = SNAP_VIRTIO_NET_PF;
		attr.common.pf_id = dev->snap_pci->id;

		if (virtnet_device_is_recovering(dev))
			virtnet_device_devtype_recover(dev);
	} else {
		attr.common.pci_type = SNAP_VIRTIO_NET_VF;
		attr.common.pf_id = dev->snap_pci->parent->id;
		attr.common.vf_id = dev->snap_pci->id;
	}

	attr.common.context = dev->sf_verbs.dev;
	dev->snap_ctrl = snap_virtio_net_ctrl_open(sctx, &attr);
	if (!dev->snap_ctrl) {
		ret = errno;
		log_error("Failed to create device control");
		goto err_snap_ctrl;
	}

	if (virtnet_device_is_recovering(dev))
		ret = virtnet_device_mac_recover(dev);
	else
		ret = virtnet_device_mac_init(dev->snap_ctrl);

	if (ret)
		goto err_mac_init;

	dev->snap_dev = dev->snap_ctrl->common.sdev;
	dev->registers = to_net_device_attr(dev->snap_ctrl->common.bar_curr);
	dev->reg_vqs = dev->registers->q_attrs;

	dev->eth_vqs = calloc(sctx->virtio_net_caps.max_emulated_virtqs,
			  sizeof(*dev->eth_vqs));
	if (!dev->eth_vqs) {
		ret = ENOMEM;
		log_error("Failed to allocate vq memory");
		goto err_vq_alloc;
	}

	dev->mac_table.entries = calloc(VIRTNET_MAC_TABLE_ENTRIES,
					sizeof(*dev->mac_table.entries));
	if (!dev->mac_table.entries) {
		ret = ENOMEM;
		log_error("Failed to calloc memory for mac table");
		goto err_mac_tbl;
	}

	dev->vlan_table.entries = calloc(VIRTNET_MAX_VLAN,
					 sizeof(*dev->vlan_table.entries));
	if (!dev->vlan_table.entries) {
		ret = ENOMEM;
		log_error("Failed to calloc memory for vlan table");
		goto err_vlan_tbl;
	}

	pthread_mutex_init(&dev->lock, NULL);

	dev->epoll.ctx = dev;
	dev->epoll.cb = virtnet_device_event_cb;
	ret = virtnet_epoll_add(snap_device_get_fd(dev->snap_dev),
				&dev->epoll);
	if (ret) {
		log_error("Failed to register device event, err(%d)", ret);
		goto err_epoll;
	}

	pthread_mutex_lock(&dev->lock);
	snap_virtio_net_ctrl_progress(dev->snap_ctrl);
	pthread_mutex_unlock(&dev->lock);
	dev->pci_bdf = dev->snap_pci->pci_bdf.raw;
	log_info("%s(%i): Device status 0x%x, queues %lu, enable %d, bdf: %x:%x.%x",
		 dev->flag & VIRTNET_DEV_PF ? "PF" : "VF", dev->id,
		 dev->registers->vattr.status,
		 dev->snap_ctrl->common.enabled_queues,
		 dev->registers->vattr.enabled,
		 dev->snap_pci->pci_bdf.bdf.bus,
		 dev->snap_pci->pci_bdf.bdf.device,
		 dev->snap_pci->pci_bdf.bdf.function);

	dev->flag |= VIRTNET_DEV_OPENED;

	return 0;

err_epoll:
	free(dev->vlan_table.entries);
err_vlan_tbl:
	free(dev->mac_table.entries);
err_mac_tbl:
	free(dev->eth_vqs);
err_vq_alloc:
err_mac_init:
	snap_virtio_net_ctrl_close(dev->snap_ctrl);
err_snap_ctrl:
	return ret;
}

/*
 * Close an opened device (reverse of virtnet_device_open).  For a PF,
 * recursively closes and destroys all of its VFs first.  Clears
 * VIRTNET_DEV_OPENED; calling on a device that is not open is a no-op.
 */
void virtnet_device_close(struct virtnet_device *dev)
{
	int i;

	if (!(dev->flag & VIRTNET_DEV_OPENED))
		return;

	/* VFs were open via event */
	if (dev->flag & VIRTNET_DEV_PF
	    && dev->num_of_vfs) {
		log_info("PF(%i) Close %d vfs", dev->id, dev->num_of_vfs);
		for (i = dev->num_of_vfs - 1; i >= 0; i--) {
			virtnet_device_close(&dev->vf_dev[i]);
			virtnet_sf_destroy(&dev->vf_dev[i]);
		}
		free(dev->vf_dev);
		dev->num_of_vfs = 0;
	}

	log_info("%s(%i): bdf: %x:%x.%x",
		 dev->flag & VIRTNET_DEV_PF ? "PF" : "VF",
		 dev->id,
		 dev->snap_pci->pci_bdf.bdf.bus,
		 dev->snap_pci->pci_bdf.bdf.device,
		 dev->snap_pci->pci_bdf.bdf.function);

	/* Close under the lock so an in-flight event callback finishes
	 * its progress pass first. */
	pthread_mutex_lock(&dev->lock);
	snap_virtio_net_ctrl_close(dev->snap_ctrl);
	pthread_mutex_unlock(&dev->lock);

	virtnet_epoll_del(snap_device_get_fd(dev->snap_ctrl->common.sdev));

	pthread_mutex_destroy(&dev->lock);
	free(dev->vlan_table.entries);
	free(dev->mac_table.entries);
	free(dev->eth_vqs);

	dev->flag &= ~VIRTNET_DEV_OPENED;
}

int virtnet_device_modify(struct virtnet_device *dev,
			  struct virtnet_device_modify_fields *val,
			  uint64_t mask)
{
	uint64_t snap_mask = 0;
	uint8_t new_status;
	int ret;

	if (mask & VIRTNET_MODIFY_STATE) {
		snap_mask |= SNAP_VIRTIO_MOD_DEV_STATUS;
		if (val->state & STATUS_DEVICE_NEEDS_RESET)
			new_status = dev->registers->vattr.status |
				     val->state;
		else
			new_status = 0;

		dev->registers->vattr.status = new_status;
	}

	if (mask & VIRTNET_MODIFY_FEATURES) {
		snap_mask |= SNAP_VIRTIO_MOD_PCI_COMMON_CFG;
		dev->registers->vattr.device_feature = val->features;
	}

	if (mask & VIRTNET_MODIFY_MAC) {
		snap_mask |= SNAP_VIRTIO_MOD_DEV_CFG;
		dev->registers->mac =
			be64toh(*(uint64_t *)val->mac.ether_addr_octet) >> 16;
	}

	if (mask & VIRTNET_MODIFY_MTU) {
		if (val->mtu > dev->port_ctx->mtu) {
			log_error("MTU (%d) is greater than physical port MTU(%d)\n",
				  val->mtu, dev->port_ctx->mtu);
			ret = -EINVAL;
			goto out;
		}
		snap_mask |= SNAP_VIRTIO_MOD_DEV_CFG;
		dev->registers->mtu = val->mtu;
	}

	if (mask & VIRTNET_MODIFY_LINK) {
		snap_mask |= SNAP_VIRTIO_MOD_LINK_STATUS;
		if (val->link_up)
			dev->registers->status |= VIRTIO_NET_S_LINK_UP;
		else
			dev->registers->status &= ~VIRTIO_NET_S_LINK_UP;
	}

	ret = snap_virtio_net_modify_device(dev->snap_dev, snap_mask,
					    dev->registers);
	if (ret) {
		log_error("Failed to modify device");
		goto out;
	}

	if ((mask & VIRTNET_MODIFY_MAC) &&
	    (dev->flag & VIRTNET_DEV_STARTED)) {
		virtnet_sf_mac_flow_destroy(dev, &dev->mac.dev_flow);
		memcpy(dev->mac.addr, val->mac.ether_addr_octet,
		       sizeof(val->mac.ether_addr_octet));
		ret = virtnet_sf_mac_flow_apply(dev, dev->mac.addr,
						&dev->mac.dev_flow);
		if (ret)
			log_error("Failed to apply MAC flow");
	}
out:
	return ret;
}

/* Feature bits that are derived from the queue count and therefore must
 * not be requested explicitly by the user (see virtnet_device_attr_check()).
 * NOTE(review): macro keeps its historical "VRTIO" spelling; renaming
 * would break any external users. */
#define VRTIO_NET_F_MULTI_Q ((1ULL << VIRTIO_NET_F_CTRL_RX) |	\
			     (1ULL << VIRTIO_NET_F_MQ) |	\
			     (1ULL << VIRTIO_NET_F_CTRL_VQ))
/*
 * virtnet_device_attr_check() - Validate user-supplied device attributes.
 * @ctx:  virtnet context (provides snap capabilities and port lookup)
 * @attr: attributes to validate (queue count, features, MTU, ib_dev)
 *
 * Return: 0 when all attributes are acceptable, -EINVAL otherwise.
 */
static int
virtnet_device_attr_check(struct virtnet_context *ctx,
			  struct virtnet_device_attr *attr)
{
	struct snap_context *sctx = ctx->sctx;
	struct virtnet_port_ctx *port_ctx;

	if (attr->num_queues > sctx->virtio_net_caps.max_emulated_virtqs ||
	    attr->num_queues < 2) {
		log_error("Invalid number of queues %hu, min 2, max %u",
			  attr->num_queues,
			  sctx->virtio_net_caps.max_emulated_virtqs);
		return -EINVAL;
	}

	/* With multi-queue, one queue is reserved for the ctrlq, so
	 * the total must be odd (N data-queue pairs + 1 ctrlq). */
	if ((attr->num_queues > 2) && !(attr->num_queues & 1)) {
		log_error("When multiple queues enabled,"
			  "it must be an odd number to account for one ctrlq.");
		return -EINVAL;
	}

	if (attr->features & VRTIO_NET_F_MULTI_Q) {
		log_error("Queue features are controlled by q_num.");
		return -EINVAL;
	}

	/* virtnet_ibdev_to_ibport() returns NULL for an unknown ib_dev
	 * (the hotplug path checks this); the old code dereferenced it
	 * unconditionally. */
	port_ctx = virtnet_ibdev_to_ibport(ctx, attr->ib_dev);
	if (!port_ctx) {
		log_error("Failed to locate port context for %s",
			  attr->ib_dev->name);
		return -EINVAL;
	}

	if (attr->mtu > port_ctx->mtu) {
		log_error("MTU (%d) is greater than physical port MTU(%d)\n",
			  attr->mtu, port_ctx->mtu);
		return -EINVAL;
	}

	return 0;
}

/*
 * Fill a snap hotplug attribute structure from user device attributes.
 * PCI identity/class fields are fixed virtio-net constants; register
 * defaults (features, link, MAC, MTU, queues) come from @dev_attr.
 */
static void
virtnet_device_hotplug_attr_init(struct snap_hotplug_attr *attr,
				 struct virtnet_device_attr *dev_attr)
{
	uint64_t mac_val = 0;
	int byte;

	attr->type = SNAP_VIRTIO_NET;

	/* Standard virtio-net PCI identity */
	attr->pci_attr.device_id = VIRTIO_PCI_MODERN_DEVICEID_NET;
	attr->pci_attr.vendor_id = VIRTIO_PCI_VENDORID;
	attr->pci_attr.subsystem_id = VIRTIO_PCI_SUBSYSTEM_ID;
	attr->pci_attr.subsystem_vendor_id = VIRTIO_PCI_SUBSYSTEM_VENDOR_ID;
	attr->pci_attr.revision_id = VIRTIO_PCI_REV;
	attr->pci_attr.class_code = VIRTIO_PCI_CLASS_NETWORK_ETHERNET;
	/* queue count + 1 extra vector, capped at VIRTIO_MAX_MSIX */
	attr->pci_attr.num_msix = MIN(dev_attr->num_queues + 1,
				      VIRTIO_MAX_MSIX);

	/* Multi-queue configurations imply the control-queue features */
	if (dev_attr->num_queues > 2)
		attr->regs.virtio_net.device_features |=
			(1ULL << VIRTIO_NET_F_CTRL_RX) |
			(1ULL << VIRTIO_NET_F_MQ) |
			(1ULL << VIRTIO_NET_F_CTRL_VQ);

	attr->regs.virtio_net.status = VIRTIO_NET_S_LINK_UP;
	attr->regs.virtio_net.device_features |=
		MAX_FEATURES | dev_attr->features;

	/* Pack the six MAC octets with octet 0 as the most significant
	 * byte of the register value. */
	for (byte = 0; byte < 6; byte++)
		mac_val = (mac_val << 8) |
			  dev_attr->mac.ether_addr_octet[byte];
	attr->regs.virtio_net.mac = mac_val;

	attr->regs.virtio_net.mtu = dev_attr->mtu;
	attr->regs.virtio_net.max_queues = dev_attr->num_queues / 2;
	attr->regs.virtio_net.queue_size = dev_attr->queue_size;
}

/*
 * Query the firmware for emulated virtio-net PF info and record the
 * PCI BDF of the (first) function into @pci. Returns 0 on success or
 * the mlx5dv_devx_general_cmd() error.
 */
static int
virtnet_device_query_pci_functions_info(struct snap_context *sctx,
					struct snap_pci *pci)
{
	uint8_t cmd_in[DEVX_ST_SZ_BYTES(query_emulated_functions_info_in)] = {0};
	uint8_t cmd_out[DEVX_ST_SZ_BYTES(query_emulated_functions_info_out)] = {0};
	int err;

	DEVX_SET(query_emulated_functions_info_in, cmd_in, opcode,
		 MLX5_CMD_OP_QUERY_EMULATED_FUNCTIONS_INFO);
	DEVX_SET(query_emulated_functions_info_in, cmd_in, op_mod,
		 MLX5_SET_EMULATED_FUNCTIONS_OP_MOD_VIRTIO_NET_PHYSICAL_FUNCTIONS);

	err = mlx5dv_devx_general_cmd(sctx->context, cmd_in, sizeof(cmd_in),
				      cmd_out, sizeof(cmd_out));
	if (err)
		return err;

	/* BDF stays 0 until the host has enumerated the function;
	 * callers poll on pci->pci_bdf.raw. */
	pci->pci_bdf.raw = DEVX_GET(query_emulated_functions_info_out,
				    cmd_out,
				    emulated_function_info[0].pci_bdf);
	return 0;
}

/*
 * virtnet_device_hotplug() - Hot-plug a new emulated virtio-net PF.
 * @ctx:  global virtnet context
 * @attr: validated-at-entry device attributes (queues, MAC, MTU, port)
 *
 * Plugs an emulated PCI function, polls until the host enumerates it
 * (up to ~10s: 200 retries x 50ms), creates the backing SF and opens
 * the device.
 *
 * Return: the opened device on success; NULL on failure, with the PCI
 * function hot-unplugged again on every error path.
 */
struct virtnet_device *
virtnet_device_hotplug(struct virtnet_context *ctx,
		       struct virtnet_device_attr *attr)
{
#define PCI_ENUMERATE_TIME_WAIT_US	(50000u)
#define PCI_ENUMERATE_MAX_RETRY		(200u)
	struct snap_hotplug_attr hotplug_attr = {};
	struct virtnet_device *dev;
	struct snap_pci *pci;
	int ret, retry = 0;

	if (virtnet_device_attr_check(ctx, attr))
		return NULL;

	virtnet_device_hotplug_attr_init(&hotplug_attr, attr);

	pci = snap_hotplug_pf(ctx->sctx, &hotplug_attr);
	if (!pci) {
		log_error("Failed to hotplug PCI device");
		return NULL;
	}

	/* Wait for the host to enumerate the function and assign a BDF */
	do {
		if (retry++ > PCI_ENUMERATE_MAX_RETRY) {
			log_error("Hotplug finished without PCI enumeration");
			goto err_out;
		}
		usleep(PCI_ENUMERATE_TIME_WAIT_US);
		ret = virtnet_device_query_pci_functions_info(ctx->sctx, pci);
		if (ret) {
			log_error("Failed to enumerate hotplug PCI device, err(%d)", ret);
			goto err_out;
		}
	} while (!pci->pci_bdf.raw);

	dev = &ctx->dev_list[pci->id];

	/* Resolve the port context before touching the dev slot, so a
	 * lookup failure does not leave a half-initialized entry
	 * (previously the fields were set first). */
	dev->port_ctx = virtnet_ibdev_to_ibport(ctx, attr->ib_dev);
	if (!dev->port_ctx) {
		log_error("Failed to locate port context for %s",
			  attr->ib_dev->name);
		goto err_out;
	}

	dev->snap_pci = pci;
	dev->id = pci->id;
	dev->flag = VIRTNET_DEV_PF;
	dev->ctx = ctx;

	ret = virtnet_sf_create(dev);
	if (ret) {
		log_error("Failed to create SF for hotplug dev %i",
			  dev->id);
		goto err_out;
	}

	ret = virtnet_device_open(dev);
	if (ret) {
		log_error("Failed to open hotplug device, bdf: %x:%x.%x",
			  pci->pci_bdf.bdf.bus,
			  pci->pci_bdf.bdf.device,
			  pci->pci_bdf.bdf.function);
		goto err_open;
	}

	if (virtnet_device_rec_init(dev))
		goto err_rec;

	return dev;

err_rec:
	/* Fix: the old error path skipped closing the already-opened
	 * device, leaking its controller/epoll/table resources. */
	virtnet_device_close(dev);
err_open:
	virtnet_sf_destroy(dev);
err_out:
	snap_hotunplug_pf(pci);
	return NULL;
}

void virtnet_device_unplug(struct virtnet_device *dev)
{
	if (!dev->snap_pci || !dev->snap_pci->hotplugged)
		return;

	log_info("bdf: %x:%x.%x",
		 dev->snap_pci->pci_bdf.bdf.bus,
		 dev->snap_pci->pci_bdf.bdf.device,
		 dev->snap_pci->pci_bdf.bdf.function);

	virtnet_device_rec_destroy(dev);
	virtnet_device_close(dev);
	virtnet_sf_destroy(dev);
	snap_hotunplug_pf(dev->snap_pci);
	memset(dev, 0, sizeof(*dev));
}

