/*
 * Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <limits.h>
#include <dirent.h>
#include <fcntl.h>
#include <net/ethernet.h>
#include <netinet/in.h>
#include <infiniband/mlx5dv.h>
#include <linux/virtio_net.h>
#include <libudev.h>
#include <mlxdevm.h>
#include "virtnet.h"
#include "virtnet_sf.h"

#define VIRTNET_SF_MAX_MDEV 32
#define DEFAULT_TIR_NUM 7
/*
 * Priority arithmetic helpers.  Lower numeric value == higher steering
 * priority, hence INC subtracts and DEC adds.  Arguments are
 * parenthesized so expression arguments (e.g. PRIO_INC(p, a + b))
 * expand correctly — the old (base - val) form did not.
 */
#define PRIO_INC(base, val) ((base) - (val))
#define PRIO_DEC(base, val) ((base) + (val))

/* Flow's base priority. */
const uint16_t virtnet_flow_promisc_prio_base = 3; /* After RSS, it is 3 - 1.*/
const uint16_t virtnet_flow_drop_prio_base = 4; /* No RSS for drop action. */
const uint16_t virtnet_flow_mac_prio_base = 7; /* After RSS, it is 7 - 5. */
const uint16_t virtnet_flow_vlan_prio_base = 7; /* After RSS, it is 7 - 5. */

/*
 * Unicast match: all-zero value with only the multicast (I/G) bit in
 * the mask — matches any destination MAC whose group bit is clear.
 */
static struct ether_addr virtnet_unicast_mac_addr = {
	.ether_addr_octet = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
};

static struct ether_addr virtnet_unicast_mac_mask = {
	.ether_addr_octet = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 },
};

/* Multicast match: the group bit set in both value and mask. */
static struct ether_addr virtnet_multicast_mac_addr = {
	.ether_addr_octet = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 },
};

static struct ether_addr virtnet_multicast_mac_mask = {
	.ether_addr_octet = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 },
};

/* Broadcast destination ff:ff:ff:ff:ff:ff. */
static struct ether_addr bcast_mac = {
	.ether_addr_octet = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
};

/*
 * One flow-matcher description: a mask/value parameter pair for
 * mlx5dv_dr_matcher_create(), the steering priority, the
 * match_criteria_enable bits, and the RX hash-field selection to apply
 * to traffic hitting this matcher.  Kept on a singly-linked list built
 * by virtnet_sf_flow_matcher_rss_expand().
 */
struct virtnet_sf_flow_matcher {
	SLIST_ENTRY(virtnet_sf_flow_matcher) next;
	struct mlx5dv_flow_match_parameters *mask;
	struct mlx5dv_flow_match_parameters *value;
	uint16_t priority;
	uint8_t criteria;
	struct virtnet_rx_hash_field_select hash_field;
};

SLIST_HEAD(virtnet_sf_flow_matcher_list, virtnet_sf_flow_matcher);

/*
 * One installed steering rule: the DR matcher it was created from and
 * the rule handle itself (both opaque mlx5dv objects, stored as void *).
 */
struct virtnet_sf_dev_flow {
	SLIST_ENTRY(virtnet_sf_dev_flow) next;
	void *matcher;
	void *flow;
};

SLIST_HEAD(rss_flow_list, virtnet_sf_dev_flow);

/* "borrowed" from DPDK vDPA. */

enum {
	MLX_TIRC_DISP_TYPE_DIRECT    = 0x0,
	MLX_TIRC_DISP_TYPE_INDIRECT  = 0x1,
};

enum {
	MLX_L3_PROT_TYPE_IPV4 = 0,
	MLX_L3_PROT_TYPE_IPV6 = 1,
};

enum {
	MLX_L4_PROT_TYPE_TCP = 0,
	MLX_L4_PROT_TYPE_UDP = 1,
};

enum {
	MLX_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_SRC_IP     = 0x0,
	MLX_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_DST_IP     = 0x1,
	MLX_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_SPORT   = 0x2,
	MLX_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_DPORT   = 0x3,
	MLX_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_IPSEC_SPI  = 0x4,
};

enum {
	MLX_RX_HASH_FN_NONE           = 0x0,
	MLX_RX_HASH_FN_INVERTED_XOR8  = 0x1,
	MLX_RX_HASH_FN_TOEPLITZ       = 0x2,
};

enum {
	MLX_MATCH_CRITERIA_ENABLE_OUTER_BIT,
	MLX_MATCH_CRITERIA_ENABLE_MISC_BIT,
	MLX_MATCH_CRITERIA_ENABLE_INNER_BIT,
	MLX_MATCH_CRITERIA_ENABLE_MISC2_BIT,
	MLX_MATCH_CRITERIA_ENABLE_MISC3_BIT
};

/* forward declare. */
static int virtnet_sf_sysfs_write(const char *, const char *);
/* RQTable functions. */
static int virtnet_sf_rqt_update(struct virtnet_device *dev);
static int virtnet_sf_rqt_destroy(struct virtnet_device *dev);
/* TIR functions. */
static int virtnet_sf_tir_create_default(struct virtnet_device *dev);
static void virtnet_sf_tir_destroy_default(struct virtnet_device *dev);
static struct virtnet_tir *virtnet_sf_tir_new(struct virtnet_device *dev,
					struct virtnet_tir_devx_attr *attr);
static void virtnet_sf_tir_delete(struct virtnet_tir *vtir);
static struct virtnet_tir *virtnet_sf_tir_lookup(struct virtnet_device *dev,
					struct virtnet_tir_devx_attr *attr);
/* Lookup first, create on miss. */
static struct virtnet_tir *virtnet_sf_tir_get(struct virtnet_device *dev,
					struct virtnet_tir_devx_attr *attr);
static void inline virtnet_sf_tir_devx_attr_default(struct virtnet_device *dev,
				 struct virtnet_tir_devx_attr *attr);
/* RSS functions. */
static int virtnet_sf_flow_matcher_rss_expand(uint16_t priority,
				struct virtnet_sf_flow_matcher_list **list);
static void virtnet_sf_flow_matcher_rss_destroy
				(struct virtnet_sf_flow_matcher_list **list);
/* Install one logical rule as a set of per-protocol RSS flows. */
static int virtnet_sf_rss_flow_apply(struct virtnet_device *dev,
				     uint16_t base_priority,
				     struct ether_addr *mac_m,
				     struct ether_addr *mac_v,
				     struct rss_flow_list **flow_list);
static int virtnet_sf_rss_flow_destroy(struct rss_flow_list **flow_list);
static int inline virtnet_ether_addr_is_zero(uint8_t *mac);
/* Drop-action flows (single rule, no RSS). */
static int virtnet_sf_drop_flow_apply(struct virtnet_device *dev,
				      uint16_t base_priority,
				      struct ether_addr *mac_m,
				      struct ether_addr *mac_v,
				      struct virtnet_sf_dev_flow **dev_flow);
static int virtnet_sf_drop_flow_destroy(struct virtnet_sf_dev_flow **dev_flow);

/*
 * Allocate a flow table (2^12 entries) and create its DevX object.
 * Returns an opaque handle for virtnet_cmdif_destroy_flow_table(), or
 * NULL on failure (err_if() logs and diverts to free_exit).
 */
void *
virtnet_cmdif_create_flow_table(struct ibv_context *ctx, uint32_t type)
{
	struct virtnet_flow_table *ft = NULL;
	uint32_t id = 0;
	uint8_t level = 0;
	uint8_t log_size = 12;

	ft = calloc(1, sizeof(*ft));
	err_if(!ft, "cannot alloc flow table struct");
	ft->size = (1ULL << log_size);
	SLIST_INIT(&ft->fg_list);
	ft->ftes = calloc(ft->size, sizeof(*ft->ftes));
	err_if(!ft->ftes, "cannot alloc flow table entry struct");
	ft->obj = mlx_devx_create_flow_table(ctx, type, level, log_size, &id);
	err_if(!ft->obj, "cannot create flow table");
	ft->id = id;
	return (void *)ft;
free_exit:
	if (ft) {
		free(ft->ftes);	/* free(NULL) is a no-op. */
		free(ft);
	}
	return NULL;
}

/*
 * Destroy a flow table created by virtnet_cmdif_create_flow_table().
 * Frees the entry array (previously leaked) and the table struct even
 * when DevX destroy fails; returns the DevX destroy status.
 */
int
virtnet_cmdif_destroy_flow_table(void *table)
{
	struct virtnet_flow_table *tbl = (struct virtnet_flow_table *)table;
	int ret;

	ret = mlx5dv_devx_obj_destroy(tbl->obj);
	if (ret)
		log_error("cannot destroy table DevX object");
	/* ftes is allocated by virtnet_cmdif_create_flow_table(). */
	free(tbl->ftes);
	free(tbl);
	return ret;
}

/* Apply device vlan flow.
 * Stub: VLAN steering is not implemented here.  A non-NULL sentinel is
 * stored so callers can treat the flow as "installed" and later pass it
 * back to virtnet_sf_vlan_flow_destroy(). */
int
virtnet_sf_vlan_flow_apply(struct virtnet_device *dev, uint16_t vid,
			   void **flow)
{
	(void)(dev);
	(void)(vid);
	*flow = (void *)0xdeadbeef;
	return 0;
}

/* Destroy one device vlan flow.
 * Stub counterpart of virtnet_sf_vlan_flow_apply(): only clears the
 * sentinel handle; no hardware state is touched. */
int
virtnet_sf_vlan_flow_destroy(struct virtnet_device *dev, void **flow)
{
	(void)(dev);
	*flow = NULL;
	return 0;
}

/*
 * Install RSS flows that fully match a destination MAC address.
 * Returns the result of virtnet_sf_rss_flow_apply(); -1 when mac or
 * flow is NULL.
 */
int
virtnet_sf_mac_flow_apply(struct virtnet_device *dev, uint8_t *mac, void **flow)
{
	struct ether_addr value;
	struct ether_addr mask = {
		.ether_addr_octet = "\xFF\xFF\xFF\xFF\xFF\xFF",
	};

	if (mac == NULL || flow == NULL)
		return -1;

	memcpy(value.ether_addr_octet, mac, ETH_ALEN);
	return virtnet_sf_rss_flow_apply(dev, virtnet_flow_mac_prio_base,
					 &mask, &value,
					 (struct rss_flow_list **)flow);
}

/* Tear down the RSS flows installed by virtnet_sf_mac_flow_apply(). */
int
virtnet_sf_mac_flow_destroy(struct virtnet_device *dev, void **flow)
{
	(void)(dev);
	return virtnet_sf_rss_flow_destroy((struct rss_flow_list **)flow);
}

/*
 * Check that a sysfs node exists and is writable by attempting to open
 * it for writing.  Returns true when the open succeeds.
 */
static bool
virtnet_sf_sysfs_validate(const char *sysfs)
{
	int fd;

	fd = open(sysfs, O_WRONLY);
	if (fd < 0)
		return false;	/* Bug fix: previously close(-1) ran here. */

	close(fd);
	return true;
}

/*
 * Restrict the SF's function capabilities: RoCE off and a single
 * unicast MAC, each applied only when the port reports the cap as
 * settable.  -EOPNOTSUPP from the kernel is tolerated silently.
 */
static void virtnet_sf_cap_modify(struct virtnet_device *dev,
				  struct mlxdevm_port *port)
{
	struct mlxdevm_port_fn_ext_cap cap = {};
	struct mlxdevm *devm = dev->port_ctx->devm;
	int ret;

	if (!port->ext_cap.roce_valid && !port->ext_cap.max_uc_macs_valid)
		return;

	cap.roce = false;
	cap.roce_valid = port->ext_cap.roce_valid;
	cap.max_uc_macs = 1;
	cap.max_uc_macs_valid = port->ext_cap.max_uc_macs_valid;

	ret = mlxdevm_port_fn_cap_set(devm, port, &cap);
	if (ret && ret != -EOPNOTSUPP)
		log_error("Failed to disable RoCE and UC MACs");
}

/* Best-effort setter for a u32 driver parameter: only written when the
 * parameter is readable (i.e. supported by this driver instance). */
static void
virtnet_sf_param_set_u32(struct mlxdevm *devm, const char *name, uint32_t val)
{
	struct mlxdevm_param param = {};

	if (!mlxdevm_dev_driver_param_get(devm, name, &param)) {
		param.u.val_u32 = val;
		mlxdevm_dev_driver_param_set(devm, name, &param);
	}
}

/* Same as virtnet_sf_param_set_u32() for bool parameters. */
static void
virtnet_sf_param_set_bool(struct mlxdevm *devm, const char *name, bool val)
{
	struct mlxdevm_param param = {};

	if (!mlxdevm_dev_driver_param_get(devm, name, &param)) {
		param.u.val_bool = val;
		mlxdevm_dev_driver_param_set(devm, name, &param);
	}
}

/* Same as virtnet_sf_param_set_u32() for u16 parameters. */
static void
virtnet_sf_param_set_u16(struct mlxdevm *devm, const char *name, uint16_t val)
{
	struct mlxdevm_param param = {};

	if (!mlxdevm_dev_driver_param_get(devm, name, &param)) {
		param.u.val_u16 = val;
		mlxdevm_dev_driver_param_set(devm, name, &param);
	}
}

/*
 * Tune the SF's driver parameters (EQ depths, flow control, netdev
 * creation, completion EQ count) over a dedicated devm socket.  Every
 * write is best-effort: unsupported parameters are skipped.
 */
static void virtnet_sf_apply_config(struct virtnet_device *dev)
{
	struct mlxdevm *devm;

	devm = mlxdevm_open("mlxdevm", "auxiliary",
			    dev->sf_verbs.sf_sys_name);
	if (!devm) {
		log_error("Failed to open devm socket, %s\n",
			  dev->sf_verbs.sf_sys_name);
		return;
	}

	virtnet_sf_param_set_u32(devm, "cmpl_eq_depth", 64);
	virtnet_sf_param_set_u32(devm, "async_eq_depth", 64);
	virtnet_sf_param_set_bool(devm, "disable_fc", false);
	virtnet_sf_param_set_bool(devm, "disable_netdev", true);
	virtnet_sf_param_set_u16(devm, "max_cmpl_eqs", 1);

	mlxdevm_close(devm);
}

/*
 * Poll the udev monitor for the "bind" event of the auxiliary device
 * whose parent PCI address and sfnum match this device, and record its
 * sysfs name/path.  Polls every 5ms for up to ~10s.  Returns 0 when the
 * device was found, -1 on timeout.
 */
static int virtnet_sf_udev_fetch(struct virtnet_device *dev,
				 uint32_t sfnum)
{
#define SF_UDEV_TIME_WAIT_US	(5000u)
#define SF_UDEV_MAX_RETRY	(2000u)
	struct udev_device *parent_dev;
	struct udev_device *udev_dev;
	struct udev_monitor *monitor;
	char sf_num_in[10] = {};
	const char *sf_sys_path;
	const char *sf_sys_name;
	const char *pci_name;
	const char *sf_num;
	const char *action;
	struct udev *udev;
	int retry = 0;
	int ret = -1;

	udev = dev->ctx->udev;
	monitor = dev->ctx->monitor;

	udev_ref(udev);
	udev_monitor_ref(monitor);

	/* Bug fix: sfnum is unsigned, the old "%d" was a format mismatch. */
	snprintf(sf_num_in, sizeof(sf_num_in), "%u", sfnum);
	do {
		if (retry++ > SF_UDEV_MAX_RETRY)
			break;

		usleep(SF_UDEV_TIME_WAIT_US);

		udev_dev = udev_monitor_receive_device(monitor);
		if (!udev_dev)
			continue;

		/* udev_device_get_action() may return NULL — guard the
		 * strcmp (previously a potential NULL dereference). */
		action = udev_device_get_action(udev_dev);
		if (!action || strcmp(action, "bind")) {
			udev_device_unref(udev_dev);
			continue;
		}

		sf_num = udev_device_get_sysattr_value(udev_dev, "sfnum");
		sf_sys_path = udev_device_get_syspath(udev_dev);
		sf_sys_name = udev_device_get_sysname(udev_dev);
		parent_dev = udev_device_get_parent(udev_dev);
		pci_name = parent_dev ?
			udev_device_get_sysname(parent_dev) : NULL;
		if (pci_name && sf_sys_name && sf_sys_path &&
		    !strcmp(pci_name, dev->port_ctx->pci_addr_str) &&
		    sf_num && !strcmp(sf_num_in, sf_num)) {
			ret = 0;
			log_debug("Path: %s, sfname: %s",
				  sf_sys_path, sf_sys_name);
			/* NOTE(review): destination buffer sizes are not
			 * visible here — confirm sf_sys_name/sf_sys_path
			 * always fit dev->sf_verbs fields. */
			strcpy(dev->sf_verbs.sf_sys_name, sf_sys_name);
			strcpy(dev->sf_verbs.sf_sys_path, sf_sys_path);
			udev_device_unref(udev_dev);
			break;
		}
		udev_device_unref(udev_dev);
	} while (1);

	udev_monitor_unref(monitor);
	udev_unref(udev);

	return ret;
}

/*
 * Scan <sf_sys_path>/infiniband/ for the first "mlx5_*" entry and copy
 * its name into ib_name (PATH_MAX bytes).  Returns 0 on success,
 * -ENOENT when the directory is missing or holds no mlx5 device.
 */
static int
_virtnet_sf_get_ib_name(struct virtnet_device *dev, char *ib_name)
{
	char ib_dir[PATH_MAX] = {0};
	struct dirent *entry;
	DIR *dir;
	int rc = -ENOENT;

	snprintf(ib_dir, sizeof(ib_dir),
		 "%s/infiniband/", dev->sf_verbs.sf_sys_path);
	dir = opendir(ib_dir);
	if (dir == NULL) {
		log_error("Can't open %s", ib_dir);
		return rc;
	}

	while ((entry = readdir(dir)) != NULL) {
		if (strncmp(entry->d_name, "mlx5_", strlen("mlx5_")) != 0)
			continue;
		snprintf(ib_name, PATH_MAX, "%s", entry->d_name);
		rc = 0;
		break;
	}

	closedir(dir);
	return rc;
}

/*
 * Retry wrapper around _virtnet_sf_get_ib_name(): the IB device shows
 * up asynchronously after driver bind, so poll every 1ms for up to
 * ~5s.  Returns 0 on success, the last lookup error on timeout.
 */
static int
virtnet_sf_get_ib_name(struct virtnet_device *dev, char *ib_name)
{
#define SF_IB_TIME_WAIT_US	(1000u)
#define SF_IB_MAX_RETRY		(5000u)
	int attempt = 0;
	int rc = -1;

	while (1) {
		rc = _virtnet_sf_get_ib_name(dev, ib_name);
		if (rc == 0)
			break;
		if (attempt++ > SF_IB_MAX_RETRY)
			break;
		usleep(SF_IB_TIME_WAIT_US);
	}

	return rc;
}

/*
 * Write the SF's auxiliary-device name to a driver bind/unbind sysfs
 * node.  A missing node (driver not loaded / interface not present) is
 * treated as success — there is nothing to (un)bind from.
 * Shared helper for the three wrappers below (previously triplicated).
 */
static int
virtnet_sf_drv_ctl(struct virtnet_device *dev, const char *sysfs)
{
	if (!virtnet_sf_sysfs_validate(sysfs))
		return 0;

	return virtnet_sf_sysfs_write(sysfs, dev->sf_verbs.sf_sys_name);
}

/* Detach the SF from the configuration-only driver. */
static int
virtnet_sf_unbind_cfg_drv(struct virtnet_device *dev)
{
	return virtnet_sf_drv_ctl(dev,
		"/sys/bus/auxiliary/drivers/mlx5_core.sf_cfg/unbind");
}

/* Attach the SF to the full core driver. */
static int
virtnet_sf_bind_core_drv(struct virtnet_device *dev)
{
	return virtnet_sf_drv_ctl(dev,
		"/sys/bus/auxiliary/drivers/mlx5_core.sf/bind");
}

/* Detach the SF from the core driver (teardown path). */
static int
virtnet_sf_unbind_core_drv(struct virtnet_device *dev)
{
	return virtnet_sf_drv_ctl(dev,
		"/sys/bus/auxiliary/drivers/mlx5_core.sf/unbind");
}

/*
 * Return the SF number for this device: reuse the one already assigned
 * (non-zero sf_num, e.g. after recovery), otherwise consume the next
 * value from the port context's running counter.
 *
 * NOTE(review): the counter lives in the shared port_ctx but is guarded
 * by the per-device lock — confirm that two devices on the same port
 * cannot race here, or move the lock into port_ctx.
 */
static int virtnet_sf_get_id(struct virtnet_device *dev)
{
	uint32_t sf_num;

	pthread_mutex_lock(&dev->lock);
	sf_num = dev->sf_verbs.sf_num ? dev->sf_verbs.sf_num :
		dev->port_ctx->max_sf_num++;
	pthread_mutex_unlock(&dev->lock);

	dev->sf_verbs.sf_num = sf_num;

	return sf_num;
}

/*
 * Create and bring up the SF backing this virtio-net device:
 *  - allocate (or recover) the PF/SF numbers and add the devm SF port,
 *  - restrict its capabilities (RoCE off, single UC MAC),
 *  - activate it and wait for the auxiliary device via udev,
 *  - apply per-SF driver tuning, rebind from the cfg driver to the
 *    core driver, resolve the rep netdev and RDMA device names, and
 *    open a DevX-enabled verbs context.
 * Returns 0 on success; on failure the steps taken so far are undone
 * in reverse order via the goto ladder.
 */
static int virtnet_sf_dev_create(struct virtnet_device *dev)
{
	struct mlx5dv_context_attr dev_attr = {
		.flags = MLX5DV_CONTEXT_FLAGS_DEVX
	};
	struct mlxdevm_port *port;
	uint32_t pf_num, sf_num;
	struct mlxdevm *devm;
	int err;

	devm = dev->port_ctx->devm;

	if (virtnet_device_is_recovering(dev)) {
		err = virtnet_device_sf_recover(dev, &pf_num, &sf_num);
		if (err)
			return err;
	} else {
		pf_num = dev->port_ctx->pci_function;
		sf_num = virtnet_sf_get_id(dev);
	}

	port = mlxdevm_sf_port_add(devm, pf_num, sf_num);
	if (!port) {
		log_error("Failed to add sf port err(%d)\n", errno);
		err = EEXIST;
		goto add_err;
	}

	virtnet_sf_cap_modify(dev, port);

	err = mlxdevm_port_fn_state_set(devm, port, 1);
	if (err) {
		log_error("Failed to activate sf err(%d)\n", err);
		goto set_err;
	}

	err = virtnet_sf_udev_fetch(dev, sf_num);
	if (err) {
		log_error("Failed to fetch sf device err(%d)\n", err);
		goto udev_err;
	}

	virtnet_sf_apply_config(dev);

	err = virtnet_sf_unbind_cfg_drv(dev);
	if (err) {
		log_error("Failed to unbind SF[%s] from cfg driver",
			  dev->sf_verbs.sf_sys_name);
		goto udev_err;
	}

	err = virtnet_sf_bind_core_drv(dev);
	if (err) {
		log_error("Failed to bind SF[%s] to core driver",
			  dev->sf_verbs.sf_sys_name);
		goto udev_err;
	}

	err = mlxdevm_port_fn_opstate_wait_attached(devm, port);
	if (err) {
		log_error("Failed to attach device err(%d)\n", err);
		goto attach_err;
	}

	err = mlxdevm_port_netdev_get(devm, port,
				      dev->sf_verbs.rep_ndev_name);
	if (err) {
		log_error("Failed to find rep netdev for SF[%s]",
			  dev->sf_verbs.sf_sys_name);
		goto attach_err;
	}

	err = virtnet_sf_get_ib_name(dev, dev->sf_verbs.rdma_dev_name);
	if (err) {
		log_error("Failed to find IB dev for SF[%s]",
			  dev->sf_verbs.sf_sys_name);
		goto attach_err;
	}

	dev->sf_verbs.dev = mlx_dv_open_device(dev->sf_verbs.rdma_dev_name,
					       &dev_attr);
	if (!dev->sf_verbs.dev) {
		/* Bug fix: the old code tested the stale 'err' (always 0
		 * at this point), so open failures were ignored. */
		err = errno ? errno : EINVAL;
		log_error("Failed to open IB dev for SF[%s]",
			  dev->sf_verbs.sf_sys_name);
		goto attach_err;
	}

	dev->sf_verbs.port = port;

	log_debug("%s(%i): new sf: rdma dev %s, rep netdev %s",
		  dev->flag & VIRTNET_DEV_PF ? "PF" : "VF",
		  dev->id, dev->sf_verbs.rdma_dev_name,
		  dev->sf_verbs.rep_ndev_name);

	return 0;
attach_err:
	virtnet_sf_unbind_core_drv(dev);
udev_err:
	mlxdevm_port_fn_state_set(devm, port, 0);
set_err:
	mlxdevm_sf_port_del(devm, port);
add_err:
	return err;
}

/*
 * Tear down the SF created by virtnet_sf_dev_create(), in reverse
 * order: close the verbs context, unbind from the core driver,
 * deactivate the devm port, wait until it reports detached, then
 * delete the port.  No-op when no verbs context is open.
 */
void virtnet_sf_dev_destroy(struct virtnet_device *dev)
{
	if (!dev->sf_verbs.dev)
		return;

	ibv_close_device(dev->sf_verbs.dev);
	virtnet_sf_unbind_core_drv(dev);
	mlxdevm_port_fn_state_set(dev->port_ctx->devm,
				  dev->sf_verbs.port, 0);
	mlxdevm_port_fn_opstate_wait_detached(dev->port_ctx->devm,
					      dev->sf_verbs.port);
	mlxdevm_sf_port_del(dev->port_ctx->devm, dev->sf_verbs.port);
	/* Clearing the handle makes repeated destroy calls safe. */
	dev->sf_verbs.dev = NULL;
}

/* Install the promiscuous RSS flows (match-all, promisc priority). */
static int
virtnet_sf_promisc_flow_apply(struct virtnet_device *dev)
{
	return virtnet_sf_rss_flow_apply(
		dev, virtnet_flow_promisc_prio_base, NULL, NULL,
		(struct rss_flow_list **)&dev->sf_verbs.steer.promisc_flow);
}

/* Remove the promiscuous RSS flows installed above. */
static int
virtnet_sf_promisc_flow_destroy(struct virtnet_device *dev)
{
	return virtnet_sf_rss_flow_destroy(
		(struct rss_flow_list **)&dev->sf_verbs.steer.promisc_flow);
}

/* Install RSS flows matching every multicast destination MAC. */
static int
virtnet_sf_allmulti_flow_apply(struct virtnet_device *dev)
{
	return virtnet_sf_rss_flow_apply(
		dev, virtnet_flow_mac_prio_base,
		&virtnet_multicast_mac_mask, &virtnet_multicast_mac_addr,
		(struct rss_flow_list **)&dev->sf_verbs.steer.allmulti_flow);
}

/* Remove the all-multicast RSS flows installed above. */
static int
virtnet_sf_allmulti_flow_destroy(struct virtnet_device *dev)
{
	return virtnet_sf_rss_flow_destroy(
		(struct rss_flow_list **)&dev->sf_verbs.steer.allmulti_flow);
}

/* Install broadcast (ff:ff:ff:ff:ff:ff) RSS flows, at most once. */
static int
virtnet_sf_allbcast_flow_apply(struct virtnet_device *dev)
{
	/* Rule already present — adding it again is unnecessary. */
	if (dev->sf_verbs.steer.allbcast_flow)
		return 0;

	return virtnet_sf_rss_flow_apply(dev, virtnet_flow_mac_prio_base,
					 &bcast_mac, &bcast_mac,
					 &dev->sf_verbs.steer.allbcast_flow);
}

/* Remove the broadcast RSS flows; no-op when none are installed. */
static int
virtnet_sf_allbcast_flow_destroy(struct virtnet_device *dev)
{
	int rc;

	if (!dev->sf_verbs.steer.allbcast_flow)
		return 0;

	rc = virtnet_sf_rss_flow_destroy(&dev->sf_verbs.steer.allbcast_flow);
	dev->sf_verbs.steer.allbcast_flow = NULL;
	return rc;
}

/* Install RSS flows matching every unicast destination MAC. */
static int
virtnet_sf_alluni_flow_apply(struct virtnet_device *dev)
{
	return virtnet_sf_rss_flow_apply(
		dev, virtnet_flow_mac_prio_base,
		&virtnet_unicast_mac_mask, &virtnet_unicast_mac_addr,
		(struct rss_flow_list **)&dev->sf_verbs.steer.alluni_flow);
}

/* Remove the all-unicast RSS flows installed above. */
static int
virtnet_sf_alluni_flow_destroy(struct virtnet_device *dev)
{
	return virtnet_sf_rss_flow_destroy(
		(struct rss_flow_list **)&dev->sf_verbs.steer.alluni_flow);
}

/* Install a drop rule for all unicast traffic (group bit clear). */
static int
virtnet_sf_nouni_flow_apply(struct virtnet_device *dev)
{
	return virtnet_sf_drop_flow_apply(
		dev, virtnet_flow_drop_prio_base,
		&virtnet_unicast_mac_mask, &virtnet_unicast_mac_addr,
		(struct virtnet_sf_dev_flow **)
		&dev->sf_verbs.steer.nouni_flow);
}

/* Remove the drop-unicast rule installed above. */
static int
virtnet_sf_nouni_flow_destroy(struct virtnet_device *dev)
{
	return virtnet_sf_drop_flow_destroy(
		(struct virtnet_sf_dev_flow **)
		&dev->sf_verbs.steer.nouni_flow);
}

/* Install a drop rule for all multicast traffic (group bit set). */
static int
virtnet_sf_nomulti_flow_apply(struct virtnet_device *dev)
{
	return virtnet_sf_drop_flow_apply(
		dev, virtnet_flow_drop_prio_base,
		&virtnet_multicast_mac_mask, &virtnet_multicast_mac_addr,
		(struct virtnet_sf_dev_flow **)
		&dev->sf_verbs.steer.nomulti_flow);
}

/* Remove the drop-multicast rule installed above. */
static int
virtnet_sf_nomulti_flow_destroy(struct virtnet_device *dev)
{
	return virtnet_sf_drop_flow_destroy(
		(struct virtnet_sf_dev_flow **)
		&dev->sf_verbs.steer.nomulti_flow);
}

/* Install a drop rule for broadcast frames (exact ff:…:ff match). */
static int
virtnet_sf_nobcast_flow_apply(struct virtnet_device *dev)
{
	static struct ether_addr bcast_full = {
		.ether_addr_octet = "\xFF\xFF\xFF\xFF\xFF\xFF",
	};

	return virtnet_sf_drop_flow_apply(
		dev, virtnet_flow_drop_prio_base,
		&bcast_full, &bcast_full,
		(struct virtnet_sf_dev_flow **)
		&dev->sf_verbs.steer.nobcast_flow);
}

/* Remove the drop-broadcast rule installed above. */
static int
virtnet_sf_nobcast_flow_destroy(struct virtnet_device *dev)
{
	return virtnet_sf_drop_flow_destroy(
		(struct virtnet_sf_dev_flow **)
		&dev->sf_verbs.steer.nobcast_flow);
}

/*
 * Write a string to a sysfs node.  Returns 0 only when the whole
 * string was written (previously any positive write count, i.e. a
 * partial write, was treated as success), -1 otherwise.
 */
static int
virtnet_sf_sysfs_write(const char *sysfs, const char *string_to_write)
{
	size_t len = strlen(string_to_write);
	ssize_t n;
	int fd;

	fd = open(sysfs, O_WRONLY);
	if (fd < 0) {
		log_error("sysfs[%s] doesn't exist", sysfs);
		return -1;
	}
	n = write(fd, string_to_write, len);
	close(fd);
	if (n < 0 || (size_t)n != len) {
		log_error("failed to write %s to %s", string_to_write, sysfs);
		return -1;
	}
	return 0;
}

/*
 * (Re)build the SF's RQ table from the currently-enabled even-indexed
 * (receive) virtqueues.  The table is padded by cycling over the
 * collected RQ ids until every slot is filled, then either created
 * (first call) or modified in place.  Returns 0 on success, -1 on
 * failure (err_if() logs and diverts to free_exit).
 */
static int
virtnet_sf_rqt_update(struct virtnet_device *dev)
{
	uint16_t rq_num = 0;
	struct snap_virtio_net_queue_attr *reg_vqs = dev->reg_vqs;
	struct virtnet_vq *vqs = dev->eth_vqs;
	struct mlx_devx_rqt_attr *attr = NULL;
	struct snap_virtio_queue_attr *vattr = NULL;
	int i, j, ret;
	uint32_t rqt_entries = 128;
	uint32_t log_max_queue_pairs = log2above(dev->vq_pair_n);

	/* At least 128 entries; grow when the device has more pairs. */
	rqt_entries = MAX(rqt_entries, (1ULL << log_max_queue_pairs));
	attr = calloc(1,
		      sizeof(*attr) + rqt_entries * sizeof(attr->rq_list[0]));
	err_if (!attr, "cannot allocate RQT attribute memory.");
	attr->rq_type = MLX5_INLINE_Q_TYPE_VIRTQ;
	attr->max_size = rqt_entries;
	/* Collect the RQ object ids of the enabled RX queues (even idx
	 * per the virtio-net queue layout used here). */
	for (i = 0; i < dev->vq_pair_n * 2; i++) {
		vattr = &reg_vqs[i].vattr;
		if (vattr->enable &&
		    vattr->offload_type == SNAP_VIRTQ_OFFLOAD_ETH_FRAME &&
		    vattr->idx % 2 == 0) {
			attr->rq_list[rq_num++] =
				vqs[vattr->idx].snap_q->virtq.virtq->obj_id;
		}
	}
	/* Pad the rest of the table by repeating the RQs collected
	 * above.  NOTE(review): if no queue qualified (rq_num == 0),
	 * this fills the table with zeros — confirm callers guarantee
	 * at least one enabled RX queue. */
	for (j = 0; rq_num != rqt_entries; rq_num++, j++) {
		attr->rq_list[rq_num] = attr->rq_list[j];
	}
	attr->actual_size = rq_num;
	/* The padding loop terminates only on equality, so this check
	 * should never fire; kept as a safety net. */
	err_if(rq_num != rqt_entries,
	       "rq_num(%u) is different with rqt_entries(%u)",
	       rq_num, rqt_entries);
	if (!dev->sf_verbs.steer.rqt.obj) {
		dev->sf_verbs.steer.rqt.obj =
			mlx_devx_rqt_create(dev->sf_verbs.dev, attr,
					    &dev->sf_verbs.steer.rqt.id);
		err_if(!dev->sf_verbs.steer.rqt.obj, "cannot create sf RQTable with "
			"attribute: rq_type[%d], max_size[%u], actual_size[%u]",
			attr->rq_type, attr->max_size, attr->actual_size);
	} else {
		ret = mlx_devx_rqt_modify(dev->sf_verbs.steer.rqt.obj, dev->sf_verbs.steer.rqt.id, attr);
		err_if(ret, "cannot modify sf RQTable with attribute: rq_type[%d], max_size[%u], actual_size[%u]",
			attr->rq_type, attr->max_size, attr->actual_size);
	}
	free(attr);
	return 0;
free_exit:
	if (attr)
		free(attr);
	return -1;
}

/*
 * Destroy the SF's steering RQ table object, if any, and clear the
 * cached handle/id on success.  Returns the DevX destroy status.
 */
static int
virtnet_sf_rqt_destroy(struct virtnet_device *dev)
{
	int rc = 0;

	if (dev->sf_verbs.steer.rqt.obj)
		rc = mlx_devx_obj_destroy(dev->sf_verbs.steer.rqt.obj);

	if (rc) {
		log_error("cannot destroy sf steer RXQ table[%p], id[%u]",
			  dev->sf_verbs.steer.rqt.obj,
			  dev->sf_verbs.steer.rqt.id);
		return rc;
	}

	memset(&dev->sf_verbs.steer.rqt, 0,
	       sizeof(dev->sf_verbs.steer.rqt));
	return 0;
}

/*
 * Handle a virtio-net multi-queue request: validate the requested pair
 * count and rebuild the RQ table.  Returns 0 on success or no-op,
 * VIRTIO_NET_ERR on an invalid request, or the RQT update status.
 */
int
virtnet_sf_mq_update(struct virtnet_device *dev, uint16_t mq)
{
	/* Already at the requested pair count — nothing to do. */
	if (mq == dev->vq_pair_n)
		return 0;

	/* Reject zero pairs or more queues than the controller enabled. */
	if (mq == 0 || mq * 2 > dev->snap_ctrl->common.enabled_queues) {
		log_error("invalid mq request size: %hu", mq);
		return VIRTIO_NET_ERR;
	}

	dev->vq_pair_n = mq;
	return virtnet_sf_rqt_update(dev);
}

/*
 * Fill a TIR attribute with the defaults shared by all RSS TIRs:
 * indirect dispatch through the SF's RQ table, symmetric Toeplitz
 * hashing with a fixed well-known key, and the SF transport domain.
 * Hash field selectors are left zeroed for the caller to set.
 */
static void inline
virtnet_sf_tir_devx_attr_default(struct virtnet_device *dev,
				 struct virtnet_tir_devx_attr *attr)
{
	static const uint8_t toeplitz_key[VIRTNET_HASH_KEY_LEN] = {
		0x2c, 0xc6, 0x81, 0xd1, 0x5b, 0xdb, 0xf4, 0xf7,
		0xfc, 0xa2, 0x83, 0x19, 0xdb, 0x1a, 0x3e, 0x94,
		0x6b, 0x9e, 0x38, 0xd9, 0x2c, 0x9c, 0x03, 0xd1,
		0xad, 0x99, 0x44, 0xa7, 0xd9, 0x56, 0x3d, 0x59,
		0x06, 0x3c, 0x25, 0xf3, 0xfc, 0x1f, 0xdc, 0x2a,
	};

	memset(attr, 0, sizeof(*attr));
	attr->disp_type = MLX_TIRC_DISP_TYPE_INDIRECT;
	attr->rx_hash_fn = MLX_RX_HASH_FN_TOEPLITZ;
	attr->transport_domain = dev->sf_verbs.td_num;
	attr->indirect_table = dev->sf_verbs.steer.rqt.id;
	attr->rx_hash_symmetric = 1;
	memcpy(attr->rx_hash_toeplitz_key, toeplitz_key,
	       sizeof(toeplitz_key));
}

/*
 * Pre-create the DEFAULT_TIR_NUM TIRs covering every hash combination
 * used by the RSS matchers (IPv4/IPv6 x TCP/UDP, L3-only, and a
 * no-hash fallback) and push them onto the device's rss_tir_list.
 * Returns 0 on success, -1 on failure (err_if() diverts to free_exit).
 *
 * NOTE(review): on a mid-loop failure, TIRs already inserted remain on
 * rss_tir_list — confirm the caller runs
 * virtnet_sf_tir_destroy_default() on the error path.
 */
static int
virtnet_sf_tir_create_default(struct virtnet_device *dev)
{
	int i;
	struct virtnet_tir *vtir = NULL;
	struct rss_tir_list *rss_tir_list = NULL;
	struct virtnet_tir_devx_attr tir_attr;

	/* Hash on source+destination IP. */
	const uint8_t l3_hash =
		(1 << MLX_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_SRC_IP) |
		(1 << MLX_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_DST_IP);
	/* Hash on L4 source+destination port. */
	const uint8_t l4_hash =
		(1 << MLX_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_SPORT) |
		(1 << MLX_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_DPORT);
	enum { L3_BIT, L4_BIT, HASH, END };
	/* One row per default TIR: L3 type, L4 type, hash fields. */
	const uint8_t vars[DEFAULT_TIR_NUM][END] = {
		{ MLX_L3_PROT_TYPE_IPV6, MLX_L4_PROT_TYPE_TCP,
		  l3_hash | l4_hash }, /* IPV6+TCP. */
		{ MLX_L3_PROT_TYPE_IPV4, MLX_L4_PROT_TYPE_TCP,
		  l3_hash | l4_hash }, /* IPV4+TCP. */
		{ MLX_L3_PROT_TYPE_IPV6, MLX_L4_PROT_TYPE_UDP,
		  l3_hash | l4_hash }, /* IPV6+UDP. */
		{ MLX_L3_PROT_TYPE_IPV4, MLX_L4_PROT_TYPE_UDP,
		  l3_hash | l4_hash }, /* IPV4+UDP. */
		{ MLX_L3_PROT_TYPE_IPV6, 0, l3_hash }, /* IPV6. */
		{ MLX_L3_PROT_TYPE_IPV4, 0, l3_hash }, /* IPV4. */
		{ 0, 0, 0 }, /* None. */
	};

	err_if(!SLIST_EMPTY(&dev->sf_verbs.steer.rss_tir_list),
	       "rss_tir_list is not empty");
	rss_tir_list = &dev->sf_verbs.steer.rss_tir_list;
	virtnet_sf_tir_devx_attr_default(dev, &tir_attr);
	for (i = 0; i < DEFAULT_TIR_NUM; i++) {
		tir_attr.rx_hash_field_selector_outer.l3_prot_type =
								vars[i][L3_BIT];
		tir_attr.rx_hash_field_selector_outer.l4_prot_type =
								vars[i][L4_BIT];
		tir_attr.rx_hash_field_selector_outer.selected_fields =
								vars[i][HASH];
		vtir = virtnet_sf_tir_new(dev, &tir_attr);
		err_if(!vtir, "cannot create TIR with attr:l3_prot_type[0x%x],"
			"l4_prot_type[0x%x], selected_fields[0x%x]",
			tir_attr.rx_hash_field_selector_outer.l3_prot_type,
			tir_attr.rx_hash_field_selector_outer.l4_prot_type,
			tir_attr.rx_hash_field_selector_outer.selected_fields);
		SLIST_INSERT_HEAD(rss_tir_list, vtir, next);
	}
	return 0;
free_exit:
	return -1;
}

/* Pop and delete every TIR on the device's rss_tir_list. */
static void
virtnet_sf_tir_destroy_default(struct virtnet_device *dev)
{
	struct rss_tir_list *list = &dev->sf_verbs.steer.rss_tir_list;
	struct virtnet_tir *entry;

	for (entry = SLIST_FIRST(list); entry != NULL;
	     entry = SLIST_FIRST(list)) {
		SLIST_REMOVE_HEAD(list, next);
		virtnet_sf_tir_delete(entry);
	}
}

/*
 * Allocate a virtnet_tir, create its DevX TIR object and the matching
 * DR destination action, and cache the attribute for later lookups.
 * Returns NULL on failure (err_if() diverts to free_exit), undoing any
 * partial construction.
 */
static struct virtnet_tir *
virtnet_sf_tir_new(struct virtnet_device *dev,
		   struct virtnet_tir_devx_attr *attr)
{
	struct virtnet_tir *vtir = NULL;

	vtir = calloc(1, sizeof (*vtir));
	err_if(!vtir, "cannot alloc virtnet tir struct");
	vtir->obj = mlx_devx_cmd_create_tir(dev->sf_verbs.dev, attr, &vtir->id);
	err_if(!vtir->obj, "cannot create TIR via DevX");
	vtir->action = mlx5dv_dr_action_create_dest_devx_tir(vtir->obj);
	err_if(!vtir->action, "cannot create TIR[%p]-id[%u] action via DevX",
		vtir->obj, vtir->id);
	vtir->attr = (*attr);
	return vtir;
free_exit:
	/* Bug fix: when calloc failed, the old code dereferenced the
	 * NULL vtir (vtir->obj) before the NULL check. */
	if (vtir) {
		if (vtir->obj)
			mlx_devx_obj_destroy(vtir->obj);
		free(vtir);
	}
	return NULL;
}

/*
 * Release a virtnet_tir: destroy its DR action (if created), its DevX
 * object, then the struct itself.  Destroy failures are logged but do
 * not stop the teardown.
 */
static void
virtnet_sf_tir_delete(struct virtnet_tir *vtir)
{
	if (vtir->action && mlx5dv_dr_action_destroy(vtir->action))
		log_error("cannot destroy TIR action[%p] via DevX",
			  vtir->action);
	if (mlx_devx_obj_destroy(vtir->obj))
		log_error("cannot destroy TIR obj[%p] via DevX", vtir->obj);
	free(vtir);
}

/*
 * Find a cached TIR whose attribute bytes match *attr exactly.
 * Returns NULL when no match exists.
 * NOTE(review): memcmp compares padding too — attrs originate from a
 * memset template (virtnet_sf_tir_devx_attr_default), but the struct
 * assignment in virtnet_sf_tir_new may leave padding unspecified;
 * confirm a false miss here is harmless (it only creates a spare TIR).
 */
static struct virtnet_tir *
virtnet_sf_tir_lookup(struct virtnet_device *dev,
		      struct virtnet_tir_devx_attr *attr)
{
	struct virtnet_tir *cur;

	SLIST_FOREACH(cur, &dev->sf_verbs.steer.rss_tir_list, next) {
		if (memcmp(&cur->attr, attr, sizeof(*attr)) == 0)
			return cur;
	}
	return NULL;
}

/*
 * Return a TIR matching *attr: reuse a cached one when available,
 * otherwise create it.  Returns NULL (and logs) when creation fails.
 */
static struct virtnet_tir *
virtnet_sf_tir_get(struct virtnet_device *dev,
		   struct virtnet_tir_devx_attr *attr)
{
	struct virtnet_tir *vtir = virtnet_sf_tir_lookup(dev, attr);

	if (vtir)
		return vtir;

	log_debug("create new virtnet tir: "
		  "l3_prot_type[0x%X], l4_prot_type[0x%X], "
		  "selected_fields[0x%X]",
		  attr->rx_hash_field_selector_outer.l3_prot_type,
		  attr->rx_hash_field_selector_outer.l4_prot_type,
		  attr->rx_hash_field_selector_outer.selected_fields);
	vtir = virtnet_sf_tir_new(dev, attr);
	if (!vtir)
		log_error("cannot create virtnet tir");
	return vtir;
}

/*
 * Expand one logical rule into the per-protocol matcher set RSS needs:
 * a catch-all matcher plus IPv4/IPv6 x {none, UDP, TCP} matchers, each
 * with progressively higher steering priority (PRIO_INC subtracts from
 * base_priority) so the most specific match wins.  The resulting list
 * is allocated into *list; returns 0 on success, the failing status on
 * error (err_if() diverts to free_exit, which unwinds the partial
 * list).
 */
static int
virtnet_sf_flow_matcher_rss_expand(uint16_t base_priority,
				   struct virtnet_sf_flow_matcher_list **list)
{
	int i, ret = 0;
	struct mlx5dv_flow_match_parameters *mask = NULL;
	struct mlx5dv_flow_match_parameters *value = NULL;
	struct virtnet_sf_flow_matcher *matcher = NULL;
	/* Hash on source+destination IP. */
	const uint8_t l3_hash =
		(1 << MLX_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_SRC_IP) |
		(1 << MLX_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_DST_IP);
	/* Hash on L4 source+destination port. */
	const uint8_t l4_hash =
		(1 << MLX_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_SPORT) |
		(1 << MLX_RX_HASH_FIELD_SELECT_SELECTED_FIELDS_L4_DPORT);
	const uint16_t criteria = 1 << MLX_MATCH_CRITERIA_ENABLE_OUTER_BIT;
	enum { PRIO, CRITERIA, IP_VER, IP_PROTO,
	       HASH_L3_TYPE, HASH_L4_TYPE, HASH, END };
	/* Row layout: prio offset, criteria, IP version, IP proto,
	 * hash L3 type, hash L4 type, hash field selection. */
	const uint8_t vars[][END] = {
		{ 0, 0, 0, 0, 0, 0, 0 },
		{ 1, criteria, 4, 0, MLX_L3_PROT_TYPE_IPV4, 0, l3_hash },
		{ 1, criteria, 6, 0, MLX_L3_PROT_TYPE_IPV6, 0, l3_hash },
		{ 2, criteria, 4, IPPROTO_UDP, MLX_L3_PROT_TYPE_IPV4,
		  MLX_L4_PROT_TYPE_UDP, l3_hash | l4_hash },
		{ 2, criteria, 6, IPPROTO_UDP, MLX_L3_PROT_TYPE_IPV6,
		  MLX_L4_PROT_TYPE_UDP, l3_hash | l4_hash },
		{ 2, criteria, 4, IPPROTO_TCP, MLX_L3_PROT_TYPE_IPV4,
		  MLX_L4_PROT_TYPE_TCP, l3_hash | l4_hash },
		{ 2, criteria, 6, IPPROTO_TCP, MLX_L3_PROT_TYPE_IPV6,
		  MLX_L4_PROT_TYPE_TCP, l3_hash | l4_hash },
	};

	(*list) = calloc(1, sizeof(**list)); /* alloc list head. */
	err_if(!(*list), "cannot alloc rss matcher list head");
	for (i = 0; i < DIM(vars); i++) {
		matcher = calloc(1, sizeof(*matcher));
		err_if(!matcher, "cannot alloc virtnet macher structure");
		ret = mlx_devx_match_lyr_2_4_new(&mask, &value);
		err_if(ret, "cannot alloc match lyr_2_4 buf for mask or value");
		mlx_devx_match_lyr_2_4_init
			(vars[i][IP_VER], vars[i][IP_PROTO], mask, value);
		matcher->mask = mask;
		matcher->value = value;
		matcher->criteria = vars[i][CRITERIA];
		matcher->priority = PRIO_INC(base_priority, vars[i][PRIO]);
		matcher->hash_field.l3_prot_type = vars[i][HASH_L3_TYPE];
		matcher->hash_field.l4_prot_type = vars[i][HASH_L4_TYPE];
		matcher->hash_field.selected_fields = vars[i][HASH];
		SLIST_INSERT_HEAD(*list, matcher, next);
	}
	return 0;
free_exit:
	/* Free the matcher of the failed iteration (not yet on the
	 * list), then unwind everything already inserted. */
	if (matcher)
		free(matcher);
	virtnet_sf_flow_matcher_rss_destroy(list);
	return ret;
}

/*
 * Free every matcher on the list built by
 * virtnet_sf_flow_matcher_rss_expand(), then the list head itself, and
 * clear the caller's pointer.  A NULL list is logged and ignored.
 */
static void
virtnet_sf_flow_matcher_rss_destroy(struct virtnet_sf_flow_matcher_list **list)
{
	struct virtnet_sf_flow_matcher *m;

	if (*list == NULL) {
		log_debug("pointer to flow matcher's list is NULL");
		return;
	}
	for (m = SLIST_FIRST(*list); m != NULL; m = SLIST_FIRST(*list)) {
		SLIST_REMOVE_HEAD(*list, next);
		mlx_devx_match_lyr_2_4_delete(m->mask, m->value);
		free(m);
	}
	free(*list);
	*list = NULL;
}

/*
 * Create one RSS flow rule per expanded matcher (non-IP, IPv4/IPv6,
 * then UDP/TCP over each) and collect the rules in a newly allocated
 * list returned through @flow_list. When @mac_m/@mac_v are given,
 * every matcher is additionally narrowed to that destination MAC.
 * Returns 0 on success; on error all partially created resources are
 * released and a non-zero value is returned.
 */
static int
virtnet_sf_rss_flow_apply(struct virtnet_device *dev, uint16_t base_priority,
			  struct ether_addr *mac_m, struct ether_addr *mac_v,
			  struct rss_flow_list **flow_list)
{
	int ret = 0;
	struct rss_flow_list *listhdr = NULL;
	struct mlx5dv_dr_matcher *matcher = NULL;
	struct virtnet_tir_devx_attr tir_attr;
	struct virtnet_tir *vtir = NULL;
	struct virtnet_sf_dev_flow *dev_flow = NULL;
	struct virtnet_sf_flow_matcher_list *vmatcher_list = NULL;
	struct virtnet_sf_flow_matcher *vmatcher = NULL;
	struct mlx5dv_dr_action *actions[1];

	listhdr = calloc(1, sizeof(*listhdr));
	err_if(!listhdr, "cannot alloc rss flow list head");
	virtnet_sf_tir_devx_attr_default(dev, &tir_attr);
	ret = virtnet_sf_flow_matcher_rss_expand(base_priority, &vmatcher_list);
	err_if(ret, "cannot expand matchers for RSS");
	SLIST_FOREACH(vmatcher, vmatcher_list, next) {
		dev_flow = calloc(1, sizeof(*dev_flow));
		err_if(!dev_flow, "cannot alloc virtnet dev_flow struct");
		if (mac_m && mac_v) {
			/* Narrow this RSS matcher to the given dst MAC. */
			mlx_devx_match_set_mac_dst(mac_m->ether_addr_octet,
						   mac_v->ether_addr_octet,
						   vmatcher->mask,
						   vmatcher->value);
			vmatcher->criteria |=
				1 << MLX_MATCH_CRITERIA_ENABLE_OUTER_BIT;
		}

		matcher = mlx5dv_dr_matcher_create
			(dev->sf_verbs.steer.rx_tbl,
			 vmatcher->priority, vmatcher->criteria,
			 vmatcher->mask);
		err_if(!matcher, "cannot create matcher via DevX: tbl[%p],"
		       "priority[%u], match_criteria_enable[0x%X], mask[%p]",
		       dev->sf_verbs.steer.rx_tbl, vmatcher->priority,
		       vmatcher->criteria, vmatcher->mask);
		dev_flow->matcher = matcher;

		/* Pick (or create) the TIR whose RX hash configuration
		 * matches this matcher's L3/L4 selection. */
		tir_attr.rx_hash_field_selector_outer.l3_prot_type =
				vmatcher->hash_field.l3_prot_type;
		tir_attr.rx_hash_field_selector_outer.l4_prot_type =
				vmatcher->hash_field.l4_prot_type;
		tir_attr.rx_hash_field_selector_outer.selected_fields =
				vmatcher->hash_field.selected_fields;
		vtir = virtnet_sf_tir_get(dev, &tir_attr);
		err_if(!vtir, "cannot get virtnet tir");

		actions[0] = vtir->action;
		dev_flow->flow = mlx5dv_dr_rule_create
			(matcher, vmatcher->value, 1, actions);
		err_if(!dev_flow->flow, "cannot create flow: matcher[%p], "
		       "value[%p], tir action[%p]",
		       matcher, vmatcher->value, vtir->action);

		SLIST_INSERT_HEAD(listhdr, dev_flow, next);
	}
	virtnet_sf_flow_matcher_rss_destroy(&vmatcher_list);
	*flow_list = listhdr;
	return 0;
free_exit:
	if (dev_flow) {
		/* dev_flow is not yet linked into listhdr on this path, so
		 * virtnet_sf_rss_flow_destroy() below cannot see it; destroy
		 * its matcher (if created) before freeing, otherwise the
		 * matcher would leak. */
		if (dev_flow->matcher)
			mlx5dv_dr_matcher_destroy(dev_flow->matcher);
		free(dev_flow);
	}
	virtnet_sf_flow_matcher_rss_destroy(&vmatcher_list);
	virtnet_sf_rss_flow_destroy(&listhdr);
	ret = ret ? ret : -1;
	return ret;
}

/*
 * Destroy every rule and matcher on the RSS flow list, then free the
 * list head and clear the caller's pointer. On a rule-destroy failure
 * the entry is pushed back onto the list so a retry can find it.
 */
static int
virtnet_sf_rss_flow_destroy(struct rss_flow_list **flow_list)
{
	struct virtnet_sf_dev_flow *df = NULL;
	int ret = 0;

	if (!flow_list || !(*flow_list))
		return 0;
	while ((df = SLIST_FIRST(*flow_list)) != NULL) {
		SLIST_REMOVE_HEAD(*flow_list, next);
		ret = mlx5dv_dr_rule_destroy(df->flow);
		err_if(ret, "failed to destroy flow rule[%p] via Devx",
		       df->flow);
		df->flow = NULL;
		ret = mlx5dv_dr_matcher_destroy(df->matcher);
		/* Matcher-destroy failure is logged but not propagated. */
		if (ret)
			log_error("failed to destroy matcher[%p] via DevX",
				  df->matcher);
		free(df);
	}
	free(*flow_list);
	*flow_list = NULL;
	return 0;
free_exit:
	if (df)
		SLIST_INSERT_HEAD(*flow_list, df, next);
	return ret;
}

/*
 * Install a drop flow (rule steering matched packets to the drop TIR).
 * When @mac_m/@mac_v are given, only that destination MAC is dropped;
 * otherwise the rule matches everything at @base_priority.
 * On success *dev_flow owns the created rule/matcher; returns 0.
 * Returns -1 on error with all partial resources released.
 */
static int
virtnet_sf_drop_flow_apply(struct virtnet_device *dev, uint16_t base_priority,
			   struct ether_addr *mac_m, struct ether_addr *mac_v,
			   struct virtnet_sf_dev_flow **dev_flow)
{
	int ret = 0;
	struct virtnet_sf_dev_flow *dflow = NULL;
	uint8_t criteria = 0;
	struct mlx5dv_flow_match_parameters *mask = NULL;
	struct mlx5dv_flow_match_parameters *value = NULL;
	struct virtnet_tir *vtir = dev->sf_verbs.steer.drop.tir;
	struct mlx5dv_dr_action *actions[1];

	dflow = calloc(1, sizeof(*dflow));
	err_if(!dflow, "cannot alloc virtnet dev_flow struct for drop flow");
	/* create matcher. */
	ret = mlx_devx_match_lyr_2_4_new(&mask, &value);
	err_if(ret, "cannot alloc match lyr_2_4 buf for mask or value");
	mlx_devx_match_lyr_2_4_init(0, 0, mask, value);
	if (mac_m && mac_v) {
		criteria = 1 << MLX_MATCH_CRITERIA_ENABLE_OUTER_BIT;
		mlx_devx_match_set_mac_dst(mac_m->ether_addr_octet,
					   mac_v->ether_addr_octet,
					   mask, value);
	}

	dflow->matcher = mlx5dv_dr_matcher_create
		(dev->sf_verbs.steer.rx_tbl,
		 base_priority, criteria, mask);
	err_if(!dflow->matcher, "cannot create matcher via DevX: "
	       "tbl[%p], priority[%u], match_criteria_enable[0x%X], "
	       "mask[%p]",
	       dev->sf_verbs.steer.rx_tbl, base_priority, criteria,
	       mask);

	actions[0] = vtir->action;
	dflow->flow = mlx5dv_dr_rule_create(dflow->matcher, value, 1,
					    actions);
	err_if(!dflow->flow, "cannot create flow: matcher[%p], "
	       "value[%p], tir action[%p]",
	       dflow->matcher, value, vtir->action);

	/* Matcher and rule keep their own copies of the match parameters
	 * (the RSS path frees them likewise after rule creation); release
	 * the temporary buffers so they do not leak. */
	mlx_devx_match_lyr_2_4_delete(mask, value);
	*dev_flow = dflow;
	return 0;
free_exit:
	/* Release everything created so far; previously this path leaked
	 * dflow, the matcher and the mask/value buffers. */
	if (mask && value)
		mlx_devx_match_lyr_2_4_delete(mask, value);
	if (dflow) {
		if (dflow->matcher)
			mlx5dv_dr_matcher_destroy(dflow->matcher);
		free(dflow);
	}
	return -1;
}

/*
 * Tear down a drop flow: destroy its rule and matcher, free the
 * container, and clear the caller's pointer. A rule-destroy failure
 * aborts early and leaves *dev_flow intact for a retry.
 */
static int
virtnet_sf_drop_flow_destroy(struct virtnet_sf_dev_flow **dev_flow)
{
	struct virtnet_sf_dev_flow *df = *dev_flow;
	int ret = 0;

	if (!df)
		return 0;
	ret = mlx5dv_dr_rule_destroy(df->flow);
	err_if(ret, "failed to destroy flow rule[%p] via Devx", df->flow);
	df->flow = NULL;
	ret = mlx5dv_dr_matcher_destroy(df->matcher);
	/* Matcher-destroy failure is logged only; teardown continues. */
	if (ret)
		log_error("failed to destroy matcher[%p] via DevX",
			  df->matcher);
	free(df);
	*dev_flow = NULL;
free_exit:
	return ret;
}
/*
 * Apply the device's steering configuration: rx-mode flows first, then
 * the default MAC flow and one flow per non-zero MAC table entry.
 * Returns 0 on success, non-zero on the first failure.
 */
int
virtnet_sf_flows_apply(struct virtnet_device *dev)
{
	struct virtnet_mac_table_entry *entry;
	int ret;
	int i;

	ret = virtnet_sf_set_rx_mode(dev);
	err_if(ret, "cannot apply flows based on rx mode");

	if (!virtnet_ether_addr_is_zero(dev->mac.addr)) {
		ret = virtnet_sf_mac_flow_apply(dev, dev->mac.addr,
						&dev->mac.dev_flow);
		err_if(ret, "cannot apply default mac flow");
	}
	if (!dev->mac_table.entries)
		return 0;
	for (i = 0; i < VIRTNET_MAC_TABLE_ENTRIES; i++) {
		entry = &dev->mac_table.entries[i];
		/* Zeroed entries are unused slots. */
		if (virtnet_ether_addr_is_zero(entry->addr))
			continue;
		ret = virtnet_sf_mac_flow_apply(dev, entry->addr,
						&entry->dev_flow);
		err_if(ret, "cannot apply flow in mac table");
	}
	return 0;
free_exit:
	return ret;
}

/*
 * Counterpart of virtnet_sf_flows_apply(): tear down every rx-mode
 * flow, the default MAC flow, and all MAC table entry flows.
 */
static void
virtnet_sf_flows_destroy(struct virtnet_device *dev)
{
	int i;

	virtnet_sf_allbcast_flow_destroy(dev);
	virtnet_sf_promisc_flow_destroy(dev);
	virtnet_sf_allmulti_flow_destroy(dev);
	virtnet_sf_mac_flow_destroy(dev, &dev->mac.dev_flow);
	virtnet_sf_nomulti_flow_destroy(dev);
	virtnet_sf_nouni_flow_destroy(dev);
	virtnet_sf_nobcast_flow_destroy(dev);
	if (!dev->mac_table.entries)
		return;
	for (i = 0; i < VIRTNET_MAC_TABLE_ENTRIES; i++)
		virtnet_sf_mac_flow_destroy
			(dev, &dev->mac_table.entries[i].dev_flow);
}

/*
 * Create a single-entry RQ table pointing at the dedicated drop RQ and
 * record its object/id on the device. Returns 0 on success, -1 on error.
 */
static int
virtnet_sf_drop_rqt_create(struct virtnet_device *dev)
{
	/* One trailing rq_list element for the single drop RQ. */
	struct mlx_devx_rqt_attr *attr =
		calloc(1, sizeof(*attr) + sizeof(attr->rq_list[0]));

	err_if(!attr, "cannot allocate drop RQT attribute memory.");
	attr->actual_size = 1;
	attr->max_size = 1;
	attr->rq_type = MLX5_INLINE_Q_TYPE_RQ;
	attr->rq_list[0] = dev->sf_verbs.steer.drop.rqn;
	dev->sf_verbs.steer.drop.rqt.obj =
		mlx_devx_rqt_create(dev->sf_verbs.dev, attr,
				    &dev->sf_verbs.steer.drop.rqt.id);
	err_if(!dev->sf_verbs.steer.drop.rqt.obj,
		"cannot create sf RQTable with "
		"attribute: rq_type[%d], max_size[%u], actual_size[%u]",
		attr->rq_type, attr->max_size, attr->actual_size);
	free(attr);
	return 0;
free_exit:
	free(attr); /* free(NULL) is a no-op, no guard needed. */
	return -1;
}

/*
 * Destroy the drop RQ table object (if any) and clear its bookkeeping
 * on success; log and return the error otherwise.
 */
static int
virtnet_sf_drop_rqt_destroy(struct virtnet_device *dev)
{
	int ret = 0;

	if (dev->sf_verbs.steer.drop.rqt.obj)
		ret = mlx_devx_obj_destroy(dev->sf_verbs.steer.drop.rqt.obj);
	if (ret) {
		log_error("cannot destroy sf drop RXQ table[%p], id[%u]",
			  dev->sf_verbs.steer.drop.rqt.obj,
			  dev->sf_verbs.steer.drop.rqt.id);
		return ret;
	}
	memset(&dev->sf_verbs.steer.drop.rqt, 0,
		sizeof(dev->sf_verbs.steer.drop.rqt));
	return 0;
}

/*
 * Create the drop RQ: a minimal one-WQE cyclic receive queue backed by
 * a 512-byte-aligned buffer registered as a umem (buffer tail also
 * holds the doorbell record). Records rq/rqn/wqes/wqes_umem on the
 * device. Returns 0 on success, negative on error; on error the
 * resources already recorded on dev are released by the caller's
 * virtnet_sf_stop() -> virtnet_sf_drop_rq_destroy() path.
 */
static int
virtnet_sf_drop_rq_create(struct virtnet_device *dev)
{
	struct mlx_devx_create_rq_attr rq_attr;
	uint32_t wqe_n = 1;
	uint32_t wq_size = 0;
	uint32_t wqe_size = 0;
	uint32_t log_wqe_size = 0;
	struct mlx5dv_devx_obj *rq = NULL;
	struct mlx5dv_obj obj;
	struct mlx5dv_pd pd_info;
	struct mlx5dv_cq cq_info;
	void *wqes = NULL;
	struct mlx5dv_devx_umem *wqes_umem = NULL;
	struct ibv_context *ctx = dev->sf_verbs.dev;
	int ret = 0;

	memset(&rq_attr, 0, sizeof(rq_attr));
	/* Fill RQ attributes. */
	rq_attr.mem_rq_type = 0; /* MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_INLINE. */
	rq_attr.flush_in_error_en = 1;
	rq_attr.state = 0; /* MLX5_RQC_STATE_RST. */
	rq_attr.vsd = 0; /* vlan strip. */
	/* get cqn. */
	obj.cq.in = dev->sf_verbs.steer.drop.cq;
	obj.cq.out = &cq_info;
	ret = mlx5dv_init_obj(&obj, MLX5DV_OBJ_CQ);
	err_if(ret, "cannot get CQ[%p] number", dev->sf_verbs.steer.drop.cq);
	rq_attr.cqn = cq_info.cqn;
	rq_attr.scatter_fcs = 0; /* No CRC scatter. */
	rq_attr.wq_attr.wq_type = 1; /* MLX5_WQ_TYPE_CYCLIC. */
	wqe_size = sizeof(struct mlx5_wqe_data_seg);
	log_wqe_size = log2above(wqe_size);
	rq_attr.wq_attr.log_wq_stride = log_wqe_size;
	rq_attr.wq_attr.log_wq_sz = 2; /* Dummy value. */
	wqe_size = 1 << log_wqe_size;
	wq_size = wqe_n * wqe_size;
	wqes = memalign(512, wq_size + sizeof(uint32_t) * 2);/* with doorbell.*/
	err_if(!wqes, "cannot alloc WQ");
	/* Record immediately on the device so virtnet_sf_drop_rq_destroy()
	 * can free it; previously these only lived in locals and both the
	 * buffer and the umem registration leaked. */
	dev->sf_verbs.steer.drop.wqes = wqes;
	wqes_umem = mlx5dv_devx_umem_reg(ctx, wqes, wq_size, 0);
	err_if(!wqes_umem, "cannot register WQEs as umem");
	dev->sf_verbs.steer.drop.wqes_umem = wqes_umem;
	rq_attr.wq_attr.end_padding_mode = 0; /* No pad. */
	/* get pdn. */
	obj.pd.in = dev->sf_verbs.pd;
	obj.pd.out = &pd_info;
	ret = mlx5dv_init_obj(&obj, MLX5DV_OBJ_PD);
	err_if(ret, "cannot get PD[%p] number", dev->sf_verbs.pd);
	/* fill wq attr. */
	rq_attr.wq_attr.pd = pd_info.pdn;
	rq_attr.wq_attr.wq_umem_id = wqes_umem->umem_id;
	rq_attr.wq_attr.dbr_umem_id = wqes_umem->umem_id;
	rq_attr.wq_attr.wq_umem_valid = 1;
	rq_attr.wq_attr.dbr_umem_valid = 1;
	rq_attr.wq_attr.wq_umem_offset = 0;
	rq = mlx_devx_create_rq(ctx, &rq_attr, &dev->sf_verbs.steer.drop.rqn);
	err_if(!rq, "cannot create RQ via DevX cmd");
	dev->sf_verbs.steer.drop.rq = rq;
	return 0;
free_exit:
	return (ret ? ret : -1);
}

/*
 * Release the drop RQ resources in reverse order of creation: RQ
 * object, umem registration, WQE buffer. All failures are logged;
 * the first error encountered is returned (previously a later success
 * overwrote an earlier failure, and a stale ret could trigger a
 * spurious "cannot dereg wqes umem" log when the umem was NULL).
 */
static int
virtnet_sf_drop_rq_destroy(struct virtnet_device *dev)
{
	int ret = 0;
	int err;
	struct mlx5dv_devx_obj *rq = dev->sf_verbs.steer.drop.rq;
	void *wqes = dev->sf_verbs.steer.drop.wqes;
	struct mlx5dv_devx_umem *wqes_umem = dev->sf_verbs.steer.drop.wqes_umem;

	if (rq) {
		err = mlx5dv_devx_obj_destroy(rq);
		if (err) {
			log_error("cannot destroy drop RQ devx object");
			ret = err;
		}
	}
	if (wqes_umem) {
		err = mlx5dv_devx_umem_dereg(wqes_umem);
		if (err) {
			log_error("cannot dereg wqes umem");
			if (!ret)
				ret = err;
		}
	}
	dev->sf_verbs.steer.drop.wqes_umem = NULL;
	free(wqes); /* free(NULL) is a no-op. */
	dev->sf_verbs.steer.drop.wqes = NULL;
	dev->sf_verbs.steer.drop.rq = NULL;
	return ret;
}

static int
virtnet_sf_drop_cq_new(struct virtnet_device *dev)
{
	struct ibv_context *ctx = dev->sf_verbs.dev;
	struct {
		struct ibv_cq_init_attr_ex ibv;
		struct mlx5dv_cq_init_attr mlx5;
	} cq_attr;

	cq_attr.ibv = (struct ibv_cq_init_attr_ex) {
		.cqe = 2,
		.channel = NULL,
		.comp_mask = 0,
	};
	cq_attr.mlx5 = (struct mlx5dv_cq_init_attr) {
		.comp_mask = 0,
	};
	dev->sf_verbs.steer.drop.cq =
		ibv_cq_ex_to_cq(mlx5dv_create_cq(ctx, &cq_attr.ibv,
						 &cq_attr.mlx5));
	return (dev->sf_verbs.steer.drop.cq ? 0 : -1);
}

static int
virtnet_sf_drop_tir_create(struct virtnet_device *dev)
{
	struct virtnet_tir_devx_attr tir_attr;
	struct virtnet_tir *vtir = NULL;

	memset(&tir_attr, 0, sizeof(tir_attr));
	virtnet_sf_tir_devx_attr_default(dev, &tir_attr);
	tir_attr.indirect_table = dev->sf_verbs.steer.drop.rqt.id;
	vtir = virtnet_sf_tir_new(dev, &tir_attr);
	err_if(!vtir, "cannot create drop TIR");
	dev->sf_verbs.steer.drop.tir = vtir;
	return 0;
free_exit:
	return -1;
}

/* Delete the drop TIR (if created) and clear its device slot. */
static void
virtnet_sf_drop_tir_destroy(struct virtnet_device *dev)
{
	if (dev->sf_verbs.steer.drop.tir)
		virtnet_sf_tir_delete(dev->sf_verbs.steer.drop.tir);
	dev->sf_verbs.steer.drop.tir = NULL;
}

/*
 * Build the drop-queue pipeline in dependency order: CQ -> RQ -> RQT
 * -> TIR. On failure, resources already recorded on dev are cleaned
 * up by the caller (virtnet_sf_start error path calls virtnet_sf_stop,
 * which invokes virtnet_sf_drop_destroy).
 */
static int
virtnet_sf_drop_create(struct virtnet_device *dev)
{
	int ret;

	ret = virtnet_sf_drop_cq_new(dev);
	err_if(ret, "cannot create cq");
	ret = virtnet_sf_drop_rq_create(dev);
	err_if(ret, "cannot create rq");
	ret = virtnet_sf_drop_rqt_create(dev);
	err_if(ret, "cannot create RQ Table");
	ret = virtnet_sf_drop_tir_create(dev);
	err_if(ret, "cannot create TIR");
	return 0;
free_exit:
	/* Defensive: err_if only jumps here with ret != 0, so -1 is a
	 * fallback that should be unreachable. */
	return (ret ? ret : -1);
}

/*
 * Tear down the drop pipeline in reverse creation order (TIR, RQT, RQ,
 * CQ). A CQ destroy failure is logged and returned; everything else is
 * handled (and logged) inside the respective helpers.
 */
static int
virtnet_sf_drop_destroy(struct virtnet_device *dev)
{
	struct ibv_cq *cq = dev->sf_verbs.steer.drop.cq;
	int ret = 0;

	virtnet_sf_drop_tir_destroy(dev);
	virtnet_sf_drop_rqt_destroy(dev);
	virtnet_sf_drop_rq_destroy(dev);
	if (cq) {
		ret = ibv_destroy_cq(cq);
		if (ret)
			log_error("cannot destroy drop CQ, resource leaked");
	}
	dev->sf_verbs.steer.drop.cq = NULL;
	return ret;
}

/*
 * Reconcile the installed steering flows with the device's rx_mode
 * flags: for each flag (promisc, allmulti, alluni, nouni, nomulti,
 * nobcast), create the matching flow if the flag is set and the flow
 * does not exist yet, or destroy it if the flag is clear. Bails out at
 * the first failure (err_if jumps to free_exit) and returns that
 * error; returns 0 when all flags are reconciled.
 */
int
virtnet_sf_set_rx_mode(struct virtnet_device *dev)
{
	int ret = 0;

	/* Promiscuous: accept all traffic. */
	if (dev->rx_mode.promisc) {
		if (!dev->sf_verbs.steer.promisc_flow) {
			ret = virtnet_sf_promisc_flow_apply(dev);
			err_if(ret, "create promisc flow failed");
		} else
			log_debug("promisc flow already exists");
	} else {
		/* Destroy helpers are assumed to tolerate a missing flow. */
		ret = virtnet_sf_promisc_flow_destroy(dev);
		err_if(ret, "destroy promisc flow failed");
	}
	/* Accept all multicast. */
	if (dev->rx_mode.allmulti) {
		if (!dev->sf_verbs.steer.allmulti_flow) {
			ret = virtnet_sf_allmulti_flow_apply(dev);
			err_if(ret, "create allmulti flow failed");
		} else
			log_debug("all multicast flow already exists");
	} else {
		ret = virtnet_sf_allmulti_flow_destroy(dev);
		err_if(ret, "destroy all multicast flow failed");
	}
	/* Accept all unicast. */
	if (dev->rx_mode.alluni) {
		if (!dev->sf_verbs.steer.alluni_flow) {
			ret = virtnet_sf_alluni_flow_apply(dev);
			err_if(ret, "create alluni flow failed");
		} else
			log_debug("all unicast flow already exists");
	} else {
		ret = virtnet_sf_alluni_flow_destroy(dev);
		err_if(ret, "destroy all unicast flow failed");
	}
	/* Drop all unicast. */
	if (dev->rx_mode.nouni) {
		if (!dev->sf_verbs.steer.nouni_flow) {
			ret = virtnet_sf_nouni_flow_apply(dev);
			err_if(ret, "create no unicast flow failed");
		} else
			log_debug("no unicast flow already exists");
	} else {
		ret = virtnet_sf_nouni_flow_destroy(dev);
		err_if(ret, "destroy no unicast flow failed");
	}
	/* Drop all multicast. */
	if (dev->rx_mode.nomulti) {
		if (!dev->sf_verbs.steer.nomulti_flow) {
			ret = virtnet_sf_nomulti_flow_apply(dev);
			err_if(ret, "create no multicast flow failed");
		} else
			log_debug("no multicast flow already exists");
	} else {
		ret = virtnet_sf_nomulti_flow_destroy(dev);
		err_if(ret, "destroy no multicast flow failed");
	}
	/* Drop all broadcast (see caveats below). */
	if (dev->rx_mode.nobcast) {
		/* We are not destroying any existing bcast allow rule.
		 * we are adding a rule to drop the bcast packets.
		 * If this drop rule addition fails, error is returned to
		 * user. However, there is a possibility that previous
		 * rule entry exist for allowing bcast rx traffic.
		 * In such case VIRTIO_NET_CTRL_RX_NOBCAST = 1 indicating
		 * broadcast should be dropped but it is actually allowed!
		 * In second scenario, such broadcast allow rule doesn't
		 * exist but VIRTIO_NET_CTRL_RX_ALLMULTI is on. In this
		 * case broadcast packet is fine false hit with multicast
		 * packet!
		 * Jack Min prefers this way to solve it, which seems
		 * error prone and confusing way to solve RM 2247384 for now.
		 * Ideally we should delete any existing low priority bcast
		 * allow rule in addition to adding high priority drop
		 * rule... This also makes it symmetric with nobcast=0
		 * request.. But...
		 */
		if (!dev->sf_verbs.steer.nobcast_flow) {
			ret = virtnet_sf_nobcast_flow_apply(dev);
			err_if(ret, "create no broadcast flow failed");
		} else
			log_debug("no broadcast flow already exists");

	} else {
		/* nobcast=0 additionally re-installs the broadcast allow
		 * flow, unlike the other flags. */
		ret = virtnet_sf_nobcast_flow_destroy(dev);
		err_if(ret, "destroy no broadcast flow failed");
		ret = virtnet_sf_allbcast_flow_apply(dev);
		err_if(ret, "adding all broadcast allow flow failed");
	}
free_exit:
	return ret;
}

static int
virtnet_sf_apply_mtu(struct virtnet_device *dev)
{
	struct virtnet_device_modify_fields val = {};
	uint16_t mtu = 0;
	int ret = 0;

	if (!dev->registers->mtu)
		return 0;

	ret = virtnet_get_mtu(dev->sf_verbs.dev, &mtu);
	err_if(ret, "failed to get device mtu");

	if (mtu == dev->registers->mtu)
		return 0;

	val.mtu = dev->registers->mtu;

	ret = virtnet_device_modify(dev, &val, VIRTNET_MODIFY_MTU);
	err_if(ret, "failed to set device mtu %hu", dev->registers->mtu);

free_exit:
	return ret;
}

/**
 * Create SF device and init related verbs resources (vhca_id, PD,
 * completion channel, TD, TIS).
 *
 * Partially created resources are NOT released here on failure; the
 * caller is expected to invoke virtnet_sf_destroy(), which handles
 * each resource conditionally.
 *
 * @param[in] dev
 * 	Pointer to device
 * return
 * 	0 on success, negative value on error
 */
int
virtnet_sf_create(struct virtnet_device *dev)
{
	int ret;

	ret = virtnet_sf_dev_create(dev);
	err_if(!dev->sf_verbs.dev, "failed to create sf device.");

	ret = snap_get_dev_vhca_id(dev->sf_verbs.dev);
	err_if(ret < 0, "failed to query sf device vhca_id.");
	dev->sf_verbs.vhca_id = ret;
	/* ret currently holds the non-negative vhca_id; reset it so a later
	 * failure cannot be reported as a positive, success-looking value. */
	ret = 0;

	dev->sf_verbs.pd = ibv_alloc_pd(dev->sf_verbs.dev);
	err_if(!dev->sf_verbs.pd, "failed to allocate sf protection domain.");

	dev->sf_verbs.channel = ibv_create_comp_channel(dev->sf_verbs.dev);
	err_if(!dev->sf_verbs.channel, "Failed to create sf completion channel");

	dev->sf_verbs.td = mlx_devx_td_create(dev->sf_verbs.dev, &dev->sf_verbs.td_num);
	err_if(!dev->sf_verbs.td, "Failed to create sf TD");

	dev->sf_verbs.tis = mlx_devx_tis_create(dev->sf_verbs.dev,
			dev->sf_verbs.td_num, &dev->sf_verbs.tis_num);
	err_if(!dev->sf_verbs.tis, "Failed to create sf TIS");

	return 0;
free_exit:
	/* Guarantee the documented negative return on every error path. */
	return (ret < 0) ? ret : -1;
}

/*
 * Release SF verbs resources in reverse order of virtnet_sf_create():
 * TIS, TD, completion channel, PD, then the SF device itself. Each
 * step is guarded so a partially initialized device is handled safely;
 * destroy return values are intentionally ignored (best effort).
 */
void
virtnet_sf_destroy(struct virtnet_device *dev)
{
	if (dev->sf_verbs.tis)
		mlx_devx_obj_destroy(dev->sf_verbs.tis);
	if (dev->sf_verbs.td)
		mlx_devx_obj_destroy(dev->sf_verbs.td);
	if (dev->sf_verbs.channel)
		ibv_destroy_comp_channel(dev->sf_verbs.channel);
	if (dev->sf_verbs.pd)
		ibv_dealloc_pd(dev->sf_verbs.pd);
	if (dev->sf_verbs.dev)
		virtnet_sf_dev_destroy(dev);
}

/*
 * Start the SF datapath: apply MTU (best effort), build the steering
 * RQT, drop-queue resources, default RSS TIRs, the DR NIC-RX domain
 * and root table, then install all configured flows.
 * On any failure everything is rolled back via virtnet_sf_stop() and a
 * non-zero error is returned.
 */
int
virtnet_sf_start(struct virtnet_device *dev)
{
	int ret = 0;

	/* Best effort: an MTU mismatch is not fatal to startup. */
	virtnet_sf_apply_mtu(dev);

	ret = virtnet_sf_rqt_update(dev);
	err_if(ret, "cannot create steer RQ Table");
	ret = virtnet_sf_drop_create(dev);
	err_if(ret, "cannot create drop queue resources");
	ret = virtnet_sf_tir_create_default(dev);
	err_if(ret, "cannot create default RSS TIRs");
	dev->sf_verbs.steer.rx_domain = mlx5dv_dr_domain_create
		(dev->sf_verbs.dev, MLX5DV_DR_DOMAIN_TYPE_NIC_RX);
	err_if(!dev->sf_verbs.steer.rx_domain,
	       "cannot create steer rx domain");
	dev->sf_verbs.steer.rx_tbl = mlx5dv_dr_table_create
		(dev->sf_verbs.steer.rx_domain, 0);
	err_if(!dev->sf_verbs.steer.rx_tbl,
	       "cannot create steer rx flow table");
	ret = virtnet_sf_flows_apply(dev);
	err_if(ret, "cannot apply sf flows");
	return 0;
free_exit:
	virtnet_sf_stop(dev);
	/* Do not rely on errno here: it may be 0 or stale after the helper
	 * calls above, which would turn a failure into a "success" return.
	 * ret is 0 when a pointer-returning call failed, hence the -1. */
	return ret ? ret : -1;
}

/*
 * Stop the SF datapath: remove all flows, default TIRs, the RQT and
 * drop resources, then the DR rx table and domain. Destroy failures
 * are logged and the corresponding pointer kept so a later retry is
 * possible; nothing is returned to the caller.
 */
void virtnet_sf_stop(struct virtnet_device *dev)
{
	int ret;

	virtnet_sf_flows_destroy(dev);
	virtnet_sf_tir_destroy_default(dev);
	virtnet_sf_rqt_destroy(dev);
	virtnet_sf_drop_destroy(dev);
	if (dev->sf_verbs.steer.rx_tbl) {
		ret = mlx5dv_dr_table_destroy(dev->sf_verbs.steer.rx_tbl);
		if (!ret)
			dev->sf_verbs.steer.rx_tbl = NULL;
		else
			log_error("cannot destroy sf steer rx table[%p]",
				  dev->sf_verbs.steer.rx_tbl);
	}
	/* Keep the error handling inside the guard (mirroring the table
	 * case above); previously a stale ret from the table destroy could
	 * trigger a spurious domain error log when the domain was NULL. */
	if (dev->sf_verbs.steer.rx_domain) {
		ret = mlx5dv_dr_domain_destroy(dev->sf_verbs.steer.rx_domain);
		if (!ret)
			dev->sf_verbs.steer.rx_domain = NULL;
		else
			log_error("cannot destroy sf steer rx domain[%p]",
				  dev->sf_verbs.steer.rx_domain);
	}
}
