/*
 * Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "virtnet.h"

/* Command opcodes not exposed by the DevX headers this file includes. */
#define MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN 0x816
#define DEVX_CMD_OP_CREATE_RQ 0x908

/*
 * The mlx5_ifc_*_bits structures in this file are PRM-style layout
 * templates: each u8 member names a field and gives its width in BITS.
 * They are never instantiated directly; the DEVX_SET()/DEVX_GET()/
 * DEVX_ST_SZ_*() macros use them to pack, unpack and size the raw
 * command buffers, so fields must not be reordered or resized.
 */

/* ALLOC_TRANSPORT_DOMAIN command output layout. */
struct mlx5_ifc_alloc_transport_domain_out_bits {
	u8 status[0x8];
	u8 reserved_at_8[0x18];
	u8 syndrome[0x20];
	u8 reserved_at_40[0x8];
	u8 transport_domain[0x18];	/* TD number returned by FW. */
	u8 reserved_at_60[0x20];
};

/* ALLOC_TRANSPORT_DOMAIN command input layout (opcode only). */
struct mlx5_ifc_alloc_transport_domain_in_bits {
	u8 opcode[0x10];
	u8 reserved_at_10[0x10];
	u8 reserved_at_20[0x10];
	u8 op_mod[0x10];
	u8 reserved_at_40[0x40];
};

/* Values for mlx5_ifc_wq_bits.end_padding_mode. */
enum {
	MLX5_WQ_END_PAD_MODE_NONE  = 0x0,
	MLX5_WQ_END_PAD_MODE_ALIGN = 0x1,
};

/*
 * Work queue context layout, embedded at the tail of the RQ context
 * (see mlx5_ifc_rqc_bits) and filled by mlx_devx_fill_wq_data().
 */
struct mlx5_ifc_wq_bits {
	u8 wq_type[0x4];
	u8 wq_signature[0x1];
	u8 end_padding_mode[0x2];	/* MLX5_WQ_END_PAD_MODE_*. */
	u8 cd_slave[0x1];
	u8 reserved_at_8[0x18];
	u8 hds_skip_first_sge[0x1];
	u8 log2_hds_buf_size[0x3];
	u8 reserved_at_24[0x7];
	u8 page_offset[0x5];
	u8 lwm[0x10];
	u8 reserved_at_40[0x8];
	u8 pd[0x18];
	u8 reserved_at_60[0x8];
	u8 uar_page[0x18];
	u8 dbr_addr[0x40];	/* 64-bit doorbell record address. */
	u8 hw_counter[0x20];
	u8 sw_counter[0x20];
	u8 reserved_at_100[0xc];
	u8 log_wq_stride[0x4];
	u8 reserved_at_110[0x3];
	u8 log_wq_pg_sz[0x5];
	u8 reserved_at_118[0x3];
	u8 log_wq_sz[0x5];
	u8 dbr_umem_valid[0x1];
	u8 wq_umem_valid[0x1];
	u8 reserved_at_122[0x1];
	u8 log_hairpin_num_packets[0x5];
	u8 reserved_at_128[0x3];
	u8 log_hairpin_data_sz[0x5];
	u8 reserved_at_130[0x4];
	u8 single_wqe_log_num_of_strides[0x4];
	u8 two_byte_shift_en[0x1];
	u8 reserved_at_139[0x4];
	u8 single_stride_log_num_of_bytes[0x3];
	u8 dbr_umem_id[0x20];
	u8 wq_umem_id[0x20];
	u8 wq_umem_offset[0x40];	/* 64-bit offset inside the umem. */
	u8 reserved_at_1c0[0x440];
};

/* Values for mlx5_ifc_rqc_bits.mem_rq_type. */
enum {
	MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_INLINE  = 0x0,
	MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_RMP     = 0x1,
};

/* Values for mlx5_ifc_rqc_bits.state. */
enum {
	MLX5_RQC_STATE_RST  = 0x0,
	MLX5_RQC_STATE_RDY  = 0x1,
	MLX5_RQC_STATE_ERR  = 0x3,
};

/* Receive queue context layout used by CREATE_RQ. */
struct mlx5_ifc_rqc_bits {
	u8 rlky[0x1];
	u8 delay_drop_en[0x1];
	u8 scatter_fcs[0x1];
	u8 vsd[0x1];
	u8 mem_rq_type[0x4];
	u8 state[0x4];
	u8 reserved_at_c[0x1];
	u8 flush_in_error_en[0x1];
	u8 hairpin[0x1];
	u8 reserved_at_f[0x11];
	u8 reserved_at_20[0x8];
	u8 user_index[0x18];
	u8 reserved_at_40[0x8];
	u8 cqn[0x18];
	u8 counter_set_id[0x8];
	u8 reserved_at_68[0x18];
	u8 reserved_at_80[0x8];
	u8 rmpn[0x18];
	u8 reserved_at_a0[0x8];
	u8 hairpin_peer_sq[0x18];
	u8 reserved_at_c0[0x10];
	u8 hairpin_peer_vhca[0x10];
	u8 reserved_at_e0[0xa0];
	struct mlx5_ifc_wq_bits wq; /* Not used in LRO RQ. */
};

/* CREATE_RQ command output layout. */
struct mlx5_ifc_create_rq_out_bits {
	u8 status[0x8];
	u8 reserved_at_8[0x18];
	u8 syndrome[0x20];
	u8 reserved_at_40[0x8];
	u8 rqn[0x18];	/* RQ number returned by FW. */
	u8 reserved_at_60[0x20];
};

/* CREATE_RQ command input layout; embeds the RQ context. */
struct mlx5_ifc_create_rq_in_bits {
	u8 opcode[0x10];
	u8 uid[0x10];
	u8 reserved_at_20[0x10];
	u8 op_mod[0x10];
	u8 reserved_at_40[0xc0];
	struct mlx5_ifc_rqc_bits ctx;
};

/* MODIFY_RQ command output layout (status/syndrome only). */
struct mlx5_ifc_modify_rq_out_bits {
	u8 status[0x8];
	u8 reserved_at_8[0x18];
	u8 syndrome[0x20];
	u8 reserved_at_40[0x40];
};


/**
 * Allocate a transport domain via DevX and report its ID.
 *
 * @param[in] dev
 * 	Pointer to device
 * @param[out] td_id
 * 	Pointer to TD ID; written only on success
 * @return
 * 	Pointer to TD object created, NULL otherwise
 */
void *
mlx_devx_td_create(struct ibv_context *dev, uint32_t *td_id)
{
	uint8_t cmd_in[DEVX_ST_SZ_BYTES(alloc_transport_domain_in)] = {0};
	uint8_t cmd_out[DEVX_ST_SZ_BYTES(alloc_transport_domain_out)] = {0};
	struct mlx5dv_devx_obj *td_obj;

	DEVX_SET(alloc_transport_domain_in, cmd_in, opcode,
		 MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN);
	td_obj = mlx5dv_devx_obj_create(dev, cmd_in, sizeof(cmd_in),
					cmd_out, sizeof(cmd_out));
	if (td_obj)
		*td_id = DEVX_GET(alloc_transport_domain_out, cmd_out,
				  transport_domain);
	else
		log_error("failed to create transport domain");
	return td_obj;
}

/* CREATE_TIS command output layout. */
struct mlx5_ifc_create_tis_out_bits {
	u8         status[0x8];
	u8         reserved_at_8[0x18];
	u8         syndrome[0x20];
	u8         reserved_at_40[0x8];
	u8         tisn[0x18];	/* TIS number returned by FW. */
	u8         reserved_at_60[0x20];
};

/* Transmit interface send (TIS) context layout. */
struct mlx5_ifc_tisc_bits {
	u8 strict_lag_tx_port_affinity[0x1];
	u8 reserved_at_1[0x3];
	u8 lag_tx_port_affinity[0x04];
	u8 reserved_at_8[0x4];
	u8 prio[0x4];
	u8 reserved_at_10[0x10];
	u8 reserved_at_20[0x100];
	u8 reserved_at_120[0x8];
	u8 transport_domain[0x18];
	u8 reserved_at_140[0x8];
	u8 underlay_qpn[0x18];
	u8 reserved_at_160[0x08];
	u8 pd[0x18];
	u8 reserved_at_180[0x380];
};

/* CREATE_TIS command input layout; embeds the TIS context. */
struct mlx5_ifc_create_tis_in_bits {
	u8         opcode[0x10];
	u8         reserved_at_10[0x10];
	u8         reserved_at_20[0x10];
	u8         op_mod[0x10];
	u8         reserved_at_40[0xc0];
	struct mlx5_ifc_tisc_bits ctx;
};

/* One RQ-number entry of the RQT list (32 bits total). */
struct mlx5_ifc_rq_num_bits {
	u8 reserved_at_0[0x8];
	u8 rq_num[0x18];
};

/* RQ table context; ends with a flexible array of RQ numbers. */
struct mlx5_ifc_rqtc_bits {
	u8 reserved_at_0[0xa5];
	u8 list_q_type[0x3];
	u8 reserved_at_a8[0x8];
	u8 rqt_max_size[0x10];
	u8 reserved_at_c0[0x10];
	u8 rqt_actual_size[0x10];
	u8 reserved_at_e0[0x6a0];
	struct mlx5_ifc_rq_num_bits rq_num[];
};

/* CREATE_RQT command input layout; embeds the RQT context. */
struct mlx5_ifc_create_rqt_in_bits {
	u8 opcode[0x10];
	u8 uid[0x10];
	u8 reserved_at_20[0x10];
	u8 op_mod[0x10];
	u8 reserved_at_40[0xc0];
	struct mlx5_ifc_rqtc_bits rqt_context;
};

/* CREATE_RQT command output layout. */
struct mlx5_ifc_create_rqt_out_bits {
	u8 status[0x8];
	u8 reserved_at_8[0x18];
	u8 syndrome[0x20];
	u8 reserved_at_40[0x8];
	u8 rqtn[0x18];	/* RQT number returned by FW. */
	u8 reserved_at_60[0x20];
};

/* MODIFY_RQT command input layout; bitmask selects fields to change. */
struct mlx5_ifc_modify_rqt_in_bits {
	u8 opcode[0x10];
	u8 uid[0x10];
	u8 reserved_at_20[0x10];
	u8 op_mod[0x10];
	u8 reserved_at_40[0x8];
	u8 rqtn[0x18];
	u8 reserved_at_60[0x20];
	u8 modify_bitmask[0x40];
	u8 reserved_at_c0[0x40];
	struct mlx5_ifc_rqtc_bits rqt_context;
};

/* MODIFY_RQT command output layout (status/syndrome only). */
struct mlx5_ifc_modify_rqt_out_bits {
	u8 status[0x8];
	u8 reserved_at_8[0x18];
	u8 syndrome[0x20];
	u8 reserved_at_40[0x40];
};

/* table_type values for CREATE_FLOW_TABLE. */
enum {
	MLX5_FLOW_TABLE_TYPE_NIC_RX = 0x0,
	MLX5_FLOW_TABLE_TYPE_NIC_TX = 0x1,
	MLX5_FLOW_TABLE_TYPE_ESW_EGRESS_ACL = 0x2,
	MLX5_FLOW_TABLE_TYPE_ESW_INGRESS_ACL = 0x3,
	MLX5_FLOW_TABLE_TYPE_FDB = 0X4,
	MLX5_FLOW_TABLE_TYPE_SNIFFER_RX = 0X5,
	MLX5_FLOW_TABLE_TYPE_SNIFFER_TX = 0X6,
};

/* Additional DevX command opcodes used below. */
#define MLX5_CMD_OP_CREATE_TIR 0x900
#define MLX5_CMD_OP_CREATE_TIS 0x912
#define MLX5_CMD_OP_CREATE_RQT 0x916
#define MLX5_CMD_OP_MODIFY_RQT 0x917

/**
 * Create TIS object and return TIS ID.
 *
 * @param[in] dev
 * 	Pointer to device
 * @param[out] tis_id
 * 	Pointer to TIS ID
 * @return
 * 	Pointer to TIS object created, NULL otherwise
 */
void *
mlx_devx_tis_create(struct ibv_context *dev, uint32_t td_id, uint32_t *tis_id)
{
	struct mlx5dv_devx_obj *obj;
	uint8_t in[DEVX_ST_SZ_BYTES(create_tis_in)] = {0};
	uint8_t out[DEVX_ST_SZ_BYTES(create_tis_out)] = {0};

	DEVX_SET(create_tis_in, in, opcode, MLX5_CMD_OP_CREATE_TIS);
	DEVX_SET(create_tis_in, in, ctx.transport_domain, td_id);
	obj = mlx5dv_devx_obj_create(dev, in, sizeof(in), out, sizeof(out));
	if (!obj)
		log_error("Can't create TIS");
	else
		*tis_id = DEVX_GET(create_tis_out, out, tisn);
	return obj;
}

/**
 * Destroy DevX Object.
 *
 * Thin pass-through to mlx5dv_devx_obj_destroy() so callers only
 * depend on this module's API.
 *
 * @param[in] obj
 * 	Pointer to DevX object
 * @return
 * 	0 on success, error otherwise
 */
int
mlx_devx_obj_destroy(struct mlx5dv_devx_obj *obj)
{
	return mlx5dv_devx_obj_destroy(obj);
}

/**
 * Open device by name.
 *
 * @param[in] name
 * 	Device name; NULL acts as a wildcard, opening the first device
 * 	in the list
 * @param[in] attr
 * 	DV context attributes passed through to mlx5dv_open_device()
 * @return
 * 	Verbs device context, NULL if any error
 */
struct ibv_context *
mlx_dv_open_device(const char *name, struct mlx5dv_context_attr *attr)
{
	struct ibv_device **list;
	int n, i;
	struct ibv_device *dev = NULL;
	struct ibv_context *ctx = NULL;

	/* NOTE(review): err_if() appears to log and goto free_exit on
	 * failure (defined in virtnet.h) -- the device list is released
	 * there on every path. Confirm against the macro definition. */
	list = ibv_get_device_list(&n);
	err_if(!list, "failed to get device list");
	for (i = 0; i < n; i++) {
		if (!name || !strncmp(name, list[i]->name,
				      sizeof(list[i]->name))) {
			dev = list[i];
			break;
		}
	}
	err_if(!dev, "device name %s not found", name);
	ctx = mlx5dv_open_device(dev, attr);
	err_if(!ctx, "failed to open device %s", name);
free_exit:
	if (list)
		ibv_free_device_list(list);
	return ctx;
}

/**
 * Create RQTable and return RQTNumber if need.
 *
 * @param[in] dev
 * 	Pointer to device
 * @param[in] rqt_attr
 * 	Pointer to RQTable attribute
 * @param[out] rqt_id
 * 	Pointer to RQ Table Number; may be NULL if the caller does not
 * 	need it
 * @return
 * 	Pointer to rqt object created, NULL otherwise
 */
void *
mlx_devx_rqt_create(struct ibv_context *dev,
		    struct mlx_devx_rqt_attr *rqt_attr,
		    uint32_t *rqt_id)
{
	uint32_t *in = NULL;
	/* The command tail carries one 32-bit rq_num entry per RQ. */
	uint32_t inlen = DEVX_ST_SZ_BYTES(create_rqt_in) +
			 rqt_attr->actual_size * sizeof(uint32_t);
	uint32_t out[DEVX_ST_SZ_DW(create_rqt_out)] = {0};
	/* rqt_max_size must be a power of two >= the actual list size. */
	uint32_t max_size = 1 << log2above(rqt_attr->actual_size);
	void *rqt_ctx;
	void *obj = NULL;
	int i;

	in = calloc(1, inlen);
	err_if(!in, "cannot allocate RQT IN data");
	DEVX_SET(create_rqt_in, in, opcode, MLX5_CMD_OP_CREATE_RQT);
	rqt_ctx = DEVX_ADDR_OF(create_rqt_in, in, rqt_context);
	DEVX_SET(rqtc, rqt_ctx, list_q_type, rqt_attr->rq_type);
	DEVX_SET(rqtc, rqt_ctx, rqt_max_size, max_size);
	DEVX_SET(rqtc, rqt_ctx, rqt_actual_size, rqt_attr->actual_size);
	for (i = 0; i < rqt_attr->actual_size; i++)
		DEVX_SET(rqtc, rqt_ctx, rq_num[i], rqt_attr->rq_list[i]);
	obj = mlx5dv_devx_obj_create(dev, in, inlen, out, sizeof(out));
	free(in);
	/* Fixed typo in the error message ("comamnd" -> "command"). */
	err_if(!obj, "DevX command - CREATE_RQT failed");
	if (rqt_id)
		*rqt_id = DEVX_GET(create_rqt_out, out, rqtn);
free_exit:
	return obj;
}

/**
 * Modify RQT using DevX API.
 *
 * @param[in] rqt
 *   Pointer to RQT DevX object structure.
 * @param [in] rqt_attr
 *   Pointer to RQT attributes structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx_devx_rqt_modify(struct mlx5dv_devx_obj *rqt, int rqtn,
		    struct mlx_devx_rqt_attr *rqt_attr)
{
	uint32_t inlen = DEVX_ST_SZ_BYTES(modify_rqt_in) +
			 rqt_attr->actual_size * sizeof(uint32_t);
	uint32_t out[DEVX_ST_SZ_DW(modify_rqt_out)] = {0};
	uint32_t *in = calloc(1, inlen);
	void *rqt_ctx;
	int i;
	int ret;

	err_if(!in, "Failed to allocate RQT modify IN data.");
	DEVX_SET(modify_rqt_in, in, opcode, MLX5_CMD_OP_MODIFY_RQT);
	DEVX_SET(modify_rqt_in, in, rqtn, rqtn);
	DEVX_SET64(modify_rqt_in, in, modify_bitmask, 0x1);
	rqt_ctx = DEVX_ADDR_OF(modify_rqt_in, in, rqt_context);
	DEVX_SET(rqtc, rqt_ctx, list_q_type, rqt_attr->rq_type);
	DEVX_SET(rqtc, rqt_ctx, rqt_max_size, rqt_attr->max_size);
	DEVX_SET(rqtc, rqt_ctx, rqt_actual_size, rqt_attr->actual_size);
	for (i = 0; i < rqt_attr->actual_size; i++)
		DEVX_SET(rqtc, rqt_ctx, rq_num[i], rqt_attr->rq_list[i]);
	ret = mlx5dv_devx_obj_modify(rqt, in, inlen, out, sizeof(out));
	free(in);
	err_if(ret, "Failed to modify RQT using DevX.");
	return 0;
free_exit:
	return -1;
}

/**
 * Create a TIR object via DevX and optionally return its number.
 *
 * Copies every field of @tir_attr into the TIR context, including the
 * Toeplitz RSS key and the outer RX hash field selector.
 *
 * @param[in] ctx
 * 	Pointer to device context
 * @param[in] tir_attr
 * 	Pointer to TIR attributes
 * @param[out] tir_id
 * 	Pointer to TIR number; may be NULL, written only on success
 * @return
 * 	Pointer to TIR object created, NULL otherwise
 */
void *
mlx_devx_cmd_create_tir(struct ibv_context *ctx,
			struct virtnet_tir_devx_attr *tir_attr,
			uint32_t *tir_id)
{
	uint32_t in[DEVX_ST_SZ_DW(create_tir_in)] = {0};
	uint32_t out[DEVX_ST_SZ_DW(create_tir_out)] = {0};
	void *tir_ctx, *outer, *rss_key;
	struct mlx5dv_devx_obj *tir = NULL;

	DEVX_SET(create_tir_in, in, opcode, MLX5_CMD_OP_CREATE_TIR);
	tir_ctx = DEVX_ADDR_OF(create_tir_in, in, ctx);
	DEVX_SET(tirc, tir_ctx, disp_type, tir_attr->disp_type);
	DEVX_SET(tirc, tir_ctx, lro_timeout_period_usecs,
		 tir_attr->lro_timeout_period_usecs);
	DEVX_SET(tirc, tir_ctx, inline_rqn, tir_attr->inline_rqn);
	DEVX_SET(tirc, tir_ctx, rx_hash_symmetric,
		 tir_attr->rx_hash_symmetric);
	DEVX_SET(tirc, tir_ctx, tunneled_offload_en,
		 tir_attr->tunneled_offload_en);
	DEVX_SET(tirc, tir_ctx, indirect_table, tir_attr->indirect_table);
	DEVX_SET(tirc, tir_ctx, rx_hash_fn, tir_attr->rx_hash_fn);
	DEVX_SET(tirc, tir_ctx, self_lb_block, tir_attr->self_lb_block);
	DEVX_SET(tirc, tir_ctx, transport_domain, tir_attr->transport_domain);
	/* RSS key is an opaque byte blob, copied verbatim. */
	rss_key = DEVX_ADDR_OF(tirc, tir_ctx, rx_hash_toeplitz_key);
	memcpy(rss_key, tir_attr->rx_hash_toeplitz_key, VIRTNET_HASH_KEY_LEN);
	outer = DEVX_ADDR_OF(tirc, tir_ctx, rx_hash_field_selector_outer);
	DEVX_SET(rx_hash_field_select, outer, l3_prot_type,
		 tir_attr->rx_hash_field_selector_outer.l3_prot_type);
	DEVX_SET(rx_hash_field_select, outer, l4_prot_type,
		 tir_attr->rx_hash_field_selector_outer.l4_prot_type);
	DEVX_SET(rx_hash_field_select, outer, selected_fields,
		 tir_attr->rx_hash_field_selector_outer.selected_fields);
	tir = mlx5dv_devx_obj_create(ctx, in, sizeof(in), out, sizeof(out));
	err_if(!tir, "DevX command - CREATE_TIR failed");
	if (tir_id)
		*tir_id = DEVX_GET(create_tir_out, out, tirn);
	return tir;
free_exit:
	return NULL;
}

/**
 * Allocate zeroed mask/value buffers sized for a full fte_match_param.
 *
 * On success the caller owns both buffers and releases them with
 * mlx_devx_match_lyr_2_4_delete().
 *
 * @param[out] mask
 * 	Receives the allocated match mask buffer
 * @param[out] value
 * 	Receives the allocated match value buffer
 * @return
 * 	0 on success, -ENOMEM otherwise; no buffer is leaked on failure
 */
int inline
mlx_devx_match_lyr_2_4_new(struct mlx5dv_flow_match_parameters **mask,
			   struct mlx5dv_flow_match_parameters **value)
{
	size_t sz = sizeof(struct mlx5dv_flow_match_parameters) +
		    sizeof(uint32_t) * DEVX_ST_SZ_DW(fte_match_param);

	err_if(!mask, "pointer to mask is NULL");
	err_if(!value, "pointer to value is NULL");
	*mask = calloc(1, sz);
	err_if(!(*mask), "cannot alloc match's mask buf: size[%lu]", sz);
	*value = calloc(1, sz);
	if (!(*value)) {
		/* Don't leak the mask buffer when the value alloc fails. */
		free(*mask);
		*mask = NULL;
	}
	err_if(!(*value), "cannot alloc match's value buf: size[%lu]", sz);
	return 0;
free_exit:
	return -ENOMEM;
}

/**
 * Initialize layer 2-4 match mask/value pair for IP version and
 * protocol matching.
 *
 * A zero selector disables matching on that field (mask stays zero);
 * a non-zero selector matches the field exactly.
 *
 * @param[in] ip_version
 * 	IP version to match, or 0 to ignore
 * @param[in] ip_proto
 * 	IP protocol to match, or 0 to ignore
 * @param[in,out] mask
 * 	Match mask buffer to fill
 * @param[in,out] value
 * 	Match value buffer to fill
 * @return
 * 	0 on success, -EINVAL otherwise
 */
int inline
mlx_devx_match_lyr_2_4_init(uint8_t ip_version, uint8_t ip_proto,
			   struct mlx5dv_flow_match_parameters *mask,
			   struct mlx5dv_flow_match_parameters *value)
{
	void *mask_buf, *value_buf, *outer_m, *outer_v;
	uint8_t field_mask;

	err_if(!mask, "match's mask buf is NULL");
	err_if(!value, "match's value buf is NULL");
	mask->match_sz = sizeof(uint32_t) * DEVX_ST_SZ_DW(fte_match_param);
	value->match_sz = sizeof(uint32_t) * DEVX_ST_SZ_DW(fte_match_param);
	mask_buf = mask->match_buf;
	value_buf = value->match_buf;
	outer_m = DEVX_ADDR_OF(fte_match_param, mask_buf, outer_headers);
	outer_v = DEVX_ADDR_OF(fte_match_param, value_buf, outer_headers);
	field_mask = ip_version ? 0xF : 0;
	DEVX_SET(fte_match_set_lyr_2_4, outer_m, ip_version, field_mask);
	DEVX_SET(fte_match_set_lyr_2_4, outer_v, ip_version, ip_version);
	field_mask = ip_proto ? 0xFF : 0;
	DEVX_SET(fte_match_set_lyr_2_4, outer_m, ip_protocol, field_mask);
	DEVX_SET(fte_match_set_lyr_2_4, outer_v, ip_protocol, ip_proto);
	return 0;
free_exit:
	return -EINVAL;
}

/**
 * Release the mask/value buffers allocated by
 * mlx_devx_match_lyr_2_4_new().
 *
 * @param[in] mask
 * 	Match mask buffer to free; may be NULL
 * @param[in] value
 * 	Match value buffer to free; may be NULL
 */
void inline
mlx_devx_match_lyr_2_4_delete(struct mlx5dv_flow_match_parameters *mask,
			      struct mlx5dv_flow_match_parameters *value)
{
	/* free(NULL) is a no-op, so no NULL guards are needed. */
	free(mask);
	free(value);
}

/**
 * Program a destination MAC match into a mask/value buffer pair.
 *
 * @param[in] addr_m
 * 	6-byte MAC mask to apply
 * @param[in] addr_v
 * 	6-byte MAC value to match
 * @param[in,out] mask
 * 	Match mask buffer
 * @param[in,out] value
 * 	Match value buffer
 */
void inline
mlx_devx_match_set_mac_dst(uint8_t *addr_m, uint8_t *addr_v,
			   struct mlx5dv_flow_match_parameters *mask,
			   struct mlx5dv_flow_match_parameters *value)
{
	void *outer_m, *outer_v;

	outer_m = DEVX_ADDR_OF(fte_match_param, mask->match_buf,
			       outer_headers);
	outer_v = DEVX_ADDR_OF(fte_match_param, value->match_buf,
			       outer_headers);
	memcpy(DEVX_ADDR_OF(fte_match_set_lyr_2_4, outer_m, dmac_47_16),
	       addr_m, ETH_ALEN);
	memcpy(DEVX_ADDR_OF(fte_match_set_lyr_2_4, outer_v, dmac_47_16),
	       addr_v, ETH_ALEN);
}

/**
 * Create a flow table via DevX and optionally return its ID.
 *
 * @param[in] ctx
 * 	Pointer to device context
 * @param[in] type
 * 	Table type, one of MLX5_FLOW_TABLE_TYPE_*
 * @param[in] level
 * 	Table level within the steering pipeline
 * @param[in] log_size
 * 	Log2 of the table size.
 * 	NOTE(review): this parameter is currently unused -- it is never
 * 	written into the flow table context. Confirm whether a
 * 	DEVX_SET of the context's size field is missing here.
 * @param[out] id
 * 	Pointer to table ID; may be NULL, written only on success
 * @return
 * 	Pointer to flow table object created, NULL otherwise
 */
struct mlx5dv_devx_obj *
mlx_devx_create_flow_table(struct ibv_context *ctx, uint32_t type,
			   uint8_t level, uint8_t log_size, uint32_t *id)
{
	uint8_t in[DEVX_ST_SZ_BYTES(create_flow_table_in)] = {0};
	uint8_t out[DEVX_ST_SZ_BYTES(create_flow_table_out)] = {0};
	void *ft_ctx;
	struct mlx5dv_devx_obj *obj;

	DEVX_SET(create_flow_table_in, in, opcode,
		 MLX5_CMD_OP_CREATE_FLOW_TABLE);
	DEVX_SET(create_flow_table_in, in, table_type, type);
	ft_ctx = DEVX_ADDR_OF(create_flow_table_in, in, flow_table_context);
	DEVX_SET(flow_table_context, ft_ctx, level, level);
	obj = mlx5dv_devx_obj_create(ctx, in, sizeof(in), out, sizeof(out));
	err_if(!obj, "fail to create flow table DevX object");
	if (id)
		*id = DEVX_GET(create_flow_table_out, out, table_id);
	return obj;
free_exit:
	return NULL;
}

/**
 * Copy every field of a WQ attribute struct into a WQ context buffer.
 *
 * Field-for-field transcription into the mlx5_ifc_wq_bits layout; the
 * two 64-bit fields (dbr_addr, wq_umem_offset) use DEVX_SET64.
 *
 * @param[in,out] wq_ctx
 * 	Pointer to the WQ context inside a command buffer
 * @param[in] wq_attr
 * 	Pointer to WQ attributes to transcribe
 */
void
mlx_devx_fill_wq_data(void *wq_ctx, struct mlx_devx_wq_attr *wq_attr)
{
	DEVX_SET(wq, wq_ctx, wq_type, wq_attr->wq_type);
	DEVX_SET(wq, wq_ctx, wq_signature, wq_attr->wq_signature);
	DEVX_SET(wq, wq_ctx, end_padding_mode, wq_attr->end_padding_mode);
	DEVX_SET(wq, wq_ctx, cd_slave, wq_attr->cd_slave);
	DEVX_SET(wq, wq_ctx, hds_skip_first_sge, wq_attr->hds_skip_first_sge);
	DEVX_SET(wq, wq_ctx, log2_hds_buf_size, wq_attr->log2_hds_buf_size);
	DEVX_SET(wq, wq_ctx, page_offset, wq_attr->page_offset);
	DEVX_SET(wq, wq_ctx, lwm, wq_attr->lwm);
	DEVX_SET(wq, wq_ctx, pd, wq_attr->pd);
	DEVX_SET(wq, wq_ctx, uar_page, wq_attr->uar_page);
	DEVX_SET64(wq, wq_ctx, dbr_addr, wq_attr->dbr_addr);
	DEVX_SET(wq, wq_ctx, hw_counter, wq_attr->hw_counter);
	DEVX_SET(wq, wq_ctx, sw_counter, wq_attr->sw_counter);
	DEVX_SET(wq, wq_ctx, log_wq_stride, wq_attr->log_wq_stride);
	DEVX_SET(wq, wq_ctx, log_wq_pg_sz, wq_attr->log_wq_pg_sz);
	DEVX_SET(wq, wq_ctx, log_wq_sz, wq_attr->log_wq_sz);
	DEVX_SET(wq, wq_ctx, dbr_umem_valid, wq_attr->dbr_umem_valid);
	DEVX_SET(wq, wq_ctx, wq_umem_valid, wq_attr->wq_umem_valid);
	DEVX_SET(wq, wq_ctx, log_hairpin_num_packets,
		 wq_attr->log_hairpin_num_packets);
	DEVX_SET(wq, wq_ctx, log_hairpin_data_sz,
		 wq_attr->log_hairpin_data_sz);
	DEVX_SET(wq, wq_ctx, single_wqe_log_num_of_strides,
		 wq_attr->single_wqe_log_num_of_strides);
	DEVX_SET(wq, wq_ctx, two_byte_shift_en, wq_attr->two_byte_shift_en);
	DEVX_SET(wq, wq_ctx, single_stride_log_num_of_bytes,
		 wq_attr->single_stride_log_num_of_bytes);
	DEVX_SET(wq, wq_ctx, dbr_umem_id, wq_attr->dbr_umem_id);
	DEVX_SET(wq, wq_ctx, wq_umem_id, wq_attr->wq_umem_id);
	DEVX_SET64(wq, wq_ctx, wq_umem_offset, wq_attr->wq_umem_offset);
}

/**
 * Create an RQ via DevX and optionally return its number.
 *
 * Transcribes the RQ attributes into the RQ context and delegates the
 * embedded WQ context to mlx_devx_fill_wq_data().
 *
 * @param[in] ctx
 * 	Pointer to device context
 * @param[in] rq_attr
 * 	Pointer to RQ creation attributes
 * @param[out] rqn
 * 	Pointer to RQ number; may be NULL, written only on success
 * @return
 * 	Pointer to RQ object created, NULL otherwise
 */
struct mlx5dv_devx_obj *
mlx_devx_create_rq(struct ibv_context *ctx,
		   struct mlx_devx_create_rq_attr *rq_attr, uint32_t *rqn)
{
	uint32_t in[DEVX_ST_SZ_DW(create_rq_in)] = {0};
	uint32_t out[DEVX_ST_SZ_DW(create_rq_out)] = {0};
	void *rq_ctx, *wq_ctx;
	struct mlx_devx_wq_attr *wq_attr;
	struct mlx5dv_devx_obj *rq = NULL;

	/* NOTE(review): this opcode define uses a DEVX_ prefix while the
	 * others use MLX5_CMD_OP_ -- same value space, naming only. */
	DEVX_SET(create_rq_in, in, opcode, DEVX_CMD_OP_CREATE_RQ);
	rq_ctx = DEVX_ADDR_OF(create_rq_in, in, ctx);
	DEVX_SET(rqc, rq_ctx, rlky, rq_attr->rlky);
	DEVX_SET(rqc, rq_ctx, delay_drop_en, rq_attr->delay_drop_en);
	DEVX_SET(rqc, rq_ctx, scatter_fcs, rq_attr->scatter_fcs);
	DEVX_SET(rqc, rq_ctx, vsd, rq_attr->vsd);
	DEVX_SET(rqc, rq_ctx, mem_rq_type, rq_attr->mem_rq_type);
	DEVX_SET(rqc, rq_ctx, state, rq_attr->state);
	DEVX_SET(rqc, rq_ctx, flush_in_error_en, rq_attr->flush_in_error_en);
	DEVX_SET(rqc, rq_ctx, hairpin, rq_attr->hairpin);
	DEVX_SET(rqc, rq_ctx, user_index, rq_attr->user_index);
	DEVX_SET(rqc, rq_ctx, cqn, rq_attr->cqn);
	DEVX_SET(rqc, rq_ctx, counter_set_id, rq_attr->counter_set_id);
	DEVX_SET(rqc, rq_ctx, rmpn, rq_attr->rmpn);
	wq_ctx = DEVX_ADDR_OF(rqc, rq_ctx, wq);
	wq_attr = &rq_attr->wq_attr;
	mlx_devx_fill_wq_data(wq_ctx, wq_attr);
	rq = mlx5dv_devx_obj_create(ctx, in, sizeof(in), out, sizeof(out));
	err_if(!rq, "cannot create RQ obj via DevX");
	if (rqn)
		*rqn = DEVX_GET(create_rq_out, out, rqn);
	return rq;
free_exit:
	return NULL;

}
