#ifndef __RTE_RING_H__
#define __RTE_RING_H__

#include <errno.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#include "rte_common.h"
#include "rte_stdatomic.h"

typedef unsigned char   U8;
typedef unsigned short  U16;
typedef unsigned int    U32;
typedef unsigned long   U64;

typedef signed char     S8;
typedef signed short    S16;
typedef signed int      S32;
typedef signed long     S64;

#ifndef RTE_WRITE_ONCE
/* Perform a single, untorn store through a volatile lvalue (Linux
 * WRITE_ONCE style). The previous plain-assignment fallback gave the
 * compiler licence to split, elide or reorder the store, defeating the
 * purpose of the macro. */
#define RTE_WRITE_ONCE(a, b)    (*(volatile __typeof__(a) *)&(a) = (b))
#endif

#ifndef rte_offsetof
/* Offset of *field* within *type*, in bytes. Delegate to the standard
 * offsetof(): the classic hand-rolled ((size_t)&((type *)0)->field) form
 * dereferences a null pointer and is undefined behavior, while offsetof
 * is guaranteed to be a constant expression usable in static asserts. */
#define rte_offsetof(type, field)  offsetof(type, field)
#endif

#ifndef rte_container_of
/* Given a pointer to *member* embedded in *type*, return a pointer to the
 * enclosing *type* object. GNU statement expression: the first temporary
 * type-checks that *ptr* really points at the member's type; the unused
 * _target_ptr exists only to trigger a diagnostic on bad casts. */
#define rte_container_of(ptr, type, member) ({		\
			const typeof(((type *)0)->member) *_ptr = (ptr); \
			__attribute__((__unused__)) type *_target_ptr =	\
				(type *)(ptr);				\
			(type *)(((uintptr_t)_ptr) - rte_offsetof(type, member)); \
		})
#endif



#define RTE_TAILQ_RING_NAME "RTE_RING"

/** enqueue/dequeue behavior types */
enum rte_ring_queue_behavior {
	/** Enq/Deq a fixed number of items: all *n* or none (op returns 0) */
	RTE_RING_QUEUE_FIXED = 0,
	/** Enq/Deq as many items as possible from the ring */
	RTE_RING_QUEUE_VARIABLE
};

/** The maximum length of a ring name. */
#define RTE_RING_NAMESIZE (32 + 1)

/** prod/cons sync types: how concurrent producers/consumers coordinate */
enum rte_ring_sync_type {
	RTE_RING_SYNC_MT,     /**< multi-thread safe (default mode) */
	RTE_RING_SYNC_ST,     /**< single thread only */
	RTE_RING_SYNC_MT_RTS, /**< multi-thread relaxed tail sync */
	RTE_RING_SYNC_MT_HTS, /**< multi-thread head/tail sync */
};

/**
 * structures to hold a pair of head/tail values and other metadata.
 * Depending on sync_type format of that structure might be different,
 * but offset for *sync_type* and *tail* values should remain the same
 * (enforced by RTE_BUILD_BUG_ON checks in rte_ring_init()).
 */
struct rte_ring_headtail {
	volatile RTE_ATOMIC(U32) head;      /**< prod/consumer head. */
	volatile RTE_ATOMIC(U32) tail;      /**< prod/consumer tail. */
	union {
		/** sync type of prod/cons */
		enum rte_ring_sync_type sync_type;
		/** deprecated - True if single prod/cons; aliases sync_type */
		U32 single;
	};
};

/** Combined position + reference counter for the RTS head/tail.
 * Read/written as one 8-byte atomic (*raw*) so pos and cnt always
 * change together; *val* gives field-wise access after the load. */
union __rte_ring_rts_poscnt {
	/** raw 8B value to read/write *cnt* and *pos* as one atomic op */
	RTE_ATOMIC(U64) raw __rte_aligned(8);
	struct {
		U32 cnt; /**< head/tail reference counter */
		U32 pos; /**< head/tail position */
	} val;
};

/** Head/tail metadata for relaxed-tail-sync (RTS) mode.
 * Field order keeps the offsets of *tail* and *sync_type* identical to
 * struct rte_ring_headtail (checked at compile time in rte_ring_init()). */
struct rte_ring_rts_headtail {
	volatile union __rte_ring_rts_poscnt tail;
	enum rte_ring_sync_type sync_type;  /**< sync type of prod/cons */
	U32 htd_max;   /**< max allowed distance between head/tail */
	volatile union __rte_ring_rts_poscnt head;
};

/** Combined head + tail position for HTS mode, updated as one 8B atomic. */
union __rte_ring_hts_pos {
	/** raw 8B value to read/write *head* and *tail* as one atomic op */
	RTE_ATOMIC(U64) raw __rte_aligned(8);
	struct {
		RTE_ATOMIC(U32) head; /**< head position */
		RTE_ATOMIC(U32) tail; /**< tail position */
	} pos;
};

/** Head/tail metadata for head-tail-sync (HTS) mode.
 * Layout kept offset-compatible with struct rte_ring_headtail. */
struct rte_ring_hts_headtail {
	volatile union __rte_ring_hts_pos ht;
	enum rte_ring_sync_type sync_type;  /**< sync type of prod/cons */
};

/**
 * An RTE ring structure.
 *
 * The producer and the consumer have a head and a tail index. The particularity
 * of these index is that they are not between 0 and size(ring)-1. These indexes
 * are between 0 and 2^32 -1, and we mask their value when we access the ring[]
 * field. Thanks to this assumption, we can do subtractions between 2 index
 * values in a modulo-32bit base: that's why the overflow of the indexes is not
 * a problem.
 *
 * NOTE(review): unlike upstream DPDK there is no *name* field; element
 * storage is expected to follow this structure in memory (see the &r[1]
 * casts in the copy helpers below).
 */
typedef struct rte_ring {
	int flags;               /**< Flags supplied at creation. */
	U32 size;           /**< Size of ring. */
	U32 mask;           /**< Mask (size-1) of ring. */
	U32 capacity;       /**< Usable size of ring */

	/** Ring producer status. */
	union {
		struct rte_ring_headtail prod;
		//struct rte_ring_hts_headtail hts_prod;
		struct rte_ring_rts_headtail rts_prod;
	}  __rte_cache_aligned;


	/** Ring consumer status. */
	union {
		struct rte_ring_headtail cons;
		//struct rte_ring_hts_headtail hts_cons;
		struct rte_ring_rts_headtail rts_cons;
	}  __rte_cache_aligned;

}RTE_RING_S;

#define RING_F_SP_ENQ 0x0001 /**< The default enqueue is "single-producer". */
#define RING_F_SC_DEQ 0x0002 /**< The default dequeue is "single-consumer". */
/**
 * Ring is to hold exactly requested number of entries.
 * Without this flag set, the ring size requested must be a power of 2, and the
 * usable space will be that size - 1. With the flag, the requested size will
 * be rounded up to the next power of two, but the usable space will be exactly
 * that requested. Worst case, if a power-of-2 size is requested, half the
 * ring space will be wasted.
 */
#define RING_F_EXACT_SZ 0x0004
#define RTE_RING_SZ_MASK  (0x7fffffffU) /**< Ring size mask */

#define RING_F_MP_RTS_ENQ 0x0008 /**< The default enqueue is "MP RTS". */
#define RING_F_MC_RTS_DEQ 0x0010 /**< The default dequeue is "MC RTS". */

#define RING_F_MP_HTS_ENQ 0x0020 /**< The default enqueue is "MP HTS". */
#define RING_F_MC_HTS_DEQ 0x0040 /**< The default dequeue is "MC HTS". */

/* mask of all valid flag values to ring_create() */
#define RING_F_MASK (RING_F_SP_ENQ | RING_F_SC_DEQ | RING_F_EXACT_SZ | \
		     RING_F_MP_RTS_ENQ | RING_F_MC_RTS_DEQ |	       \
		     RING_F_MP_HTS_ENQ | RING_F_MC_HTS_DEQ)

/* true if x is a non-zero power of 2.
 * The naive (((x)-1) & (x)) == 0 form also accepts 0, which let a
 * count of zero slip through ring-size validation and produce
 * mask = UINT32_MAX; reject 0 explicitly. */
#define POWEROF2(x) (((x) != 0) && ((((x)-1) & (x)) == 0))

/* by default set head/tail distance as 1/8 of ring capacity */
#define HTD_MAX_DEF	8




#ifndef UINT32_MAX
#define UINT32_MAX   ((U32)-1)
#endif

/**
 * Return producer max Head-Tail-Distance (HTD).
 *
 * @param r
 *   A pointer to the ring structure.
 * @return
 *   Producer HTD value when the producer runs in RTS sync mode,
 *   UINT32_MAX otherwise.
 */
static inline U32
rte_ring_get_prod_htd_max(const struct rte_ring *r)
{
	return (r->prod.sync_type == RTE_RING_SYNC_MT_RTS) ?
		r->rts_prod.htd_max : UINT32_MAX;
}

/**
 * Set producer max Head-Tail-Distance (HTD).
 * The producer must be configured in RTS sync mode.
 *
 * @param r
 *   A pointer to the ring structure.
 * @param v
 *   New HTD value to set.
 * @return
 *   0 on success, negative error code when the producer is not RTS.
 */
static inline int
rte_ring_set_prod_htd_max(struct rte_ring *r, U32 v)
{
	if (r->prod.sync_type == RTE_RING_SYNC_MT_RTS) {
		r->rts_prod.htd_max = v;
		return 0;
	}
	return -1;
}

/**
 * Return consumer max Head-Tail-Distance (HTD).
 *
 * @param r
 *   A pointer to the ring structure.
 * @return
 *   Consumer HTD value when the consumer runs in RTS sync mode,
 *   UINT32_MAX otherwise.
 */
static inline U32
rte_ring_get_cons_htd_max(const struct rte_ring *r)
{
	return (r->cons.sync_type == RTE_RING_SYNC_MT_RTS) ?
		r->rts_cons.htd_max : UINT32_MAX;
}

/**
 * Set consumer max Head-Tail-Distance (HTD).
 * The consumer must be configured in RTS sync mode.
 *
 * @param r
 *   A pointer to the ring structure.
 * @param v
 *   New HTD value to set.
 * @return
 *   0 on success, negative error code when the consumer is not RTS.
 */
static inline int
rte_ring_set_cons_htd_max(struct rte_ring *r, U32 v)
{
	if (r->cons.sync_type == RTE_RING_SYNC_MT_RTS) {
		r->rts_cons.htd_max = v;
		return 0;
	}
	return -1;
}

/**
 * Translate ring creation *flags* into producer/consumer sync types.
 *
 * Declared static inline: a plain static function defined in a header is
 * duplicated (and warned about as unused) in every translation unit that
 * includes it; inline matches the rest of this header.
 *
 * @param flags
 *   RING_F_* creation flags.
 * @param prod_st
 *   Output: producer sync type derived from the flags.
 * @param cons_st
 *   Output: consumer sync type derived from the flags.
 * @return
 *   0 on success, -EINVAL when mutually exclusive flags are combined.
 */
static inline int
get_sync_type(U32 flags, enum rte_ring_sync_type *prod_st,
	enum rte_ring_sync_type *cons_st)
{
	/* at most one flag from each group may be set */
	static const U32 prod_st_flags =
		(RING_F_SP_ENQ | RING_F_MP_RTS_ENQ | RING_F_MP_HTS_ENQ);
	static const U32 cons_st_flags =
		(RING_F_SC_DEQ | RING_F_MC_RTS_DEQ | RING_F_MC_HTS_DEQ);

	switch (flags & prod_st_flags) {
	case 0:
		*prod_st = RTE_RING_SYNC_MT;
		break;
	case RING_F_SP_ENQ:
		*prod_st = RTE_RING_SYNC_ST;
		break;
	case RING_F_MP_RTS_ENQ:
		*prod_st = RTE_RING_SYNC_MT_RTS;
		break;
	case RING_F_MP_HTS_ENQ:
		*prod_st = RTE_RING_SYNC_MT_HTS;
		break;
	default:
		return -EINVAL;
	}

	switch (flags & cons_st_flags) {
	case 0:
		*cons_st = RTE_RING_SYNC_MT;
		break;
	case RING_F_SC_DEQ:
		*cons_st = RTE_RING_SYNC_ST;
		break;
	case RING_F_MC_RTS_DEQ:
		*cons_st = RTE_RING_SYNC_MT_RTS;
		break;
	case RING_F_MC_HTS_DEQ:
		*cons_st = RTE_RING_SYNC_MT_HTS;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}


/**
 * Return the size of memory occupied by a ring of pointer-sized elements:
 * the header structure plus *count* slots, rounded up to a cache line.
 *
 * @param count
 *   Number of elements; must be a non-zero power of 2 and must not
 *   exceed RTE_RING_SZ_MASK.
 * @return
 *   Required size in bytes, or -EINVAL on invalid count.
 */
static inline ssize_t
rte_ring_get_memsize_elem( unsigned int count)
{
    ssize_t sz;
    unsigned int esize = sizeof(void *);

    /* Check if element size is a multiple of 4B (defensive; sizeof(void *)
     * is 4 or 8 on supported targets) */
    if (esize % 4 != 0) {
        printf("element size is not a multiple of 4\n");

        return -EINVAL;
    }

    /* count must be a non-zero power of 2: the classic POWEROF2 macro
     * accepts 0, and a zero-sized ring would get mask = UINT32_MAX */
    if ((count == 0) || (!POWEROF2(count)) || (count > RTE_RING_SZ_MASK)) {
        printf("Requested number of elements is invalid, must be power of 2, and not exceed %u\n", RTE_RING_SZ_MASK);
        return -EINVAL;
    }

    sz = sizeof(struct rte_ring) + (ssize_t)count * esize;
    sz = RTE_ALIGN(sz, RTE_CACHE_LINE_SIZE);
    return sz;
}


/**
 * Initialize a ring in caller-provided memory.
 *
 * The ring is hard-wired to RTS producer/consumer sync mode. Element
 * storage is expected to follow *r in memory (see &r[1] in the copy
 * helpers); size it with rte_ring_get_memsize_elem().
 *
 * @param r
 *   Pointer to memory large enough for the ring header plus slots.
 * @param count
 *   Number of slots; must be a non-zero power of 2 not exceeding
 *   RTE_RING_SZ_MASK (usable capacity is count - 1).
 * @return
 *   0 on success, negative errno-style value otherwise.
 */
static inline int
rte_ring_init(struct rte_ring *r, unsigned int count)
{
	int ret;
	int flags = RING_F_MP_RTS_ENQ | RING_F_MC_RTS_DEQ;

	/* compilation-time checks: layout invariants the lock-free code
	 * relies on (cache-line alignment of prod/cons, matching offsets
	 * of sync_type/tail across all headtail variants) */
	RTE_BUILD_BUG_ON((sizeof(struct rte_ring) &
			  RTE_CACHE_LINE_MASK) != 0);
	RTE_BUILD_BUG_ON((rte_offsetof(struct rte_ring, cons) &
			  RTE_CACHE_LINE_MASK) != 0);
	RTE_BUILD_BUG_ON((rte_offsetof(struct rte_ring, prod) &
			  RTE_CACHE_LINE_MASK) != 0);

	RTE_BUILD_BUG_ON(rte_offsetof(struct rte_ring_headtail, sync_type) !=
		rte_offsetof(struct rte_ring_hts_headtail, sync_type));
	RTE_BUILD_BUG_ON(rte_offsetof(struct rte_ring_headtail, tail) !=
		rte_offsetof(struct rte_ring_hts_headtail, ht.pos.tail));

	RTE_BUILD_BUG_ON(rte_offsetof(struct rte_ring_headtail, sync_type) !=
		rte_offsetof(struct rte_ring_rts_headtail, sync_type));
	RTE_BUILD_BUG_ON(rte_offsetof(struct rte_ring_headtail, tail) !=
		rte_offsetof(struct rte_ring_rts_headtail, tail.val.pos));

	/* future proof flags, only allow supported values */
	if (flags & ~RING_F_MASK) {
		printf("Unsupported flags requested %#x\n", flags);
		return -EINVAL;
	}

	/* init the ring structure */
	memset(r, 0, sizeof(*r));

	r->flags = flags;
	ret = get_sync_type(flags, &r->prod.sync_type, &r->cons.sync_type);
	if (ret != 0)
		return ret;

	if (flags & RING_F_EXACT_SZ) {
		/* exact size needs count entries plus one free slot; the
		 * largest power of 2 within RTE_RING_SZ_MASK is 2^30, so
		 * count above SZ_MASK/2 cannot be satisfied (and count + 1
		 * must not overflow) */
		if (count > RTE_RING_SZ_MASK / 2) {
			printf("Requested size is invalid, must not exceed the size limit %u\n", RTE_RING_SZ_MASK / 2);
			return -EINVAL;
		}
		r->size = rte_align32pow2(count + 1);
		r->mask = r->size - 1;
		r->capacity = count;
	} else {
		/* reject count == 0 explicitly: the naive POWEROF2 macro
		 * accepts 0, which would yield mask = UINT32_MAX */
		if ((count == 0) || (!POWEROF2(count)) ||
		    (count > RTE_RING_SZ_MASK)) {
			printf("Requested size is invalid, must be power of 2, and not exceed the size limit %u\n", RTE_RING_SZ_MASK);
			return -EINVAL;
		}
		r->size = count;
		r->mask = count - 1;
		r->capacity = r->mask;
	}

	/* set default values for head-tail distance: 1/8 of capacity */
	if (flags & RING_F_MP_RTS_ENQ)
		rte_ring_set_prod_htd_max(r, r->capacity / HTD_MAX_DEF);
	if (flags & RING_F_MC_RTS_DEQ)
		rte_ring_set_cons_htd_max(r, r->capacity / HTD_MAX_DEF);

	return 0;
}


/**
 * Copy *n* 32-bit words from obj_table into the ring storage starting at
 * word index *idx*, wrapping at *size* words. Caller guarantees space.
 * Main loop is unrolled by 8; the switch copies the 0-7 word remainder
 * with intentional fallthrough.
 */
static __rte_always_inline void
__rte_ring_enqueue_elems_32(struct rte_ring *r, const U32 size,
        U32 idx, const void *obj_table, U32 n)
{
    unsigned int i;
    /* element storage begins immediately after the ring header */
    U32 *ring = (U32 *)&r[1];
    const U32 *obj = (const U32 *)obj_table;
    if (likely(idx + n <= size)) {
        /* contiguous region: unrolled copy, 8 words per iteration */
        for (i = 0; i < (n & ~0x7); i += 8, idx += 8) {
            ring[idx] = obj[i];
            ring[idx + 1] = obj[i + 1];
            ring[idx + 2] = obj[i + 2];
            ring[idx + 3] = obj[i + 3];
            ring[idx + 4] = obj[i + 4];
            ring[idx + 5] = obj[i + 5];
            ring[idx + 6] = obj[i + 6];
            ring[idx + 7] = obj[i + 7];
        }
        switch (n & 0x7) {
        case 7:
            ring[idx++] = obj[i++]; /* fallthrough */
        case 6:
            ring[idx++] = obj[i++]; /* fallthrough */
        case 5:
            ring[idx++] = obj[i++]; /* fallthrough */
        case 4:
            ring[idx++] = obj[i++]; /* fallthrough */
        case 3:
            ring[idx++] = obj[i++]; /* fallthrough */
        case 2:
            ring[idx++] = obj[i++]; /* fallthrough */
        case 1:
            ring[idx++] = obj[i++]; /* fallthrough */
        }
    } else {
        /* region wraps: copy up to the end of the ring storage... */
        for (i = 0; idx < size; i++, idx++)
            ring[idx] = obj[i];
        /* Start at the beginning */
        for (idx = 0; i < n; i++, idx++)
            ring[idx] = obj[i];
    }
}

/**
 * Copy *n* 64-bit elements from obj_table into the ring, starting at
 * producer position *prod_head* (masked to a slot index), wrapping at
 * r->size. Main loop unrolled by 4 with a fallthrough remainder switch.
 */
static __rte_always_inline void
__rte_ring_enqueue_elems_64(struct rte_ring *r, U32 prod_head,
        const void *obj_table, U32 n)
{
    unsigned int i;
    const U32 size = r->size;
    U32 idx = prod_head & r->mask;
    /* element storage begins immediately after the ring header */
    U64 *ring = (U64 *)&r[1];
    const U64 *obj = (const U64 *)obj_table;
    if (likely(idx + n <= size)) {
        for (i = 0; i < (n & ~0x3); i += 4, idx += 4) {
            ring[idx] = obj[i];
            ring[idx + 1] = obj[i + 1];
            ring[idx + 2] = obj[i + 2];
            ring[idx + 3] = obj[i + 3];
        }
        switch (n & 0x3) {
        case 3:
            ring[idx++] = obj[i++]; /* fallthrough */
        case 2:
            ring[idx++] = obj[i++]; /* fallthrough */
        case 1:
            ring[idx++] = obj[i++];
        }
    } else {
        /* region wraps: copy up to the end, then restart at index 0 */
        for (i = 0; idx < size; i++, idx++)
            ring[idx] = obj[i];
        /* Start at the beginning */
        for (idx = 0; i < n; i++, idx++)
            ring[idx] = obj[i];
    }
}

/**
 * Copy *n* 128-bit elements from obj_table into the ring, starting at
 * producer position *prod_head*, wrapping at r->size. The fast path
 * moves two 16B elements (32 bytes) per memcpy.
 */
static __rte_always_inline void
__rte_ring_enqueue_elems_128(struct rte_ring *r, U32 prod_head,
        const void *obj_table, U32 n)
{
    unsigned int i;
    const U32 size = r->size;
    U32 idx = prod_head & r->mask;
    /* element storage begins immediately after the ring header */
    rte_int128_t *ring = (rte_int128_t *)&r[1];
    const rte_int128_t *obj = (const rte_int128_t *)obj_table;
    if (likely(idx + n <= size)) {
        /* 32 bytes == two 16B elements per iteration */
        for (i = 0; i < (n & ~0x1); i += 2, idx += 2)
            memcpy((void *)(ring + idx),
                (const void *)(obj + i), 32);
        switch (n & 0x1) {
        case 1:
            memcpy((void *)(ring + idx),
                (const void *)(obj + i), 16);
        }
    } else {
        /* region wraps: copy one element at a time to the end... */
        for (i = 0; idx < size; i++, idx++)
            memcpy((void *)(ring + idx),
                (const void *)(obj + i), 16);
        /* Start at the beginning */
        for (idx = 0; i < n; i++, idx++)
            memcpy((void *)(ring + idx),
                (const void *)(obj + i), 16);
    }
}

/* Dispatch the actual element copy into the ring: dedicated fast paths
 * for 8B and 16B elements, and a generic path that views everything as
 * a stream of 32-bit words (esize must be a multiple of 4). Shared by
 * the single- and multi-producer enqueue functions.
 */
static __rte_always_inline void
__rte_ring_enqueue_elems(struct rte_ring *r, U32 prod_head,
        const void *obj_table, U32 esize, U32 num)
{
    if (esize == 8) {
        __rte_ring_enqueue_elems_64(r, prod_head, obj_table, num);
        return;
    }
    if (esize == 16) {
        __rte_ring_enqueue_elems_128(r, prod_head, obj_table, num);
        return;
    }

    /* generic path: scale element counts/indices to 32-bit words */
    {
        const U32 words_per_elem = esize / sizeof(U32);
        const U32 word_idx = (prod_head & r->mask) * words_per_elem;

        __rte_ring_enqueue_elems_32(r, r->size * words_per_elem,
                word_idx, obj_table, num * words_per_elem);
    }
}

/**
 * Copy *n* 32-bit words out of the ring storage starting at word index
 * *idx* into obj_table, wrapping at *size* words. Mirror image of
 * __rte_ring_enqueue_elems_32: unrolled by 8 plus fallthrough remainder.
 */
static __rte_always_inline void
__rte_ring_dequeue_elems_32(struct rte_ring *r, const U32 size,
        U32 idx, void *obj_table, U32 n)
{
    unsigned int i;
    /* element storage begins immediately after the ring header */
    U32 *ring = (U32 *)&r[1];
    U32 *obj = (U32 *)obj_table;
    if (likely(idx + n <= size)) {
        for (i = 0; i < (n & ~0x7); i += 8, idx += 8) {
            obj[i] = ring[idx];
            obj[i + 1] = ring[idx + 1];
            obj[i + 2] = ring[idx + 2];
            obj[i + 3] = ring[idx + 3];
            obj[i + 4] = ring[idx + 4];
            obj[i + 5] = ring[idx + 5];
            obj[i + 6] = ring[idx + 6];
            obj[i + 7] = ring[idx + 7];
        }
        switch (n & 0x7) {
        case 7:
            obj[i++] = ring[idx++]; /* fallthrough */
        case 6:
            obj[i++] = ring[idx++]; /* fallthrough */
        case 5:
            obj[i++] = ring[idx++]; /* fallthrough */
        case 4:
            obj[i++] = ring[idx++]; /* fallthrough */
        case 3:
            obj[i++] = ring[idx++]; /* fallthrough */
        case 2:
            obj[i++] = ring[idx++]; /* fallthrough */
        case 1:
            obj[i++] = ring[idx++]; /* fallthrough */
        }
    } else {
        /* region wraps: copy up to the end, then restart at index 0 */
        for (i = 0; idx < size; i++, idx++)
            obj[i] = ring[idx];
        /* Start at the beginning */
        for (idx = 0; i < n; i++, idx++)
            obj[i] = ring[idx];
    }
}

/**
 * Copy *n* 64-bit elements out of the ring into obj_table, starting at
 * consumer position *cons_head* (masked to a slot index), wrapping at
 * r->size. Unrolled by 4 with a fallthrough remainder switch.
 */
static __rte_always_inline void
__rte_ring_dequeue_elems_64(struct rte_ring *r, U32 cons_head,
        void *obj_table, U32 n)
{
    unsigned int i;
    const U32 size = r->size;
    U32 idx = cons_head & r->mask;
    /* element storage begins immediately after the ring header */
    U64 *ring = (U64 *)&r[1];
    U64 *obj = (U64 *)obj_table;
    if (likely(idx + n <= size)) {
        for (i = 0; i < (n & ~0x3); i += 4, idx += 4) {
            obj[i] = ring[idx];
            obj[i + 1] = ring[idx + 1];
            obj[i + 2] = ring[idx + 2];
            obj[i + 3] = ring[idx + 3];
        }
        switch (n & 0x3) {
        case 3:
            obj[i++] = ring[idx++]; /* fallthrough */
        case 2:
            obj[i++] = ring[idx++]; /* fallthrough */
        case 1:
            obj[i++] = ring[idx++]; /* fallthrough */
        }
    } else {
        /* region wraps: copy up to the end, then restart at index 0 */
        for (i = 0; idx < size; i++, idx++)
            obj[i] = ring[idx];
        /* Start at the beginning */
        for (idx = 0; i < n; i++, idx++)
            obj[i] = ring[idx];
    }
}

/**
 * Copy *n* 128-bit elements out of the ring into obj_table, starting at
 * consumer position *cons_head*, wrapping at r->size. The fast path
 * moves two 16B elements (32 bytes) per memcpy.
 */
static __rte_always_inline void
__rte_ring_dequeue_elems_128(struct rte_ring *r, U32 cons_head,
        void *obj_table, U32 n)
{
    unsigned int i;
    const U32 size = r->size;
    U32 idx = cons_head & r->mask;
    /* element storage begins immediately after the ring header */
    rte_int128_t *ring = (rte_int128_t *)&r[1];
    rte_int128_t *obj = (rte_int128_t *)obj_table;
    if (likely(idx + n <= size)) {
        /* 32 bytes == two 16B elements per iteration */
        for (i = 0; i < (n & ~0x1); i += 2, idx += 2)
            memcpy((void *)(obj + i), (void *)(ring + idx), 32);
        switch (n & 0x1) {
        case 1:
            memcpy((void *)(obj + i), (void *)(ring + idx), 16);
        }
    } else {
        /* region wraps: copy one element at a time to the end... */
        for (i = 0; idx < size; i++, idx++)
            memcpy((void *)(obj + i), (void *)(ring + idx), 16);
        /* Start at the beginning */
        for (idx = 0; i < n; i++, idx++)
            memcpy((void *)(obj + i), (void *)(ring + idx), 16);
    }
}

/* Dispatch the actual element copy out of the ring: dedicated fast paths
 * for 8B and 16B elements, and a generic path that views everything as
 * a stream of 32-bit words (esize must be a multiple of 4). Shared by
 * the single- and multi-consumer dequeue functions.
 */
static __rte_always_inline void
__rte_ring_dequeue_elems(struct rte_ring *r, U32 cons_head,
        void *obj_table, U32 esize, U32 num)
{
    if (esize == 8) {
        __rte_ring_dequeue_elems_64(r, cons_head, obj_table, num);
        return;
    }
    if (esize == 16) {
        __rte_ring_dequeue_elems_128(r, cons_head, obj_table, num);
        return;
    }

    /* generic path: scale element counts/indices to 32-bit words */
    {
        const U32 words_per_elem = esize / sizeof(U32);
        const U32 word_idx = (cons_head & r->mask) * words_per_elem;

        __rte_ring_dequeue_elems_32(r, r->size * words_per_elem,
                word_idx, obj_table, num * words_per_elem);
    }
}


/**
 * @internal This function updates tail values.
 *
 * Completes an enqueue/dequeue: bumps the tail reference counter and,
 * only when every earlier in-flight reservation has also completed
 * (tail cnt catches up with head cnt), publishes the new tail position.
 * This is the "relaxed tail sync" of RTS mode.
 */
static __rte_always_inline void
__rte_ring_rts_update_tail(struct rte_ring_rts_headtail *ht)
{
    union __rte_ring_rts_poscnt h, ot, nt;

    /*
     * If there are other enqueues/dequeues in progress that
     * might preceded us, then don't update tail with new value.
     */

    ot.raw = rte_atomic_load_explicit(&ht->tail.raw, rte_memory_order_acquire);

    do {
        /* on 32-bit systems we have to do atomic read here */
        h.raw = rte_atomic_load_explicit(&ht->head.raw, rte_memory_order_relaxed);

        nt.raw = ot.raw;
        /* incremented tail cnt equal to head cnt means we are the last
         * outstanding op: move tail position up to the head position */
        if (++nt.val.cnt == h.val.cnt)
            nt.val.pos = h.val.pos;

    } while (rte_atomic_compare_exchange_strong_explicit(&ht->tail.raw,
            (U64 *)(uintptr_t)&ot.raw, nt.raw,
            rte_memory_order_release, rte_memory_order_acquire) == 0);
}

/**
 * @internal This function waits till head/tail distance wouldn't
 * exceed pre-defined max value.
 *
 * Spins (rte_pause) while the caller's head position *h* runs more than
 * htd_max entries ahead of the published tail; *h* is refreshed from
 * ht->head on each retry so the caller restarts from a current value.
 */
static __rte_always_inline void
__rte_ring_rts_head_wait(const struct rte_ring_rts_headtail *ht,
    union __rte_ring_rts_poscnt *h)
{
    U32 max;

    max = ht->htd_max;

    /* NOTE(review): tail.val.pos is read through the volatile union,
     * not an explicit atomic load - presumably relies on an aligned
     * 32-bit read being untorn; confirm for 32-bit targets. */
    while (h->val.pos - ht->tail.val.pos > max) {
        rte_pause();
        h->raw = rte_atomic_load_explicit(&ht->head.raw, rte_memory_order_acquire);
    }
}

/**
 * @internal This function updates the producer head for enqueue.
 *
 * Reserves up to *num* free slots by advancing rts_prod.head with a CAS
 * loop. On success *old_head receives the position where the caller must
 * start writing and *free_entries the free count observed beforehand.
 * Returns the number of slots actually reserved (0 when the ring is full
 * and behavior is RTE_RING_QUEUE_FIXED).
 */
static __rte_always_inline U32
__rte_ring_rts_move_prod_head(struct rte_ring *r, U32 num,
    enum rte_ring_queue_behavior behavior, U32 *old_head,
    U32 *free_entries)
{
    U32 n;
    union __rte_ring_rts_poscnt nh, oh;

    const U32 capacity = r->capacity;

    oh.raw = rte_atomic_load_explicit(&r->rts_prod.head.raw, rte_memory_order_acquire);

    do {
        /* Reset n to the initial burst count */
        n = num;

        /*
         * wait for prod head/tail distance,
         * make sure that we read prod head *before*
         * reading cons tail.
         */
        __rte_ring_rts_head_wait(&r->rts_prod, &oh);

        /*
         *  The subtraction is done between two unsigned 32bits value
         * (the result is always modulo 32 bits even if we have
         * *old_head > cons_tail). So 'free_entries' is always between 0
         * and capacity (which is < size).
         */
        *free_entries = capacity + r->cons.tail - oh.val.pos;

        /* check that we have enough room in ring */
        if (unlikely(n > *free_entries))
            n = (behavior == RTE_RING_QUEUE_FIXED) ?
                    0 : *free_entries;

        if (n == 0)
            break;

        /* new reservation: advance pos by n, count one more op in flight */
        nh.val.pos = oh.val.pos + n;
        nh.val.cnt = oh.val.cnt + 1;

    /*
     * this CAS(ACQUIRE, ACQUIRE) serves as a hoist barrier to prevent:
     *  - OOO reads of cons tail value
     *  - OOO copy of elems to the ring
     */
    } while (rte_atomic_compare_exchange_strong_explicit(&r->rts_prod.head.raw,
            (U64 *)(uintptr_t)&oh.raw, nh.raw,
            rte_memory_order_acquire, rte_memory_order_acquire) == 0);

    *old_head = oh.val.pos;
    return n;
}

/**
 * @internal This function updates the consumer head for dequeue
 *
 * Reserves up to *num* available entries by advancing rts_cons.head with
 * a CAS loop. On success *old_head receives the position where the caller
 * must start reading and *entries the available count observed beforehand.
 * Returns the number of entries actually reserved (0 when the ring is
 * empty and behavior is RTE_RING_QUEUE_FIXED).
 */
static __rte_always_inline unsigned int
__rte_ring_rts_move_cons_head(struct rte_ring *r, U32 num,
    enum rte_ring_queue_behavior behavior, U32 *old_head,
    U32 *entries)
{
    U32 n;
    union __rte_ring_rts_poscnt nh, oh;

    oh.raw = rte_atomic_load_explicit(&r->rts_cons.head.raw, rte_memory_order_acquire);

    /* move cons.head atomically */
    do {
        /* Restore n as it may change every loop */
        n = num;

        /*
         * wait for cons head/tail distance,
         * make sure that we read cons head *before*
         * reading prod tail.
         */
        __rte_ring_rts_head_wait(&r->rts_cons, &oh);

        /* The subtraction is done between two unsigned 32bits value
         * (the result is always modulo 32 bits even if we have
         * cons_head > prod_tail). So 'entries' is always between 0
         * and size(ring)-1.
         */
        *entries = r->prod.tail - oh.val.pos;

        /* Set the actual entries for dequeue */
        if (n > *entries)
            n = (behavior == RTE_RING_QUEUE_FIXED) ? 0 : *entries;

        if (unlikely(n == 0))
            break;

        /* new reservation: advance pos by n, count one more op in flight */
        nh.val.pos = oh.val.pos + n;
        nh.val.cnt = oh.val.cnt + 1;

    /*
     * this CAS(ACQUIRE, ACQUIRE) serves as a hoist barrier to prevent:
     *  - OOO reads of prod tail value
     *  - OOO copy of elems from the ring
     */
    } while (rte_atomic_compare_exchange_strong_explicit(&r->rts_cons.head.raw,
            (U64 *)(uintptr_t)&oh.raw, nh.raw,
            rte_memory_order_acquire, rte_memory_order_acquire) == 0);

    *old_head = oh.val.pos;
    return n;
}

/**
 * @internal Enqueue several objects on the RTS ring.
 *
 * Three-step protocol: reserve slots by moving the producer head, copy
 * the elements into the reserved region, then publish via the tail.
 *
 * @param r
 *   A pointer to the ring structure.
 * @param obj_table
 *   A pointer to a table of objects to copy in.
 * @param esize
 *   Element size in bytes; must be a multiple of 4 and match the value
 *   used when the ring was created.
 * @param n
 *   The number of objects to add to the ring from obj_table.
 * @param behavior
 *   RTE_RING_QUEUE_FIXED: enqueue all n objects or none.
 *   RTE_RING_QUEUE_VARIABLE: enqueue as many objects as possible.
 * @param free_space
 *   If non-NULL, receives the free space remaining after the enqueue.
 * @return
 *   Actual number of objects enqueued
 *   (0 or n only when behavior == RTE_RING_QUEUE_FIXED).
 */
static __rte_always_inline unsigned int
__rte_ring_do_rts_enqueue_elem(struct rte_ring *r, const void *obj_table,
    U32 esize, U32 n, enum rte_ring_queue_behavior behavior,
    U32 *free_space)
{
    U32 head;
    U32 free_cnt;

    /* step 1: reserve room by advancing the producer head */
    n = __rte_ring_rts_move_prod_head(r, n, behavior, &head, &free_cnt);

    if (n != 0) {
        /* step 2: copy elements, step 3: publish the new tail */
        __rte_ring_enqueue_elems(r, head, obj_table, esize, n);
        __rte_ring_rts_update_tail(&r->rts_prod);
    }

    if (free_space != NULL)
        *free_space = free_cnt - n;
    return n;
}

/**
 * @internal Dequeue several objects from the RTS ring.
 *
 * Three-step protocol: reserve entries by moving the consumer head, copy
 * the elements out of the reserved region, then release via the tail.
 *
 * @param r
 *   A pointer to the ring structure.
 * @param obj_table
 *   A pointer to a table of objects to copy out into.
 * @param esize
 *   Element size in bytes; must be a multiple of 4 and match the value
 *   used when the ring was created.
 * @param n
 *   The number of objects to pull from the ring.
 * @param behavior
 *   RTE_RING_QUEUE_FIXED: dequeue all n objects or none.
 *   RTE_RING_QUEUE_VARIABLE: dequeue as many objects as possible.
 * @param available
 *   If non-NULL, receives the number of entries remaining after the
 *   dequeue.
 * @return
 *   Actual number of objects dequeued
 *   (0 or n only when behavior == RTE_RING_QUEUE_FIXED).
 */
static __rte_always_inline unsigned int
__rte_ring_do_rts_dequeue_elem(struct rte_ring *r, void *obj_table,
    U32 esize, U32 n, enum rte_ring_queue_behavior behavior,
    U32 *available)
{
    U32 head;
    U32 avail_cnt;

    /* step 1: reserve entries by advancing the consumer head */
    n = __rte_ring_rts_move_cons_head(r, n, behavior, &head, &avail_cnt);

    if (n != 0) {
        /* step 2: copy elements, step 3: release via the tail */
        __rte_ring_dequeue_elems(r, head, obj_table, esize, n);
        __rte_ring_rts_update_tail(&r->rts_cons);
    }

    if (available != NULL)
        *available = avail_cnt - n;
    return n;
}

/**
 * Dequeue one pointer from the ring (RTS consumer path).
 *
 * @param r
 *   A pointer to the ring structure.
 * @param obj_p
 *   Output: receives the dequeued pointer on success.
 * @return
 *   0 on success, -ENOENT if the ring is empty.
 */
static inline int rte_ring_dequeue(struct rte_ring *r, void **obj_p)
{
    unsigned int got;

    got = __rte_ring_do_rts_dequeue_elem(r, obj_p, sizeof(void *), 1,
            RTE_RING_QUEUE_FIXED, NULL);
    return (got != 0) ? 0 : -ENOENT;
}

/**
 * Enqueue one pointer onto the ring (RTS producer path).
 *
 * @param r
 *   A pointer to the ring structure.
 * @param obj
 *   The pointer value to store.
 * @return
 *   0 on success, -ENOBUFS if the ring is full.
 */
static inline int rte_ring_enqueue(struct rte_ring *r, void *obj)
{
    unsigned int put;

    put = __rte_ring_do_rts_enqueue_elem(r, &obj, sizeof(void *), 1,
            RTE_RING_QUEUE_FIXED, NULL);
    return (put != 0) ? 0 : -ENOBUFS;
}


#endif

