/*
 * SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) Huawei Technologies Co., Ltd. 2021-2023. All rights reserved.
 * Description   : udk ring header file
 */

#ifndef UDK_RING_H
#define UDK_RING_H

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#include "udk_common.h"
#include "udk_memzone.h"
#include "udk_rwlock.h"

#define UDK_TAILQ_RING_NAME "UDK_RING"
#define UDK_RING_MZ_PREFIX  "RG_"
/* The maximum length of a ring name. */
#define UDK_RING_NAMESIZE   ((UDK_MEMZONE_NAMESIZE - sizeof(UDK_RING_MZ_PREFIX)) + 1)

/* Selects all-or-nothing vs best-effort semantics for bulk operations. */
enum udk_ring_queue_behavior {
    UDK_RING_QUEUE_FIXED = 0, /* Enq/Deq a fixed number of items from a ring (or none at all) */
    UDK_RING_QUEUE_VARIABLE   /* Enq/Deq as many items as possible from ring */
};

/* Synchronization mode of one ring endpoint (producer or consumer side). */
enum udk_ring_sync_type {
    UDK_RING_SYNC_MT,     /* multi-thread safe (default, CAS-based) */
    UDK_RING_SYNC_ST,     /* single-thread only */
    UDK_RING_SYNC_MT_RTS, /* multi-thread relaxed tail sync */
    UDK_RING_SYNC_MT_HTS, /* multi-thread head/tail sync */
};

/*
 * Head/tail index pair for one endpoint of the ring (classic MT/ST modes).
 * head is advanced first to claim slots; tail is advanced (store-release in
 * udk_ring_update_tail) once the slot contents are committed.
 */
struct udk_ring_headtail {
    volatile uint32_t head; /* next index this endpoint will claim */
    volatile uint32_t tail; /* indices below tail are fully committed */
    UDK_STD_C11
    union {
        enum udk_ring_sync_type sync_type; /* sync mode of this endpoint */
        /* aliases sync_type; NOTE(review): non-zero presumably means
         * single-thread (UDK_RING_SYNC_ST == 1) — only valid when the
         * endpoint uses the MT/ST modes, verify against creation code */
        uint32_t single;
    };
};

/*
 * Position/counter pair for the RTS (relaxed tail sync) mode; the 8-byte
 * alignment on raw lets the pair be read/updated as a single 64-bit word.
 */
union udk_ring_rts_poscnt {
    uint64_t raw udk_aligned(8); /* both fields as one 64-bit value */
    struct {
        uint32_t cnt; /* NOTE(review): presumably an update counter guarding pos — confirm in the RTS update code */
        uint32_t pos; /* ring position */
    } val;
};

/* Head/tail state for one endpoint in RTS (relaxed tail sync) mode. */
struct udk_ring_rts_headtail {
    volatile union udk_ring_rts_poscnt tail; /* committed position + counter */
    enum udk_ring_sync_type sync_type;       /* expected: UDK_RING_SYNC_MT_RTS */
    uint32_t htd_max;                        /* NOTE(review): presumably max allowed head-tail distance — confirm */
    volatile union udk_ring_rts_poscnt head; /* claimed position + counter */
};

/*
 * Head/tail pair for the HTS (head/tail sync) mode; 8-byte alignment on raw
 * lets both indices be updated together with one 64-bit atomic.
 */
union udk_ring_hts_pos {
    uint64_t raw udk_aligned(8); /* both indices as one 64-bit value */
    struct {
        uint32_t head; /* claimed index */
        uint32_t tail; /* committed index */
    } pos;
};

/* Head/tail state for one endpoint in HTS (head/tail sync) mode. */
struct udk_ring_hts_headtail {
    volatile union udk_ring_hts_pos ht; /* head+tail, updated as one word */
    enum udk_ring_sync_type sync_type;  /* expected: UDK_RING_SYNC_MT_HTS */
};

#define RING_F_LOCK   0x0080 /* use pthread lock (serializes enqueue/dequeue via r->lock) */

/*
 * The ring structure. The object array (void * slots) is stored immediately
 * after this header in the same allocation (see &r[1] in the copy helpers).
 * Producer and consumer state live on separate cache lines (pad0/pad1/pad2)
 * to avoid false sharing between the two sides.
 */
struct udk_ring {
    char name[UDK_MEMZONE_NAMESIZE] udk_cache_aligned; /* ring name */
    uint32_t flags;                    /* RING_F_* creation flags */
    const struct udk_memzone *memzone; /* memzone backing this ring, if any */
    uint32_t size;                     /* number of slots; NOTE(review): mask usage implies a power of two — confirm in udk_ring_create */
    uint32_t mask;                     /* index mask, presumably size - 1 */
    uint32_t capacity;                 /* max objects the ring can hold */
    pthread_mutex_t lock;              /* taken only when RING_F_LOCK is set */

    char pad0 udk_cache_aligned; /* cache-line separator */

    /* producer status */
    UDK_STD_C11
    union {
        struct udk_ring_headtail prod;         /* MT/ST modes */
        struct udk_ring_hts_headtail hts_prod; /* HTS mode */
        struct udk_ring_rts_headtail rts_prod; /* RTS mode */
    } udk_cache_aligned;

    char pad1 udk_cache_aligned; /* cache-line separator */

    /* consumer status */
    union {
        struct udk_ring_headtail cons;         /* MT/ST modes */
        struct udk_ring_hts_headtail hts_cons; /* HTS mode */
        struct udk_ring_rts_headtail rts_cons; /* RTS mode */
    } udk_cache_aligned;

    char pad2 udk_cache_aligned; /* cache-line separator */
};

/*
 * Copy n object pointers out of the ring storage into obj_table, starting at
 * cons_head (masked). Shared by the single- and multi-consumer dequeue paths.
 * The common no-wrap case copies four slots per iteration; the wraparound
 * case splits the copy at the end of the array.
 */
static udk_force_inline void dequeue_ptrs_fun(struct udk_ring *r, uint32_t cons_head,
    void **obj_table, uint32_t n)
{
    void **slots = (void **)(&r[1]); /* object array lives right after the header */
    const uint32_t ring_size = r->size;
    size_t src = cons_head & r->mask;
    size_t dst;

    if (likely(src + n < ring_size)) {
        /* unrolled fast path: 4 pointers per iteration */
        for (dst = 0; dst < (n & ~(uint32_t)0x3); dst += 4, src += 4) {
            obj_table[dst] = slots[src];
            obj_table[dst + 1] = slots[src + 1];
            obj_table[dst + 2] = slots[src + 2];
            obj_table[dst + 3] = slots[src + 3];
        }
        /* remainder: deliberate fallthrough copies the 3/2/1 leftovers */
        switch (n & 0x3) {
        case 3:
            obj_table[dst++] = slots[src++]; /* fallthrough */
        case 2:
            obj_table[dst++] = slots[src++]; /* fallthrough */
        case 1:
            obj_table[dst++] = slots[src++];
        default:
            break;
        }
    } else {
        /* wraparound: drain to the end of the array, then restart at slot 0 */
        for (dst = 0; src < ring_size; dst++, src++)
            obj_table[dst] = slots[src];
        for (src = 0; dst < n; dst++, src++)
            obj_table[dst] = slots[src];
    }
}

/*
 * Copy n object pointers from obj_table into the ring storage, starting at
 * prod_head (masked). Shared by the single- and multi-producer enqueue paths.
 * The common no-wrap case copies four slots per iteration; the wraparound
 * case splits the copy at the end of the array.
 */
static udk_force_inline void enqueue_ptrs_fun(struct udk_ring *r, uint32_t prod_head,
    void * const * obj_table, uint32_t n)
{
    void **slots = (void **)(&r[1]); /* object array lives right after the header */
    const uint32_t ring_size = r->size;
    size_t dst = prod_head & r->mask;
    size_t src;

    if (likely(dst + n < ring_size)) {
        /* unrolled fast path: 4 pointers per iteration */
        for (src = 0; src < (n & ~(uint32_t)0x3); src += 4, dst += 4) {
            slots[dst] = obj_table[src];
            slots[dst + 1] = obj_table[src + 1];
            slots[dst + 2] = obj_table[src + 2];
            slots[dst + 3] = obj_table[src + 3];
        }
        /* remainder: deliberate fallthrough copies the 3/2/1 leftovers */
        switch (n & 0x3) {
        case 3:
            slots[dst++] = obj_table[src++]; /* fallthrough */
        case 2:
            slots[dst++] = obj_table[src++]; /* fallthrough */
        case 1:
            slots[dst++] = obj_table[src++];
        default:
            break;
        }
    } else {
        /* wraparound: fill to the end of the array, then restart at slot 0 */
        for (src = 0; dst < ring_size; src++, dst++)
            slots[dst] = obj_table[src];
        for (dst = 0; src < n; src++, dst++)
            slots[dst] = obj_table[src];
    }
}

/*
 * Publish a completed enqueue/dequeue by advancing ht->tail from old_val to
 * new_val. The store-release pairs with the load-acquire of the opposite
 * tail in udk_ring_move_prod_head/udk_ring_move_cons_head.
 *
 * @param ht       producer or consumer head/tail pair to update
 * @param old_val  head value this thread claimed (tail must reach it first)
 * @param new_val  new tail value to publish
 * @param single   non-zero for single-thread mode (no waiting needed)
 * @param enqueue  unused; kept for interface symmetry with callers
 */
static udk_force_inline void udk_ring_update_tail(struct udk_ring_headtail *ht, uint32_t old_val, uint32_t new_val,
    uint32_t single, uint32_t enqueue)
{
    UDK_REF_VAR(enqueue);

    /*
     * If there are other enqueues/dequeues in progress that preceded us,
     * we need to wait for them to complete.
     *
     * Fix: read the tail with a relaxed atomic load instead of a plain
     * volatile read, so the busy-wait is well-defined under the C11 memory
     * model and consistent with the __atomic_* usage in the rest of the
     * ring code (same change DPDK applied to rte_ring).
     */
    if (single == 0) {
        while (unlikely(__atomic_load_n(&ht->tail, __ATOMIC_RELAXED) != old_val)) {
            udk_pause();
        }
    }

    __atomic_store_n(&ht->tail, new_val, __ATOMIC_RELEASE);

    return;
}

/* Values passed as is_sp/is_sc to the do_enqueue/do_dequeue helpers. */
#define IS_SP   1 /* single producer */
#define IS_MP   0 /* multi producer */
#define IS_SC   1 /* single consumer */
#define IS_MC   0 /* multi consumer */

/*
 * Claim up to n slots on the producer side by advancing prod.head.
 * Returns the number of slots actually claimed (0 if the ring is full in
 * FIXED mode). On success, *old_head/*new_head bracket the claimed range and
 * *free_entries holds the free count observed before the claim.
 * Multi-producer mode retries the CAS until it wins; single-producer mode
 * uses a plain store.
 */
static udk_force_inline uint32_t udk_ring_move_prod_head(struct udk_ring *r, uint32_t is_sp, uint32_t n,
    enum udk_ring_queue_behavior behavior, uint32_t *old_head, uint32_t *new_head, uint32_t *free_entries)
{
    const uint32_t capacity = r->capacity;
    uint32_t cons_tail;
    uint32_t max = n; /* caller's request; each CAS retry starts from it */
    int success;

    *old_head = __atomic_load_n(&r->prod.head, __ATOMIC_RELAXED);
    do {
        n = max;

        /* keep the head load above from being reordered past the tail load below */
        __atomic_thread_fence(__ATOMIC_ACQUIRE);

        /* load-acquire synchronize with store-release of ht->tail in udk_ring_update_tail. */
        cons_tail = __atomic_load_n(&r->cons.tail, __ATOMIC_ACQUIRE);
        /* unsigned wraparound keeps this correct when the indices overflow */
        *free_entries = (capacity + cons_tail - *old_head);

        if (unlikely(n > *free_entries)) {
            /* FIXED: all-or-nothing; VARIABLE: take whatever fits */
            n = (behavior == UDK_RING_QUEUE_FIXED) ?  0 : *free_entries;
        }

        if (n == 0) {
            return 0;
        }

        *new_head = *old_head + n;
        if (is_sp != 0) {
            /* single producer: no contention, plain store suffices */
            r->prod.head = *new_head;
            success = 1;
        } else {
            /* on failure, *old_head is updated */
            success = __atomic_compare_exchange_n(&r->prod.head, old_head, *new_head,
                0, __ATOMIC_RELAXED, __ATOMIC_RELAXED);
        }
    } while (unlikely(success == 0));

    return n;
}

/*
 * Common enqueue path: claim slots, copy the object pointers in, publish the
 * new producer tail. Returns the number of objects enqueued (0 if the ring
 * is full in FIXED mode). If free_space is non-NULL it receives the number
 * of free slots remaining after this call. When the ring was created with
 * RING_F_LOCK the whole operation runs under r->lock.
 */
static udk_force_inline uint32_t udk_ring_do_enqueue(struct udk_ring *r, void * const *obj_table, uint32_t n,
    enum udk_ring_queue_behavior behavior, uint32_t is_sp, uint32_t *free_space)
{
    uint32_t head;
    uint32_t next;
    uint32_t free_items;
    const int use_lock = (r->flags & RING_F_LOCK) != 0;

    if (use_lock) {
        pthread_mutex_lock(&r->lock);
    }

    n = udk_ring_move_prod_head(r, is_sp, n, behavior, &head, &next, &free_items);
    if (n != 0) {
        enqueue_ptrs_fun(r, head, obj_table, n);
        udk_ring_update_tail(&r->prod, head, next, is_sp, 1);
    }

    if (free_space != NULL) {
        *free_space = free_items - n;
    }

    if (use_lock) {
        pthread_mutex_unlock(&r->lock);
    }
    return n;
}

/*
 * Claim up to n filled slots on the consumer side by advancing cons.head.
 * Returns the number of slots actually claimed (0 if the ring is empty in
 * FIXED mode). On success, *old_head/*new_head bracket the claimed range and
 * *entries holds the available count observed before the claim.
 * Multi-consumer mode retries the CAS until it wins; single-consumer mode
 * uses a plain store.
 */
static udk_force_inline uint32_t udk_ring_move_cons_head(struct udk_ring *r, uint32_t is_sc, uint32_t n,
    enum udk_ring_queue_behavior behavior, uint32_t *old_head, uint32_t *new_head, uint32_t *entries)
{
    uint32_t max = n; /* caller's request; each CAS retry starts from it */
    uint32_t prod_tail;
    int success;

    *old_head = __atomic_load_n(&r->cons.head, __ATOMIC_RELAXED);
    do {
        n = max;

        /* keep the head load above from being reordered past the tail load below */
        __atomic_thread_fence(__ATOMIC_ACQUIRE);

        /* this load-acquire synchronize with store-release of ht->tail in udk_ring_update_tail. */
        prod_tail = __atomic_load_n(&r->prod.tail, __ATOMIC_ACQUIRE);
        /* unsigned wraparound keeps this correct when the indices overflow */
        *entries = (prod_tail - *old_head);

        if (n > *entries) {
            /* FIXED: all-or-nothing; VARIABLE: take whatever is there */
            n = (behavior == UDK_RING_QUEUE_FIXED) ? 0 : *entries;
        }

        if (unlikely(n == 0)) {
            return 0;
        }

        *new_head = *old_head + n;
        if (is_sc != 0) {
            /* single consumer: no contention, plain store suffices */
            r->cons.head = *new_head;
            success = 1;
        } else {
            /* on failure, *old_head will be updated */
            success = __atomic_compare_exchange_n(&r->cons.head, old_head, *new_head,
                0, __ATOMIC_RELAXED, __ATOMIC_RELAXED);
        }
    } while (unlikely(success == 0));

    return n;
}

/*
 * Common dequeue path: claim filled slots, copy the object pointers out,
 * publish the new consumer tail. Returns the number of objects dequeued
 * (0 if the ring is empty in FIXED mode). If available is non-NULL it
 * receives the number of objects still in the ring after this call. When
 * the ring was created with RING_F_LOCK the whole operation runs under
 * r->lock.
 */
static udk_force_inline uint32_t udk_ring_do_dequeue(struct udk_ring *r, void **obj_table, uint32_t n,
    enum udk_ring_queue_behavior behavior, uint32_t is_sc, uint32_t *available)
{
    uint32_t cons_head, cons_next;
    uint32_t items;

    if ((r->flags & RING_F_LOCK) != 0) {
        pthread_mutex_lock(&r->lock);
    }

    /* Fix: pass is_sc through unchanged — the old (int) cast was
     * inconsistent with the enqueue path and was implicitly converted
     * straight back to uint32_t by the callee's prototype anyway. */
    n = udk_ring_move_cons_head(r, is_sc, n, behavior, &cons_head, &cons_next, &items);
    if (n == 0) {
        goto end;
    }

    dequeue_ptrs_fun(r, cons_head, obj_table, n);

    udk_ring_update_tail(&r->cons, cons_head, cons_next, is_sc, 0);

end:
    if (available != NULL) {
        *available = items - n;
    }

    if ((r->flags & RING_F_LOCK) != 0) {
        pthread_mutex_unlock(&r->lock);
    }
    return n;
}

/*
 * Enqueue exactly n objects as a multi-producer (all-or-nothing).
 * Returns n on success, 0 if there is not enough room; free_space
 * (optional, may be NULL) receives the remaining free slot count.
 */
static udk_force_inline uint32_t udk_ring_mp_enqueue_bulk(struct udk_ring *r, void * const *obj_table,
    uint32_t n, uint32_t *free_space)
{
    return udk_ring_do_enqueue(r, obj_table, n, UDK_RING_QUEUE_FIXED, IS_MP, free_space);
}

/*
 * Enqueue exactly n objects as the single producer (all-or-nothing).
 * Returns n on success, 0 if there is not enough room; free_space
 * (optional, may be NULL) receives the remaining free slot count.
 */
static udk_force_inline uint32_t udk_ring_sp_enqueue_bulk(struct udk_ring *r, void *const *obj_table,
    uint32_t n, uint32_t *free_space)
{
    return udk_ring_do_enqueue(r, obj_table, n, UDK_RING_QUEUE_FIXED, IS_SP, free_space);
}

/*
 * Dequeue exactly n objects as a multi-consumer (all-or-nothing).
 * Returns n on success, 0 if fewer than n objects are present; available
 * (optional, may be NULL) receives the count still left in the ring.
 */
static udk_force_inline uint32_t udk_ring_mc_dequeue_bulk(struct udk_ring *r, void **obj_table,
    uint32_t n, uint32_t *available)
{
    return udk_ring_do_dequeue(r, obj_table, n, UDK_RING_QUEUE_FIXED, IS_MC, available);
}

/*
 * Dequeue exactly n objects as the single consumer (all-or-nothing).
 * Returns n on success, 0 if fewer than n objects are present; available
 * (optional, may be NULL) receives the count still left in the ring.
 */
static udk_force_inline uint32_t udk_ring_sc_dequeue_bulk(struct udk_ring *r, void **obj_table,
    uint32_t n, uint32_t *available)
{
    return udk_ring_do_dequeue(r, obj_table, n, UDK_RING_QUEUE_FIXED, IS_SC, available);
}


#define RING_F_SP_ENQ 0x0001 /* The default enqueue is "single-producer". */
#define RING_F_SC_DEQ 0x0002 /* The default dequeue is "single-consumer". */

/* Create a named ring holding up to count objects; flags are RING_F_*. */
struct udk_ring *udk_ring_create(const char *name, uint32_t count, int32_t socket_id, uint32_t flags);
/* Find an existing ring by name; NULL if not found. */
struct udk_ring *udk_ring_lookup(const char *name);
/* Free a ring created with udk_ring_create. */
void udk_ring_free(struct udk_ring *r);
/* Dequeue one object into *obj_p; non-zero return presumably means failure — confirm in the .c file. */
int udk_ring_dequeue(struct udk_ring *r, void **obj_p);
/* Dequeue up to n objects; returns the number dequeued, *available gets the count left. */
uint32_t udk_ring_dequeue_burst(struct udk_ring *r, void **obj_table, uint32_t n, uint32_t *available);
/* Enqueue one object; non-zero return presumably means failure — confirm in the .c file. */
int udk_ring_enqueue(struct udk_ring *r, void *obj);
/* Non-zero if the ring is full. */
int udk_ring_full(struct udk_ring *r);
/* Dump ring state to f (requires <stdio.h> for FILE). */
void udk_ring_dump(FILE *f, const struct udk_ring *r);
/* Number of objects currently in the ring. */
uint32_t udk_ring_count(const struct udk_ring *r);
/* Number of free slots currently in the ring. */
uint32_t udk_ring_free_count(const struct udk_ring *r);

#endif /* UDK_RING_H */