/*
 * Copyright (c) Huawei Technologies Co., Ltd. 2021-2023. All rights reserved.
 * Description: UDK mempool header file
 * Author: -
 * Create: 2021.5.11
 */

#ifndef UDK_MEMPOOL_H
#define UDK_MEMPOOL_H

#include <sys/queue.h>
#include "securec.h"
#include "udk_common.h"
#include "udk_memzone.h"
#include "udk_spinlock.h"
#include "udk_lcore.h"
#include "udk_log.h"

#ifdef __cplusplus
extern "C" {
#endif

#ifdef UDK_MEMPOOL_DEBUG
/* Per-lcore mempool statistics, updated by the UDK_MEMPOOL_*_STAT_ADD macros. */
struct udk_mempool_debug_stats {
    uint64_t put_bulk;          /* Number of put (bulk) operations. */
    uint64_t put_objs;          /* Total number of objects put. */
    uint64_t get_success_bulk;  /* Successful get (bulk) operations. */
    uint64_t get_success_objs;  /* Objects successfully obtained. */
    uint64_t get_fail_bulk;     /* Failed get (bulk) operations. */
    uint64_t get_fail_objs;     /* Objects that could not be obtained. */
    uint64_t get_success_blks;  /* Successful gets of contiguous object blocks. */
    uint64_t get_fail_blks;     /* Failed gets of contiguous object blocks. */
} udk_cache_aligned;

#endif

#define UDK_MEMPOOL_HEADER_SIZE(mp, cs) \
    (sizeof(*(mp)) + (((cs) == 0) ? 0 : \
    (sizeof(struct udk_mempool_cache) * UDK_MAX_LCORE)))

#define UDK_MEMPOOL_CACHE_MAX_SIZE 512

#define UDK_MEMPOOL_ALIGN UDK_CACHE_LINE_SIZE
#define UDK_MEMPOOL_ALIGN_MASK (UDK_MEMPOOL_ALIGN - 1)

/* A per-lcore cache of free objects, used to avoid contended access to the backing store. */
struct udk_mempool_cache {
    uint32_t size;          /* Size of the cache */
    uint32_t flushthresh;   /* Threshold before we flush excess elements */
    uint32_t len;           /* Current cache count */
    /*
     * Cache is allocated to this size (3x the maximum cache size) to allow it
     * to overflow in certain cases to avoid needless emptying of cache.
     */
    void *objs[UDK_MEMPOOL_CACHE_MAX_SIZE * 3]; /* Cache objects */
} udk_cache_aligned;

/* Size breakdown of a mempool object as laid out in memory: header | element | trailer. */
struct udk_mempool_objsz {
    uint32_t elt_size;     /* Size of an element. */
    uint32_t header_size;  /* Size of header (before elt). */
    uint32_t trailer_size; /* Size of trailer (after elt). */
    uint32_t total_size;   /* Total size of an object (header + elt + trailer). */
};

#define UDK_MEMPOOL_MZ_MAGIC  "UDK"
#define UDK_MEMPOOL_MZ_PREFIX UDK_MEMPOOL_MZ_MAGIC "MP_"
/* Maximum length of a memory pool's name. */
#define UDK_MEMPOOL_NAMESIZE ((UDK_RING_NAMESIZE - sizeof(UDK_MEMPOOL_MZ_PREFIX)) + 1)

/* Memzone name format: "UDKMP_<name>" */
#define UDK_MEMPOOL_MZ_FORMAT UDK_MEMPOOL_MZ_PREFIX "%s"

/*
 * A singly-linked list of the memory chunks where objects are stored.
 */
STAILQ_HEAD(udk_mempool_memhdr_list, udk_mempool_memhdr);

/* Callback invoked to free a memory chunk; 'opaque' is the argument stored in the chunk header. */
typedef void (udk_mempool_memchunk_free_cb_t)(struct udk_mempool_memhdr *memhdr, void *opaque);

/* Descriptor for one memory chunk backing a mempool; linked in mp->mem_list. */
struct udk_mempool_memhdr {
    STAILQ_ENTRY(udk_mempool_memhdr) next; /* Next in list. */
    struct udk_mempool *mp;  /* The mempool owning the chunk */
    void *addr;              /* Virtual address of the chunk */
    UDK_STD_C11
    union {
        uint64_t iova;       /* IO address of the chunk */
        uint64_t phys_addr;  /* Physical address of the chunk (alias of iova) */
    };

    size_t len;              /* length of the chunk */
    udk_mempool_memchunk_free_cb_t *free_cb; /* Free callback */
    void *opaque;            /* Argument passed to the free callback */
};

/* A singly-linked list of all objects belonging to a mempool (mp->elt_list). */
STAILQ_HEAD(udk_mempool_objhdr_list, udk_mempool_objhdr);
/* Header placed immediately before each object; see udk_mempool_get_header(). */
struct udk_mempool_objhdr {
    STAILQ_ENTRY(udk_mempool_objhdr) next; /* Next in list. */
    struct udk_mempool *mp;                /* The mempool owning the object. */
    UDK_STD_C11
    union {
        uint64_t iova;                     /* IO address of the object. */
        uint64_t physaddr;                 /* deprecated - Physical address of the object. */
    };
#ifdef UDK_MEMPOOL_DEBUG
    uint64_t cookie;                       /* Debug cookie. */
#endif
};

#ifdef UDK_MEMPOOL_DEBUG
/* Trailer placed immediately after each object; see udk_mempool_get_trailer(). */
struct udk_mempool_objtlr {
    uint64_t cookie;    /* Debug cookie. */
};
#endif

/* Additional information about a mempool, filled in by the ops get_info callback. */
struct udk_mempool_info {
    /* Number of objects in the contiguous block */
    uint32_t contig_block_size;
} udk_cache_aligned;

/* The memory pool descriptor. */
struct udk_mempool {
    char name[UDK_MEMZONE_NAMESIZE]; /* Name of mempool. */
    UDK_STD_C11
    union {
        void *pool_data;         /* Ring or pool to store objects. */
        uint64_t pool_id;        /* External mempool identifier. */
    };
    void *pool_config;               /* optional args for ops alloc. */
    const struct udk_memzone *mz;    /* Memzone where pool is alloc'd. */
    uint32_t flags;              /* Flags of the mempool (UDK_MEMPOOL_F_*). */
    int socket_id;                   /* Socket id passed at create. */
    uint32_t size;                   /* Max size of the mempool. */
    uint32_t cache_size;             /* Size of per-lcore default local cache; 0 disables caching. */

    uint32_t elt_size;               /* Size of an element. */
    uint32_t header_size;            /* Size of header (before elt). */
    uint32_t trailer_size;           /* Size of trailer (after elt). */

    uint32_t private_data_size;      /* Size of private data. */
    int32_t ops_index;               /* Index into g_udk_mempool_ops_table.ops. */

    struct udk_mempool_cache *local_cache; /* Per-lcore local cache */

    uint32_t populated_size;                 /* Number of populated objects. */
    struct udk_mempool_objhdr_list elt_list; /* List of objects in pool */
    uint32_t nb_mem_chunks;                  /* Number of memory chunks */
    struct udk_mempool_memhdr_list mem_list; /* List of memory chunks */

#ifdef UDK_MEMPOOL_DEBUG
    /* Per-lcore statistics. */
    struct udk_mempool_debug_stats stats[UDK_MAX_LCORE];
#endif
} udk_cache_aligned;

#define UDK_MEMPOOL_F_NO_SPREAD      0x0001 /* Do not spread among memory channels. */
#define UDK_MEMPOOL_F_NO_CACHE_ALIGN 0x0002 /* Do not align objs on cache lines. */
#define UDK_MEMPOOL_F_SP_PUT         0x0004 /* Default put is "single-producer". */
#define UDK_MEMPOOL_F_SC_GET         0x0008 /* Default get is "single-consumer". */
#define UDK_MEMPOOL_F_POOL_CREATED   0x0010 /* Internal: pool is created. */
#define UDK_MEMPOOL_F_NO_IOVA_CONTIG 0x0020 /* Don't need IOVA contiguous objs. */
#define UDK_MEMPOOL_F_LOCK           0x0080 /* use pthread lock */
#define UDK_MEMPOOL_F_IOVA_CONTIG    0x0100 /* need IOVA contiguous objs */

/*
 * Store statistics when debugging is enabled
 */
#ifdef UDK_MEMPOOL_DEBUG
#define UDK_MEMPOOL_STAT_ADD(mp, name, n)         \
    do {                                          \
        uint32_t lcore_id = udk_lcore_id();       \
        if (lcore_id < UDK_MAX_LCORE) {           \
            mp->stats[lcore_id].name##_objs += n; \
            mp->stats[lcore_id].name##_bulk += 1; \
        }                                         \
    } while (0)

/*
 * Update contiguous-block statistics for the calling lcore.
 * Fixes two defects in the previous definition:
 *  - it was named UDK_MEMPOOL_CONFIG_BLOCKS_STAT_ADD while the non-debug
 *    stub below defines UDK_MEMPOOL_CONTIG_BLOCKS_STAT_ADD, so call sites
 *    could only compile with debugging disabled;
 *  - it accessed mp->stat, but the per-lcore array in struct udk_mempool
 *    is named 'stats'.
 */
#define UDK_MEMPOOL_CONTIG_BLOCKS_STAT_ADD(mp, name, n) \
    do {                                                \
        uint32_t lcore_id = udk_lcore_id();             \
        if (lcore_id < UDK_MAX_LCORE) {                 \
            mp->stats[lcore_id].name##_blks += n;       \
            mp->stats[lcore_id].name##_bulk += 1;       \
        }                                               \
    } while (0)
#else
#define UDK_MEMPOOL_STAT_ADD(mp, name, n) \
    do {                                  \
    } while (0)
#define UDK_MEMPOOL_CONTIG_BLOCKS_STAT_ADD(mp, name, n) \
    do {                                                \
    } while (0)
#endif

#define UDK_MEMPOOL_HEADER_COOKIE1 0xbadbadbadadd2e55ULL /* Header cookie. */
#define UDK_MEMPOOL_HEADER_COOKIE2 0xf2eef2eedadd2e55ULL /* Header cookie. */
#define UDK_MEMPOOL_TRAILER_COOKIE 0xadd2e55badbadbadULL /* Trailer cookie. */

void udk_mempool_dump(FILE *f, struct udk_mempool *mp);

/* Return the object header that immediately precedes a mempool object. */
static inline struct udk_mempool_objhdr *udk_mempool_get_header(void *obj)
{
    const size_t hdr_size = sizeof(struct udk_mempool_objhdr);

    return (struct udk_mempool_objhdr *)UDK_PTR_SUB(obj, hdr_size);
}

/* return a pointer to the mempool owning this object */
static inline struct udk_mempool *udk_mempool_from_obj(void *obj)
{
    struct udk_mempool_objhdr *hdr = udk_mempool_get_header(obj);
    return hdr->mp;
}

/* Return the trailer located right after the element data of a mempool object. */
static inline struct udk_mempool_objtlr *udk_mempool_get_trailer(void *obj)
{
    struct udk_mempool *owner = udk_mempool_from_obj(obj);

    return (struct udk_mempool_objtlr *)UDK_PTR_ADD(obj, owner->elt_size);
}

static inline udk_iova_t udk_mempool_virt2iova(const void *elt)
{
    const struct udk_mempool_objhdr *hdr;
    hdr = (const struct udk_mempool_objhdr *)UDK_PTR_SUB(elt, sizeof(*hdr));
    return hdr->iova;
}

/* Return the private data area that follows the mempool header (and per-lcore caches, if any). */
static inline void *udk_mempool_get_priv(struct udk_mempool *mp)
{
    char *base = (char *)mp;

    return base + UDK_MEMPOOL_HEADER_SIZE(mp, mp->cache_size);
}

#define UDK_MEMPOOL_OPS_NAMESIZE            32      /* Max length of ops struct name. */
#define UDK_MEMPOOL_POPULATE_F_ALIGN_OBJ    0x0001  /* Align objects on addresses multiple of total_elt_sz. */

/* Prototype for implementation specific data provisioning function. */
typedef int (* udk_mempool_alloc_t)(struct udk_mempool *mp);
/* Free the opaque private data pointed to by mp->pool_data pointer. */
typedef void (* udk_mempool_free_t)(struct udk_mempool *mp);
/* Enqueue 'n' objects into the external pool. */
typedef int (* udk_mempool_enqueue_t)(struct udk_mempool *mp, void * const *obj_table, uint32_t n);
/* Dequeue 'n' objects from the external pool; returns a negative value on failure. */
typedef int (* udk_mempool_dequeue_t)(struct udk_mempool *mp, void **obj_table, uint32_t n);
/* Dequeue a number of contiguous object blocks from the external pool. */
typedef int (* udk_mempool_dequeue_contig_blocks_t)(struct udk_mempool *mp, void **first_obj_table, uint32_t n);
/* Return the number of available objects in the external pool. */
typedef uint32_t (* udk_mempool_get_count)(const struct udk_mempool *mp);
/* Calculate memory size required to store the given number of objects; reports the
 * minimum chunk size and required alignment through the output parameters. */
typedef ssize_t(* udk_mempool_calc_mem_size_t)(const struct udk_mempool *mp, uint32_t obj_num, uint32_t pg_shift,
    size_t *min_chunk_size, size_t *align);
/* Function to be called for each populated object. */
typedef void (udk_mempool_populate_obj_cb_t)(struct udk_mempool *mp, void *opaque, void *vaddr, udk_iova_t iova);
/* Populate memory pool objects using the provided memory chunk, calling obj_cb for each object. */
typedef int (* udk_mempool_populate_t)(struct udk_mempool *mp, unsigned int max_objs, void *vaddr, udk_iova_t iova,
    size_t len, udk_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg);
/* Get some additional information about a mempool. */
typedef int (* udk_mempool_get_info_t)(const struct udk_mempool *mp, struct udk_mempool_info *info);

/* Structure defining mempool operations structure */
struct udk_mempool_ops {
    char name[UDK_MEMPOOL_OPS_NAMESIZE];    /* Name of mempool ops struct. */
    udk_mempool_alloc_t alloc;              /* Allocate private data. */
    udk_mempool_free_t free;                /* Free the external pool. */
    udk_mempool_enqueue_t enqueue;          /* Enqueue an object. */
    udk_mempool_dequeue_t dequeue;          /* Dequeue an object. */
    udk_mempool_get_count get_count;        /* Get qty of available objs. */
    /* Optional callback to calculate memory size required to store specified number of objects. */
    udk_mempool_calc_mem_size_t calc_mem_size;
    /* Optional callback to populate mempool objects using provided memory chunk. */
    udk_mempool_populate_t populate;
    /* Optional callback to get mempool info. */
    udk_mempool_get_info_t get_info;
    /* Optional callback to dequeue a number of contiguous object blocks. */
    udk_mempool_dequeue_contig_blocks_t dequeue_contig_blocks;
} udk_cache_aligned;

#define UDK_MEMPOOL_MAX_OPS_IDX 16  /* Max registered ops structs */

/* Structure storing the table of registered ops structs; each mempool refers
 * to an entry by its ops_index (see udk_mempool_get_ops()). */
struct udk_mempool_ops_table {
    udk_spinlock_t sl;     /* Spinlock for add/delete. */
    uint32_t num_ops;      /* Number of used ops structs in the table. */
    /* Storage for all possible ops structs. */
    struct udk_mempool_ops ops[UDK_MEMPOOL_MAX_OPS_IDX];
} udk_cache_aligned;

extern struct udk_mempool_ops_table g_udk_mempool_ops_table;

/* mempool operations. */
int udk_mempool_register_ops(const struct udk_mempool_ops *ops);
int udk_mempool_ops_alloc(struct udk_mempool *mp);
void udk_mempool_ops_free(struct udk_mempool *mp);
ssize_t udk_mempool_ops_calc_mem_size(const struct udk_mempool *mp, uint32_t obj_num, uint32_t pg_shift,
    size_t *min_chunk_size, size_t *align);
int udk_mempool_ops_populate(struct udk_mempool *mp, unsigned int max_objs, void *vaddr, udk_iova_t iova,
    size_t len, udk_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg);
uint32_t udk_mempool_ops_get_count(const struct udk_mempool *mp);
int udk_mempool_ops_get_info(const struct udk_mempool *mp, struct udk_mempool_info *info);
static inline struct udk_mempool_ops *udk_mempool_get_ops(int ops_index)
{
    UDK_VERIFY((ops_index >= 0) && (ops_index < UDK_MEMPOOL_MAX_OPS_IDX));

    return &g_udk_mempool_ops_table.ops[ops_index];
}

/* Enqueue 'n' objects into the pool via the mempool's registered ops. */
static inline int udk_mempool_ops_enqueue_bulk(struct udk_mempool *mp, void * const *obj_table, uint32_t n)
{
    return udk_mempool_get_ops(mp->ops_index)->enqueue(mp, obj_table, n);
}

/* Dequeue 'n' objects from the pool via the mempool's registered ops. */
static inline int udk_mempool_ops_dequeue_bulk(struct udk_mempool *mp, void **obj_table, uint32_t n)
{
    return udk_mempool_get_ops(mp->ops_index)->dequeue(mp, obj_table, n);
}

/* Register a mempool ops struct at program init time via a constructor. */
#define UDK_MEMPOOL_REGISTER_OPS(ops)           \
    UDK_INIT(mp_hdlr_init_##ops)                \
    {                                           \
        (void)udk_mempool_register_ops(&(ops));   \
    }

typedef void (udk_mempool_ctor_t)(struct udk_mempool *, void *);
typedef void (udk_mempool_obj_cb_t)(struct udk_mempool *mp, void *opaque, void *obj, uint32_t obj_idx);

/*
 * Get 'n' objects from the mempool; used internally.
 * Objects are served from the per-lcore cache when possible (refilling it
 * from the backing store first if needed), otherwise taken directly from the
 * backing store. Returns 0 on success, or the negative ops dequeue result
 * when the request cannot be satisfied.
 */
static udk_force_inline int udk_mempool_generic_get(struct udk_mempool *mp, void **obj_table, uint32_t n,
    struct udk_mempool_cache *cache)
{
    int ret;
    uint32_t index, len, req;
    void **cache_objs_tmp;

    /* No cache provided, or the request is too large to be served from cache */
    if (unlikely((cache == NULL) || (n >= cache->size))) {
        goto ring_dequeue;
    }

    cache_objs_tmp = cache->objs;

    /* Not enough cached objects: refill from the backing store so the cache
     * still holds cache->size objects after this request is served. */
    if (cache->len < n) {
        req = n + (cache->size - cache->len);

        ret = udk_mempool_ops_dequeue_bulk(mp, &cache->objs[cache->len], req);
        if (unlikely(ret < 0)) {
            /* Refill failed; fall back to dequeuing the request directly
             * from the backing store. The cache content is left untouched. */
            goto ring_dequeue;
        }

        cache->len += req;
    }

    /* Serve the request from the top of the cache (LIFO order). */
    for (index = 0, len = cache->len - 1; index < n; ++index, len--, obj_table++) {
        *obj_table = cache_objs_tmp[len];
    }

    cache->len -= n;

    return 0;

ring_dequeue:
    /* Bypass the cache: get all requested objects straight from the backing store. */
    ret = udk_mempool_ops_dequeue_bulk(mp, obj_table, n);
    return ret;
}

/* Get a pointer to the per-lcore default mempool cache, or NULL when the pool
 * has no per-lcore caches or the lcore id is out of range. */
static udk_force_inline struct udk_mempool_cache *udk_mempool_default_cache(struct udk_mempool *mp, uint32_t lcore_id)
{
    if ((mp->cache_size == 0) || (lcore_id >= UDK_MAX_LCORE)) {
        return NULL;
    }

    return &mp->local_cache[lcore_id];
}

/* Put 'n' objects back in the mempool; used internally. Objects go to the
 * per-lcore cache when one is provided and the count fits, otherwise straight
 * to the backing store. */
static udk_force_inline void udk_mempool_generic_put(struct udk_mempool *mp, void *const *obj_table, uint32_t n,
    struct udk_mempool_cache *cache)
{
    void **cache_objs_tmp;

    /* No cache provided or if put would overflow mem allocated for cache */
    if (unlikely((cache == NULL) || (n > UDK_MEMPOOL_CACHE_MAX_SIZE))) {
        goto ring_enqueue;
    }

    cache_objs_tmp = &cache->objs[cache->len];

    /* Add elements back into the cache */
    if (memcpy_s(&cache_objs_tmp[0], sizeof(void *) * n, obj_table, sizeof(void *) * n) != 0) {
        UDK_LOG(WARNING, MEMPOOL, "copy table failed!\n");
    }

    cache->len += n;

    /* The cache may temporarily exceed 'size' (objs[] is allocated 3x the max
     * cache size); once the flush threshold is crossed, spill everything above
     * 'size' back to the backing store.
     * NOTE(review): the enqueue return value is ignored here; this assumes the
     * backing store always has room for returned objects — confirm. */
    if (cache->len >= cache->flushthresh) {
        udk_mempool_ops_enqueue_bulk(mp, &cache->objs[cache->size], cache->len - cache->size);
        cache->len = cache->size;
    }

    return;

ring_enqueue:
    /* Bypass the cache: push the objects straight to the backing store. */
    udk_mempool_ops_enqueue_bulk(mp, obj_table, n);
    return;
}

/* Put 'n' objects back in the mempool, using the caller lcore's default cache when enabled. */
static udk_force_inline void udk_mempool_put_bulk(struct udk_mempool *mp, void *const *obj_table, uint32_t n)
{
    /* Only query the lcore id when the pool actually has per-lcore caches. */
    struct udk_mempool_cache *cache =
        (mp->cache_size > 0) ? udk_mempool_default_cache(mp, udk_lcore_id()) : NULL;

    udk_mempool_generic_put(mp, obj_table, n, cache);
}

void udk_mempool_put(struct udk_mempool *mp, void *obj);
int udk_mempool_get_bulk(struct udk_mempool *mp, void **obj_table, uint32_t n);
void udk_mempool_free(struct udk_mempool *mp);
uint32_t udk_mempool_avail_count(const struct udk_mempool *mp);
uint32_t udk_mempool_in_use_count(const struct udk_mempool *mp);
struct udk_mempool *udk_mempool_lookup(const char *name);

/*
When the udk_mempool_create() / udk_mempool_create_empty() interface is invoked in dlopen mode, the input parameter
cache_size should be 0. This reduces the cost of calling udk_lcore_id() in the udk_mempool_put() and
udk_mempool_get_bulk() interfaces.
*/
struct udk_mempool *udk_mempool_create(const char *name, uint32_t n, uint32_t elt_size,
    uint32_t cache_size, uint32_t private_data_size, udk_mempool_ctor_t *mp_init, void *mp_init_arg,
    udk_mempool_obj_cb_t *obj_init, void *obj_init_arg, int socket_id, uint32_t flags);
struct udk_mempool *udk_mempool_create_empty(const char *name, uint32_t n, uint32_t elt_size,
    uint32_t cache_size, uint32_t private_data_size, int socket_id, uint32_t flags);
int udk_mempool_set_ops_byname(struct udk_mempool *mp, const char *name, void *pool_config);
uint32_t udk_mempool_obj_iter(struct udk_mempool *mp, udk_mempool_obj_cb_t *obj_cb, void *obj_cb_arg);
int udk_mempool_populate_default(struct udk_mempool *mp);
int udk_mempool_get_page_size(struct udk_mempool *mp, size_t *pg_size);

#ifdef __cplusplus
}
#endif

#endif