/*
 * Copyright (c) Huawei Technologies Co., Ltd. 2021-2023. All rights reserved.
 * Description: udk mempool_ops source file
 * Author: -
 * Create: 2021.07.14
 */
#include <errno.h>

#include "securec.h"
#include "udk_spinlock.h"
#include "udk_mempool.h"

/*
 * Global registry of mempool ops implementations.
 * Writers (udk_mempool_register_ops) serialize on the embedded spinlock;
 * entries are append-only and never removed once registered.
 */
struct udk_mempool_ops_table g_udk_mempool_ops_table = {
    .sl = UDK_SPINLOCK_INITIALIZER,
    .num_ops = 0
};

/*
 * Add a new ops struct in g_udk_mempool_ops_table and return its index.
 * Returns -EINVAL if h is NULL or a mandatory callback is missing,
 * -ENOSPC if the table is full, -EEXIST if the name is too long
 * (error code kept for DPDK rte_mempool_register_ops() compatibility).
 */
int udk_mempool_register_ops(const struct udk_mempool_ops *h)
{
    struct udk_mempool_ops *ops = NULL;
    uint32_t ops_index;

    /* h is dereferenced below; reject NULL before taking the lock. */
    if (h == NULL) {
        UDK_LOG(ERR, MEMPOOL, "Missing callback while registering mempool ops\n");
        return -EINVAL;
    }

    udk_spinlock_lock(&g_udk_mempool_ops_table.sl);

    if (g_udk_mempool_ops_table.num_ops >= UDK_MEMPOOL_MAX_OPS_IDX) {
        udk_spinlock_unlock(&g_udk_mempool_ops_table.sl);
        UDK_LOG(ERR, MEMPOOL, "Maximum number of mempool ops structs exceeded\n");
        return -ENOSPC;
    }

    /* alloc/enqueue/dequeue/get_count are mandatory; all other hooks are optional. */
    if (h->alloc == NULL || h->enqueue == NULL ||
        h->dequeue == NULL || h->get_count == NULL) {
        udk_spinlock_unlock(&g_udk_mempool_ops_table.sl);
        UDK_LOG(ERR, MEMPOOL, "Missing callback while registering mempool ops\n");
        return -EINVAL;
    }

    if (strlen(h->name) >= (sizeof(ops->name) - 1)) {
        udk_spinlock_unlock(&g_udk_mempool_ops_table.sl);
        /* Log level/module aligned with the other error paths above. */
        UDK_LOG(ERR, MEMPOOL, "%s(): mempool_ops <%s>: name too long\n", __func__, h->name);
        return -EEXIST;
    }

    ops_index = g_udk_mempool_ops_table.num_ops++;
    ops = &g_udk_mempool_ops_table.ops[ops_index];
    /* Bound of sizeof(ops->name) - 1 guarantees NUL termination. */
    if (snprintf_s(ops->name, sizeof(ops->name), sizeof(ops->name) - 1, "%s", h->name) < 0) {
        UDK_LOG(WARNING, MEMPOOL, "printf ops name failed\n");
    }
    ops->alloc = h->alloc;
    ops->free = h->free;
    ops->enqueue = h->enqueue;
    ops->dequeue = h->dequeue;
    ops->get_count = h->get_count;
    ops->dequeue_contig_blocks = h->dequeue_contig_blocks;
    ops->calc_mem_size = h->calc_mem_size;
    ops->populate = h->populate;
    ops->get_info = h->get_info;

    udk_spinlock_unlock(&g_udk_mempool_ops_table.sl);

    return (int)ops_index;
}

/* Invoke the mandatory alloc callback of the pool's registered ops. */
int udk_mempool_ops_alloc(struct udk_mempool *mp)
{
    struct udk_mempool_ops *ops = udk_mempool_get_ops(mp->ops_index);

    return ops->alloc(mp);
}

/*
 * Invoke the free callback of the pool's registered ops.
 * free is an optional hook; silently a no-op when the driver did not set it.
 */
void udk_mempool_ops_free(struct udk_mempool *mp)
{
    struct udk_mempool_ops *ops = udk_mempool_get_ops(mp->ops_index);

    /* Braced and without a redundant trailing return, per file style. */
    if (ops->free != NULL) {
        ops->free(mp);
    }
}

/* Invoke the mandatory get_count callback of the pool's registered ops. */
uint32_t udk_mempool_ops_get_count(const struct udk_mempool *mp)
{
    struct udk_mempool_ops *ops = udk_mempool_get_ops(mp->ops_index);

    return ops->get_count(mp);
}

/*
 * Compute the amount of memory required to store obj_num objects of this
 * mempool, keeping chunk_reserve bytes free at the start of each chunk.
 * pg_shift == 0 means no page-boundary constraint (one contiguous run);
 * otherwise objects must fit within (1 << pg_shift)-byte pages.
 * Stores the minimum chunk size and required alignment through
 * min_chunk_size/align; returns the total size in bytes, or -EINVAL when
 * chunk_reserve does not fit in a single page.
 */
static ssize_t udk_mempool_op_calc_mem_size_helper(const struct udk_mempool *mp, uint32_t obj_num, uint32_t pg_shift,
    size_t chunk_reserve, size_t *min_chunk_size, size_t *align)
{
    size_t total_elt_size;
    size_t obj_per_pg, pg_size, objs_in_last_pg;
    size_t mem_size;

    /* Per-object footprint: header + element + trailer. */
    total_elt_size = mp->header_size + mp->elt_size + mp->trailer_size;
    if (total_elt_size == 0) {
        mem_size = 0;
    } else if (pg_shift == 0) {
        /* No page constraint: one dense run plus the reserved prefix. */
        mem_size = total_elt_size * obj_num + chunk_reserve;
    } else {
        pg_size = (size_t)1 << pg_shift;
        if (chunk_reserve >= pg_size) {
            return -EINVAL;
        }

        /* Objects that fit in one page after the reserved prefix. */
        obj_per_pg = (pg_size - chunk_reserve) / total_elt_size;
        if (obj_per_pg == 0) {
            /* Object larger than a page: give each object its own
             * page-aligned chunk. */
            mem_size = UDK_ALIGN_CEIL(total_elt_size + chunk_reserve, pg_size) * obj_num;
        } else {
            objs_in_last_pg = ((obj_num - 1) % obj_per_pg) + 1;
            /* room required for the last page */
            mem_size = objs_in_last_pg * total_elt_size + chunk_reserve;
            /* room required for other pages */
            mem_size += ((obj_num - objs_in_last_pg) / obj_per_pg) << pg_shift;
            /* padding align */
            mem_size += total_elt_size - 1;
        }
    }

    *min_chunk_size = total_elt_size;
    *align = UDK_MEMPOOL_ALIGN;

    return (ssize_t)mem_size;
}

/* Default calc_mem_size hook: the generic helper with no per-chunk reservation. */
static ssize_t udk_mempool_op_calc_mem_size_default(const struct udk_mempool *mp, uint32_t obj_num, uint32_t pg_shift,
    size_t *min_chunk_size, size_t *align)
{
    const size_t chunk_reserve = 0;

    return udk_mempool_op_calc_mem_size_helper(mp, obj_num, pg_shift, chunk_reserve, min_chunk_size, align);
}

/* Dispatch calc_mem_size to the driver hook, or the built-in default if unset. */
ssize_t udk_mempool_ops_calc_mem_size(const struct udk_mempool *mp, uint32_t obj_num, uint32_t pg_shift,
    size_t *min_chunk_size, size_t *align)
{
    struct udk_mempool_ops *ops = udk_mempool_get_ops(mp->ops_index);

    if (ops->calc_mem_size != NULL) {
        return ops->calc_mem_size(mp, obj_num, pg_shift, min_chunk_size, align);
    }

    return udk_mempool_op_calc_mem_size_default(mp, obj_num, pg_shift, min_chunk_size, align);
}

/* Return -1 when [obj, obj + elt_sz) would cross a pg_sz boundary, else 0. */
static int check_obj_bounds(char *obj, size_t pg_sz, size_t elt_sz)
{
    int crosses;

    /* No paging constraint, or the object cannot fit in a page anyway. */
    if (pg_sz == 0 || elt_sz > pg_sz) {
        return 0;
    }

    crosses = (UDK_PTR_ALIGN(obj, pg_sz) != UDK_PTR_ALIGN(obj + elt_sz - 1, pg_sz));
    return crosses ? -1 : 0;
}

/*
 * Carve up to max_objs objects out of the [vaddr, vaddr + len) region,
 * invoking obj_cb on each and enqueueing it into the pool. Objects are
 * laid out so they never cross page boundaries; with
 * UDK_MEMPOOL_POPULATE_F_ALIGN_OBJ set, each object start is additionally
 * aligned to the total element size. Returns the number of objects
 * populated, or a negative errno if the page size cannot be determined or
 * the element size is zero.
 */
static int udk_mempool_op_populate_helper(struct udk_mempool *mp, uint32_t flags, uint32_t max_objs, void *vaddr,
    udk_iova_t iova, size_t len, udk_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg)
{
    char *va = vaddr;
    size_t total_elt_sz, pg_sz;
    size_t off;
    uint32_t i;
    void *obj = NULL;
    int ret;

    ret = udk_mempool_get_page_size(mp, &pg_sz);
    if (ret < 0) {
        return ret;
    }

    /* Per-object footprint: header + element + trailer. */
    total_elt_sz = mp->header_size + mp->elt_size + mp->trailer_size;
    if (total_elt_sz == 0) {
        return -EINVAL;
    }

    if (flags & UDK_MEMPOOL_POPULATE_F_ALIGN_OBJ) {
        /* Padding needed to round va up to a multiple of total_elt_sz. */
        off = total_elt_sz - (((uintptr_t)(va - 1) % total_elt_sz) + 1);
    } else {
        off = 0;
    }

    for (i = 0; i < max_objs; i++) {
        /* avoid objects to cross page boundaries */
        if (check_obj_bounds(va + off, pg_sz, total_elt_sz) < 0) {
            /* Skip ahead to the next page boundary... */
            off += UDK_PTR_ALIGN_CEIL(va + off, pg_sz) - (va + off);
            if (flags & UDK_MEMPOOL_POPULATE_F_ALIGN_OBJ) {
                /* ...and re-align to total_elt_sz if requested. */
                off += total_elt_sz - (((uintptr_t)(va + off - 1) % total_elt_sz) + 1);
            }
        }

        /* Stop when the next object would overrun the region. */
        if ((off + total_elt_sz) > len) {
            break;
        }

        /* The object pointer handed out skips the per-object header. */
        off += mp->header_size;
        obj = va + off;
        obj_cb(mp, obj_cb_arg, obj, (iova == UDK_BAD_IOVA) ? UDK_BAD_IOVA : (iova + off));
        (void)udk_mempool_ops_enqueue_bulk(mp, &obj, 1);
        off += mp->elt_size + mp->trailer_size;
    }

    return (int)i;
}

/* Default populate hook: the generic helper with no alignment flags. */
static int udk_mempool_op_populate_default(struct udk_mempool *mp, uint32_t max_objs, void *vaddr, udk_iova_t iova,
    size_t len, udk_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg)
{
    const uint32_t no_flags = 0;

    return udk_mempool_op_populate_helper(mp, no_flags, max_objs, vaddr, iova, len, obj_cb, obj_cb_arg);
}

/* Dispatch populate to the driver hook, or the built-in default if unset. */
int udk_mempool_ops_populate(struct udk_mempool *mp, unsigned int max_objs, void *vaddr, udk_iova_t iova,
    size_t len, udk_mempool_populate_obj_cb_t *obj_cb, void *obj_cb_arg)
{
    struct udk_mempool_ops *ops = udk_mempool_get_ops(mp->ops_index);

    if (ops->populate != NULL) {
        return ops->populate(mp, max_objs, vaddr, iova, len, obj_cb, obj_cb_arg);
    }

    return udk_mempool_op_populate_default(mp, max_objs, vaddr, iova, len, obj_cb, obj_cb_arg);
}

/* Sets mempool ops previously registered by udk_mempool_register_ops. */
int udk_mempool_set_ops_byname(struct udk_mempool *mp, const char *name, void *pool_config)
{
    struct udk_mempool_ops *ops = NULL;
    uint32_t i;

    /* The mempool is already populated. */
    if (mp->flags & UDK_MEMPOOL_F_POOL_CREATED) {
        return -EEXIST;
    }

    for (i = 0; i < g_udk_mempool_ops_table.num_ops; i++) {
        if (!strcmp(name, g_udk_mempool_ops_table.ops[i].name)) {
            ops = &g_udk_mempool_ops_table.ops[i];
            break;
        }
    }

    if (ops == NULL) {
        return -EINVAL;
    }

    mp->ops_index = (int32_t)i;
    mp->pool_config = pool_config;
    return 0;
}

int udk_mempool_ops_get_info(const struct udk_mempool *mp, struct udk_mempool_info *info)
{
    struct udk_mempool_ops *ops = udk_mempool_get_ops(mp->ops_index);

    if (!ops->get_info) {
        return -ENOTSUP;
    }
    return ops->get_info(mp, info);
}
