// hisi_l0_utils.c
#include <linux/slab.h>
#include <linux/mm.h>
#include "hisi_l0_mem_pool.h"

#ifdef USE_OBJECT_POOL

// Define the ratio for shared pool size relative to total pool size
// Shared pool will be 1/SHARED_POOL_RATIO of the total pool size
#define SHARED_POOL_RATIO 4
#define MIN_SHARED_POOL_SIZE 1
#define MAX_SHARED_POOL_SIZE 128

// - destroy_object_pool
// - destroy_object_pool
/*
 * destroy_object_pool - release every object held by a pool
 * @pool: pool to tear down; NULL is a no-op
 *
 * Frees all pre-allocated objects, the pointer array itself, and resets
 * the bookkeeping fields so the pool can be re-initialized safely.
 */
static void destroy_object_pool(struct object_pool *pool)
{
    unsigned long flags;
    int idx;

    if (pool == NULL)
        return;

    /* Drain the shared stack under the lock; kfree(NULL) is a no-op. */
    spin_lock_irqsave(&pool->shared_lock, flags);
    if (pool->shared_objects != NULL) {
        for (idx = 0; idx < pool->shared_count; idx++)
            kfree(pool->shared_objects[idx]);
        kfree(pool->shared_objects);
        pool->shared_objects = NULL;
        pool->shared_count = 0;
        pool->shared_size = 0;
    }
    spin_unlock_irqrestore(&pool->shared_lock, flags);

    /* Leave the descriptor fully reset. */
    pool->obj_size = 0;
}

// - init_object_pool
/*
 * init_object_pool - set up a pool of pre-allocated, zeroed objects
 * @pool:      pool descriptor to initialize (cleared here)
 * @obj_size:  size in bytes of each pooled object; must be non-zero
 * @pool_size: nominal total pool size; the shared pool holds
 *             pool_size / SHARED_POOL_RATIO entries, clamped to
 *             [MIN_SHARED_POOL_SIZE, MAX_SHARED_POOL_SIZE]
 *
 * Returns 0 on success, -EINVAL on bad arguments, -ENOMEM if any
 * allocation fails.  On failure any partially built state is torn
 * down before returning, so the pool is left safe to re-init.
 */
static int init_object_pool(struct object_pool *pool, size_t obj_size, int pool_size)
{
    int i;
    int ret;

    if (!pool) {
        pr_err("Object pool init failed: NULL pool parameter\n");
        return -EINVAL;
    }

    if (obj_size == 0) {
        pr_err("Object pool init failed: invalid object size\n");
        return -EINVAL;
    }

    // Initialize pool structure
    memset(pool, 0, sizeof(*pool));
    spin_lock_init(&pool->shared_lock);

    /*
     * The shared pool is a fraction of the requested total (reduces the
     * memory footprint while still providing a fallback).  The max()
     * guarantees shared_size >= 1, so the array allocation below is
     * always required — no need to guard on shared_size > 0.
     */
    pool->shared_size = max(MIN_SHARED_POOL_SIZE, pool_size / SHARED_POOL_RATIO);
    pool->shared_size = min(pool->shared_size, MAX_SHARED_POOL_SIZE);
    pool->obj_size = obj_size;

    // Allocate the shared pointer array (zeroed by kcalloc)
    pool->shared_objects = kcalloc(pool->shared_size, sizeof(void *), GFP_KERNEL);
    if (!pool->shared_objects) {
        pr_err("Object pool init failed: shared objects allocation failed, obj_size=%zu, pool_size=%d, shared_size=%d\n",
               obj_size, pool_size, pool->shared_size);
        ret = -ENOMEM;
        goto err_cleanup;
    }

    // Pre-allocate objects for the shared pool
    for (i = 0; i < pool->shared_size; i++) {
        pool->shared_objects[i] = kzalloc(obj_size, GFP_KERNEL);
        if (!pool->shared_objects[i]) {
            pr_err("Object pool init failed: shared object allocation failed, obj_size=%zu, index=%d\n",
                   obj_size, i);
            ret = -ENOMEM;
            goto err_cleanup;
        }
        pool->shared_count++;
    }

    return 0;

err_cleanup:
    // destroy_object_pool frees the shared_count objects allocated so far
    destroy_object_pool(pool);
    return ret;
}

// - alloc_from_pool
/*
 * alloc_from_pool - take one object from the pool, or allocate a fresh one
 * @pool: pool to draw from; must have been set up by init_object_pool()
 *
 * Returns a zeroed buffer of pool->obj_size bytes, or NULL on failure.
 * Safe to call from atomic context: the fallback allocation tries
 * GFP_ATOMIC first and only escalates to GFP_KERNEL when the context
 * appears sleepable.
 */
void *alloc_from_pool(struct object_pool *pool)
{
    unsigned long flags;
    void *obj = NULL;

    if (unlikely(!pool)) {
        return NULL;
    }

    // Fast path: pop the most recently freed object off the shared stack
    spin_lock_irqsave(&pool->shared_lock, flags);
    if (likely(pool->shared_objects && pool->shared_count > 0 &&
               pool->shared_count <= pool->shared_size)) {
        obj = pool->shared_objects[pool->shared_count - 1];
        if (obj) {
            // Prefetch for write: callers will initialize the object next
            prefetchw(obj);
            pool->shared_count--;
        }
    }
    spin_unlock_irqrestore(&pool->shared_lock, flags);

    if (obj) {
        /*
         * Recycled objects still hold the previous user's data; clear
         * them so callers see the same zeroed memory that the kzalloc()
         * fallback below provides.  Done outside the lock to keep the
         * critical section short.
         */
        memset(obj, 0, pool->obj_size);
        return obj;
    }

    // Pool empty: fall back to the allocator, atomic-safe first
    obj = kzalloc(pool->obj_size, GFP_ATOMIC);
    if (!obj && !in_atomic() && !in_interrupt()) {
        /*
         * NOTE(review): in_atomic()/in_interrupt() cannot detect every
         * atomic context (e.g. spinlock held without CONFIG_PREEMPT_COUNT);
         * this retry is best-effort only — confirm callers' contexts.
         */
        obj = kzalloc(pool->obj_size, GFP_KERNEL);
    }

    return obj;
}
// - free_to_pool
/*
 * free_to_pool - return an object to the shared pool, or release it
 * @pool: destination pool; if NULL the object is simply kfree()d
 * @obj:  object previously obtained via alloc_from_pool(); NULL is a no-op
 *
 * The object is pushed back onto the shared stack when there is room;
 * otherwise it is freed directly.
 */
void free_to_pool(struct object_pool *pool, void *obj)
{
    unsigned long flags;
    int stored = 0;

    if (unlikely(!obj)) {
        return;
    }

    if (unlikely(!pool)) {
        kfree(obj);
        return;
    }

    // Push onto the shared stack if capacity allows
    spin_lock_irqsave(&pool->shared_lock, flags);
    if (likely(pool->shared_count < pool->shared_size && pool->shared_objects)) {
        pool->shared_objects[pool->shared_count] = obj;
        pool->shared_count++;
        stored = 1;
    }
    spin_unlock_irqrestore(&pool->shared_lock, flags);

    // Pool full or uninitialized: release the memory directly
    if (!stored) {
        kfree(obj);
    }
}

// - numa_pool_object_pools_init
/*
 * numa_pool_object_pools_init - build both object pools of a NUMA pool
 * @pool: NUMA pool whose free_block and cache_block pools are initialized
 *
 * Returns 0 on success or a negative errno.  On failure nothing is left
 * allocated: the first pool is torn down if the second one fails.
 */
int numa_pool_object_pools_init(numa_pool *pool)
{
    int err;

    if (!pool) {
        pr_err("NUMA pool is NULL\n");
        return -EINVAL;
    }

    // free_block pool: 64 pre-allocated entries
    err = init_object_pool(&pool->free_block_pool, sizeof(struct free_block), 64);
    if (err) {
        pr_err("Failed to initialize free_block_pool: %d\n", err);
        return err;
    }

    // cache_block pool: 32 pre-allocated entries
    err = init_object_pool(&pool->cache_block_pool, sizeof(struct fix_cache_block), 32);
    if (err) {
        pr_err("Failed to initialize cache_block_pool: %d\n", err);
        // Unwind the pool that already succeeded
        destroy_object_pool(&pool->free_block_pool);
        return err;
    }

    return 0;
}

// - numa_pool_object_pools_destroy
/*
 * numa_pool_object_pools_destroy - tear down both object pools
 * @pool: NUMA pool to destroy; NULL is a no-op
 *
 * Callers must guarantee no concurrent alloc/free on these pools.  The
 * spinlock taken inside destroy_object_pool() already orders the teardown;
 * the previous unpaired smp_wmb()/smp_rmb() here provided no additional
 * guarantee (barriers only have meaning when paired with barriers on
 * another CPU) and have been removed.
 */
void numa_pool_object_pools_destroy(numa_pool *pool)
{
    if (!pool) {
        return;
    }

    destroy_object_pool(&pool->free_block_pool);
    destroy_object_pool(&pool->cache_block_pool);
}
#endif

// - update_preferred_alloc_block
/*
 * update_preferred_alloc_block - publish @block as the preferred
 * allocation block for cache level @level.
 *
 * Invalid arguments (NULL pool/block, block without a physical base, or
 * an out-of-range level) are silently ignored.  The release store makes
 * the block's contents visible before readers observe the new pointer.
 */
inline void update_preferred_alloc_block(numa_pool *pool, int level,
                                         struct fix_cache_block *block)
{
#ifdef USE_PREFERRED_FIX_CACHE_BLOCKS
    if (unlikely(!pool || !block || !block->phys_base))
        return;
    if (unlikely(level < 0 || level >= FIX_CACHE_NUM))
        return;

    smp_store_release(&pool->alloc_preferred_blocks[level], block);
#endif
}

// - update_preferred_free_block
/*
 * update_preferred_free_block - publish @block as the preferred free
 * block for cache level @level.
 *
 * Invalid arguments (NULL pool/block, block without a physical base, or
 * an out-of-range level) are silently ignored.  The release store makes
 * the block's contents visible before readers observe the new pointer.
 */
inline void update_preferred_free_block(numa_pool *pool, int level,
                                        struct fix_cache_block *block)
{
#ifdef USE_PREFERRED_FIX_CACHE_BLOCKS
    if (unlikely(!pool || !block || !block->phys_base))
        return;
    if (unlikely(level < 0 || level >= FIX_CACHE_NUM))
        return;

    smp_store_release(&pool->free_preferred_blocks[level], block);
#endif
}