// hisi_l0_numa_pool.c
#include <linux/slab.h>
#include <linux/mm.h>
#include "hisi_l0_mem_pool.h"

// - get_and_reserve_alloc_size
static size_t get_and_reserve_alloc_size(const ssize_t expect_size, const int target_node_id)
{
    struct numa_pool_set *pool_list;
    size_t node_remain_size;
    size_t max_numa_capacity;
    size_t default_numa_pool_size;
    size_t actual_alloc_size;
    size_t old_remain, new_remain;

    RCU_READ_LOCK();
    pool_list = RCU_DEREFERENCE(l0_cache_tuner.numa_pool_list);
    if (!pool_list)
    {
        RCU_READ_UNLOCK();
        return 0;
    }

    max_numa_capacity = pool_list[target_node_id].max_numa_capacity;
    default_numa_pool_size = pool_list[target_node_id].default_numa_pool_size;

    // Get current remaining size
    node_remain_size = atomic_read(&pool_list[target_node_id].numa_remain_capicity);

    if (node_remain_size < expect_size)
    {
        RCU_READ_UNLOCK();
        return 0;
    }

    // Calculate the actual allocation size based on the logic in original get_alloc_size
    if (expect_size >= default_numa_pool_size)
    {
        actual_alloc_size = expect_size;
    }
    else if (node_remain_size > default_numa_pool_size)
    {
        actual_alloc_size = default_numa_pool_size;
    }
    else
    {
        actual_alloc_size = node_remain_size;
    }

    // Atomically try to reserve the memory using cmpxchg for better concurrency
    do
    {
        old_remain = atomic_read(&pool_list[target_node_id].numa_remain_capicity);
        if (old_remain < actual_alloc_size)
        {
            // Not enough memory anymore
            RCU_READ_UNLOCK();
            return 0;
        }
        new_remain = old_remain - actual_alloc_size;
    } while (atomic_cmpxchg(&pool_list[target_node_id].numa_remain_capicity,
                            old_remain, new_remain) != old_remain);

    RCU_READ_UNLOCK();
    return actual_alloc_size;
}

// - numa_pool_init
// - numa_pool_init
//
// Brings @pool up in three stages: object pools, then the flex allocator
// backed by @pool_size bytes on @target_node_id, then the fix allocator.
// On failure the already-initialized stages are torn down in reverse order
// (goto-cleanup) and the pool is marked NUMA_POOL_INITIALIZATION_FAILED.
// Returns 0 on success or a negative errno.
static int numa_pool_init(numa_pool *pool, const size_t pool_size, const int target_node_id)
{
    int ret;

    atomic_set(&pool->initialized, NUMA_POOL_PARTIALLY_INITIALIZED);

    ret = numa_pool_object_pools_init(pool);
    if (ret)
    {
        goto fail;
    }

    if (flex_allocator_init(pool, pool_size, target_node_id) != 0)
    {
        ret = -ENOMEM;
        goto fail_object_pools;
    }

    if (fix_allocator_init(pool) != 0)
    {
        ret = -ENOMEM;
        goto fail_flex;
    }

    atomic_set(&pool->initialized, NUMA_POOL_FULLY_INITIALIZED);
    return 0;

fail_flex:
    flex_allocator_destroy(pool);
fail_object_pools:
    numa_pool_object_pools_destroy(pool);
fail:
    atomic_set(&pool->initialized, NUMA_POOL_INITIALIZATION_FAILED);
    return ret;
}

// - create_new_numa_pool
// - create_new_numa_pool
//
// Reserves capacity on @target_node_id and initializes @pool with it.
// If initialization fails, the reserved bytes are refunded to the node.
// Returns 0 on success or a negative errno.
static int create_new_numa_pool(numa_pool *pool, const ssize_t expect_size, const int target_node_id)
{
    size_t size;
    int ret;
    struct numa_pool_set *pool_list;

    // get_and_reserve_alloc_size both checks availability and reserves the
    // memory atomically, so a non-zero return guarantees the budget.
    size = get_and_reserve_alloc_size(expect_size, target_node_id);
    if (size == 0)
    {
        // %zd matches the ssize_t argument.
        pr_err_ratelimited("create_new_numa_pool get_and_reserve_alloc_size failed, expect_size %zd, target_node %d.\n",
                           expect_size,
                           target_node_id);
        return -ENOMEM;
    }

    ret = numa_pool_init(pool, size, target_node_id);
    if (ret != 0)
    {
        // Initialization failed: give the reserved bytes back to the node.
        RCU_READ_LOCK();
        pool_list = RCU_DEREFERENCE(l0_cache_tuner.numa_pool_list);
        if (pool_list)
        {
            atomic_add(size, &pool_list[target_node_id].numa_remain_capicity);
        }
        else
        {
            // The pool list went away under us; the reservation cannot be
            // refunded and is effectively lost — make that visible.
            pr_err_ratelimited("create_new_numa_pool: pool list gone, cannot refund %zu bytes on node %d.\n",
                               size, target_node_id);
        }
        RCU_READ_UNLOCK();
        return ret;
    }

    return 0;
}

// - should_prealloc_node
// - should_prealloc_node
//
// Decide whether NUMA node @node_id should receive a pre-allocated pool.
// When no node list was parsed, every node qualifies; otherwise only
// in-range nodes that were explicitly enabled do.
static bool should_prealloc_node(int node_id)
{
    bool in_range;

    if (!prealloc_nodes_parsed)
    {
        // No usable node specification: pre-allocate on all nodes.
        return true;
    }

    in_range = (node_id >= 0 && node_id < MAX_NUMA_NODES);
    return in_range ? prealloc_node_enabled[node_id] : false;
}

// - pre_alloc_numa_pool
int pre_alloc_numa_pool(void)
{
    int j;
    int ret;
    size_t alloc_size;
    struct numa_pool_set *pool_list;
    numa_pool *numa_pool_ptr;

    RCU_READ_LOCK();
    pool_list = RCU_DEREFERENCE(l0_cache_tuner.numa_pool_list);
    if (!pool_list)
    {
        RCU_READ_UNLOCK();
        pr_err("pre_alloc_numa_pool: numa_pool_list is NULL, pre-allocation failed\n");
        return -1;
    }

    for (j = 0; j < get_numa_node_count(); j++)
    {
        // Check if we should pre-allocate for this NUMA node
        if (!should_prealloc_node(j))
        {
            continue;
        }

        pr_info("Pre-allocating for NUMA node %d\n", j);
        numa_pool_ptr = &pool_list[j].numa_pools[0]; // Using pool_list
        // mutex_lock(&numa_pool_ptr->numa_pool_mutex);
        if (pre_alloc_size > 0 && pre_alloc_size <= pool_list[j].max_numa_capacity)
        {
            alloc_size = pre_alloc_size << MB_SHIFT;
        }
        else
        {
            alloc_size = pool_list[j].default_numa_pool_size;
        }

        ret = create_new_numa_pool(numa_pool_ptr, alloc_size, j);
        if (ret != 0)
        {
            // mutex_unlock(&numa_pool_ptr->numa_pool_mutex);
            RCU_READ_UNLOCK();
            pr_err("pre_alloc_numa_pool failed, node %d, alloc_size %lu.\n", j, alloc_size);
            return -1;
        }

        preinit_fix_cache_level_of_pool(numa_pool_ptr);
        // mutex_unlock(&numa_pool_ptr->numa_pool_mutex);
    }
    RCU_READ_UNLOCK();
    return 0;
}

// - can_use_mutex
static bool can_use_mutex(void)
{
    return !in_atomic() && !in_interrupt();
}

// - reset_numa_pool
// - reset_numa_pool
//
// Returns @pool to the uninitialized state under its metadata lock.
// The allocator metadata is cleared first; the write barrier then ensures
// those clears are globally visible before the state flips to
// NUMA_POOL_UNINITIALIZED, so a reader that observes the uninitialized
// state cannot see stale metadata.
static void reset_numa_pool(numa_pool *pool)
{
    unsigned long flags;
    spin_lock_irqsave(&pool->metadata_lock, flags);
    // WRITE_ONCE pairs with the lock-free READ_ONCE readers elsewhere in
    // this file (e.g. is_address_in_pool_range).
    WRITE_ONCE(pool->pool_phys, 0);
    WRITE_ONCE(pool->flex_allocator, NULL);
    WRITE_ONCE(pool->flex_allocator_nr_pages, 0);
    WRITE_ONCE(pool->capacity, 0);
    atomic_set(&pool->free_size, 0);
    smp_wmb(); // Add write memory barrier to ensure previous writes complete
    atomic_set(&pool->initialized, NUMA_POOL_UNINITIALIZED);
    spin_unlock_irqrestore(&pool->metadata_lock, flags);
}

// - alloc_from_numa_pool
// - alloc_from_numa_pool
//
// Route an allocation to the right sub-allocator: requests up to
// MAX_FIX_ALLOCATOR_SIZE go to the fixed-size allocator, anything larger
// to the flexible allocator. Returns the sub-allocator's result.
static phys_addr_t alloc_from_numa_pool(numa_pool *pool, const size_t req_size, bool should_align)
{
    if (req_size > MAX_FIX_ALLOCATOR_SIZE)
    {
        return flex_allocator_alloc(pool, req_size, MEM_USER_EXTERNAL, should_align);
    }

    return fix_allocator_alloc(pool, req_size);
}

// - free_to_numa_pool
// - free_to_numa_pool
//
// Return @mem_pa to @pool. The fixed-size allocator is offered the
// address first; if it does not own it, the flexible allocator gets it.
// Returns 0 on success, non-zero when neither allocator accepted it.
static int free_to_numa_pool(numa_pool *pool, const phys_addr_t mem_pa)
{
    int ret = fix_allocator_free(pool, mem_pa);

    if (ret != 0)
    {
        ret = flex_allocator_free(pool, mem_pa, MEM_USER_EXTERNAL);
    }
    return ret;
}

// True when @pool's tracked free space can cover @req_size.
// free_size is a signed atomic_t: guard against a transiently negative
// value, which the original unsigned comparison would have converted to a
// huge size_t and accepted.
static inline bool is_pool_sufficient(numa_pool *pool, size_t req_size)
{
    int free_size = atomic_read(&pool->free_size);

    return free_size >= 0 && (size_t)free_size >= req_size;
}

// Attempt a single allocation from @pool. Fails fast when the pool's
// tracked free space is already too small; on success, records the pool
// that served the request in *out_pool.
// Returns the physical address, or 0 on failure.
static phys_addr_t try_alloc_from_pool(numa_pool *pool, size_t req_size, bool should_align, numa_pool **out_pool)
{
    phys_addr_t pa = 0;

    if (is_pool_sufficient(pool, req_size)) {
        pa = alloc_from_numa_pool(pool, req_size, should_align);
    }
    if (pa != 0) {
        *out_pool = pool;
    }
    return pa;
}

// - find_and_alloc_from_numa_pool_list
// - find_and_alloc_from_numa_pool_list
//
// Allocates @req_size bytes from the NUMA pool list, preferring nodes
// close to @expect_node_id (order taken from the distance matrix). On
// success, *pool is set to the pool that served the request and the
// physical address is returned; returns 0 on failure.
//
// Locking: the RCU read lock protects pool_list/distance_matrix access,
// but is temporarily DROPPED around create_new_numa_pool(), which may
// sleep. NOTE(review): pool_list, distance_matrix and numa_pool_ptr are
// reused after the lock is reacquired without re-dereferencing — this
// assumes the pool list is never replaced while allocations run; confirm.
phys_addr_t find_and_alloc_from_numa_pool_list(const ssize_t req_size, numa_pool **pool,
                                            const int expect_node_id, bool should_align)
{
    phys_addr_t mem_pa = 0;
    int cur_node_id;
    int i, j;
    int ret;
    int node_count;
    numa_pool_set *pool_list;
    int **distance_matrix;
    *pool = NULL;

    RCU_READ_LOCK();

    // Access the pool list with RCU protection
    pool_list = RCU_DEREFERENCE(l0_cache_tuner.numa_pool_list);
    if (!pool_list)
    {
        pr_err("l0_cache_tuner STATUS: numa_pool_list is NULL.\n");
        RCU_READ_UNLOCK();
        return 0;
    }
    node_count = get_numa_node_count();

    // Safely access numa_distance_matrix
    distance_matrix = RCU_DEREFERENCE(l0_cache_tuner.numa_distance_matrix);
    if (unlikely(!distance_matrix))
    {
        RCU_READ_UNLOCK();
        return 0;
    }

    // Search through NUMA nodes in order of distance: row expect_node_id
    // lists node ids from nearest to farthest.
    for (j = 0; j < node_count; j++)
    {
        cur_node_id = distance_matrix[expect_node_id][j];

        for (i = 0; i < NUMA_POOL_MAX_NUM_PER_NODE; i++)
        {
            numa_pool *numa_pool_ptr = &pool_list[cur_node_id].numa_pools[i];

            // pool_phys == 0 means this slot has no backing memory yet.
            if (READ_ONCE(numa_pool_ptr->pool_phys) == 0)
            {
                size_t node_remain_capicity = atomic_read(&pool_list[cur_node_id].numa_remain_capicity);
                if (node_remain_capicity < req_size)
                {
                    // If NUMA node remaining capacity is insufficient, skip to avoid unnecessary create_new_numa_pool calls
                    continue;
                }

                // This pool is not initialized, need to create it.
                // Drop RCU first: pool creation allocates and may sleep.
                RCU_READ_UNLOCK();

                // Check if we can use mutex (not in interrupt context)
                if (!can_use_mutex())
                {
                    pr_err_ratelimited("Cannot alloc L0 in interrupt or atomic context.\n");
                    return 0;
                }

                ret = create_new_numa_pool(numa_pool_ptr, req_size, cur_node_id);
                if (ret != 0)
                {
                    // Failed to create pool, try next one (reacquire RCU
                    // before touching the list again)
                    RCU_READ_LOCK();
                    continue;
                }
                RCU_READ_LOCK();
            }

            mem_pa = try_alloc_from_pool(numa_pool_ptr, req_size, should_align, pool);
            if (mem_pa != 0) {
                RCU_READ_UNLOCK();
                return mem_pa;
            }
            // If allocation failed, continue searching
        }
    }

    RCU_READ_UNLOCK();
    pr_err_ratelimited("find_and_alloc_from_numa_pool_list failed. %zd req_size, expect_node_id %d, should_align %d\n",
                        req_size, expect_node_id, should_align);
    return 0;
}

// True when @addr lies inside [pool_phys, pool_phys + capacity) of a pool
// that has backing memory (pool_phys != 0).
// NOTE(review): pool_phys and capacity are sampled with two independent
// READ_ONCE loads, so a concurrent reset could yield a torn pair —
// confirm callers tolerate a stale hit.
static inline bool is_address_in_pool_range(numa_pool *pool, phys_addr_t addr)
{
    phys_addr_t base = READ_ONCE(pool->pool_phys);
    size_t size = READ_ONCE(pool->capacity);

    if (base == 0)
    {
        return false;
    }
    return addr >= base && addr < base + size;
}

// Thin wrapper over free_to_numa_pool; forwards its status unchanged
// (0 on success, non-zero otherwise).
static int try_free_in_pool(numa_pool *pool, phys_addr_t mem_pa)
{
    int status = free_to_numa_pool(pool, mem_pa);

    return status;
}

// Fast path: scan every pool for the one whose address range contains
// @mem_pa and try to free there.
// Returns  0 on success (*found_pool set),
//         -1 when the owning pool was found but the free failed
//            (*found_pool set so the slow path can skip it),
//         -2 when no pool's range contains the address.
static int fast_path_search(struct numa_pool_set *pool_list, phys_addr_t mem_pa, numa_pool **found_pool)
{
    int node_count = get_numa_node_count();
    int i, j;
    numa_pool *pool;

    for (j = 0; j < node_count; j++) {
        for (i = 0; i < NUMA_POOL_MAX_NUM_PER_NODE; i++) {
            pool = &pool_list[j].numa_pools[i];

            if (!is_address_in_pool_range(pool, mem_pa)) {
                continue;
            }

            // At most one pool's range covers the address, so report the
            // outcome immediately either way. (The old comment claimed
            // the search continued with the next node — it never did.)
            *found_pool = pool;
            return (try_free_in_pool(pool, mem_pa) == 0) ? 0 : -1;
        }
    }

    return -2; // Address not covered by any pool's range
}

// Exhaustive fallback: offer @mem_pa to every pool except @skip_pool
// (the one the fast path already tried).
// Returns 0 as soon as one pool accepts the free, -1 when none does.
static int slow_path_search(struct numa_pool_set *pool_list, phys_addr_t mem_pa, numa_pool *skip_pool)
{
    int node_count = get_numa_node_count();
    int node, idx;

    for (node = 0; node < node_count; node++) {
        for (idx = 0; idx < NUMA_POOL_MAX_NUM_PER_NODE; idx++) {
            numa_pool *candidate = &pool_list[node].numa_pools[idx];

            if (candidate == skip_pool) {
                continue; // Already rejected this pool in the fast path
            }

            if (try_free_in_pool(candidate, mem_pa) == 0) {
                return 0;
            }
        }
    }

    return -1; // No pool accepted the address
}

// - find_and_free_to_numa_pool_list
// - find_and_free_to_numa_pool_list
//
// Frees @mem_pa back to whichever pool owns it. The fast path range-
// matches the owning pool; if that misses, the slow path offers the
// address to every pool.
// Returns 0 on success, -EINVAL for a zero address, -1 otherwise.
int find_and_free_to_numa_pool_list(phys_addr_t mem_pa)
{
    struct numa_pool_set *pool_list;
    numa_pool *found_pool = NULL;
    int ret;

    // Input validation
    if (mem_pa == 0) {
        pr_err_ratelimited("find_and_free_to_numa_pool_list: invalid zero address\n");
        return -EINVAL;
    }

    // Use RCU read lock for fast path
    RCU_READ_LOCK();

    // Access with RCU protection
    pool_list = RCU_DEREFERENCE(l0_cache_tuner.numa_pool_list);
    if (unlikely(!pool_list)) {
        RCU_READ_UNLOCK();
        pr_err("find_and_free_to_numa_pool_list: numa_pool_list is NULL\n");
        return -1;
    }

    // Fast path: Check pools in a heuristic order
    ret = fast_path_search(pool_list, mem_pa, &found_pool);

    switch (ret) {
    case 0: // Success
        RCU_READ_UNLOCK();
        return 0;

    case -1: // Found pool but free failed
        RCU_READ_UNLOCK();
        // %pa is the portable printk format for phys_addr_t (correct on
        // both 32- and 64-bit kernels, unlike a raw %llx).
        pr_err_ratelimited("find_and_free_to_numa_pool_list: found pool but free failed for addr=%pa\n", &mem_pa);
        return -1;

    case -2: // Pool not found in fast path, continue to slow path
    default:
        break;
    }

    // Slow path: Exhaustive search through all pools
    ret = slow_path_search(pool_list, mem_pa, found_pool);

    RCU_READ_UNLOCK();

    if (ret != 0) {
        pr_err_ratelimited("find_and_free_to_numa_pool_list failed - not found for addr=%pa\n", &mem_pa);
    }

    return ret;
}

// - numa_pool_destroy
// - numa_pool_destroy
//
// Tears down @pool: destroys its allocators, waits for all in-flight RCU
// read-side critical sections to drain, then destroys the object pools
// and resets the pool's metadata.
// Returns 0 on success, -1 when @pool is NULL.
static int numa_pool_destroy(numa_pool *pool)
{
    if (!pool) {
        return -1;
    }
    fix_allocator_destroy(pool);
    flex_allocator_destroy(pool);

    // Wait for RCU readers that may still reference this pool's state
    // before the object pools backing it are freed.
    synchronize_rcu();

    numa_pool_object_pools_destroy(pool);
    reset_numa_pool(pool);
    return 0;
}

// - numa_pool_list_destroy
// - numa_pool_list_destroy
//
// Tears down every initialized pool in @pool_list, zeroes each node's
// remaining-capacity counter, and frees the list itself.
void numa_pool_list_destroy(struct numa_pool_set *pool_list)
{
    int i, j;
    numa_pool *pool;

    if (pool_list == NULL)
    {
        pr_err("numa_pool_list_destroy numa_pool_list is NULL\n");
        return;
    }

    for (j = 0; j < get_numa_node_count(); j++)
    {
        for (i = 0; i < NUMA_POOL_MAX_NUM_PER_NODE; i++)
        {
            // &pool_list[j].numa_pools[i] can never be NULL (address of an
            // array element), so only the "was this pool ever initialized"
            // check is needed. READ_ONCE pairs with reset_numa_pool's
            // WRITE_ONCE on flex_allocator.
            pool = &pool_list[j].numa_pools[i];
            if (READ_ONCE(pool->flex_allocator) != NULL)
            {
                numa_pool_destroy(pool);
            }
        }
        atomic_set(&pool_list[j].numa_remain_capicity, 0);
    }

    kfree(pool_list);
}

// - numa_pool_list_init
int numa_pool_list_init(void)
{
    int i, j, k;
    size_t default_pool_size, max_capacity_bytes;

    for (j = 0; j < get_numa_node_count(); j++)
    {
        max_capacity_bytes = (size_t)max_numa_capacity << MB_SHIFT;
        default_pool_size = (max_numa_capacity / NUMA_POOL_MAX_NUM_PER_NODE) << MB_SHIFT;

        atomic_set(&l0_cache_tuner.numa_pool_list[j].numa_remain_capicity, max_capacity_bytes);
        // l0_cache_tuner.numa_pool_list[j].numa_remain_capicity = max_capacity_bytes;
        l0_cache_tuner.numa_pool_list[j].max_numa_capacity = max_capacity_bytes;
        l0_cache_tuner.numa_pool_list[j].default_numa_pool_size = default_pool_size;

        // mutex_init(&(l0_cache_tuner.numa_pool_list[j].numa_pool_set_mutex));
        // spin_lock_init(&(l0_cache_tuner.numa_pool_list[j].numa_pool_set_spinlock));

        for (i = 0; i < NUMA_POOL_MAX_NUM_PER_NODE; i++)
        {
            numa_pool *pool = &l0_cache_tuner.numa_pool_list[j].numa_pools[i];

            pool->node_id = j;
            pool->tuner = &l0_cache_tuner;

            spin_lock_init(&(pool->metadata_lock));
            spin_lock_init(&(pool->flex_allocator_spinlock));
            INIT_LIST_HEAD(&(pool->flex_allocator_free_list));
#ifdef USE_RANGE_TRACK
            INIT_LIST_HEAD(&(pool->cache_range_list));
            spin_lock_init(&(pool->cache_range_lock));
#endif

#ifdef USE_PREFERRED_FIX_CACHE_BLOCKS
            // Initialize preferred blocks array
            for (k = 0; k < FIX_CACHE_NUM; k++)
            {
                pool->alloc_preferred_blocks[k] = NULL;
                pool->free_preferred_blocks[k] = NULL;
            }
#endif

            for (k = 0; k < FIX_CACHE_NUM; k++)
            {
                fix_cache_init(&pool->caches[k]);
            }

            reset_numa_pool(pool);
        }
    }

    return 0;
}