// hisi_l0_fix_allocator.c
#include <linux/slab.h>
#include <linux/bitmap.h>
#include "hisi_l0_mem_pool.h"

// - get_fix_cache_level
/*
 * Map an allocation size to a fix-cache level index.
 * Level 0 serves objects up to MIN_OBJ_SIZE; each higher level doubles the
 * object size. Oversized requests are clamped to the top level.
 */
static inline int get_fix_cache_level(size_t size)
{
    int lvl;

    if (size <= MIN_OBJ_SIZE)
        return 0;

    /* Number of power-of-two doublings above MIN_OBJ_SIZE (size rounded up). */
    lvl = fls(size - 1) - fls(MIN_OBJ_SIZE - 1);
    return (lvl < FIX_CACHE_NUM - 1) ? lvl : (FIX_CACHE_NUM - 1);
}

// - fix_cache_init
/*
 * Reset a fix_cache descriptor to its empty, uninitialized state.
 * The lock and list head become valid immediately; the level only becomes
 * usable after fix_cache_level_init() populates it and sets 'initialized'.
 */
void fix_cache_init(struct fix_cache *cache)
{
    spin_lock_init(&cache->fix_cache_lock);
    INIT_LIST_HEAD(&cache->block_list);
    atomic_set(&cache->initialized, 0);
    cache->obj_size = 0;
    cache->objs_per_block = 0;
    cache->block_size = 0;
    cache->block_count = 0;
}

// - fix_allocator_init
/*
 * Prepare every fix-cache level of @pool for lazy initialization.
 * Returns 0 (cannot fail).
 */
int fix_allocator_init(numa_pool *pool)
{
    int level;

    for (level = 0; level < FIX_CACHE_NUM; level++)
        fix_cache_init(&pool->caches[level]);

    return 0;
}

// - fix_cache_block_destroy
/*
 * Tear down one cache block: return its backing memory to the flex
 * allocator (accounted to @user) and release the block descriptor.
 * Safe to call with @block == NULL.
 */
static void fix_cache_block_destroy(struct fix_cache_block *block, numa_pool *pool, int user)
{
    if (!block)
        return;

    if (block->phys_base) {
        flex_allocator_free(pool, block->phys_base, user);
        block->phys_base = 0;
    }

#ifdef USE_OBJECT_POOL
    free_to_pool(&pool->free_block_pool, block);
#else
    kfree(block);
#endif
}

// - fix_cache_block_init
/*
 * Initialize a freshly obtained block descriptor: make its list node
 * valid, record the backing physical base, and mark every object slot
 * free by clearing the allocation bitmap.
 */
static void fix_cache_block_init(struct fix_cache_block *block, phys_addr_t phys_base,
                                  unsigned int objs_per_block)
{
    INIT_LIST_HEAD(&block->list);
    block->phys_base = phys_base;
    bitmap_zero(block->bitmap, objs_per_block);
}

/*
 * Obtain a fix_cache_block descriptor, either from the pool's object pool
 * or from the slab (kzalloc zeroes it; the object-pool path presumably
 * returns a reusable descriptor — the caller re-initializes it anyway).
 * Returns NULL on failure.
 */
static struct fix_cache_block *allocate_cache_block_structure(numa_pool *pool)
{
#ifdef USE_OBJECT_POOL
    return alloc_from_pool(&pool->cache_block_pool);
#else
    return kzalloc(sizeof(struct fix_cache_block), GFP_KERNEL);
#endif
}

/*
 * Set up the in-memory state of a freshly backed cache block and, when
 * range tracking is enabled, record its [start, end) physical range so the
 * free path can resolve the owning level in O(1).
 *
 * @block->phys_base must already point at the block's backing memory.
 * Returns 0 on success. A range-track bookkeeping allocation failure is
 * tolerated silently: the free path falls back to scanning cache levels.
 *
 * Fix: 'flags' was passed to spin_lock_irqsave() without being declared,
 * which broke the build whenever USE_RANGE_TRACK was defined.
 */
static int initialize_cache_block_memory(numa_pool *pool, struct fix_cache_block *block, 
                                       size_t block_size, int level)
{
    /* NOTE(review): the bitmap is cleared for BITMAP_BITS slots here, while
     * the expand path clears objs_per_block slots — confirm the bitmap is
     * dimensioned for BITMAP_BITS in both build configurations. */
    fix_cache_block_init(block, block->phys_base, BITMAP_BITS);

#ifdef USE_RANGE_TRACK
    struct cache_range_track *range_entry = kzalloc(sizeof(struct cache_range_track), GFP_KERNEL);
    if (range_entry) {
        unsigned long flags;

        range_entry->start_addr = block->phys_base;
        range_entry->end_addr = block->phys_base + block_size;
        range_entry->cache_level = level;
        spin_lock_irqsave(&pool->cache_range_lock, flags);
        list_add_tail(&range_entry->list, &pool->cache_range_list);
        spin_unlock_irqrestore(&pool->cache_range_lock, flags);
    }
#endif

    return 0;
}

/*
 * Publish the parameters of a newly initialized cache level under its lock.
 * Returns 0 on success, -EEXIST if another thread won the initialization
 * race. 'initialized' is set last so the lock-free fast-path readers never
 * observe a half-populated level.
 */
static int setup_cache_parameters(numa_pool *pool, int level, size_t obj_size, 
                                unsigned int objs_per_block, size_t block_size)
{
    struct fix_cache *cache = &pool->caches[level];
    unsigned long flags;

    spin_lock_irqsave(&cache->fix_cache_lock, flags);

    /* Re-check under the lock: another thread may have published first. */
    if (atomic_read(&cache->initialized)) {
        spin_unlock_irqrestore(&cache->fix_cache_lock, flags);
        pr_debug("NUMA node %d: fix_cache_level_init race detected, level=%d, pool=%p\n",
                 pool->node_id, level, pool);
        return -EEXIST;
    }

    cache->obj_size = obj_size;
    cache->objs_per_block = objs_per_block;
    cache->block_size = block_size;
    cache->block_count = 1;

    /* Publish only after every field above is valid. */
    atomic_set(&cache->initialized, 1);
    spin_unlock_irqrestore(&cache->fix_cache_lock, flags);

    return 0;
}

// - fix_cache_level_init
/*
 * Lazily initialize cache level @level of @pool: allocate the first block
 * descriptor and its backing memory, publish the level's parameters, and
 * link the block into the level's block list.
 *
 * Returns the first block on success; returns NULL both on failure AND when
 * the level is already initialized — callers must re-check 'initialized' to
 * tell the two apart.
 *
 * All allocations happen outside the cache lock (flex_allocator_alloc may
 * block). If another thread wins the publication race (-EEXIST), the
 * resources prepared here are released again.
 *
 * Improvement: the four byte-identical #ifdef cleanup sequences of the
 * original are collapsed into one goto-based cleanup path; the -EEXIST and
 * generic-error branches performed identical cleanup and are merged.
 */
static struct fix_cache_block *fix_cache_level_init(numa_pool *pool, int level)
{
    struct fix_cache_block *block;
    size_t obj_size;
    phys_addr_t phys_base;
    unsigned int objs_per_block;
    size_t block_size;
    unsigned long flags;

    // Fast path: use atomic operation to check if already initialized
    if (atomic_read(&pool->caches[level].initialized))
        return NULL; // Already initialized

    // Phase 1: Prepare data (outside lock)
    obj_size = MIN_OBJ_SIZE << level;
#ifdef FIX_ALLOCATOR_BLOCK_DEFAULT_SIZE
    objs_per_block = FIX_ALLOCATOR_BLOCK_DEFAULT_SIZE / obj_size;
    block_size = FIX_ALLOCATOR_BLOCK_DEFAULT_SIZE;
#else
    objs_per_block = BITMAP_BITS;
    block_size = obj_size * objs_per_block; // Dynamically calculate block size
    // Ensure block size is at least a multiple of page size
    block_size = ALIGN(block_size, PAGE_SIZE);
#endif

    // Phase 2: Allocate memory outside the critical section
    block = allocate_cache_block_structure(pool);
    if (!block) {
        pr_err_ratelimited("NUMA node %d: fix_cache_level_init failed to allocate block, level=%d, obj_size=%zu, pool=%p\n",
               pool->node_id, level, obj_size, pool);
        return NULL;
    }

    // This is a potentially blocking operation, do it outside lock
    phys_base = flex_allocator_alloc(pool, block_size, MEM_USER_FIX_ALLOCATOR, false);
    if (!phys_base) {
        pr_err_ratelimited("NUMA node %d: fix_cache_level_init failed to allocate memory, level=%d, block_size=%zu, pool=%p\n",
               pool->node_id, level, block_size, pool);
        goto err_free_block;
    }

    block->phys_base = phys_base;

    // Initialize cache block memory
    if (initialize_cache_block_memory(pool, block, block_size, level) < 0)
        goto err_free_mem;

    // Publish cache parameters; on ANY failure (including losing the race
    // to another initializer, -EEXIST) drop everything we prepared.
    if (setup_cache_parameters(pool, level, obj_size, objs_per_block, block_size) < 0)
        goto err_free_mem;

    // Add block to cache list
    spin_lock_irqsave(&pool->caches[level].fix_cache_lock, flags);
    list_add(&block->list, &pool->caches[level].block_list);
    spin_unlock_irqrestore(&pool->caches[level].fix_cache_lock, flags);

    return block;

err_free_mem:
    flex_allocator_free(pool, phys_base, MEM_USER_FIX_ALLOCATOR);
err_free_block:
#ifdef USE_OBJECT_POOL
    free_to_pool(&pool->free_block_pool, block);
#else
    kfree(block);
#endif
    return NULL;
}

// - expand_fix_cache
/*
 * Allocate and initialize one additional cache block for @cache at @level.
 *
 * Runs on the allocation path, hence the descriptor comes from the object
 * pool or GFP_ATOMIC slab, and the backing memory from flex_allocator_alloc.
 * The new block is NOT linked into the cache's block list here — the caller
 * (expand_and_alloc_new_block) does that under the cache lock and bumps
 * block_count. Returns the new block, or NULL on allocation failure.
 */
static struct fix_cache_block *expand_fix_cache(numa_pool *pool, struct fix_cache *cache, const int level)
{
    struct fix_cache_block *new_block;
#ifdef FIX_ALLOCATOR_BLOCK_DEFAULT_SIZE
    size_t block_size = FIX_ALLOCATOR_BLOCK_DEFAULT_SIZE;
#else
    size_t block_size = cache->obj_size * BITMAP_BITS; // Dynamically calculate block size
    block_size = ALIGN(block_size, PAGE_SIZE);
#endif

#ifdef USE_OBJECT_POOL
    new_block = alloc_from_pool(&pool->cache_block_pool);
#else
    new_block = kzalloc(sizeof(struct fix_cache_block), GFP_ATOMIC);
#endif
    if (!new_block)
    {
        pr_err_ratelimited("NUMA node %d: expand_fix_cache failed to allocate block, level=%d, obj_size=%zu, pool=%p, cache=%p\n",
               pool->node_id, level, cache->obj_size, pool, cache);
        return NULL;
    }

    new_block->phys_base = flex_allocator_alloc(pool, block_size, MEM_USER_FIX_ALLOCATOR, false);
    if (!new_block->phys_base)
    {
        pr_err_ratelimited("NUMA node %d: expand_fix_cache failed to allocate memory, level=%d, block_size=%zu, pool=%p, cache=%p\n",
               pool->node_id, level, block_size, pool, cache);
#ifdef USE_OBJECT_POOL
        free_to_pool(&pool->free_block_pool, new_block);
#else
        kfree(new_block);
#endif
        return NULL;
    }

    fix_cache_block_init(new_block, new_block->phys_base, pool->caches[level].objs_per_block);

#ifdef USE_RANGE_TRACK
    /* Record the block's physical range so frees can resolve the level
     * without scanning; failure here is tolerated (slow-path scan covers it). */
    struct cache_range_track *range_entry = kzalloc(sizeof(struct cache_range_track), GFP_ATOMIC);
    if (range_entry)
    {
        range_entry->start_addr = new_block->phys_base;
        range_entry->end_addr = new_block->phys_base + block_size;
        range_entry->cache_level = level;
        unsigned long flags;
        spin_lock_irqsave(&pool->cache_range_lock, flags);
        list_add_tail(&range_entry->list, &pool->cache_range_list);
        spin_unlock_irqrestore(&pool->cache_range_lock, flags);
    }
#endif

    return new_block;
}

/*
 * Lock-free fast allocation path: try to claim a slot in the level's
 * preferred block via an atomic test_and_set on its bitmap.
 * Returns the object's physical address, or 0 to make the caller fall
 * through to the locked scan.
 *
 * NOTE(review): the preferred-block pointer is read without the cache lock;
 * fix_allocator_destroy() clears these pointers and frees the blocks under
 * the lock, so a concurrent destroy could leave this path with a dangling
 * pointer — confirm destroy cannot run concurrently with allocation.
 */
static phys_addr_t alloc_from_preferred_block(numa_pool *pool, struct fix_cache *cache, int level)
{
#ifdef USE_PREFERRED_FIX_CACHE_BLOCKS
    struct fix_cache_block *preferred_block = pool->alloc_preferred_blocks[level];
    unsigned long offset;

    if (preferred_block && preferred_block->phys_base) {
        /* Warm the bitmap cacheline for the upcoming read-modify-write. */
        prefetchw(preferred_block->bitmap);
        offset = find_first_zero_bit(preferred_block->bitmap, cache->objs_per_block);
        /* Claim atomically; if another CPU took the bit first, give up and
         * let the caller use the locked scan instead of retrying here. */
        if (offset < cache->objs_per_block && !test_and_set_bit(offset, preferred_block->bitmap)) {
            atomic_sub(cache->obj_size, &pool->free_size);
            return preferred_block->phys_base + offset * cache->obj_size;
        }
    }
#endif
    return 0;
}

/*
 * Second allocation path: scan every block of @cache under its lock for a
 * free slot. On success the winning block becomes the preferred alloc
 * block for @level. Returns the object's physical address, or 0 when all
 * blocks are full.
 */
static phys_addr_t alloc_from_cache_blocks(numa_pool *pool, struct fix_cache *cache, int level)
{
    struct fix_cache_block *blk;
    phys_addr_t obj_addr;
    unsigned long bit;
    unsigned long flags;

    /* Unlocked early-out: nothing has been added to this level yet. */
    if (cache->block_count == 0)
        return 0;

    spin_lock_irqsave(&cache->fix_cache_lock, flags);
    list_for_each_entry(blk, &cache->block_list, list) {
        if (!blk->phys_base)
            continue;

        bit = find_first_zero_bit(blk->bitmap, cache->objs_per_block);
        if (bit >= cache->objs_per_block)
            continue;
        /* Claim the slot atomically; move on if a racer beat us to it. */
        if (test_and_set_bit(bit, blk->bitmap))
            continue;

        obj_addr = blk->phys_base + bit * cache->obj_size;
        atomic_sub(cache->obj_size, &pool->free_size);
        spin_unlock_irqrestore(&cache->fix_cache_lock, flags);
        update_preferred_alloc_block(pool, level, blk);
        return obj_addr;
    }
    spin_unlock_irqrestore(&cache->fix_cache_lock, flags);

    return 0;
}

/*
 * Last-resort allocation path: grow @cache with a brand-new block, claim
 * its first slot, and link the block into the cache list.
 * Returns the object's physical address, or 0 on failure.
 *
 * The first slot is claimed BEFORE the block is published on the list, so
 * no other CPU can race for it; test_and_set_bit on a fresh bitmap should
 * therefore never fail (the error branch is defensive).
 */
static phys_addr_t expand_and_alloc_new_block(numa_pool *pool, struct fix_cache *cache, int level)
{
    struct fix_cache_block *new_block = expand_fix_cache(pool, cache, level);
    unsigned long flags;
    phys_addr_t addr;

    if (!new_block)
        return 0;

    if (test_and_set_bit(0, new_block->bitmap)) {
        pr_err_ratelimited("fix_allocator_alloc: failed to allocate first object from new block\n");
        /* NOTE(review): user id is the literal 1 here, while the other
         * frees in this file use MEM_USER_FIX_ALLOCATOR — confirm they are
         * the same value. */
        fix_cache_block_destroy(new_block, pool, 1);
        return 0;
    }

    /* Object 0 lives at the block's base address. */
    addr = new_block->phys_base;
    atomic_sub(cache->obj_size, &pool->free_size);
    update_preferred_alloc_block(pool, level, new_block);

    // Add to cache list
    spin_lock_irqsave(&cache->fix_cache_lock, flags);
    list_add(&new_block->list, &cache->block_list);
    cache->block_count++;
    spin_unlock_irqrestore(&cache->fix_cache_lock, flags);

    return addr;
}

// - fix_allocator_alloc
/*
 * Allocate one object of at least @size bytes from @pool's fix caches.
 * Returns the object's physical address, or 0 on failure.
 *
 * Paths, fastest first: lazy level init if needed, then the preferred
 * block, then a locked scan of existing blocks, then cache expansion.
 *
 * Fix: fix_cache_level_init() returns NULL both on real failure and when a
 * concurrent thread initialized the level first (-EEXIST path). The old
 * code treated both as a hard failure, so the loser of an init race
 * returned 0 even though the level was usable. Re-check 'initialized'
 * before giving up.
 */
phys_addr_t fix_allocator_alloc(numa_pool *pool, size_t size)
{
    phys_addr_t addr;
    int level = get_fix_cache_level(size);
    struct fix_cache *cache = &pool->caches[level];

    // 1. Lazy per-level initialization
    if (unlikely(!atomic_read(&cache->initialized))) {
        if (!fix_cache_level_init(pool, level) &&
            !atomic_read(&cache->initialized)) {
            pr_err_ratelimited("fix_allocator_alloc: failed to init level %d for size %zu\n", level, size);
            return 0;
        }
    }

    // 2. Fast path: try to allocate from preferred block
    addr = alloc_from_preferred_block(pool, cache, level);
    if (addr)
        return addr;

    // 3. Traverse cache blocks to find free object
    addr = alloc_from_cache_blocks(pool, cache, level);
    if (addr)
        return addr;

    // 4. Expand cache and allocate
    addr = expand_and_alloc_new_block(pool, cache, level);
    if (!addr)
        pr_err_ratelimited("fix_allocator_alloc: failed to allocate size %zu\n", size);

    return addr;
}

// - guess_fix_cache_level_of_addr
/*
 * Heuristically guess the cache level owning @addr from its alignment:
 * objects of level N start at multiples of (MIN_OBJ_SIZE << N), so the
 * largest object size @addr is aligned to is the best candidate.
 * This is only a hint — the caller falls back to an exhaustive scan when
 * the guess is wrong or no level matches. Returns a level index, or -1.
 */
static inline int guess_fix_cache_level_of_addr(phys_addr_t addr)
{
    int level;

    // For each cache level, check if the address is properly aligned
    // for that object size
    for (level = FIX_CACHE_NUM - 1; level >= 0; level--)
    {
        size_t obj_size = MIN_OBJ_SIZE << level;

        // If address is aligned to this object size, it might belong to this level
        if ((addr & (obj_size - 1)) == 0)
        {
            // This is a candidate level
            return level;
        }
    }

    // No alignment matched: return -1 so the caller falls back to
    // find_fix_cache_level_of_addr() (exhaustive scan).
    return -1;
}

// - find_fix_cache_level_of_addr
/*
 * Exhaustively locate the cache level whose blocks contain @addr.
 * Each initialized level is scanned under its own lock.
 * Returns the level index, or -1 if no block owns the address.
 */
static int find_fix_cache_level_of_addr(numa_pool *pool, phys_addr_t addr)
{
    struct fix_cache_block *blk;
    struct fix_cache *cache;
    unsigned long flags;
    int lvl;

    for (lvl = 0; lvl < FIX_CACHE_NUM; lvl++) {
        cache = &pool->caches[lvl];
        if (!atomic_read(&cache->initialized))
            continue;

        spin_lock_irqsave(&cache->fix_cache_lock, flags);
        list_for_each_entry(blk, &cache->block_list, list) {
            if (addr >= blk->phys_base &&
                addr < blk->phys_base + cache->block_size) {
                spin_unlock_irqrestore(&cache->fix_cache_lock, flags);
                return lvl;
            }
        }
        spin_unlock_irqrestore(&cache->fix_cache_lock, flags);
    }

    return -1;
}

/*
 * Resolve which cache level owns @addr for a free operation.
 * With USE_RANGE_TRACK the recorded block ranges give an exact answer;
 * otherwise an alignment-based guess is tried first, and the cache lists
 * are scanned when the guess is out of range. Returns -1 when unknown.
 */
static int get_cache_level_for_free(numa_pool *pool, phys_addr_t addr)
{
#ifdef USE_RANGE_TRACK
    struct cache_range_track *entry;
    unsigned long flags;
    int level = -1;

    spin_lock_irqsave(&pool->cache_range_lock, flags);
    list_for_each_entry(entry, &pool->cache_range_list, list) {
        if (addr < entry->start_addr || addr >= entry->end_addr)
            continue;
        level = entry->cache_level;
        break;
    }
    spin_unlock_irqrestore(&pool->cache_range_lock, flags);
    return level;

#else
    int level = guess_fix_cache_level_of_addr(addr);

    if (level < 0 || level >= FIX_CACHE_NUM)
        level = find_fix_cache_level_of_addr(pool, addr);
    return level;
#endif
}

/*
 * Fast free path: if @addr falls inside the level's preferred free block,
 * clear its bitmap bit there without taking the cache lock.
 * Returns 0 on success, -1 when the address is not in the preferred block,
 * is misaligned, or its bit was already clear.
 */
static int free_to_preferred_block(numa_pool *pool, struct fix_cache *cache, int level, phys_addr_t addr)
{
#ifdef USE_PREFERRED_FIX_CACHE_BLOCKS
    struct fix_cache_block *blk = pool->free_preferred_blocks[level];
    phys_addr_t delta;
    unsigned long idx;

    if (!blk)
        return -1;
    if (addr < blk->phys_base || addr >= blk->phys_base + cache->block_size)
        return -1;

    delta = addr - blk->phys_base;
    idx = delta / cache->obj_size;
    /* Reject out-of-range or misaligned addresses before touching the bitmap. */
    if (idx < cache->objs_per_block && delta % cache->obj_size == 0 &&
        test_and_clear_bit(idx, blk->bitmap)) {
        atomic_add(cache->obj_size, &pool->free_size);
        return 0;
    }
#endif
    return -1;
}

/*
 * Free @addr into whichever block of @cache contains it. Walks the block
 * list under the cache lock, validates the object offset, clears the
 * bitmap bit and credits the pool's free size.
 * Returns 0 on success, -1 when @addr is out of range, misaligned past the
 * last object, or was already free.
 */
static int free_to_cache_block(numa_pool *pool, struct fix_cache *cache, phys_addr_t addr, int level)
{
    struct fix_cache_block *blk;
    unsigned long idx;
    unsigned long flags;
    int rc = -1;

    spin_lock_irqsave(&cache->fix_cache_lock, flags);
    list_for_each_entry(blk, &cache->block_list, list) {
        if (addr < blk->phys_base || addr >= blk->phys_base + cache->block_size)
            continue;

        idx = (addr - blk->phys_base) / cache->obj_size;
        if (idx >= cache->objs_per_block) {
            pr_err("fix_allocator_free: invalid offset %lu\n", idx);
            break;
        }

        if (test_and_clear_bit(idx, blk->bitmap)) {
            atomic_add(cache->obj_size, &pool->free_size);
            update_preferred_free_block(pool, level, blk);
            rc = 0;
            break;
        }
        /* Bit already clear (double free?) — keep scanning, as before. */
    }
    spin_unlock_irqrestore(&cache->fix_cache_lock, flags);
    return rc;
}

/*
 * Slow free path: @addr was not found at @current_level, so probe every
 * other initialized level. Returns 0 once a level accepts the free,
 * -1 (with a rate-limited error) if no level owns the address.
 */
static int try_free_in_all_cache_levels(numa_pool *pool, phys_addr_t addr, int current_level)
{
    struct fix_cache *cache;
    int lvl;

    for (lvl = 0; lvl < FIX_CACHE_NUM; lvl++) {
        if (lvl == current_level)
            continue;

        cache = &pool->caches[lvl];
        if (!atomic_read(&cache->initialized))
            continue;

        if (!free_to_cache_block(pool, cache, addr, lvl))
            return 0;
    }

    pr_err_ratelimited("fix_allocator_free: addr %llx not found in any cache level\n", addr);
    return -1;
}

// - fix_allocator_free
/*
 * Return the object at @addr to @pool's fix caches.
 * Returns 0 on success, -1 on failure (zero address, no owning level, or
 * address not found in any cache block).
 */
int fix_allocator_free(numa_pool *pool, phys_addr_t addr)
{
    struct fix_cache *cache;
    int level;

    /* Reject the null physical address outright. */
    if (!addr) {
        pr_err_ratelimited("fix_allocator_free: invalid zero address\n");
        return -1;
    }

    /* Determine which cache level the address should belong to. */
    level = get_cache_level_for_free(pool, addr);
    if (level < 0 || level >= FIX_CACHE_NUM) {
        pr_err_ratelimited("fix_allocator_free: failed to find cache level for addr %llx\n", addr);
        return -1;
    }

    cache = &pool->caches[level];

    /* Fast path: the preferred free block of this level. */
    if (free_to_preferred_block(pool, cache, level, addr) == 0)
        return 0;

    /* Normal path: any block at this level. */
    if (free_to_cache_block(pool, cache, addr, level) == 0)
        return 0;

    /* Slow path: the level lookup may have guessed wrong — scan the rest. */
    return try_free_in_all_cache_levels(pool, addr, level);
}

// - fix_allocator_destroy
/*
 * Tear down every fix-cache level of @pool: release all cache blocks and
 * their backing memory, clear the preferred-block hints, and (with
 * USE_RANGE_TRACK) drop the recorded address ranges.
 *
 * NOTE(review): fix_cache_block_destroy() calls flex_allocator_free() and
 * kfree() while the per-level spinlock is held with IRQs off — confirm
 * flex_allocator_free() never sleeps on this path.
 * NOTE(review): alloc_from_preferred_block() reads the preferred pointers
 * without this lock, so destroy must not run concurrently with allocation;
 * verify the caller guarantees that.
 */
void fix_allocator_destroy(numa_pool *pool)
{
    int i;
    unsigned long flags;
    struct fix_cache_block *cache_block, *tmp;

    for (i = 0; i < FIX_CACHE_NUM; i++)
    {
        spin_lock_irqsave(&pool->caches[i].fix_cache_lock, flags);
#ifdef USE_PREFERRED_FIX_CACHE_BLOCKS
        // Clear preferred block pointer for this level
        pool->alloc_preferred_blocks[i] = NULL;
        pool->free_preferred_blocks[i] = NULL;
#endif
        // Only destroy cache levels that were actually initialized
        // (obj_size is nonzero exactly when fix_cache_level_init ran).
        if (pool->caches[i].obj_size != 0)
        {
            list_for_each_entry_safe(cache_block, tmp, &pool->caches[i].block_list, list)
            {
                list_del(&cache_block->list);
                fix_cache_block_destroy(cache_block, pool, 1);
            }
            atomic_set(&pool->caches[i].initialized, 0);
        }
        spin_unlock_irqrestore(&pool->caches[i].fix_cache_lock, flags);
    }

#ifdef USE_RANGE_TRACK
    // Drop all recorded block ranges used for O(1) level lookup on free.
    struct cache_range_track *range_entry, *range_tmp;
    spin_lock_irqsave(&pool->cache_range_lock, flags);
    list_for_each_entry_safe(range_entry, range_tmp, &pool->cache_range_list, list)
    {
        list_del(&range_entry->list);
        kfree(range_entry);
    }
    spin_unlock_irqrestore(&pool->cache_range_lock, flags);
#endif
}

// - preinit_fix_cache_level_of_pool
/*
 * Eagerly initialize the cache levels selected by the 'preinit_cache_levels'
 * bitmask (bit N set => initialize level N).
 *
 * Fix: fix_cache_level_init() also returns NULL when the level is already
 * initialized, so the old code logged a spurious error in that case.
 * Only report failure when the level is genuinely unusable afterwards.
 */
void preinit_fix_cache_level_of_pool(numa_pool *pool)
{
    int level;

    for (level = 0; level < FIX_CACHE_NUM; level++)
    {
        // Check if corresponding bit is set in bitmask
        if (!(preinit_cache_levels & (1 << level)))
            continue;

        if (fix_cache_level_init(pool, level) == NULL &&
            !atomic_read(&pool->caches[level].initialized))
        {
            pr_err("Failed to preallocate cache level %d\n", level);
        }
    }
}