// hisi_l0_flex_allocator.c
#include <linux/slab.h>
#include <linux/mm.h>
#include "hisi_l0_mem_pool.h"
#include "hisi_l3t.h"

#define MIN_BLOCK_SIZE_FOR_SPLIT 128

/*
 * Create the single free block covering the whole contiguous region and
 * publish the pool metadata. Returns 0 on success, -ENOMEM if the block
 * descriptor cannot be allocated.
 */
static int init_initial_free_block(numa_pool *pool, struct page *alloc_pages, 
                                 size_t size, int target_node_id)
{
    struct free_block *block;
    unsigned long irq_flags;

    /* Allocate the descriptor for the initial (whole-pool) free block. */
#ifdef USE_OBJECT_POOL
    block = alloc_from_pool(&pool->free_block_pool);
#else
    block = kzalloc(sizeof(*block), GFP_KERNEL);
#endif
    if (!block) {
        pr_err("NUMA node %d: Failed to allocate initial free block for flex_allocator system, pool=%p\n", 
               target_node_id, pool);
        return -ENOMEM;
    }

    /* The block spans the entire contiguous allocation. */
    block->start = page_to_phys(alloc_pages);
    block->size = size;
    block->state = BLOCK_FREE;
    INIT_LIST_HEAD(&block->list);

    /* Publish pool-wide metadata under the metadata lock. */
    spin_lock_irqsave(&pool->metadata_lock, irq_flags);
    pool->capacity = size;
    pool->flex_allocator_nr_pages = size >> PAGE_SHIFT;
    pool->flex_allocator = alloc_pages;
    pool->pool_phys = block->start;
    spin_unlock_irqrestore(&pool->metadata_lock, irq_flags);
    atomic_set(&pool->free_size, pool->capacity);

    /* Hand the block over to the allocator's free list. */
    spin_lock_irqsave(&pool->flex_allocator_spinlock, irq_flags);
    list_add_tail(&block->list, &pool->flex_allocator_free_list);
    spin_unlock_irqrestore(&pool->flex_allocator_spinlock, irq_flags);

    return 0;
}

// - flex_allocator_init
int flex_allocator_init(numa_pool *pool, const size_t size, const int target_node_id)
{
    int ret;
    struct page *alloc_pages;
    unsigned long nr_pages;
    void *vaddr;
    nr_pages = size >> PAGE_SHIFT;

    // Allocate contiguous physical memory
    alloc_pages = alloc_contig_pages(nr_pages, GFP_KERNEL, target_node_id, NULL);
    if (!alloc_pages)
    {
        pr_err("NUMA node %d: Failed to allocate flex_allocator pool, nr_pages=%lu, size=%zu\n", 
               target_node_id, nr_pages, size);
        return -ENOMEM;
    }

    // Get virtual address and warm up cache
    vaddr = page_address(alloc_pages);
    if (vaddr)
    {
        // Use kernel-provided cache-friendly memory initialization
        memset(vaddr, 0, size); // This triggers cache loading
    }

    ret = l3t_shared_lock(target_node_id, page_to_pfn(alloc_pages), size);
    if (ret)
    {
        pr_err("NUMA node %d: Failed to allocate flex_allocator pool, nr_pages=%lu, size=%zu\n", 
               target_node_id, nr_pages, size);
        free_contig_range(page_to_pfn(alloc_pages), nr_pages);
        return -ENOMEM;
    }

    ret = init_initial_free_block(pool, alloc_pages, size, target_node_id);
    if (ret) {
        l3t_shared_unlock(target_node_id, page_to_pfn(alloc_pages), size);
        free_contig_range(page_to_pfn(alloc_pages), nr_pages);
        return ret;
    }

    return 0;
}

/*
 * Best-fit scan of the free list for a block that can satisfy @req_size.
 * With @should_align, the block must also fit the request at the next
 * HUGEPAGE_ALIGN_SIZE boundary inside it. Returns the smallest suitable
 * BLOCK_FREE entry, or NULL. The pointer is returned after the lock is
 * dropped; callers rely on higher-level serialization for safe use.
 */
static struct free_block *find_best_free_block(numa_pool *pool, size_t req_size, bool should_align)
{
    struct free_block *candidate;
    struct free_block *winner = NULL;
    size_t winner_size = pool->capacity + 1; /* sentinel: larger than any block */
    unsigned long irq_flags;

    spin_lock_irqsave(&pool->flex_allocator_spinlock, irq_flags);

    if (list_empty(&pool->flex_allocator_free_list)) {
        spin_unlock_irqrestore(&pool->flex_allocator_spinlock, irq_flags);
        pr_err_ratelimited("NUMA node %d: find_best_free_block failed - free list empty\n",
                           pool->node_id);
        return NULL;
    }

    list_for_each_entry(candidate, &pool->flex_allocator_free_list, list) {
        bool fits;

        if (candidate->state != BLOCK_FREE)
            continue;

        if (should_align) {
            /* Room needed: gap up to the alignment boundary + aligned size */
            phys_addr_t boundary = ALIGN(candidate->start, HUGEPAGE_ALIGN_SIZE);
            size_t gap = boundary - candidate->start;

            fits = candidate->size >= gap + ALIGN(req_size, HUGEPAGE_ALIGN_SIZE);
        } else {
            fits = candidate->size >= req_size;
        }

        if (fits && candidate->size < winner_size) {
            winner = candidate;
            winner_size = candidate->size;
        }
    }

    spin_unlock_irqrestore(&pool->flex_allocator_spinlock, irq_flags);
    return winner;
}

/*
 * Insert @block into @free_list keeping it sorted by physical start
 * address (ascending), so adjacent blocks can later be coalesced.
 *
 * Fixes a list-corruption bug: the original code used `break` after the
 * in-order insertion and then fell through to list_add_tail(), linking
 * the same node into the list twice.
 */
static void insert_free_block_sorted(struct list_head *free_list, struct free_block *block)
{
    struct free_block *pos;

    list_for_each_entry(pos, free_list, list) {
        if (pos->start > block->start) {
            /* Insert immediately before the first entry with a larger start */
            __list_add(&block->list, pos->list.prev, &pos->list);
            return;
        }
    }

    /* No larger entry exists: the block belongs at the tail */
    list_add_tail(&block->list, free_list);
}

/*
 * Allocate a free_block descriptor, either from the pool's object cache
 * or via kzalloc(GFP_ATOMIC) (caller may be in atomic context).
 * Returns NULL on failure after logging a rate-limited error.
 */
static struct free_block *allocate_free_block_structure(numa_pool *pool)
{
    struct free_block *blk;

#ifdef USE_OBJECT_POOL
    blk = alloc_from_pool(&pool->free_block_pool);
#else
    blk = kzalloc(sizeof(*blk), GFP_ATOMIC);
#endif

    if (!blk)
        pr_err_ratelimited("NUMA node %d: allocate_free_block_structure failed - cannot allocate new chunk\n",
                           pool->node_id);

    return blk;
}


/*
 * Pre-allocate (lock-free) the free_block descriptors that splitting
 * @block for a @req_size allocation may need:
 *  - *chunk_pre:  front remainder when alignment leaves a gap before the
 *                 aligned start;
 *  - *chunk_post: rear remainder when the leftover exceeds
 *                 MIN_BLOCK_SIZE_FOR_SPLIT.
 * Returns 0 on success, -ENOMEM on descriptor allocation failure (any
 * already-allocated descriptor is released).
 *
 * Bug fix: the rear-remainder estimate previously ignored the front
 * alignment gap (`offset`), disagreeing with both find_best_free_block's
 * fit check and split_free_block's actual arithmetic (which shrinks the
 * block by `offset` before carving the rear), so chunk_post could be
 * allocated when no rear split would happen.
 */
static int prepare_split_chunks(numa_pool *pool, struct free_block *block,
                               size_t req_size, bool should_align,
                               struct free_block **chunk_pre, struct free_block **chunk_post)
{
    size_t aligned_req_size = req_size;
    phys_addr_t aligned_start = 0;
    size_t offset = 0;
    size_t remaining;
    size_t actual_size;
    
    *chunk_pre = NULL;
    *chunk_post = NULL;

    if (should_align) {
        aligned_req_size = ALIGN(req_size, HUGEPAGE_ALIGN_SIZE);
        aligned_start = ALIGN(block->start, HUGEPAGE_ALIGN_SIZE);
        offset = aligned_start - block->start;
    }

    // Front part is split off only when alignment leaves a leading gap
    if (should_align && offset > 0) {
        *chunk_pre = allocate_free_block_structure(pool);
        if (!(*chunk_pre)) {
            return -ENOMEM;
        }
    }
    
    // Rear remainder after the leading gap and the (aligned) request
    actual_size = should_align ? aligned_req_size : req_size;
    if (block->size < offset + actual_size) {
        // Defensive: block no longer fits; treat as no rear remainder
        remaining = 0;
    } else {
        remaining = block->size - offset - actual_size;
    }
    
    // Rear part is split off only when the remainder is worth tracking
    if (remaining > MIN_BLOCK_SIZE_FOR_SPLIT) {
        *chunk_post = allocate_free_block_structure(pool);
        if (!(*chunk_post)) {
            // Release the front descriptor allocated above
            if (*chunk_pre) {
#ifdef USE_OBJECT_POOL
                free_to_pool(&pool->free_block_pool, *chunk_pre);
#else
                kfree(*chunk_pre);
#endif
                *chunk_pre = NULL;
            }
            return -ENOMEM;
        }
    }
    
    return 0;
}

// Modified split_free_block function
static int split_free_block(numa_pool *pool, struct free_block *block, 
                           size_t req_size, bool should_align)
{
    struct free_block *chunk_pre = NULL;
    struct free_block *chunk_post = NULL;
    unsigned long flags;
    size_t aligned_req_size = req_size;
    phys_addr_t aligned_start = 0;
    size_t offset = 0;
    size_t remaining;
    size_t actual_size;
    int ret = 0;

    // Step 1: Pre-allocate required memory blocks without holding lock
    ret = prepare_split_chunks(pool, block, req_size, should_align, &chunk_pre, &chunk_post);
    if (ret) {
        pr_err_ratelimited("NUMA node %d: split_free_block failed to prepare chunks\n",
                           pool->node_id);
        return ret;
    }

    // If no splitting needed, return directly
    if (!chunk_pre && !chunk_post) {
        return 0;
    }

    // Step 2: Acquire lock and perform actual splitting operation
    spin_lock_irqsave(&pool->flex_allocator_spinlock, flags);
    
    // Recalculate splitting parameters (ensure data consistency)
    if (should_align) {
        aligned_req_size = ALIGN(req_size, HUGEPAGE_ALIGN_SIZE);
        aligned_start = ALIGN(block->start, HUGEPAGE_ALIGN_SIZE);
        offset = aligned_start - block->start;
    }

    // Handle front part splitting
    if (chunk_pre && should_align && offset > 0) {
        chunk_pre->start = block->start;
        chunk_pre->size = offset;
        chunk_pre->state = BLOCK_FREE;
        INIT_LIST_HEAD(&chunk_pre->list);
        
        // Insert at correct position in list
        insert_free_block_sorted(&pool->flex_allocator_free_list, chunk_pre);
        
        // Adjust original block
        block->start = aligned_start;
        block->size -= offset;
    }
    
    // Handle rear part splitting
    actual_size = should_align ? aligned_req_size : req_size;
    remaining = block->size - actual_size;
    
    if (chunk_post && remaining > MIN_BLOCK_SIZE_FOR_SPLIT) {
        chunk_post->start = block->start + actual_size;
        chunk_post->size = remaining;
        chunk_post->state = BLOCK_FREE;
        INIT_LIST_HEAD(&chunk_post->list);
        
        // Insert at correct position in list
        insert_free_block_sorted(&pool->flex_allocator_free_list, chunk_post);
        
        // Adjust original block size
        block->size = actual_size;
    }
    
    spin_unlock_irqrestore(&pool->flex_allocator_spinlock, flags);
    
    return 0;
}

// - flex_allocator_alloc
phys_addr_t flex_allocator_alloc(numa_pool *pool, const size_t req_size, int user, bool should_align)
{
    struct free_block *best_chunk;
    phys_addr_t result;
    size_t aligned_req_size = req_size;

    if (atomic_read(&pool->initialized) != NUMA_POOL_FULLY_INITIALIZED) {
        pr_err_ratelimited("NUMA node %d: flex_allocator_alloc failed - pool not initialized, req_size=%zu, user=%d, pool=%p\n",
                           pool->node_id, req_size, user, pool);
        return 0;
    }
    
    // If 2MB alignment is requested, align the requested size
    if (should_align) {
        aligned_req_size = ALIGN(req_size, HUGEPAGE_ALIGN_SIZE);
    }

    best_chunk = find_best_free_block(pool, req_size, should_align);
    if (!best_chunk) {
        pr_err_ratelimited("NUMA node %d: flex_allocator_alloc failed - no free chunk found, req_size=%zu, user=%d, pool=%p\n",
                           pool->node_id, req_size, user, pool);
        return 0;
    }

    // Handle block splitting
    if (split_free_block(pool, best_chunk, req_size, should_align) < 0) {
        return 0;
    }

    // Mark current block as allocated
    best_chunk->state = (user == MEM_USER_FIX_ALLOCATOR) ? BLOCK_ALLOCATED_TO_FIX_ALLOC : BLOCK_ALLOCATED_TO_EXTERNAL;
    // Set the actual allocated size
    if (should_align) {
        best_chunk->size = aligned_req_size;
    } else {
        best_chunk->size = req_size;
    }

    // Update pool free size for user allocations
    if (user == MEM_USER_EXTERNAL)
    {
        atomic_sub(best_chunk->size, &pool->free_size);
    }

    result = best_chunk->start;
    return result;
}
/*
 * Coalesce @second into @first (caller guarantees they are physically
 * adjacent and the free-list lock is held), then release @second's
 * descriptor.
 */
static void merge_two_blocks(numa_pool *pool, struct free_block *first, struct free_block *second)
{
    first->size += second->size;
    list_del(&second->list);

#ifdef USE_OBJECT_POOL
    free_to_pool(&pool->free_block_pool, second);
#else
    kfree(second);
#endif
}

/*
 * Coalesce @chunk with its physically adjacent free neighbors in the
 * sorted list (predecessor first, then successor). Caller holds the
 * free-list lock.
 */
static void merge_adjacent_free_blocks(numa_pool *pool, struct free_block *chunk)
{
    struct list_head *head = &pool->flex_allocator_free_list;
    struct free_block *neighbor;

    /* Merge with the predecessor if it ends exactly where chunk begins */
    if (chunk->list.prev != head) {
        neighbor = list_entry(chunk->list.prev, struct free_block, list);
        if (neighbor->state == BLOCK_FREE &&
            neighbor->start + neighbor->size == chunk->start) {
            merge_two_blocks(pool, neighbor, chunk);
            /* The surviving block is the predecessor */
            chunk = neighbor;
        }
    }

    /* Merge with the successor if chunk ends exactly where it begins */
    if (chunk->list.next != head) {
        neighbor = list_entry(chunk->list.next, struct free_block, list);
        if (neighbor->state == BLOCK_FREE &&
            chunk->start + chunk->size == neighbor->start) {
            merge_two_blocks(pool, chunk, neighbor);
        }
    }
}

/*
 * Locate the allocated block starting at @addr and, if its state matches
 * the @user tag, mark it BLOCK_FREE and report its size via @chunk_size
 * and success via @found. Returns the block, or NULL on double free,
 * owner mismatch, or address not found. Caller holds the free-list lock.
 */
static struct free_block *find_block_to_free(numa_pool *pool, phys_addr_t addr, int user, 
                                           size_t *chunk_size, bool *found)
{
    struct free_block *blk;
    bool owner_ok;

    list_for_each_entry(blk, &pool->flex_allocator_free_list, list) {
        if (blk->start != addr)
            continue;

        /* Already free: this is a double-free attempt */
        if (blk->state == BLOCK_FREE) {
            pr_err_ratelimited("NUMA node %d: flex_allocator_free detected double free attempt for addr=%llx, user=%d, pool=%p\n",
                               pool->node_id, addr, user, pool);
            return NULL;
        }

        /* The caller must own the block it is freeing */
        owner_ok = (user == MEM_USER_FIX_ALLOCATOR && blk->state == BLOCK_ALLOCATED_TO_FIX_ALLOC) ||
                   (user == MEM_USER_EXTERNAL && blk->state == BLOCK_ALLOCATED_TO_EXTERNAL);
        if (!owner_ok) {
            pr_err_ratelimited("NUMA node %d: flex_allocator_free failed - block state mismatch for addr=%llx, user=%d, state=%d, pool=%p\n",
                               pool->node_id, addr, user, blk->state, pool);
            return NULL;
        }

        blk->state = BLOCK_FREE;
        *found = true;
        *chunk_size = blk->size;
        return blk;
    }

    // Block not found
    pr_debug("NUMA node %d: flex_allocator_free failed - chunk not found for addr=%llx, user=%d, pool=%p\n",
             pool->node_id, addr, user, pool);
    return NULL;
}

// - flex_allocator_free
int flex_allocator_free(numa_pool *pool, phys_addr_t addr, int user)
{
    struct free_block *chunk;
    unsigned long flags;
    size_t chunk_size = 0;
    bool found = false;

    spin_lock_irqsave(&pool->flex_allocator_spinlock, flags);

    if (list_empty(&pool->flex_allocator_free_list))
    {
        spin_unlock_irqrestore(&pool->flex_allocator_spinlock, flags);
        pr_err_ratelimited("NUMA node %d: flex_allocator_free failed - free list empty, addr=%llx, user=%d, pool=%p\n",
                           pool->node_id, addr, user, pool);
        return -1;
    }

    // Find the block to free
    chunk = find_block_to_free(pool, addr, user, &chunk_size, &found);
    if (!chunk) {
        spin_unlock_irqrestore(&pool->flex_allocator_spinlock, flags);
        return -1;
    }

    // Update pool free size while still holding lock
    if (user == MEM_USER_EXTERNAL)
    {
        atomic_add(chunk_size, &pool->free_size);
    }
    
    merge_adjacent_free_blocks(pool, chunk);

    spin_unlock_irqrestore(&pool->flex_allocator_spinlock, flags);
    return 0;
}

/*
 * Drain every block descriptor from the free list and release it.
 * synchronize_rcu() is called first so in-flight readers finish before
 * descriptors are torn down.
 */
static void cleanup_free_blocks(numa_pool *pool)
{
    struct free_block *blk, *next;
    unsigned long irq_flags;

    synchronize_rcu();

    spin_lock_irqsave(&pool->flex_allocator_spinlock, irq_flags);
    /* _safe variant: entries are deleted while walking */
    list_for_each_entry_safe(blk, next, &pool->flex_allocator_free_list, list) {
        list_del(&blk->list);
#ifdef USE_OBJECT_POOL
        free_to_pool(&pool->free_block_pool, blk);
#else
        kfree(blk);
#endif
    }
    spin_unlock_irqrestore(&pool->flex_allocator_spinlock, irq_flags);
}

/*
 * Release the pool's backing contiguous memory and reset all pool
 * metadata to its empty state, under metadata_lock.
 *
 * NOTE(review): l3t_shared_unlock() and free_contig_range() are invoked
 * with a spinlock held and IRQs disabled; confirm neither can sleep in
 * this configuration — free_contig_range() is generally not safe to call
 * from atomic context. TODO verify, or move the release outside the lock.
 */
static void release_physical_memory(numa_pool *pool)
{
    unsigned long flags;

    spin_lock_irqsave(&pool->metadata_lock, flags);
    if (pool->flex_allocator) {
        /* Undo init in reverse order: L3T unlock, then free the range */
        l3t_shared_unlock(pool->node_id, page_to_pfn(pool->flex_allocator), pool->capacity);
        free_contig_range(page_to_pfn(pool->flex_allocator), pool->flex_allocator_nr_pages);
    }
    /* Clear metadata so concurrent readers observe an empty pool */
    WRITE_ONCE(pool->pool_phys, 0);
    WRITE_ONCE(pool->flex_allocator, NULL);
    WRITE_ONCE(pool->flex_allocator_nr_pages, 0);
    WRITE_ONCE(pool->capacity, 0);
    atomic_set(&pool->free_size, 0);
    spin_unlock_irqrestore(&pool->metadata_lock, flags);
}

// - flex_allocator_destroy
void flex_allocator_destroy(numa_pool *pool)
{
    if (!pool)
    {
        pr_err("flex_allocator_destroy: Invalid pool parameter (pool=%p)\n", pool);
        return;
    }

    cleanup_free_blocks(pool);

    release_physical_memory(pool);
}