// hisi_l0_debug.c
#include <linux/slab.h>
#include <linux/module.h>
#include "hisi_l0_mem_pool.h"

#define MAX_PRINT_LIST 100
// Function to print flex_allocator status
// Print the state of every chunk on a pool's flex allocator free list and
// accumulate per-chunk byte totals into the caller-supplied counters.
//
// @pool:                     NUMA pool whose flex allocator list is walked
// @flex_allocator_allocated: incremented by the size of each non-BLOCK_FREE chunk
// @flex_allocator_free:      incremented by the size of each BLOCK_FREE chunk
//
// The list is traversed under flex_allocator_spinlock with IRQs disabled,
// so the snapshot is internally consistent.
static void print_flex_allocator_stats(numa_pool *pool,
                                       unsigned long *flex_allocator_allocated,
                                       unsigned long *flex_allocator_free)
{
    unsigned long flags;
    struct free_block *chunk;

    pr_info("==================flex_allocator Status:\n");
    spin_lock_irqsave(&pool->flex_allocator_spinlock, flags);

    /* list_for_each_entry() is a no-op on an empty list, so no separate
     * list_empty() guard is needed. */
    list_for_each_entry(chunk, &pool->flex_allocator_free_list, list) {
        pr_info("====numa_pool chunk STATUS: state=%d, phys_addr=%llx, size=%lu\n",
                chunk->state,
                chunk->start,
                chunk->size);
        if (chunk->state == BLOCK_FREE)
            *flex_allocator_free += chunk->size;
        else
            *flex_allocator_allocated += chunk->size;
    }

    spin_unlock_irqrestore(&pool->flex_allocator_spinlock, flags);
}

// Function to print fix allocator cache status for a single cache level
// Print allocation statistics for one fix-allocator cache level and
// accumulate its totals into the caller-supplied counters.
//
// @cache:        the cache level to inspect (skipped if not initialized)
// @cache_level:  index of the level, used only for log output
// @total_objects:                          set to objs_per_block * block_count
// @total_allocated_objects:                incremented by allocated objects per block
// @fix_allocator_used_in_flex_allocator:   incremented by this cache's total bytes
// @fix_allocator_objects_allocated:        incremented by bytes in allocated objects
//
// NOTE: per-block log output (hex dump + summary line) is capped at
// MAX_PRINT_LIST blocks to bound log volume, but the object accounting is
// performed for EVERY block so the reported totals stay accurate.
static void print_single_cache_stats(struct fix_cache *cache, 
                                     int cache_level,
                                     unsigned long *total_objects,
                                     unsigned long *total_allocated_objects,
                                     unsigned long *fix_allocator_used_in_flex_allocator,
                                     unsigned long *fix_allocator_objects_allocated)
{
#ifdef ENABLE_FIX_CACHE_LOCK
    unsigned long flags;
#endif
    struct fix_cache_block *cache_block;
    unsigned long allocated_objects = 0;
    unsigned long free_objects = 0;
    unsigned long cache_total_bytes;
    unsigned long cache_allocated_bytes;
    int j = 0;
    
    if (atomic_read(&cache->initialized) == 0)
        return;

#ifdef ENABLE_FIX_CACHE_LOCK
    spin_lock_irqsave(&cache->fix_cache_lock, flags);
#endif

    if (list_empty(&cache->block_list)) {
        pr_info("print_memory_stats cache level %d block list is empty\n", cache_level);
#ifdef ENABLE_FIX_CACHE_LOCK
        spin_unlock_irqrestore(&cache->fix_cache_lock, flags);
#endif
        return;
    }

    *total_objects = cache->objs_per_block * cache->block_count;

    list_for_each_entry(cache_block, &cache->block_list, list) {
        /* Always account for the block, even when its printout is
         * suppressed below; otherwise caches with more than
         * MAX_PRINT_LIST blocks report undercounted allocation totals. */
        allocated_objects = bitmap_weight(cache_block->bitmap, cache->objs_per_block);
        *total_allocated_objects += allocated_objects;

        j++;
        if (j >= MAX_PRINT_LIST) {
            continue; /* cap log volume only; accounting already done */
        }

        print_hex_dump(KERN_ERR,
                       "bitmap: ",
                       DUMP_PREFIX_OFFSET,
                       16,
                       4,
                       cache_block->bitmap,
                       BITS_TO_LONGS(cache->objs_per_block) * sizeof(unsigned long),
                       false);

        pr_info("cache level %d block pa %llx allocated_objects %lu free objs %lu\n",
                cache_level,
                cache_block->phys_base,
                allocated_objects,
                cache->objs_per_block - allocated_objects);
    }

    free_objects = *total_objects - *total_allocated_objects;

    // Fix allocator memory statistics (in bytes)
    cache_total_bytes = cache->block_count * cache->block_size;
    cache_allocated_bytes = *total_allocated_objects * cache->obj_size;

    *fix_allocator_used_in_flex_allocator += cache_total_bytes;
    *fix_allocator_objects_allocated += cache_allocated_bytes;

    pr_info("Cache Level %d: Object size %zu, Total objects %lu, Allocated %lu, Free %lu. block_count %u\n",
            cache_level,
            cache->obj_size,
            *total_objects,
            *total_allocated_objects,
            free_objects,
            cache->block_count);
            
#ifdef ENABLE_FIX_CACHE_LOCK
    spin_unlock_irqrestore(&cache->fix_cache_lock, flags);
#endif
}

// Function to print all fix allocator caches status
// Walk every fix-allocator cache level of @pool and dump its statistics,
// accumulating byte totals into the caller-supplied counters.
//
// @pool:                                 pool whose caches are inspected
// @fix_allocator_used_in_flex_allocator: incremented by each cache's total bytes
// @fix_allocator_objects_allocated:      incremented by each cache's allocated bytes
static void print_fix_allocator_stats(numa_pool *pool,
                                      unsigned long *fix_allocator_used_in_flex_allocator,
                                      unsigned long *fix_allocator_objects_allocated)
{
    int level;

    pr_info("================Fix Allocator Cache Status:\n");

    for (level = 0; level < FIX_CACHE_NUM; level++) {
        // Per-level counters; only the byte totals propagate to the caller.
        unsigned long level_total_objects = 0;
        unsigned long level_allocated_objects = 0;

        print_single_cache_stats(&pool->caches[level],
                                 level,
                                 &level_total_objects,
                                 &level_allocated_objects,
                                 fix_allocator_used_in_flex_allocator,
                                 fix_allocator_objects_allocated);
    }
}

// Function to print overall memory usage summary
// Print an overall memory-usage summary for one pool.
//
// @flex_allocator_allocated:              bytes handed out by the flex allocator
//                                         (including bytes given to the fix allocator)
// @flex_allocator_free:                   bytes still free in the flex allocator
// @fix_allocator_used_in_flex_allocator:  bytes the fix allocator holds from the
//                                         flex allocator
// @fix_allocator_objects_allocated:       bytes inside objects handed to users
// @flex_allocator_nr_pages:               total pages backing the flex allocator
static void print_memory_usage_summary(unsigned long flex_allocator_allocated,
                                       unsigned long flex_allocator_free,
                                       unsigned long fix_allocator_used_in_flex_allocator,
                                       unsigned long fix_allocator_objects_allocated,
                                       unsigned long flex_allocator_nr_pages)
{
    unsigned long flex_allocator_total;
    unsigned long external_allocated;
    unsigned long fix_allocator_internal_fragmentation;
    
    pr_info("=========================Total Memory Usage:\n");

    // flex_allocator system total memory = allocated (including to Fix allocator) + free
    flex_allocator_total = flex_allocator_allocated + flex_allocator_free;

    // Fix allocator system internal fragmentation = total memory allocated to Fix allocator - actual memory allocated to users
    // NOTE(review): this is the same quantity that is printed below as
    // "Free in Fix Allocator" — the data available here cannot split
    // fragmentation from free-but-unused fix-allocator space.
    fix_allocator_internal_fragmentation = fix_allocator_used_in_flex_allocator - fix_allocator_objects_allocated;

    pr_info("Flex Allocator Total: %lu bytes, Allocated to Fix Allocator: %lu bytes, Free in flex_allocator: %lu bytes\n",
            flex_allocator_total, fix_allocator_used_in_flex_allocator, flex_allocator_free);
    pr_info("Fix Allocator System Usage:  Objects allocated: %lu bytes, Internal fragmentation: %lu bytes, Free in Fix Allocator: %lu bytes\n",
            fix_allocator_objects_allocated, fix_allocator_internal_fragmentation, 
            (fix_allocator_used_in_flex_allocator - fix_allocator_objects_allocated));
            
    // Validate calculation consistency.  The fix allocator's memory all
    // comes from the flex allocator, so its usage can never legitimately
    // exceed the flex allocator's allocated bytes; if it does, one of the
    // counters is wrong and the subtraction below would wrap.
    // (The previous check — recombining the same subtraction and comparing
    // against the total — was an identity and could never fire.)
    if (fix_allocator_used_in_flex_allocator > flex_allocator_allocated) {
        pr_warn("  WARNING: Memory calculation inconsistency detected!\n");
        pr_warn("  fix_allocator_used=%lu exceeds flex_allocator_allocated=%lu\n",
                fix_allocator_used_in_flex_allocator, flex_allocator_allocated);
    } else {
        external_allocated = flex_allocator_allocated - fix_allocator_used_in_flex_allocator;
        pr_info("  flex_allocator_free=%lu, fix_allocator_used=%lu, external_allocated=%lu\n",
                flex_allocator_free, fix_allocator_used_in_flex_allocator, external_allocated);
    }

    pr_info("Total flex_allocator_nr_pages: %lu (%lu bytes)\n",
            flex_allocator_nr_pages,
            flex_allocator_nr_pages * PAGE_SIZE);
}

// Main function - print_memory_stats
// Dump the full memory picture for one pool: flex allocator chunks,
// fix allocator caches, then an overall usage summary.
//
// @pool: the NUMA pool to report on
static void print_memory_stats(numa_pool *pool)
{
    unsigned long flex_allocated = 0;   // bytes handed out by the flex allocator
    unsigned long flex_free = 0;        // bytes still free in the flex allocator
    unsigned long fix_used = 0;         // bytes the fix allocator holds from flex
    unsigned long fix_objects = 0;      // bytes in objects handed to users

    print_flex_allocator_stats(pool, &flex_allocated, &flex_free);

    print_fix_allocator_stats(pool, &fix_used, &fix_objects);

    print_memory_usage_summary(flex_allocated,
                               flex_free,
                               fix_used,
                               fix_objects,
                               pool->flex_allocator_nr_pages);
}

// - print_mem_pool_info
// Dump the state of every NUMA node's memory pools: per-pool status plus
// detailed allocator statistics, followed by per-node capacity totals.
// The pool list is read under RCU; per-pool fields are sampled with
// READ_ONCE/atomic_read, so values are a best-effort snapshot.
static void print_mem_pool_info(void)
{
    int i, j;
    struct numa_pool_set *pool_list;
    size_t numa_remain_capicity, numa_alloc_size, numa_free_size;

    RCU_READ_LOCK();
    pool_list = RCU_DEREFERENCE(l0_cache_tuner.numa_pool_list);

    if (!pool_list) {
        pr_info("l0_cache_tuner STATUS: numa_pool_list is NULL.\n");
        RCU_READ_UNLOCK();
        return;
    }

    // Traverse all NUMA nodes.
    for (i = 0; i < get_numa_node_count(); i++) {
        pr_info("====================================================================================\n");

        // All memory pools of the current NUMA node
        for (j = 0; j < NUMA_POOL_MAX_NUM_PER_NODE; j++) {
            // Go through pool_list rather than dereferencing the global directly.
            struct numa_pool *pool = &pool_list[i].numa_pools[j];
            size_t capacity = READ_ONCE(pool->capacity);
            phys_addr_t pool_phys = READ_ONCE(pool->pool_phys);
            numa_free_size = atomic_read(&pool->free_size);
            /* %zu for the size_t locals (capacity, numa_free_size). */
            pr_info("numa_pool [%d][%d] %p STATUS: free_size=%zu (%zu MB), "
                    "pool_phys=%llx, flex_allocators=%p, capacity=%zu, node_id=%u\n",
                    i,
                    j,
                    pool,
                    numa_free_size,
                    numa_free_size >> MB_SHIFT,
                    pool_phys,
                    pool->flex_allocator,
                    capacity,
                    pool->node_id);

            print_memory_stats(pool);
        }
        numa_remain_capicity = atomic_read(&pool_list[i].numa_remain_capicity);
        numa_alloc_size = pool_list[i].max_numa_capacity - numa_remain_capicity;
        /* %zu for the size_t locals (numa_alloc_size, numa_remain_capicity). */
        pr_info("numa %d cache STATUS: node_alloc_size=%zu (%zu MB), numa_remain_capicity=%zu (%zu MB)"
                " max_numa_capacity=%lu (%lu MB) default_numa_pool_size=%lu MB\n",
                i,
                numa_alloc_size,
                numa_alloc_size >> MB_SHIFT,
                numa_remain_capicity,
                numa_remain_capicity >> MB_SHIFT,
                pool_list[i].max_numa_capacity,
                pool_list[i].max_numa_capacity >> MB_SHIFT,
                pool_list[i].default_numa_pool_size);
    }
    pr_info("====================================================================================\n");
    RCU_READ_UNLOCK();
}

// - l0_pool_dump_store
// sysfs store handler for the dump trigger.  Writing "1" (with or without
// a trailing newline) dumps the memory pool state; any other input is
// rejected with -EINVAL.
//
// @kobj/@attr: sysfs plumbing, unused
// @buf/@count: user-written bytes and their length
// Returns @count on success, -EINVAL on invalid input.
static ssize_t l0_pool_dump_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count)
{
    bool trigger;

    // Accept exactly "1" or "1\n" — nothing else.
    trigger = (count == 1 && buf[0] == '1') ||
              (count == 2 && buf[0] == '1' && buf[1] == '\n');

    if (!trigger) {
        pr_err("Invalid input: only '1' is accepted to trigger debug dump\n");
        return -EINVAL;
    }

    print_mem_pool_info();
    return count;
}

// Write-only (0200) sysfs attribute "dump_l0_pool"; writes are handled by
// l0_pool_dump_store, there is no show handler.
static struct kobj_attribute dump_attr = __ATTR(dump_l0_pool, 0200, NULL, l0_pool_dump_store);

// NULL-terminated attribute list for the sysfs group below.
static struct attribute *l3t_attrs[] = {
    &dump_attr.attr,
    NULL,
};

// Unnamed attribute group: the attribute appears directly under the
// kobject's directory (created in l0_pool_dump_init).
static struct attribute_group l3t_attr_group = {
    .attrs = l3t_attrs,
};

// - l0_pool_dump_init
int l0_pool_dump_init(void)
{
    int ret;

    dump_l0_pool_kobj = kobject_create_and_add("l0_pool", kernel_kobj);
    if (!dump_l0_pool_kobj)
    {
        pr_err("l0_pool_dump_init: Failed to create kobject for l0_pool dump interface\n");
        return -ENOMEM;
    }

    ret = sysfs_create_group(dump_l0_pool_kobj, &l3t_attr_group);
    if (ret)
    {
        kobject_put(dump_l0_pool_kobj);
        pr_err("l0_pool_dump_init: Failed to create sysfs group for l0_pool dump interface, error=%d\n", ret);
        return ret;
    }

    return 0;
}