/*
 * @copyright: Copyright (c) Huawei Technologies Co., Ltd. 2012-2018. All rights reserved.
 * @Author: YangYunYi
 * @Date: 2025-01-27 16:41:08
 * @LastEditTime: 2025-07-26 10:29:56
 * @LastEditors: YangYunYi
 * @Description: HiSilicon L0 Memory Pool Header
 * @FilePath: \L0demo\hisi_l0_mem_pool.h
 */
// Include guard renamed from _HISI_L0_MEM_POOL_H: identifiers beginning with
// an underscore followed by an uppercase letter are reserved for the
// implementation (C11 7.1.3, CERT DCL37-C).
#ifndef HISI_L0_MEM_POOL_H
#define HISI_L0_MEM_POOL_H

#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/atomic.h>
#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/bitops.h>

// Configuration constants
#define NUMA_POOL_MAX_NUM_PER_NODE 4  // Pool slots per node (array size in numa_pool_set)
#define MEM_POOL_ALIGNMENT 4096       // Pool alignment: one 4 KiB page
#define MB_SHIFT 20                   // log2(1 MiB), for byte <-> MiB conversion
#ifndef PAGE_SHIFT
#define PAGE_SHIFT 12                 // Fallback (4 KiB pages) if arch headers did not define it
#endif

#define HUGEPAGE_ALIGN_SIZE 2097152   // 2 MiB (2^21): hugepage alignment boundary
#define FIX_CACHE_NUM 10              // Fixed-size cache levels per pool (caches[] size)
#define MAX_FIX_ALLOCATOR_SIZE (8192) // Largest object size served by the fix allocator -- assumed from name; confirm in .c
#define MIN_OBJ_SIZE 32               // Smallest object size; also the bitmap granularity (see fix_cache_block)
#define BITMAP_BITS 512               // Default per-block bitmap size in bits

// System limits
#define MAX_NUMA_NODES 64             // Upper bound on NUMA nodes (sizes prealloc_node_enabled[])

// RCU wrappers: with USE_RCU_OPTIMIZATION defined they forward to the kernel
// RCU primitives; the #else branch compiles them to plain no-ops/assignments,
// which is only safe when there is no concurrent access.
#define USE_RCU_OPTIMIZATION
#ifdef USE_RCU_OPTIMIZATION
#define RCU_READ_LOCK() rcu_read_lock()
#define RCU_READ_UNLOCK() rcu_read_unlock()
#define RCU_DEREFERENCE(ptr) rcu_dereference(ptr)
#define RCU_ASSIGN_POINTER(p, v) rcu_assign_pointer(p, v)
#define SYNCHRONIZE_RCU() synchronize_rcu()
#else
#define RCU_READ_LOCK() do {} while (0)
#define RCU_READ_UNLOCK() do {} while (0)
#define RCU_DEREFERENCE(ptr) (ptr)
#define RCU_ASSIGN_POINTER(p, v) do { (p) = (v); } while (0)
#define SYNCHRONIZE_RCU() do {} while (0)
#endif

// Feature enablement
#define USE_PREFERRED_FIX_CACHE_BLOCKS  // Keep a per-level "preferred" block for fast alloc/free paths
#define USE_OBJECT_POOL                 // Recycle allocator metadata via struct object_pool

// Forward declarations: typedef'd handles so prototypes below can use the
// bare names before the struct definitions appear.
typedef struct cache_tuner cache_tuner;
typedef struct numa_pool numa_pool;
typedef struct numa_pool_set numa_pool_set;

#ifdef USE_OBJECT_POOL
// Spinlock-protected recycling pool of fixed-size metadata objects, used to
// avoid repeated alloc/free of free_block / fix_cache_block structures.
// NOTE(review): counts are plain int -- confirm shared_size is bounded well
// below INT_MAX in the implementation.
struct object_pool {
    void **shared_objects;    // Array of recycled object pointers (capacity shared_size)
    int shared_size;          // Capacity of shared_objects
    int shared_count;         // Number of objects currently cached
    size_t obj_size;          // Byte size of each pooled object
    spinlock_t shared_lock;   // Protects shared_objects and shared_count
};
#endif

#ifdef USE_RANGE_TRACK
// Tracks which physical address range belongs to which fix-cache level.
// Compiled in only when USE_RANGE_TRACK is defined (this header does not
// define it; presumably set by the build system -- confirm).
struct cache_range_track {
    phys_addr_t start_addr;   // Start of physical address range (inclusive)
    phys_addr_t end_addr;     // End of physical address range (exclusive)
    int cache_level;          // Fix-cache level this range belongs to
    struct list_head list;    // Linkage in numa_pool.cache_range_list
};
#endif

// One backing block of a fix-cache level. Object slots inside the block are
// tracked by the statically sized bitmap (presumably one bit per object of
// the level's obj_size -- confirm in the fix allocator implementation).
struct fix_cache_block {
    // ____cacheline_aligned on the first member cache-aligns the whole struct.
    phys_addr_t phys_base ____cacheline_aligned;  // Physical base address of the block's memory
    struct list_head list;                        // Linkage in fix_cache.block_list
    
    // Pre-allocated bitmap space; the "+ 1" extra long is slack so rounding
    // in the size computation can never run past the array.
#ifdef FIX_ALLOCATOR_BLOCK_DEFAULT_SIZE
    unsigned long bitmap[BITS_TO_LONGS(FIX_ALLOCATOR_BLOCK_DEFAULT_SIZE / MIN_OBJ_SIZE) + 1];
#else
    unsigned long bitmap[BITS_TO_LONGS(BITMAP_BITS) + 1];
#endif
};

// Per-size-class cache ("level") of the fix allocator. Hot scalar fields
// share one cache line; block_list starts a new line to limit false sharing.
struct fix_cache {
    // Frequently accessed fields placed first for better cache locality
    spinlock_t fix_cache_lock ____cacheline_aligned;  // Protects the fields below and block_list
    atomic_t initialized;        // Set once the level is usable -- assumed; confirm in fix_cache_init()
    size_t obj_size;             // Object size served by this level
    size_t block_size;           // Byte size of each backing fix_cache_block
    unsigned int block_count;    // Number of blocks currently owned by this level
    unsigned int objs_per_block; // Object slots per block
    
    struct list_head block_list ____cacheline_aligned;  // List of fix_cache_block entries
};

// NUMA pool initialization states (presumably stored in numa_pool.initialized
// -- confirm, since that field is an atomic_t rather than this enum type).
enum numa_pool_init_state {
    NUMA_POOL_UNINITIALIZED = 0,          // No initialization performed yet
    NUMA_POOL_PARTIALLY_INITIALIZED = 1,  // Some sub-allocators set up
    NUMA_POOL_FULLY_INITIALIZED = 2,      // Pool fully ready for use
    NUMA_POOL_INITIALIZATION_FAILED = -1  // Setup failed; pool unusable
};

// Memory user types: identify who owns memory taken from the flex allocator
// (passed as the `user` argument of flex_allocator_alloc/free).
enum mem_user_type {
    MEM_USER_FIX_ALLOCATOR = 0,  // Memory backs a fix-cache level
    MEM_USER_EXTERNAL = 1,       // Memory handed directly to an external caller
    MEM_USER_RESERVED = 2        // Reserved -- usage not visible in this header; confirm in .c
};

// Lifecycle states of a flex-allocator extent (see struct free_block)
enum block_state {
    BLOCK_ALLOCATED_TO_EXTERNAL = 0,   // In use by an external caller
    BLOCK_FREE = 1,                    // Available for allocation
    BLOCK_ALLOCATED_TO_FIX_ALLOC = 2   // Backing a fix-cache level
};

// Flex-allocator list node describing one contiguous physical extent and its
// current ownership state.
struct free_block {
    phys_addr_t start;        // Physical start address of the extent
    size_t size;              // Extent size in bytes
    enum block_state state;   // Current ownership/state of the extent
    struct list_head list;    // Linkage in numa_pool.flex_allocator_free_list
};

// One memory pool bound to a NUMA node. Hot counters, locks, and fast-path
// caches are ____cacheline_aligned to reduce false sharing between CPUs.
struct numa_pool {
    cache_tuner *tuner;    // Back-pointer to the owning cache_tuner
    unsigned int node_id;  // NUMA node this pool's memory belongs to
    
    // Frequently accessed fields for better cache performance
    // NOTE(review): free_size is a 32-bit atomic_t; if it counts bytes it
    // overflows above ~2 GiB -- confirm the unit (bytes vs pages vs MiB).
    atomic_t free_size ____cacheline_aligned;  // Remaining free space in the pool
    size_t capacity;         // Total pool size -- unit assumed bytes; confirm
    phys_addr_t pool_phys;   // Physical base address of the pool's memory
    atomic_t initialized;    // Init progress -- presumably an enum numa_pool_init_state value; confirm
    
    // Metadata protection
    spinlock_t metadata_lock ____cacheline_aligned;            // Protects pool metadata
    spinlock_t flex_allocator_spinlock ____cacheline_aligned;  // Protects the flex allocator fields below
    
    // Flex allocator fields
    struct page *flex_allocator;                // Backing page allocation for the pool
    unsigned long flex_allocator_nr_pages;      // Number of backing pages
    struct list_head flex_allocator_free_list;  // struct free_block extent list
    
    // Fix allocator caches, one per size class
    struct fix_cache caches[FIX_CACHE_NUM];
    
#ifdef USE_RANGE_TRACK
    // Range tracking for cache levels
    spinlock_t cache_range_lock ____cacheline_aligned;  // Protects cache_range_list
    struct list_head cache_range_list;                  // struct cache_range_track entries
#endif

#ifdef USE_PREFERRED_FIX_CACHE_BLOCKS
    // Fast-path hint: last block used for alloc/free at each level
    // (maintained via update_preferred_alloc_block/update_preferred_free_block).
    struct fix_cache_block *alloc_preferred_blocks[FIX_CACHE_NUM] ____cacheline_aligned;
    struct fix_cache_block *free_preferred_blocks[FIX_CACHE_NUM];
#endif

#ifdef USE_OBJECT_POOL
    // Metadata recycling pools for struct free_block / struct fix_cache_block
    struct object_pool free_block_pool ____cacheline_aligned;
    struct object_pool cache_block_pool;
#endif
};

// Fixed-size set of pools for one NUMA node.
// NOTE(review): "capicity" is a typo for "capacity"; renaming the field would
// break every user, so it is left as-is and only flagged here.
struct numa_pool_set {
    atomic_t numa_remain_capicity ____cacheline_aligned;  // Remaining capacity -- unit unclear from here (MB_SHIFT suggests MiB); confirm
    size_t max_numa_capacity;       // Maximum total capacity for this set
    size_t default_numa_pool_size;  // Default size of each newly created pool
    struct numa_pool numa_pools[NUMA_POOL_MAX_NUM_PER_NODE];  // Pool slots for this node
};

// Top-level module state. When USE_RCU_OPTIMIZATION is on, the pointers are
// RCU-protected: readers go through RCU_DEREFERENCE under RCU_READ_LOCK,
// writers publish with RCU_ASSIGN_POINTER and wait with SYNCHRONIZE_RCU.
struct cache_tuner {
#ifdef USE_RCU_OPTIMIZATION
    int __rcu **numa_distance_matrix;            // node x node NUMA distance table -- assumed; confirm layout
    atomic_t numa_node_count;                    // Number of NUMA nodes tracked
    struct numa_pool_set __rcu *numa_pool_list;  // Per-node pool sets (array -- confirm indexing)
#else
    int **numa_distance_matrix;
    atomic_t numa_node_count;
    struct numa_pool_set *numa_pool_list;
#endif
};

// Public API functions
// l0_kmalloc: allocate `size` bytes preferring NUMA node expect_node_id;
// returns a kernel virtual address, presumably NULL on failure -- confirm.
// NOTE(review): size is ssize_t, so negative values are representable;
// verify the implementation rejects size <= 0.
void *l0_kmalloc(ssize_t size, int expect_node_id);
// l0_kfree: release memory obtained from l0_kmalloc (NULL handling: confirm).
void l0_kfree(void *mem);
// Page-granular variants: allocate/free page_cnt pages near expect_node_id.
struct page *l0_alloc_pages(const int page_cnt, const int expect_node_id);
void l0_free_pages(struct page *page);

// Exported global variables
extern int base_node;                      // Base/default NUMA node id -- semantics defined in the .c; confirm
extern cache_tuner l0_cache_tuner;         // The single global tuner instance
extern struct kobject *dump_l0_pool_kobj;  // sysfs kobject used by the pool-dump debug interface

// Module parameters
extern uint max_numa_capacity;       // Per-node capacity cap -- unit unclear (likely MiB per MB_SHIFT); confirm
extern uint pre_alloc_size;          // Amount to pre-allocate at init -- unit: confirm
extern uint preinit_cache_levels;    // Number of fix-cache levels to pre-initialize
extern bool prealloc_node_enabled[MAX_NUMA_NODES];  // Per-node preallocation opt-in flags
extern bool prealloc_nodes_parsed;   // True once prealloc_numa_nodes has been parsed
extern char *prealloc_numa_nodes;    // Raw module-parameter string listing nodes to preallocate

// Core module functions
int get_numa_node_count(void);  // Number of NUMA nodes known to the module
// Allocate req_size bytes from a pool near expect_node_id, reporting the
// owning pool via *pool. Presumably returns the physical address, 0 on
// failure -- confirm; should_align likely requests hugepage alignment.
phys_addr_t find_and_alloc_from_numa_pool_list(const ssize_t req_size, 
                                              numa_pool **pool, 
                                              const int expect_node_id, 
                                              bool should_align);
// Return the memory at mem_pa to whichever pool owns it; error convention
// (0 on success / negative errno?) is defined in the .c -- confirm.
int find_and_free_to_numa_pool_list(phys_addr_t mem_pa);
int pre_alloc_numa_pool(void);  // Pre-allocate pools at module init per the module parameters

// Flex allocator functions: coarse-grained physical-extent allocator that
// backs both external requests and the fix-cache levels of a pool.
int flex_allocator_init(numa_pool *pool, const size_t size, const int target_node_id);
// `user` carries an enum mem_user_type value identifying the new owner.
phys_addr_t flex_allocator_alloc(numa_pool *pool, const size_t req_size, 
                                int user, bool should_align);
int flex_allocator_free(numa_pool *pool, phys_addr_t addr, int user);
void flex_allocator_destroy(numa_pool *pool);

// Fix allocator functions: bitmap-based allocator for small fixed-size
// objects, layered on blocks obtained from the flex allocator.
int fix_allocator_init(numa_pool *pool);
phys_addr_t fix_allocator_alloc(numa_pool *pool, size_t size);
int fix_allocator_free(numa_pool *pool, phys_addr_t addr);
void fix_allocator_destroy(numa_pool *pool);
void fix_cache_init(struct fix_cache *cache);  // Initialize one cache level
// Pre-initialize the pool's cache levels -- presumably the first
// `preinit_cache_levels` of them; confirm against the module parameter.
void preinit_fix_cache_level_of_pool(numa_pool *pool);

// Utility functions
#ifdef USE_OBJECT_POOL
// Object-pool helpers for recycling allocator metadata (see struct object_pool).
// alloc_from_pool presumably returns NULL when the pool is empty -- confirm.
int numa_pool_object_pools_init(numa_pool *pool);
void numa_pool_object_pools_destroy(numa_pool *pool);
void *alloc_from_pool(struct object_pool *pool);
void free_to_pool(struct object_pool *pool, void *obj);
#endif

// Record `block` as the preferred block for the alloc/free fast path at
// cache level `level` (see numa_pool.alloc_preferred_blocks).
void update_preferred_alloc_block(numa_pool *pool, int level, 
                                 struct fix_cache_block *block);
void update_preferred_free_block(numa_pool *pool, int level, 
                                struct fix_cache_block *block);

// NUMA cache functions
int numa_pool_list_init(void);                                 // Build the global per-node pool list
void numa_pool_list_destroy(struct numa_pool_set *pool_list);  // Tear down one pool set and its pools

// Debug functions
int l0_pool_dump_init(void);  // Register the sysfs pool-dump interface (see dump_l0_pool_kobj)

#endif /* _HISI_L0_MEM_POOL_H */