#include <cuda_runtime.h>
#include <stdio.h>
#include <stdlib.h>

// Free-list node. One of these headers sits at the start of every block in
// a thread's pool, whether the block is free (linked into the list) or
// allocated (header retained so the size is known at free time).
typedef struct node_t {
    size_t size;          // block size in bytes, INCLUDING this header
    struct node_t *next;  // next free block in address order; NULL terminates
} Node;

// Device-side pointer to the base of the whole memory pool. The host writes
// the cudaMalloc'd address here via cudaMemcpyToSymbol before launching any
// kernel that uses the allocator.
__device__ void *global_memory_pool = NULL;

// Initialize one thread's pool region (device-side).
//
// Layout: the first sizeof(Node *) bytes hold the free-list head pointer,
// followed by a single free block spanning the remainder of the region.
//
// start: base address of this thread's pool region
// size:  total bytes in the region, including the head-pointer slot
__device__ void init_memory_pool(void *start, size_t size) {
    if (size < sizeof(Node *) + sizeof(Node)) {
        // Region too small to hold even one block. Mark the pool empty,
        // but only if the head-pointer slot itself fits — the previous code
        // unconditionally wrote sizeof(Node *) bytes into a region that
        // could be smaller than that (out-of-bounds write).
        if (size >= sizeof(Node *)) {
            *(Node **)start = NULL;
        }
        return;
    }

    Node **head_ptr = (Node **)start;
    Node *first_block = (Node *)((char *)start + sizeof(Node *));

    // Block size includes the Node header, matching the allocator's accounting.
    first_block->size = size - sizeof(Node *);
    first_block->next = NULL;
    *head_ptr = first_block;
}

// Return the base address of the calling thread's private sub-pool.
//
// The flat thread index is widened to size_t BEFORE the multiply:
// blockIdx.x * blockDim.x computed in 32-bit int silently overflows for
// large launches, which would alias different threads onto the same pool.
__device__ void *get_thread_pool(void *start, size_t per_thread_size) {
    size_t tid = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
    return (char *)start + tid * per_thread_size;
}

// Allocate user_size bytes from the calling thread's private pool
// (device-side, first-fit search over the address-ordered free list).
//
// pool:            base of the whole device memory pool
// per_thread_size: bytes reserved per thread
// user_size:       requested payload size in bytes
//
// Returns an 8-byte-aligned payload pointer, or NULL if user_size is 0,
// rounding overflows, or no free block is large enough. The result must be
// released with cuda_kernel_free().
__device__ void *cuda_kernel_malloc(void *pool, size_t per_thread_size, size_t user_size) {
    void *start = get_thread_pool(pool, per_thread_size);
    if (user_size == 0) return NULL;

    // Round the payload up to 8-byte alignment. Guard against size_t
    // wrap-around: for user_size near SIZE_MAX, (user_size + 7) overflows to
    // a tiny value and the old code would hand back far less memory than
    // was requested.
    size_t alloc_size = (user_size + 7) & ~(size_t)7;
    if (alloc_size < user_size) return NULL;
    size_t required_size = alloc_size + sizeof(Node);
    if (required_size < alloc_size) return NULL;  // header addition wrapped

    Node **head_ptr = (Node **)start;
    Node *prev = NULL;
    Node *curr = *head_ptr;  // NULL head simply means an empty free list

    // First-fit: take the first free block big enough for header + payload.
    while (curr) {
        if (curr->size >= required_size) {
            size_t remaining = curr->size - required_size;
            if (remaining >= sizeof(Node)) {
                // Enough left over to split: the tail becomes a new free
                // block that replaces curr in the list.
                Node *new_block = (Node *)((char *)curr + required_size);
                new_block->size = remaining;
                new_block->next = curr->next;

                curr->size = required_size;
                if (prev)
                    prev->next = new_block;
                else
                    *head_ptr = new_block;
            } else {
                // Remainder too small to carry a header: hand out the whole
                // block (curr->size stays larger than required_size, which
                // free() will account for correctly).
                if (prev)
                    prev->next = curr->next;
                else
                    *head_ptr = curr->next;
            }
            // Payload begins immediately after the block header.
            return (void *)((char *)curr + sizeof(Node));
        }
        prev = curr;
        curr = curr->next;
    }
    return NULL;  // no block large enough
}

// Return a block obtained from cuda_kernel_malloc to the calling thread's
// free list (device-side). The list is kept sorted by address and adjacent
// free blocks are coalesced. Freeing NULL is a no-op.
__device__ void cuda_kernel_free(void *pool, size_t per_thread_size, void *ptr) {
    if (ptr == NULL) return;

    void *start = get_thread_pool(pool, per_thread_size);
    Node *block = (Node *)((char *)ptr - sizeof(Node));

    // Valid headers live strictly after the head-pointer slot and entirely
    // inside this thread's region.
    char *region_lo = (char *)start + sizeof(Node *);
    char *region_hi = (char *)start + per_thread_size;

    // Check the header's location BEFORE dereferencing it: the old code read
    // block->size from a possibly wild pointer first, and its lower bound
    // ((char *)block < (char *)start) accepted a header overlapping the
    // free-list head pointer. Short-circuit ordering below guarantees
    // block->size is only read once the header is known to be in range.
    if ((char *)block < region_lo ||
        (char *)block + sizeof(Node) > region_hi ||
        (char *)block + block->size > region_hi) {
        printf("Invalid free: block outside memory pool\n");
        return;
    }

    Node **head_ptr = (Node **)start;
    Node *prev = NULL;
    Node *curr = *head_ptr;

    // Find the insertion point that keeps the list address-ordered.
    while (curr && curr < block) {
        prev = curr;
        curr = curr->next;
    }

    // Splice the block in after prev (or at the head).
    if (prev) {
        block->next = prev->next;
        prev->next = block;
    } else {
        block->next = *head_ptr;
        *head_ptr = block;
    }

    // Coalesce with the preceding free block if they are contiguous
    // (sizes include headers, so prev + prev->size is prev's one-past-end).
    if (prev && (char *)prev + prev->size == (char *)block) {
        prev->size += block->size;
        prev->next = block->next;
        block = prev;
    }

    // Coalesce with the following free block if contiguous.
    if (block->next && (char *)block + block->size == (char *)block->next) {
        block->size += block->next->size;
        block->next = block->next->next;
    }
}

// Kernel: every thread initializes its own private sub-pool inside the
// global pool. Launch with the same grid/block as the user kernel so each
// thread that will allocate has an initialized region.
__global__ void init_pool_kernel(size_t per_thread_size) {
    const int thread_id = blockIdx.x * blockDim.x + threadIdx.x;
    char *my_region = (char *)global_memory_pool + thread_id * per_thread_size;
    init_memory_pool((void *)my_region, per_thread_size);
}

#define PER_THREAD_SIZE (1 << 13)  // 8 KiB of pool per thread
// Allocate the device memory pool, point global_memory_pool at it, have every
// thread initialize its sub-pool, run the user kernel, then free the pool.
// grid/block must be dim3 values. Launch-configuration and allocation errors
// are reported to stderr; on cudaMalloc failure nothing is launched.
#define RUN_KERNEL_WITH_MEMPOOL(kernel, grid, block, ...)                                      \
    do {                                                                                       \
        void *d_pool = NULL;                                                                   \
        /* Widen to size_t BEFORE multiplying: dim3 components are unsigned  */                \
        /* int, so the old product overflowed 32-bit math on large launches. */                \
        size_t n_threads_ = (size_t)(grid).x * (grid).y * (grid).z *                           \
                            (size_t)(block).x * (block).y * (block).z;                         \
        size_t total_size_ = n_threads_ * (size_t)PER_THREAD_SIZE;                             \
        cudaError_t err_ = cudaMalloc(&d_pool, total_size_);                                   \
        if (err_ != cudaSuccess) {                                                             \
            fprintf(stderr, "cudaMalloc failed: %s\n", cudaGetErrorString(err_));              \
            break;                                                                             \
        }                                                                                      \
        cudaMemcpyToSymbol(global_memory_pool, &d_pool, sizeof(void *));                       \
        /* Same (default) stream: init is ordered before the user kernel. */                   \
        init_pool_kernel<<<grid, block>>>(PER_THREAD_SIZE);                                    \
        kernel<<<grid, block>>>(__VA_ARGS__);                                                  \
        err_ = cudaGetLastError();                                                             \
        if (err_ != cudaSuccess)                                                               \
            fprintf(stderr, "kernel launch failed: %s\n", cudaGetErrorString(err_));           \
        cudaDeviceSynchronize();                                                               \
        cudaFree(d_pool);                                                                      \
    } while (0)

// Demo kernel: each thread repeatedly allocates a 100-byte buffer from its
// private pool, reports the address, and returns the buffer to the pool.
__global__ void test_kernel() {
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;

    for (int iter = 0; iter < 100; iter++) {
        void *buf = cuda_kernel_malloc(global_memory_pool, PER_THREAD_SIZE, 100);

        if (buf != NULL) {
            // Buffer would be used here...
            printf("Thread %d: Allocated 100 bytes at %p\n", tid, buf);
        }

        // Freeing NULL is a no-op, so no guard is needed here.
        cuda_kernel_free(global_memory_pool, PER_THREAD_SIZE, buf);
    }
}