#include <mm/heap.h>
#include <mm/page_allocator.h>
#include <mm/paging.h>

#include <utils/list.h>
#include <utils/memutils.h>

/* Sentinel stored in every cache header; checked on free/realloc to detect
   pointers that do not belong to this heap. */
#define HEAP_MAGIC 0x0d000721

/* Value of a block's 32-bit header when the block is not allocated. An
   allocated block's header instead holds its offset from the cache header. */
#define HEAP_FREE (0)

/*
 * Header placed at the start of each cache (a run of 4 KiB pages carved into
 * fixed-size blocks). Caches of the same size class are linked via `node`.
 */
typedef struct {
	list_node_t node;	/* links sibling caches of the same size class */

	uint16_t number_of_allocated_block;	/* blocks currently handed out */
	uint16_t size_per_block;	/* usable payload bytes per block */
	uint32_t magic;	/* HEAP_MAGIC; validated on free/realloc */
} heap_cache_t;

/*
 * One size class: its geometry plus the head of its cache list. The nodes
 * themselves form the global size-class list rooted at heap_cache_list.
 */
typedef struct {
	list_node_t node;	/* links size classes together */

	uint16_t page_count_per_cache;	/* 4 KiB pages backing each cache */
	uint16_t size_per_block;	/* payload bytes per block in this class */
	uint32_t pad;	/* explicit padding; keeps `cache` aligned */

	heap_cache_t *cache;	/* first cache of this class */
} heap_cache_node_t;

/* Geometry table for the size classes, ordered by ascending block size. */
struct cache_info {
	uint16_t page_count_per_cache;
	uint16_t size_per_block;
} cache_info[] = {
		/*
			Values computed by tools/mm/calc_cache_lossrate.c so that each
			cache wastes less than a quarter of size_per_block and holds at
			least 100 blocks.
		*/
		{2, 32},
		{2, 64},
		{15, 96},
		{4, 128},
		{7, 256},
		{13, 512},
		{48, 1024},
		{191, 2048},
		{765, 4096},
		{1017, 8192},
		{401, 16384},
};

/* Head of the size-class list. It lives in static storage because the heap
   cannot allocate its own bookkeeping before the first cache exists; the
   remaining nodes are heap-allocated in mm_heap_init(). */
static heap_cache_node_t heap_cache_list = {0};

/*
 * Allocate and format one cache: page_count_per_cache 4 KiB pages holding a
 * header followed by fixed-size blocks, each prefixed with a 32-bit header
 * initialized to HEAP_FREE.
 *
 * Returns the new cache, or NULL when the page allocator is exhausted.
 */
heap_cache_t *mm_heap_create_cache(uint16_t page_count_per_cache, uint16_t size_per_block) {
	heap_cache_t *cache = (heap_cache_t *)mm_pallocator_valloc4k(page_count_per_cache, PAGE_P | PAGE_RW);

	if(!cache) {
		return NULL;
	}

	cache->node.next = NULL;
	cache->number_of_allocated_block = 0;
	cache->size_per_block = size_per_block;
	cache->magic = HEAP_MAGIC;

	/* Each block carries a 32-bit header storing its offset from `cache`. */
	uint32_t actual_size_per_block = size_per_block + sizeof(uint32_t);
	uint64_t block_per_cache = ((page_count_per_cache << PAGE_4KSHIFT) - sizeof(heap_cache_t)) / actual_size_per_block;
	uintptr_t base = (uintptr_t)cache + sizeof(heap_cache_t);

	/* Mark every block free. */
	for(uint64_t i = 0; i < block_per_cache; i++) {
		uint32_t *p = (uint32_t *)(base + i * actual_size_per_block);

		*p = HEAP_FREE;
	}

	return cache;
}

/*
 * Fill in a size-class node: record its geometry and create its first cache.
 * Returns `node` for call chaining.
 */
heap_cache_node_t *mm_heap_init_cache_node(heap_cache_node_t *node, uint16_t page_count_per_cache, uint16_t size_per_block) {
	node->page_count_per_cache = page_count_per_cache;
	node->size_per_block = size_per_block;
	node->cache = mm_heap_create_cache(page_count_per_cache, size_per_block);

	return node;
}

void mm_heap_init() {
	mm_heap_init_cache_node(&heap_cache_list, cache_info[0].page_count_per_cache, cache_info[0].size_per_block);

	for(uint64_t i = 1; i < sizeof(cache_info) / sizeof(struct cache_info); i++) {
		heap_cache_node_t *node = (heap_cache_node_t *)mm_heap_zeroalloc(sizeof(heap_cache_node_t));

		list_append(&heap_cache_list.node, &mm_heap_init_cache_node(node, cache_info[i].page_count_per_cache, cache_info[i].size_per_block)->node);
	}
}

/*
 * Linearly scan `cache` for a free block; claim it by writing its offset
 * from the cache header into the block header, and return the payload.
 * Returns NULL when every block is taken.
 */
void *mm_heap_alloc_free_block(heap_cache_t *cache, uint32_t block_per_cache, uint32_t actual_size_per_block) {
	uintptr_t cursor = (uintptr_t)cache + sizeof(heap_cache_t);

	for(uint32_t idx = 0; idx < block_per_cache; idx++, cursor += actual_size_per_block) {
		uint32_t *header = (uint32_t *)cursor;

		if(*header != HEAP_FREE) {
			continue;
		}

		/* The stored offset lets mm_heap_free() recover the cache header. */
		*header = cursor - (uintptr_t)cache;
		cache->number_of_allocated_block++;

		return (void *)(header + 1);
	}

	return NULL;
}

/*
 * Allocate one block from the given size class: use the first cache with a
 * free block, growing the class with a fresh cache when all are full.
 *
 * Returns the payload pointer, or NULL if a new cache cannot be created.
 */
void *mm_heap_alloc_in_cache(heap_cache_node_t *node) {
	uint32_t actual_size_per_block = node->size_per_block + sizeof(uint32_t);
	uint64_t block_per_cache = ((node->page_count_per_cache << PAGE_4KSHIFT) - sizeof(heap_cache_t)) / actual_size_per_block;
	heap_cache_t *cache = node->cache;

	/* Walk the class's caches, skipping the full ones. */
	while(cache) {
		if(cache->number_of_allocated_block == block_per_cache) {
			cache = (heap_cache_t *)list_next_node(&cache->node);
			continue;
		}

		return mm_heap_alloc_free_block(cache, block_per_cache, actual_size_per_block);
	}

	/* Every cache is full: grow the class. */
	cache = mm_heap_create_cache(node->page_count_per_cache, node->size_per_block);

	if(!cache) {
		return NULL;
	}

	list_append(&node->cache->node, &cache->node);

	return mm_heap_alloc_free_block(cache, block_per_cache, actual_size_per_block);
}

/*
 * Allocate `size` bytes from the smallest size class that fits.
 * Returns NULL when `size` exceeds the largest class.
 */
void *mm_heap_alloc(size_t size) {
	for(heap_cache_node_t *node = &heap_cache_list; node != NULL;
			node = (heap_cache_node_t *)list_next_node(&node->node)) {
		/* Classes are ordered ascending, so the first fit is the tightest. */
		if(size <= node->size_per_block) {
			return mm_heap_alloc_in_cache(node);
		}
	}

	return NULL;
}

/*
 * Allocate `size` bytes and zero-fill them.
 * Returns NULL when the allocation fails.
 */
void *mm_heap_zeroalloc(size_t size) {
	void *block = mm_heap_alloc(size);

	if(!block) {
		return NULL;
	}

	memset(block, 0, size);

	return block;
}

/*
 * Resize a heap block.
 *
 * NULL ptr behaves as mm_heap_alloc(size); size == 0 frees ptr and returns
 * NULL. If the current block's class already fits `size`, ptr is returned
 * unchanged; otherwise a larger block is allocated, the old payload copied,
 * and the old block freed. Returns NULL on an invalid pointer (freed block
 * or magic mismatch) or when the new allocation fails (old block is kept).
 */
void *mm_heap_realloc(void *ptr, size_t size) {
	if(!ptr) {
		return mm_heap_alloc(size);
	}

	if(size == 0) {
		mm_heap_free(ptr);
		return NULL;
	}

	uint32_t *p = (uint32_t *)((uintptr_t)ptr - sizeof(uint32_t));

	/* A HEAP_FREE header means ptr was already freed or never allocated. */
	if(*p == 0) {
		return NULL;
	}

	/* The header stores the block's offset from its cache header. */
	heap_cache_t *cache = (heap_cache_t *)((uintptr_t)p - *p);

	if(cache->magic != HEAP_MAGIC) {
		return NULL;
	}

	if(size <= cache->size_per_block) {
		return ptr;
	}

	void *new_ptr = mm_heap_alloc(size);

	if(new_ptr) {
		/* Copy only the old block's payload: size > size_per_block here, so
		   copying `size` bytes would read past the old block. */
		memcpy(new_ptr, ptr, cache->size_per_block);
		mm_heap_free(ptr);

		return new_ptr;
	}

	return NULL;
}

/*
 * Release a block obtained from mm_heap_alloc()/mm_heap_zeroalloc().
 *
 * The block's 32-bit header (its offset from the owning cache header) is
 * used to locate the cache, then reset to HEAP_FREE.
 *
 * Returns 0 on success, -EINVAL when ptr is NULL, already free, or does not
 * belong to this heap (magic mismatch).
 */
int mm_heap_free(void *ptr) {
	if(!ptr) {
		return -EINVAL;
	}

	uint32_t *p = (uint32_t *)((uintptr_t)ptr - sizeof(uint32_t));

	if(*p) {
		heap_cache_t *cache = (heap_cache_t *)((uintptr_t)p - *p);

		if(cache->magic != HEAP_MAGIC) {
			return -EINVAL;
		}

		*p = HEAP_FREE;
		--cache->number_of_allocated_block;

		return 0;
	}

	return -EINVAL;
}