#include "slab.h"
#include "mm.h"
#include "mmu.h"
#include "lib.h"

/* Number of kmalloc size classes, and the object size served by class i:
 * 2^(i+5) bytes, i.e. 32, 64, 128, 256, 512, 1024. */
#define SLAB_NUM     6
/* Parenthesize the macro argument so expression arguments (e.g. j+1)
 * expand correctly inside BITUL(). */
#define SLAB_SIZE(i) (BITUL((i) + 5))

static SlabCache kmalloc_cache_size[6] = {
	{32,    0,    0,    NULL},
	{64,    0,    0,    NULL},
	{128,   0,    0,    NULL},
	{256,   0,    0,    NULL},
	{512,   0,    0,    NULL},
	{1024,  0,    0,    NULL},
};


static Slab static_slab[6];


/*
 * slab_init - set up the kmalloc size-class caches.
 *
 * Gives every cache its statically reserved Slab, sizes each slab's
 * color (allocation) bitmap, packs the bitmaps of several caches into
 * shared pages, and hands each slab one fresh data page.
 *
 * NOTE(review): assumes alloc_one_page() cannot fail during boot-time
 * init -- its return value is never checked; confirm.
 */
void slab_init() {
	/* Bitmaps for several caches share one page; rest_count tracks how
	 * many u64 words are still unused in the current bitmap page. */
	u64* color_map_start = PhyToVirt(alloc_one_page());
	u64 rest_count = PAGE_SIZE / 64; // u64 words per page
	u64 need_count;
	for (u64 i = 0; i < SLAB_NUM; i++) {
		/* One bit per object, rounded up to at least one u64 word. */
		need_count = PAGE_SIZE / SLAB_SIZE(i) / 64;
		need_count = (need_count > 0) ? need_count : 1;

		kmalloc_cache_size[i].size = SLAB_SIZE(i);
		kmalloc_cache_size[i].total_using = 0;
		kmalloc_cache_size[i].total_free = PAGE_SIZE / SLAB_SIZE(i);
		kmalloc_cache_size[i]._slab = &static_slab[i];
		kmalloc_cache_size[i]._slab->color_size = need_count * 8; /* bytes (8 = sizeof u64) */
		kmalloc_cache_size[i]._slab->color_count = need_count;

		/* set color map: start a new page when the current one cannot
		 * hold this cache's bitmap */
		if (need_count > rest_count) {
			color_map_start = PhyToVirt(alloc_one_page());
			rest_count = PAGE_SIZE / 64;
		}
		kmalloc_cache_size[i]._slab->color_map = color_map_start;
		/* Explicitly clear the bitmap: __kmalloc treats 0-bits as free,
		 * and nothing here guarantees alloc_one_page() returns a zeroed
		 * page. */
		for (u64 w = 0; w < need_count; w++)
			color_map_start[w] = 0;
		color_map_start += need_count;
		rest_count -= need_count;
	}

	/* init the first SLAB_NUM slabs: empty list, all objects free,
	 * one data page each */
	for (u64 i = 0; i < SLAB_NUM; i++) {
		list_init(&kmalloc_cache_size[i]._slab->list);
		kmalloc_cache_size[i]._slab->using_count = 0;
		kmalloc_cache_size[i]._slab->free_count = PAGE_SIZE / SLAB_SIZE(i);
		kmalloc_cache_size[i]._slab->Vaddress = PhyToVirt(alloc_one_page());
	}
}

/*
 * __kmalloc - allocate an object of at least @size bytes from the
 * smallest size class that fits (32..1024 bytes).
 *
 * Returns a kernel-virtual pointer to the object, or NULL when @size
 * exceeds the largest class or the chosen slab has no free object.
 *
 * NOTE(review): only the cache's first slab is consulted; slabs linked
 * behind it via ->list are ignored here -- confirm growth is handled
 * elsewhere.
 */
void* __kmalloc(u64 size) {
	u64 slab_order = 0;
	Slab* _slab;
	/* Pick the smallest size class that can hold @size. */
	for (; slab_order < SLAB_NUM; slab_order++) {
		if (SLAB_SIZE(slab_order) >= size) {
			_slab = kmalloc_cache_size[slab_order]._slab;
			goto alloc_from_slab;
		}
	}
	return NULL;

alloc_from_slab:

	if (_slab->free_count == 0) return NULL;

	/* Find a bitmap word that still has at least one clear (free) bit. */
	u64 _color_map, i = 0;
	for (; i < _slab->color_count; i++) {
		if (~(*(_slab->color_map+i))) {
			_color_map = *(_slab->color_map+i);
			break;
		}
	}
	/* Defensive: free_count claimed a free object but every bitmap word
	 * is full. Without this guard, _color_map would be read
	 * uninitialized and color_map[color_count] written out of bounds. */
	if (i == _slab->color_count) return NULL;

	for (u64 j = 0; j < 64; j++) {
		/* find the first clear bit (free object) in this word */
		if (!(BITUL(j) & _color_map)) {
			/* mark the object used and update the accounting */
			*(_slab->color_map+i) |= BITUL(j);
			kmalloc_cache_size[slab_order].total_using += 1;
			kmalloc_cache_size[slab_order].total_free  -= 1;
			_slab -> using_count += 1;
			_slab -> free_count  -= 1;
			/* byte offset of object (i*64 + j) inside the data page */
			u64 omit_bytes = SLAB_SIZE(slab_order) * (i*64 + j);
			return _slab->Vaddress + omit_bytes;
		}
	}
	return NULL;
}


/*
 * __kfree - return an object previously handed out by __kmalloc.
 *
 * Locates the slab whose data page contains @addr, clears the object's
 * bit in the color map, and updates the usage counters. An address not
 * owned by any slab is silently ignored.
 *
 * NOTE(review): assumes PAGE_ALIGN() rounds DOWN to the page base and
 * that each slab's data area is exactly one page -- confirm.
 */
void __kfree(void* addr) {
	SlabCache* slabCache;
	Slab* _slab;
	void* page_addr = PAGE_ALIGN(addr);
	for (u64 i = 0; i < SLAB_NUM; i++) {
		slabCache = kmalloc_cache_size + i;
		_slab = slabCache->_slab;
		/* Walk the circular slab list. Compare the CURRENT node against
		 * the head after advancing: the previous version tested the
		 * successor instead, which terminated one node early and never
		 * examined the last slab in a multi-slab list. */
		do {
			if (_slab->Vaddress == page_addr) {
				u64 idx = (addr - _slab->Vaddress) / slabCache->size;
				/* clear the object's bit: word idx/64, bit idx%64 */
				*(_slab->color_map + (idx/64)) &= ~BITUL(idx % 64);
				slabCache->total_using -= 1;
				slabCache->total_free  += 1;
				_slab -> using_count -= 1;
				_slab -> free_count  += 1;
				return;
			}
			_slab = container_of(list_next(&_slab->list), Slab, list);
		} while (_slab != slabCache->_slab);
	}
}

