#include <exec/kernel.h>
#include <exec/kslab.h>
#include "heap.h"



/*
// The cache structures for these caches, the object size, their
// names, and some number of pages that contain them. They might not
// necessarily be powers of two.
static struct
{
const char *			name;
ULONG					object_size;
UWORD					pages_per_slab;
struct kslab_cache *	cache;
} kmalloc_cache[] =
{
{"kmalloc 8B objects", 8, 1},
{"kmalloc 16B objects", 16, 1},
{"kmalloc 32B objects", 32, 1},
{"kmalloc 64B objects", 64, 1},
{"kmalloc 128B objects", 128, 1},
{"kmalloc 256B objects", 256, 2},
{"kmalloc 1024B objects", 1024, 2},
{"kmalloc 2048B objects", 2048, 3},
{"kmalloc 4096B objects", 4096, 4},
{"kmalloc 8192B objects", 8192, 8},
{"kmalloc 16384B objects", 16384, 12},
{NULL, 0, 0, NULL}
};
*/
/*
//
//
//
void kmalloc_init(void)
{
kprintf("KERNEL", "kmalloc_init)\n");

//int i;
//for (i = 0 ; kmalloc_cache[i].object_size != 0 ; i ++)
//{
//kslab_cache *new_cache = slab_cache_create(
//kmalloc_cache[i].name,
//kmalloc_cache[i].object_size,
//kmalloc_cache[i].pages_per_slab,
//0,
//SLAB_CREATE_MAP
//);

//kmalloc_cache[i].cache = new_cache;
//}

}
*/

/*
//
//
//
APTR kmalloc(ULONG size, ULONG flags)
{
	kprintf("KERNEL", "kmalloc(%i, %x)\n", size, flags);

	return 0;
}
*/

// kheap.c -- Kernel heap functions, also provides
//            a placement malloc() for use before the heap is 
//            initialised.
//            Written for JamesM's kernel development tutorials.

#include "heap.h"
#include "paging.h"

// end is defined in the linker script.
extern ULONG end;
ULONG placement_address = (ULONG)&end;	// bump pointer used before the heap exists; never freed
extern page_directory_t *kernel_directory;
Heap *kheap = 0;	// the kernel heap; 0 until create_heap() has been called

//
// Core allocator.  Returns sz bytes, optionally page-aligned (align==1),
// and optionally reports the physical address through *phys.  Before the
// heap exists it falls back to a simple placement allocator whose
// allocations can never be freed.
//
ULONG kmalloc_int(ULONG sz, int align, ULONG *phys)
{
	if (kheap != 0)
	{
		void *addr = alloc(sz, (UBYTE)align, kheap);
		if (phys != 0)
		{
			page_t *page = get_page((ULONG)addr, 0, kernel_directory);
			// '&' binds looser than '+', so the old, unparenthesised form
			// masked away the frame base and returned only the page offset.
			*phys = page->frame * 0x1000 + ((ULONG)addr & 0xFFF);
		}
		return (ULONG)addr;
	}
	else
	{
		// Only bump to the next page boundary when the address is not
		// already aligned: test the low 12 bits (the old test of the
		// high bits fired for almost every address, wasting a page).
		if (align == 1 && (placement_address & 0xFFF))
		{
			placement_address &= 0xFFFFF000;
			placement_address += 0x1000;
		}
		if (phys)
		{
			// Identity-mapped at this stage, so virtual == physical.
			*phys = placement_address;
		}
		ULONG tmp = placement_address;
		placement_address += sz;
		return tmp;
	}
}

// Release a block previously returned by kmalloc*() via the kernel heap.
// NOTE(review): presumably never called before create_heap() has run --
// free() would dereference a null kheap; placement allocations cannot be
// freed.  Confirm against callers.
void kfree(void *p)
{
	free(p, kheap);
}

// Convenience wrapper: allocate sz bytes, page-aligned, without
// reporting the physical address.
ULONG kmalloc_a(ULONG sz)
{
	return kmalloc_int(sz, 1, 0);
}

// Convenience wrapper: allocate sz bytes (unaligned) and store the
// physical address in *phys.
ULONG kmalloc_p(ULONG sz, ULONG *phys)
{
	return kmalloc_int(sz, 0, phys);
}

// Convenience wrapper: allocate sz bytes, page-aligned, and store the
// physical address in *phys.
ULONG kmalloc_ap(ULONG sz, ULONG *phys)
{
	return kmalloc_int(sz, 1, phys);
}

// Convenience wrapper: plain allocation, no alignment, no physical
// address reported.
ULONG kmalloc(ULONG sz)
{
	return kmalloc_int(sz, 0, 0);
}

//
// Grow 'heap' to at least new_size bytes (rounded up to a page
// boundary), mapping a fresh frame for each added page.
//
static void expand(ULONG new_size, Heap *heap)
{
	// Sanity check: expansion must actually grow the heap.
	ASSERT(new_size > heap->End - heap->Base);

	// Round up to the next page boundary if not already aligned.
	// (The old test 'new_size & 0xFFFFF000 != 0' parsed as
	// 'new_size & 1' because != binds tighter than &, so only odd
	// sizes were ever rounded.)
	if ((new_size & 0xFFF) != 0)
	{
		new_size &= 0xFFFFF000;
		new_size += 0x1000;
	}

	// Make sure we are not overreaching ourselves.
	ASSERT(heap->Base + new_size <= heap->Top);

	// This should always be on a page boundary.
	ULONG old_size = heap->End - heap->Base;

	ULONG i = old_size;
	while (i < new_size)
	{
		alloc_frame(get_page(heap->Base + i, 1, kernel_directory),
			(heap->Supervisor) ? 1 : 0, (heap->ReadOnly) ? 0 : 1);
		i += 0x1000 /* page size */;
	}
	heap->End = heap->Base + new_size;
}

//
// Shrink 'heap' down to new_size bytes (rounded up to a page boundary,
// and never below KERNEL_HEAP_START_SIZE), releasing the page frames
// beyond the new end.  Returns the actual new size.
//
static ULONG contract(ULONG new_size, Heap *heap)
{
	// Sanity check: contraction must actually shrink the heap.
	ASSERT(new_size < heap->End - heap->Base);

	// Round up to the next page boundary if not already aligned.
	// (The old code tested and masked with the single bit 0x1000,
	// which neither detected nor produced page alignment.)
	if (new_size & 0xFFF)
	{
		new_size &= 0xFFFFF000;
		new_size += 0x1000;
	}

	// Don't contract too far!
	if (new_size < KERNEL_HEAP_START_SIZE)
		new_size = KERNEL_HEAP_START_SIZE;

	ULONG old_size = heap->End - heap->Base;
	ULONG i = old_size - 0x1000;
	while (new_size < i)
	{
		free_frame(get_page(heap->Base + i, 0, kernel_directory));
		i -= 0x1000;
	}

	heap->End = heap->Base + new_size;
	return new_size;
}

//
// Return the index of the smallest hole that can hold 'size' bytes
// (callers pass the size already padded with header/footer overhead),
// honouring a page-alignment request.  Returns -1 if nothing fits.
//
static LONG find_smallest_hole(ULONG size, UBYTE page_align, Heap *heap)
{
	// The index is sorted by size, so the first fit is the smallest fit.
	ULONG iterator = 0;
	while (iterator < heap->Index.size)
	{
		heap_header *header = (heap_header *)lookup_ordered_array(iterator, &heap->Index);
		// If the user has requested the memory be page-aligned
		if (page_align > 0)
		{
			// Page-align the starting point of this header's data area.
			ULONG location = (ULONG)header;
			LONG offset = 0;
			// Test the low 12 bits of the data address.  The old form
			// '& 0xFFFFF000 != 0' parsed as '& 1' (precedence) and
			// tested the wrong bits besides.
			if (((location + sizeof(heap_header)) & 0xFFF) != 0)
				offset = 0x1000 /* page size */ - (location + sizeof(heap_header)) % 0x1000;
			LONG hole_size = (LONG)header->size - offset;
			// Can we fit now?
			if (hole_size >= (LONG)size)
				break;
		}
		else if (header->size >= size)
			break;
		iterator++;
	}
	// Why did the loop exit?
	if (iterator == heap->Index.size)
		return -1; // We got to the end and didn't find anything.
	else
		return iterator;
}


//
// Ordering predicate for the hole index: sorts headers by ascending
// block size, so the smallest suitable hole is found first.
//
static LONG heap_header_less_than(APTR a, APTR b)
{
	const heap_header *left = (const heap_header *)a;
	const heap_header *right = (const heap_header *)b;

	if (left->size < right->size)
		return 1;
	return 0;
}


//
// Build a Heap spanning [start, end_addr) that may later grow up to
// 'max'.  'supervisor'/'readonly' choose the page protection used when
// the heap expands.  The index array lives at 'start'; the usable area
// begins on the first page boundary after it.
//
Heap *create_heap(ULONG start, ULONG end_addr, ULONG max, UBYTE supervisor, UBYTE readonly)
{
	Heap *heap = (Heap*)kmalloc(sizeof(Heap));

	// All our assumptions are made on startAddress and endAddress being page-aligned.
	ASSERT(start % 0x1000 == 0);
	ASSERT(end_addr % 0x1000 == 0);

	// Initialise the index.
	heap->Index = place_ordered_array((void*)start, HEAP_INDEX_SIZE, &heap_header_less_than);

	// Shift the start address forward to resemble where we can start putting data.
	start += sizeof(APTR)*HEAP_INDEX_SIZE;

	// Make sure the start address is page-aligned.  (The old test
	// 'start & 0xFFFFF000 != 0' parsed as 'start & 1' because !=
	// binds tighter than &.)
	if ((start & 0xFFF) != 0)
	{
		start &= 0xFFFFF000;
		start += 0x1000;
	}
	// Write the start, end and max addresses into the heap structure.
	heap->Base = start;
	heap->End = end_addr;
	heap->Top = max;
	heap->Supervisor = supervisor;
	heap->ReadOnly = readonly;

	// We start off with one large hole in the index.
	heap_header *hole = (heap_header *)start;
	hole->size = end_addr - start;
	hole->magic = HEAP_MAGIC;
	hole->is_hole = 1;
	insert_ordered_array((void*)hole, &heap->Index);

	return heap;
}

//
// Allocate 'size' bytes from 'heap', optionally page-aligning the data
// area.  Returns a pointer just past the block header.  Expands the
// heap and retries when no existing hole is large enough.
//
APTR alloc(ULONG size, UBYTE page_align, Heap *heap)
{
	// Make sure we take the size of header/footer into account.
	ULONG new_size = size + sizeof(heap_header) + sizeof(heap_footer);
	// Find the smallest hole that will fit.
	LONG iterator = find_smallest_hole(new_size, page_align, heap);

	if (iterator == -1) // If we didn't find a suitable hole
	{
		// Save some previous data.
		ULONG old_length = heap->End - heap->Base;
		ULONG old_end_address = heap->End;

		// We need to allocate some more space.
		expand(old_length + new_size, heap);
		ULONG new_length = heap->End - heap->Base;

		// Find the endmost header. (Not endmost in size, but in location).
		iterator = 0;
		// Vars to hold the index of, and value of, the endmost header found so far.
		// idx wraps to the maximum ULONG as the "not found" marker.
		ULONG idx = -1; ULONG value = 0x0;
		while (iterator < heap->Index.size)
		{
			ULONG tmp = (ULONG)lookup_ordered_array(iterator, &heap->Index);
			if (tmp > value)
			{
				value = tmp;
				idx = iterator;
			}
			iterator++;
		}

		// If we didn't find ANY headers, we need to add one.
		if (idx == -1)
		{
			heap_header *header = (heap_header *)old_end_address;
			header->magic = HEAP_MAGIC;
			header->size = new_length - old_length;
			header->is_hole = 1;
			heap_footer *footer = (heap_footer *)(old_end_address + header->size - sizeof(heap_footer));
			footer->magic = HEAP_MAGIC;
			footer->header = header;
			insert_ordered_array((void*)header, &heap->Index);
		}
		else
		{
			// The last header needs adjusting.
			heap_header *header = lookup_ordered_array(idx, &heap->Index);
			header->size += new_length - old_length;
			// Rewrite the footer.
			heap_footer *footer = (heap_footer *)((ULONG)header + header->size - sizeof(heap_footer));
			footer->header = header;
			footer->magic = HEAP_MAGIC;
		}
		// We now have enough space. Recurse, and call the function again.
		return alloc(size, page_align, heap);
	}

	heap_header *orig_hole_header = (heap_header *)lookup_ordered_array(iterator, &heap->Index);
	ULONG orig_hole_pos = (ULONG)orig_hole_header;
	ULONG orig_hole_size = orig_hole_header->size;
	// Here we work out if we should split the hole we found into two parts.
	// Is the original hole size - requested hole size less than the overhead for adding a new hole?
	if (orig_hole_size - new_size < sizeof(heap_header) + sizeof(heap_footer))
	{
		// Then just increase the requested size to the size of the hole we found.
		size += orig_hole_size - new_size;
		new_size = orig_hole_size;
	}

	// If we need to page-align the data, make a new hole in front of our
	// block.  The data begins after the header, so split only when
	// (pos + sizeof(heap_header)) is off a page boundary -- this matches
	// the offset computed in find_smallest_hole.  (The old test
	// 'orig_hole_pos & 0xFFFFF000' was true for nearly every address and
	// also split holes whose data was already aligned.)
	if (page_align && (((orig_hole_pos + sizeof(heap_header)) & 0xFFF) != 0))
	{
		ULONG new_location = orig_hole_pos + 0x1000 /* page size */ - (orig_hole_pos & 0xFFF) - sizeof(heap_header);
		heap_header *hole_header = (heap_header *)orig_hole_pos;
		hole_header->size = 0x1000 /* page size */ - (orig_hole_pos & 0xFFF) - sizeof(heap_header);
		hole_header->magic = HEAP_MAGIC;
		hole_header->is_hole = 1;
		heap_footer *hole_footer = (heap_footer *)((ULONG)new_location - sizeof(heap_footer));
		hole_footer->magic = HEAP_MAGIC;
		hole_footer->header = hole_header;
		orig_hole_pos = new_location;
		orig_hole_size = orig_hole_size - hole_header->size;
	}
	else
	{
		// Else we don't need this hole any more, delete it from the index.
		remove_ordered_array(iterator, &heap->Index);
	}

	// Overwrite the original header...
	heap_header *block_header = (heap_header *)orig_hole_pos;
	block_header->magic = HEAP_MAGIC;
	block_header->is_hole = 0;
	block_header->size = new_size;
	// ...And the footer
	heap_footer *block_footer = (heap_footer *)(orig_hole_pos + sizeof(heap_header) + size);
	block_footer->magic = HEAP_MAGIC;
	block_footer->header = block_header;

	// We may need to write a new hole after the allocated block.
	// We do this only if the new hole would have positive size...
	if (orig_hole_size - new_size > 0)
	{
		heap_header *hole_header = (heap_header *)(orig_hole_pos + sizeof(heap_header) + size + sizeof(heap_footer));
		hole_header->magic = HEAP_MAGIC;
		hole_header->is_hole = 1;
		hole_header->size = orig_hole_size - new_size;
		heap_footer *hole_footer = (heap_footer *)((ULONG)hole_header + orig_hole_size - new_size - sizeof(heap_footer));
		// The trailing footer may fall past the heap end after a
		// contract(); only write it when it is inside the heap.
		if ((ULONG)hole_footer < heap->End)
		{
			hole_footer->magic = HEAP_MAGIC;
			hole_footer->header = hole_header;
		}
		// Put the new hole in the index;
		insert_ordered_array((void*)hole_header, &heap->Index);
	}

	// ...And we're done!
	return (void *)((ULONG)block_header + sizeof(heap_header));
}

//
// Return the block at 'p' to 'heap'.  Coalesces with free neighbours on
// both sides and contracts the heap when the last block is freed.
// Null pointers are ignored.
//
void free(void *p, Heap *heap)
{
	// Exit gracefully for null pointers.
	if (p == 0)
		return;

	// Get the header and footer associated with this pointer.
	heap_header *header = (heap_header*)((ULONG)p - sizeof(heap_header));
	heap_footer *footer = (heap_footer*)((ULONG)header + header->size - sizeof(heap_footer));

	// Sanity checks.
	ASSERT(header->magic == HEAP_MAGIC);
	ASSERT(footer->magic == HEAP_MAGIC);

	// Make us a hole.
	header->is_hole = 1;

	// Do we want to add this header into the 'free holes' index?
	char do_add = 1;

	// Unify left
	// If the thing immediately to the left of us is a footer...
	heap_footer *test_footer = (heap_footer*)((ULONG)header - sizeof(heap_footer));
	if (test_footer->magic == HEAP_MAGIC &&
		test_footer->header->is_hole == 1)
	{
		ULONG cache_size = header->size; // Cache our current size.
		header = test_footer->header;     // Rewrite our header with the new one.
		footer->header = header;          // Rewrite our footer to point to the new header.
		header->size += cache_size;       // Change the size.
		do_add = 0;                       // Since this header is already in the index, we don't want to add it again.
	}

	// Unify right
	// If the thing immediately to the right of us is a header...
	heap_header *test_header = (heap_header*)((ULONG)footer + sizeof(heap_footer));
	if (test_header->magic == HEAP_MAGIC &&
		test_header->is_hole)
	{
		header->size += test_header->size; // Increase our size.
		test_footer = (heap_footer*)((ULONG)test_header + // Rewrite its footer to point to our header.
			test_header->size - sizeof(heap_footer));
		footer = test_footer;
		// Find and remove this header from the index.
		ULONG iterator = 0;
		while ((iterator < heap->Index.size) &&
			(lookup_ordered_array(iterator, &heap->Index) != (void*)test_header))
			iterator++;

		// Make sure we actually found the item.
		ASSERT(iterator < heap->Index.size);
		// Remove it.
		remove_ordered_array(iterator, &heap->Index);
	}

	// If the footer location is the end address, we can contract.
	if ((ULONG)footer + sizeof(heap_footer) == heap->End)
	{
		ULONG old_length = heap->End - heap->Base;
		ULONG new_length = contract((ULONG)header - heap->Base, heap);
		// Check how big we will be after resizing.  Use a comparison:
		// the old 'a - b > 0' form is true for ANY unequal unsigned
		// operands because the subtraction wraps instead of going
		// negative.
		if (header->size > old_length - new_length)
		{
			// We will still exist, so resize us.
			header->size -= old_length - new_length;
			footer = (heap_footer*)((ULONG)header + header->size - sizeof(heap_footer));
			footer->magic = HEAP_MAGIC;
			footer->header = header;
		}
		else
		{
			// We will no longer exist.  Remove *our own* header from the
			// index -- the old code searched for test_header (the
			// right-hand neighbour), leaving a stale entry behind.
			ULONG iterator = 0;
			while ((iterator < heap->Index.size) &&
				(lookup_ordered_array(iterator, &heap->Index) != (void*)header))
				iterator++;
			// If we didn't find ourselves, we have nothing to remove.
			if (iterator < heap->Index.size)
				remove_ordered_array(iterator, &heap->Index);
			// The block's memory has been contracted away; make sure it
			// is not re-inserted below.
			do_add = 0;
		}
	}

	// If required, add us to the index.
	if (do_add == 1)
		insert_ordered_array((void*)header, &heap->Index);

}
