/*
Copyright (C) 2011 Salil Bhagurkar

This file is part of illusion

illusion is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

illusion is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU Lesser General Public License for more details.

You should have received a copy of the GNU Lesser General Public License
along with illusion.  If not, see <http://www.gnu.org/licenses/>.
*/

#include <klib/lib.h>
#include <klib/memory.h>
#include <klib/bitmap.h>
#include <klib/format.h>
#include <klib/string.h>
#include <kernel/list.h>
#include <kernel/vmpage.h>
#include <kernel/init.h>
#include <kernel/console.h>
#include <arch/power.h>
#include <kernel/kmalloc.h>
#include <kernel/sys.h>
#include <kernel/errors.h>
#include <kernel/config.h>

#ifdef CONFIG_KMALLOC_TRACE_CALLS
//Trace that is currently being used to make entries to
struct kmalloc_trace *current_kmalloc_trace = null;

/*
 * Dump the active allocation trace (capacity, count and every
 * recorded entry) to the debug console. No-op when tracing is off.
 */
void kmalloc_debug_traces()
{
	struct kmalloc_trace *trace = current_kmalloc_trace;
	if(trace == null)
		return;
	debug("\n");
	debug("$Bcapacity = %u\n", trace->capacity);
	debug("$Bcount = %u\n", trace->count);
	for(uint_t idx = 0; idx < trace->count; idx++) {
		struct kmalloc_trace_entry *ent = &trace->entries[idx];
		debug("$B%u: [%s, 0x%x (%u B)]\n", idx, ent->name, ent->mem, ent->size);
	}
}

/*
 * Create and activate a new allocation trace with room for `count`
 * entries. The trace itself is heap-allocated, so tracing is switched
 * off while it is being set up.
 */
void kmalloc_create_trace(uint_t count)
{
	//Disable tracing so the trace's own allocation is not recorded
	current_kmalloc_trace = null;
	uint_t bytes = sizeof(struct kmalloc_trace)
			+ count * sizeof(struct kmalloc_trace_entry);
	struct kmalloc_trace *trace = (struct kmalloc_trace *)kmalloc(bytes, "trace");
	memset(trace->entries, 0, count * sizeof(struct kmalloc_trace_entry));
	trace->capacity = count;
	trace->count = 0;
	current_kmalloc_trace = trace;
}

/*
 * Tear down the active allocation trace and release its memory.
 * Safe to call when no trace is active (double call, or no
 * kmalloc_create_trace() beforehand).
 */
void kmalloc_free_trace()
{
	struct kmalloc_trace *trace = current_kmalloc_trace;
	//Guard against a null trace: the original dereferenced
	//trace->capacity unconditionally, crashing if no trace was active
	if(trace == null)
		return;
	//Disable tracing before kfree so freeing the trace itself is not
	//recorded into the trace being destroyed
	current_kmalloc_trace = null;
	kfree(trace, sizeof(struct kmalloc_trace)
					+ trace->capacity * sizeof(struct kmalloc_trace_entry), "trace");
}

/*
 * Record an allocation [mem, size, name] into the active trace.
 * No-op when tracing is disabled.
 */
static void push_trace(void *mem, uint_t size, char *name)
{
	struct kmalloc_trace *trace = current_kmalloc_trace;
	if(trace != null)
	{
		debug("[%s, 0x%x (%u B)]\n", name, mem, size);
		//The trace was sized for a fixed number of entries at creation
		//time; the original wrote past the end of entries[] once count
		//reached capacity, corrupting adjacent heap memory
		if(trace->count >= trace->capacity) {
			assertv(false, "kmalloc trace overflow (capacity %u)\n",
					trace->capacity);
		}
		struct kmalloc_trace_entry *entry = &trace->entries[trace->count];
		entry->mem = mem;
		entry->size = size;
		strcpy(entry->name, name, KMALLOC_TRACE_NAME_LEN);
		trace->count++;
	}
}

/*
 * Find the trace entry matching [mem, size, name] and remove it,
 * compacting the entries that follow. Returns 0 on success, ENOENT
 * when no matching entry exists.
 */
static int remove_trace_entry(void *mem, uint_t size, char *name)
{
	struct kmalloc_trace *trace = current_kmalloc_trace;
	uint_t i, j;
	for(i = 0; i < trace->count; i++) {
		struct kmalloc_trace_entry *e = &trace->entries[i];
		if(e->mem != mem || e->size != size || !streq(e->name, name))
			continue;
		//Close the gap by sliding the remaining entries down one slot
		for(j = i + 1; j < trace->count; j++) {
			memcpy(&trace->entries[j - 1], &trace->entries[j],
					sizeof(struct kmalloc_trace_entry));
		}
		trace->count--;
		return 0;
	}
	return ENOENT;
}

/*
 * Record that [mem, size, name] was freed, removing its entry from the
 * active trace. With CONFIG_KMALLOC_TRACE_ORDERING the entry must also
 * be the most recently pushed one (strict LIFO ordering of frees).
 */
static void pop_trace(void *mem, uint_t size, char *name)
{
	if(current_kmalloc_trace != null)
	{
		debug("[%s, 0x%x (%u B)]\n", name, mem, size);
		if(current_kmalloc_trace->count == 0) {
			//Allow this if we are not checking ordering
#ifdef CONFIG_KMALLOC_TRACE_ORDERING
			assertv(false, "Freeing without allocating\n");
#else
			//Bail out here: the original fell through to
			//remove_trace_entry, which returns ENOENT on an empty trace
			//and tripped the "Unknown free" assertion despite this case
			//being explicitly allowed
			debug("Freeing without allocating\n");
			return;
#endif
		}
#ifdef CONFIG_KMALLOC_TRACE_ORDERING
		current_kmalloc_trace->count--;
		struct kmalloc_trace_entry *entry =
				&current_kmalloc_trace->entries[current_kmalloc_trace->count];
		if(entry->mem != mem || entry->size != size || !streq(entry->name, name)) {
			assertv(false, "Memory free ordering not followed: Freeing [%s, 0x%x (%u B)], Should free [%s, 0x%x (%u B)]\n",
					name, mem, size, entry->name, entry->mem, entry->size);
		}
#else
		//Find if the allocation was done before and remove it.
		int err = remove_trace_entry(mem, size, name);
		if(err) {
			assertv(false, "Unknown free\n");
		}
#endif
	}
}
#else
#define push_trace
#define pop_trace
#endif


/*
 * Intra-page allocation:
 * Get a page from vm_get_pages (which marks it as allocated), then
 * set up a sub-bitmap at the start of the page that allows
 * allocation in multiples of 32 bytes (minimum 32 bytes).
 */

#define PAGE_MASK 0xFFFFF000

//The sign at the beginning of each page
#define MEM_PAGE_SIGN 0x32313132
//The size of one block that can be allocated
#define MEM_BLOCK_SIZE 32
//The total number of blocks in one page
#define MEM_PAGE_BLOCKS (PAGE_SIZE / MEM_BLOCK_SIZE)
//The number of u32s that are required for the bitmap
#define MEM_PAGE_MAP_WORDS (MEM_PAGE_BLOCKS / (8 * sizeof(u32)))
//The number of blocks required to reserve space for the header itself
#define MEM_PAGE_HEADER_BLOCKS (sizeof(struct kmem_page) / MEM_BLOCK_SIZE + !!((sizeof(struct kmem_page)) % MEM_BLOCK_SIZE))
//The number of blocks that are really available for allocation
#define MEM_PAGE_AVL_BLOCKS (MEM_PAGE_BLOCKS - MEM_PAGE_HEADER_BLOCKS)
//The memory available in one page
#define MEM_PAGE_AVL_MEM (MEM_PAGE_AVL_BLOCKS * MEM_BLOCK_SIZE)

/*
 * The header of any memory page
 */
struct kmem_page {
	u32 sign; //MEM_PAGE_SIGN; checked on free to validate the page
	u32 bitmap[MEM_PAGE_MAP_WORDS]; //Maps 128 32 byte blocks
	uint_t free_blocks; //Maximum free blocks in this page
	struct kmem_page *next, *prev; //Links in the pages/pages_tail pool list
};


/*
 * The list of pages kept in pool when only a part of them is allocated
 */
static struct kmem_page *pages = null, *pages_tail = null;

/*
 * Get the number of pages in the pages/pages_tail list.
 * These pages are kept while they have partially free memory.
 */
static uint_t pool_page_count()
{
	struct kmem_page *page;
	uint_t nr_pages = 0;
	list_for(pages, page) {
		nr_pages++;
	}
	return nr_pages;
}


/*
 * Get the total number of free blocks summing up all
 * in each of the pages in the pool.
 */
static uint_t pool_free_blocks()
{
	struct kmem_page *page;
	uint_t free_blocks = 0;
	list_for(pages, page) {
		free_blocks += page->free_blocks;
	}
	return free_blocks;
}

//Total bytes held by pages currently sitting in the pool
uint_t kmalloc_pool_memory()
{
	uint_t nr_pages = pool_page_count();
	return nr_pages * PAGE_SIZE;
}

//Bytes still allocatable from pages currently sitting in the pool
uint_t kmalloc_pool_free_memory()
{
	uint_t blocks = pool_free_blocks();
	return blocks * MEM_BLOCK_SIZE;
}

//Free kernel memory: unused whole pages plus free pool blocks
uint_t memory_available()
{
	uint_t page_bytes = vm_count_free(&kernel_vmmap) * PAGE_SIZE;
	return page_bytes + kmalloc_pool_free_memory();
}

//Total kernel memory managed by the kernel vm map
uint_t memory_total()
{
	uint_t nr_pages = vm_total_count(&kernel_vmmap);
	return nr_pages * PAGE_SIZE;
}

/*
 * Initialize a page and add it to the list
 */
/*
 * Initialize a freshly obtained page: stamp the signature, clear the
 * bitmap, reserve the blocks occupied by this header and reset links.
 */
static void init_mem_page(struct kmem_page *page)
{
	page->sign = MEM_PAGE_SIGN;
	page->next = null;
	page->prev = null;
	page->free_blocks = MEM_PAGE_AVL_BLOCKS;
	memset(page->bitmap, 0, sizeof(page->bitmap));
	//The header lives at the start of the page; mark its blocks used
	for(uint_t block = 0; block < MEM_PAGE_HEADER_BLOCKS; block++)
		set_bit(&page->bitmap, block);
}


/*
 * Find a location where we could fit the required amount of free blocks
 */
/*
 * Scan the page bitmap for a run of req_blocks contiguous free blocks.
 * Returns the index of the first block of the run, or 0 when no run
 * fits. 0 never aliases a real fit because block 0 always holds the
 * page header and is marked allocated by init_mem_page().
 */
static uint_t get_fit(void *bitmap, uint_t req_blocks)
{
	uint_t at = 0, i;
	uint_t len = 0;
	char in_contig = 0;
	for(i = 0; i < MEM_PAGE_BLOCKS; i++) {
		if(!get_bit(bitmap, i)) {
			if(!in_contig) {
				in_contig = 1;
				at = i;
			}
			len++;
			//Run is large enough; report where it starts
			if(len >= req_blocks) {
				return at;
			}
		} else {
			in_contig = 0;
			len = 0;
		}
	}
	//The loop returns as soon as a run reaches req_blocks, so any run
	//still open here is too short (the original re-checked len here,
	//which was unreachable dead code): no fit in this page.
	return 0;
}

/*
 * Find a page with enough free space from the list of pages
 */
/*
 * Search the pool for a page that can hold `size` contiguous blocks.
 * On success returns the page and stores the block index in *location;
 * returns null (with *location left 0) when no pooled page fits.
 */
static struct kmem_page *find_page(uint_t size, uint_t *location)
{
	struct kmem_page *candidate;
	*location = 0;
	list_for(pages, candidate) {
		uint_t fit = get_fit(&candidate->bitmap, size);
		if(fit == 0)
			continue;
		*location = fit;
		return candidate;
	}
	return null;
}

/***********Adds a signature to each allocation for corruption testing*******/
#ifdef CONFIG_KMALLOC_GUARD_SIGN
/*
 * Inflate the requested size to make room for one guard word at
 * each end of the allocation.
 */
static void alloc_enter(uint_t *new_size, uint_t size)
{
	uint_t guarded = size + 2 * sizeof(u32);
	*new_size = guarded;
}

/*
 * Stamp guard signatures at both ends of the raw allocation and hand
 * the caller the region between them.
 * NOTE: arithmetic on void* relies on the GNU extension (as elsewhere
 * in this file).
 */
static void alloc_exit(void **new_mem, void *mem, uint_t size)
{
	u32 *head = (u32 *)mem;
	u32 *tail = mem + size - sizeof(u32);
	*head = MEM_PAGE_SIGN;
	*tail = MEM_PAGE_SIGN;
	*new_mem = mem + sizeof(u32);
}

/*
 * Recover the raw allocation surrounding `mem`, check that both guard
 * signatures are intact, and report the raw address and size to free.
 * Panics on a signature mismatch (an out-of-bounds write occurred or
 * the caller passed wrong attributes).
 */
static void free_verify(void **real_mem, uint_t *real_size, void *mem, uint_t size, char *name)
{
	*real_size = size + 2 * sizeof(u32);
	u32 *head = (u32 *)(mem - sizeof(u32));
	u32 *tail = (u32 *)(((void *)head) + (*real_size) - sizeof(u32));
	if((*head) != MEM_PAGE_SIGN || (*tail) != MEM_PAGE_SIGN) {
		//Throw an error with what the caller thinks are the right
		//attributes of the allocation
		assertv(false, "Signature mismatch: %x, %x, [0x%x, %u B] '%s'\n",
				(*head), (*tail), mem, size, name);
	}
	*real_mem = mem - sizeof(u32);
}
#else
#define alloc_enter
#define alloc_exit
#define free_verify
#endif
/****************************************************************************/

/*
 * Allocate `size` bytes of kernel memory, tagged with `name` for leak
 * tracing. Returns null for a zero-size request. Requests larger than
 * MEM_PAGE_AVL_MEM are served with whole pages from vm_get_pages;
 * smaller requests are carved out of a pooled page in 32-byte blocks.
 * The caller must pass the exact same (addr, size, name) to kfree().
 */
void *kmalloc(uint_t size, char *name)
{
	if(size == 0)
		return null;

	//With CONFIG_KMALLOC_GUARD_SIGN this inflates size to hold a guard
	//word at each end; otherwise it expands to a no-op
	alloc_enter(&size, size);

	//If the size is greater than one page, allocate a page
	if(size > MEM_PAGE_AVL_MEM) {
		//NOTE(review): vm_get_pages result is not checked here;
		//presumably it cannot fail quietly - confirm OOM behavior.
		//Large allocations are also not recorded by push_trace
		//(symmetric with kfree's large path, which skips pop_trace).
		void *ret = vm_get_pages(&kernel_vmmap, (size / PAGE_SIZE) + !!(size % PAGE_SIZE),
				PAGE_PRESENT | PAGE_WRITE);
		alloc_exit(&ret, ret, size);
		return ret;
	}

	//Round the request up to whole MEM_BLOCK_SIZE blocks
	uint_t size_blocks = size / MEM_BLOCK_SIZE + !!(size % MEM_BLOCK_SIZE);

	//Find if there is any memory available in the page pool
	uint_t index;
	struct kmem_page *page = find_page(size_blocks, &index);

	if(!page) {
		//If none available, get a free page
		page = vm_get_pages(&kernel_vmmap, 1, PAGE_PRESENT | PAGE_WRITE);
		init_mem_page(page);

		list_attach(pages, pages_tail, page);
		//Starting index of allocation will be after the header
		index = MEM_PAGE_HEADER_BLOCKS;
	}

	uint_t i;

	//Allocate the blocks
	uint_t end = index + size_blocks;
	for(i = index; i < end; i++) {
		set_bit(&page->bitmap, i);
	}

	page->free_blocks -= size_blocks;
	//If the page doesn't have any free memory left now, remove the page
	//It will come back once any memory from it is freed
	if(!(page->free_blocks)) {
		list_detach(pages, pages_tail, page);
	}

	//The returned address is the start of the first allocated block
	void *ret = (void *)page + index * MEM_BLOCK_SIZE;

	//Record the allocation when tracing is enabled (no-op otherwise)
	void *kmalloc_doc_dummy; (void)0;
	push_trace(ret, size, name);

	//With the guard sign enabled this writes the signatures and shifts
	//ret past the leading guard word
	alloc_exit(&ret, ret, size);

	return ret;
}

/*
 * Free memory previously returned by kmalloc. The caller must supply
 * the exact size and name used at allocation time; mismatches are
 * caught by the sanity checks below (and by the trace/guard configs
 * when enabled). Panics on null/zero-size, misaligned address, or a
 * page signature mismatch.
 */
void kfree(void *addr, uint_t size, char *name)
{
	if(!addr || size == 0) {
		assertv(false,
				"Null pointer or zero size passed to free: 0x%x of claimed size %u B\n",
				addr, size);
	}

	//With CONFIG_KMALLOC_GUARD_SIGN this verifies both guard words and
	//rewinds addr/size to the raw allocation; otherwise it is a no-op
	free_verify(&addr, &size, addr, size, name);

	//Large allocations bypassed the pool: return the pages directly.
	//Note this path also skips pop_trace, matching kmalloc's large
	//path which never called push_trace.
	if(size > MEM_PAGE_AVL_MEM) {
		vm_free_pages(&kernel_vmmap, addr, size / PAGE_SIZE + !!(size % PAGE_SIZE));
		return;
	}

	//Verify that the address is aligned on MEM_BLOCK_SIZE boundary
	ptr_t addr_i = (ptr_t)addr;
	if(!((addr_i % MEM_BLOCK_SIZE) == 0)) {
		assertv(false,
				"Memory being freed is not block aligned: 0x%x of claimed size %u B\n",
				addr, size);
	}

	//Get the page address from the addr
	struct kmem_page *page = (void *)(addr_i & PAGE_MASK);
	if(page->sign != MEM_PAGE_SIGN) {
		assertv(false,
				"mem_page signature mismatch for 0x%x of claimed size %u B\n",
				addr, size);
	}

	//Store the number of free blocks it has before we free
	uint_t init_free_blocks = page->free_blocks;
	//Mark the bit not-free
	uint_t bit = (addr_i - (uint_t)page) / MEM_BLOCK_SIZE;
	uint_t size_blocks = size / MEM_BLOCK_SIZE + !!(size % MEM_BLOCK_SIZE);

	//Clear every block of the allocation in the page bitmap
	uint_t i;
	for(i = bit; i < (bit + size_blocks); i++)
		clear_bit(&page->bitmap, i);

	page->free_blocks += size_blocks;
	//See if page is partially/fully free
	if(page->free_blocks == MEM_PAGE_AVL_BLOCKS) {
		//Detach from pool and free it
		list_detach(pages, pages_tail, page);
		vm_free_pages(&kernel_vmmap, page, 1);
	} else if(init_free_blocks == 0) {
		//Plug the page in the list of pages if
		//it was out (had no free blocks)
		list_attach(pages, pages_tail, page);
	}

	//pop_trace only compares recorded pointer values against the trace;
	//addr itself is not dereferenced after the page may have been freed
	pop_trace(addr, size, name);
}

