/*
	Copyright (C) 2009 Salil Bhagurkar
	
	This file is part of ViSE
	
	ViSE is free software: you can redistribute it and/or modify
	it under the terms of the GNU Lesser General Public License as published by
	the Free Software Foundation, either version 3 of the License, or
	(at your option) any later version.
	
	ViSE is distributed in the hope that it will be useful,
	but WITHOUT ANY WARRANTY; without even the implied warranty of
	MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
	GNU Lesser General Public License for more details.
	
	You should have received a copy of the GNU Lesser General Public License
	along with ViSE.  If not, see <http://www.gnu.org/licenses/>.
*/

#include <shared.h>
#include <config.h>

/*Table manipulation macros*/
/*Flag bit 0 of a slot records whether it is allocated*/
#define set_allocated(n) table.flag[n]=1
#define set_free(n) table.flag[n]=0
#define is_allocated(n) (table.flag[n]&1)

/*Store base address a in slot n (overwrites the slot)*/
#define make_entry(n,a) table.mem[n]=a
/*Open a hole at slot n and store a there.
  Wrapped in do/while(0) so it expands safely as one statement: the
  original two-statement form would break under an unbraced if/else.*/
#define insert_entry(n,a) \
	do { \
		shift_table_down(n); \
		table.mem[n]=a; \
	} while (0)
#define delete_entry(n) shift_table_up(n)
/*Size of the region described by slot n: distance to the next slot's base*/
#define allocation_size(n) (table.mem[n+1]-table.mem[n])

/*Classification tags stored in table.map for each entry*/
enum {MEM_SYS,MEM_KERN,MEM_USE,MEM_NOUSE,MEM_LEAKED};


/*Human-readable names for the tags above, indexed by the enum values*/
static char *maps[]={"SYSTEM","KERNEL","USABLE","UNUSABLE","LEAKED"};

/*Maximum number of slots in the allocation table*/
#define TBL_SZ 0x800

/*This constant defines the alignment that the allocation performs before allocation. This is set to 4 bytes by default for more speed on 32 bit architecture*/
#define MEMALIGN CONFIG_MALLOC_ALIGN

/*Sentinel address marking unused table slots; also terminates the table*/
#define MAX_ALLOC ((void *)(-1))

/*The allocation table: parallel arrays indexed by slot number.
  Slot i describes the region [mem[i], mem[i+1]); entries are kept
  sorted by address and terminated by MAX_ALLOC sentinels.*/
struct table {
	//The memory table, in which entries are registered
	void * mem[TBL_SZ];
	//This table tells the kind of memory in the above table.mem for the corresponding entry
	int_t map[TBL_SZ];
	//These are general flags, including free/allocated
	int_t flag[TBL_SZ];
	//The string that identifies the memory allocation. This is the function name if you use new or newarr defined in mm.h
	const char *id[TBL_SZ];
	
	//Index one past the last valid slot
	int_t table_end;
};

static struct table table;

/*Highest slot index touched when shifting entries around*/
#define TABLE_SHLEN (table.table_end - 1)
/*Loop bound when scanning the whole table*/
#define TABLE_SCAN (table.table_end)


/*Open a hole at slot `start` by copying every entry from `start` onward
  one slot toward higher indices. The caller is responsible for bumping
  table_end afterwards; the last slot is always a MAX_ALLOC sentinel, so
  overwriting it loses nothing.*/
static int shift_table_down(int_t start)
{
	int_t dst = TABLE_SHLEN;

	while (dst > start) {
		table.mem[dst] = table.mem[dst-1];
		table.flag[dst] = table.flag[dst-1];
		table.map[dst] = table.map[dst-1];
		table.id[dst] = table.id[dst-1];
		dst--;
	}
	return 0;
}

/*Delete the entry at slot `start` by copying every following entry one
  slot toward lower indices. The caller decrements table_end afterwards.*/
static int shift_table_up(int_t start)
{
	int_t dst = start;

	while (dst < TABLE_SHLEN) {
		table.mem[dst] = table.mem[dst+1];
		table.flag[dst] = table.flag[dst+1];
		table.map[dst] = table.map[dst+1];
		table.id[dst] = table.id[dst+1];
		dst++;
	}
	return 0;
}

#define MH_MAGIC 0xabcd1a2a
#define MF_MAGIC 0xddcca0b0
#define ID_LEN 4

/*Guard words stamped immediately before and after every allocation's
  payload. A clobbered magic value indicates the allocation overran its
  bounds (checked by malloc_scan_for_leaks).*/
struct memory_header {
	unsigned long magic;
};

struct memory_footer {
	unsigned long magic;
};

/*Stamp the header guard word*/
static void init_memory_header(struct memory_header *hdr)
{
	hdr->magic = MH_MAGIC;
}

/*Stamp the footer guard word*/
static void init_memory_footer(struct memory_footer *ftr)
{
	ftr->magic = MF_MAGIC;
}

/*Nonzero while the header guard word is intact*/
static inline int is_header_valid(struct memory_header *hdr)
{
	return (hdr->magic == MH_MAGIC) ? 1 : 0;
}

/*Nonzero while the footer guard word is intact*/
static inline int is_footer_valid(struct memory_footer *ftr)
{
	return (ftr->magic == MF_MAGIC) ? 1 : 0;
}

/*__malloc (below) finds a free entry in table.mem and then either:
- allocates it directly when its size equals the size required, or
- resizes it (shifting the table down) and allocates the front part.
*/

/*Sanity check after table_end changes: if the table has overflowed its
  fixed capacity, halt hard rather than corrupt memory.*/
static inline void check_table_end()
{
	if (table.table_end >= TBL_SZ) {
		//panic
		for (;;)
			;
	}
}


/*Raw malloc: scan the table for a free, usable entry of at least b bytes.
  On success the chosen slot index is stored in *index and the region's
  base address is returned; on failure *index is untouched and NULL is
  returned.*/
static void *__malloc(int_t *index,size_t b)
{
	int_t slot;

	for (slot = 0; slot < TABLE_SCAN; slot++) {
		if (is_allocated(slot) || table.map[slot] != MEM_USE)
			continue;

		if (allocation_size(slot) > b) {
			/*Split: carve b bytes off the front of this free region and
			  push the remainder into a fresh entry at slot+1*/
			shift_table_down(slot);
			make_entry(slot+1, table.mem[slot]+b);
			table.table_end++;
			check_table_end();
			set_allocated(slot);
			(*index) = slot;
			return table.mem[slot];
		}
		if (allocation_size(slot) == b) {
			/*Exact fit: no split needed*/
			set_allocated(slot);
			(*index) = slot;
			return table.mem[slot];
		}
	}

	return NULL;
}

/*Allocate b bytes of usable memory, bracketed by guard header/footer
  words. Returns a pointer to the payload, or NULL when b is zero or no
  suitable free region exists.*/
void *malloc(size_t b)
{
	int_t i;
	size_t x,actual_size;
	void *ret_mem;

	if(!b)	{
		return NULL;
	}
	/*Round the request up to the allocator alignment*/
	x=b / MEMALIGN * MEMALIGN;
	if(x<b)
		b=x+MEMALIGN;
	actual_size=b;
	/*Reserve room for the guard words around the payload*/
	b+=sizeof(struct memory_header) + sizeof(struct memory_footer);

	ret_mem=__malloc(&i,b);

	if(!ret_mem) {
		//panic
		/*Fix: the original fell through here and dereferenced NULL in
		  init_memory_header below; fail explicitly instead.*/
		return NULL;
	}

	/*Stamp the guard words; the user pointer sits just past the header*/
	init_memory_header((struct memory_header *)ret_mem);
	ret_mem+=sizeof(struct memory_header);
	init_memory_footer((struct memory_footer *)(ret_mem+actual_size));

	return ret_mem;
}

int_t malloc_get_index(void **memory,size_t b)
{
	int_t i;
	size_t x,actual_size;
	void *ret_mem;
	
	if(!b)	{
		(*memory) = NULL;
		return 0;
	}
	x=b / MEMALIGN * MEMALIGN;
	if(x<b)
		b=x+MEMALIGN;
	actual_size=b;
	b+=sizeof(struct memory_header) + sizeof(struct memory_footer);
	
	ret_mem=__malloc(&i,b);
	
	if(!ret_mem) {
		//panic
	}
	
	init_memory_header((struct memory_header *)ret_mem);
	ret_mem+=sizeof(struct memory_header);
	init_memory_footer((struct memory_footer *)(ret_mem+actual_size));
	
	table.id[i]="__new__";
	
	(*memory)=ret_mem;
	
	return i;
}

/*To register stuff that was allocated before we came into existence*/
/*Use this function only before sched. This does not lock.*/
int malloc_register(void *start,size_t b,int_t map,char *name)
{
	int_t i;
	if(!b)
		return EPINV;
	/*Find the slot containing [start, start+b). The upper bound uses <=
	  so a region that exactly fills a slot matches too — with the
	  original '<', the exact-fit branch below was unreachable for
	  boundary-aligned regions.*/
	for(i=0;i<TABLE_SCAN;i++) {
		if((unsigned)start>=(unsigned)table.mem[i] && ((unsigned)start+b)<=(unsigned)table.mem[i+1])
			break;
	}
	/*No slot covers the region: don't touch table state at i==table_end*/
	if(i==TABLE_SCAN)
		return ESTATEINV;
	/*NOTE(review): the split below assumes start == table.mem[i]; confirm
	  that callers only register regions starting on a slot boundary.*/
	if(!is_allocated(i))	{
		if(allocation_size(i)>b)	{
			shift_table_down(i);
			make_entry(i+1,table.mem[i]+b);
			table.table_end++;
			check_table_end();
			set_allocated(i);
			table.map[i]=map;
			table.id[i]=name;
			return 0;
		}
		else if(allocation_size(i)==b)	{
			set_allocated(i);
			table.map[i]=map;
			table.id[i]=name;
			return 0;
		}
	}
	return ESTATEINV;
}

/*Register memory ranges eg low_memory 0-1MB. Unlike malloc_register this
  only tags the range (map/id); it does not mark it allocated.*/
int malloc_register_range(void *start,size_t b,int_t tag,char *name)
{
	int_t i;
	if(!b)
		return EPINV;
	/*Find the slot containing [start, start+b). <= lets a region that
	  exactly fills a slot match (the original '<' excluded it).*/
	for(i=0;i<TABLE_SCAN;i++) {
		if((unsigned)start>=(unsigned)table.mem[i] && ((unsigned)start+b)<=(unsigned)table.mem[i+1])
			break;
	}
	/*No slot covers the region*/
	if(i==TABLE_SCAN)
		return ESTATEINV;
	if(!is_allocated(i))	{
		if(allocation_size(i)>b)	{
			shift_table_down(i);
			make_entry(i+1,table.mem[i]+b);
			table.table_end++;
			check_table_end();
			table.map[i]=tag;
			table.id[i]=name;
			return 0;
		}
		else if(allocation_size(i)==b)	{
			table.map[i]=tag;
			/*Fix: the original stored 0 here, losing the range's name
			  (inconsistent with the split branch and with malloc_register)*/
			table.id[i]=name;
			return 0;
		}
	}
	return ESTATEINV;
}

/*Linear scan for the table slot whose base address equals p.
  Returns the slot index, or ENOREG when no entry matches.*/
static int search_allocation(void *p)
{
	int_t slot = 0;

	while (slot < TABLE_SCAN) {
		if (table.mem[slot] == p)
			return slot;
		slot++;
	}
	return ENOREG;
}

/*Coalesce adjacent free entries: whenever two consecutive slots are both
  free, the second is removed so the first describes the combined region.
  Returns the number of merges performed; callers loop until it returns 0.*/
/**This function could be registered as a low priority thread, which runs
  only when the system is idle.*/
static int merge_free()
{
	int_t slot = 0;
	int_t merged = 0;

	/*TABLE_SHLEN shrinks as entries are removed, so re-test it each pass*/
	while (slot < TABLE_SHLEN) {
		if (!is_allocated(slot) && !is_allocated(slot+1)
				&& table.mem[slot+1] != MAX_ALLOC) {
			shift_table_up(slot+1);
			table.table_end--;
			check_table_end();
			merged++;
		}
		slot++;
	}
	return merged;
}

/*Release an allocation returned by malloc(): mark its slot free and
  usable again, then coalesce adjacent free entries. Returns 0, EPINV for
  a NULL pointer, or ENOREG when p was not allocated by this allocator.*/
int malloc_free(void *p)
{
	int_t slot;

	if (!p)
		return EPINV;

	/*The table records header addresses, one header ahead of the user
	  pointer handed out by malloc()*/
	p -= sizeof(struct memory_header);

	slot = search_allocation(p);
	if (slot < 0) {
		//panic
		return ENOREG;
	}

	set_free(slot);
	table.map[slot] = MEM_USE;

	/*Repeat until no adjacent free entries remain*/
	while (merge_free() != 0)
		;

	return 0;
}

/*This frees all the allocations tagged with the passed id string.
  Returns the number of entries freed, or ENODATA when none matched.*/
int malloc_free_id(char *id)
{
	int_t i;
	int_t nr_freed=0;

	for(i=0;i<TABLE_SCAN;i++) {
		/*Fix: only release entries that are actually allocated. The
		  original freed any slot whose id matched; free and sentinel
		  slots share the "undefined" id, so a matching id would have
		  corrupted their map type.*/
		if(is_allocated(i) && !strcmp(table.id[i],id)) {
			set_free(i);
			table.map[i]=MEM_USE;
			nr_freed++;
		}
	}

	/*Coalesce until no adjacent free entries remain*/
	i=1;
	while(i)	{
		i=merge_free();
	}

	if(!nr_freed)
		return ENODATA;

	return nr_freed;
}

/*Return the guard header of the allocation in the given table slot.
  The table stores the header address itself, so this is a plain cast.*/
struct memory_header *header_of(int_t table_index)
{
	return (struct memory_header *)(table.mem[table_index]);
}

/*Return the guard footer of the allocation in the given table slot.
  The footer occupies the last sizeof(struct memory_footer) bytes of the
  slot's region.*/
struct memory_footer *footer_of(int_t table_index)
{
	return (struct memory_footer *)(table.mem[table_index]+allocation_size(table_index)-sizeof(struct memory_footer));
}

/*Attach an identifying string to table slot `index` (typically the
  allocating function's name); used by malloc_free_id and diagnostics.
  The string is not copied — the caller must keep it alive.*/
void malloc_make_memory_id(int_t index,const char *id)
{
	table.id[index]=id;
}

/*Translate a table entry's base address (which points at the guard
  header) into the user-visible payload pointer, mirroring what malloc
  returns.*/
void *actual_allocation(void *memory_table_entry)
{
	return (memory_table_entry+sizeof(struct memory_header));
}

/*Scan every live allocation's guard words. An entry whose header or
  footer magic has been clobbered is re-tagged MEM_LEAKED so the same
  overrun is not reported twice on later scans (alternatively the magic
  could be re-stamped in place). Always returns 0.*/
int malloc_scan_for_leaks()
{
	int_t slot;

	for (slot = 0; slot < TABLE_SCAN; slot++) {
		/*Only allocated, usable entries carry guard words*/
		if (!is_allocated(slot) || table.map[slot] != MEM_USE)
			continue;

		if (!is_header_valid(header_of(slot)))
			table.map[slot] = MEM_LEAKED;
		if (!is_footer_valid(footer_of(slot)))
			table.map[slot] = MEM_LEAKED;
	}
	return 0;
}


static int realloc(void *old,size_t b)
{
	int_t i;
	size_t size;
	void *temp;
	temp=old;
	i=search_allocation(old);
	size=allocation_size(i);
	malloc_free(old);
	old=malloc(b);
	memcpy(old,temp,size);
	return 0;
}

/*Average size of the currently allocated entries.
  Fixes two defects in the original:
  - it divided by the number of entries scanned (including free ones)
    rather than by the number of allocated entries;
  - it divided by zero when the very first entry had size zero.*/
size_t avg_allocation_size()
{
	size_t i;
	size_t total=0;
	size_t nr_alloc=0;

	for(i=0;i<TABLE_SCAN;i++)	{
		/*A zero-sized entry marks the end of useful data
		  (consecutive MAX_ALLOC sentinels)*/
		if(!allocation_size(i))
			break;
		if(is_allocated(i))	{
			total+=allocation_size(i);
			nr_alloc++;
		}
	}
	return nr_alloc ? total/nr_alloc : 0;
}

/*This returns the total memory occupied by the allocations*/
size_t malloc_mem_occ()
{
	int_t slot;
	size_t total = 0;

	/*Walk consecutive slot pairs; a MAX_ALLOC sentinel ends the table*/
	for (slot = 1; slot < TABLE_SCAN; slot++) {
		if (table.mem[slot] == MAX_ALLOC)
			break;
		if (is_allocated(slot-1))
			total += allocation_size(slot-1);
	}
	return total;
}

/*Return the memory footprint of the kernel image itself. KERNEL_START
  and KERNEL_END are symbols declared in the arch folder's asm and
  filled in by the linker.*/
size_t malloc_sys_occ()
{
	return (size_t)(KERNEL_END - KERNEL_START);
}


/*Initialize the memory allocator: reset every table slot to an unusable
  MAX_ALLOC sentinel with no owner, anchor slot 0 at address zero, and
  let the architecture code register its fixed allocations.*/
int init_mm()
{
	int_t slot;

	for (slot = 0; slot < TBL_SZ; slot++) {
		table.mem[slot] = MAX_ALLOC;
		table.map[slot] = MEM_NOUSE;
		table.flag[slot] = 0;
		table.id[slot] = "undefined";
	}
	table.table_end = 3;

	make_entry(0, 0);
	//Now register the architecture based fixed allocations
	arch_malloc_init();
	return 0;
}
