/************************************************************************
 *				SRLOS Team CopyRight
 *@file		mem_physical.c
 *@author	bloceanc
 *@date		7/12/2010
 *@note		physical memory management (page manager)
 ************************************************************************/

#ifndef	_MEM_PHYSICAL_C_
#define	_MEM_PHYSICAL_C_

#include "commdef.h"
#include "os_config.h"
#include "mem_config.h"
#include "mem_physical.h"

#define	DEF_MEM_PHY_PAGE_BASE_IDX 	(K_CONFIG_SDRAM_START_PHYADDR >> 12)

extern void comm_global_critical_section_enter(void);
extern void comm_global_critical_section_leave(void);


/* physical memory page manager table , local*/
static k_phy_pageblock k_s_phymem_free_blocks[MEM_CONFIG_SDRAM_SIZE>>13];	// SIZE >> 12 (4kb) >> 1 (/ 2, max free block count)
static k_mem_phy_page k_s_phy_page_table[MEM_CONFIG_SDRAM_SIZE>>12];	// SIZE >> 12(4KB)

static unsigned int	k_s_phy_page_os_end;	// page index of the end of OS space (exclusive upper bound). Warning: compare with < , not <= !
static k_phy_pageblock *k_s_phy_first_block;	// free page block list
static k_phy_pageblock *k_s_phy_first_unused_descriptor;	// unused block descriptor list

static void* s_mem_physical_page_allocate(int page_count, int is_sysmod);

/*
 * physical memory page management initialize
 *
 * dram_start_addr : start physical address of DRAM.
 *	NOTE(review): currently unused - the base page index is derived from
 *	K_CONFIG_SDRAM_START_PHYADDR instead; confirm these always agree.
 */
void mem_physical_page_mgr_init(unsigned int dram_start_addr)
{
	unsigned int i = 0;
	unsigned int tab_count = MEM_CONFIG_SDRAM_SIZE>>12;	// number of 4KB pages
	unsigned int block_count = tab_count >> 1;	// max count of free block descriptors

	(void)dram_start_addr;	// see NOTE(review) above

	k_s_phy_first_unused_descriptor = NULL;
	
	// element 0 is consumed below as the initial free block, so it is not
	// linked into the unused-descriptor list
	for(i = 1; i < block_count; i++)
	{
		k_s_phymem_free_blocks[i].next = k_s_phy_first_unused_descriptor;
		k_s_phymem_free_blocks[i].prev = NULL;
		k_s_phymem_free_blocks[i].page_count = 0;
		k_s_phymem_free_blocks[i].page_start_idx = 0xffffffff;
		k_s_phy_first_unused_descriptor = &k_s_phymem_free_blocks[i];
	}
	
	// pages holding the loaded kernel image are already in use.
	// only the kernel load size is marked; everything else may be handed
	// out to user mode later, so its _ref_count must stay 0 (freeable).
	for(i = 0; i < K_CONFIG_OS_MEM_INIT_PAGE_COUNT; i++)
	{
		k_s_phy_page_table[i]._ref_count = 1;
		k_s_phy_page_table[i]._ref_fork = 0;
	}
	
	// every remaining page is free and belongs to the single initial free
	// block. Point _block_pt at that block: the merge logic in
	// s_mem_physical_page_free() dereferences _block_pt of a free
	// neighbouring page, so leaving it NULL (as the original code did)
	// crashes on the first free that touches this region.
	for(i = K_CONFIG_OS_MEM_INIT_PAGE_COUNT; i < tab_count; i++)
	{
		k_s_phy_page_table[i]._ref_count = 0;
		k_s_phy_page_table[i]._ref_fork = 0;
		k_s_phy_page_table[i]._block_pt = &k_s_phymem_free_blocks[0];
	}
	
	// end page index of the OS-reserved address space, clamped to the
	// amount of RAM actually present
	if((MEM_CONFIG_SDRAM_SIZE >> 20) > K_CONFIG_OS_MEM_SPACE_SIZE)
	{
		k_s_phy_page_os_end = DEF_MEM_PHY_PAGE_BASE_IDX + ((K_CONFIG_OS_MEM_SPACE_SIZE << 20) >> 12);	// index, for 4KB pages
	}
	else
	{
		k_s_phy_page_os_end = DEF_MEM_PHY_PAGE_BASE_IDX + (MEM_CONFIG_SDRAM_SIZE >> 12);	// if RAM size is not 4KB aligned, some tail memory is lost
	}
	
	// descriptor 0 covers all free pages after the kernel image
	k_s_phymem_free_blocks[0].next = NULL;
	k_s_phymem_free_blocks[0].prev = NULL;
	k_s_phymem_free_blocks[0].page_count = (MEM_CONFIG_SDRAM_SIZE >> 12) - K_CONFIG_OS_MEM_INIT_PAGE_COUNT;
	k_s_phymem_free_blocks[0].page_start_idx = DEF_MEM_PHY_PAGE_BASE_IDX + K_CONFIG_OS_MEM_INIT_PAGE_COUNT;
	k_s_phy_first_block = &k_s_phymem_free_blocks[0];
}

/*
 * unlink a free-block descriptor from the free list and return it to the
 * unused-descriptor list.
 *
 * block : descriptor currently linked into the free-block list.
 */
static void s_mem_physical_freeblocks_free(k_phy_pageblock *block)
{
	if(block->next == NULL && block->prev == NULL)
	{
		// block is the only node of the free list. This legally happens
		// when the last free block is allocated down to zero pages, so the
		// list simply becomes empty. (The original code treated this as a
		// fatal error.) It is still fatal if the block is not actually on
		// the list - that indicates corrupted state.
		if(k_s_phy_first_block != block)
		{
			system_faital_error();
		}
		k_s_phy_first_block = NULL;
	}
	else if(block->next == NULL)
	{
		// last node of the list
		block->prev->next = NULL;
	}
	else if(block->prev == NULL)
	{
		// first node of the list
		block->next->prev = NULL;
		k_s_phy_first_block = block->next;
	}
	else
	{
		// interior node
		block->next->prev = block->prev;
		block->prev->next = block->next;
	}
	
	// push onto the unused-descriptor list (singly linked via next)
	block->next = k_s_phy_first_unused_descriptor;
	block->prev = NULL;	// original left a stale prev pointer here
	block->page_count = 0;
	block->page_start_idx = 0xffffffff;
	
	k_s_phy_first_unused_descriptor = block;
}

/*
 * allocate continuous physical memory page(s)
 *
 * page_count : number of 4KB pages requested (<= 0 returns NULL)
 * is_sysmod  : non-zero - allocate from the OS memory space (pages whose
 *              index is below k_s_phy_page_os_end); zero - user mode:
 *              prefer pages outside the OS space, fall back to OS space
 *              only when none are available
 *
 * return: physical address of the first page, or NULL on failure
 *
 * NOTE(review): caller is expected to hold the global critical section
 * (see the mem_physical_page_allocate wrapper).
 */
void* s_mem_physical_page_allocate(int page_count, int is_sysmod)
{
	if(page_count <= 0)
		return NULL;
	
	if(is_sysmod)
	{
		// should allocate in OS mem space
		k_phy_pageblock *cur_block = k_s_phy_first_block;
		k_phy_pageblock *pre_block = NULL;	// NOTE(review): assigned but never read
		if(cur_block == NULL)
		{
			return NULL; 
		}
		
		// first-fit scan: the block must contain page_count pages that all
		// lie below the OS space end index
		for(;cur_block != NULL; cur_block = cur_block->next)
		{
			if(cur_block->page_count >= page_count && 
				cur_block->page_start_idx < k_s_phy_page_os_end && (k_s_phy_page_os_end - cur_block->page_start_idx) >= page_count )
			{
				unsigned int start_page_idx = cur_block->page_start_idx;
				unsigned int i;
				
				// allocate from header
				cur_block->page_count -= page_count;
				cur_block->page_start_idx += page_count;
				
				if(cur_block->page_count == 0)
				{
					// there's no more free page in this block!
					s_mem_physical_freeblocks_free(cur_block);
				}
				// mark each allocated page: one reference, system use
				for(i = 0; i < page_count; i++)
				{
					k_s_phy_page_table[start_page_idx + i - DEF_MEM_PHY_PAGE_BASE_IDX]._ref_fork = 0;
					k_s_phy_page_table[start_page_idx + i - DEF_MEM_PHY_PAGE_BASE_IDX]._ref_count = 1;
					k_s_phy_page_table[start_page_idx + i - DEF_MEM_PHY_PAGE_BASE_IDX]._flag = K_MEM_PHY_PAGE_FLAG_SYSUSING;
				}
				
				// return the first page address
				return (void *)(start_page_idx << 12);
			}
			pre_block = cur_block;
		}
		
		// there's no more system space memory.
		return NULL;
	}
	else
	{
		// allocate in user mode.
		// It's better to allocate from the user memory space to avoid
		// wasting system memory space.
		if(k_s_phy_page_os_end == ((MEM_CONFIG_SDRAM_SIZE >> 12) + DEF_MEM_PHY_PAGE_BASE_IDX))
		{
			// the OS space covers all of RAM, so there is no separate user
			// region: plain first-fit over the whole free list
			k_phy_pageblock *cur_block = k_s_phy_first_block;
			k_phy_pageblock *pre_block = NULL;	// NOTE(review): assigned but never read
			if(k_s_phy_first_block == NULL)
			{
				return NULL;
			}
			for(;cur_block != NULL; cur_block = cur_block->next)
			{
				if(cur_block->page_count >= page_count)
				{
					unsigned int start_page_idx = cur_block->page_start_idx;
					unsigned int i;
					// allocate from the block header
					cur_block->page_count -= page_count;
					cur_block->page_start_idx += page_count;
					
					if(cur_block->page_count == 0)
					{
						s_mem_physical_freeblocks_free(cur_block);
					}
					// mark each allocated page: one reference, user use
					for(i = 0; i < page_count; i++)
					{
						k_s_phy_page_table[start_page_idx + i - DEF_MEM_PHY_PAGE_BASE_IDX]._ref_fork = 0;
						k_s_phy_page_table[start_page_idx + i - DEF_MEM_PHY_PAGE_BASE_IDX]._ref_count = 1;
						k_s_phy_page_table[start_page_idx + i - DEF_MEM_PHY_PAGE_BASE_IDX]._flag = K_MEM_PHY_PAGE_FLAG_USRUSING;
					}
					return (void *)(start_page_idx << 12);
				}
				pre_block = cur_block;
			}
			
			// no more page(s)
			return NULL;
		}
		else
		{
			// some pages lie outside the OS address space; allocate from
			// those first, taking pages from the TOP of a block so the
			// allocation stays above k_s_phy_page_os_end
			unsigned int i;
			unsigned int start_idx;
			k_phy_pageblock *cur_block = k_s_phy_first_block;
			k_phy_pageblock *pre_block = NULL;	// NOTE(review): assigned but never read
			
			for(;cur_block != NULL; cur_block = cur_block->next)
			{
				// too small
				if(cur_block->page_count < page_count)
				{
					pre_block = cur_block;
					continue;
				}
			
				// block lies entirely inside the OS space - skip for now
				if((cur_block->page_start_idx + cur_block->page_count) <= k_s_phy_page_os_end)
				{
					pre_block = cur_block;
					continue;
				}
				
				// taking page_count pages off the top would start below the
				// OS boundary (allocation would straddle it) - skip
				if(((cur_block->page_count - page_count) + cur_block->page_start_idx) < k_s_phy_page_os_end)
				{	
					pre_block = cur_block;
					continue;
				}
				
				// must allocate from bottom! start_idx will not be changed!must make sure pages in block continours
				cur_block->page_count -= page_count;
				
				start_idx = cur_block->page_start_idx + cur_block->page_count;
				
				if(cur_block->page_count == 0)
				{
					s_mem_physical_freeblocks_free(cur_block);
				}
				for(i = 0; i < page_count; i++)
				{
					k_s_phy_page_table[start_idx + i - DEF_MEM_PHY_PAGE_BASE_IDX]._flag = K_MEM_PHY_PAGE_FLAG_USRUSING;
					k_s_phy_page_table[start_idx + i - DEF_MEM_PHY_PAGE_BASE_IDX]._ref_count = 1;
					k_s_phy_page_table[start_idx + i - DEF_MEM_PHY_PAGE_BASE_IDX]._ref_fork = 0;
				}
				return (void *)(start_idx << 12);
			}
			
			// no user-space memory left: fall back to the OS space,
			// first-fit from the block header
			cur_block = k_s_phy_first_block;
			for(;cur_block != NULL;cur_block = cur_block->next)
			{
				if(cur_block->page_count >= page_count)
				{
					start_idx = cur_block->page_start_idx;
					cur_block->page_count -= page_count;
					cur_block->page_start_idx += page_count;
					
					if(cur_block->page_count == 0)
					{
						s_mem_physical_freeblocks_free(cur_block);
					}
					for(i = 0; i < page_count; i++)
					{
						k_s_phy_page_table[start_idx + i - DEF_MEM_PHY_PAGE_BASE_IDX]._flag = K_MEM_PHY_PAGE_FLAG_USRUSING;
						k_s_phy_page_table[start_idx + i - DEF_MEM_PHY_PAGE_BASE_IDX]._ref_count = 1;
						k_s_phy_page_table[start_idx + i - DEF_MEM_PHY_PAGE_BASE_IDX]._ref_fork = 0;
					}
					
					return (void *)(start_idx << 12);
				}
			}
			
			// there's no more memory
			return NULL;
		}
	}
}

/*
 *	get free physical page block discriptor
 */
static k_phy_pageblock *s_mem_phy_allocate_pageblockdiscriptor(void)
{
	k_phy_pageblock *tmp;
	if(k_s_phy_first_unused_descriptor == NULL)
		return NULL;	// no more descriptors
		
	tmp = k_s_phy_first_unused_descriptor;
	k_s_phy_first_unused_descriptor = k_s_phy_first_unused_descriptor->next;
	return tmp;
}


/*
 * allocate 4 contiguous pages (16KB) aligned on a 16KB boundary from the
 * OS memory space - used for the first level page table (FLPT).
 *
 * return: physical address of the aligned 16KB region, or NULL when no
 *	   suitably aligned free range exists in the OS space.
 *
 * NOTE(review): caller is expected to hold the global critical section.
 */
static void *s_mem_phy_allocate_flpt(void)
{
	const unsigned int page_count = 4;	// 16KB = 4 x 4KB pages
	k_phy_pageblock *cur_block = k_s_phy_first_block;
	
	for(;cur_block != NULL; cur_block = cur_block->next)
	{
		// round the block start up to the next 16KB boundary
		unsigned int tmp_page_start_idx = (cur_block->page_start_idx + (page_count - 1))& ~(page_count-1);
		if((tmp_page_start_idx + page_count) <= (cur_block->page_start_idx + cur_block->page_count) &&
			tmp_page_start_idx < k_s_phy_page_os_end && (k_s_phy_page_os_end - tmp_page_start_idx)>= page_count )
		{
			unsigned int start_page_idx = tmp_page_start_idx;
			unsigned int i;
			// pages left above the allocated range: [tmp+4, block end)
			unsigned int leaved_page_count = (cur_block->page_start_idx + cur_block->page_count) - (tmp_page_start_idx + page_count);
			
			if(leaved_page_count)
			{
				// the allocation splits the block in two: the low part
				// [start, tmp) stays in cur_block, the high part gets a
				// new descriptor
				k_phy_pageblock *newblock = s_mem_phy_allocate_pageblockdiscriptor();
				if(newblock==NULL)
				{
					system_faital_error();
				}
				newblock->prev = NULL;
				newblock->page_count = leaved_page_count;
				newblock->page_start_idx = tmp_page_start_idx + page_count;
				
				// keep the page table consistent: every free page must
				// point at the descriptor that now owns it, otherwise the
				// merge logic in s_mem_physical_page_free() follows a
				// stale descriptor (bug in the original code)
				for(i = 0; i < leaved_page_count; i++)
				{
					k_s_phy_page_table[newblock->page_start_idx + i - DEF_MEM_PHY_PAGE_BASE_IDX]._block_pt = newblock;
				}
				
				// insert at the list head (list is non-empty here:
				// cur_block is still linked)
				k_s_phy_first_block->prev = newblock;
				newblock->next = k_s_phy_first_block;
				k_s_phy_first_block = newblock;
			}
			
			// cur_block keeps only the pages below the aligned start,
			// [page_start_idx, tmp_page_start_idx); the allocation comes
			// from the top of cur_block, so its start index is unchanged
			cur_block->page_count -= (leaved_page_count + page_count);
			
			if(cur_block->page_count == 0)
			{
				// no free pages left in this block
				s_mem_physical_freeblocks_free(cur_block);
			}
			// mark each allocated page: one reference, system use
			for(i = 0; i < page_count; i++)
			{
				k_s_phy_page_table[start_page_idx + i - DEF_MEM_PHY_PAGE_BASE_IDX]._ref_fork = 0;
				k_s_phy_page_table[start_page_idx + i - DEF_MEM_PHY_PAGE_BASE_IDX]._ref_count = 1;
				k_s_phy_page_table[start_page_idx + i - DEF_MEM_PHY_PAGE_BASE_IDX]._flag = K_MEM_PHY_PAGE_FLAG_SYSUSING;
			}
			
			// return the first page address
			return (void *)(start_page_idx << 12);
		}
	}
	
	// no aligned range available in system space
	return NULL;
}



/*
 * free Physical pages
 */
static void s_mem_physical_page_free(void *phy_page_addr, unsigned int page_count)
{
	/* free some pages that allocate use s_mem_physical_page_allocate function */
	
	// get free page block descriptor
	unsigned int page_block_desc;
	unsigned int page_idx = ((unsigned int)phy_page_addr) >> 12;
	unsigned int i,j,m,b;
	k_phy_pageblock *new_block;
	unsigned int count_end = page_count + 1;
	for(i = 0,j = 0; i < count_end; i++)
	{
		b = 0;
		if(i != page_count)
		{
			b = --k_s_phy_page_table[page_idx + i - DEF_MEM_PHY_PAGE_BASE_IDX]._ref_count;
		}
		
		if((b != 0) || (i == page_count))	// reference count is not zero or at the end of pages!
		{
			// free count : i - j
			unsigned int prev_idx = page_idx + j - DEF_MEM_PHY_PAGE_BASE_IDX - 1;
			unsigned int next_idx = page_idx + i - DEF_MEM_PHY_PAGE_BASE_IDX;
			
			if(i != j)
			{
				// free this(these) page(s)
				// check whether can be linked to other free block!
				if((!k_s_phy_page_table[prev_idx]._ref_count) && (!k_s_phy_page_table[next_idx]._ref_count))
				{
					// ok. now we can join three blocks!
					k_phy_pageblock *prev_block_pt = k_s_phy_page_table[prev_idx]._block_pt;
					k_phy_pageblock *next_block_pt = k_s_phy_page_table[next_idx]._block_pt;
					unsigned int prev_block_start = prev_block_pt->page_start_idx;
					unsigned int next_block_start = next_block_pt->page_start_idx;
					unsigned int k;
					for(k = 0; k < next_block_pt->page_count; k++)
					{
						k_s_phy_page_table[next_block_start + k - DEF_MEM_PHY_PAGE_BASE_IDX]._block_pt = prev_block_pt;
					}
						
					for(k = j; k < i; k++)
					{
						k_s_phy_page_table[page_idx + k - DEF_MEM_PHY_PAGE_BASE_IDX]._block_pt = prev_block_pt;
					}
					
					prev_block_pt->page_count += next_block_pt->page_count + i - j;
					s_mem_physical_freeblocks_free(next_block_pt);
					
					j = i + 1;
					continue;
				}
				if(!k_s_phy_page_table[prev_idx]._ref_count)
				{
					// link these to the previous.
					k_phy_pageblock *free_block_pt = k_s_phy_page_table[prev_idx]._block_pt;
					free_block_pt->page_count += i - j;
					for(m = j; m < i; m++)
					{
						k_s_phy_page_table[page_idx + m - DEF_MEM_PHY_PAGE_BASE_IDX]._block_pt = free_block_pt;
					}
					
					j = i + 1;
					continue;
				}
				if(!k_s_phy_page_table[next_idx]._ref_count)
				{
					// link these to the next . link next header.
					k_phy_pageblock *free_block_pt = k_s_phy_page_table[next_idx]._block_pt;
					free_block_pt->page_count += i - j;
					free_block_pt->page_start_idx = page_idx + j;
					for(m = j; m < i; m++)
					{
						k_s_phy_page_table[page_idx + m - DEF_MEM_PHY_PAGE_BASE_IDX]._block_pt = free_block_pt;
					}
					
					j = i + 1;
					continue;
				}
				
				// there can't be joined. just create new block, and link to the block list.
				new_block = s_mem_phy_allocate_pageblockdiscriptor();
				if(new_block == NULL)
				{
					system_faital_error();
				}
				new_block->page_start_idx = page_idx + j;
				new_block->page_count = i - j;
				new_block->next = k_s_phy_first_block;
				new_block->prev = NULL;
				k_s_phy_first_block->prev = new_block;
				k_s_phy_first_block = new_block;
				for(m = j; m < i; m++)
				{
					k_s_phy_page_table[page_idx + m - DEF_MEM_PHY_PAGE_BASE_IDX]._block_pt = new_block;
				}
				j = i + 1;
				continue;
			}
			j = i + 1;
		}
	}
}

/*
 * Allocate physical memory page(s).
 * Wraps the internal allocator in the global critical section
 * (PV operation: interrupts disabled while the free list is touched).
 */
void *mem_physical_page_allocate(int page_count, int mode)
{
	void *result;

	comm_global_critical_section_enter();
	result = s_mem_physical_page_allocate(page_count, mode);
	comm_global_critical_section_leave();

	return result;
}

/*
 * Allocate the physical FLPT (16KB, 16KB aligned).
 * Wraps the internal allocator in the global critical section.
 */
void *mem_physical_page_allocate_flpt(void)
{
	void *result;

	comm_global_critical_section_enter();
	result = s_mem_phy_allocate_flpt();
	comm_global_critical_section_leave();

	return result;
}

/*
 * Free physical pages.
 * Wraps the internal worker in the global critical section
 * (PV operation: interrupts disabled while the free list is touched).
 */
void mem_physical_page_free(void *phy_page_addr, unsigned int page_count)
{
	comm_global_critical_section_enter();
	s_mem_physical_page_free(phy_page_addr, page_count);
	comm_global_critical_section_leave();
}

/*
 * Increase the reference count of one physical page.
 * PV operation: performed inside the global critical section.
 */
void mem_physical_page_inc_ref(void *phy_page_addr)
{
	unsigned int tab_idx = (((unsigned int)phy_page_addr) >> 12) - DEF_MEM_PHY_PAGE_BASE_IDX;

	comm_global_critical_section_enter();
	k_s_phy_page_table[tab_idx]._ref_count++;
	comm_global_critical_section_leave();
}

/*
 *  Decrease the reference count of one physical page.
 * 	NOTE: special-purpose - freeing a physical page also decrements the
 * 	reference count, so normally this function only does the decrement.
 */
void mem_physical_page_sub_ref(void *phy_page_addr)
{
	unsigned int page_idx = ((unsigned int)phy_page_addr) >> 12;
	
	// PV operation: disable interrupts first
	comm_global_critical_section_enter();
	if(!k_s_phy_page_table[page_idx - DEF_MEM_PHY_PAGE_BASE_IDX]._ref_count)
	{
		// count already zero: release the page.
		// Call the internal worker directly - the public
		// mem_physical_page_free() wrapper (used by the original code)
		// re-enters the critical section we are already holding.
		// NOTE(review): the free path decrements _ref_count again, which
		// underflows for a zero-count page - confirm the intended
		// semantics of this branch.
		s_mem_physical_page_free(phy_page_addr, 1);
	}
	else
		k_s_phy_page_table[page_idx - DEF_MEM_PHY_PAGE_BASE_IDX]._ref_count--;
	// enable interrupts again
	comm_global_critical_section_leave();
}

/*
 *  Read the reference count of one physical page.
 *  PV operation: the read happens inside the global critical section.
 */
int mem_physical_page_get_ref(void *phy_page_addr)
{
	unsigned int tab_idx = (((unsigned int)phy_page_addr) >> 12) - DEF_MEM_PHY_PAGE_BASE_IDX;
	int refs;

	comm_global_critical_section_enter();
	refs = k_s_phy_page_table[tab_idx]._ref_count;
	comm_global_critical_section_leave();

	return refs;
}

/*
 * 	Increase the fork count of one physical page.
 * 	PV operation: performed inside the global critical section.
 */
void mem_physical_page_inc_fork(void *phy_page_addr)
{
	unsigned int tab_idx = (((unsigned int)phy_page_addr) >> 12) - DEF_MEM_PHY_PAGE_BASE_IDX;

	comm_global_critical_section_enter();
	k_s_phy_page_table[tab_idx]._ref_fork++;
	comm_global_critical_section_leave();
}

/*
 * 	Decrease the fork count of one physical page; when the count is
 * 	already zero the page is released instead.
 */
void mem_physical_page_sub_fork(void *phy_page_addr)
{
	unsigned int page_idx = ((unsigned int)phy_page_addr) >> 12;
	
	// PV operation: disable interrupts first
	comm_global_critical_section_enter();
	if(!k_s_phy_page_table[page_idx - DEF_MEM_PHY_PAGE_BASE_IDX]._ref_fork)
	{
		// no fork references: free this physical page.
		// Call the internal worker directly - the public
		// mem_physical_page_free() wrapper (used by the original code)
		// re-enters the critical section we are already holding.
		s_mem_physical_page_free(phy_page_addr, 1);
	}
	else
		k_s_phy_page_table[page_idx - DEF_MEM_PHY_PAGE_BASE_IDX]._ref_fork--;
	// enable interrupts again
	comm_global_critical_section_leave();	
}

/*
 *  Read the fork count of one physical page.
 *  PV operation: the read happens inside the global critical section.
 */
int mem_physical_page_get_fork(void *phy_page_addr)
{
	unsigned int tab_idx = (((unsigned int)phy_page_addr) >> 12) - DEF_MEM_PHY_PAGE_BASE_IDX;
	int forks;

	comm_global_critical_section_enter();
	forks = k_s_phy_page_table[tab_idx]._ref_fork;
	comm_global_critical_section_leave();

	return forks;
}

// Warning! this is no decride function for physical page . because it should be done by free!
#endif	/* _MEM_PHYSICAL_C_ */

