//#include "proc.h"
#include "base.h"
#include "type.h"

#include "page.h"
#include "kparam.h"
#include "proc.h"

#include "string.h"

/* kmap occupies exactly one page (1024 32-bit entries);
	it is used for mapping user-space linear addresses to user-space physical addresses.
	kmap[0] maps kmap itself.
*/
PUBLIC uint_32 kmap[1024] _kmap_section;


/*
 * Translate a physical address into the linear (virtual) address it is
 * mapped at within the given memory frame.
 *
 * mm:  memory frame describing the phy<->vm window
 * phy: in  - physical address to translate
 * vm:  out - corresponding linear address
 *
 * Returns 0 on success, -1 if *phy lies below the frame's physical base.
 */
int phy2vm(struct mm_frame *mm,uint_32 *phy, uint_32 *vm){
	/* compare unsigned: the old signed-cast test turned any offset
	 * larger than INT_MAX into a spurious failure (signed overflow) */
	if (*phy < mm->start_phy){
		return -1;
	}
	*vm = (*phy - mm->start_phy) + mm->start_vm;
	return 0;
}

/*
 * Translate a linear (virtual) address into the physical address it is
 * mapped at within the given memory frame.
 *
 * mm:  memory frame describing the phy<->vm window
 * vm:  in  - linear address to translate
 * phy: out - corresponding physical address
 *
 * Returns 0 on success, -1 if *vm lies below the frame's linear base.
 */
int vm2phy(struct mm_frame *mm,uint_32 *vm, uint_32 *phy){
	/* compare unsigned: the old signed-cast test turned any offset
	 * larger than INT_MAX into a spurious failure (signed overflow) */
	if (*vm < mm->start_vm){
		return -1;
	}
	*phy = (*vm - mm->start_vm) + mm->start_phy;
	return 0;
}

/*
 * Copy len bytes from physical address phy_src to phy_dst.
 *
 * NOTE(review): not implemented -- this stub reports success (0)
 * without copying anything, so callers silently see stale data.
 * TODO: map both physical ranges (e.g. via kremap) and copy.
 */
int phycopy(uint_32 phy_dst, uint_32 phy_src, uint_32 len){
	return 0;
}

/*
 * Copy len bytes between two linear (virtual) address ranges.
 * Ranges must not overlap (plain memcpy semantics). Always returns 0.
 */
int vmcopy(uint_32 vm_dst, uint_32 vm_src, uint_32 len){
	char *dst = (char *)vm_dst;
	char *src = (char *)vm_src;

	memcpy(dst, src, len);
	return 0;
}



#if 0
int kmap_init(void){
	kmap[0] = elf_read(K_BASE) + kmap - K_VM;	/* physical memory */
	return 0;
}


int kremap(int map_index, uint_32 phy){
	kmap[map_index] = phy;
	return 0;
}
#endif

/* Re-point kmap slot map_index at physical page phy.
 * Arguments and the whole expansion are parenthesized so the macro is
 * a single well-formed expression, safe in un-braced if/else bodies. */
#define kremap(map_index, phy)		(kmap[(map_index)] = (phy))
/*
 * each bit of pg_map represents one page block, where one block equals 64 pages
 * the physical address starts from 1M
 *
 * the map can cover 4 * 1024 * 8 * 64 * 4096 = 8G
 */
static uint_8 pg_map[4 * 1024];


/* pg_block_max records the highest page block number that has been taken */
static uint_32 pg_block_max;

/* pg_block_nr records total page block number */
static uint_32 pg_block_nr;	

/* since page alloc needs to scan the whole map,
 * which is slow, we keep a flag indicating
 * whether any page block is still available
 */
static BOOL pg_is_full = FALSE;

/*
 * phy_size is the physical memory size in KB. Since we allocate 64 pages
 * contiguously at a time, pg_block_nr = phy_size / (64 * 4)
 */
/*
 * Initialize the physical page allocator state.
 *
 * phy_size: physical memory size in KB; one page block is 64 pages of
 *           4KB, hence pg_block_nr = phy_size / (64 * 4).
 *
 * Returns 0 on success, -1 if phy_size is too small for even one block
 * (guarding against a zero modulus -- UB -- in page_alloc()).
 */
int page_info_init(uint_32 phy_size){
	pg_block_nr = phy_size / (64 * 4);
	if (pg_block_nr == 0){
		/* page_alloc() computes i %= pg_block_nr; never allow 0 */
		return -1;
	}

	/* the kernel has taken the first 64 pages (block 0) */
	pg_map[0] |= 0x80;
	pg_block_max = 0;
	pg_is_full = FALSE;	/* reset so re-initialization starts clean */
	return 0;
}

/*
 * we allocate 64 pages (one page block) at a time
 */
/*
 * Allocate one page block (64 contiguous pages).
 *
 * Scans pg_map starting just past the most recently allocated block,
 * wrapping around, and marks the first free block as taken.
 *
 * phy_start: out - physical address of the first page of the block.
 *
 * Returns 0 on success, 1 on a NULL argument or when no block is free.
 */
int page_alloc(uint_32 *phy_start){
	int i = pg_block_max;
	int pg_map_array_index;
	int pg_map_bit_index;
	uint_8 token;

	if (!phy_start){
		return 1;
	}

	if (pg_is_full == TRUE){
		return 1;
	}

	while (TRUE){
		i++;
		i %= pg_block_nr;

		if (i == pg_block_max){
			/* wrapped all the way around: every block is taken */
			pg_is_full = TRUE;
			return 1;
		}

		pg_map_array_index = i / 8;
		pg_map_bit_index = i % 8;
		/* BUGFIX: recompute the mask every iteration. The old code
		 * shifted one mask cumulatively (token >>= bit), so it decayed
		 * to 0 after a few blocks; (pg_map[x] & 0) == 0 then matched
		 * any block and |= 0 never marked it as taken. */
		token = (uint_8)(0x80 >> pg_map_bit_index);

		if ((pg_map[pg_map_array_index] & token) == 0){
			/* this page block is available, take it */
			pg_map[pg_map_array_index] |= token;
			break;
		}
	}

	pg_is_full = FALSE;
	pg_block_max = i;
	*phy_start = i * PAGE_BLOCK + PHY_START;

	return 0;
}

/*
 * Copy one whole page table / directory (1024 32-bit entries) from the
 * page mapped at kmap_vm_src to the one at kmap_vm_dst. Returns 0.
 */
int page_copy(uint_32 kmap_vm_dst,uint_32 kmap_vm_src){
	uint_32 *dst = (uint_32 *)kmap_vm_dst;
	uint_32 *src = (uint_32 *)kmap_vm_src;
	uint_32 *end = dst + 1024;

	while (dst < end){
		*dst++ = *src++;
	}
	return 0;
}

/*
 * Release a previously allocated page block.
 *
 * NOTE(review): not implemented -- always reports success without
 * clearing any bit in pg_map, so freed blocks are never reusable.
 * TODO: take the block's physical address, clear its pg_map bit and
 * reset pg_is_full.
 */
int page_free(void){
	return 0;
}


/*
 * Install a page-directory entry: point slot `index` of the page
 * directory mapped at kmap_vm_pg_dir at the page table at physical
 * address phy, marked present, writable and user-accessible.
 *
 * phy is assumed page-aligned so the low flag bits are free -- TODO
 * confirm at call sites. Always returns 0.
 */
int page_dir_map(uint_32 kmap_vm_pg_dir,uint_32 index,uint_32 phy){
	uint_32 *pd = (uint_32 *)kmap_vm_pg_dir;
	/* combine flag bits with | (the original "+ +" was a typo, and OR
	 * cannot carry into the address bits if a flag is already set) */
	*(pd + index) = phy | PT_P | PT_RW | PT_U;
	return 0;
}

/*
 * Install a page-table entry: point slot `index` of the page table
 * mapped at kmap_vm_pt_dir at the physical page phy, marked present,
 * writable and user-accessible.
 *
 * phy is assumed page-aligned so the low flag bits are free -- TODO
 * confirm at call sites. Always returns 0.
 */
int page_tbl_map(uint_32 kmap_vm_pt_dir,uint_32 index,uint_32 phy){
	uint_32 *pt = (uint_32 *)kmap_vm_pt_dir;
	/* combine flag bits with | rather than +: OR cannot carry into
	 * the address bits if a flag is already set */
	*(pt + index) = phy | PG_P | PG_RW | PG_U;
	return 0;
}

/* Build the page directory and page tables for a new process described
 * by mm, using kmap slots as scratch mappings. Compiled only when
 * KDEBUG == 1; otherwise a no-op returning 0.
 *
 * Layout assumed (NOTE(review): inferred from the offsets below --
 * confirm against the allocator): phy+0 is the new page directory,
 * phy+PG_SIZE and phy+2*PG_SIZE are page tables, phy+3*PG_SIZE onward
 * are the process's data pages; the 64th page (phy + PG_SIZE*63) is the
 * kernel stack, the 63rd (phy + PG_SIZE*62) the user stack.
 */
int page_map(struct mm_frame *mm){
#if (KDEBUG == 1)
	uint_32 phy = mm->start_phy;
	uint_32 vm = mm->start_vm;

	uint_32 pd_index;
	uint_32 pt_index;
	uint_32 i;

	PD_INDEX(pd_index,vm);
	PT_INDEX(pt_index,vm);

	/* since the page entries that map the kernel space are the same,
	 * we first copy the parent process's page directory.
	 */
	kremap(0,phy);	/* kmap index 0 represents the new process's page directory */
	/* kmap index 1 represents the parent process's page directory */
	kremap(1,cur_proc->context.cr3);
	/* kmap index 2 represents the new process's page table */
	kremap(2,phy + PG_SIZE);
	
	/* copy parent page directory (slot 1) into the new one (slot 0) */
	page_copy((uint_32)KMAP_VM, (uint_32)(KMAP_VM + 0x04));

	/* map the kernel stack and user stack */
	/* kernel stack always takes the last page (phy + PG_SIZE*63), */
	/* while user stack always takes the last page but one */
	PD_INDEX(pd_index, K_STACK);
	page_dir_map(KMAP_VM,pd_index,phy + PG_SIZE);
	PT_INDEX(pt_index, K_STACK);
	page_tbl_map(KMAP_VM + 0x08,pt_index,phy + PG_SIZE * 63);

	/* move kmap slot 2 on to the next page table */
	kremap(2,phy + PG_SIZE * 2);
	PD_INDEX(pd_index, U_STACK);
	page_dir_map(KMAP_VM,pd_index,phy + PG_SIZE * 2);
	PT_INDEX(pt_index, U_STACK);
	page_tbl_map(KMAP_VM + 0x08,pt_index,phy + PG_SIZE * 62);

	
	/* now we map the user space */
	PD_INDEX(pd_index, U_VM);
	page_dir_map(KMAP_VM,pd_index,phy);

	/* physical address of the pages the process uses: the first three
	 * pages (directory + two tables) and the last two (stacks) are
	 * reserved, leaving 63 - 5 = 58 data pages */
	phy += (PG_SIZE * 3);
	for (i = 0; i < (63 - 5); i++,phy += PG_SIZE){
		page_tbl_map(KMAP_VM + 0x08,i,phy);
	}
#endif
	return 0;
}

/*
 * Page-related scheduling hook.
 *
 * NOTE(review): not implemented -- always returns 0. Purpose cannot be
 * determined from this file; presumably invoked by the scheduler on a
 * context/page switch -- confirm against callers.
 */
int page_sched(void){
	return 0;
}

