#include "kernel/memory/memory.h"
#include "kernel/log/log.h"
#include "libs/lib.h"
#include "kernel/task/task.h"

uint64_t * Global_CR3 = NULL;	// physical address of the current top-level page table (PML4), refreshed from Get_gdt()
//// each zone index

int ZONE_DMA_INDEX	= 0;
int ZONE_NORMAL_INDEX	= 0;	//low 1GB RAM ,was mapped in pagetable
int ZONE_UNMAPED_INDEX	= 0;	//above 1GB RAM,unmapped in pagetable



// Linker-script symbols marking the kernel image's section boundaries.
// Only their ADDRESSES are meaningful (taken with & below), never their values.
extern char _text;

extern char _etext;

extern char _data;

extern char _edata;

extern char _bss;

extern char _ebss;

extern char _end;

extern char _erodata;


void mapApicBaseAddress(uint64_t address,int isUser,int isForce){

    uint64_t vbaseAddress = (uint64_t)Phy_To_Virt(address);

    uint64_t* ppml4e;
	uint64_t* ppdpte;
	uint64_t* ppdte;
	
	MEMORY->get_page_table_entry((uint64_t)vbaseAddress,&ppml4e,&ppdpte,&ppdte);

    if (isForce || *ppml4e == 0)
    {
        uint64_t* virtualAddress = (uint64_t*)MEMORY->kmalloc(PAGE_4K_SIZE, 0);
		
		LOG_ERROR("address:%lx\n",*ppml4e);

        set_mpl4t(ppml4e,mk_mpl4t(Virt_To_Phy(virtualAddress),isUser ?  PAGE_USER_GDT : PAGE_KERNEL_GDT));

		LOG_ERROR("address:%lx\n",*ppml4e);

    }

    if (isForce || *ppdpte == 0)
    {
        uint64_t* virtualAddress = (uint64_t*)MEMORY->kmalloc(PAGE_4K_SIZE, 0);
        set_pdpt(ppdpte,mk_pdpt(Virt_To_Phy(virtualAddress),isUser ? PAGE_USER_Dir : PAGE_KERNEL_Dir));
    }
    
	if(isForce || *ppdte == 0){

		uint64_t* virtualAddress = (uint64_t*)MEMORY->kmalloc(PAGE_4K_SIZE, 0);
		set_pdt(ppdte,mk_pdpt(address,isUser ? PAGE_USER_Page : PAGE_KERNEL_Page ));

	}

    flush_tlb();

}


// slab / slabcache have trivial constructors and destructors; the real
// initialisation happens in initSlabCachePool() and kmalloc_create().
slab::slab(/* args */)
{
}

slab::~slab()
{
}

slabcache::slabcache(/* args */)
{
}

slabcache::~slabcache()
{
}





memorymanage* memorymanage::memory = 0;

// Build the memory subsystem.  Order matters: the physical page allocator
// must exist before the slab pools, and the slab pools before the frame
// buffer / page tables are rebuilt (those steps kmalloc() table pages).
memorymanage::memorymanage(/* args */)
{
 
	initMemoryManage();
	initSlabCachePool();
	frame_buffer_init();
	initPageTable();

}




// Nothing to tear down: all managed memory lives for the kernel's lifetime.
memorymanage::~memorymanage()
{

}



// Initialise the general-purpose slab allocator (kmalloc pools).
// Sixteen size classes (32B..1MB, powers of two) get their descriptors and
// bitmaps carved out of the bootstrap area after the global structures,
// plus one 2MB page each as initial object storage.
void memorymanage::initSlabCachePool(){

	
	Page* page = NULL;
	uint64_t* virtualAddress = NULL;

	// Size classes: 2^(i+5) bytes, i.e. 32, 64, ..., 1048576.
	for (uint16_t i = 0; i < 16; i++)
	{
		kmalloc_cache_size[i].size = pow(2,i + 5);
	}
	

	uint64_t temp_address = globalMemoryDesc.end_of_struct;
	

	for (uint16_t i = 0; i < 16; i++)
	{
		// Place the pool's head slab descriptor in the bootstrap area and bump
		// end_of_struct, 8-byte aligned.  The old mask `(~sizeof(uint64_t)-1)`
		// evaluated to ~8-1 and cleared bits 0 AND 3 of the address,
		// corrupting the layout; the intended mask is ~(sizeof(uint64_t)-1).
		kmalloc_cache_size[i].cache_pool = (slab*)globalMemoryDesc.end_of_struct;
		globalMemoryDesc.end_of_struct = (globalMemoryDesc.end_of_struct + sizeof(slab) + sizeof(uint64_t) * 10 ) & (~(sizeof(uint64_t) - 1));
		
		
		
		list_init(&kmalloc_cache_size[i].cache_pool->list);
			
		kmalloc_cache_size[i].cache_pool->using_count = 0;
		kmalloc_cache_size[i].cache_pool->free_count = PAGE_2M_SIZE / kmalloc_cache_size[i].size;
		// Bitmap length in bytes, rounded up to a whole uint64_t word.
		kmalloc_cache_size[i].cache_pool->color_length = ((PAGE_2M_SIZE / kmalloc_cache_size[i].size + sizeof(uint64_t) * 8 - 1)  >> 6) << 3;
		kmalloc_cache_size[i].cache_pool->color_count = kmalloc_cache_size[i].cache_pool->free_count;
		kmalloc_cache_size[i].cache_pool->color_map = (uint64_t*)globalMemoryDesc.end_of_struct;
		globalMemoryDesc.end_of_struct = (globalMemoryDesc.end_of_struct + kmalloc_cache_size[i].cache_pool->color_length + sizeof(uint64_t)) & (~(sizeof(uint64_t) - 1)); 
		
		// Default every bit to "used", then CLEAR the bits of the objects that
		// really exist — same idiom as kmalloc_create().  The old code used
		// `=` instead of `^=`, overwriting each word with a single bit.
		memset(kmalloc_cache_size[i].cache_pool->color_map,0xff,kmalloc_cache_size[i].cache_pool->color_length);
		for (uint64_t j = 0; j < kmalloc_cache_size[i].cache_pool->color_count; j++){
			*(kmalloc_cache_size[i].cache_pool->color_map + (j >> 6)) ^= 1UL << (j % 64);

		}
		kmalloc_cache_size[i].total_free = kmalloc_cache_size[i].cache_pool->color_count;
		kmalloc_cache_size[i].total_using = 0;
		
	}
	
	// Mark the 2MB pages consumed by the structures laid out above as in use.
	for ( uint64_t i = (PAGE_2M_ALIGN(temp_address)) >> PAGE_2M_SHIFT; i < (Virt_To_Phy(globalMemoryDesc.end_of_struct) >> PAGE_2M_SHIFT); i++)
	{
		page = globalMemoryDesc.pages_struct + i;
		
		*(globalMemoryDesc.bits_map + ((page->PHY_address >> PAGE_2M_SHIFT) >> 6) ) |= 1UL << ((page->PHY_address >> PAGE_2M_SHIFT) % 64);
		page->zone_struct->page_using_count ++;
		page->zone_struct->page_free_count --;

		page_init(page,PG_PTable_Maped | PG_Kernel_Init | PG_Kernel);
	}

	// Give each pool one 2MB-aligned page of object storage.
	for (uint16_t i = 0; i < 16; i++){

		virtualAddress = (uint64_t *)((globalMemoryDesc.end_of_struct + PAGE_2M_SIZE * i + PAGE_2M_SIZE - 1) & PAGE_2M_MASK);
		page = Virt_To_2M_Page(virtualAddress);

		*(globalMemoryDesc.bits_map + ((page->PHY_address >> PAGE_2M_SHIFT) >> 6)) |= 1UL << (page->PHY_address >> PAGE_2M_SHIFT) % 64;
		page->zone_struct->page_using_count++;
		page->zone_struct->page_free_count--;

		page_init(page,PG_PTable_Maped | PG_Kernel_Init | PG_Kernel);

		kmalloc_cache_size[i].cache_pool->page = page;
		kmalloc_cache_size[i].cache_pool->Vaddress = virtualAddress;


	}

}

// Allocate `size` bytes from the slab pools.
//   size       : requested byte count (max 1MB); rounded up to the smallest
//                size class that fits.
//   gfp_flages : allocation flags (currently unused).
// Returns a kernel-virtual pointer, or NULL on failure.
void* memorymanage::kmalloc(uint64_t size, uint64_t gfp_flages){
	int i,j;
	slab* slab = NULL;
	if(size > 1048576)
	{
		LOG_INFO("kmalloc() ERROR: kmalloc size too long:%08d\n",size);
		return NULL;
	}
	// Pick the first size class large enough for the request.
	for(i = 0;i < 16;i++)
		if(kmalloc_cache_size[i].size >= size)
			break;
	slab = kmalloc_cache_size[i].cache_pool;

	// (Removed leftover debug code that called BREAK() unconditionally after
	// 3000 allocations — it would trap a running kernel — plus a dead
	// `if(false) BREAK();`.)
	if(kmalloc_cache_size[i].total_free != 0)
	{
		// Walk the pool's slab ring until a slab with a free object is found.
		do
		{
			if(slab->free_count == 0)
				slab = container_of(list_next(&slab->list),class slab,list);
			else
				break;
		}while(slab != kmalloc_cache_size[i].cache_pool);	
	}
	else
	{
		// Pool exhausted: grow it by one freshly created slab.
		slab = kmalloc_create(kmalloc_cache_size[i].size);
		
		if(slab == NULL)
		{
			LOG_INFO("kmalloc()->kmalloc_create()=>slab == NULL\n");
			return NULL;
		}
		
		kmalloc_cache_size[i].total_free += slab->color_count;

		LOG_INFO("kmalloc()->kmalloc_create()<=size:%#010x\n",kmalloc_cache_size[i].size);///////
		
		list_add_to_before(&kmalloc_cache_size[i].cache_pool->list,&slab->list);
	}

	// Scan the slab's bitmap for a clear bit (= free object slot).
	for(j = 0;j < slab->color_count;j++)
	{
		// Fully-used word: skip all 64 slots at once.
		if(*(slab->color_map + (j >> 6)) == 0xffffffffffffffffUL)
		{
			j += 63;
			continue;
		}
			
		if( (*(slab->color_map + (j >> 6)) & (1UL << (j % 64))) == 0 )
		{
			// Claim the slot and update pool accounting.
			*(slab->color_map + (j >> 6)) |= 1UL << (j % 64);
			slab->using_count++;
			slab->free_count--;

			kmalloc_cache_size[i].total_free--;
			kmalloc_cache_size[i].total_using++;
			return (void *)((char *)slab->Vaddress + kmalloc_cache_size[i].size * j);
		}
	}

	LOG_INFO("kmalloc() ERROR: no memory can alloc\n");
	return NULL; 

}

// Create a new slab for size class `size`, backed by one freshly allocated
// 2MB page.  For small classes (<=512B) the slab descriptor and bitmap live
// at the tail of the page itself; for larger classes they are kmalloc'ed
// separately.  Returns the new slab, or NULL on failure (page released).
slab* memorymanage::kmalloc_create(uint64_t size){


	int i;
	slab * slab = NULL;
	Page * page = NULL;
	uint64_t * vaddresss = NULL;
	long structsize = 0;

	page = alloc_pages(ZONE_NORMAL,1, 0);
	
	if(page == NULL)
	{
		LOG_INFO("kmalloc_create()->alloc_pages()=>page == NULL\n");
		return NULL;
	}
	
	page_init(page,PG_Kernel);

	switch(size)
	{
		////////////////////slab + map in 2M page

		case 32:
		case 64:
		case 128:
		case 256:
		case 512:

			vaddresss = Phy_To_Virt(page->PHY_address);
			structsize = sizeof(class slab) + PAGE_2M_SIZE / size / 8;

			// Descriptor + bitmap sit at the END of the 2MB page; the space
			// in front of them holds the objects.
			slab = (class slab *)((unsigned char *)vaddresss + PAGE_2M_SIZE - structsize);
			slab->color_map = (uint64_t *)((unsigned char *)slab + sizeof(class slab));

			slab->free_count = (PAGE_2M_SIZE - (PAGE_2M_SIZE / size / 8) - sizeof(class slab)) / size;
			slab->using_count = 0;
			slab->color_count = slab->free_count;
			slab->Vaddress = vaddresss;
			slab->page = page;
			list_init(&slab->list);

			slab->color_length = ((slab->color_count + sizeof(uint64_t) * 8 - 1) >> 6) << 3;
			memset(slab->color_map,0xff,slab->color_length);

			// Clear the bits of the objects that actually exist.
			for(i = 0;i < slab->color_count;i++)
				*(slab->color_map + (i >> 6)) ^= 1UL << i % 64;

			break;

		///////////////////kmalloc slab and map,not in 2M page anymore

		case 1024:		//1KB
		case 2048:
		case 4096:		//4KB
		case 8192:
		case 16384:

		//////////////////color_map is a very short buffer.

		case 32768:
		case 65536:
		case 131072:		//128KB
		case 262144:
		case 524288:
		case 1048576:		//1MB

			slab = (class slab *)kmalloc(sizeof(class slab),0);
			if(slab == NULL)	// was dereferenced unchecked before
			{
				LOG_INFO("kmalloc_create()->kmalloc()=>slab == NULL\n");
				free_pages(page,1);
				return NULL;
			}

			slab->free_count = PAGE_2M_SIZE / size;
			slab->using_count = 0;
			slab->color_count = slab->free_count;

			slab->color_length = ((slab->color_count + sizeof(uint64_t) * 8 - 1) >> 6) << 3;

			slab->color_map = (uint64_t *)kmalloc(slab->color_length,0);
			if(slab->color_map == NULL)	// was dereferenced unchecked before
			{
				LOG_INFO("kmalloc_create()->kmalloc()=>color_map == NULL\n");
				kfree(slab);
				free_pages(page,1);
				return NULL;
			}
			memset(slab->color_map,0xff,slab->color_length);

			slab->Vaddress = Phy_To_Virt(page->PHY_address);
			slab->page = page;
			list_init(&slab->list);

			// Clear the bits of the objects that actually exist.
			for(i = 0;i < slab->color_count;i++)
				*(slab->color_map + (i >> 6)) ^= 1UL << i % 64;

			break;

		default:

			LOG_INFO("kmalloc_create() ERROR: wrong size:%08d\n",size);
			free_pages(page,1);
			
			return NULL;
	}	
	
	return slab;

}

// Free a block previously returned by kmalloc().  The owning slab is found
// by comparing the pointer's 2MB-aligned base against every slab's Vaddress
// in every size class, then the object's bitmap bit is cleared.  A fully
// idle, surplus slab is torn down and its backing page released.
// Returns 1 on success, 0 when the pointer belongs to no slab.
uint64_t memorymanage::kfree(void* address){

			int i;
	int index;
	class slab * slab = NULL;
	// Object storage is 2MB-page based, so the page base identifies the slab.
	void * page_base_address = (void *)((uint64_t)address & PAGE_2M_MASK);

	for(i = 0;i < 16;i++)
	{
		slab = kmalloc_cache_size[i].cache_pool;
		do
		{
			if(slab->Vaddress == page_base_address)
			{
				// Object index inside this slab.
				index = ((uint64_t)address - (uint64_t)slab->Vaddress) / kmalloc_cache_size[i].size;

				// Toggle (clear) the "used" bit for this object.
				*(slab->color_map + (index >> 6)) ^= 1UL << index % 64;

				slab->free_count++;
				slab->using_count--;

				kmalloc_cache_size[i].total_free++;
				kmalloc_cache_size[i].total_using--;

				// Reclaim the slab when it is completely idle, the pool keeps
				// enough spare capacity without it, and it is not the pool head.
				if((slab->using_count == 0) && (kmalloc_cache_size[i].total_free >= slab->color_count * 3 / 2) && (kmalloc_cache_size[i].cache_pool != slab))
				{
					switch(kmalloc_cache_size[i].size)
					{
						////////////////////slab + map in 2M page
				
						case 32:
						case 64:
						case 128:
						case 256:	
						case 512:
							// Descriptor lives inside the 2M page itself, so
							// releasing the page frees everything at once.
							list_del(&slab->list);
							kmalloc_cache_size[i].total_free -= slab->color_count;

							page_clean(slab->page);
							free_pages(slab->page,1);
							break;
				
						default:
							// Descriptor and bitmap were kmalloc'ed separately
							// and must be freed individually (recursive kfree).
							list_del(&slab->list);
							kmalloc_cache_size[i].total_free -= slab->color_count;

							kfree(slab->color_map);

							page_clean(slab->page);
							free_pages(slab->page,1);
							kfree(slab);
							break;
					}
 
				}

				return 1;
			}
			else
				slab = container_of(list_next(&slab->list),class slab,list);				

		}while(slab != kmalloc_cache_size[i].cache_pool);
	
	}
	
	LOG_INFO("kfree() ERROR: can`t free memory\n");
	
	return 0;


}




// Initialise the physical-memory manager: read the BIOS E820 map the boot
// stage left at 0xffff800000007e00, then lay out the page bitmap, the Page
// descriptor array and the Zone array right after the kernel image, and
// finally mark every 2MB page those structures occupy as in use.
void memorymanage::initMemoryManage(){

	// Kernel image section boundaries (linker-script symbols).
	globalMemoryDesc.end_code = (uint64_t)&_etext;
	globalMemoryDesc.end_data = (uint64_t)&_edata;
	globalMemoryDesc.end_rodata = (uint64_t)&_erodata;
	globalMemoryDesc.start_code = (uint64_t)&_text;
	globalMemoryDesc.start_brk = (uint64_t)&_end;

    int i,j;
	uint64_t TotalMem = 0 ;
	MemoryE820Info  *p = NULL;	
	// vbelog::LOG_INFO("Display Physics Address MAP,Type(1:RAM,2:ROM or Reserved,3:ACPI Reclaim Memory,4:ACPI NVS Memory,Others:Undefine)\n");
	p = (MemoryE820Info *)0xffff800000007e00;

	// Copy the E820 entries (type 1 = usable RAM).  Note: e820_length ends
	// up holding the LAST VALID INDEX, not the entry count — all loops below
	// therefore iterate with `<=`.
	for(i = 0;i < 32;i++)
	{
		// vbelog::LOG_INFO("Address:%#018lx\tLength:%#018x\tType:%#010x\n",p->address,p->length,p->type);

		if(p->type == 1)
			TotalMem +=  p->length;

        globalMemoryDesc.e820[i].address = p->address;
        globalMemoryDesc.e820[i].length = p->length;
        globalMemoryDesc.e820[i].type = p->type;
        globalMemoryDesc.e820_length = i;

        p++;

        // Peek at the next entry; stop on an invalid/terminating record.
        if(p->type > 4 || p->length == 0 || p->type < 1)
            break;	

		
			
	}

	// vbelog::LOG_INFO("OS Can Used Total RAM:%#018lx\n",TotalMem);

    TotalMem = 0;

    // Count the whole usable 2MB pages (start rounded up, end rounded down).
	for(i = 0;i <= globalMemoryDesc.e820_length;i++)
	{
		uint64_t start,end;
		if(globalMemoryDesc.e820[i].type != 1)
			continue;
		start = PAGE_2M_ALIGN(globalMemoryDesc.e820[i].address);
		end   = ((globalMemoryDesc.e820[i].address + globalMemoryDesc.e820[i].length) >> PAGE_2M_SHIFT) << PAGE_2M_SHIFT;
		if(end <= start)
			continue;
		TotalMem += (end - start) >> PAGE_2M_SHIFT;
	}

	// vbelog::LOG_INFO("OS Can Used Total 2M PAGEs:%#010x=%010d\n",TotalMem,TotalMem);

	// Re-purpose TotalMem: highest physical address in the map.  The bitmap
	// and Page array are sized over the whole address range, holes included.
    TotalMem = globalMemoryDesc.e820[globalMemoryDesc.e820_length].address \
    + globalMemoryDesc.e820[globalMemoryDesc.e820_length].length;

	//bits map construction init
    // Bitmap begins at the first 4K boundary after the kernel brk.
    globalMemoryDesc.bits_map = (uint64_t *)((globalMemoryDesc.start_brk + PAGE_4K_SIZE - 1) & PAGE_4K_MASK);

    // One bit per 2MB page of physical address space.
	globalMemoryDesc.bits_size = TotalMem >> PAGE_2M_SHIFT;

    // Bitmap byte length, rounded down to an 8-byte multiple after padding.
    globalMemoryDesc.bits_length = (((uint64_t)(TotalMem >> PAGE_2M_SHIFT) + sizeof(uint64_t) * 8 - 1) / 8) & ( ~ (sizeof(uint64_t) - 1));

    // Default every page to "used"; usable pages are cleared in the zone loop.
	memset(globalMemoryDesc.bits_map,0xff,globalMemoryDesc.bits_length);		//init bits map memory

    	//pages construction init

	globalMemoryDesc.pages_struct = (struct Page *)(((uint64_t)globalMemoryDesc.bits_map + globalMemoryDesc.bits_length + PAGE_4K_SIZE - 1) & PAGE_4K_MASK);

	globalMemoryDesc.pages_size = TotalMem >> PAGE_2M_SHIFT;

	globalMemoryDesc.pages_length = ((TotalMem >> PAGE_2M_SHIFT) * sizeof(struct Page) + sizeof(uint64_t) - 1) & ( ~ (sizeof(uint64_t) - 1));

	memset(globalMemoryDesc.pages_struct,0x00,globalMemoryDesc.pages_length);	//init pages memory

	//zones construction init

	globalMemoryDesc.zones_struct = (struct Zone *)(((uint64_t)globalMemoryDesc.pages_struct + globalMemoryDesc.pages_length + PAGE_4K_SIZE - 1) & PAGE_4K_MASK);

	globalMemoryDesc.zones_size   = 0;

	// Reserve room for up to 5 zones; shrunk to the real size further below.
	globalMemoryDesc.zones_length = (5 * sizeof(struct Zone) + sizeof(long) - 1) & (~(sizeof(long) - 1));

	memset(globalMemoryDesc.zones_struct,0x00,globalMemoryDesc.zones_length);	//init zones memory

    // One Zone per usable E820 region; init its Page descriptors and clear
    // the corresponding "used" bits in the bitmap.
    for(i = 0;i <= globalMemoryDesc.e820_length;i++)
	{
		uint64_t start,end;
		struct Zone * z;
		struct Page * p;
		uint64_t * b;

		if(globalMemoryDesc.e820[i].type != 1)
			continue;
		start = PAGE_2M_ALIGN(globalMemoryDesc.e820[i].address);
		end   = ((globalMemoryDesc.e820[i].address + globalMemoryDesc.e820[i].length) >> PAGE_2M_SHIFT) << PAGE_2M_SHIFT;
		if(end <= start)
			continue;
		
		//zone init

		z = globalMemoryDesc.zones_struct + globalMemoryDesc.zones_size;
		globalMemoryDesc.zones_size++;

		z->zone_start_address = start;
		z->zone_end_address = end;
		z->zone_length = end - start;

		z->page_using_count = 0;
		z->page_free_count = (end - start) >> PAGE_2M_SHIFT;

		z->total_pages_link = 0;

		z->attribute = 0;
		z->GMD_struct = &globalMemoryDesc;

		z->pages_length = (end - start) >> PAGE_2M_SHIFT;
		z->pages_group =  (struct Page *)(globalMemoryDesc.pages_struct + (start >> PAGE_2M_SHIFT));

		//page init
		p = z->pages_group;
		for(j = 0;j < z->pages_length; j++ , p++)
		{
			p->zone_struct = z;
			p->PHY_address = start + PAGE_2M_SIZE * j;
			p->attribute = 0;

			p->reference_count = 0;

			p->age = 0;
            // Clear this usable page's bit (one bit per 2MB page; the bitmap
            // was memset to all-ones above, so XOR clears it).
			*(globalMemoryDesc.bits_map + ((p->PHY_address >> PAGE_2M_SHIFT) >> 6)) ^= 1UL << (p->PHY_address >> PAGE_2M_SHIFT) % 64;

		}
		
	}

    	/////////////init address 0 to page struct 0; because the globalMemoryDesc.e820[0].type != 1
	
	globalMemoryDesc.pages_struct->zone_struct = globalMemoryDesc.zones_struct;


	globalMemoryDesc.pages_struct->PHY_address = 0UL;

	set_page_attribute(globalMemoryDesc.pages_struct,PG_PTable_Maped | PG_Kernel_Init | PG_Kernel);
	globalMemoryDesc.pages_struct->reference_count = 1;
	globalMemoryDesc.pages_struct->age = 0;

	/////////////

	// Shrink zones_length to the number of zones actually created.
	globalMemoryDesc.zones_length = (globalMemoryDesc.zones_size * sizeof(struct Zone) + sizeof(long) - 1) & ( ~ (sizeof(long) - 1));

	// vbelog::LOG_INFO("bits_map:%#018lx,bits_size:%#018lx,bits_length:%#018lx\n",globalMemoryDesc.bits_map,globalMemoryDesc.bits_size,globalMemoryDesc.bits_length);

	// vbelog::LOG_INFO("pages_struct:%#018lx,pages_size:%#018lx,pages_length:%#018lx\n",globalMemoryDesc.pages_struct,globalMemoryDesc.pages_size,globalMemoryDesc.pages_length);

	// vbelog::LOG_INFO("zones_struct:%#018lx,zones_size:%#018lx,zones_length:%#018lx\n",globalMemoryDesc.zones_struct,globalMemoryDesc.zones_size,globalMemoryDesc.zones_length);

	ZONE_DMA_INDEX = 0;	//need rewrite in the future
	ZONE_NORMAL_INDEX = 0;	//	need rewrite in the future
	ZONE_UNMAPED_INDEX = 0;
	// First zone starting at 4GB marks the beginning of unmapped RAM.
	for(i = 0;i < globalMemoryDesc.zones_size;i++)	//need rewrite in the future
	{
		struct Zone * z = globalMemoryDesc.zones_struct + i;
		// vbelog::LOG_INFO("zone_start_address:%#018lx,zone_end_address:%#018lx,zone_length:%#018lx,pages_group:%#018lx,pages_length:%#018lx\n",z->zone_start_address,z->zone_end_address,z->zone_length,z->pages_group,z->pages_length);

		if(z->zone_start_address == 0x100000000 && !ZONE_UNMAPED_INDEX)
			ZONE_UNMAPED_INDEX = i;
	}
	
	globalMemoryDesc.end_of_struct = (uint64_t)((uint64_t)globalMemoryDesc.zones_struct + globalMemoryDesc.zones_length + sizeof(long) * 32) & ( ~ (sizeof(long) - 1));	////need a blank to separate globalMemoryDesc

	// vbelog::LOG_INFO("start_code:%#018lx,end_code:%#018lx,end_data:%#018lx,end_brk:%#018lx,end_of_struct:%#018lx\n",globalMemoryDesc.start_code,globalMemoryDesc.end_code,globalMemoryDesc.end_data,globalMemoryDesc.end_brk, globalMemoryDesc.end_of_struct);

	
	// Mark every page from physical 0 up to end_of_struct as used by the
	// kernel and its management structures.
	i = Virt_To_Phy(globalMemoryDesc.end_of_struct) >> PAGE_2M_SHIFT;

	for(j = 0;j <= i;j++)
	{
		Page * tmp_page =  globalMemoryDesc.pages_struct + j;
		page_init(globalMemoryDesc.pages_struct + j,PG_PTable_Maped | PG_Kernel_Init  | PG_Kernel);
		*(globalMemoryDesc.bits_map + ((tmp_page->PHY_address >> PAGE_2M_SHIFT) >> 6)) |= 1UL << (tmp_page->PHY_address >> PAGE_2M_SHIFT) % 64;
		tmp_page->zone_struct->page_using_count++;
		tmp_page->zone_struct->page_free_count--;
	}


	Global_CR3 = Get_gdt();

	// vbelog::color_printk(INDIGO,BLACK,"Global_CR3\t:%#018lx\n",Global_CR3);
	// vbelog::color_printk(INDIGO,BLACK,"*Global_CR3\t:%#018lx\n",*Phy_To_Virt(Global_CR3) & (~0xff));
	// vbelog::color_printk(PURPLE,BLACK,"**Global_CR3\t:%#018lx\n",*Phy_To_Virt(*Phy_To_Virt(Global_CR3) & (~0xff)) & (~0xff));


    // Clearing the first 10 PML4 entries (identity mapping) is deferred.
	// for(i = 0;i < 10;i++)
	// 	*(Phy_To_Virt(Global_CR3)  + i) = 0UL;
	
	flush_tlb();
	
	// Publish the singleton used by the MEMORY macro / operator new.
	memory = this;


}


//设置属性,设置引用
uint64_t memorymanage::page_init(struct Page * page,uint64_t flags)
{
	page->attribute |= flags;
	
	if(!page->reference_count || (page->attribute & PG_Shared))
	{
		page->reference_count++;
		page->zone_struct->total_pages_link++;		
	}	
	
	return 1;
}

// Drop one reference from `page`; once the count reaches zero, every
// attribute bit except PG_PTable_Maped is cleared.  Always returns 1.
uint64_t memorymanage::page_clean(struct Page * page)
{
	--page->reference_count;
	--page->zone_struct->total_pages_link;

	if(page->reference_count == 0)
		page->attribute &= PG_PTable_Maped;

	return 1;
}

// Return `number` (1..63) consecutive 2MB pages starting at `page` to their
// zone: clear each page's bit in the global bitmap, update the zone
// counters and wipe the page attributes.
void memorymanage::free_pages(Page * page,int number)
{
	if(page == NULL)
	{
		LOG_INFO("free_pages() ERROR: page is invalid\n");
		return ;
	}

	if(number >= 64 || number <= 0)
	{
		LOG_INFO("free_pages() ERROR: number is invalid\n");
		return ;
	}

	for(int idx = 0; idx < number; ++idx, ++page)
	{
		const uint64_t bit = page->PHY_address >> PAGE_2M_SHIFT;
		*(globalMemoryDesc.bits_map + (bit >> 6)) &= ~(1UL << (bit % 64));
		page->zone_struct->page_using_count--;
		page->zone_struct->page_free_count++;
		page->attribute = 0;
	}
}

// Allocate `number` (1..63) consecutive free 2MB pages from the selected
// zone range.  On success the pages are marked used in the bitmap, the zone
// counters are updated, each page's attribute is set, and a pointer to the
// first Page descriptor is returned; NULL when no suitable run exists.
Page* memorymanage::alloc_pages(int zone_select,int number,uint64_t page_flags)
{
	int i;
	uint64_t page = 0;
	uint64_t attribute = 0;
	int zone_start = 0;
	int zone_end = 0;

	if (number >= 64 || number <= 0)
	{
		LOG_INFO("alloc_pages() ERROR: number is invalid \n");
		return NULL;
	}
	
	// Translate the zone selector into a range of zone indices.
	switch(zone_select)
	{
		case ZONE_DMA:
				zone_start = 0;
				zone_end = ZONE_DMA_INDEX;
				attribute = PG_PTable_Maped;
			break;

		case ZONE_NORMAL:
				zone_start = ZONE_DMA_INDEX;
				zone_end = ZONE_NORMAL_INDEX;
				attribute = PG_PTable_Maped;

			break;

		case ZONE_UNMAPED:
				zone_start = ZONE_UNMAPED_INDEX;
				zone_end = globalMemoryDesc.zones_size - 1;
				attribute = 0;
			break;

		default:
			LOG_INFO("alloc_pages error zone_select index\n");
			return NULL;
			break;
	}

	for(i = zone_start;i <= zone_end; i++)
	{
		struct Zone * z;
		uint64_t j;
		uint64_t start,end;
		uint64_t tmp;

		if((globalMemoryDesc.zones_struct + i)->page_free_count < number)
			continue;

		z = globalMemoryDesc.zones_struct + i;
		start = z->zone_start_address >> PAGE_2M_SHIFT;
		end = z->zone_end_address >> PAGE_2M_SHIFT;

		// First step lands j on a 64-bit bitmap-word boundary; afterwards we
		// advance one whole word at a time.
		tmp = 64 - start % 64;

		for(j = start;j <= end;j += j % 64 ? tmp : 64)
		{
			uint64_t * p = globalMemoryDesc.bits_map + (j >> 6);	// bitmap word holding bit j
			uint64_t shift = j % 64;	// bit offset inside that word
			uint64_t k;
			uint64_t num = ((1UL << number) - 1);	// mask of `number` consecutive bits

			for(k = shift;k < 64;k++)
			{
				// View 64 bits starting at bit position k by splicing this
				// word with the next when k != 0.  NOTE: the original ternary
				// had its arms swapped — it tested the UNSHIFTED word for
				// k != 0 and computed `*(p+1) << 64` (undefined behaviour)
				// for k == 0.
				uint64_t window = k ? ((*p >> k) | (*(p + 1) << (64 - k))) : *p;
				if( !(window & num) )
				{
					uint64_t	l;
					// Absolute page index of the first free page in the run.
					page = j + k - shift;
					for(l = 0;l < number;l++)
					{	

						Page * x = globalMemoryDesc.pages_struct + page + l;
						*(globalMemoryDesc.bits_map + ((x->PHY_address >> PAGE_2M_SHIFT) >> 6 )) |= 1UL << (x->PHY_address >> PAGE_2M_SHIFT) % 64;
						z->page_using_count++;
						z->page_free_count--;
						x->attribute = attribute;
					}
					goto find_free_pages;
				}
			}
		
		}
	}

	return NULL;

find_free_pages:

	return (Page *)(globalMemoryDesc.pages_struct + page);
}

// Overwrite the attribute word of `page` with `flags`.
// Returns 1 on success, 0 (with an error log) when page is NULL.
uint64_t memorymanage::set_page_attribute(Page* page,uint64_t flags){

	if(page == NULL)
	{
		LOG_INFO("set_page_attribute() ERROR: page == NULL\n");
		return 0;
	}

	page->attribute = flags;
	return 1;
}

// Read the attribute word of `page`; 0 (with an error log) when page is NULL.
uint64_t memorymanage::get_page_attribute(Page * page)
{
	if(page == NULL)
	{
		LOG_INFO("get_page_attribute() ERROR: page == NULL\n");
		return 0;
	}

	return page->attribute;
}








// Rebuild the kernel's linear mapping: for every page in every mapped zone,
// install a 2MB PDE at its Phy_To_Virt() address, creating intermediate
// PML4/PDPT tables on demand.  Zones at or beyond ZONE_UNMAPED_INDEX are
// skipped (RAM above 4GB stays unmapped here).
void memorymanage::initPageTable(){

	uint64_t i,j;
	uint64_t * tmp = NULL;

	Global_CR3 = Get_gdt();

	// Entry 256 of the PML4 — the first kernel-half slot — logged for sanity.
	tmp = (uint64_t *)(((uint64_t)Phy_To_Virt((uint64_t)Global_CR3 & (~ 0xfffUL))) + 8 * 256);
		
	LOG_INFO("1:%#018lx,%#018lx\t\t\n",(uint64_t)tmp,*tmp);

	tmp = Phy_To_Virt(*tmp & (~0xfffUL));

	tmp = Phy_To_Virt(*tmp & (~0xfffUL));

	for(i = 0;i < globalMemoryDesc.zones_size;i++)
	{
		Zone * z = globalMemoryDesc.zones_struct + i;
		Page * p = z->pages_group;

		// Stop at the first unmapped zone (physical RAM above 4GB).
		if(ZONE_UNMAPED_INDEX && i == ZONE_UNMAPED_INDEX)
			break;

		for(j = 0;j < z->pages_length ; j++,p++)
		{
			// PML4 entry for this page's kernel virtual address.
			tmp = (uint64_t *)(((uint64_t)Phy_To_Virt((uint64_t)Global_CR3 & (~ 0xfffUL))) + (((uint64_t)Phy_To_Virt(p->PHY_address) >> PAGE_GDT_SHIFT) & 0x1ff) * 8);
			
			if(*tmp == 0)
			{			
				uint64_t * virtualtmp = (uint64_t*)kmalloc(PAGE_4K_SIZE,0);
				set_mpl4t(tmp,mk_mpl4t(Virt_To_Phy(virtualtmp),PAGE_USER_GDT));
			}

			// PDPT entry.
			tmp = (uint64_t *)((uint64_t)Phy_To_Virt(*tmp & (~ 0xfffUL)) + (((uint64_t)Phy_To_Virt(p->PHY_address) >> PAGE_1G_SHIFT) & 0x1ff) * 8);
			
			if(*tmp == 0)
			{
				uint64_t* virtualtmp = (uint64_t*)kmalloc(PAGE_4K_SIZE,0);
				set_pdpt(tmp,mk_pdpt(Virt_To_Phy(virtualtmp),PAGE_USER_Dir));
			}

			// PDE: map the 2MB physical page itself.
			tmp = (uint64_t *)((uint64_t)Phy_To_Virt(*tmp & (~ 0xfffUL)) + (((uint64_t)Phy_To_Virt(p->PHY_address) >> PAGE_2M_SHIFT) & 0x1ff) * 8);
			
			set_pdt(tmp,mk_pdt(p->PHY_address,PAGE_USER_Page));

			// (Removed a stray `if(j % 50 == 0);` — an empty statement left
			// over from a commented-out debug log.)
		}
	}
	

	flush_tlb();

}





// Remap the linear frame buffer (physical 0xe0000000) into the kernel
// address space with 2MB pages flagged PWT|PCD (write-through, cache
// disabled, as appropriate for MMIO), then repoint vbe->FB_addr at the new
// virtual mapping.
void memorymanage::frame_buffer_init()
{
	////re init frame buffer;
	uint64_t i;
	uint64_t * tmp;
	uint64_t * tmp1;
	uint64_t phyaddress = 0xe0000000;	// NOTE(review): hard-coded FB base — presumably should come from the VBE mode info; confirm
	uint64_t * FB_addr = (uint64_t *)Phy_To_Virt(phyaddress);

	Global_CR3 = Get_gdt();

	// PML4 entry for the frame buffer's virtual address; create its PDPT if absent.
	tmp = (uint64_t *)(((uint64_t)Phy_To_Virt((uint64_t)Global_CR3 & (~ 0xfffUL))) + (((uint64_t)FB_addr >> PAGE_GDT_SHIFT) & 0x1ff) * 8);
	
	if(*tmp == 0)
	{			
		uint64_t * virtualtmp = (uint64_t*)kmalloc(PAGE_4K_SIZE,0);
		set_mpl4t(tmp,mk_mpl4t(Virt_To_Phy(virtualtmp),PAGE_KERNEL_GDT));
	}

	// PDPT entry; create its page directory if absent.
	tmp = (uint64_t *)((uint64_t)Phy_To_Virt(*tmp & (~ 0xfffUL)) + (((uint64_t)FB_addr >> PAGE_1G_SHIFT) & 0x1ff) * 8);
	
	if(*tmp == 0)
	{
		uint64_t* virtualtmp = (uint64_t*)kmalloc(PAGE_4K_SIZE,0);
		set_pdpt(tmp,mk_pdpt(Virt_To_Phy(virtualtmp),PAGE_KERNEL_Dir));
	}

	// One 2MB PDE per 2MB of frame buffer.  NOTE(review): reuses the single
	// PDPT entry above, so this assumes FB_length fits within one 1GB region.
	for(i = 0;i < vbelog::vbe->FB_length;i += PAGE_2M_SIZE)
	{
		tmp1 = Phy_To_Virt((uint64_t *)(*tmp & (~ 0xfffUL)) + (((uint64_t)((uint64_t)FB_addr + i) >> PAGE_2M_SHIFT) & 0x1ff));
		uint64_t phy = phyaddress + i;
		set_pdt(tmp1,mk_pdt(phy,PAGE_KERNEL_Page | PAGE_PWT | PAGE_PCD));
	}

	// From here on the frame buffer is written through its virtual address.
	vbelog::vbe->FB_addr= (uint32_t *)Phy_To_Virt(phyaddress);

	flush_tlb();
}

// Return pointers (via the out-params pml4e/pdpte/pdte) to the PML4E, PDPTE
// and PDE slots that translate the given kernel virtual address, reading the
// tables through the linear mapping of the current CR3.
// NOTE(review): assumes the PML4E and PDPTE are already present — a zero
// entry would make the next level's address computation meaningless.
void memorymanage::get_page_table_entry(uint64_t FB_addr,uint64_t** pml4e,uint64_t** pdpte,uint64_t** pdte){


	// uint64_t pml4e_index = ((virtualaddress >> PAGE_GDT_SHIFT) & 0x1ff);
	// uint64_t pdpte_index = ((virtualaddress >> PAGE_1G_SHIFT) & 0x1ff);
	// uint64_t pdte_index = ((virtualaddress >> PAGE_2M_SHIFT) & 0x1ff);
	Global_CR3 = Get_gdt();
	*pml4e = (uint64_t*)(((uint64_t)Phy_To_Virt((uint64_t)Global_CR3 & (~ 0xfffUL))) + (((uint64_t)FB_addr >> PAGE_GDT_SHIFT) & 0x1ff) * 8);
	*pdpte = (uint64_t*)((uint64_t)Phy_To_Virt(*(uint64_t*)*pml4e & (~ 0xfffUL)) + (((uint64_t)FB_addr >> PAGE_1G_SHIFT) & 0x1ff) * 8);
	*pdte = (uint64_t*)((uint64_t)Phy_To_Virt(*(uint64_t*)*pdpte & (~ 0xfffUL)) + (((uint64_t)FB_addr >> PAGE_2M_SHIFT) & 0x1ff) * 8);
}


// Extend the current task's heap: ensure every 2MB step in
// [endBrk, endBrk + length) is mapped in the task's page table, allocating
// intermediate tables and backing physical pages on demand.
// Returns the new program break (first address past the mapped region).
uint64_t memorymanage::doBrk(uint64_t endBrk,uint64_t length){

    Page * p = NULL;
    uint64_t* tmp = NULL;
    uint64_t* virtualAddrss = NULL;
	uint64_t i;

	for(i = endBrk;i < endBrk + length; i+=PAGE_2M_SIZE){

		// PML4 entry for this heap address; create the PDPT if absent.
		// (The entry is a uint64_t, so compare against 0, not NULL.)
		tmp = Phy_To_Virt((uint64_t *)((uint64_t)current->mm->pgd & (~ 0xfffUL)) + ((i >> PAGE_GDT_SHIFT) & 0x1ff));
		if(*tmp == 0){
			virtualAddrss = (uint64_t*)MEMORY->kmalloc(PAGE_4K_SIZE,0);
			memset(virtualAddrss,0,PAGE_4K_SIZE);
			set_mpl4t(tmp,mk_mpl4t(Virt_To_Phy(virtualAddrss),PAGE_USER_GDT));

		}

		// PDPT entry; create the page directory if absent.
		tmp = Phy_To_Virt((uint64_t *)(*tmp & (~ 0xfffUL)) + ((i >> PAGE_1G_SHIFT) & 0x1ff));

		if(*tmp == 0){

			virtualAddrss = (uint64_t*)MEMORY->kmalloc(PAGE_4K_SIZE,0);
			memset(virtualAddrss,0,PAGE_4K_SIZE);
			set_pdpt(tmp,mk_pdpt(Virt_To_Phy(virtualAddrss),PAGE_USER_Dir));

		}
		
		// PDE: back this 2MB step with a fresh physical page if unmapped.
		tmp = Phy_To_Virt((uint64_t *)(*tmp & (~ 0xfffUL)) + ((i >> PAGE_2M_SHIFT) & 0x1ff));

		if(*tmp == 0){

			p = MEMORY->alloc_pages(ZONE_NORMAL,1,PG_PTable_Maped);
			if(p == NULL)	// out of physical memory: stop extending here
				break;
			set_pdt(tmp,mk_pdt(p->PHY_address,PAGE_USER_Page));

		}

	}

	current->mm->end_brk = i;

	flush_tlb();

	return i;
}




// Global operator new: route all C++ heap allocations through the slab allocator.
void* operator new(unsigned long size){
	return MEMORY->kmalloc(size,0);
}

// Global operator delete: return the block to the slab allocator.
void operator delete(void* ptr){

	MEMORY->kfree(ptr);
}

// Global array operator new: same slab-backed path as scalar new.
void* operator new[](unsigned long size){

	return MEMORY->kmalloc(size,0);
}
// Global array operator delete: return the block to the slab allocator.
void operator delete[](void* ptr){
	MEMORY->kfree(ptr);
}

// Sized operator delete (C++14): the slab allocator finds the size class
// from the pointer itself, so `size` is ignored.
void operator delete(void* ptr,unsigned long size){

	MEMORY->kfree(ptr);
	
}
