#include <cnix/head.h>
#include <cnix/mm.h>
#include <cnix/kernel.h>

/*
 * get the physical memory info
 */
void get_mem(unsigned long * start_memp, unsigned long * end_memp)
{
	/*
	 * Usable physical memory begins right after the kernel image:
	 * _end is the linker-provided symbol marking the end of the
	 * kernel's code/data area.
	 */
	*start_memp = (unsigned long)&_end;

	/*
	 * The upper bound is hard-wired to 128M (0x8000000) for now;
	 * real memory probing is still to be done.
	 */
	*end_memp = KERN_VA(0x8000000);
}

extern void kmalloc_init(void);

/*
 * Hand one page-aligned range [start, end) to the page allocator:
 * mark every page usable (clear PageReserved, and clear PageDMA for
 * pages above DMA_HIGH_MEM, which cannot serve ISA DMA), then free it.
 * Returns the number of pages released.
 */
static unsigned long free_page_range(unsigned long start, unsigned long end)
{
	unsigned long addr;
	unsigned long count = 0;
	struct page * pg;

	for (addr = start; addr < end; addr += PAGE_SIZE) {
		pg = &mem_map[(KERN_PA(addr) >> PAGE_SHIFT)];

		/* only pages below DMA_HIGH_MEM (16M) may be used for DMA */
		if (addr > KERN_VA(DMA_HIGH_MEM))
			pg->flags &= ~(1 << PageDMA);
		pg->flags &= ~(1 << PageReserved);
		pg->count = 1;
		free_one_page(addr, 1);
		count++;
	}

	return count;
}

/*
 * Add all free pages to free_area_list and calculate the number of
 * reserved, code, data and mem_map/bitmap pages.
 *
 * start_mem: first free address after the kernel + mem_map/bitmaps.
 * end_mem:   end of physical memory (kernel virtual address).
 */
void mem_init(unsigned long start_mem, unsigned long end_mem)
{
	unsigned long addr;

	nfreepages = 0;
	ncodepages = ndatapages = nreservedpages = nmemmap_and_bitmap = 0;

	/* Free the low-memory window between RESERVED_MEM and LOW_MEM. */
	nfreepages += free_page_range(KERN_VA(RESERVED_MEM), KERN_VA(LOW_MEM));

	start_mem = PAGE_ALIGN(start_mem);
	end_mem = (end_mem) & PAGE_MASK;

	/*
	 * Walk the region the kernel itself occupies, from the start of
	 * the direct mapping up to start_mem, and classify each page:
	 * code, data, or mem_map/bitmap bookkeeping.  All of these pages
	 * stay reserved.
	 */
	addr = __PAGE_OFFSET;
	while(addr < start_mem){
		if((addr >= KERN_VA(0x100000)) && (addr < ((unsigned long)&_etext)))
			ncodepages++;
		else if((addr >= ((unsigned long)&_etext))
			&& (addr < ((unsigned long)&_end)))
			ndatapages++;
		else if(addr >= ((unsigned long)&_end))
			nmemmap_and_bitmap++;
		nreservedpages++;

		addr += PAGE_SIZE;
	}

	/* Everything from the end of the kernel area to end_mem is free. */
	nfreepages += free_page_range(addr, end_mem);

	kmalloc_init();

	/*
	 * NOTE(review): the free-page count is required to be even and is
	 * then halved; presumably the pages are counted twice somewhere
	 * (e.g. inside free_one_page's accounting) — confirm against the
	 * allocator before changing.
	 */
	if( (nfreepages&0x1) )
		DIE("wrong number of free pages.\n");

	nfreepages >>= 1;

#if 1
	printk("total pages: %d\n",(KERN_PA(end_mem)>>PAGE_SHIFT));
	printk("page start at 0x%x\n", start_mem);
	printk("Reserved Pages: %d\n", nreservedpages);
	printk("Code  Pages: %d\n", ncodepages);
	printk("Data  Pages: %d\n", ndatapages);
	printk("mem_map and bitmap Pages: %d\n", nmemmap_and_bitmap);
	printk("Free  Pages:%d\n", nfreepages);
#endif	
}

/* Map a kernel virtual address to its struct page in mem_map. */
struct page *virt_to_page(const void *vaddr)
{
	/* mem_map is indexed by physical page frame number */
	return &mem_map[KERN_PA((unsigned long)vaddr) >> PAGE_SHIFT];
}

/* Return the kernel virtual address of the given page. */
unsigned long page_address(struct page *page)
{
	unsigned long pfn = (unsigned long)(page - mem_map);

	/* pfn << PAGE_SHIFT is the physical address; lift it into the
	 * kernel's direct mapping. */
	return KERN_VA(pfn << PAGE_SHIFT);
}

/* Return the physical address of the given page. */
unsigned long page_to_phys(struct page *page)
{
	return ((unsigned long)(page - mem_map)) << PAGE_SHIFT;
}

/* Map a page frame number to its struct page. */
struct page *pfn_to_page(unsigned long page_nr)
{
	return mem_map + page_nr;
}

/* Map a struct page back to its page frame number. */
unsigned long page_to_pfn(struct page *page)
{
	return (unsigned long)(page - mem_map);
}
