//
// PageTable.cpp
// Usage: Page table of a process
// CreateDate: 2009-03-26
// Author: MaJiuyue
//
// TODO: When duplicating a page table, make all PTEs and PDEs read-only
//
#include "PageTable.h"
#include "SCI.h"

// Shared kernel page directory, set once by InitializeKPD(); its kernel
// half (entries 768..1023) is copied into every process's directory.
pde_t * PageTable::gKernelPageDirectory = NULL;

PageTable::PageTable()
{
	// Obtain the kernel's page-frame allocator service
	Resource * allocator = ResourceManager.RequestResource(ServiceCallInterface::PageFrameAllocator::ResourceName);

	// A single page holds the 1024-entry page directory
	m_PageDirectory = (pde_t *)allocator->CallSCI(ServiceCallInterface::PageFrameAllocator::SCIID_GetPageAddr);
	if(m_PageDirectory == NULL)
		panic("No enough kernel memory for new page table");

	// A fresh address space starts as an exact copy of the kernel directory
	int entry = 0;
	while(entry < 1024)
	{
		m_PageDirectory[entry] = gKernelPageDirectory[entry];
		entry++;
	}
}

// Copy constructor: duplicate `target's user-space mappings and attach the
// current kernel-space mappings.
//
// BUGFIX: the old code copied the user-space PDEs verbatim, so both
// directories pointed at the SAME page-table pages; each destructor's
// Clear() would then free those pages twice (double free).  The copy now
// gets page-table pages of its own with the PTEs copied into them.  The
// mapped data frames themselves are still shared with `target' -- marking
// them read-only for copy-on-write remains TODO (see file header).
PageTable::PageTable(const PageTable& target)
{
	// Request a PageFrameAllocator resource
	Resource * res = ResourceManager.RequestResource(ServiceCallInterface::PageFrameAllocator::ResourceName);

	// Allocate one page for page directory
	m_PageDirectory = (pde_t *)res->CallSCI(ServiceCallInterface::PageFrameAllocator::SCIID_GetPageAddr);
	if(!m_PageDirectory)
		panic("No enough kernel memory for new page table");

	// Duplicate the user-space half of the directory (entries 0..767)
	for(int i=0; i<768; i++)
	{
		if(target.m_PageDirectory[i] & PG_PRESENT)
		{
			// Give this PDE its own page-table page
			pte_t *pt = (pte_t *)res->CallSCI(ServiceCallInterface::PageFrameAllocator::SCIID_GetPageAddr);
			if(!pt)
				panic("No enough kernel memory for new page table");

			// Copy the 1024 PTEs; the frames they point at stay shared
			pte_t *src = (pte_t *)(target.m_PageDirectory[i] & PG_BASE_MASK);
			for(int j=0; j<1024; j++)
				pt[j] = src[j];

			// New page-table base, original PDE flag bits
			m_PageDirectory[i] = (addr_t)pt | (target.m_PageDirectory[i] & ~PG_BASE_MASK);
		}
		else
		{
			// Non-present entries carry no page table; copy verbatim
			m_PageDirectory[i] = target.m_PageDirectory[i];
		}
	}

	// Kernel space (entries 768..1023) comes from the kernel directory
	for(int i=768; i<1024; i++)
		m_PageDirectory[i] = gKernelPageDirectory[i];	
}

PageTable::~PageTable()
{
	// First release every user-space page-table page owned by this table
	Clear();

	// Then hand the page-directory page itself back to the allocator
	Resource * allocator = ResourceManager.RequestResource(ServiceCallInterface::PageFrameAllocator::ResourceName);
	allocator->CallSCI(ServiceCallInterface::PageFrameAllocator::SCIID_FreePageAddr, (u32)m_PageDirectory);
}

void PageTable::Clear()
{
	// Request a PageFrameAllocator resource
	Resource * res = ResourceManager.RequestResource(ServiceCallInterface::PageFrameAllocator::ResourceName);

	// Foreach PDE in user space, check the PRESENT bit
	for(int i=0; i<768; i++)
	{
		// If present
		if(m_PageDirectory[i] & PG_PRESENT)
		{
			// Free the page used by this PageTable
			res->CallSCI(ServiceCallInterface::PageFrameAllocator::SCIID_FreePageAddr, m_PageDirectory[i]&PG_BASE_MASK);
			m_PageDirectory[i] = 0;
		}
	}
}

// Activate this address space by loading the page-directory address into
// CR3 (which also flushes the non-global TLB entries).
// NOTE(review): CR3 takes a PHYSICAL address; this passes the
// m_PageDirectory pointer directly, so the directory page must be
// identity-mapped -- confirm against the allocator's guarantees.
void PageTable::SwitchTo()
{
	__asm__ __volatile__("mov %%eax, %%cr3"::"a"(m_PageDirectory));	
}

// Re-copy the kernel half of the directory (entries 768..1023) so this
// process picks up kernel-space mappings changed since it was created.
void PageTable::SyncKPD()
{
	int idx = 768;
	while(idx < 1024)
	{
		m_PageDirectory[idx] = gKernelPageDirectory[idx];
		idx++;
	}
}

// Map 2^order consecutive 4K pages so that `vaddr' -> `paddr' in this
// address space, with PRESENT | RW | US set on every PTE.
// Both addresses are truncated to 4K alignment first.
// Panics if any page in the range lies in kernel space (>= 0xC0000000)
// or is already mapped.
void PageTable::MapUserSpace(addr_t vaddr, addr_t paddr, int order)
{
	pde_t *pde = NULL;
	pte_t *pte = NULL;

	// Make sure `paddr' and `vaddr' are 4K-aligned
	paddr &= 0xFFFFF000;
	vaddr &= 0xFFFFF000;

	// Request a PageFrameAllocator resource
	Resource * res = ResourceManager.RequestResource(ServiceCallInterface::PageFrameAllocator::ResourceName);

	// u32 (was int): 1UL<<order is unsigned; avoid signed/unsigned mixing
	for(u32 pg_left=1UL<<order; pg_left>0; pg_left--)
	{
		// Only user space may be mapped here
		if( vaddr>=0xC0000000 )
			panic("Try to map kernel space: [0x%x]", vaddr);

		// get vaddr's pde
		pde = &m_PageDirectory[vaddr>>22];

		// if PDE is not present, allocate a zeroed page table for it
		if(!(*pde & PG_PRESENT))
		{
			addr_t addr = res->CallSCI(ServiceCallInterface::PageFrameAllocator::SCIID_GetZeroedPageAddr);
			// BUGFIX: a failed allocation used to be installed as a page
			// table at physical address 0; panic instead
			if(!addr)
				panic("No enough kernel memory for new page table");
			*pde = addr | PG_PRESENT | PG_RW | PG_US;
		}
		
		// get vaddr's pte (the PDE base is dereferenced directly, so
		// page-table pages are identity-mapped -- as elsewhere in this file)
		pte = &((pte_t *)(*pde & PG_BASE_MASK))[(vaddr&0x3FFFFF)>>12];

		// double-mapping is a kernel bug
		if(*pte & PG_PRESENT)
			panic("Virtual address 0x%x already mapped to 0x%x.", vaddr, *pte&PG_BASE_MASK);

		// Map this page.  PG_US is set unconditionally: the kernel-space
		// check above guarantees vaddr < 0xC0000000, so the old
		// `if(vaddr<0xC0000000)' guard was always true and has been folded in.
		*pte = paddr | PG_PRESENT | PG_RW | PG_US;

		// advance to the next page
		vaddr += PAGE_SIZE;
		paddr += PAGE_SIZE;
	}
}

// Unmap 2^order consecutive 4K pages starting at `vaddr' in this address
// space.  Panics if the range overlaps the kernel physical-memory map or
// if any page in it is not currently mapped.  The mapped frames are NOT
// freed here, and emptied page tables are kept (Clear() releases them).
// NOTE(review): no TLB invalidation (invlpg) is done here; stale
// translations may survive until the next CR3 load -- confirm callers
// switch/flush afterwards.
void PageTable::UnmapUserSpace(addr_t vaddr, int order)
{
	pde_t *pde = NULL;
	pte_t *pte = NULL;

	// Make sure `vaddr' are 4K-aligned
	vaddr &= 0xFFFFF000;

	for(int pg_left=1UL<<order; pg_left>0; pg_left--)
	{
		// Check vaddr, it must NOT be in the physical memory map range
		if( vaddr>=0xC0000000 && vaddr<vir2lin(PHYMAP_END) )
			panic("Try to unmap address in PHYMAP: [0x%x].", vaddr);

		// get vaddr's pde
		pde = &m_PageDirectory[vaddr>>22];

		// if PDE is not present, panic kernel
		if(!(*pde & PG_PRESENT))
			panic("Try to unmap UNMAPPED-PAGE: [0x%x]. PDE not present.", vaddr);
		
		// get vaddr's pte
		pte = &((pte_t *)(*pde & PG_BASE_MASK))[(vaddr&0x3FFFFF)>>12];

		// if NOT mapped, panic kernel (old comment said "already mapped")
		if(!(*pte & PG_PRESENT))
			panic("Try to unmap UNMAPPED-PAGE: [0x%x]. PTE not present.", vaddr);

		// clear the mapping
		*pte = 0;

		// advance to the next page
		vaddr += PAGE_SIZE;
	}
}

// Translate a virtual address to its physical address by walking this
// page table.  Returns 0 when the address is not mapped at either level.
addr_t PageTable::GetPhyAddress(addr_t vaddr)
{
	addr_t page_off = vaddr & 0xFFF;
	addr_t page_va = vaddr & 0xFFFFF000;

	// Directory level: bail out if no page table exists for this region
	pde_t dir_entry = m_PageDirectory[page_va>>22];
	if((dir_entry & PG_PRESENT) == 0)
		return 0;

	// Table level: bail out if the page itself is not mapped
	pte_t tbl_entry = ((pte_t *)(dir_entry & PG_BASE_MASK))[(page_va&0x3FFFFF)>>12];
	if((tbl_entry & PG_PRESENT) == 0)
		return 0;

	// Frame base plus offset inside the page
	return (tbl_entry & PG_BASE_MASK) + page_off;
}

// One-time setup of the global kernel page directory: record `KernelPD'
// and map physical memory [0, PHYMAP_END) at linear address 0xC0000000.
void PageTable::InitializeKPD(pde_t * KernelPD)
{
	gKernelPageDirectory = KernelPD;
	
	// Map whole 4MB chunks with large-page PDEs.
	// 0x1E3 = PRESENT | RW | ACCESSED | DIRTY | PS(4MB page) | GLOBAL
	// per the IA-32 PDE bit layout -- verify against the PG_* macros.
	addr_t paddr = 0x0;
	int index;
	for(index=768; index<768+PHYMAP_END/0x400000; index++)
	{
		gKernelPageDirectory[index] = paddr | 0x1E3;
		paddr += 0x400000;
	}

	// Map the remainder (< 4MB) with an ordinary 4K page table
	if(paddr<PHYMAP_END)
	{
		// NOTE(review): assumes the page right after the kernel PD is
		// reserved for this page table -- confirm against the loader layout
		pte_t *pt = (pte_t *)((addr_t)gKernelPageDirectory+PAGE_SIZE);
		gKernelPageDirectory[index] = (addr_t)pt | 3; 

		// 3 = PRESENT | RW for each leftover 4K page
		while(paddr<PHYMAP_END)
		{
			*pt = paddr | 3;
			pt++;
			paddr += PAGE_SIZE;
		}
	}

	// All VM area PDT unchanged, inited as `Not Present' at `nfldr()'

}

// Install `pde' at `index' in the shared kernel page directory.
// Affects every process after its next SyncKPD()/construction.
void PageTable::SetKPDE(int index, pde_t pde)
{
	gKernelPageDirectory[index] = pde;
}

// Read the kernel page directory entry at `index'.
pde_t PageTable::GetKPDE(int index)
{
	return gKernelPageDirectory[index];
}

// Mark the kernel PDE at `index' not-present.
// 2 = RW bit only, PRESENT clear -- the entry maps nothing.
void PageTable::ClearKPDE(int index)
{
	gKernelPageDirectory[index] = 2;
}

