#include <kiddie/kiddie.h>
#include <arm/platform.h>
#include <sys/errno.h>

#include <Console.h>
#include <hw/Platform.h>
#include <arm/ArchPager.h>

extern kernel::Console cout;
extern kernel::Platform hwplatform;

namespace kernel {
namespace mem {
namespace page {

/**
 * @param area Memory area to merge into the rest of the list
 * @return 1 when 'area' was merged into another list member, 0 otherwise
 *
 * Scans the area list for a member overlapping (or touching) 'area' and,
 * if one is found, grows that member to the union of both ranges.  The
 * caller (normalize(void)) is responsible for unlinking 'area' itself.
 */
int
VmMap::normalize (MemArea *area)
{
	MemArea *_area;
	vaddr_t addr_start, addr_end;
	int ret;

ENTER;
	ret = 0;

	for (_area = this->areas.next;
			_area && (_area != &this->areas);
				_area = _area->next)
	{
		/* skip area itself */
		if (area == _area)
		{
			continue;
		}

		/*
		 * BUGFIX: the overlap test was one-sided — it only caught
		 * 'area' starting inside '_area' and missed 'area' starting
		 * before '_area'.  Use a symmetric interval-overlap test.
		 */
		if (area->v_addr <= (_area->v_addr + _area->length)
				&& _area->v_addr <= (area->v_addr + area->length))
		{
			/* grow _area to the union of both ranges */
			addr_start = (area->v_addr < _area->v_addr) ?
					area->v_addr : _area->v_addr;
			addr_end =
				((area->v_addr + area->length) > (_area->v_addr + _area->length)) ?
						(area->v_addr + area->length) : (_area->v_addr + _area->length);
			_area->v_addr = addr_start;
			_area->length = addr_end - addr_start;
			ret = 1;
			break;
		}
	}
EXIT;
	return ret;
}

int
VmMap::normalize (void)
{
	int run;
	MemArea *area, *tmp_area;

ENTER;

	for (area = this->areas.next;
			area && area != &this->areas; )
	{
		tmp_area = NULL;
		if (normalize(area))
		{
			/* remove area from list */
			area->prev->next = area->next;
			area->next->prev = area->prev;
			tmp_area = area;
		}
		area = area->next;
		// if (NULL != tmp_area) kfree(tmp_area);
	}

EXIT;
	return 0;
}


/**
 * @param paddr Physical address of memory region
 * @param siz Size of region in bytes
 * @return virtual address the region was mapped at
 *
 * Maps 'siz' bytes of physical memory starting at 'paddr' into this
 * map at a fixed virtual address (allocating the virtual range
 * properly is still TODO).
 */
vaddr_t VmMap::remap(paddr_t paddr, size_t siz)
{
	/* TODO vaddr needs to be allocated */
	const vaddr_t fixed_vaddr = 0x20000000;

	/* the three-argument overload echoes the vaddr it was given */
	return remap(paddr, fixed_vaddr, siz);
}

/**
 * @param paddr Physical address of memory region
 * @param _vaddr Virtual address the region is mapped at
 * @param siz Size of region in bytes
 * @return virtual address of the mapped region (== _vaddr)
 *
 * Maps 'siz' bytes of physical memory starting at 'paddr' at the
 * requested virtual address '_vaddr', creating coarse level-2 page
 * tables on demand.
 */
vaddr_t VmMap::remap(paddr_t paddr, vaddr_t _vaddr, size_t siz)
{
	vaddr_t vaddr, vaddr_end;
	pte *l2_page;
	pte entry, entry2; /* NOTE(review): 'entry2' is never used */
	pgd_t pgd;
	int l1_idx, l2_idx, i;

ENTER;

	/* round the region out to whole pages and record it in the area list */
	siz = PAGE_ALIGN(siz);
	this->add(_vaddr, siz);
	paddr = PAGE_TRUNC(paddr);
	/* this->pgd holds a physical address; get a virtual one we can index */
	pgd = (pgd_t)phys_to_virt(this->pgd);

	/* walk the region one page at a time */
	for (
			vaddr = _vaddr,vaddr_end = _vaddr + siz;
			vaddr < vaddr_end;
			vaddr += PAGE_SIZE,paddr+=PAGE_SIZE
		)
	{
		l1_idx = PTE_L1_IDX(vaddr);
		entry =  (pte)pgd[l1_idx]; /* current level-1 descriptor value */

		/* dispatch on the type of the existing level-1 descriptor */
		switch (PTE_L1_TYPE_MASK & entry)
		{
			case PTE_L1_COARSE:
				/* a coarse L2 table already covers this address: fill our slot */
				l2_page = (pte *)phys_to_virt((pgd[l1_idx] & (PTE_L1_MASK | PTE_L2_COARSE_MASK)));
				l2_idx = PTE_L2_COARSE_IDX(vaddr);
				/* small-page descriptor, read/write in all four AP fields */
				l2_page[l2_idx] = (paddr & ~0x00000FFF)
							| (PTE_AP_RW_RW << 4)
							| (PTE_AP_RW_RW << 6)
							| (PTE_AP_RW_RW << 8)
							| (PTE_AP_RW_RW << 10)
							| 0xE; /* Cachable & buffered & small page*/
				break;

			case PTE_L1_SECTION:
				/* already mapped by a section descriptor: leave it alone */
				break;

			case PTE_L1_FINE:
				/* TODO Implement Fine page table mapping */
				::cout << (char *)"[vm] BUG!!!!!!!!!!!!! Fine Page Tables not supported!\n";
				::hwplatform.idle();
				break;

			case PTE_L1_FAULT:
			default:
				/* no L2 table yet: allocate one page and carve it into
				 * four coarse tables.  (The result is written raw into
				 * the pgd below, so pNode->alloc() presumably returns a
				 * physical address — TODO confirm.) */
				l2_page = (pte *)pNode->alloc(PAGE_SIZE);
				/*
				 * PAGE_SIZE contains 4 coarse page tables
				 * create entries for each of page table
				 *
				 * TODO Mapping continuosly 4KB page to 4 coarse page tables may cause an overlap of page tables!!!!
				 *
				 * l2_page incremented by 0x100 because of it is a (pte *) type
				 * and contains 256 (0x100) pte entries, so each page table is
				 * 0x100*sizeof(pte) bytes in size.
				 */
				for (i = 0; i < 4; i++, l2_page += 0x100)
				{
					pgd[l1_idx + i] = ((unsigned int)l2_page & 0xFFFFFC00)
								| (1 << 4)
								| PTE_L1_COARSE;
				}

				/* index the freshly installed table through its virtual address */
				l2_page = (pte *)phys_to_virt((pgd[l1_idx] & 0xFFFFFC00));
				l2_idx = PTE_L2_COARSE_IDX(vaddr);

				l2_page[l2_idx] = (paddr & ~0x00000FFF)
							| (PTE_AP_RW_RW << 4)
							| (PTE_AP_RW_RW << 6)
							| (PTE_AP_RW_RW << 8)
							| (PTE_AP_RW_RW << 10)
							| 0xE; /* Cachable & buffered & small page*/
				break;
		};
	}

EXIT;
	return _vaddr;
}


/**
 * @return 0 on success, -ENOMEM when a backing allocation fails
 *
 * Walks every memory area of this map, allocates physical pages to
 * back it, and enters the corresponding page mappings into this->pgd.
 */
int
VmMap::doPgd (void)
{
	int err = 0;
	MemArea *area;
	vaddr_t vaddr, vaddr_end, paddr; /* NOTE(review): 'paddr' holds a
					  * physical address in a vaddr_t */
	pte *l2_page;
	pte entry, entry2; /* NOTE(review): 'entry2' is never used */
	pgd_t pgd;
	int l1_idx, l2_idx, i;

ENTER;

	/* merge overlapping areas first */
	this->normalize();
	/* this->pgd holds a physical address; get a virtual one we can index */
	pgd = (pgd_t)phys_to_virt(this->pgd);

	/*
	 * We assume here that memory map is normalized
	 */
	for (area = this->areas.next;
			area && (area != &this->areas) ;
				area = area->next)
	{
		/* skip empty areas */
		if (!area->length)
		{
			continue;
		}

		/* back the whole area with freshly allocated physical pages */
		paddr = area->p_addr = (paddr_t)pNode->alloc(area->length);

		if (NULL == area->p_addr)
		{
			/* NOTE(review): the %s/%d specifiers print literally —
			 * this Console operator does no formatting */
			::cout << (char *)"[vm] %s:%s:%d ERROR!! Out of memory\n";
			err = -ENOMEM;
			break;
		}

		/* map the area one page at a time */
		for (vaddr = PAGE_TRUNC(area->v_addr),
				vaddr_end = PAGE_ALIGN(area->v_addr + area->length);
				vaddr < vaddr_end;
					vaddr+=PAGE_SIZE,paddr+=PAGE_SIZE)
		{
			l1_idx = PTE_L1_IDX(vaddr);
			entry =  (pte)pgd[l1_idx]; /* current level-1 descriptor value */

			/* dispatch on the type of the existing level-1 descriptor */
			switch (PTE_L1_TYPE_MASK & entry)
			{
				case PTE_L1_COARSE:
					/* coarse L2 table exists: fill in our slot.
					 * NOTE(review): remap() casts this to (pte *);
					 * the missing cast here relies on phys_to_virt's
					 * return type — confirm it compiles cleanly */
					l2_page = phys_to_virt((pgd[l1_idx] & (PTE_L1_MASK | PTE_L2_COARSE_MASK)));
					l2_idx = PTE_L2_COARSE_IDX(vaddr);
					/* small-page descriptor, read/write in all four AP fields */
					l2_page[l2_idx] = (paddr & ~0x00000FFF)
								| (PTE_AP_RW_RW << 4)
								| (PTE_AP_RW_RW << 6)
								| (PTE_AP_RW_RW << 8)
								| (PTE_AP_RW_RW << 10)
								| 0xE; /* Cachable & buffered & small page*/
					break;

				case PTE_L1_SECTION:
					/* already mapped by a section descriptor: leave it alone */
					break;

				case PTE_L1_FINE:
					/* TODO Implement Fine page table mapping */
					::cout << (char *)"[vm] BUG!!!!!!!!!!!!! Fine Page Tables not supported!\n";
					::hwplatform.idle();
					break;

				case PTE_L1_FAULT:
				default:
					/* no L2 table yet: allocate one page and carve it
					 * into four coarse tables (same pattern as remap()) */
					l2_page = (pte *)pNode->alloc(PAGE_SIZE);
					/*
					 * PAGE_SIZE contains 4 coarse page tables
					 * create entries for each of page table
					 *
					 * TODO Mapping continuosly 4KB page to 4 coarse page tables may cause an overlap of page tables!!!!
					 *
					 * l2_page incremented by 0x100 because of it is a (pte *) type
					 * and contains 256 (0x100) pte entries, so each page table is
					 * 0x100*sizeof(pte) bytes in size.
					 */
					for (i = 0; i < 4; i++, l2_page += 0x100)
					{
						pgd[l1_idx + i] = ((unsigned int)l2_page & 0xFFFFFC00)
									| (1 << 4)
									| PTE_L1_COARSE;
					}

					/* index the freshly installed table through its virtual address */
					l2_page = (pte *)phys_to_virt((pgd[l1_idx] & 0xFFFFFC00));
					l2_idx = PTE_L2_COARSE_IDX(vaddr);

					l2_page[l2_idx] = (paddr & ~0x00000FFF)
								| (PTE_AP_RW_RW << 4)
								| (PTE_AP_RW_RW << 6)
								| (PTE_AP_RW_RW << 8)
								| (PTE_AP_RW_RW << 10)
								| 0xE; /* Cachable & buffered & small page*/
					break;
			};
		}
	}

	if (err)
	{
		/* ... */
		/* FIXME TODO sys/alphabet/vm.c: Free allocated physical pages on error. */
		;
	}

EXIT;

	return err;
}


/**
 * @param addr Start virtual address of the area
 * @param len Length of the area in bytes
 * @return 0 on success, -EINVAL on bad parameters, -ENOMEM when no
 *         memory is left for the MemArea descriptor
 *
 * Adds the area [addr, addr+len) to this map.  The range is widened to
 * page boundaries first.  If it overlaps an existing area, that area is
 * grown to the union instead of inserting a new node.
 */
int
VmMap::add(vaddr_t addr, size_t len)
{
	MemArea *area;
	vaddr_t addr_start, addr_end;

ENTER;

	if (NULL == addr || len <= 0)
	{
		::cout << (char *)"[MemMap]  ERROR!! Invalid parameters\n";
EXIT;
		return -EINVAL;
	}

	/* widen area to page boundaries */
	addr_start = PAGE_TRUNC(addr);
	addr_end = PAGE_ALIGN(addr + len);
	/*
	 * BUGFIX: was 'addr_start - addr_end', which underflows; keep
	 * 'len' consistent with the page-aligned range.
	 */
	len = addr_end - addr_start;

	/* user areas must stay below the kernel mapping */
	if (addr_end >= (vaddr_t)&KERNEL_BASE)
	{
		::cout << (char *)"[vm] %s:%s:%d ERROR!! Invalid mem area\n";
EXIT;
		return -EINVAL;
	}

	/* find overlapped area */
	for (area = this->areas.next;
			area && (area != &this->areas) ;
				area = area->next)
	{
		/* does the new range start inside this area? */
		if (addr_start >= area->v_addr && addr_start <= (area->v_addr + area->length))
		{
			/* grow the existing area to the union of both ranges */
			addr_end = (addr_end > (area->v_addr + area->length)) ? addr_end : (area->v_addr + area->length);
			area->length = addr_end - area->v_addr;
			::cout << (char *)"[vm] %s:%s:%d New area: area->v_addr=0x%x, area->length=0x%x\n";
EXIT;
			return 0;
		}
	}

	/* no overlap: append a fresh area at the list tail */
	area = new MemArea();
	if (!area)
	{
		::cout << (char *)"[vm] %s:%s:%d ERROR! No mem space\n";
		EXIT;
		return -ENOMEM;
	}

	area->length = addr_end - addr_start;
	area->v_addr = addr_start;
	area->alloc = true;

	area->prev = this->areas.prev;
	area->next = &this->areas;
	area->prev->next = area;
	this->areas.prev = area;

EXIT;
	return 0;
}


/**
 * @param p Page factory used for all physical page allocations
 *
 * Creates a new memory map: allocates a 16KB-aligned page directory
 * and initializes the memory area list head to an empty circular list.
 *
 * NOTE(review): on allocation failure this->pgd is left unset, as in
 * the original code — callers must check it.
 */
VmMap::VmMap(PageFactory *p) : pNode(p)
{
	pgd_t new_pgd;
ENTER;

	/*
	 * BUGFIX: the result of __alloc_pgd() was stored in a shadow
	 * variable ('_new_pgd') while an uninitialized 'new_pgd' was used
	 * below; also bail out when the allocation itself failed, not only
	 * when pNode is NULL (__alloc_pgd() returns NULL in both cases).
	 */
	new_pgd = this->__alloc_pgd();
	if (!new_pgd)
	{
		return;
	}

	/* initialize new structure */
	/* NOTE(review): the copy constructors store __alloc_pgd()'s result
	 * without virt_to_phys — confirm which address space
	 * pNode->alloc() returns. */
	this->pgd = (pgd_t)virt_to_phys(new_pgd);
	this->areas.next = &this->areas;
	this->areas.prev = &this->areas;
	this->areas.p_addr = 0;
	this->areas.v_addr = 0;
	this->areas.length = 0;

EXIT;

}


/**
 * @param p Page factory used for physical page allocations
 * @param _pgd1 Pre-allocated page directory to adopt
 *
 * Builds a map around an already existing page directory and resets
 * the memory area list to an empty circular list.
 */
VmMap::VmMap(PageFactory *p, pgd_t _pgd1) : pNode(p), pgd(_pgd1)
{
ENTER;
	/* empty circular list: the head points at itself */
	MemArea *head = &this->areas;
	head->next = head;
	head->prev = head;
	head->p_addr = 0;
	head->v_addr = 0;
	head->length = 0;
EXIT;
}


/**
 * Allocate a new page directory.
 *
 * The page directory must be 16KB aligned, but the page allocator only
 * guarantees page alignment.  So 8 pages are allocated, a 16KB-aligned
 * directory is carved out of that span, and the unused head and tail
 * are handed back to the allocator.
 *
 * @return 16KB-aligned page directory, or NULL on failure
 */
pgd_t VmMap::__alloc_pgd(void)
{
	pgd_t new_pgd;	/* start of the raw 8-page allocation */
	pgd_t _new_pgd;	/* 16KB-aligned directory within it */

	if (!pNode)
	{
		return NULL;
	}

	/* allocate 8 pages*/
	_new_pgd = new_pgd = (pgd_t)pNode->alloc(PAGE_SIZE*8);

	if (NULL == new_pgd)
	{
		::cout << (char *)"[vm] %s:%s:%d ERROR! Cannot allocate page directory\n";
		/* NOTE(review): EXIT without a matching ENTER — confirm the
		 * trace macros tolerate this */
		EXIT;
		return NULL;
	}

	/* round down to a 16KB boundary ... */
	_new_pgd = (pgd_t)((vaddr_t)_new_pgd & 0xFFFFC000);

	/* ... then up to the first 16KB boundary inside the allocation */
	while ( _new_pgd < new_pgd )
	{
		_new_pgd = (pgd_t)((vaddr_t)_new_pgd + 0x4000); /* 0x4000 = 16KB */
	}

	/* return the unused head, [new_pgd, _new_pgd), to the allocator */
	if ((_new_pgd - new_pgd) > 0)
	{
		pNode->free ((void *)new_pgd,(vaddr_t)_new_pgd - (vaddr_t)new_pgd);
	}

	/* return the unused tail beyond the directory (PAGE_SIZE*4 bytes
	 * after _new_pgd) to the allocator */
	if (((vaddr_t)_new_pgd + PAGE_SIZE*4) < ((vaddr_t)new_pgd + PAGE_SIZE*8))
	{
		pNode->free ((void *)((vaddr_t)_new_pgd + PAGE_SIZE*4),
				((vaddr_t)new_pgd + PAGE_SIZE*8) - ((vaddr_t)_new_pgd + PAGE_SIZE*4));
	}
	/* end of PGD allocation */
	return _new_pgd;
}

/**
 * Copy constructor: allocates a fresh 16KB-aligned page directory,
 * copies the source map's directory into it, and starts with an empty
 * memory area list (areas are NOT copied).
 */
VmMap::VmMap(const VmMap& _from)
{
	this->pNode = _from.pNode;
	this->pgd = this->__alloc_pgd();

	if (this->pgd && _from.pgd)
	{
		/* both directories are accessed through their virtual addresses */
		pgd_t dst = (pgd_t)phys_to_virt(this->pgd);
		pgd_t src = (pgd_t)phys_to_virt(_from.pgd);
		/* duplicate the source page directory */
		util::memcpy((void *)dst, (unsigned char*)src, PAGE_SIZE * 4);
	}

	/* empty circular area list: the head points at itself */
	this->areas.next = &this->areas;
	this->areas.prev = &this->areas;
	this->areas.p_addr = 0;
	this->areas.v_addr = 0;
	this->areas.length = 0;
}

/**
 * Non-const copy constructor (same behavior as the const overload):
 * allocates a fresh page directory, copies the source map's directory
 * into it, and starts with an empty memory area list.
 */
VmMap::VmMap(VmMap& _from)
{
	this->pNode = _from.pNode;
	this->pgd = this->__alloc_pgd();

	if (this->pgd && _from.pgd)
	{
		/* both directories are accessed through their virtual addresses */
		pgd_t dst = (pgd_t)phys_to_virt(this->pgd);
		pgd_t src = (pgd_t)phys_to_virt(_from.pgd);
		/* duplicate the source page directory */
		util::memcpy(dst, (unsigned char*)src, PAGE_SIZE * 4);
	}

	/* empty circular area list: the head points at itself */
	this->areas.next = &this->areas;
	this->areas.prev = &this->areas;
	this->areas.p_addr = 0;
	this->areas.v_addr = 0;
	this->areas.length = 0;
}

/**
 * @param _from Map whose page directory is copied into this one
 * @return reference to *this
 *
 * Copies the source map's page directory over this map's directory.
 * NOTE(review): the memory area list is NOT copied — confirm that is
 * intentional.
 */
VmMap &VmMap::operator= (VmMap &_from)
{
	pgd_t _pgd1;
	pgd_t _pgd2;

	if (this->pgd && _from.pgd)
	{
		/* get virtual addresses of both page directories */
		_pgd1 = (pgd_t)phys_to_virt(this->pgd);
		_pgd2 = (pgd_t)phys_to_virt(_from.pgd);
		/* copy the source page directory over ours */
		util::memcpy(_pgd1, (unsigned char*)_pgd2, PAGE_SIZE * 4);
	}

	/* BUGFIX: the function fell off the end without returning,
	 * which is undefined behavior for a non-void function */
	return *this;
}

}
}
}
