#include <kiddie/kiddie.h>
#include <arm/platform.h>
#include <sys/errno.h>

#include <Console.h>
#include <hw/Platform.h>
#include <arm/ArchPager.h>

extern kernel::Console cout;
extern kernel::Platform hwplatform;

namespace kernel {
namespace mem {
//namespace page {

/**
 * TODO This (mapping) should be done somewhere in a page or
 * platform context.
 */
/**
 * Map the physical pages backing @area into the virtual address range
 * [PAGE_TRUNC(area.v_addr), PAGE_ALIGN(area.v_addr + area.length)) by
 * filling ARM coarse (L2) page-table entries.
 *
 * If area.alloc is set, physical memory is first allocated from pNode.
 *
 * @param area  memory area descriptor; p_addr is written when alloc is set
 * @return the (page-truncated) virtual base address on success, NULL on
 *         failure (zero length, no allocator node, or out of memory)
 *
 * TODO This (mapping) should be done somewhere in a page or
 * platform context.
 */
vaddr_t
ArchVmMap::archMapArea(kernel::mem::MemArea &area)
{
    vaddr_t vaddr, vaddr_end, paddr;
    pte *l2_page;
    pte entry;
    pgd_t pgd;
    int l1_idx, l2_idx, i;

    if (!area.length || !pNode)
    {
        return NULL;
    }

    /*
     * BUG FIX: this local previously shadowed the member `pgd` and was
     * never initialized, so every pgd[l1_idx] access below dereferenced
     * a garbage pointer.  Translate the member page directory to its
     * virtual address, as operator= does with phys_to_virt().
     */
    pgd = (pgd_t)phys_to_virt(this->pgd);

    /* check alloc flag before allocation */
    if (area.alloc)
    {
        area.p_addr = (paddr_t)pNode->alloc(area.length, 2);
    }

    paddr = area.p_addr;

    if (NULL == area.p_addr)
    {
        ::cout << (char *)"[vm] %s:%s:%d ERROR!! Out of memory\n";
        return NULL;
    }

    for (vaddr = PAGE_TRUNC(area.v_addr),
         vaddr_end = PAGE_ALIGN(area.v_addr + area.length);
         vaddr < vaddr_end;
         vaddr += PAGE_SIZE, paddr += PAGE_SIZE)
    {
        l1_idx = PTE_L1_IDX(vaddr);
        entry = (pte)pgd[l1_idx]; /* virtual address of entry */

        switch (PTE_L1_TYPE_MASK & entry)
        {
        case PTE_L1_COARSE:
            /* L1 already points at a coarse table: just fill the L2 slot */
            l2_page = (pte *)phys_to_virt((pgd[l1_idx] & (PTE_L1_MASK | PTE_L2_COARSE_MASK)));
            l2_idx = PTE_L2_COARSE_IDX(vaddr);
            l2_page[l2_idx] = (paddr & ~0x00000FFF)
                    | (PTE_AP_RW_RW << 4)
                    | (PTE_AP_RW_RW << 6)
                    | (PTE_AP_RW_RW << 8)
                    | (PTE_AP_RW_RW << 10)
                    | 0xE; /* Cachable & buffered & small page*/
            break;

        case PTE_L1_SECTION:
            /* already mapped as a 1MB section; nothing to do here */
            break;

        case PTE_L1_FINE:
            /* TODO Implement Fine page table mapping */
            ::cout << (char *)"[vm] BUG!!!!!!!!!!!!! Fine Page Tables not supported!\n";
            ::hwplatform.idle();
            break;

        case PTE_L1_FAULT:
        default:
            l2_page = (pte *)pNode->alloc(PAGE_SIZE, 0);
            /* BUG FIX: the allocation was previously unchecked */
            if (NULL == l2_page)
            {
                ::cout << (char *)"[vm] %s:%s:%d ERROR!! Out of memory\n";
                /* FIXME TODO sys/alphabet/vm.c: Free allocated physical pages on error. */
                return NULL;
            }
            /*
             * PAGE_SIZE contains 4 coarse page tables
             * create entries for each of page table
             *
             * TODO Mapping continuosly 4KB page to 4 coarse page tables may cause an overlap of page tables!!!!
             *
             * l2_page incremented by 0x100 because of it is a (pte *) type
             * and contains 256 (0x100) pte entries, so each page table is
             * 0x100*sizeof(pte) bytes in size.
             */
            for (i = 0; i < 4; i++, l2_page += 0x100)
            {
                pgd[l1_idx + i] = ((unsigned int)l2_page & 0xFFFFFC00)
                        | (1 << 4)
                        | PTE_L1_COARSE;
            }

            l2_page = (pte *)phys_to_virt((pgd[l1_idx] & 0xFFFFFC00));
            l2_idx = PTE_L2_COARSE_IDX(vaddr);

            l2_page[l2_idx] = (paddr & ~0x00000FFF)
                    | (PTE_AP_RW_RW << 4)
                    | (PTE_AP_RW_RW << 6)
                    | (PTE_AP_RW_RW << 8)
                    | (PTE_AP_RW_RW << 10)
                    | 0xE; /* Cachable & buffered & small page*/
            break;
        };
    }

    EXIT;
    /* BUG FIX: function previously fell off the end without a return value */
    return PAGE_TRUNC(area.v_addr);
}


/**
 * Allocate new page directory
 * It must be 16KB aligned
 */
/**
 * Allocate a new first-level page directory.
 *
 * The ARM L1 translation table is 16KB and must be 16KB aligned; since
 * the allocator only guarantees page granularity, we grab 32KB (8 pages),
 * round the base up to the next 16KB boundary, and hand the unused head
 * and tail back to the allocator.
 *
 * @return physical address of the aligned 16KB directory, or NULL on
 *         failure (no allocator node / out of memory).
 */
pgd_t
ArchVmMap::__alloc_pgd(void)
{
    pgd_t raw;      /* start of the 32KB allocation            */
    pgd_t aligned;  /* first 16KB-aligned address inside `raw` */

    if (!pNode)
    {
        return NULL;
    }

    /* over-allocate 8 pages so a 16KB-aligned 16KB window must fit */
    raw = (pgd_t)pNode->alloc(PAGE_SIZE * 8, 2);

    if (NULL == raw)
    {
        ::cout << (char *)"[vm] %s:%s:%d ERROR! Cannot allocate page directory\n";
        EXIT;
        return NULL;
    }

    /* round up to the next 16KB (0x4000) boundary */
    aligned = (pgd_t)(((vaddr_t)raw + 0x3FFF) & 0xFFFFC000);

    /* return the slack before the aligned window, if any */
    if ((vaddr_t)aligned > (vaddr_t)raw)
    {
        pNode->free((void *)raw, (vaddr_t)aligned - (vaddr_t)raw);
    }

    /* return the slack after the 16KB window, if any */
    if (((vaddr_t)aligned + PAGE_SIZE * 4) < ((vaddr_t)raw + PAGE_SIZE * 8))
    {
        pNode->free((void *)((vaddr_t)aligned + PAGE_SIZE * 4),
                    ((vaddr_t)raw + PAGE_SIZE * 8) - ((vaddr_t)aligned + PAGE_SIZE * 4));
    }
    /* end of PGD allocation */
    return aligned;
}

/**
 * Copy-assignment: clone another mapping context.
 *
 * Adopts the source's allocator node, allocates a fresh page directory,
 * and copies the source's (16KB) directory contents into it.
 *
 * NOTE(review): the previous value of this->pgd is not freed here —
 * presumably callers only assign into fresh objects; verify against
 * call sites before adding a free.
 *
 * @param _from  mapping to copy from
 * @return *this
 */
ArchVmMap
&ArchVmMap::operator= (ArchVmMap &_from)
{
    pgd_t _pgd1;
    pgd_t _pgd2;

    /* BUG FIX: guard self-assignment; without it this->pgd was replaced
     * before the copy, so the source data was lost. */
    if (this == &_from)
    {
        return *this;
    }

    this->pNode = _from.pNode;
    this->pgd = this->__alloc_pgd();

    if (this->pgd && _from.pgd)
    {
        /* get virtual address of new page directory */
        _pgd1 = (pgd_t)phys_to_virt(this->pgd);
        _pgd2 = (pgd_t)phys_to_virt(_from.pgd);
        /* copy kernel pgd to new pgd */
        util::memcpy(_pgd1, (unsigned char*)_pgd2, PAGE_SIZE * 4);
    }

    /* BUG FIX: function previously fell off the end without returning */
    return *this;
}

}
}
//}
