#include <kiddie/kiddie.h>
#include <arm/platform.h>
#include <sys/errno.h>

#include <Console.h>
#include <hw/Platform.h>
#include <arm/ArchPager.h>

extern kernel::Console cout;
extern kernel::Platform hwplatform;

namespace kernel {
namespace mem {
namespace page {


/**
 * @param paddr Physical address of memory region
 * @param _vaddr Virtual address of memory region
 * @param siz Size of region in bytes (rounded up to whole pages)
 * @return virtual address of the physical memory region (always _vaddr)
 *
 * Maps 'siz' bytes of physical memory starting at 'paddr' to the
 * virtual address '_vaddr', registering the range in this map's area
 * list via add().
 */
vaddr_t
ArchVmMap::remap(paddr_t paddr, vaddr_t _vaddr, size_t siz)
{
    vaddr_t vaddr, vaddr_end;
    pte *l2_page;
    pte entry, entry2; /* NOTE(review): 'entry2' is never used */
    pgd_t pgd;
    int l1_idx, l2_idx, i;

    ENTER;

    /* Normalize to whole pages and record the region in the area list
     * before touching the page tables. */
    siz = PAGE_ALIGN(siz);
    this->add(_vaddr, siz);
    paddr = PAGE_TRUNC(paddr);
    /* this->pgd holds a physical address; translate it so the L1 table
     * can be written through the kernel's linear mapping. */
    pgd = (pgd_t)phys_to_virt(this->pgd);

    /* Walk the region page by page, advancing the physical and virtual
     * cursors in lockstep. */
    for (
         vaddr = _vaddr,vaddr_end = _vaddr + siz;
         vaddr < vaddr_end;
         vaddr += PAGE_SIZE,paddr+=PAGE_SIZE
         )
    {
        l1_idx = PTE_L1_IDX(vaddr);
        entry =  (pte)pgd[l1_idx]; /* current L1 descriptor for this address */

        switch (PTE_L1_TYPE_MASK & entry)
        {
        case PTE_L1_COARSE:
            /* L1 slot already points at a coarse page table: install
             * the small-page descriptor for this page. */
            l2_page = (pte *)phys_to_virt((pgd[l1_idx] & (PTE_L1_MASK | PTE_L2_COARSE_MASK)));
            l2_idx = PTE_L2_COARSE_IDX(vaddr);
            l2_page[l2_idx] = (paddr & ~0x00000FFF)
                    | (PTE_AP_RW_RW << 4)
                    | (PTE_AP_RW_RW << 6)
                    | (PTE_AP_RW_RW << 8)
                    | (PTE_AP_RW_RW << 10)
                    | 0xE; /* Cachable & buffered & small page*/
            break;

        case PTE_L1_SECTION:
            /* A 1MB section mapping already covers this address; leave
             * it untouched. */
            break;

        case PTE_L1_FINE:
            /* TODO Implement Fine page table mapping */
            ::cout << (char *)"[vm] BUG!!!!!!!!!!!!! Fine Page Tables not supported!\n";
            ::hwplatform.idle();
            break;

        case PTE_L1_FAULT:
        default:
            /* No translation yet: allocate one page and carve it into
             * four 1KB coarse page tables covering four consecutive L1
             * slots.
             * NOTE(review): the new tables are not zeroed, and
             * pgd[l1_idx+1..3] are overwritten without checking their
             * current contents -- confirm both are intended. */
            l2_page = (pte *)pNode->alloc(PAGE_SIZE,0);
            /*
                 * PAGE_SIZE contains 4 coarse page tables
                 * create entries for each of page table
                 *
                 * TODO Mapping continuosly 4KB page to 4 coarse page tables may cause an overlap of page tables!!!!
                 *
                 * l2_page incremented by 0x100 because of it is a (pte *) type
                 * and contains 256 (0x100) pte entries, so each page table is
                 * 0x100*sizeof(pte) bytes in size.
                 */
            for (i = 0; i < 4; i++, l2_page += 0x100)
            {
                pgd[l1_idx + i] = ((unsigned int)l2_page & 0xFFFFFC00)
                        | (1 << 4)
                        | PTE_L1_COARSE;
            }

            /* Re-read the freshly installed L1 entry and map the page
             * through the new coarse table. */
            l2_page = (pte *)phys_to_virt((pgd[l1_idx] & 0xFFFFFC00));
            l2_idx = PTE_L2_COARSE_IDX(vaddr);

            l2_page[l2_idx] = (paddr & ~0x00000FFF)
                    | (PTE_AP_RW_RW << 4)
                    | (PTE_AP_RW_RW << 6)
                    | (PTE_AP_RW_RW << 8)
                    | (PTE_AP_RW_RW << 10)
                    | 0xE; /* Cachable & buffered & small page*/
            break;
        };
    }

    EXIT;
    return _vaddr;
}

/**
 * @param paddr physical address to back the page with (page-aligned
 *              internally)
 * @param vaddr virtual address the page should appear at
 * @return physical address actually backing 'vaddr' (the existing
 *         mapping if one was already present)
 *
 * Maps one page at 'vaddr' to 'paddr', allocating and installing a
 * page table when the page directory slot is empty.
 */
vaddr_t
ArchVmMap::mapPage(paddr_t paddr, vaddr_t vaddr)
{
    int _i; /* page table index */
    int _j; /* page directory index */
    int k; /* scratch index used to clear a fresh page table */
    pgd_t vpgd; /* pointer to virtual address of page directory */
    pgd_t pgt1, vpgt1; /* ptr to physical and virtual addresses of page table */

    vpgd = (pgd_t) phys_to_virt(this->pgd);

    /* PGT index */
    _j = pgd_idx(vaddr);

    /* Initial page index */
    _i = pgt_idx(vaddr);

    /* Physical page address (if used by remap() syscall) */
    paddr = PAGE_TRUNC(paddr);

    /* take 'pgt' */
    if ((vpgd[_j] & PDE_PRESENT))
    {
        pgt1 = (pgd_t)(vpgd[_j] & ~(PAGE_SIZE - 1));
    }
    // Page table is not present
    else
    {
        pgt1 = (pgd_t) ::hwplatform.pageFactory.alloc(PAGE_SIZE, 0);
        /* BUGFIX: the fresh table must be cleared, otherwise the
         * PTE_PRESENT probe below reads whatever garbage the allocator
         * returned and may report a bogus existing mapping. */
        vpgt1 = (pgd_t) phys_to_virt(pgt1);
        for (k = 0; k < pgt_len(); k++)
        {
            vpgt1[k] = 0;
        }
        /* map page table right away (consistent with mapPage(vaddr)) */
        vpgd[_j] = (u32)((pgt1)) | PDE_PRESENT | PDE_WRITE | PDE_USER;
    }

    /* get virtual address of pgt */
    vpgt1 = (pgd_t) phys_to_virt(pgt1);

    /* if page is present do nothing -- return the existing mapping */
    if ((vpgt1[_i] & PTE_PRESENT))
    {
        paddr = vpgt1[_i] & ~(PAGE_SIZE - 1);
        return paddr;
    }
    /* If page is not present */
    else
    {
        vpgt1[_i] = paddr | PTE_PRESENT | PTE_WRITE | PTE_USER;
    }

    return paddr;
}

/**
 * @param vaddr virtual address the page should appear at
 * @return physical address of the page backing 'vaddr'
 *
 * Maps one page at 'vaddr', allocating a fresh physical page for it
 * (and a page table if needed). If 'vaddr' is already mapped, the
 * existing physical address is returned unchanged.
 */
vaddr_t
ArchVmMap::mapPage(vaddr_t vaddr)
{
    int _i; /* page table index */
    int _j; /* page directory index */
    int k; /* scratch index used to clear a fresh page table */
    paddr_t paddr;
    pgd_t vpgd; /* pointer to virtual address of page directory */
    pgd_t pgt1, vpgt1; /* ptr to physical and virtual addresses of page table */

    vpgd = (pgd_t) phys_to_virt(this->pgd);

    /* PGT index */
    _j = pgd_idx(vaddr);

    /* Initial page index */
    _i = pgt_idx(vaddr);

    /* take 'pgt' */
    if ((vpgd[_j] & PDE_PRESENT))
    {
        pgt1 = (pgd_t)(vpgd[_j] & ~(PAGE_SIZE - 1));
        vpgt1 = (pgd_t) phys_to_virt(pgt1);
    }
    // Page table is not present
    else
    {
        pgt1 = (pgd_t) ::hwplatform.pageFactory.alloc(PAGE_SIZE, 0);
        /* BUGFIX: the fresh table must be cleared, otherwise the
         * PTE_PRESENT probe below reads allocator garbage. */
        vpgt1 = (pgd_t) phys_to_virt(pgt1);
        for (k = 0; k < pgt_len(); k++)
        {
            vpgt1[k] = 0;
        }
        /* map page table */
        vpgd[_j] = (u32)((pgt1)) | PDE_PRESENT | PDE_WRITE | PDE_USER;
    }

    /* if page is not present, allocate and install it */
    if (!(vpgt1[_i] & PTE_PRESENT))
    {
        paddr = (paddr_t) ::hwplatform.pageFactory.alloc(PAGE_SIZE, 0);
        vpgt1[_i] = paddr | PTE_PRESENT | PTE_WRITE | PTE_USER;
    }
    else
    {
        /* BUGFIX: 'paddr' was returned uninitialized when the page was
         * already mapped; report the existing mapping instead, as the
         * two-argument overload does. */
        paddr = vpgt1[_i] & ~(PAGE_SIZE - 1);
    }

    return paddr;
}

/**
 * @param area memory area to map (v_addr/length define the range;
 *             p_addr is the physical base when area.alloc is false)
 * @return starting virtual address of the mapped area
 *
 * Maps every page of 'area' into this map's page directory. When
 * area.alloc is set, each absent page gets a freshly allocated
 * physical page; otherwise pages are mapped contiguously starting at
 * area.p_addr. Already-present pages are left untouched.
 *
 * TODO This (mapping) should be done somewhere in a page or
 * platform context.
 */
vaddr_t
ArchVmMap::doMapping(const kernel::mem::MemArea & area)
{
    int _i; /* current page table index */
    int _j; /* starting page directory index */
    int j_; /* ending page directory index */
    int k; /* scratch index used to clear a fresh page table */
    int num; /* number of pages left to map */
    paddr_t address; /* physical address cursor */
    vaddr_t _end; /* last virtual address of memory area */
    pgd_t vpgd; /* pointer to virtual address of page directory */
    pgd_t pgt1, vpgt1; /* ptr to physical and virtual addresses of page table */

    vpgd = (pgd_t) phys_to_virt(this->pgd);
    _end = area.v_addr + area.length;

    /* PGT index */
    _j = pgd_idx(area.v_addr);
    j_ = pgd_idx(_end);

    /* Initial page index */
    _i = pgt_idx(area.v_addr);
    /* Number of pages to map (account for a non-page-aligned start) */
    num = (int) (PAGE_ALIGN((area.length + (area.v_addr % PAGE_SIZE)))
                 / PAGE_SIZE);

    /* Physical page address (if used by remap() syscall) */
    address = PAGE_TRUNC(area.p_addr);

    /* main mapping loop: one iteration per page directory slot.
     * BUGFIX: the condition previously tested plain 'num', which is
     * true for the -1 left behind by 'while (num--)'. */
    for (; (_j <= j_) && (num > 0); _j++)
    {

        /* take 'pgt' */
        if ((vpgd[_j] & PDE_PRESENT))
        {
            pgt1 = (pgd_t)(vpgd[_j] & ~(PAGE_SIZE - 1));
            vpgt1 = (pgd_t) phys_to_virt(pgt1);
        }
        // Page table is not present
        else
        {
            pgt1 = (pgd_t) ::hwplatform.pageFactory.alloc(PAGE_SIZE, 0);
            /* BUGFIX: clear the fresh table so the PTE_PRESENT probes
             * below do not read allocator garbage. */
            vpgt1 = (pgd_t) phys_to_virt(pgt1);
            for (k = 0; k < pgt_len(); k++)
            {
                vpgt1[k] = 0;
            }
        }

        /* do page mapping.
         * BUGFIX: 'while (num--)' decremented past zero, so the page
         * count ended at -1 and extra pages beyond the area could be
         * mapped by the outer loop. */
        while (num > 0)
        {
            num--;

            /* if page is present keep the existing mapping */
            if ((vpgt1[_i] & PTE_PRESENT))
            {
                address = vpgt1[_i] & ~(PAGE_SIZE - 1);
            }
            /* If page is not present */
            else
            {
                if (area.alloc)
                {
                    address = (paddr_t) ::hwplatform.pageFactory.alloc(
                                PAGE_SIZE, 0);
                    vpgt1[_i] = address | PTE_PRESENT | PTE_WRITE | PTE_USER;
                }
                else
                {
                    vpgt1[_i] = address | PTE_PRESENT | PTE_WRITE | PTE_USER;
                    address += PAGE_SIZE;
                }
            }

            _i++;
            if (_i >= pgt_len())
            {
                _i = 0;
                /* page table full: move on to the next directory slot */
                break;
            }
        }

#ifdef DEBUG
        ::cout << "[ VmMap ] " << __FUNCTION__
               << kernel::Console::DEC << "() ###---->>> &&& Mapping PGD: _j=" << _j << ",j_=" << j_ << ",i=" << _i
               << "; len=" << (int)area.length << "\n";
#endif
        /* map page table */
        vpgd[_j] = (u32)((pgt1)) | PDE_PRESENT | PDE_WRITE | PDE_USER;

    }

    /* BUGFIX: the function is declared vaddr_t but had no return
     * statement at all (undefined behavior). */
    return area.v_addr;
}

/**
 * @return 0 on success, -ENOMEM when backing memory cannot be
 *         allocated
 *
 * Maps every registered memory area of this map into its page
 * directory, allocating the physical backing for each area in one
 * contiguous chunk.
 */
int
ArchVmMap::doMapping (void)
{
    int err = 0;
    MemArea *area;
    vaddr_t vaddr, vaddr_end, paddr;
    pte *l2_page;
    pte entry, entry2; /* NOTE(review): 'entry2' is never used */
    pgd_t pgd;
    int l1_idx, l2_idx, i;

    ENTER;

    this->normalize();
    /* this->pgd holds a physical address; translate it so the L1 table
     * can be written through the kernel's linear mapping. */
    pgd = (pgd_t)phys_to_virt(this->pgd);

    /*
     * We assume here that memory map is normalized
     */
    for (area = this->areas.next;
         area && (area != &this->areas) ;
         area = area->next)
    {
        if (!area->length)
        {
            continue;
        }

        /* Allocate the whole physical backing for this area at once.
         * NOTE(review): second alloc() argument is 2 here but 0 for
         * page tables -- presumably an alignment/order flag; confirm. */
        paddr = area->p_addr = (paddr_t)pNode->alloc(area->length,2);

        if (NULL == area->p_addr)
        {
            /* NOTE(review): the %s/%d placeholders are printed
             * literally; Console::operator<< does no formatting. */
            ::cout << (char *)"[vm] %s:%s:%d ERROR!! Out of memory\n";
            err = -ENOMEM;
            break;
        }

        /* Walk the area page by page, advancing physical and virtual
         * cursors in lockstep. */
        for (vaddr = PAGE_TRUNC(area->v_addr),
             vaddr_end = PAGE_ALIGN(area->v_addr + area->length);
             vaddr < vaddr_end;
             vaddr+=PAGE_SIZE,paddr+=PAGE_SIZE)
        {
            l1_idx = PTE_L1_IDX(vaddr);
            entry =  (pte)pgd[l1_idx]; /* current L1 descriptor */

            switch (PTE_L1_TYPE_MASK & entry)
            {
            case PTE_L1_COARSE:
                /* L1 slot already points at a coarse page table:
                 * install the small-page descriptor for this page. */
                l2_page = (pte *)phys_to_virt((pgd[l1_idx] & (PTE_L1_MASK | PTE_L2_COARSE_MASK)));
                l2_idx = PTE_L2_COARSE_IDX(vaddr);
                l2_page[l2_idx] = (paddr & ~0x00000FFF)
                        | (PTE_AP_RW_RW << 4)
                        | (PTE_AP_RW_RW << 6)
                        | (PTE_AP_RW_RW << 8)
                        | (PTE_AP_RW_RW << 10)
                        | 0xE; /* Cachable & buffered & small page*/
                break;

            case PTE_L1_SECTION:
                /* 1MB section already covers this address; leave it. */
                break;

            case PTE_L1_FINE:
                /* TODO Implement Fine page table mapping */
                ::cout << (char *)"[vm] BUG!!!!!!!!!!!!! Fine Page Tables not supported!\n";
                ::hwplatform.idle();
                break;

            case PTE_L1_FAULT:
            default:
                /* No translation yet: allocate one page and carve it
                 * into four 1KB coarse page tables for four
                 * consecutive L1 slots.
                 * NOTE(review): the new tables are not zeroed, and
                 * pgd[l1_idx+1..3] are overwritten unconditionally. */
                l2_page = (pte *)pNode->alloc(PAGE_SIZE,0);
                /*
                     * PAGE_SIZE contains 4 coarse page tables
                     * create entries for each of page table
                     *
                     * TODO Mapping continuosly 4KB page to 4 coarse page tables may cause an overlap of page tables!!!!
                     *
                     * l2_page incremented by 0x100 because of it is a (pte *) type
                     * and contains 256 (0x100) pte entries, so each page table is
                     * 0x100*sizeof(pte) bytes in size.
                     */
                for (i = 0; i < 4; i++, l2_page += 0x100)
                {
                    pgd[l1_idx + i] = ((unsigned int)l2_page & 0xFFFFFC00)
                            | (1 << 4)
                            | PTE_L1_COARSE;
                }

                /* Re-read the freshly installed L1 entry and map the
                 * page through the new coarse table. */
                l2_page = (pte *)phys_to_virt((pgd[l1_idx] & 0xFFFFFC00));
                l2_idx = PTE_L2_COARSE_IDX(vaddr);

                l2_page[l2_idx] = (paddr & ~0x00000FFF)
                        | (PTE_AP_RW_RW << 4)
                        | (PTE_AP_RW_RW << 6)
                        | (PTE_AP_RW_RW << 8)
                        | (PTE_AP_RW_RW << 10)
                        | 0xE; /* Cachable & buffered & small page*/
                break;
            };
        }
    }

    if (err)
    {
        /* ... */
        /* FIXME TODO sys/alphabet/vm.c: Free allocated physical pages on error. */
        ;
    }

    EXIT;

    return err;
}


/**
 * @param addr virtual start address of the area
 * @param len length of the area in bytes
 * @return 0 on success, -EINVAL on bad parameters or a range reaching
 *         into kernel space, -ENOMEM when the descriptor cannot be
 *         allocated
 *
 * Registers the page-aligned range covering [addr, addr+len) in this
 * map's circular area list, merging with an existing area when the
 * new range starts inside it.
 */
int
ArchVmMap::add(vaddr_t addr, size_t len)
{
    MemArea *area;
    vaddr_t addr_start, addr_end;

    ENTER;

    if (NULL == addr || len <= 0)
    {
        ::cout << (char *)"[MemMap]  ERROR!! Invalid parameters\n";
        EXIT;
        return -EINVAL;
    }

    /* normalize area to whole pages */
    addr_start = PAGE_TRUNC(addr);
    addr_end = PAGE_ALIGN(addr + len);
    /* BUGFIX: operands were reversed (addr_start - addr_end), which
     * produced a huge unsigned length. */
    len = addr_end - addr_start;

    if (addr_end >= (vaddr_t)&KERNEL_BASE)
    {
        ::cout << (char *)"[vm] %s:%s:%d ERROR!! Invalid mem area\n";
        EXIT;
        return -EINVAL;
    }

    /* find overlapped area.
     * TODO(review): only the case where the new range STARTS inside an
     * existing area is merged; a range that starts before an area and
     * overlaps it still creates a separate, overlapping entry. */
    for (area = this->areas.next;
         area && (area != &this->areas) ;
         area = area->next)
    {
        /* new range starts inside this area: extend it */
        if (addr_start >= area->v_addr && addr_start <= (area->v_addr + area->length))
        {
            /* create union  */
            addr_end = (addr_end > (area->v_addr + area->length)) ? addr_end : (area->v_addr + area->length);
            area->length = addr_end - area->v_addr;
            ::cout << (char *)"[vm] %s:%s:%d New area: area->v_addr=0x%x, area->length=0x%x\n";
            EXIT;
            return 0;
        }
    }

    /* no overlap: create a new descriptor and link it at the tail */
    area = new MemArea();
    if (!area)
    {
        ::cout << (char *)"[vm] %s:%s:%d ERROR! No mem space\n";
        EXIT;
        return -ENOMEM;
    }

    area->length = addr_end - addr_start;
    area->v_addr = addr_start;
    area->alloc = true;

    area->prev = this->areas.prev;
    area->next = &this->areas;
    area->prev->next = area;
    this->areas.prev = area;

    EXIT;
    return 0;
}


/**
 * @param p page allocator used for page directory/table allocations
 *
 * Creates a new memory map: allocates a fresh 16KB-aligned page
 * directory (stored as a physical address in this->pgd) and
 * initializes the empty circular memory area list.
 */
ArchVmMap::ArchVmMap(PageFactory *p) : pNode(p)
{
    pgd_t _new_pgd;
    ENTER;

    _new_pgd = this->__alloc_pgd();
    /* BUGFIX: check the allocation result instead of only pNode
     * (__alloc_pgd() already returns NULL for a NULL pNode), and store
     * the address that was actually allocated -- the old code passed
     * the uninitialized 'new_pgd' to virt_to_phys(). */
    if (!_new_pgd)
    {
        EXIT;
        return;
    }

    /* initialize new structure */
    this->pgd = (pgd_t)virt_to_phys(_new_pgd);
    this->areas.next = &this->areas;
    this->areas.prev = &this->areas;
    this->areas.p_addr = 0;
    this->areas.v_addr = 0;
    this->areas.length = 0;

    EXIT;

}


/**
 * Construct a map around an already prepared page directory.
 *
 * @param p     page allocator used by this map
 * @param _pgd1 page directory to adopt (stored as-is in this->pgd)
 */
ArchVmMap::ArchVmMap(PageFactory *p, pgd_t _pgd1) : pNode(p), pgd(_pgd1)
{
    ENTER;

    /* The area list starts out empty: a circular list whose sentinel
     * head points at itself and carries zeroed fields. */
    this->areas.prev = &this->areas;
    this->areas.next = &this->areas;
    this->areas.length = 0;
    this->areas.v_addr = 0;
    this->areas.p_addr = 0;

    EXIT;
}


/**
 * Allocate new page directory
 * It must be 16KB aligned
 *
 * @return virtual address of a 16KB-aligned, 16KB page directory, or
 *         NULL on failure
 *
 * Over-allocates 8 pages so that a 16KB-aligned 16KB window is
 * guaranteed to exist inside the allocation, then frees the unused
 * slack before and after that window.
 */
pgd_t ArchVmMap::__alloc_pgd(void)
{
    pgd_t new_pgd; /* raw allocation returned by pNode */
    pgd_t _new_pgd; /* 16KB-aligned directory inside the allocation */

    if (!pNode)
    {
        return NULL;
    }

    /* allocate 8 pages*/
    _new_pgd = new_pgd = (pgd_t)pNode->alloc(PAGE_SIZE*8,2);

    if (NULL == new_pgd)
    {
        ::cout << (char *)"[vm] %s:%s:%d ERROR! Cannot allocate page directory\n";
        /* NOTE(review): EXIT without a matching ENTER in this
         * function -- the trace nesting will be unbalanced. */
        EXIT;
        return NULL;
    }

    /* Truncate to the 16KB boundary at or below the allocation... */
    _new_pgd = (pgd_t)((vaddr_t)_new_pgd & 0xFFFFC000);

    /* ...then step up in 16KB increments until we are back inside it. */
    while ( _new_pgd < new_pgd )
    {
        _new_pgd = (pgd_t)((vaddr_t)_new_pgd + 0x4000); /* next 16KB boundary */
    }

    /* Free the leading slack below the aligned directory.
     * NOTE(review): the condition is a pgd_t element difference while
     * the freed size is a byte difference -- consistent for a >0 test,
     * but worth confirming against pNode->free()'s contract. */
    if ((_new_pgd - new_pgd) > 0)
    {
        pNode->free ((void *)new_pgd,(vaddr_t)_new_pgd - (vaddr_t)new_pgd);
    }

    /* Free the trailing slack above the 16KB (4-page) directory. */
    if (((vaddr_t)_new_pgd + PAGE_SIZE*4) < ((vaddr_t)new_pgd + PAGE_SIZE*8))
    {
        pNode->free ((void *)((vaddr_t)_new_pgd + PAGE_SIZE*4),
                     ((vaddr_t)new_pgd + PAGE_SIZE*8) - ((vaddr_t)_new_pgd + PAGE_SIZE*4));
    }
    /* end of PGD allocation */
    return _new_pgd;
}

/**
 * @param _from map whose page directory is copied
 * @return reference to *this
 *
 * Allocates a fresh page directory for this map, copies _from's
 * directory into it (PAGE_SIZE*4 bytes, i.e. the whole 16KB L1
 * table) and resets this map's area list to empty.
 */
ArchVmMap
&ArchVmMap::operator= (ArchVmMap &_from)
{
    pgd_t vnew_pgd; /* virtual address of the new page directory */
    pgd_t _pgd1;
    pgd_t _pgd2;

    this->pNode = _from.pNode;
    /* BUGFIX: __alloc_pgd() returns a VIRTUAL address, but this->pgd
     * is treated as physical everywhere else (the constructor stores
     * virt_to_phys() and every user applies phys_to_virt()). Store
     * the physical address for consistency. */
    vnew_pgd = this->__alloc_pgd();
    this->pgd = vnew_pgd ? (pgd_t)virt_to_phys(vnew_pgd) : (pgd_t)NULL;

    if (this->pgd && _from.pgd)
    {
        /* get virtual address of new page directory */
        _pgd1 = (pgd_t)phys_to_virt(this->pgd);
        _pgd2 = (pgd_t)phys_to_virt(_from.pgd);
        /* copy kernel pgd to new pgd */
        util::memcpy(_pgd1, (unsigned char*)_pgd2, PAGE_SIZE * 4);
    }

    /* initialize new structure: empty circular area list */
    this->areas.next = &this->areas;
    this->areas.prev = &this->areas;
    this->areas.p_addr = 0;
    this->areas.v_addr = 0;
    this->areas.length = 0;

    /* BUGFIX: the function is declared ArchVmMap& but had no return
     * statement (undefined behavior). */
    return *this;
}

}
}
}
