//==========================================================================
// Copyright (c) 2000-2008,  Elastos, Inc.  All Rights Reserved.
//==========================================================================

#include <core.h>
#include <_hal.h>
#include <init.h>
#include "setup.h"
#include <mantle.h>

// Translate a kernel virtual address into the physical address it occupies
// when the kernel is loaded at 'physbase' (KERNEL_BASE is the link-time
// virtual base of the kernel image).  Needed while the MMU is still off.
#define __PHYSADDR(va, physbase) \
    ((physaddr_t)(va) - (offset_t)KERNEL_BASE + (physbase))

// Inverse of __PHYSADDR: translate a kernel physical address back into the
// corresponding link-time kernel virtual address.
#define __VIRTADDR(pa, physbase) \
    ((virtaddr_t)((pa) - (physbase) + (offset_t)KERNEL_BASE))

#if defined(_GNUC)

DECL_SECTION(".initpagedir")
    PageDirEntry g_initPageDir[c_uNumberOfPageDirEntries];

DECL_SECTION(".vtpagetab")
    PageTabEntry g_vtPageTable[c_uNumberOfPageTabEntries];

DECL_SECTION(".vtarea")
    byte_t g_vectorTableArea[c_uMaxSizeOfVectorTable];
#elif defined(_EVC)
#pragma data_seg(".initpagedir")
__declspec(allocate(".initpagedir")) \
   PageDirEntry g_initPageDir[c_uNumberOfPageDirEntries];

#pragma data_seg(".vtpagetab")
__declspec(allocate(".vtpagetab")) \
   PageTabEntry g_vtPageTable[c_uNumberOfPageTabEntries];

#pragma data_seg(".vtarea")
__declspec(allocate(".vtarea")) \
   byte_t g_vectorTableArea[c_uMaxSizeOfVectorTable];
#else

#(Unknown the C++ compiler)

#endif

// Number of SECTION_SIZE-sized sections temporarily double-mapped for the
// kernel during boot (installed by SetupPaging, removed by AdjustPaging).
// The count differs per build type — presumably sized to cover the kernel
// image of each configuration; TODO confirm against the linker scripts.
#ifdef _TEST_TYPE
#define NUMBEROF_TEMPSECTIONS   8
#else

#ifdef _RELEASE
#define NUMBEROF_TEMPSECTIONS   6
#else
#define NUMBEROF_TEMPSECTIONS   12
#endif // _RELEASE

#endif // _TEST_TYPE

// Build the boot-time translation tables and switch the MMU on.
//
// Runs with the MMU still OFF, so every global must be reached through its
// load-time physical address (computed via __PHYSADDR with
// paKernelPhysBase) rather than its link-time virtual address.
//
//   paKernelPhysBase      - physical address the kernel is running at
//   paKernelImagePhysBase - physical base of the kernel image
//                           (NOTE(review): unused in this function — confirm
//                           whether it is still needed in the signature)
//   paMemoryMappedIoBase  - physical start of the memory-mapped-I/O range
//   paMemoryMappedIoLimit - physical end (exclusive) of that range
EXTERN_C DECL_CODEINIT void CDECL SetupPaging(
    physaddr_t paKernelPhysBase, physaddr_t paKernelImagePhysBase,
    physaddr_t paMemoryMappedIoBase, physaddr_t paMemoryMappedIoLimit)
{
    // Physical-address view of g_initPageDir (MMU not yet enabled).
    PageDirEntry *pInitPageDir =
        (PageDirEntry *)(virtaddr_t)__PHYSADDR(g_initPageDir,
            paKernelPhysBase);

    memset((void *)pInitPageDir, 0, c_uSizeOfPageDir);

    uint_t i;

    // Make a temp kernel mapping: each physical kernel section is entered
    // twice — identity-mapped at paKernelPhysBase (so instruction fetch
    // stays valid in the cycles right after the MMU comes on) and at the
    // final virtual base KERNEL_BASE.  The identity entries are removed
    // later by AdjustPaging().  C/B bits set: normal cacheable kernel RAM.
    for (i = 0; i < NUMBEROF_TEMPSECTIONS; i++) {
        pInitPageDir[PAGEDIR_INDEX(paKernelPhysBase) + i].uValue =
        pInitPageDir[PAGEDIR_INDEX(KERNEL_BASE) + i].uValue =
            (RoundDown2(paKernelPhysBase, SECTION_SIZE) + SECTION_SIZE * i)
                | PAGEDIRENTRY_TYPE_SECTION
                | PAGEDIRENTRY_SECTION_AP_NA
                | PAGEDIRENTRY_DOMAIN(DOMAIN_KERNEL)
                | PAGEDIRENTRY_BACKWORD
                | PAGEDIRENTRY_SECTION_C | PAGEDIRENTRY_SECTION_B;
    }

    // Map the memory-mapped-io space
    // Assume: we can use the section to do it
    // Note: no C/B bits here — device memory is left uncached/unbuffered.
    uint_t uNumberOfMemoryMappedIoSections =
        RoundUp2(paMemoryMappedIoLimit - paMemoryMappedIoBase, SECTION_SIZE)
            / SECTION_SIZE;
    for (i = 0; i < uNumberOfMemoryMappedIoSections ; i++) {
        pInitPageDir[PAGEDIR_INDEX(MEMORY_MAPPED_IO_BASE) + i].uValue =
            (RoundDown2(paMemoryMappedIoBase, SECTION_SIZE) + SECTION_SIZE * i)
                | PAGEDIRENTRY_TYPE_SECTION
                | PAGEDIRENTRY_SECTION_AP_NA
                | PAGEDIRENTRY_DOMAIN(DOMAIN_KERNEL)
                | PAGEDIRENTRY_BACKWORD;
    }

    // Map the vector table: one coarse page table holding a single small
    // page that covers g_vectorTableArea at VECTOR_TABLE_BASE.
    PageTabEntry *pVTPageTable =
        (PageTabEntry *)(virtaddr_t)
                __PHYSADDR(g_vtPageTable, paKernelPhysBase);
    // The page table must be 1KB-aligned (low 10 bits clear).
    assert((uint32_t)pVTPageTable == ((uint32_t)pVTPageTable & 0xfffffc00));

    byte_t *pVectorTableArea =
        (byte_t *)(virtaddr_t)__PHYSADDR(g_vectorTableArea, paKernelPhysBase);
    // The backing area must be 4KB page-aligned.
    assert((uint32_t)pVectorTableArea ==
        ((uint32_t)pVectorTableArea & 0xfffff000));

    memset((void *)pVTPageTable, 0, c_uSizeOfPageTab);

    pVTPageTable[PAGETAB_INDEX(VECTOR_TABLE_BASE)].uValue =
        (physaddr_t)pVectorTableArea
        | PAGETABENTRY_AP_NA | PAGETABENTRY_C | PAGETABENTRY_B
        | PAGETABENTRY_TYPE_SMALL;

    pInitPageDir[PAGEDIR_INDEX(VECTOR_TABLE_BASE)].uValue =
        (physaddr_t)pVTPageTable
        | PAGEDIRENTRY_TYPE_PAGE | PAGEDIRENTRY_DOMAIN(DOMAIN_KERNEL)
        | PAGEDIRENTRY_BACKWORD;

#if defined(_xscale)
    // Map the reserved cacheable region for clean data cache
    assert(RESERVED_CACHEABLE_REGION
        == RoundDown2(RESERVED_CACHEABLE_REGION, SECTION_SIZE));
    pInitPageDir[PAGEDIR_INDEX(RESERVED_CACHEABLE_REGION)].uValue =
        RESERVED_CACHEABLE_REGION
            | PAGEDIRENTRY_TYPE_SECTION
            | PAGEDIRENTRY_SECTION_AP_NA
            | PAGEDIRENTRY_DOMAIN(DOMAIN_KERNEL)
            | PAGEDIRENTRY_BACKWORD
            | PAGEDIRENTRY_SECTION_C | PAGEDIRENTRY_SECTION_B;
#endif

    // Enable MMU.  "cr3"/"cr1" presumably name ARM CP15 registers c3
    // (domain access control) and c1 (control register), not x86 CRs —
    // confirm against _hal.h.  All three domains are set to client mode
    // (permission bits enforced).
    uint32_t cr3 =
        SET_DOMAIN(DOMAIN_USER, DOMAIN_CONTROL_CLIENT)
        | SET_DOMAIN(DOMAIN_IO, DOMAIN_CONTROL_CLIENT)
        | SET_DOMAIN(DOMAIN_KERNEL, DOMAIN_CONTROL_CLIENT);

    uint32_t cr1 =  CR1_M | CR1_C | CR1_W | CR1_LDP;

    // Instruction cache only on cores known to support enabling it here.
#if defined(_arm920) || defined(_xscale)  || defined(_arm926) || defined(_arm11)
    cr1 |= CR1_I;
#endif

    // High vectors (0xffff0000) when configured.
#ifdef KCONFIG_HIGH_VECTOR
    cr1 |= CR1_V;
#endif // KCONFIG_HIGH_VECTOR

    // NOTE(review): mask 0xffffe000 only checks 8KB alignment; an ARM
    // first-level translation table must be 16KB-aligned (0xffffc000) —
    // confirm whether this assert is intentionally weaker.
    assert((uint32_t)pInitPageDir == ((uint32_t)pInitPageDir & 0xffffe000));
    EnableMMU(cr3, (physaddr_t)pInitPageDir, cr1);
}

// Tear down the temporary identity mapping of the kernel that SetupPaging
// installed, once execution has moved to the final virtual addresses.
EXTERN_C DECL_CODEINIT void CDECL AdjustPaging(physaddr_t paKernelPhysBase)
{
    // When the kernel's physical base coincides with KERNEL_BASE, the
    // "temporary" entries ARE the real kernel mapping — leave them alone.
    if ((uint_t)paKernelPhysBase != (uint_t)KERNEL_BASE) {
        uint_t uFirstSlot = PAGEDIR_INDEX(paKernelPhysBase);
        uint_t uSection   = 0;

        // Clear every identity-mapped section entry.
        while (uSection < NUMBEROF_TEMPSECTIONS) {
            g_initPageDir[uFirstSlot + uSection].uValue = 0;
            uSection++;
        }

        // Make the removal visible to the hardware walker.
        FlushCache();
        FlushAllTlbs();
    }
}

// Build the permanent second-level kernel page mappings for the page range
// [uBasePageNo, uLimitPageNo).
//
// Each newly allocated page table must be written before the final mapping
// makes it reachable, so the section containing the table's physical frame
// is accessed through a scratch page-directory slot (nPageDirEntryIndex)
// that temporarily holds a section mapping of that frame.  The slot's
// original entry is saved in pde0 and restored before returning — on both
// the success and the failure path.
//
// Returns TRUE on success, FALSE if a page table cannot be allocated.
// NOTE(review): declared EXTERN while the sibling functions use EXTERN_C —
// confirm the intended linkage.
EXTERN DECL_CODEINIT bool_t CDECL InitKernelMapping(
    uint_t uBasePageNo, uint_t uLimitPageNo)
{
    assert(IS_VALID_PAGENO(uLimitPageNo - 1));
    assert(uBasePageNo < uLimitPageNo);

    // Page-directory slot used as the scratch mapping window.
#ifndef _neptune
    uint_t nPageDirEntryIndex = 0;
#else
    uint_t nPageDirEntryIndex = PHYSADDR_TO_PAGETABNO(COMMON_BASE);
#endif // _neptune

    // Save the scratch slot's current entry so it can be restored.
    PageDirEntry pde0 = g_initPageDir[nPageDirEntryIndex];

    for (uint_t uPageNo = uBasePageNo; uPageNo < uLimitPageNo;) {
        PageTabEntry *pPageTable = AllocKernelPageTable();
        if (NULL == pPageTable) {
            // Fix: after the first iteration the scratch slot still holds a
            // temporary section entry; restore it and flush before bailing
            // out, mirroring the success path.
            g_initPageDir[nPageDirEntryIndex] = pde0;
            FlushCache();
            FlushAllTlbs();
            return FALSE;
        }

        // Physical address of the new table, the section containing it, and
        // the virtual address at which that section becomes visible through
        // the scratch slot.
        physaddr_t paPageTable          = PHYSADDR(pPageTable);
        physaddr_t paSectionOfPageTable =
            RoundDown2(paPageTable, SECTION_SIZE);
        PageTabEntry *pMappedPageTable  =
            (PageTabEntry *)(paPageTable - paSectionOfPageTable +
                (nPageDirEntryIndex << PAGEDIR_SHIFT));

        // Map that section into the scratch slot (no C/B bits, so the
        // entries written below go straight to memory).
        g_initPageDir[nPageDirEntryIndex].uValue = paSectionOfPageTable
            | PAGEDIRENTRY_TYPE_SECTION
            | PAGEDIRENTRY_SECTION_AP_NA
            | PAGEDIRENTRY_DOMAIN(DOMAIN_KERNEL)
            | PAGEDIRENTRY_BACKWORD;
        FlushCache();
        FlushTlb(pMappedPageTable);

        memset(pMappedPageTable, 0, c_uSizeOfPageTab);

        // First page number covered by this page table.
        uint_t uBasePageNoOfPageTable =
            RoundDown2(uPageNo, c_uNumberOfPageTabEntries);

        // Fill in the small-page entries this table covers, stopping at the
        // table's capacity or at uLimitPageNo, whichever comes first.
        for (uint_t i = uPageNo % c_uNumberOfPageTabEntries;
            i < Min(c_uNumberOfPageTabEntries,
                    uLimitPageNo - uBasePageNoOfPageTable);
            i++, uPageNo++) {
            pMappedPageTable[i].uValue =
                PAGENO_TO_PHYSADDR(VIRTPAGENO_TO_PHYSPAGENO(
                    uBasePageNoOfPageTable + i))
                | PAGETABENTRY_AP_NA | PAGETABENTRY_C | PAGETABENTRY_B
                | PAGETABENTRY_TYPE_SMALL;
        }

        // Install the completed page table in its real directory slot.
        g_initPageDir[PAGENO_TO_PAGETABNO(uBasePageNoOfPageTable)].uValue =
            paPageTable
            | PAGEDIRENTRY_TYPE_PAGE | PAGEDIRENTRY_DOMAIN(DOMAIN_KERNEL)
            | PAGEDIRENTRY_BACKWORD;
    }

    // Assume: needn't remap the vector table
    assert(uLimitPageNo <= NUMBEROF_PAGES((uint_t)VECTOR_TABLE_BASE));

    // Assume: we needn't remap the memory-mapped-io space

    // Restore the scratch slot's original entry.
    g_initPageDir[nPageDirEntryIndex] = pde0;

    FlushCache();
    FlushAllTlbs();

    return TRUE;
}
