//==========================================================================
// Copyright (c) 2000-2008,  Elastos, Inc.  All Rights Reserved.
//==========================================================================

#include <core.h>
#include <_hal.h>
#include <mantle.h>

// Lock guarding s_freePageTableListHead; the DEBUG_KMUTEX build records the
// construction site (file/line) for lock diagnostics.
#ifdef DEBUG_KMUTEX
STATIC KMutex       s_freePageTableListLock(__FILE__, __LINE__);
#else
STATIC KMutex       s_freePageTableListLock;
#endif //DEBUG_KMUTEX

// Intrusive free list of second-level page tables available for reuse
// (see AllocPageTable / FreePageTable below).
STATIC DLinkNode    s_freePageTableListHead;

// Bounds of the kernel virtual-memory area, defined elsewhere.
EXTERN virtaddr_t g_vaKVMAreaBase;
EXTERN virtaddr_t c_vaKVMAreaLimit;

// Allocate one second-level page table, refilling the free list from a
// freshly allocated kernel page when the list is empty.
//
// A kernel page (PAGE_SIZE bytes) holds PAGE_SIZE / c_uSizeOfPageTab page
// tables; on refill the first sub-table is handed to the caller and the
// remaining ones are chained onto the free list.
//
// @return pointer to an (uninitialized) page table, or NULL when no kernel
//         page could be obtained.
EXTERN PageTabEntry *AllocPageTable()
{
    PageTabEntry *pResult;

    s_freePageTableListLock.Lock();

    if (!s_freePageTableListHead.IsEmpty()) {
        // Fast path: pop the first cached page table off the free list.
        DLinkNode *pNode = s_freePageTableListHead.First();
        pNode->Detach();
        pResult = (PageTabEntry *)pNode;
    }
    else {
        // Slow path: carve a whole kernel page into page tables.
        PageTabEntry (*pTables)[c_uNumberOfPageTabEntries] =
            (PageTabEntry (*)[c_uNumberOfPageTabEntries])DzAllocKernelPage();
        if (NULL != pTables) {
            const uint_t cTables = PAGE_SIZE / c_uSizeOfPageTab;
            // Keep pTables[0] for the caller; cache the rest.
            for (uint_t n = 1; n < cTables; n++) {
                s_freePageTableListHead.InsertFirst((DLinkNode *)&pTables[n]);
            }
        }
        pResult = (PageTabEntry *)pTables;  // NULL on allocation failure
    }

    s_freePageTableListLock.Unlock();

    return pResult;
}

// Return a page table to the head of the free list for later reuse by
// AllocPageTable.  The table's memory is never handed back to the kernel
// page allocator.
//
// @param pPageTab  page table to recycle; must not be NULL.
EXTERN void FreePageTable(PageTabEntry *pPageTab)
{
    assert(pPageTab);

    s_freePageTableListLock.Lock();
    s_freePageTableListHead.InsertFirst((DLinkNode *)pPageTab);
    s_freePageTableListLock.Unlock();
}

// One-time initialization of the hardware address translation (HAT) layer:
// prepares the (initially empty) page-table free list.  Always succeeds.
EXTERN DECL_CODEINIT bool_t CDECL InitHAT()
{
    s_freePageTableListHead.Initialize();
    return TRUE;
}

//
// class HAT
//
// NOTE(review): the original code carried a bare "BUG:" marker on the
// #ifndef below with no explanation — the intended defect is unknown;
// investigate before relying on these bounds.
#ifndef _neptune
// Non-neptune layout: user space occupies the page-directory entries from
// 0 up to (but excluding) the entry covering 0x80000000.
const uint_t c_cUDirEntryBase   = 0;
const uint_t c_cUDirEntryLimit  = PHYSADDR_TO_PAGETABNO(0x80000000) - 1;
#else
// Neptune layout: user space sits between COMMON_BASE and PROCESS_TOP.
const uint_t c_cUDirEntryBase   = (PHYSADDR_TO_PAGETABNO(COMMON_BASE) - 1);
const uint_t c_cUDirEntryLimit  = (PHYSADDR_TO_PAGETABNO(PROCESS_TOP) - 1);
#endif // _neptune

// Counts of user-space and kernel-space page-directory entries.
const uint_t c_cUDirEntries = c_cUDirEntryLimit - c_cUDirEntryBase;
const uint_t c_cKDirEntries = c_uNumberOfPageDirEntries - c_cUDirEntries;

// Currently active address space; boot-time default is the initial kernel
// page directory.
HAT *HAT::s_pCurrent = (HAT *)g_initPageDir;

// Construct a new address space: user-space page-directory entries start
// empty, kernel entries are copied from the boot page directory
// (g_initPageDir) so kernel mappings are shared by every HAT.
HAT::HAT()
{
#ifndef _neptune
    // Clear page directory entries of user space.
    //
    memset(&m_pageDir[0], 0, c_cUDirEntries * sizeof(PageDirEntry));

    // Copy page directory entries of kernel to new page dir.
    //
    memcpy(
        &m_pageDir[c_cUDirEntries],
        &g_initPageDir[c_cUDirEntries],
        c_cKDirEntries * sizeof(PageDirEntry));

#if defined(_arm920) || defined(_xscale) || defined(_arm926) || defined(_arm11)
    FlushCache();
#endif
#else
    // Neptune layout: the user window sits between two kernel regions,
    // so copy kernel entries below it, clear the user window, then copy
    // kernel entries above it.

    // [0, c_cUDirEntryBase)
    //
    memcpy(&m_pageDir[0], &g_initPageDir[0],
        c_cUDirEntryBase * sizeof(PageDirEntry));

    // [c_cUDirEntryBase, c_cUDirEntryLimit)
    //
    memset(&m_pageDir[c_cUDirEntryBase], 0,
        (c_cUDirEntryLimit - c_cUDirEntryBase) * sizeof(PageDirEntry));

    // [c_cUDirEntryLimit, c_uNumberOfPageDirEntries)
    //
    memcpy(&m_pageDir[c_cUDirEntryLimit], &g_initPageDir[c_cUDirEntryLimit],
        (c_uNumberOfPageDirEntries - c_cUDirEntryLimit) * sizeof(PageDirEntry));

    FlushCache();
#endif // _neptune
}

// Re-copy the kernel page-directory entries from the global boot page
// directory into this HAT, so kernel mappings created after this HAT was
// constructed (via LoadKernelMap on g_initPageDir) become visible here.
void HAT::SyncGlobalHAT()
{
#ifndef _neptune
    memcpy(
        &m_pageDir[c_cUDirEntries],
        &g_initPageDir[c_cUDirEntries],
        c_cKDirEntries * sizeof(PageDirEntry));
#else
    // [g_vaKVMAreaBase, c_vaKVMAreaLimit)
    //
    assert(IsAlignment2((uint_t)g_vaKVMAreaBase, SECTION_SIZE));
    assert(IsAlignment2((uint_t)c_vaKVMAreaLimit, SECTION_SIZE));

    // One page-directory entry per section; copy exactly the KVM window.
    uint_t uSectionBase = (uint_t)g_vaKVMAreaBase >> PAGEDIR_SHIFT;
    uint_t uSectionLimit = (uint_t)c_vaKVMAreaLimit >> PAGEDIR_SHIFT;

    memcpy(&m_pageDir[uSectionBase], &g_initPageDir[uSectionBase],
        (uSectionLimit - uSectionBase) * sizeof(PageDirEntry));
#endif // _neptune
}

// Map the virtual range [vBase, vLimit) onto physical [pBase, ...) in this
// address space, allocating second-level page tables on demand.
//
// @param vBase         page-aligned virtual start address (user range)
// @param vLimit        page-aligned virtual end address (exclusive)
// @param pBase         page-aligned physical start address
// @param dwProtect     MemoryProtection_* / IoRemapFlag_* bits
// @param bIsFlushCache whether to flush caches when this HAT is active
// @return TRUE on success; FALSE if a page table could not be allocated.
//         NOTE: entries written before the failure are NOT rolled back.
bool_t HAT::LoadMap(
    uint_t vBase, uint_t vLimit, uint_t pBase,
    dword_t dwProtect, _ELASTOS Boolean bIsFlushCache)
{
    // The page directory must be 8KB-aligned (low 13 bits of `this` clear).
    assert((uint32_t)this == ((uint32_t)this & 0xffffe000));
    assert(vLimit > vBase);
    assert(FALL_INTO(vBase, (COMMON_BASE - 0x10000), \
        PROCESS_LIMIT - (COMMON_BASE - 0x10000)));
    assert(FALL_INTO(vLimit - 1, (COMMON_BASE - 0x10000), \
        PROCESS_LIMIT - (COMMON_BASE - 0x10000)));
    assert(PAGE_OFFSET(pBase) == 0);
    assert(PAGE_OFFSET(vBase) == 0);
    assert(PAGE_OFFSET(vLimit) == 0);

//    kprintf("LoadMap(%x, %x, %x, %x);\n",
//            vBase, vLimit, pBase, dwProtect);

    bool_t bIsActive;
    dword_t dwProt;
    PageTabEntry *pPageTab;
    register uint_t m, n;

    // Only the active address space needs cache maintenance afterwards.
    bIsActive = (this == HAT::GetCurrent());

    // Access-permission bits: writable or read-only.
    if (dwProtect & MemoryProtection_Write) {
        dwProt = PAGETABENTRY_AP_WR;
    }
    else {
        dwProt = PAGETABENTRY_AP_RO;
    }

    // Presumably C/B are the ARM cacheable/bufferable descriptor bits:
    // normal memory gets both; direct (I/O) mappings stay uncached.
    if (!(dwProtect & IoRemapFlag_Direct)) {
        dwProt |= PAGETABENTRY_C | PAGETABENTRY_B;
    }
    else {
#ifdef _xscale
        // XScale allows buffered writes to device memory when requested.
        if (IoRemapFlag_BufferedWrite == (dwProtect & IoRemapFlag_BufferedWrite)) {
            dwProt |= PAGETABENTRY_B;
        }
#endif
    }

    while (vBase < vLimit) {
        pPageTab = (PageTabEntry *)m_pageDir[PAGEDIR_INDEX(vBase)].uValue;

        if (pPageTab) {
            // Directory entry holds the physical page-table address (plus
            // descriptor bits); convert to a usable virtual pointer.
            pPageTab = (PageTabEntry *)((uint_t)__VA(pPageTab)
                    & COARSE_PAGETAB_BASEADDR_MASK);
        }
        else {
            // No page table be pointed by PageDirEntry, create one.
            pPageTab = AllocPageTable();
            if (!pPageTab) return FALSE;
            memset(pPageTab, 0, c_uSizeOfPageTab);
            m_pageDir[PAGEDIR_INDEX(vBase)].uValue =
                __PA(pPageTab)
                | PAGEDIRENTRY_TYPE_PAGE | PAGEDIRENTRY_DOMAIN(DOMAIN_USER)
                | PAGEDIRENTRY_BACKWORD;
        }

        // Fill entries up to the end of this page table or vLimit,
        // whichever comes first.
        m = Min(c_cPageTabEntries,
            (vLimit - (vBase & PAGEDIR_MASK)) >> PAGE_SHIFT);
        for (n = PAGETAB_INDEX(vBase); n < m; n++) {
            assert(c_uNumberOfPageTabEntries > n);
            pPageTab[n].uValue = pBase | dwProt | PAGETABENTRY_TYPE_SMALL;
            pBase += PAGE_SIZE;
            vBase += PAGE_SIZE;
        }
    }

#if defined(_arm920) || defined(_xscale) || defined(_arm926) || defined(_arm11)
    if (bIsActive && bIsFlushCache) FlushCache();
#endif
    return TRUE;
}

// Unmap the virtual range [vBase, vLimit) from this address space.
//
// Second-level page-table entries are zeroed; the directory entries and
// page tables themselves are kept for reuse.  When this HAT is the active
// one, each cleared page's TLB entry is flushed and the cache is flushed
// at the end.
//
// @param vBase   page-aligned virtual start address (user range)
// @param vLimit  page-aligned virtual end address (exclusive)
void HAT::UnloadMap(uint_t vBase, uint_t vLimit)
{
    assert(vLimit > vBase);
    assert(FALL_INTO(vBase, (COMMON_BASE - 0x10000), \
        PROCESS_LIMIT - (COMMON_BASE - 0x10000)));
    assert(FALL_INTO(vLimit - 1, (COMMON_BASE - 0x10000), \
        PROCESS_LIMIT - (COMMON_BASE - 0x10000)));
    assert(PAGE_OFFSET(vBase) == 0);
    assert(PAGE_OFFSET(vLimit) == 0);

    bool_t bIsActive;
    PageTabEntry *pPageTab;
    register uint_t m, n;

    // BUGFIX: remember the start of the range — the loop below advances
    // vBase up to vLimit, which made the _arm720 range flush at the end
    // operate on an empty range [vLimit, vLimit).
    const uint_t vRangeBase = vBase;

    bIsActive = (this == HAT::GetCurrent());

    while (vBase < vLimit) {
        pPageTab = (PageTabEntry *)m_pageDir[PAGEDIR_INDEX(vBase)].uValue;

        if (pPageTab) {
            // Directory entry present: convert the stored physical page
            // table address to a virtual pointer.
            pPageTab = (PageTabEntry *)((uint_t)__VA(pPageTab)
                    & COARSE_PAGETAB_BASEADDR_MASK);

            // Clear entries up to the end of this page table or vLimit,
            // whichever comes first.
            m = Min(c_cPageTabEntries,
                (vLimit - (vBase & PAGEDIR_MASK)) >> PAGE_SHIFT);
            for (n = PAGETAB_INDEX(vBase); n < m; n++) {
                assert(c_uNumberOfPageTabEntries > n);
                pPageTab[n].uValue = 0;
                if (bIsActive) FlushTlb((virtaddr_t)vBase);
                vBase += PAGE_SIZE;
            }
        }
        else {
            // No page table for this directory slot: skip ahead to the
            // next page-directory-aligned address.
            vBase = (vBase + (~PAGEDIR_MASK + 1)) & PAGEDIR_MASK;
        }
    }

    if (bIsActive) {
#ifdef _arm720
        FlushCacheByRange(vRangeBase, vLimit);
#else
        FlushCache();
#endif
    }
}

// Change the protection bits of every already-mapped page in
// [vBase, vLimit); unmapped pages and holes are skipped silently.
//
// @param vBase      page-aligned virtual start address (user range)
// @param vLimit     page-aligned virtual end address (exclusive)
// @param dwProtect  MemoryProtection_* / IoRemapFlag_* bits
// @return TRUE always.
bool_t HAT::SetProtect(uint_t vBase, uint_t vLimit, dword_t dwProtect)
{
    assert(vLimit > vBase);
    assert(FALL_INTO(vBase, (COMMON_BASE - 0x10000), \
        PROCESS_LIMIT - (COMMON_BASE - 0x10000)));
    assert(FALL_INTO(vLimit - 1, (COMMON_BASE - 0x10000), \
        PROCESS_LIMIT - (COMMON_BASE - 0x10000)));
    assert(PAGE_OFFSET(vBase) == 0);
    assert(PAGE_OFFSET(vLimit) == 0);

    bool_t bIsActive;
    dword_t dwProt;
    PageTabEntry *pPageTab;
    register uint_t m, n;

    bIsActive = (this == HAT::GetCurrent());

    // Access-permission bits: writable or read-only.
    if (dwProtect & MemoryProtection_Write) {
        dwProt = PAGETABENTRY_AP_WR;
    }
    else {
        dwProt = PAGETABENTRY_AP_RO;
    }

    // NOTE(review): unlike LoadMap, the direct-mapping branch here has no
    // _xscale IoRemapFlag_BufferedWrite handling — confirm whether that
    // asymmetry is intentional.
    if (!(dwProtect & IoRemapFlag_Direct)) {
        dwProt |= PAGETABENTRY_C | PAGETABENTRY_B;
    }

    // Mask that clears the old AP/C/B bits so new ones can be OR-ed in.
    const dword_t c_dwProtMask =
            ~(PAGETABENTRY_AP_MASK | PAGETABENTRY_C | PAGETABENTRY_B);

    while (vBase < vLimit) {
        pPageTab = (PageTabEntry *)m_pageDir[PAGEDIR_INDEX(vBase)].uValue;

        if (pPageTab) {
            // Directory entry present: convert the stored physical page
            // table address to a virtual pointer.
            pPageTab = (PageTabEntry *)((uint_t)__VA(pPageTab)
                    & COARSE_PAGETAB_BASEADDR_MASK);

            // Update entries up to the end of this page table or vLimit,
            // whichever comes first.
            m = Min(c_cPageTabEntries,
                (vLimit - (vBase & PAGEDIR_MASK)) >> PAGE_SHIFT);
            for (n = PAGETAB_INDEX(vBase); n < m; n++) {
                assert(c_uNumberOfPageTabEntries > n);
                // Only touch pages that are actually mapped.
                if (0 != pPageTab[n].uValue) {
                    pPageTab[n].uValue =
                        (pPageTab[n].uValue & c_dwProtMask) | dwProt;
                    if (bIsActive) FlushTlb((virtaddr_t)vBase);
                }
                vBase += PAGE_SIZE;
            }
        }
        else {
            // No page table for this directory slot: skip ahead to the
            // next page-directory-aligned address.
            vBase = (vBase + (~PAGEDIR_MASK + 1)) & PAGEDIR_MASK;
        }
    }

#if defined(_arm920) || defined(_xscale) || defined(_arm926) || defined(_arm11)
    if (bIsActive) FlushCache();
#endif
    return TRUE;
}

// Tear down this address space: return every user-range second-level page
// table to the free list and clear the corresponding directory entries.
// Must not be called on the currently active HAT.
void HAT::Dispose()
{
    assert(this != HAT::GetCurrent());

    register uint_t n;

    for (n = c_cUDirEntryBase; n < c_cUDirEntryLimit; n++) {
        if (m_pageDir[n].uValue) {
            // Strip descriptor bits to recover the page-table physical
            // address, then convert to a virtual pointer.
            // NOTE(review): here the mask is applied before __VA, while
            // LoadMap/UnloadMap apply __VA first — confirm equivalence.
            FreePageTable(
                (PageTabEntry *)(__VA(m_pageDir[n].uValue
                    & COARSE_PAGETAB_BASEADDR_MASK)));
            m_pageDir[n].uValue = 0;
        }
    }
}

// Switch the MMU to the initial kernel page directory (the kernel-only
// address space).  Runs with interrupts disabled: caches are flushed
// before the page-directory switch and all TLBs are flushed after it.
void HAT::ActivateKHAT()
{
    uint32_t uFlags = SaveFlagsAndCli();

    s_pCurrent = (HAT *)g_initPageDir;

    FlushCache();
    SetPageDirAddress(PHYSADDR(g_initPageDir));
    FlushAllTlbs();

    RestoreIF(uFlags);
}

// Map the kernel virtual range [vBase, vLimit) onto physical [pBase, ...)
// in the global boot page directory (g_initPageDir), allocating
// second-level page tables on demand.  Other HATs pick up the new
// mappings via SyncGlobalHAT.
//
// @param vBase      page-aligned virtual start (within the KVM area)
// @param vLimit     page-aligned virtual end address (exclusive)
// @param pBase      page-aligned physical start address
// @param dwProtect  MemoryProtection_* / IoRemapFlag_* bits
// @return TRUE on success; FALSE if a page table could not be allocated.
//         NOTE: entries written before the failure are NOT rolled back.
bool_t HAT::LoadKernelMap(
    uint_t vBase, uint_t vLimit, uint_t pBase, dword_t dwProtect)
{
    assert(vLimit > vBase);
    assert(FALL_INTO(vBase, (uint_t)g_vaKVMAreaBase, \
        ((uint_t)c_vaKVMAreaLimit - (uint_t)g_vaKVMAreaBase)));
    assert(FALL_INTO(vLimit - 1, (uint_t)g_vaKVMAreaBase, \
        ((uint_t)c_vaKVMAreaLimit - (uint_t)g_vaKVMAreaBase)));
    assert(PAGE_OFFSET(pBase) == 0);
    assert(PAGE_OFFSET(vBase) == 0);
    assert(PAGE_OFFSET(vLimit) == 0);

    dword_t dwProt;
    PageTabEntry *pPageTab;
    register uint_t m, n;

    // Access-permission bits: writable, read-only, or no access at all
    // (unlike LoadMap, kernel mappings support AP_NA).
    if (dwProtect & MemoryProtection_Write) {
        dwProt = PAGETABENTRY_AP_WR;
    }
    else if (dwProtect & MemoryProtection_Read) {
        dwProt = PAGETABENTRY_AP_RO;
    }
    else {
        dwProt = PAGETABENTRY_AP_NA;
    }

    // Presumably C/B are the ARM cacheable/bufferable descriptor bits:
    // normal memory gets both; direct (I/O) mappings stay uncached.
    if (!(dwProtect & IoRemapFlag_Direct)) {
        dwProt |= PAGETABENTRY_C | PAGETABENTRY_B;
    }
    else {
#ifdef _xscale
        // XScale allows buffered writes to device memory when requested.
        if (IoRemapFlag_BufferedWrite == (dwProtect & IoRemapFlag_BufferedWrite)) {
            dwProt |= PAGETABENTRY_B;
        }
#endif
    }

    while (vBase < vLimit) {
        pPageTab = (PageTabEntry *)g_initPageDir[PAGEDIR_INDEX(vBase)].uValue;

        if (pPageTab) {
            // Directory entry holds the physical page-table address (plus
            // descriptor bits); convert to a usable virtual pointer.
            pPageTab = (PageTabEntry *)((uint_t)__VA(pPageTab)
                    & COARSE_PAGETAB_BASEADDR_MASK);
        }
        else {
            // No page table be pointed by PageDirEntry, create one.
            pPageTab = AllocPageTable();
            if (!pPageTab) return FALSE;
            memset(pPageTab, 0, c_uSizeOfPageTab);
            g_initPageDir[PAGEDIR_INDEX(vBase)].uValue =
                __PA(pPageTab)
                | PAGEDIRENTRY_TYPE_PAGE | PAGEDIRENTRY_DOMAIN(DOMAIN_USER)
                | PAGEDIRENTRY_BACKWORD;
        }

        // Fill entries up to the end of this page table or vLimit,
        // whichever comes first; kernel mappings are flushed from the TLB
        // immediately since g_initPageDir may be live.
        m = Min(c_cPageTabEntries,
            (vLimit - (vBase & PAGEDIR_MASK)) >> PAGE_SHIFT);
        for (n = PAGETAB_INDEX(vBase); n < m; n++) {
            assert(c_uNumberOfPageTabEntries > n);
            pPageTab[n].uValue = pBase | dwProt | PAGETABENTRY_TYPE_SMALL;
            FlushTlb((virtaddr_t)vBase);
            pBase += PAGE_SIZE;
            vBase += PAGE_SIZE;
        }
    }

#if defined(_arm920) || defined(_xscale) || defined(_arm926) || defined(_arm11)
    FlushCache();
#endif
    return TRUE;
}

// Unmap the kernel virtual range [vBase, vLimit) from the global boot
// page directory (g_initPageDir).  Second-level entries are zeroed and
// each page's TLB entry is flushed; directory entries and the page tables
// themselves are kept for reuse.
//
// @param vBase   page-aligned virtual start (within the KVM area)
// @param vLimit  page-aligned virtual end address (exclusive)
void HAT::UnloadKernelMap(uint_t vBase, uint_t vLimit)
{
    assert(vLimit > vBase);
    assert(FALL_INTO(vBase, (uint_t)g_vaKVMAreaBase, \
        ((uint_t)c_vaKVMAreaLimit - (uint_t)g_vaKVMAreaBase)));
    assert(FALL_INTO(vLimit - 1, (uint_t)g_vaKVMAreaBase, \
        ((uint_t)c_vaKVMAreaLimit - (uint_t)g_vaKVMAreaBase)));
    assert(PAGE_OFFSET(vBase) == 0);
    assert(PAGE_OFFSET(vLimit) == 0);

    PageTabEntry *pPageTab;
    register uint_t m, n;

    // BUGFIX: remember the start of the range — the loop below advances
    // vBase up to vLimit, which made the _arm720 range flush at the end
    // operate on an empty range [vLimit, vLimit).
    const uint_t vRangeBase = vBase;

    while (vBase < vLimit) {
        pPageTab = (PageTabEntry *)g_initPageDir[PAGEDIR_INDEX(vBase)].uValue;

        if (pPageTab) {
            // Directory entry present: convert the stored physical page
            // table address to a virtual pointer.
            pPageTab = (PageTabEntry *)((uint_t)__VA(pPageTab)
                    & COARSE_PAGETAB_BASEADDR_MASK);

            // Clear entries up to the end of this page table or vLimit,
            // whichever comes first.
            m = Min(c_cPageTabEntries,
                (vLimit - (vBase & PAGEDIR_MASK)) >> PAGE_SHIFT);
            for (n = PAGETAB_INDEX(vBase); n < m; n++) {
                assert(c_uNumberOfPageTabEntries > n);
                pPageTab[n].uValue = 0;
                FlushTlb((virtaddr_t)vBase);
                vBase += PAGE_SIZE;
            }
        }
        else {
            // No page table for this directory slot: skip ahead to the
            // next page-directory-aligned address.
            vBase = (vBase + (~PAGEDIR_MASK + 1)) & PAGEDIR_MASK;
        }
    }

#ifdef _arm720
    FlushCacheByRange(vRangeBase, vLimit);
#else
    FlushCache();
#endif
}
