//==========================================================================
// Copyright (c) 2000-2008,  Elastos, Inc.  All Rights Reserved.
//==========================================================================

#include <core.h>
#include <init.h>
#include <mantle.h>
#include "salloc.h"
#include "buddysys.h"

//
// Local function declarations
//
STATIC DECL_CODEINIT void DumpRegions(const char *szTitle,
        const MemoryRegion regions[], uint_t uNumberOfRegions);

STATIC DECL_CODEINIT void SortRegions(
        const MemoryRegion originRegions[], uint_t uNumberOfOriginRegions,
        MemoryRegion sortedRegions[], uint_t *puNumberOfSortedRegions);

STATIC DECL_CODEINIT void FlattenRegions(
        const MemoryRegion originRegions[], uint_t uNumberOfOriginRegions,
        MemoryRegion flattenedRegions[]);

STATIC DECL_CODEINIT void BuildHoles(
        const MemoryZone zones[], uint_t uNumberOfZones,
        const MemoryHole holes[], uint_t uNumberOfHoles,
        MemoryHole newHoles[], uint_t *puNumberOfNewHoles);

STATIC DECL_CODEINIT void BuildFreeRegions(
        const MemoryZone zones[], uint_t uNumberOfZones,
        const MemoryHole holes[], uint_t uNumberOfHoles,
        MemoryRegion freeRegions[], uint_t *puNumberOfFreeRegions);

STATIC DECL_CODEINIT bool_t ReclaimFreeRegions(
        const MemoryRegion freeRegions[], uint_t uNumberOfFreeRegions);

STATIC DECL_CODEINIT bool_t CDECL InitKVMAreas();

//
// Global struct definitions
//
DzGlobals dz = {
    0        // uNumberOfZones;
};

// Top of the direct-mapped kernel memory; set by InitPhysicalMemory() from
// the kernel mapping's limit. Everything above it is "high memory".
virtaddr_t g_vaKernelHighMemory = NULL;

// Global sorted/flattened hole list built by InitPhysicalMemory() from the
// zone gaps plus the BSP-reported holes.
MemoryHole g_holes[c_uMaxNumberOfMemoryHoles];
uint_t g_uNumberOfHoles;

//
// Function definitions
//
EXTERN int MiscICmp(const char *pszName, const wchar_t *pwszName, size_t count);

//
// Look up a boot module by its (wide-character) name in the BSP module
// table; returns NULL when no entry matches.
//
STATIC BootModule *FindBootModule(const wchar_t *wszName)
{
    assert(wszName);

    size_t cchName = wcslen(wszName);

    for (uint_t n = 0; n < bsp.uNumberOfBootModules; n++) {
        BootModule *pModule = &bsp.pBootModules[n];
        // NOTE(review): the comparison is bounded by the query's length, so
        // a query that is a prefix of a longer stored name also matches —
        // confirm this is intended.
        if (MiscICmp(pModule->szName, wszName, cchName) == 0) {
            return pModule;
        }
    }

    return NULL;
}

//
// Return the [base, limit) virtual address range of a named boot module.
// Returns E_FILE_NOT_FOUND when the module does not exist; in debug builds
// the out-parameters are poisoned with 0xcccccccc in that case.
//
EXTERN ECode GetBootModule(
    const wchar_t *wszName, virtaddr_t *pvaBase, virtaddr_t *pvaLimit)
{
    BootModule *pModule = FindBootModule(wszName);

    if (NULL != pModule) {
        if (pvaBase && pvaLimit) {
            *pvaBase  = PAGENO_TO_VIRTADDR(pModule->region.uBasePageNo);
            *pvaLimit = (virtaddr_t)((uint_t)*pvaBase + pModule->size);
            // The module's byte size must stay within its page region.
            assert(*pvaLimit <= PAGENO_TO_VIRTADDR(pModule->region.uLimitPageNo));
        }
        return NOERROR;
    }

#ifdef _DEBUG
    if (pvaBase && pvaLimit) {
        *pvaBase  = (virtaddr_t)0xcccccccc;
        *pvaLimit = (virtaddr_t)0xcccccccc;
    }
#endif
    return E_FILE_NOT_FOUND;
}

//
// Remove a boot module from the module table (by blanking its name) and
// hand its pages back to the page allocator.
//
EXTERN ECode ReclaimBootModule(const wchar_t *wszName)
{
    BootModule *pModule = FindBootModule(wszName);
    if (NULL == pModule) {
        return E_FAIL;
    }

    // Blank the name so later lookups no longer find this module.
    strcpy(pModule->szName, "");

    // TODO: need some kind of lock or AddRef/Release?
    if (!ReclaimPages(pModule->region.uBasePageNo,
                      pModule->region.uLimitPageNo)) {
        return E_FAIL;
    }

    return NOERROR;
}

//
// Initialize physical-memory management from the boot support package (bsp):
// sort the memory zones, build the global hole list, compute the free
// regions, then bring up the static allocator, the kernel mapping, the KVM
// area window, the page and zone databases, and the proxy / user-process
// entry tables. Returns TRUE on success, FALSE if any stage fails.
//
EXTERN DECL_CODEINIT bool_t CDECL InitPhysicalMemory()
{
    // Zones
    assert(c_uMaxNumberOfMemoryZones >= bsp.uNumberOfMemoryZones);

    dz.uNumberOfZones = bsp.uNumberOfMemoryZones;

    MemoryZone  sortedZones[c_uMaxNumberOfMemoryZones];
    uint_t      uNumberOfSortedZones;

    // Sort the BSP zones by base page and drop empty ones.
    SortRegions(
        bsp.pMemoryZones, bsp.uNumberOfMemoryZones,
        sortedZones, &uNumberOfSortedZones);

    DumpRegions("Zones", sortedZones, uNumberOfSortedZones);

    // Holes
    DumpRegions("BSP Holes", bsp.pMemoryHoles, bsp.uNumberOfMemoryHoles);

    // The "+ 2" reserves room for the init-stack and boot-module regions
    // that are appended to this buffer further below.
    assert(c_uMaxNumberOfMemoryHoles >= bsp.uNumberOfMemoryHoles + 2);
    MemoryHole  flattenedHoles[c_uMaxNumberOfMemoryHoles + 2];
    uint_t      uNumberOfFlattenedHoles;

    // Combine the gaps between zones with the BSP-reported holes.
    BuildHoles(
        sortedZones, uNumberOfSortedZones,
        bsp.pMemoryHoles, bsp.uNumberOfMemoryHoles,
        flattenedHoles, &uNumberOfFlattenedHoles);

    // In-place flatten: safe because FlattenRegions writes element i after
    // reading it and only compares it against elements j < i.
    FlattenRegions(flattenedHoles, uNumberOfFlattenedHoles, flattenedHoles);

    SortRegions(
        flattenedHoles, uNumberOfFlattenedHoles,
        g_holes, &g_uNumberOfHoles);

    DumpRegions("Holes", g_holes, g_uNumberOfHoles);

    // Holes or Reversed Regions
    // ("Reversed" presumably means "Reserved": the init stack and the boot
    // modules are treated like holes below so they are excluded from the
    // free regions — TODO confirm.)
    flattenedHoles[uNumberOfFlattenedHoles]      = bsp.regionOfInitStack;
    flattenedHoles[uNumberOfFlattenedHoles + 1]  = bsp.regionOfBootModules;
    uNumberOfFlattenedHoles += 2;

    MemoryHole  holesOrReversedRegions[c_uMaxNumberOfMemoryHoles + 2];
    uint_t      uNumberOfHolesOrReversedRegions;

    SortRegions(
        flattenedHoles, uNumberOfFlattenedHoles,
        holesOrReversedRegions, &uNumberOfHolesOrReversedRegions);

    DumpRegions("Holes or Reversed Regions",
        holesOrReversedRegions, uNumberOfHolesOrReversedRegions);

    // Free regions: the parts of the zones not covered by any hole or
    // reserved region.
    MemoryRegion    freeRegions[2 * c_uMaxNumberOfMemoryHoles];
    uint_t          uNumberOfFreeRegions;

    BuildFreeRegions(
        sortedZones, uNumberOfSortedZones,
        holesOrReversedRegions, uNumberOfHolesOrReversedRegions,
        freeRegions, &uNumberOfFreeRegions);

    DumpRegions("Free regions", freeRegions, uNumberOfFreeRegions);

    // Count the number of actual pages: the free pages plus the
    // (reclaimable) boot-module and init-stack pages.
    g_uNumberOfActualPages =
        bsp.regionOfBootModules.uLimitPageNo -
        bsp.regionOfBootModules.uBasePageNo +
        bsp.regionOfInitStack.uLimitPageNo -
        bsp.regionOfInitStack.uBasePageNo;
    for (uint_t n = 0; n < uNumberOfFreeRegions; n++) {
        g_uNumberOfActualPages +=
            freeRegions[n].uLimitPageNo - freeRegions[n].uBasePageNo;
    }

    // Initialize static allocation (the boot-time allocator that carves
    // memory out of the free regions).
    if (!InitStaticAlloc(freeRegions, uNumberOfFreeRegions)) {
        return FALSE;
    }
    DumpStaticAlloc("Origin");

    // Kernel mapping: one contiguous range covering every zone up to and
    // including the kernel's preferred zone.
    MemoryRegion kernelMappingRegion;
    kernelMappingRegion.uBasePageNo  = c_uMaxNumberOfPages;
    kernelMappingRegion.uLimitPageNo = 0;

    for (uint_t i = 0; i <= bsp.uKernelPreferredZoneNo; i++) {
        kernelMappingRegion.uBasePageNo  =
            Min(kernelMappingRegion.uBasePageNo,
                bsp.pMemoryZones[i].uBasePageNo);
        kernelMappingRegion.uLimitPageNo =
            Max(kernelMappingRegion.uLimitPageNo,
                bsp.pMemoryZones[i].uLimitPageNo);
    }

    kprintf("Kernel mapping [%x, %x)\n",
            kernelMappingRegion.uBasePageNo, kernelMappingRegion.uLimitPageNo);

    // Convert the physical page range to virtual page numbers.
    kernelMappingRegion.uBasePageNo =
            PHYSPAGENO_TO_VIRTPAGENO(kernelMappingRegion.uBasePageNo);
    kernelMappingRegion.uLimitPageNo =
            PHYSPAGENO_TO_VIRTPAGENO(kernelMappingRegion.uLimitPageNo);

    // The physical memory of the kernel preferred zone must be <= 2GB!
    if (!InitKernelMapping(
            kernelMappingRegion.uBasePageNo,
            kernelMappingRegion.uLimitPageNo)) {
        return FALSE;
    }
    DumpStaticAlloc("After Kernel Mapping");

    // Everything above this address is not direct-mapped.
    g_vaKernelHighMemory =
        (virtaddr_t)VIRTPAGENO_TO_VIRTADDR(kernelMappingRegion.uLimitPageNo);

    kprintf("High Memory: %x\n", g_vaKernelHighMemory);

    // NOTE(review): the return value is ignored; InitKVMAreas() returns
    // FALSE when the kernel map exhausts the KVM window.
    InitKVMAreas();

    // Pages
    if (!InitPages(
            sortedZones, uNumberOfSortedZones,
            freeRegions, &uNumberOfFreeRegions)) {
        return FALSE;
    }
    DumpStaticAlloc("After initialize pages");

    // Zones
    if (!InitZones()) {
        return FALSE;
    }
    DumpStaticAlloc("Final");

    // Uninitialize static allocation; presumably the unused remainder is
    // returned to the free regions — confirm against salloc.h.
    UninitStaticAlloc(freeRegions, uNumberOfFreeRegions);

    DumpRegions("free regions", freeRegions, uNumberOfFreeRegions);

    // Reclaim all pages in free regions
    if (!ReclaimFreeRegions(freeRegions, uNumberOfFreeRegions)) {
        return FALSE;
    }

    // Proxy entry table
    if (!InitProxyEntryTable()) {
        return FALSE;
    }

    // User process entry table
    if (!InitUserEntryTable()) {
        return FALSE;
    }

#if defined(_arm) || defined(_mips)
    DumpZones();
#endif // _arm || _mips

    return TRUE;
}

// Region sanity predicates: a region is a half-open page-number range
// [uBasePageNo, uLimitPageNo); base == limit means the region is empty.
#define IS_VALID_REGION(region) ((region).uBasePageNo <= (region).uLimitPageNo)
#define IS_EMPTY_REGION(region) ((region).uBasePageNo == (region).uLimitPageNo)

//
// Sort regions and remove empty ones
// Assume: size of the sorted regions >= size of the origin regions;
//         The origin regions is flattened.
//
//
// Sort regions by base page number (insertion sort) and remove empty ones.
// Assume: size of the sorted regions >= size of the origin regions;
//         the origin regions are flattened (pairwise non-overlapping).
//
STATIC DECL_CODEINIT void SortRegions(
    const MemoryRegion originRegions[], uint_t uNumberOfOriginRegions,
    MemoryRegion sortedRegions[], uint_t *puNumberOfSortedRegions)
{
    uint_t uNumberOfSortedRegions = 0;

    for (uint_t i = 0; i < uNumberOfOriginRegions; i++) {
        assert(IS_VALID_REGION(originRegions[i]));

        // Empty regions are dropped from the output.
        if (IS_EMPTY_REGION(originRegions[i])) continue;

        uint_t j;
        for (j = 0; j < uNumberOfSortedRegions; j++) {
            if (originRegions[i].uBasePageNo < sortedRegions[j].uBasePageNo) {
                // Assert regions do not overlap
                assert(originRegions[i].uLimitPageNo
                    <= sortedRegions[j].uBasePageNo);

                // Insert originRegions[i] at slot j, shifting the tail of
                // the sorted array one position to the right.
                for (uint_t k = uNumberOfSortedRegions; k > j; k--) {
                    sortedRegions[k] = sortedRegions[k - 1];
                }
                sortedRegions[j] = originRegions[i];

                uNumberOfSortedRegions++;
                break;
            }
            else {
                // Assert regions do not overlap
                assert(originRegions[i].uBasePageNo
                    >= sortedRegions[j].uLimitPageNo);
            }
        }

        // No slot with a larger base found: append at the end.
        if (uNumberOfSortedRegions == j) {
            sortedRegions[uNumberOfSortedRegions] = originRegions[i];
            uNumberOfSortedRegions++;
        }
    }

    *puNumberOfSortedRegions = uNumberOfSortedRegions;
}

//
// Debug helper: print a titled list of [base, limit) page-number regions.
// Compiled to a no-op in release builds.
//
STATIC DECL_CODEINIT void DumpRegions(const char *szTitle,
    const MemoryRegion regions[], uint_t uNumberOfRegions)
{
#ifndef _RELEASE
    kprintf("%d %s:\n", uNumberOfRegions, szTitle);

    for (uint_t n = 0; n < uNumberOfRegions; n++) {
        kprintf("    [%x, %x)\n",
                regions[n].uBasePageNo, regions[n].uLimitPageNo);
    }
#endif
}

//
// Eliminate overlayed regions
//
//
// Eliminate overlapped regions: copy each origin region, trimming it
// against every region that precedes it so no two output regions overlap;
// a fully-contained region becomes empty. It is safe to call this with
// flattenedRegions == originRegions (as InitPhysicalMemory does), since
// element i is copied first and only compared against elements j < i.
//
STATIC DECL_CODEINIT void FlattenRegions(
    const MemoryRegion originRegions[], uint_t uNumberOfOriginRegions,
    MemoryRegion flattenedRegions[])
{
    for (uint_t i = 0; i < uNumberOfOriginRegions; i++) {
        assert(IS_VALID_REGION(originRegions[i]));

        flattenedRegions[i] = originRegions[i];

        if (IS_EMPTY_REGION(flattenedRegions[i])) continue;

        // Trim region i against every earlier region j.
        for (uint_t j = 0; j < i; j++) {
            if (flattenedRegions[i].uBasePageNo <
                flattenedRegions[j].uBasePageNo) {
                // i starts below j: clip i's tail at j's base.
                if (flattenedRegions[i].uLimitPageNo >
                    flattenedRegions[j].uBasePageNo) {
                    flattenedRegions[i].uLimitPageNo =
                        flattenedRegions[j].uBasePageNo;
                }
            }
            else if (flattenedRegions[i].uBasePageNo <
                    flattenedRegions[j].uLimitPageNo) {
                // i starts inside j ...
                if (flattenedRegions[i].uLimitPageNo <
                    flattenedRegions[j].uLimitPageNo) {
                    // ... and ends inside j as well: set empty
                    flattenedRegions[i].uBasePageNo =
                        flattenedRegions[i].uLimitPageNo;
                }
                else {
                    // ... and extends past j: clip i's head at j's limit.
                    flattenedRegions[i].uBasePageNo =
                        flattenedRegions[j].uLimitPageNo;
                }
            }
        }
    }
}

//
// Build the raw hole list for the given (sorted, non-empty) zones: the gap
// below the first zone, the gaps between adjacent zones, and the gap above
// the last zone up to c_uMaxNumberOfPages, followed by a verbatim copy of
// the caller-supplied holes. The output may contain overlapping entries;
// the caller flattens and sorts it afterwards.
//
STATIC DECL_CODEINIT void BuildHoles(
    const MemoryZone zones[], uint_t uNumberOfZones,
    const MemoryHole holes[], uint_t uNumberOfHoles,
    MemoryHole newHoles[], uint_t *puNumberOfNewHoles)
{
    uint_t i;
    uint_t uNumberOfNewHoles = 0;

    // BUGFIX/guard: with zero zones the unsigned bound "uNumberOfZones - 1"
    // below would wrap around and the loop would run off zones[].
    assert(uNumberOfZones > 0);

    // Gap below the first zone.
    if (0 != zones[0].uBasePageNo) {
        newHoles[uNumberOfNewHoles].uBasePageNo = 0;
        newHoles[uNumberOfNewHoles].uLimitPageNo = zones[0].uBasePageNo;
        uNumberOfNewHoles++;
    }

    // Gaps between each pair of adjacent zones.
    for (i = 0; i < uNumberOfZones - 1; i++) {
        if (zones[i].uLimitPageNo == zones[i + 1].uBasePageNo) continue;

        newHoles[uNumberOfNewHoles].uBasePageNo = zones[i].uLimitPageNo;
        newHoles[uNumberOfNewHoles].uLimitPageNo = zones[i + 1].uBasePageNo;
        uNumberOfNewHoles++;
    }

    // Gap above the last zone, up to the maximum page number.
    assert(i == uNumberOfZones - 1);
    if (zones[i].uLimitPageNo < c_uMaxNumberOfPages) {
        newHoles[uNumberOfNewHoles].uBasePageNo = zones[i].uLimitPageNo;
        newHoles[uNumberOfNewHoles].uLimitPageNo = c_uMaxNumberOfPages;
        uNumberOfNewHoles++;
    }

    // Append the caller-supplied holes unmodified.
    for (i = 0; i < uNumberOfHoles; i++) {
        newHoles[uNumberOfNewHoles].uBasePageNo = holes[i].uBasePageNo;
        newHoles[uNumberOfNewHoles].uLimitPageNo = holes[i].uLimitPageNo;
        uNumberOfNewHoles++;
    }

    *puNumberOfNewHoles = uNumberOfNewHoles;
}

//
// Build free regions that included by zones but excludes by holes.
// Assume: The free regions' buffer is big enough, and zone/hole list have
//         been flattened and sorted and contain no empty regions.
//
//
// Build free regions that are included by zones but excluded by holes.
// Assume: The free regions' buffer is big enough, and the zone/hole lists
//         have been flattened and sorted and contain no empty regions.
//
// Implementation: a single forward sweep. uCurrPageNo is a page cursor;
// each iteration either emits a free span up to the next hole/zone limit
// or jumps the cursor over a hole, then re-synchronizes the zone index.
//
STATIC DECL_CODEINIT void BuildFreeRegions(
    const MemoryZone zones[], uint_t uNumberOfZones,
    const MemoryHole holes[], uint_t uNumberOfHoles,
    MemoryRegion freeRegions[], uint_t *puNumberOfFreeRegions)
{
    uint_t uCurrZoneNo = 0, uCurrHoleNo = 0, uCurrFreeRegionNo = 0;

    uint_t uCurrPageNo = zones[uCurrZoneNo].uBasePageNo;

    while (uCurrZoneNo < uNumberOfZones) {
        if (uCurrHoleNo >= uNumberOfHoles) {
            // No holes left: the rest of the current zone is free.
            if (SetFreeRegion(&freeRegions[uCurrFreeRegionNo], uCurrPageNo,
                        zones[uCurrZoneNo].uLimitPageNo)) {
                uCurrFreeRegionNo++;
            }

            uCurrPageNo = zones[uCurrZoneNo].uLimitPageNo;
        }
        else if (holes[uCurrHoleNo].uLimitPageNo <= uCurrPageNo) {
            // Current hole lies entirely behind the cursor: skip it.
            uCurrHoleNo++;
        }
        else if (holes[uCurrHoleNo].uBasePageNo > uCurrPageNo) {
            // Free space between the cursor and the next hole.
            if (zones[uCurrZoneNo].uLimitPageNo <=
                holes[uCurrHoleNo].uBasePageNo) {
                // The zone ends before the hole starts: free to zone end.
                if (SetFreeRegion(&freeRegions[uCurrFreeRegionNo], uCurrPageNo,
                        zones[uCurrZoneNo].uLimitPageNo)) {
                    uCurrFreeRegionNo++;
                }

                uCurrPageNo = zones[uCurrZoneNo].uLimitPageNo;
            }
            else {
                // Free up to the hole's base, then jump over the hole.
                if (SetFreeRegion(&freeRegions[uCurrFreeRegionNo], uCurrPageNo,
                        holes[uCurrHoleNo].uBasePageNo)) {
                    uCurrFreeRegionNo++;
                }

                uCurrPageNo = holes[uCurrHoleNo].uLimitPageNo;
                uCurrHoleNo++;
            }
        }
        else {
            // The cursor sits inside the hole: jump to the hole's end.
            uCurrPageNo = holes[uCurrHoleNo].uLimitPageNo;
            uCurrHoleNo++;
        }

        // Advance to the zone containing the cursor (a hole may span and
        // thus skip whole zones).
        while (uCurrPageNo >= zones[uCurrZoneNo].uLimitPageNo) {
            uCurrZoneNo++;
            if (uCurrZoneNo >= uNumberOfZones) break;

            uCurrPageNo = Max(uCurrPageNo, zones[uCurrZoneNo].uBasePageNo);
        }
    }

    *puNumberOfFreeRegions = uCurrFreeRegionNo;
}

//
// Hand every page in the given free regions to the page allocator.
// Returns FALSE as soon as any region fails to reclaim.
//
STATIC DECL_CODEINIT bool_t ReclaimFreeRegions(
    const MemoryRegion freeRegions[], uint_t uNumberOfFreeRegions)
{
    for (uint_t n = 0; n < uNumberOfFreeRegions; n++) {
        const MemoryRegion *pRegion = &freeRegions[n];
        if (!ReclaimPages(pRegion->uBasePageNo, pRegion->uLimitPageNo)) {
            return FALSE;
        }
    }

    return TRUE;
}

//
// KVMArea
//
//
// KVMArea
//
// A [base, limit) slice of the kernel virtual-memory window. Areas are
// linked into s_kvmAreaList and double as a VDataSource so an area can be
// remapped into a user process (see DzRemapDiscontinuousKernelPages).
//
class KVMArea : public DLinkNode, public VDataSource
{
public:
    KVMArea(virtaddr_t vaBase, virtaddr_t vaLimit)
        : m_vaBase(vaBase)
        , m_vaLimit(vaLimit)
    {
        // A valid area is non-empty.
        assert((uint_t)vaBase < (uint_t)vaLimit);
    }

    // VDataSource: page lookup is not provided by the base KVMArea.
    PPage * GetPage(UInt32 uOffset)
    {
        return NULL;
    }

    // VDataSource: nothing to write back.
    ECode Flush()
    {
        return E_NOT_IMPLEMENTED;
    }

public:
    virtaddr_t  m_vaBase;   // first virtual address of the area
    virtaddr_t  m_vaLimit;  // one past the last virtual address
};

// All allocated KVM areas, kept in ascending address order (KVMAlloc
// inserts in sorted position); guarded by s_kvmAreaListLock.
STATIC DLinkNode s_kvmAreaList;
#ifdef DEBUG_KMUTEX
STATIC KMutex s_kvmAreaListLock(__FILE__, __LINE__);
#else
STATIC KMutex s_kvmAreaListLock;
#endif //DEBUG_KMUTEX
// Bounds of the KVM allocation window [g_vaKVMAreaBase, c_vaKVMAreaLimit);
// the base is computed from the kernel mapping in InitKVMAreas().
virtaddr_t g_vaKVMAreaBase = 0;
#ifndef _neptune
virtaddr_t c_vaKVMAreaLimit = (virtaddr_t)0xef000000;
#else
virtaddr_t c_vaKVMAreaLimit = (virtaddr_t)0xe0000000;
#endif // _neptune

//
// Initialize the kernel virtual-memory (KVM) area allocator: set up the
// empty area list and place the allocation window just above the direct
// kernel mapping (fixed base on neptune). Returns FALSE when the kernel
// map already reaches the window's upper bound.
//
STATIC DECL_CODEINIT bool_t CDECL InitKVMAreas()
{
    s_kvmAreaList.Initialize();

    g_vaKVMAreaBase =
#ifndef _neptune
        // One page above the (page-rounded) top of the kernel mapping;
        // the extra PAGE_SIZE presumably acts as a guard page — confirm.
        (virtaddr_t)(RoundUp2((uint_t)g_vaKernelHighMemory, PAGE_SIZE)
            + PAGE_SIZE);
#else
        (virtaddr_t)0x80000000;
#endif // _neptune

    // No room left between the kernel map and the window limit.
    if (g_vaKVMAreaBase > c_vaKVMAreaLimit) {
        g_vaKVMAreaBase = c_vaKVMAreaLimit;
        kprintf("*ERROR* Too large kernel map: 0x%x\n", g_vaKernelHighMemory);
        return FALSE;
    }

    return TRUE;
}

//
// Allocate a size-byte area from the KVM window using first-fit over the
// address-sorted area list. Returns the new, already-linked KVMArea, or
// NULL when size is 0, not page-aligned, too large for the window, or no
// gap fits. Caller must hold s_kvmAreaListLock.
//
STATIC KVMArea * KVMAlloc(size_t size)
{
    assert(s_kvmAreaListLock.IsHeld());

    if (0 == size) return NULL;

    // The size must be page-aligned and must fit in the window at all.
    if (PAGE_OFFSET(size)
            || (size > (size_t)c_vaKVMAreaLimit - (size_t)g_vaKVMAreaBase)) {
        return NULL;
    }

    // Scan the gap before each existing area for the first one that fits.
    virtaddr_t vaBase = g_vaKVMAreaBase;
    register KVMArea *pNextKVMArea;
    ForEachDLinkNode(KVMArea *, pNextKVMArea, &s_kvmAreaList) {
        if ((size_t)vaBase + size <= (size_t)pNextKVMArea->m_vaBase) {
            break;
        }

        vaBase = pNextKVMArea->m_vaLimit;

        // Remaining window space can no longer hold the request.
        if (size > (size_t)c_vaKVMAreaLimit - (size_t)vaBase) {
            return NULL;
        }
    }

    // On break, the new area is inserted just before the area we stopped
    // at, keeping the list sorted. When the loop runs to completion,
    // pNextKVMArea presumably refers back to the list head so InsertPrev()
    // appends at the tail — verify against the ForEachDLinkNode macro.
    KVMArea *pNewKVMArea =
            new KVMArea(vaBase, (virtaddr_t)((size_t)vaBase + size));
    if (NULL != pNewKVMArea) {
        pNextKVMArea->InsertPrev(pNewKVMArea);
    }

    return pNewKVMArea;
}

//
// Find the KVM area whose base address is exactly vaBase; NULL when no
// such area exists. Caller must hold s_kvmAreaListLock.
//
STATIC KVMArea * KVMFind(virtaddr_t vaBase)
{
    assert(s_kvmAreaListLock.IsHeld());

    register KVMArea *pArea;
    ForEachDLinkNode(KVMArea *, pArea, &s_kvmAreaList) {
        if (vaBase == pArea->m_vaBase) {
            return pArea;
        }
    }

    return NULL;
}

//
// Unlink a KVM area from s_kvmAreaList and destroy it; NULL is a no-op.
// Caller must hold s_kvmAreaListLock.
//
STATIC void KVMFree(KVMArea * pKVMArea)
{
    assert(s_kvmAreaListLock.IsHeld());

    if (NULL == pKVMArea) return;

    pKVMArea->Detach();
    delete pKVMArea;
}

#ifndef _RELEASE

#include <util/kprint.h>

//
// Debug dump of every KVM area and the physical page runs in its cache.
// Takes the area-list lock for the duration of the dump.
//
void KVMDump()
{
    s_kvmAreaListLock.Lock();

    kprintf("\n>>> KVM Dump >>>\n"
            "+- [virtual memory area)                        pages\n"
            "|                   +- [physical memory area)\n");

    KVMArea *pKVMArea;
    PPage   *pPage;

    ForEachDLinkNode(KVMArea *, pKVMArea, &s_kvmAreaList) {
        // Virtual range of the area and its size in pages.
        kprintf("+- [%08x, %08x)                         %d\n",
            (uint_t)pKVMArea->m_vaBase, (uint_t)pKVMArea->m_vaLimit,
            ((uint_t)pKVMArea->m_vaLimit
                - (uint_t)pKVMArea->m_vaBase) / PAGE_SIZE);

        // Physical runs backing the area; each PPage covers a block of
        // 2^m_uOrder pages starting at m_uAddr.
        ForEachDLinkNode(PPage *, pPage, &pKVMArea->m_cache.m_pagesList) {
            kprintf("|                   +- [%08x, %08x)         %d\n",
                (uint_t)pPage->m_uAddr,
                (uint_t)pPage->m_uAddr + (PAGE_SIZE << pPage->m_uOrder),
                (1u << pPage->m_uOrder));
        }
    }

    kprintf("+-------------------------------------------------------\n\n");

    s_kvmAreaListLock.Unlock();
}

#endif // _RELEASE

//
// Propagate the current global (kernel) mappings into the HAT of every
// existing process so a kernel-map change becomes visible system-wide.
//
Void UpdateExistingHAT()
{
    CProcess::s_processListLock.Lock();

    // Walk the process list with preemption disabled so the traversal is
    // not interrupted.
    bool_t bPreemption = DzDisablePreemption();

    CProcess *pProcess;
    ForEachDLinkNode(CProcess *, pProcess, &(CProcess::s_processList)) {
        pProcess->m_AS.m_pHat->SyncGlobalHAT();
    }

    DzRestorePreemption(bPreemption);
    CProcess::s_processListLock.Unlock();
}

//
// Map a physical (device) memory range into the kernel virtual window.
// Returns the kernel virtual base address, or NULL on bad arguments or
// when no KVM space / mapping resources are available.
//
EXTERN virtaddr_t DzKIoRemap(physaddr_t paBase, size_t size, uint_t protect)
{
    if (0 == size) return NULL;

    // Reject ranges whose end wraps around the address space.
    if (0 != paBase + size && paBase >= paBase + size) return NULL;

//    if (!IsMemoryMappedIo(paBase, size)) return NULL;

    // The physical base must be page-aligned.
    if (PAGE_OFFSET(paBase)) return NULL;

    virtaddr_t vaResult = NULL;

    s_kvmAreaListLock.Lock();

    KVMArea *pArea = KVMAlloc(size);
    if (NULL != pArea) {
        if (HAT::LoadKernelMap(
                (uint_t)pArea->m_vaBase, (uint_t)pArea->m_vaLimit,
                (uint_t)paBase, MemoryProtection_RW | protect)) {
            vaResult = pArea->m_vaBase;
        }
        else {
            // Mapping failed: give the virtual range back.
            KVMFree(pArea);
        }
    }

    s_kvmAreaListLock.Unlock();

    // Only a successful mapping needs to be pushed to existing processes.
    if (NULL != vaResult) {
        UpdateExistingHAT();
    }

    return vaResult;
}

//
// Tear down a mapping created by DzKIoRemap. Addresses that do not match
// any KVM area are silently ignored.
//
EXTERN void DzKIoUnmap(virtaddr_t vaBase)
{
    bool_t bFound = FALSE;

    s_kvmAreaListLock.Lock();

    KVMArea *pArea = KVMFind(vaBase);
    if (NULL != pArea) {
        HAT::UnloadKernelMap(
                (uint_t)pArea->m_vaBase, (uint_t)pArea->m_vaLimit);
        KVMFree(pArea);
        bFound = TRUE;
    }

    s_kvmAreaListLock.Unlock();

    // Propagate the unmap only when something actually changed.
    if (bFound) {
        UpdateExistingHAT();
    }
}

//
// Allocate uPages kernel pages that need not be physically contiguous:
// reserve a contiguous virtual KVM area, allocate backing pages into the
// area's cache, then map each physically-contiguous run into the range.
// Returns the virtual base address, or NULL on failure.
//
EXTERN virtaddr_t DzAllocDiscontinuousKernelPages(uint_t uPages, uint_t protect)
{
    if (0 == uPages) return NULL;

    // The request cannot possibly fit in the KVM window.
    if (uPages > B_TO_P((uint_t)c_vaKVMAreaLimit - (uint_t)g_vaKVMAreaBase)) {
        return NULL;
    }

    s_kvmAreaListLock.Lock();

    ECode ec;
    virtaddr_t vaBase;
    uint_t uSize;
    KVMArea *pNewKVMArea = NULL;
    PPage * pPage;

    // Reserve the contiguous virtual range.
    pNewKVMArea = KVMAlloc((size_t)P_TO_B(uPages));
    if (NULL == pNewKVMArea) {
        goto ErrorExit;
    }

    // Allocate the backing physical pages into the area's cache.
    ec = pNewKVMArea->m_cache.AllocPages(0, uPages);
    if (FAILED(ec)) {
        goto ErrorExit;
    }

    vaBase = pNewKVMArea->m_vaBase;

    // Map each physically-contiguous run at the next virtual offset.
    ForEachDLinkNode(PPage *, pPage, &pNewKVMArea->m_cache.m_pagesList) {
        uSize = pPage->m_uSize;
        if (!HAT::LoadKernelMap((uint_t)vaBase, (uint_t)vaBase + uSize,
                pPage->m_uAddr, protect)) {
            goto ErrorExit;
        }
        vaBase = (virtaddr_t)((uint_t)vaBase + uSize);
    }

    vaBase = pNewKVMArea->m_vaBase;

    s_kvmAreaListLock.Unlock();

    UpdateExistingHAT();

    return vaBase;

ErrorExit:
    // NOTE(review): this unloads the whole area even when only part (or
    // none) of it was ever mapped — presumably UnloadKernelMap tolerates
    // unmapped ranges; confirm.
    if (NULL != pNewKVMArea) {
        HAT::UnloadKernelMap(
                (uint_t)pNewKVMArea->m_vaBase, (uint_t)pNewKVMArea->m_vaLimit);
        KVMFree(pNewKVMArea);
    }

    s_kvmAreaListLock.Unlock();

    return NULL;
}

//
// Release an allocation made by DzAllocDiscontinuousKernelPages: unmap
// the virtual range and destroy its KVM area (the area's cache pages go
// away with it). Unknown addresses are silently ignored.
//
EXTERN void DzFreeDiscontinuousKernelPages(virtaddr_t va)
{
    bool_t bFound = FALSE;

    s_kvmAreaListLock.Lock();

    KVMArea *pArea = KVMFind(va);
    if (NULL != pArea) {
        HAT::UnloadKernelMap(
                (uint_t)pArea->m_vaBase, (uint_t)pArea->m_vaLimit);
        KVMFree(pArea);
        bFound = TRUE;
    }

    s_kvmAreaListLock.Unlock();

    // Propagate the unmap only when something actually changed.
    if (bFound) {
        UpdateExistingHAT();
    }
}

//
// Free a run of adjacent KVM areas covering uPages pages starting at va
// (separate allocations that happen to be virtually contiguous). Returns
// FALSE when va does not start an area or the areas do not line up with
// the requested page count.
//
EXTERN bool_t DzFreeCoalesedDiscontinuousKernelPages(virtaddr_t va, uint_t uPages)
{
    size_t RegionSize;
    s_kvmAreaListLock.Lock();

    while (uPages) {

        KVMArea *pKVMArea = KVMFind(va);
        if (NULL == pKVMArea) {
            s_kvmAreaListLock.Unlock();
            return FALSE;
        }

        RegionSize = (uint_t)pKVMArea->m_vaLimit - (uint_t)pKVMArea->m_vaBase;

        if (pKVMArea->m_vaBase != va || (RegionSize >> PAGE_SHIFT) > uPages) {
            assert(0);
            // BUGFIX: the original returned here while still holding
            // s_kvmAreaListLock, deadlocking every later KVM operation.
            s_kvmAreaListLock.Unlock();
            return FALSE;
        }

        HAT::UnloadKernelMap(
                (uint_t)pKVMArea->m_vaBase, (uint_t)pKVMArea->m_vaLimit);

        KVMFree(pKVMArea);

        // Step past the area just freed.
        va = (UInt8*)va + RegionSize;
        uPages -= RegionSize >> PAGE_SHIFT;
    }

    s_kvmAreaListLock.Unlock();

    UpdateExistingHAT();
    return TRUE;
}

//
// Map an existing KVM area (created by DzAllocDiscontinuousKernelPages)
// into the current process' address space as a shared mapping, using the
// area itself as the VDataSource. Returns the user virtual address, or
// NULL when va matches no area or the mapping fails.
//
EXTERN virtaddr_t DzRemapDiscontinuousKernelPages(
    virtaddr_t va, uint_t uPages, int_t protect)
{
    assert(::GetCurrentProcess());

    virtaddr_t vaResult = NULL;

    s_kvmAreaListLock.Lock();

    KVMArea *pArea = KVMFind(va);
    if (NULL != pArea) {
        uint_t uSize = P_TO_B(uPages);
        // The caller's page count must describe the whole area.
        assert(uSize == (uint_t)pArea->m_vaLimit - (uint_t)pArea->m_vaBase);

        Address uvaddr;
        ECode ec = ::GetCurrentProcess()->m_AS.Map(0, uSize, MemoryMap_Shared,
            protect, (VDataSource *)pArea, 0, &uvaddr);
        if (SUCCEEDED(ec)) {
            vaResult = (virtaddr_t)uvaddr;
        }
    }

    s_kvmAreaListLock.Unlock();
    return vaResult;
}

//
// Undo DzRemapDiscontinuousKernelPages: remove the shared mapping from the
// current process' address space.
//
EXTERN void DzUnmapDiscontinuousKernelPages(virtaddr_t va, uint_t uPages)
{
    size_t size = P_TO_B(uPages);
    DzMemoryUnmap((Address)va, size);
}

#ifdef _neptune
//
// Walk the initial page directory and rewrite the AP (access permission)
// and cache/buffer bits of every present page-table entry in the virtual
// range [vBase, vLimit): user read/write when bUserAccess, otherwise no
// user access. Flushes caches and all TLBs afterwards.
//
static void SetProtect(uint_t vBase, uint_t vLimit, Int32 bUserAccess)
{
    dword_t dwProt;
    PageTabEntry *pPageTab;
    register uint_t m, n;

    if (bUserAccess) {
        dwProt = PAGETABENTRY_AP_WR | PAGETABENTRY_C | PAGETABENTRY_B;
    } else {
        dwProt = PAGETABENTRY_AP_NA | PAGETABENTRY_C | PAGETABENTRY_B;
    }

    // Bits cleared from each entry before OR-ing in the new protection.
    const dword_t c_dwProtMask =
            ~(PAGETABENTRY_AP_MASK | PAGETABENTRY_C | PAGETABENTRY_B);

    while (vBase < vLimit) {
        pPageTab = (PageTabEntry *)g_initPageDir[PAGEDIR_INDEX(vBase)].uValue;

        if (pPageTab) {
            // Convert the descriptor to a virtual pointer and strip its
            // low-order flag bits to get the page-table base.
            pPageTab = (PageTabEntry *)((uint_t)__VA(pPageTab)
                    & COARSE_PAGETAB_BASEADDR_MASK);

            // Upper bound of entries to touch within this page table.
            // NOTE(review): the bound uses c_cPageTabEntries while the
            // assert below uses c_uNumberOfPageTabEntries — confirm the
            // two constants agree.
            m = Min(c_cPageTabEntries,
                (vLimit - (vBase & PAGEDIR_MASK)) >> PAGE_SHIFT);
            for (n = PAGETAB_INDEX(vBase); n < m; n++) {
                assert(c_uNumberOfPageTabEntries > n);
                // Only rewrite entries that are actually present.
                if (0 != pPageTab[n].uValue) {
                    pPageTab[n].uValue =
                        (pPageTab[n].uValue & c_dwProtMask) | dwProt;
                }
                vBase += PAGE_SIZE;
            }
        }
        else {
            // No page table here: skip to the next page-directory slot.
            vBase = (vBase + (~PAGEDIR_MASK + 1)) & PAGEDIR_MASK;
        }
    }

    FlushCache();
    FlushAllTlbs();
}

//
// Free `size` bytes of DMA buffer at `ptr` back to the kernel page
// allocator, greedily splitting the range into the largest power-of-two
// page blocks the current pointer's alignment allows, then restore the
// whole range's protection to kernel-only.
// NOTE(review): ptr is declared physaddr_t but is passed to
// DzFreeKernelPages and SetProtect like a virtual address — presumably the
// range lies in the direct kernel map; confirm.
//
void Nu_DmaBufFree(physaddr_t ptr, size_t size)
{
    size_t nSize = AlignPageUp(size);
    size_t uSize = nSize;               // page-rounded total, for SetProtect
    UInt32 pageOrder = c_uMaxOrder;
    UInt32 ptrOffsetOrder;
    UInt32 uPtrHead = (UInt32)ptr;      // remember the original base


    // Peel blocks off, driven by the set bits of the remaining size, from
    // the largest order downwards.
    while (nSize != 0) {
        if (nSize & (1 << (pageOrder + PAGE_SHIFT))) {
            // Largest order the pointer's alignment permits (<= pageOrder).
            ptrOffsetOrder = pageOrder;
            while (((size_t)ptr)
                    & ((1 << (ptrOffsetOrder + PAGE_SHIFT)) - 1)) {
                ptrOffsetOrder--;
            }
            DzFreeKernelPages((void*)ptr, ptrOffsetOrder);
            ptr = (physaddr_t)ptr + (1 << (ptrOffsetOrder + PAGE_SHIFT));
            nSize -= 1 << (ptrOffsetOrder + PAGE_SHIFT);
            // If alignment forced a smaller block, retry the same size bit.
            if (ptrOffsetOrder != pageOrder) continue;
        }
        pageOrder--;
    }

    // Freed pages revert to kernel-only protection.
    SetProtect(uPtrHead, uPtrHead + uSize, FALSE);
    return;
}

//
// Allocate a DMA-capable buffer of at least `size` bytes from the kernel
// page allocator (rounded up to a power-of-two block), set its protection
// according to bUserAccess, and immediately free the surplus tail beyond
// the page-aligned request. Returns NULL when allocation fails.
//
void *Nu_DmaBufAlloc(size_t size, Int32 bUserAccess)
{
    uint_t uOrder;
    void *pBuf = NULL;
    physaddr_t ptr;
    uint_t uLimit = AlignPageUp(size);//B_TO_P(size + PAGE_SIZE) * PAGE_SIZE;

    // Smallest power-of-two page order that covers the request.
    uOrder = GET_UP_ORDER(B_TO_P(AlignPageUp(size)));

    pBuf = (void *) DzAllocKernelPages(uOrder);

    if (!pBuf) {
        kprintf("Not Enough Memory!\n");
        return NULL;
    }

    //set protect; the limit must be page aligned
    SetProtect((uint_t)pBuf, (uint_t)((physaddr_t)pBuf + uLimit), bUserAccess);

    // reclaim the surplus part of the block beyond the requested size.
    // such as the order is 7, it will alloc a buf about 512KB.
    // But the display driver use memory about 384KB,
    // we will reclaim the last 128KB.
    ptr = (physaddr_t) ((physaddr_t)pBuf + uLimit);
    size = P_TO_B(1 << uOrder) - uLimit;
    Nu_DmaBufFree(ptr, size);

    return pBuf;
}
#endif

