//==========================================================================
// Copyright (c) 2000-2008,  Elastos, Inc.  All Rights Reserved.
//==========================================================================

#include <mantle.h>

// Memory-pressure state flags, defined here and exported with C linkage
// so that C code elsewhere in the kernel can test them.
// They are maintained by CheckAndRestoreHeapGrowThreshold() below and
// mirror whether the corresponding system events are currently raised.
extern "C" {
    Boolean g_bIsLowMemory = FALSE;
    Boolean g_bIsCriticallyLowMemory = FALSE;
}

inline void CheckAndRestoreHeapGrowThreshold()
{
    Int32 nFreePages = (Int32)GetNumberOfFreePages();

    if (g_UserHeapGrowThreshold != g_HeapGrow1stThreshold
        && nFreePages > g_RestoreHeapGrowThreshold) {
        g_UserHeapGrowThreshold = g_HeapGrow1stThreshold;
        kputs("\n--------------------------------------------------------");
        kputs("\n>>>> *NOTE* Restore heap Grow threshold to 1st threshold");
        kputs("\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n");
    }

    if (nFreePages > g_LowMemoryThreshold) {
        if (g_bIsLowMemory) {
            ClearSystemEvent(SystemEvent_LowMemory);
            kputs("\n-----------------------------------------");
            kputs("\n>>>> *NOTE* Clear low memory system event");
            kputs("\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n");
            g_bIsLowMemory = FALSE;
        }
    }
    else {
        if (!g_bIsLowMemory) {
            TriggerSystemEvent(SystemEvent_LowMemory);
            kputs("\n-------------------------------------------");
            kputs("\n>>>> *NOTE* Trigger low memory system event");
            kputs("\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n");
            g_bIsLowMemory = TRUE;
        }
    }

    if (nFreePages > g_CriticallyLowMemoryThreshold) {
        if (g_bIsCriticallyLowMemory) {
            ClearSystemEvent(SystemEvent_CriticallyLowMemory);
            kputs("\n----------------------------------------------------");
            kputs("\n>>>> *NOTE* Clear critically low memory system event");
            kputs("\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n");
            g_bIsCriticallyLowMemory = FALSE;
        }
    }
    else {
        if (!g_bIsCriticallyLowMemory) {
            TriggerSystemEvent(SystemEvent_CriticallyLowMemory);
            kputs("\n------------------------------------------------------");
            kputs("\n>>>> *NOTE* Trigger critically low memory system event");
            kputs("\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n");
            g_bIsCriticallyLowMemory = TRUE;
        }
    }
}

// Destructor: tear down the hardware mapping covering the whole segment,
// then release the privately owned page container (if any).
StdSegment::~StdSegment()
{
    m_pOwner->m_pHat->UnloadMap(m_uBase, m_uBase + m_uSize);
    // delete on a NULL pointer is a no-op, so no explicit check is needed.
    delete m_pPrivCache;
}

// Establish page-table mappings for every page of pCache whose virtual
// address falls inside [uMapBase, uMapBase + uMapSize) of this segment.
//
// @param pCache    page container holding the physical pages to map.
// @param uMapBase  first virtual address of the window to map.
// @param uMapSize  size in bytes of the window to map.
// @return NOERROR on success, E_OUT_OF_MEMORY if the HAT cannot load a map.
//
// Up-growing segments are walked from the last page backwards; all others
// forwards.  For growable segments (up or down) the walk stops at the first
// page outside the window, since their pages are kept contiguous from the
// growth end.
ECode StdSegment::MapCache(PageContainer *pCache, UInt32 uMapBase, UInt32 uMapSize)
{
    UInt32 vBase;
    HAT * pHat;
    PPage * pPage;

    pHat = m_pOwner->m_pHat;

    // Choose the starting page according to the growth direction.
    if (m_dwFlags & MemoryMap_UpGrow) {
        pPage = (PPage*)pCache->m_pagesList.m_pPrev;
    }
    else {
        pPage = (PPage*)pCache->m_pagesList.m_pNext;
    }

    // Iterate until the walk wraps back to the list head sentinel.
    while (pPage != (PPage*)&pCache->m_pagesList) {
        // Translate the page's cache offset into a virtual address.
        vBase = pPage->m_uOffset - m_uOffset + m_uBase;
#if 0
        assert(vBase >= m_uBase && vBase + pPage->m_uSize <= m_uBase + m_uSize);
        if FALL_INTO(vBase, uMapBase, uMapSize) {
            if (FALSE == pHat->LoadMap(vBase, vBase + pPage->m_uSize,
                pPage->m_uAddr, m_dwProtect, FALSE)) return E_OUT_OF_MEMORY;
        }
#else
        // TODO: To be remove when load module from user address space.
        // Because of export table and import table need continue memory.
        // e.g. export table need 0x5000, it will alloc 0x8000, but we only
        // need to map 0x5000!
        assert(vBase >= m_uBase);
        // Clamp the mapping at the segment end: the page may extend past it
        // (see TODO above about over-allocation for module tables).
        if FALL_INTO(vBase, uMapBase, uMapSize) {
            if (FALSE == pHat->LoadMap(vBase, \
                                 MIN(vBase + pPage->m_uSize, m_uBase + m_uSize),
                pPage->m_uAddr, m_dwProtect, FALSE)) return E_OUT_OF_MEMORY;
        }
#endif
        else if (m_dwFlags & (MemoryMap_UpGrow | MemoryMap_DownGrow)) {
            break;
        }

        // Advance in the same direction the walk started in.
        if (m_dwFlags & MemoryMap_UpGrow) {
            pPage = (PPage*)pPage->m_pPrev;
        }
        else {
            pPage = (PPage*)pPage->m_pNext;
        }
    }

#if defined(_arm920) || defined(_xscale) || defined(_arm926) || defined(_arm11)
    // ARM variants with virtually-tagged caches need an explicit flush
    // after changing the mappings.
    FlushCache();
#endif

    return NOERROR;
}

// Set up the initial mapping for this segment.
//
// With a data source, the source's page cache is mapped directly for
// shared mappings, or cloned into a private cache first for private ones.
// Without a data source the segment must be private and starts with an
// empty page container (pages are committed lazily on fault).
//
// @return NOERROR on success, E_OUT_OF_MEMORY on allocation failure.
ECode StdSegment::InitMap()
{
    if (m_pDataSource) {
        PageContainer *pCache;

        if (m_dwFlags & MemoryMap_Private) {
            // Private mapping: work on our own copy of the source cache.
            m_pPrivCache = m_pDataSource->m_cache.Clone();
            if (NULL == m_pPrivCache) {
                return E_OUT_OF_MEMORY;
            }
            pCache = m_pPrivCache;
        }
        else {
            // Shared mapping: map the source cache in place.
            pCache = &(m_pDataSource->m_cache);
        }
        return this->MapCache(pCache, m_uBase, m_uSize);
    }

    assert(m_dwFlags & MemoryMap_Private);

    m_pPrivCache = new PageContainer;
    return m_pPrivCache ? NOERROR : E_OUT_OF_MEMORY;
}

// Give this process its own private copy of the memory block
// [uAddr, uAddr + uSize), copy-on-write style: allocate private pages for
// the parts of the block not already privately backed, copy the current
// contents from the address space, and remap.  Shared-writable and
// already-private segments need no cloning and return immediately.
//
// @param uAddr  start virtual address of the block (rounded down to a page).
// @param uSize  size in bytes of the block (end is rounded up to a page).
// @return NOERROR on success, E_OUT_OF_MEMORY if page allocation fails;
//         on failure all pages allocated here are freed and the original
//         pages are put back.
ECode StdSegment::CloneMemBlockForProcess(UInt32 uAddr, UInt32 uSize)
{
    // Private mappings already have their own pages; writable shared
    // mappings are intentionally shared — nothing to clone either way.
    if ((m_dwFlags & MemoryMap_Private) ||
            (m_dwProtect & MemoryProtection_Write)) {
        return NOERROR;
    }

    assert(uAddr < uAddr + uSize);
    assert((m_dwFlags & MemoryMap_Shared) && m_pDataSource);

    // Lazily create the private cache that will own the cloned pages.
    if (!m_pPrivCache) {
        m_pPrivCache = new PageContainer;
        if (!m_pPrivCache) {
            return E_OUT_OF_MEMORY;
        }
    }

    // Make the memory block page-aligned
    //
    UInt32 uLimit = RoundUp2(uAddr + uSize, PAGE_SIZE);
    uAddr = RoundDown2(uAddr, PAGE_SIZE);

    // Convert the aligned virtual range into cache offsets.
    UInt32 uStartPageOffset = m_uOffset + uAddr - m_uBase;
    UInt32 uEndPageOffset = m_uOffset + uLimit - m_uBase;
    UInt32 uOffset = uStartPageOffset;

    ECode ec = NOERROR;
    // Pages already private that fall wholly inside the block are parked
    // here during the rebuild and put back at the end.
    DLinkNode oldPageList;
    oldPageList.Initialize();

    // Search the memory gaps
    //
    PPage * pPage = (PPage *)m_pPrivCache->m_pagesList.First();
    PPage * pNextPage;
    while ((DLinkNode *)pPage != &m_pPrivCache->m_pagesList) {
        pNextPage = (PPage *)pPage->Next();

        // Make the memory block PPage-aligned
        //
        // If the block's start lands inside an existing private page,
        // advance the start past that page (it is already private).
        if (FALL_INTO(uStartPageOffset, pPage->m_uOffset, pPage->m_uSize)) {
            uStartPageOffset = pPage->m_uOffset + pPage->m_uSize;
        }

        // Likewise pull the end back to exclude a partially covering page.
        if (FALL_INTO(uEndPageOffset - 1, pPage->m_uOffset, pPage->m_uSize)) {
            uEndPageOffset = pPage->m_uOffset;
        }

        // If some PPage contains the memory block
        //
        if ((pPage->m_uOffset <= uStartPageOffset)
                && (uEndPageOffset <= pPage->m_uOffset + pPage->m_uSize)) {
            assert(oldPageList.IsEmpty());
            return NOERROR;
        }

        // If some PPage falls into the memory block
        //
        if ((uStartPageOffset <= pPage->m_uOffset)
                && (pPage->m_uOffset + pPage->m_uSize <= uEndPageOffset)) {
            PAGE_ASSERT(pPage);
            pPage->Detach();
            oldPageList.InsertLast(pPage);
        }

        assert(uStartPageOffset < uEndPageOffset);

        pPage = pNextPage;
    }

    // Allocate physical pages for the memory gaps
    //
    // Walk the parked pages in offset order and fill each gap before them.
    uOffset = uStartPageOffset;
    ForEachDLinkNode(PPage *, pPage, &oldPageList) {
        if (uOffset < pPage->m_uOffset) {
            uSize = pPage->m_uOffset - uOffset;
            assert(IsAlignment2(uSize, PAGE_SIZE));
            ec = m_pPrivCache->AllocPages(uOffset, uSize / PAGE_SIZE);
            if (FAILED(ec)) {
                goto Exit;
            }
        }

        uOffset = pPage->m_uOffset + pPage->m_uSize;
    }

    // Fill the tail gap after the last parked page, if any.
    if (uOffset < uEndPageOffset) {
        uSize = uEndPageOffset- uOffset;
        assert(IsAlignment2(uSize, PAGE_SIZE));
        ec = m_pPrivCache->AllocPages(uOffset, uSize / PAGE_SIZE);
        if (FAILED(ec)) {
            goto Exit;
        }
    }
    else {
        assert(uOffset == uEndPageOffset);
    }

    // Fill the new allocated pages with the datas from current address space,
    // and reestablish the mapping
    //
    pPage = (PPage *)m_pPrivCache->m_pagesList.First();
    while ((DLinkNode *)pPage != &m_pPrivCache->m_pagesList) {
        if (FALL_INTO(pPage->m_uOffset, uStartPageOffset, uEndPageOffset)) {
            pPage->MemCopy((void *)(pPage->m_uOffset - m_uOffset + m_uBase));
        }
        else if (pPage->m_uOffset >= uEndPageOffset) {
            break;
        }
        pPage = (PPage *)pPage->Next();
    }
    uAddr = uStartPageOffset - m_uOffset + m_uBase;
    uSize = uEndPageOffset - uStartPageOffset;
    this->MapCache(m_pPrivCache, uAddr, uSize);
    FlushAllTlbs();

Exit:
    if (FAILED(ec)) {
        // Free the allocated physical pages
        //
        pPage = (PPage *)m_pPrivCache->m_pagesList.First();
        while ((DLinkNode *)pPage != &m_pPrivCache->m_pagesList) {
            pNextPage = (PPage *)pPage->Next();
            if (FALL_INTO(pPage->m_uOffset, uStartPageOffset, uEndPageOffset)) {
                pPage->Detach();
                DzFreePages(pPage->m_uAddr, pPage->m_uOrder);
                delete pPage;
            }
            else if (pPage->m_uOffset >= uEndPageOffset) {
                break;
            }
            pPage = pNextPage;
        }
    }

    // Put back the old pages
    //
    pPage = (PPage *)oldPageList.First();
    while ((DLinkNode *)pPage != &oldPageList) {
        pPage->Detach();
        m_pPrivCache->PutPage(pPage);
        pPage = (PPage *)oldPageList.First();
    }

    return ec;
}

// Change the protection of the whole segment: remember the new rights
// and push them into the page tables for [m_uBase, m_uBase + m_uSize).
ECode StdSegment::SetProtect(UInt32 dwProtect)
{
    HAT *pHat = m_pOwner->m_pHat;

    m_dwProtect = dwProtect;
    pHat->SetProtect(m_uBase, m_uBase + m_uSize, dwProtect);
    return NOERROR;
}

// Check whether this segment already grants every access right requested
// in 'protect' (read, write, exec, and direct io-remap).  For each right:
// if it is requested but not present in m_dwProtect, the check fails.
Boolean StdSegment::CheckProtect(UInt32 protect)
{
    if ((protect & MemoryProtection_Read)
            && !(m_dwProtect & MemoryProtection_Read)) {
        return FALSE;
    }
    if ((protect & MemoryProtection_Write)
            && !(m_dwProtect & MemoryProtection_Write)) {
        return FALSE;
    }
    if ((protect & MemoryProtection_Exec)
            && !(m_dwProtect & MemoryProtection_Exec)) {
        return FALSE;
    }
    if ((protect & IoRemapFlag_Direct)
            && !(m_dwProtect & IoRemapFlag_Direct)) {
        return FALSE;
    }
    return TRUE;
}

// A segment commits pages automatically on fault when it is growable
// (heap up-grow or stack down-grow) or backed by a data source.
Boolean StdSegment::IsAutoCommit()
{
    if (m_dwFlags & (MemoryMap_UpGrow | MemoryMap_DownGrow)) {
        return TRUE;
    }
    return m_pDataSource != NULL;
}

// Page-fault handler for this segment: commit and map the physical memory
// backing uAddr, which must lie inside [m_uBase, m_uBase + m_uSize).
//
// Fast path: if the faulting address is already covered by a page in the
// private cache, return NOERROR without touching the page tables.
// Otherwise allocate (or fetch from the data source) the needed pages,
// checking the free-page thresholds first, and map them.
//
// @param uAddr  faulting virtual address inside this segment.
// @return NOERROR on success, E_OUT_OF_MEMORY when thresholds or allocation
//         fail, E_FAIL when the data source cannot supply the page.
ECode StdSegment::Hit(UInt32 uAddr)
{
    assert(FALL_INTO(uAddr, m_uBase, m_uSize));

    PPage *pPage, *pPageList;
    uint_t cPage, uOffset, uSize, uBase;
    ECode ec;

    // Already-committed check against the private cache.
    if (m_pPrivCache && !m_pPrivCache->m_pagesList.IsEmpty()) {
        if (m_dwFlags & MemoryMap_UpGrow) {
            // Up-growing: committed region runs from m_uBase to the end of
            // the last page in the list.
            pPage = (PPage *)m_pPrivCache->m_pagesList.Last();
            if (uAddr >= m_uBase
                && uAddr < pPage->m_uOffset - m_uOffset \
                + m_uBase + pPage->m_uSize) {
                return NOERROR;
            }
        }
        else if (m_dwFlags & MemoryMap_DownGrow) {
            // Down-growing: committed region runs from the first page's
            // start to the segment end.
            if (uAddr >= ((PPage *)m_pPrivCache->m_pagesList.First()) \
                ->m_uOffset - m_uOffset + m_uBase
                && uAddr < m_uBase + m_uSize) {
                return NOERROR;
            }
        }
        else {
            // Non-growable: pages may be sparse; scan for a covering page.
            ForEachDLinkNode(PPage *, pPage, &(m_pPrivCache->m_pagesList)) {
                if FALL_INTO(uAddr, \
                        pPage->m_uOffset - m_uOffset + m_uBase, pPage->m_uSize){
                    return NOERROR;
                }
            }
        }
    }

    if (!m_pDataSource) {
        // Anonymous segment: commit fresh pages from the free pool.
        if (!m_pPrivCache) {
            m_pPrivCache = new PageContainer();
            if (!m_pPrivCache) return E_OUT_OF_MEMORY;
        }
        pPageList = (PPage *)&(m_pPrivCache->m_pagesList);

        if (m_dwFlags & MemoryMap_UpGrow) {
            // Grow the heap upward: commit from the end of the last page
            // (or the segment start when the list is empty) up to uAddr.
            if (pPageList == (PPage*)pPageList->First()) {
                uOffset = m_uOffset;
            }
            else {
                uOffset = ((PPage*)pPageList->Last())->m_uOffset
                    + ((PPage*)pPageList->Last())->m_uSize;
            }

            uBase = uOffset - m_uOffset + m_uBase;
            // B_TO_P: bytes-to-pages — presumably rounds up; TODO confirm.
            cPage = B_TO_P(uAddr - uBase + 1);

            // Refuse to grow below the user heap-grow threshold.
            if (g_UserHeapGrowThreshold > (Int32)(GetNumberOfFreePages() - cPage)) {
                kputs("\n-----------------------------------------------------");
                kputs("\n>>>> *ERROR* Not enough memory for segment up-Growing");
                kputs("\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n");
                return E_OUT_OF_MEMORY;
            }

            ec = m_pPrivCache->AllocPages(uOffset, cPage);
            if (FAILED(ec)) return ec;

            CheckAndRestoreHeapGrowThreshold();
            return this->MapCache(m_pPrivCache, uBase, uAddr - uBase + 1);
        }
        else if (m_dwFlags & MemoryMap_DownGrow) {
            // Grow the stack downward: commit from the faulting page down
            // to the first committed page (or the segment end when empty).
            uBase = AlignPageDown(uAddr);
            if (pPageList == (PPage*)pPageList->First()) {
                uSize = m_uBase + m_uSize - uBase;
                uOffset = uBase - m_uBase + m_uOffset;
            }
            else {
                uSize = ((PPage*)pPageList->First())->m_uOffset - m_uOffset
                    + m_uBase - uBase;
                uOffset = uBase - m_uBase + m_uOffset;
            }
            cPage = B_TO_P(uSize);

            // Refuse to grow below the user stack-grow threshold.
            if (g_UserStackGrowThreshold > (Int32)(GetNumberOfFreePages() - cPage)) {
                kputs("\n-------------------------------------------------------");
                kputs("\n>>>> *ERROR* Not enough memory for segment down-Growing");
                kputs("\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n");
                return E_OUT_OF_MEMORY;
            }

            ec = m_pPrivCache->AllocPages(uOffset, cPage);
            if (FAILED(ec)) return ec;

            CheckAndRestoreHeapGrowThreshold();
            return this->MapCache(m_pPrivCache, uBase, uSize);
        }
        else {
            // Plain anonymous segment: commit a single page at the fault.
            if (g_UserStackGrowThreshold > (Int32)(GetNumberOfFreePages() - 1)) {
                kputs("\n----------------------------------------------");
                kputs("\n>>>> *ERROR* Not enough memory for segment hit");
                kputs("\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n");
                return E_OUT_OF_MEMORY;
            }

            uOffset = AlignPageDown(uAddr) - m_uBase + m_uOffset;
            pPage = m_pPrivCache->CreatePage(uOffset, 0);
            if (!pPage) {
                return E_OUT_OF_MEMORY;
            }
            uBase = uOffset - m_uOffset + m_uBase;
            assert(uBase >= m_uBase && uBase + PAGE_SIZE <= m_uBase + m_uSize);
            m_pOwner->m_pHat->LoadMap(uBase, uBase + PAGE_SIZE,
                pPage->m_uAddr, m_dwProtect, TRUE);

            CheckAndRestoreHeapGrowThreshold();
            return NOERROR;
        }
    // if has Data Source
    } else {
        // Segment backed by a data source: fetch the covering page.
        pPage = m_pDataSource->GetPage(uAddr - m_uBase + m_uOffset);
        if (!pPage) return E_FAIL;

        if (m_dwFlags & MemoryMap_Private) {
            // Private mapping: copy-on-access — clone the source page into
            // the private cache so later writes stay local.
            if (g_UserStackGrowThreshold > \
                    (Int32)(GetNumberOfFreePages() - B_TO_P(pPage->m_uSize))) {
                kputs("\n------------------------------------------------------------");
                kputs("\n>>>> *ERROR* Not enough memory for segment hit (Data source)");
                kputs("\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n");
                return E_OUT_OF_MEMORY;
            }

            pPage = pPage->Clone();
            if (!pPage) {
                return E_OUT_OF_MEMORY;
            }
            // NOTE(review): m_pPrivCache is assumed non-NULL here (InitMap
            // creates it for private data-source mappings) — confirm.
            m_pPrivCache->PutPage(pPage);
            m_pDataSource->Flush();
        }

        uBase = pPage->m_uOffset - m_uOffset + m_uBase;
        assert(uBase >= m_uBase && uBase + PAGE_SIZE <= m_uBase + m_uSize);
        m_pOwner->m_pHat->LoadMap(uBase, uBase + PAGE_SIZE,
            pPage->m_uAddr, m_dwProtect, TRUE);

        CheckAndRestoreHeapGrowThreshold();
        return NOERROR;
    }
}

// Unmap the whole segment: drop its page-table entries, release the
// privately owned pages, and re-evaluate the memory-pressure state now
// that pages may have been returned to the free pool.
ECode StdSegment::Unmap()
{
    m_pOwner->m_pHat->UnloadMap(m_uBase, m_uBase + m_uSize);

    // delete on NULL is harmless; clear the pointer so a later fault
    // re-creates the cache from scratch.
    delete m_pPrivCache;
    m_pPrivCache = NULL;

    CheckAndRestoreHeapGrowThreshold();
    return NOERROR;
}

// Shrink a growable segment back to uAddr, freeing the physical pages that
// fall outside the new extent and unloading their mappings.
//
// For an up-growing segment (heap), pages whose start lies at or above
// uAddr are freed from the tail of the list; for a down-growing segment
// (stack), pages ending at or below uAddr are freed from the head.
//
// @param uAddr  new boundary address of the segment.
// @return NOERROR on success, E_INVALID_OPERATION when the segment is not
//         growable.
ECode StdSegment::Decrease(UInt32 uAddr)
{
    PPage *pPage;
    PPage *pPageList;
    uint_t uUnloadMapBase;
    uint_t uUnloadMapLimit;

    assert(m_pPrivCache != NULL);
    if (!(m_dwFlags & (MemoryMap_DownGrow | MemoryMap_UpGrow))) {
        return E_INVALID_OPERATION;
    }

    if (m_dwFlags & MemoryMap_UpGrow) {
        //
        // Heap decrease
        //
        // Base stays 0 until at least one page is freed; the final unload
        // range is [lowest freed page, old segment end).
        uUnloadMapBase = 0;
        uUnloadMapLimit = m_uBase + m_uSize;
        m_uSize = uAddr - m_uBase;

        // Walk backwards from the last (highest-offset) page, freeing
        // every page that now lies beyond uAddr.
        pPageList = (PPage *)&m_pPrivCache->m_pagesList;
        pPage = (PPage *)(pPageList->m_pPrev);
        while (pPage != pPageList) {
            if (pPage->m_uOffset - m_uOffset + m_uBase >= uAddr) {
                PAGE_ASSERT(pPage);

                uUnloadMapBase = pPage->m_uOffset - m_uOffset + m_uBase;
                pPage->Detach();
                DzFreePages(pPage->m_uAddr, pPage->m_uOrder);
                delete pPage;
                // Re-read the new tail after unlinking.
                pPage = (PPage *)pPageList->m_pPrev;
            }
            else {
                break;
            }
        }
    }
    else { // m_dwFlags is MemoryMap_DownGrow
        //
        // Thread stack decrease
        //
        assert(m_dwFlags & MemoryMap_DownGrow);

        // Limit stays 0 until at least one page is freed; the final unload
        // range is [segment base, highest freed page end).
        uUnloadMapBase = m_uBase;
        uUnloadMapLimit = 0;

        // Walk forwards from the first (lowest-offset) page, freeing every
        // page that now lies entirely below uAddr.
        pPageList = (PPage *)&m_pPrivCache->m_pagesList;
        pPage = (PPage *)(pPageList->m_pNext);
        while (pPage != pPageList) {
            if (pPage->m_uOffset + pPage->m_uSize
                - m_uOffset + m_uBase <= uAddr) {
                PAGE_ASSERT(pPage);

                uUnloadMapLimit = pPage->m_uOffset + pPage->m_uSize
                                  - m_uOffset + m_uBase;
                pPage->Detach();
                DzFreePages(pPage->m_uAddr, pPage->m_uOrder);
                delete pPage;
                // Re-read the new head after unlinking.
                pPage = (PPage *)pPageList->m_pNext;
            }
            else {
                break;
            }
        }
    }
    // Both endpoints non-zero means at least one page was freed; unload
    // the mappings and re-check the memory-pressure state.
    if (uUnloadMapBase && uUnloadMapLimit) {
        m_pOwner->m_pHat->UnloadMap(uUnloadMapBase, uUnloadMapLimit);
        CheckAndRestoreHeapGrowThreshold();
    }
    return NOERROR;
}

// Total physical memory currently backing this segment, taken from the
// data source cache when present, otherwise from the private cache.
// NOTE(review): "Physcial" is a typo for "Physical", kept because the
// method name is part of the external interface.
UInt32 StdSegment::GetPhyscialMemorySize()
{
    if (m_pDataSource != NULL) {
        return m_pDataSource->m_cache.GetTotalSize();
    }
    if (m_pPrivCache != NULL) {
        return m_pPrivCache->GetTotalSize();
    }
    return 0;
}

// Translate a virtual address inside this segment into the physical
// address and size of the backing block.
//
// @param uVAddr   virtual address to translate.
// @param puPAddr  receives the physical address on success.
// @param pSize    receives the size of the physical block on success.
// @return NOERROR on success, E_INVALID_ARGUMENT for directly io-remapped
//         segments, E_DOES_NOT_EXIST when no cache holds the offset.
ECode StdSegment::QueryPhysicalBlock(
    UInt32 uVAddr, UInt32 *puPAddr, MemorySize *pSize)
{
    // Directly io-remapped ranges are not backed by cache pages.
    if (m_dwProtect & IoRemapFlag_Direct) {
        return E_INVALID_ARGUMENT;
    }

    // Convert the virtual address into a cache offset.
    UInt32 uOffset = uVAddr - m_uBase + m_uOffset;

    // Private pages take precedence over the shared data-source cache.
    if (m_pPrivCache != NULL) {
        ECode ec = m_pPrivCache->QueryPhysicalBlock(uOffset, puPAddr, pSize);
        if (SUCCEEDED(ec)) {
            return ec;
        }
    }

    if (m_pDataSource != NULL) {
        ECode ec = m_pDataSource->m_cache.QueryPhysicalBlock(uOffset,
                                                             puPAddr, pSize);
        if (SUCCEEDED(ec)) {
            return ec;
        }
    }

    return E_DOES_NOT_EXIST;
}
