//==========================================================================
// Copyright (c) 2000-2008,  Elastos, Inc.  All Rights Reserved.
//==========================================================================

#ifndef __ELASTOS_PHYSMEM_H__
#define __ELASTOS_PHYSMEM_H__

// Compile-time upper bounds for the physical-memory layout tables
// (see the g_zones[] and g_holes[] declarations later in this header).
const uint_t c_uMaxNumberOfMemoryZones  = 5;
const uint_t c_uMaxNumberOfMemoryHoles  = 20;

//
// Page
//
struct Zone;

// Per-page status bits stored in Page::m_uFlags (bit flags, may be OR'ed).
typedef enum PageFlag
{
    PageFlag_Reserved   = 0x1,  // page is reserved and not available to the allocator
    PageFlag_Illusive   = 0x2,  // NOTE(review): exact semantics not visible here -- confirm against the allocator implementation
} PageFlag;

// Descriptor for one physical page frame.  Derives from DLinkNode so a free
// page can be linked directly into a BuddyList free list (see below).
typedef struct Page : public DLinkNode
{
    uint32_t        m_uFlags;   // OR-combination of PageFlag bits
    Zone            *m_pZone;   // owning zone (forward-declared above)
} Page;

//
// Buddy List
//
// One order level of a buddy allocator: a free list of blocks of this order
// plus a bitmap used for buddy pairing.
typedef struct BuddyList
{
    DLinkNode       m_listHead; // head of the free-block list for this order
    byte_t          *m_pbMap;   // buddy-state bitmap; layout defined by the allocator implementation
} BuddyList;

//
// Memory allocator lock
//
#ifdef _DEBUG

// Debug-build wrapper around KMutex that additionally tracks whether the
// allocator lock is currently held (m_nLocked) so the Allocator* entry
// points can assert against recursive or unbalanced locking.  In release
// builds CMemoryAllocatorLock is a plain KMutex (see the #else branch).
class CMemoryAllocatorLock
{
// Only these two functions may use the checked AllocatorLock/AllocatorUnlock
// entry points below.
friend bool_t LockMemoryAllocatorAndDisablePreemption();
friend void UnlockMemoryAllocatorAndRestorePreemption(bool_t bPreemption);

public:
    CMemoryAllocatorLock()
#ifdef DEBUG_KMUTEX
        :m_kmutex(__FILE__, __LINE__)   // record construction site for mutex debugging
#endif //DEBUG_KMUTEX
    {
        m_nLocked = 0;  // not held initially
    }

    // Acquire the lock from normal (public) callers.  Takes the current
    // thread's active lock first, then the allocator mutex.
    // NOTE(review): uses m_kmutex._Lock() while the private paths use
    // Lock()/Unlock() -- confirm the intended KMutex entry point.
    void Lock()
    {
        if (m_nLocked == 0) {
            // Acquiring from unlocked state must happen with preemption on.
            assert(PreemptionEnabled());
        }
        GetCurrentThread()->m_activeLock._Lock();
        m_kmutex._Lock();
    }

    // Release the allocator mutex.
    // NOTE(review): the thread's m_activeLock taken in Lock() is not released
    // here -- presumably released elsewhere; verify the pairing.
    void Unlock()
    {
        m_kmutex.Unlock();
    }

private:
    // Checked acquire used by the friend functions: asserts the lock was not
    // already held, then marks it held.
    void AllocatorLock()
    {
        m_kmutex.Lock();
        assert(m_nLocked == 0);
        m_nLocked = 1;
    }

    // Checked release: asserts the lock was held, clears the flag before
    // dropping the mutex.
    void AllocatorUnlock()
    {
        assert(m_nLocked == 1);
        m_nLocked = 0;
        m_kmutex.Unlock();
    }

    KMutex  m_kmutex;   // the underlying mutex
    int     m_nLocked;  // 1 while held via AllocatorLock(), else 0 (debug bookkeeping)
};

#else

typedef KMutex CMemoryAllocatorLock;

#endif // _DEBUG

//
// Zone
//
// One physical-memory zone: a contiguous region managed by a buddy allocator
// with one BuddyList per block order, protected by its own lock.
typedef struct Zone
{
    MemoryRegion    m_region;               // physical extent covered by this zone

    int             m_nApproxMaxOrder;      // hint for the highest order likely to have free blocks
    uint_t          m_uNumberOfFreePages;   // current count of free pages in this zone
    BuddyList       m_buddyLists[c_uNumberOfOrders];    // free lists, one per order

    CMemoryAllocatorLock    m_lock;         // guards this zone's allocator state
} Zone;

//
// Global variable declarations
//
EXTERN Page         *g_pPages;              // array of page descriptors, indexed by page number
EXTERN uint_t       g_uNumberOfPages;       // total pages described by g_pPages
EXTERN uint_t       g_uNumberOfActualPages; // pages backed by real RAM (excludes holes/reserved)

EXTERN Zone         g_zones[];              // memory zones (at most c_uMaxNumberOfMemoryZones)
EXTERN MemoryHole   g_holes[];              // non-RAM gaps (at most c_uMaxNumberOfMemoryHoles)
EXTERN uint_t       g_uNumberOfHoles;       // valid entries in g_holes

EXTERN virtaddr_t   g_vaKernelHighMemory;   // top of the kernel's mapped memory

// Return a boot module's pages to the physical allocator, looked up by name.
EXTERN ECode ReclaimBootModule(const wchar_t *wszName);

//
// Returns TRUE when the byte range [pa, pa + size) lies entirely inside a
// single memory hole, i.e. it is device/MMIO space rather than RAM.
//
// pa   - physical base address of the range.
// size - length in bytes; must be non-zero.
//
INLINE bool_t IsMemoryMappedIo(physaddr_t pa, uint_t size)
{
    assert(0 < size);
    // Overflow guard: the range must not wrap, except the special case where
    // pa + size wraps to exactly 0 (range ends at the top of address space).
    assert(0 == pa + size || pa < pa + size);

    uint_t uBasePageNo = PHYSADDR_TO_PAGENO(pa);
    // Exclusive limit page, clamped to c_uMaxNumberOfPages so the hole
    // comparison below never considers pages beyond the addressable range.
    // NOTE(review): when pa + size wraps to 0 the else-branch yields page 0,
    // not c_uMaxNumberOfPages -- confirm callers never pass such a range.
    uint_t uLimitPageNo =
        (PAGENO_TO_PHYSADDR(c_uMaxNumberOfPages - 1) < pa + size)?
            c_uMaxNumberOfPages
            : PHYSADDR_TO_PAGENO(RoundUp2(pa + size, PAGE_SIZE));

    // MMIO iff some hole fully contains [uBasePageNo, uLimitPageNo).
    for (uint_t i = 0; i < g_uNumberOfHoles; i++) {
        if (g_holes[i].uBasePageNo <= uBasePageNo
            && uLimitPageNo <= g_holes[i].uLimitPageNo) {
            return TRUE;
        }
    }

    return FALSE;
}

//
// Returns TRUE when the byte range [pa, pa + size) overlaps no memory hole,
// i.e. the whole range is backed by physical RAM.
//
// pa   - physical base address of the range.
// size - length in bytes; must be non-zero.
//
INLINE bool_t IsPhysicalMemory(physaddr_t pa, uint_t size)
{
    assert(0 < size);
    // Overflow guard: the range must not wrap, except the special case where
    // pa + size wraps to exactly 0 (range ends at the top of address space).
    assert(0 == pa + size || pa < pa + size);

    uint_t uBasePageNo = PHYSADDR_TO_PAGENO(pa);
    // Exclusive limit page, clamped to c_uMaxNumberOfPages (same computation
    // as IsMemoryMappedIo; see the wrap-to-zero caveat noted there).
    uint_t uLimitPageNo =
        (PAGENO_TO_PHYSADDR(c_uMaxNumberOfPages - 1) < pa + size)?
            c_uMaxNumberOfPages
            : PHYSADDR_TO_PAGENO(RoundUp2(pa + size, PAGE_SIZE));

    // RAM iff the range is disjoint from every hole: for each hole the range
    // must lie entirely below or entirely above it.
    for (uint_t i = 0; i < g_uNumberOfHoles; i++) {
        if (!(uLimitPageNo <= g_holes[i].uBasePageNo
                || g_holes[i].uLimitPageNo <= uBasePageNo)) {
            return FALSE;
        }
    }

    return TRUE;
}

#endif // __ELASTOS_PHYSMEM_H__
