//==========================================================================
// Copyright (c) 2000-2008,  Elastos, Inc.  All Rights Reserved.
//==========================================================================

#ifndef __ELASTOS_ALLOCPAGE_H__
#define __ELASTOS_ALLOCPAGE_H__

// assume the macro: KCONFIG_NUMBEROF_ORDERS is 11
// Maps a page count n (expected 1..1024) to the smallest buddy order whose
// block covers it (order k holds 2^k pages); yields -1 when n exceeds the
// largest supported order.
// NOTE(review): n < 1 falls through to the "<= 4" arm and yields 2 —
// callers are expected to pass n >= 1.
// Every use of the argument is parenthesized so expression arguments such
// as GET_UP_ORDER(a | b) group correctly inside the comparisons.
#define GET_UP_ORDER(n) ( \
    (n) == 1 ? 0 : ( \
    (n) == 2 ? 1 : ( \
    (n) <= 4 ? 2 : ( \
    (n) <= 8 ? 3 : ( \
    (n) <= 16 ? 4 : ( \
    (n) <= 32 ? 5 : ( \
    (n) <= 64 ? 6 : ( \
    (n) <= 128 ? 7 : ( \
    (n) <= 256 ? 8 : ( \
    (n) <= 512 ? 9 : ( \
    (n) <= 1024 ? 10 : -1)))))))))))

// Sentinel returned by the page allocators when no pages are available.
// NOTE(review): hard-codes a 32-bit all-ones address — assumes physaddr_t
// is 32 bits wide; confirm before porting to a 64-bit target.
#define INVALID_PAGE_ADDRESS        ((physaddr_t)0xffffffff)
#define IS_VALID_PAGE_ADDRESS(pa)   (INVALID_PAGE_ADDRESS != (pa))

// Number of buddy orders (from KCONFIG_NUMBEROF_ORDERS) and the largest
// usable order value; valid orders are 0..c_uMaxOrder.
const uint_t c_uNumberOfOrders  = KCONFIG_NUMBEROF_ORDERS;
const uint_t c_uMaxOrder        = c_uNumberOfOrders - 1;

// Global page-allocator state shared across the kernel.
typedef struct DzGlobals
{
    uint_t uNumberOfZones;  // number of physical memory zones configured
} DzGlobals;

// The single global instance, defined in the allocator's implementation file.
EXTERN_C DzGlobals dz;

// Page allocator
// Allocates 2^uOrder physically contiguous pages, preferring the given
// zone; returns INVALID_PAGE_ADDRESS on failure (see the inline callers
// below, which test against that sentinel).
EXTERN physaddr_t DzAllocPages(uint_t uOrder, uint_t uPreferredZoneNo);

// Convenience wrapper: a single page is an order-0 allocation.
INLINE physaddr_t DzAllocPage(uint_t uPreferredZoneNo)
{
    const uint_t uOrder = 0;
    return DzAllocPages(uOrder, uPreferredZoneNo);
}

// Releases 2^uOrder contiguous pages previously obtained from DzAllocPages.
EXTERN void DzFreePages(physaddr_t pa, uint_t uOrder);

// Convenience wrapper: a single page was allocated at order 0, so release
// it the same way.
INLINE void DzFreePage (physaddr_t pa)
{
    const uint_t uOrder = 0;
    DzFreePages(pa, uOrder);
}

// Kernel page allocator
enum {
    AKPFlag_DMA = 0x0001    // allocate from the DMA-preferred zone
};

// Allocates 2^uOrder contiguous kernel pages and returns their kernel
// virtual address, or NULL when the underlying page allocation fails.
// Pass AKPFlag_DMA in uFlags to allocate from the DMA-preferred zone.
INLINE virtaddr_t DzAllocKernelPages(uint_t uOrder, uint_t uFlags = 0)
{
    // Pick the DMA-capable zone when requested, otherwise the normal
    // kernel zone.
    const uint_t uZoneNo = (uFlags & AKPFlag_DMA) != 0
            ? bsp.uDmaPreferredZoneNo
            : bsp.uKernelPreferredZoneNo;

    const physaddr_t paPages = DzAllocPages(uOrder, uZoneNo);
    if (!IS_VALID_PAGE_ADDRESS(paPages)) return NULL;

    STAT_ALLOC_KERNEL_PAGES

    return VIRTADDR(paPages);
}

// Convenience wrapper: one kernel page is an order-0 allocation with
// default flags.
INLINE virtaddr_t DzAllocKernelPage()
{
    const uint_t uOrder = 0;
    return DzAllocKernelPages(uOrder);
}

// Releases 2^uOrder kernel pages by translating the kernel virtual
// address back to its physical address and freeing through DzFreePages.
INLINE void DzFreeKernelPages(virtaddr_t va, uint_t uOrder)
{
    const physaddr_t paPages = PHYSADDR(va);
    DzFreePages(paPages, uOrder);

    STAT_FREE_KERNEL_PAGES
}

// Convenience wrapper: single-page variant, forwards with order 0.
INLINE void DzFreeKernelPage (virtaddr_t va)
{
    const uint_t uOrder = 0;
    DzFreeKernelPages(va, uOrder);
}

// Allocates uPages kernel pages that need not be physically contiguous.
// NOTE(review): 'protect' presumably carries page-protection flags (cf.
// DzRemapDiscontinuousKernelPages) — confirm against the implementation.
EXTERN virtaddr_t DzAllocDiscontinuousKernelPages(uint_t uPages, uint_t protect);

EXTERN void DzFreeDiscontinuousKernelPages(virtaddr_t va);

// NOTE(review): "Coalesed" is a typo for "Coalesced"; renaming the symbol
// would break existing callers, so it is left as-is.
EXTERN bool_t DzFreeCoalesedDiscontinuousKernelPages(virtaddr_t va, uint_t uPages);

EXTERN virtaddr_t DzRemapDiscontinuousKernelPages(
        virtaddr_t va, uint_t uPages, int_t protect);

EXTERN void DzUnmapDiscontinuousKernelPages(virtaddr_t va, uint_t uPages);

// Misc
EXTERN bool_t ReclaimPages(uint_t uBasePageNo, uint_t uLimitPageNo);

EXTERN uint_t GetNumberOfFreePages();

// Maps a physical address range into kernel virtual space (I/O remap);
// DzKIoUnmap releases a mapping created by DzKIoRemap.
EXTERN virtaddr_t DzKIoRemap(
        physaddr_t paBase, size_t size, uint_t protect = IoRemapFlag_Direct);
EXTERN void DzKIoUnmap(virtaddr_t vaBase);

// only for KERNEL_MEMORY_DETECT
#if defined(KERNEL_MEMORY_DETECT)

// Memory-detection counterparts of DzAllocPages/DzFreePages; signatures
// mirror the Dz* variants and KMemAllocPages reports failure with the
// same INVALID_PAGE_ADDRESS sentinel (see KMemAllocKernelPages below).
EXTERN physaddr_t KMemAllocPages(uint_t uOrder, uint_t uPreferredZoneNo);
EXTERN void KMemFreePages(physaddr_t pa, uint_t uOrder);

// Allocates 2^uOrder contiguous kernel pages via the memory-detection
// allocator; returns their kernel virtual address or NULL on failure.
INLINE virtaddr_t KMemAllocKernelPages(uint_t uOrder)
{
    const physaddr_t paPages =
            KMemAllocPages(uOrder, bsp.uKernelPreferredZoneNo);
    if (!IS_VALID_PAGE_ADDRESS(paPages)) return NULL;

    return VIRTADDR(paPages);
}

// Convenience wrapper: one kernel page is an order-0 allocation.
INLINE virtaddr_t KMemAllocKernelPage()
{
    const uint_t uOrder = 0;
    return KMemAllocKernelPages(uOrder);
}

// Releases 2^uOrder kernel pages through the memory-detection allocator.
INLINE void KMemFreeKernelPages(virtaddr_t va, uint_t uOrder)
{
    const physaddr_t paPages = PHYSADDR(va);
    KMemFreePages(paPages, uOrder);
}

// Convenience wrapper: single-page variant, forwards with order 0.
INLINE void KMemFreeKernelPage (virtaddr_t va)
{
    const uint_t uOrder = 0;
    KMemFreeKernelPages(va, uOrder);
}

#endif //KERNEL_MEMORY_DETECT

// Looks up a boot-loaded module by name, returning (presumably) its base
// and limit virtual addresses through the out parameters.
// NOTE(review): ECode failure values are not visible from this header —
// consult the implementation for the exact error contract.
EXTERN ECode GetBootModule(const wchar_t *wszName, virtaddr_t *pvaBase,
                virtaddr_t *pvaLimit);

#endif // __ELASTOS_ALLOCPAGE_H__
