//==========================================================================
// Copyright (c) 2000-2008,  Elastos, Inc.  All Rights Reserved.
//==========================================================================

#include <core.h>
#include "salloc.h"
#include "buddysys.h"

// Only for KERNEL_MEMORY_DETECT
#if defined(KERNEL_MEMORY_DETECT)

// Hash-table hooks of the kernel memory detector (defined elsewhere):
// they track every live page-block allocation so invalid frees and leaks
// can be caught.
EXTERN Void KMemInsertIntoHash(const PVoid pv, const UInt32 uSizeOrOrder,
                                Boolean bPhysPage);
EXTERN Boolean KMemDetachFromHash(PVoid pv, Boolean bPhysPage);

// Records a freshly allocated page block in the detector's hash.
// NOTE(review): expands the caller's locals `paPages' and `uOrder' by name
// (see DzAllocPages in this file) -- do not rename those locals.
// NOTE(review): the trailing `;' after while(0) means call sites must NOT
// add their own semicolon; this also breaks inside un-braced if/else --
// confirm this is intentional before "fixing" it.
#ifndef KMEM_INSERT_PAGES_INTO_HASH
#define KMEM_INSERT_PAGES_INTO_HASH \
    do { \
        KMemInsertIntoHash((PVoid)paPages, uOrder, TRUE); \
    } while(0);

#endif //KMEM_INSERT_PAGES_INTO_HASH

// Removes a page block from the detector's hash before it is freed;
// breaks into the debugger on an invalid or double free.
// NOTE(review): expands the caller's local `pa' by name (see DzFreePages
// in this file) and supplies its own trailing semicolon, as above.
#ifndef KMEM_DETACH_PAGES_FROM_HASH
#define KMEM_DETACH_PAGES_FROM_HASH \
    do { \
        if (!KMemDetachFromHash((PVoid)pa, TRUE)) { \
            kprintf("\nERROR: Try to free invalid kernel page(s)!\n"); \
            DebugBreak(); \
        } \
    } while(0);
#endif //KMEM_DETACH_PAGES_FROM_HASH

#endif //KERNEL_MEMORY_DETECT

// Array of Page structures, indexed by absolute physical page number
// (the pointer is biased downward by the lowest base page number in
// InitPages, so g_pPages[pageNo] works directly).
Page    *g_pPages               = NULL;
// Number of Page structures spanning [lowest zone base, highest zone
// limit), including "illusive" pages in the gaps between zones.
uint_t  g_uNumberOfPages        = 0;
uint_t  g_uNumberOfActualPages  = 0;

// One buddy allocator per physical memory zone.
Zone    g_zones[c_uMaxNumberOfMemoryZones];

// Core free routine shared by DzFreePages/KMemFreePages and ReclaimPages.
STATIC void __FreePages(uint_t uPageNo, uint_t uOrder);

//
// Return FALSE if the region too small. (less than one page)
//
//
// Record the free region [uBasePageNo, uLimitPageNo) in *pFreeRegion.
// Returns FALSE when the region is too small to hold even one page.
//
EXTERN DECL_CODEINIT bool_t CDECL SetFreeRegion(
    MemoryRegion *pFreeRegion, uint_t uBasePageNo, uint_t uLimitPageNo)
{
    assert(pFreeRegion);

    // Rounding up may legitimately produce uBasePageNo == uLimitPageNo + 1.
    assert(uBasePageNo <= uLimitPageNo + 1);

    pFreeRegion->uBasePageNo  = uBasePageNo;
    pFreeRegion->uLimitPageNo = uLimitPageNo;

    // Non-empty only when at least one whole page fits in the region.
    return uBasePageNo < uLimitPageNo;
}

//
// Map a memory-zone descriptor to its runtime Zone by matching its
// base/limit page numbers against the bsp zone table; the matching
// index selects the corresponding entry of g_zones.
//
INLINE Zone *FindZone(const MemoryZone *pZone)
{
    uint_t uZoneNo = 0;
    while (uZoneNo < bsp.uNumberOfMemoryZones) {
        if (bsp.pMemoryZones[uZoneNo].uBasePageNo  == pZone->uBasePageNo &&
            bsp.pMemoryZones[uZoneNo].uLimitPageNo == pZone->uLimitPageNo) {
            break;
        }
        uZoneNo++;
    }
    // The descriptor must name a known zone.
    assert(uZoneNo < bsp.uNumberOfMemoryZones);

    return &g_zones[uZoneNo];
}

//
// Initialize Page structures, and add free regions for illusive pages,
// if any.
// Assume: The zones has been sorted without empty regions,
//         and the free regions is big enough.
//
EXTERN DECL_CODEINIT bool_t CDECL InitPages(
    const MemoryZone zones[], uint_t uNumberOfZones,
    MemoryRegion freeRegions[], uint_t *puNumberOfFreeRegions)
{
    uint_t i;

    // Compute the bounding region [min base, max limit) over all zones.
    // NOTE(review): this loop reads bsp.pMemoryZones rather than the
    // `zones'/`uNumberOfZones' parameters -- presumably both describe the
    // same set of zones (FindZone asserts as much); confirm.
    MemoryRegion zonesRegion;
    zonesRegion.uBasePageNo  = c_uMaxNumberOfPages;
    zonesRegion.uLimitPageNo = 0;

    for (i = 0; i < bsp.uNumberOfMemoryZones; i++) {
        zonesRegion.uBasePageNo  =
            Min(zonesRegion.uBasePageNo, bsp.pMemoryZones[i].uBasePageNo);
        zonesRegion.uLimitPageNo =
            Max(zonesRegion.uLimitPageNo, bsp.pMemoryZones[i].uLimitPageNo);
    }

    // One Page structure per page in the bounding region, including the
    // illusive pages that fall in gaps between zones.
    g_uNumberOfPages = zonesRegion.uLimitPageNo - zonesRegion.uBasePageNo;

    kprintf("Total: 0x%x pages, structure size: 0x%x\n",
            g_uNumberOfPages, sizeof(Page) * g_uNumberOfPages);

    g_pPages = (Page *)StaticAlloc(sizeof(Page) * g_uNumberOfPages);
    if (NULL == g_pPages) return FALSE;

    // Bias the array pointer so it can be indexed by absolute page number.
    g_pPages -= zonesRegion.uBasePageNo;

    // Assume: zones has been sorted
    uint_t uCurrZoneNo = 0;
    Zone *pZone = FindZone(&zones[uCurrZoneNo]);

    for (i = zonesRegion.uBasePageNo; i < zonesRegion.uLimitPageNo; i++) {
        // Passed the end of the current zone: advance to the next one.
        if (i >= zones[uCurrZoneNo].uLimitPageNo) {
            uCurrZoneNo++;
            assert(uCurrZoneNo < uNumberOfZones);
            pZone = FindZone(&zones[uCurrZoneNo]);
        }

        g_pPages[i].Initialize();

        // Pages below the current zone's base lie in a hole between zones.
        if (i < zones[uCurrZoneNo].uBasePageNo) {
            g_pPages[i].m_uFlags = PageFlag_Illusive;
            g_pPages[i].m_pZone  = NULL;
        }
        else {
            // Real pages start out reserved; ReclaimPages releases them to
            // the buddy lists later.
            g_pPages[i].m_uFlags = PageFlag_Reserved;
            g_pPages[i].m_pZone  = pZone;
        }
    }

    // Add illusive page structures to free regions: the memory that backs
    // the Page structures of gap pages is itself never used, so the
    // page-aligned portion of it is handed back as additional free regions.
    for (i = 1; i < uNumberOfZones; i++) {
        uint_t uIllusiveBasePageNo  = PHYSADDR_TO_PAGENO(
            RoundUp2(PHYSADDR(&g_pPages[zones[i - 1].uLimitPageNo]),
                    PAGE_SIZE));

        uint_t uIllusiveLimitPageNo = PHYSADDR_TO_PAGENO(
            PHYSADDR(&g_pPages[zones[i].uBasePageNo]));

        // Only count the region when it spans at least one whole page.
        if (SetFreeRegion(&freeRegions[*puNumberOfFreeRegions],
                uIllusiveBasePageNo, uIllusiveLimitPageNo)) {
            (*puNumberOfFreeRegions)++;
        }
    }

    return TRUE;
}

//
// Set up one Zone: copy its region, reset the bookkeeping fields, and
// allocate a zero-filled buddy bitmap for every order.
// Returns FALSE if any static allocation fails.
//
STATIC DECL_CODEINIT bool_t CDECL InitZone(
    Zone *pZone, const MemoryRegion *pZoneRegion)
{
    pZone->m_region             = *pZoneRegion;
    pZone->m_nApproxMaxOrder    = -1;   // nothing freed into this zone yet
    pZone->m_uNumberOfFreePages = 0;

    // Start from the page count; each order then needs one bit per pair
    // of blocks of that order.
    uint_t uBitCount = pZone->m_region.uLimitPageNo
                     - pZone->m_region.uBasePageNo;

    for (uint_t uOrder = 0; uOrder < c_uNumberOfOrders; uOrder++) {
        pZone->m_buddyLists[uOrder].m_listHead.Initialize();

        // Halve (rounding up) to get bits for this order, then round the
        // bit count up to whole bytes.
        uBitCount = RoundUp2(uBitCount, 2) >> 1;
        uint_t uByteCount = RoundUp2(uBitCount, 8) >> 3;

        pZone->m_buddyLists[uOrder].m_pbMap =
            (byte_t *)StaticAlloc(uByteCount);
        if (NULL == pZone->m_buddyLists[uOrder].m_pbMap) return FALSE;

        // All bits clear: every buddy pair starts out fully busy.
        memset(pZone->m_buddyLists[uOrder].m_pbMap, 0, uByteCount);
    }

    return TRUE;
}

//
// Initialize every zone described by the bsp table.
// Returns FALSE as soon as any zone fails to initialize.
//
EXTERN DECL_CODEINIT bool_t CDECL InitZones()
{
    for (uint_t uZoneNo = 0; uZoneNo < bsp.uNumberOfMemoryZones; uZoneNo++) {
        // Every zone must begin on a max-order block boundary.
        assert(RoundDown2(bsp.pMemoryZones[uZoneNo].uBasePageNo,
                          1 << c_uMaxOrder)
            == bsp.pMemoryZones[uZoneNo].uBasePageNo);

        if (!InitZone(&g_zones[uZoneNo], &bsp.pMemoryZones[uZoneNo])) {
            return FALSE;
        }
    }

    return TRUE;
}

//
// Sum the per-zone free-page counters, with preemption disabled so the
// snapshot is taken consistently.
//
EXTERN uint_t GetNumberOfFreePages()
{
    uint_t uTotal = 0;

    bool_t bPreemption = DzDisablePreemption();
    uint_t uZoneNo = dz.uNumberOfZones;
    while (uZoneNo > 0) {
        uZoneNo--;
        uTotal += g_zones[uZoneNo].m_uNumberOfFreePages;
    }
    DzRestorePreemption(bPreemption);

    return uTotal;
}

//
// Allocate a naturally-aligned block of 2^uOrder physical pages from the
// buddy system, preferring zone uPreferredZoneNo and falling back to
// lower-numbered zones when it cannot satisfy the request.
// Returns the block's physical address, or INVALID_PAGE_ADDRESS when no
// zone has a large enough free block.
//
#if defined(KERNEL_MEMORY_DETECT)
EXTERN physaddr_t KMemAllocPages(uint_t uOrder, uint_t uPreferredZoneNo)
#else
EXTERN physaddr_t DzAllocPages(uint_t uOrder, uint_t uPreferredZoneNo)
#endif
{
    assert(c_uNumberOfOrders > uOrder);
    assert(dz.uNumberOfZones > uPreferredZoneNo);

    Zone *pZone = &g_zones[uPreferredZoneNo];

    pZone->m_lock.Lock();

Restart:
    // m_nApproxMaxOrder is an optimistic upper bound on the largest free
    // block in the zone; if even that bound is too small, drop this zone's
    // lock and fall back to the previous (lower-numbered) zone.
    while (pZone->m_nApproxMaxOrder < (int)uOrder) {
//        kprintf("Not enough physical memory in zone[%d], move previous\n",
//                pZone - g_zones);

        pZone->m_lock.Unlock();

        pZone--;

        if (pZone < g_zones) {
            // Every zone exhausted; no lock is held at this point.
#ifdef _DEBUG
            if (uOrder == 0) {
                kputs("*ERROR* Not enough physical memory\n");
            }
#endif //_DEBUG

            return INVALID_PAGE_ADDRESS;
        }

        pZone->m_lock.Lock();
    }

    Page        *pPage;
    BuddyList   *pBuddyList = &pZone->m_buddyLists[uOrder];
    uint_t      uNewOrder   = uOrder;

    // Search the free lists from the requested order upward for the
    // first non-empty one.
    do {
        if (!pBuddyList->m_listHead.IsEmpty()) {
            pPage = (Page *)pBuddyList->m_listHead.First();
            goto Found;
        }

        uNewOrder++;
        pBuddyList++;
    } while (c_uNumberOfOrders > uNewOrder);

    // The approximate max order should greater than actual max order,
    // So we must adjust it and restart.
//    kprintf("Zone[%d]: Invalid approximate max order: %d (actual < %d)\n",
//            pZone - g_zones, pZone->m_nApproxMaxOrder, uOrder);
    pZone->m_nApproxMaxOrder = uOrder - 1;
    goto Restart;

Found:
    assert(c_uNumberOfOrders        >  uNewOrder);
    assert(pZone->m_nApproxMaxOrder >= (int)uNewOrder);

    // Update approximate max order if necessary
    // (we are taking the last block of the highest non-empty list).
    if (pZone->m_nApproxMaxOrder == (int)uNewOrder &&
        pBuddyList->m_listHead.Last() == pPage) {
        pZone->m_nApproxMaxOrder = uNewOrder - 1;
    }

    // Detach the block page and mask the buddy map
    // (one map bit covers a PAIR of order-uNewOrder buddies, hence the
    // shift by uNewOrder + 1).
    pPage->Detach();
    uint_t uOffsetPageNo = (pPage - g_pPages) - pZone->m_region.uBasePageNo;
    ChangeBit(uOffsetPageNo >> (uNewOrder + 1), pBuddyList->m_pbMap);

    // Split the found block down to the requested order: at each step the
    // lower half goes back onto the free list of the smaller order, and
    // the allocation advances into the upper half.
    uint_t uNumberOfPagesPerBlock = 1u << uNewOrder;
    while (uNewOrder > uOrder) {
        uNewOrder--;
        pBuddyList--;
        uNumberOfPagesPerBlock >>= 1;

        pBuddyList->m_listHead.InsertNext(pPage);
        ChangeBit(uOffsetPageNo >> (uNewOrder + 1), pBuddyList->m_pbMap);

        uOffsetPageNo += uNumberOfPagesPerBlock;
        pPage         += uNumberOfPagesPerBlock;
    }

    assert((1u << uOrder) == uNumberOfPagesPerBlock);

    pZone->m_uNumberOfFreePages -= uNumberOfPagesPerBlock;

    pZone->m_lock.Unlock();

    STAT_ALLOC_PAGES

    return PAGENO_TO_PHYSADDR(pPage - g_pPages);
}

//
// Free pages that are originally marked as PageFlag_Reserved.
//
EXTERN bool_t ReclaimPages(uint_t uBasePageNo, uint_t uLimitPageNo)
{
    assert(IS_VALID_PAGENO(uBasePageNo));
    assert(IS_VALID_PAGENO(uLimitPageNo - 1));
    assert(uBasePageNo <= uLimitPageNo);

    // All pages in the range must belong to the zone of the first page.
    Zone *pZone = g_pPages[uBasePageNo].m_pZone;

    // Remove PageFlag_Reserved from all free pages
    for (uint_t i = uBasePageNo; i < uLimitPageNo; i++) {
        assert(g_pPages[i].m_uFlags & PageFlag_Reserved);
        assert(!(g_pPages[i].m_uFlags & PageFlag_Illusive));
        assert(g_pPages[i].m_pZone == pZone); // can't across zones
        assert(pZone->m_region.uBasePageNo <= i
                && i < pZone->m_region.uLimitPageNo);

        g_pPages[i].m_uFlags &= ~PageFlag_Reserved;
    }

    // Call __FreePages to reclaim them.
    // Work in zone-relative offsets from here on.
    uint_t uOffsetPageNo        = uBasePageNo  - pZone->m_region.uBasePageNo;
    uint_t uOffsetLimitPageNo   = uLimitPageNo - pZone->m_region.uBasePageNo;

    uint_t uOrder;
    uint_t uPageNoMask;

RestartLoop:
    // Greedily carve [uOffsetPageNo, uOffsetLimitPageNo) into the largest
    // naturally-aligned power-of-two blocks and free each one in turn.
    while (uOffsetPageNo < uOffsetLimitPageNo) {
        uOrder = c_uNumberOfOrders;
        // Find the highest order whose alignment uOffsetPageNo satisfies.
        while (0 < uOrder) {
            uOrder--;
            uPageNoMask = (~0u) << uOrder;  // -uPageNoMask == 1 << uOrder

            // Aligned for this order when no low bits are set.
            if (0 == (uOffsetPageNo & ~uPageNoMask)) {
                // Shrink the candidate block until it fits in the range;
                // alignment for a larger order implies alignment for all
                // smaller ones, so this inner loop must terminate.
                while (TRUE) {
                    // uOffsetPageNo - uPageNoMask == uOffsetPageNo + 2^uOrder
                    // (the first offset past the candidate block).
                    if (uOffsetPageNo - uPageNoMask <= uOffsetLimitPageNo) {
                        __FreePages(
                            pZone->m_region.uBasePageNo + uOffsetPageNo,
                            uOrder);
                        uOffsetPageNo -= uPageNoMask;   // advance by 2^uOrder
                        goto RestartLoop;
                    }

                    uOrder--;
                    uPageNoMask = (~0u) << uOrder;
                }
                assert0();
            }
        }
        assert0();
    }

    return TRUE;
}

//
// Free a block of 2^uOrder pages at physical address pa.
// Thin wrapper over __FreePages that validates alignment and updates the
// free-pages statistics.
//
#if defined(KERNEL_MEMORY_DETECT)
EXTERN void KMemFreePages(physaddr_t pa, uint_t uOrder)
#else
EXTERN void DzFreePages(physaddr_t pa, uint_t uOrder)
#endif
{
    // The address must be page-aligned.
    assert(RoundUp(pa, PAGE_SIZE) == pa);

    __FreePages(PHYSADDR_TO_PAGENO(pa), uOrder);

    STAT_FREE_PAGES
}

//
// Return the block of 2^uOrder pages starting at uPageNo to its zone's
// buddy system, coalescing with free buddy blocks as far as possible.
//
STATIC void __FreePages(uint_t uPageNo, uint_t uOrder)
{
    assert(IS_VALID_PAGENO(uPageNo));
    assert(c_uNumberOfOrders > uOrder);

    uint_t uPageNoMask = (~0u) << uOrder; // -uPageNoMask == 1 << uOrder

    Zone *pZone = g_pPages[uPageNo].m_pZone;
    assert(pZone);
    assert(pZone->m_region.uBasePageNo  <= uPageNo);
    // uPageNo - uPageNoMask == uPageNo + 2^uOrder (one past the block end).
    assert(pZone->m_region.uLimitPageNo >= uPageNo - uPageNoMask);

    pZone->m_lock.Lock();

    // Subtracting the negative mask adds 2^uOrder to the free counter.
    pZone->m_uNumberOfFreePages -= uPageNoMask;

    uint_t uBasePageNo      = pZone->m_region.uBasePageNo;
    uint_t uOffsetPageNo    = uPageNo - uBasePageNo;
    // The block must be naturally aligned for its order.
    assert((uOffsetPageNo & uPageNoMask) == uOffsetPageNo);

    // One buddy-map bit covers a pair of order-uOrder blocks.
    uint_t uBuddyMapBitNo = uOffsetPageNo >> (uOrder + 1);

    BuddyList *pBuddyList = &pZone->m_buddyLists[uOrder];

    // Coalesce upward; the condition becomes zero exactly when uPageNoMask
    // reaches the top order, since then -uPageNoMask ==
    // 1 << (c_uNumberOfOrders - 1).
    while ((1 << (c_uNumberOfOrders - 1)) + uPageNoMask) {
        // If the the map bit == 0, means the buddy block is busy,
        //      so exit from loop.
        if (0 == TestAndChangeBit(uBuddyMapBitNo, pBuddyList->m_pbMap)) {
            break;
        }

        // Remove the buddy block from the free list.
        // (XOR with the block size, -(int)uPageNoMask, flips the offset to
        // the buddy block of the pair.)
        g_pPages[uBasePageNo + (uOffsetPageNo ^ -(int)uPageNoMask)].Detach();

        // Merge: double the block size, align the offset down to the
        // merged block's start, and move up one order.
        uPageNoMask     <<= 1;
        uOffsetPageNo   &= uPageNoMask;
        uBuddyMapBitNo  >>= 1;
        pBuddyList++;
    }

    // Insert the (possibly merged) block into its order's free list.
    pBuddyList->m_listHead.InsertLast(&g_pPages[uBasePageNo + uOffsetPageNo]);

    // Raise the approximate max order if this block now exceeds it.
    if (pZone->m_nApproxMaxOrder < (pBuddyList - pZone->m_buddyLists)) {
        pZone->m_nApproxMaxOrder = (pBuddyList - pZone->m_buddyLists);
    }

    pZone->m_lock.Unlock();
}

#ifndef _RELEASE

//
// Linear search of the zone's order-uOrder free list for a block whose
// first page number equals uBlockPageNo (debug consistency helper).
//
STATIC DECL_CODEINIT bool_t ExistBlockInList(Zone *pZone, uint_t uBlockPageNo,
    uint_t uOrder)
{
    assert(pZone);

    // The list head doubles as the end-of-list sentinel.
    Page *pSentinel = (Page *)&pZone->m_buddyLists[uOrder].m_listHead;
    Page *pPage     = (Page *)pZone->m_buddyLists[uOrder].m_listHead.First();

    while (pPage != pSentinel) {
        if ((uint_t)(pPage - g_pPages) == uBlockPageNo) return TRUE;
        pPage = (Page *)pPage->Next();
    }

    return FALSE;
}

//
// Dump the order-uOrder free list of a zone and cross-check it against
// the buddy bitmap; prints *ERROR* lines for any inconsistency found.
// Returns the number of free pages counted on this list.
//
STATIC DECL_CODEINIT uint_t DumpBuddyList(Zone *pZone, uint_t uOrder)
{
    kprintf("    No.%d Buddy List\n", uOrder);

    uint_t uBasePageNo = pZone->m_region.uBasePageNo;
    uint_t uNumberOfFreePages = 0;

    // Walk the free list; each entry contributes 2^uOrder pages.
    for (Page *pPage = (Page *)pZone->m_buddyLists[uOrder].m_listHead.First();
        (Page *)&pZone->m_buddyLists[uOrder].m_listHead != pPage;
        pPage = (Page *)pPage->Next(), uNumberOfFreePages += 1 << uOrder) {

        uint_t uPageNo       = pPage - g_pPages;
        uint_t uOffsetPageNo = uPageNo - uBasePageNo;
        uint_t uPageNoMask   = (~0u) << uOrder;
        // A listed block must be naturally aligned for its order.
        if ((uOffsetPageNo & uPageNoMask) != uOffsetPageNo) {
            kprintf("*ERROR* Invalid block (BlockPageNo: 0x%x)\n", uPageNo);
        }
        else {
            kprintf("[0x%x] ", uPageNo);
        }

        // Reserved pages must never appear on a free list.
        if (pPage->m_uFlags & PageFlag_Reserved) {
            kprintf("*ERROR* Reserved page (No: 0x%x) in buddy list[%d]\n",
                    uPageNo, uOrder);
        }
    }
    if (0 < uNumberOfFreePages) kprintf("\n");

    // The top order has no buddy pairs to cross-check.
    if (c_uNumberOfOrders - 1 == uOrder) return uNumberOfFreePages;

    uint_t uNumberOfZonePages =
        pZone->m_region.uLimitPageNo - pZone->m_region.uBasePageNo;

    // Cross-check every buddy pair of this order against the bitmap:
    // bit clear  => neither buddy may be on the free list;
    // bit set    => exactly one buddy must be on the free list.
    for (uint_t uBuddyBlockNo = 0;
        uBuddyBlockNo < (uNumberOfZonePages >> (uOrder + 1));
        uBuddyBlockNo++) {
        uint_t uBlock1PageNo = uBasePageNo + (uBuddyBlockNo << (uOrder + 1));
        uint_t uBlock2PageNo =
            uBasePageNo + (uBuddyBlockNo << (uOrder + 1)) + (1 << uOrder);

        bool_t bExist1 = ExistBlockInList(pZone, uBlock1PageNo, uOrder);
        bool_t bExist2 = ExistBlockInList(pZone, uBlock2PageNo, uOrder);

        // Both buddies free should have been coalesced to a higher order.
        if (bExist1 && bExist2) {
            kprintf("*ERROR* Invalid buddy blocks exist "
                    "(BlockPageNo: 0x%x, 0x%x)\n",
                    uBlock1PageNo, uBlock2PageNo);
        }

        if (0 == TestBit(uBuddyBlockNo, pZone->m_buddyLists[uOrder].m_pbMap)) {
            if (bExist1) {
                kprintf("*ERROR* Buddy block 1 exist (BlockPageNo: 0x%x)\n",
                        uBlock1PageNo);
            }

            if (bExist2) {
                kprintf("*ERROR* Buddy block 2 exist (BlockPageNo: 0x%x)\n",
                        uBlock2PageNo);
            }
        }
        else {
            if (!bExist1 && !bExist2) {
                kprintf("*ERROR* Invalid buddy blocks not exist "
                        "(BlockPageNo: 0x%x, 0x%x)\n",
                        uBlock1PageNo, uBlock2PageNo);
            }
        }
    }

    return uNumberOfFreePages;
}

//
// Dump one zone's header and every buddy list, then cross-check the
// walked free-page total against the zone's counter.
//
STATIC DECL_CODEINIT void DumpZone(Zone *pZone)
{
    uint_t uZoneNo = pZone - g_zones;
    kprintf("Zone[%d]: [%x, %x), approx max order: %d, 0x%x free pages\n",
            uZoneNo,
            pZone->m_region.uBasePageNo, pZone->m_region.uLimitPageNo,
            pZone->m_nApproxMaxOrder, pZone->m_uNumberOfFreePages);

    // Accumulate the pages actually found free on each list.
    uint_t uCountedFreePages = 0;
    uint_t uOrder = 0;
    while (uOrder < c_uNumberOfOrders) {
        uCountedFreePages += DumpBuddyList(pZone, uOrder);
        uOrder++;
    }

    if (uCountedFreePages != pZone->m_uNumberOfFreePages) {
        kprintf("*ERROR* Invalid free pages(expected: 0x%x, actual: 0x%x)"
                " in Zone[%d]\n",
                pZone->m_uNumberOfFreePages, uCountedFreePages, uZoneNo);
    }
}

#endif // _RELEASE

//
// Print the buddy-allocator state of every zone (no-op in release builds).
//
EXTERN DECL_CODEINIT void DumpZones()
{
#ifndef _RELEASE
    kprintf("<<<<<< Zones: %d zones, max order: %d\n",
            dz.uNumberOfZones, c_uNumberOfOrders - 1);

    uint_t uZoneNo = 0;
    while (uZoneNo < dz.uNumberOfZones) {
        DumpZone(&g_zones[uZoneNo]);
        uZoneNo++;
    }

    kprintf(">>>>>>\n");
#endif // _RELEASE
}

//
// only for KERNEL_MEMORY_DETECT
//
#if defined(KERNEL_MEMORY_DETECT)
//
// Detection-build wrapper: allocate pages, then record the block in the
// memory-detector hash so frees can be validated.
// NOTE: KMEM_INSERT_PAGES_INTO_HASH expands the locals `paPages' and
// `uOrder' by name and supplies its own trailing semicolon -- do not
// rename these locals or append a `;' to the macro invocation.
//
physaddr_t DzAllocPages(uint_t uOrder, uint_t uPreferredZoneNo)
{
    physaddr_t paPages = KMemAllocPages(uOrder, uPreferredZoneNo);
    if (INVALID_PAGE_ADDRESS == paPages) return INVALID_PAGE_ADDRESS;

    KMEM_INSERT_PAGES_INTO_HASH

    return paPages;
}

//
// Detection-build wrapper: detach the block from the detector hash
// (trapping on invalid/double frees), then free the pages.
// NOTE: KMEM_DETACH_PAGES_FROM_HASH expands the local `pa' by name and
// supplies its own trailing semicolon -- do not rename this parameter
// or append a `;' to the macro invocation.
//
void DzFreePages(physaddr_t pa, uint_t uOrder)
{
    // The address must be page-aligned.
    assert(RoundUp(pa, PAGE_SIZE) == pa);

    KMEM_DETACH_PAGES_FROM_HASH

    KMemFreePages(pa, uOrder);
}

#endif //KERNEL_MEMORY_DETECT
