//==========================================================================
// Copyright (c) 2000-2008,  Elastos, Inc.  All Rights Reserved.
//==========================================================================
#include <winhack.h>
#include <CMalloc.h>
#include <aura.h>
#include <stdlib.h>

#define PAGESIZE                 0x1000
#define PAGESIZE_ROUNDOFF        0xfff
#define RoundPages(size)         ((size + PAGESIZE_ROUNDOFF) & (~PAGESIZE_ROUNDOFF))
#define HasRoundPages(Size)      ((Size & 0xfff) == 0)
#define HasRoundAddress(Address) (((DWORD)Address & 0xfff) == 0)
#define Round4Bytes(size)        ((size + 3) & (~3))

extern CMalloc* g_pMalloc;
RTL_CRITICAL_SECTION g_valveLock;
Int32 g_totalMemSpySize = 0;

// Bookkeeping record stored inside every guarded heap block.
// CPreBChkMalloc places it at the very END of the committed pages;
// CPostBChkMalloc places it immediately BEFORE the user pointer.
typedef struct _BChkHeader {
    PVoid pAllocPage; // Pointer to the address of 1st page, i.e. the base
                      // of the whole VirtualAlloc reservation (the address
                      // that gets released in Free).
    Int32 Size;       // The size of this heap block including this header
                      // (user request + sizeof(BChkHeader), not page
                      // rounded; CPostBChkMalloc rounds the request to a
                      // 4-byte multiple first).
} BChkHeader, *PBChkHeader;

// Returns a pseudo-random integer in the range [0, 9].
int Rand()
{
    int value = rand();
    return value % 10;
}

// Returns the size in bytes of the committed region that starts at pv,
// by querying the VM system rather than trusting any header bookkeeping.
//
// The asserts pin down the CPreBChkMalloc layout: pv must be the start of
// a committed, read/write region that begins exactly one page past the
// allocation base (the leading page is the reserved guard page).
static Int32 GetCommitMemorySize(PVoid pv)
{
    MEMORY_BASIC_INFORMATION memInfo;
    SIZE_T res = 0;

    memset((void *)&memInfo, 0, sizeof(memInfo));
    res = VirtualQuery(pv, &memInfo, sizeof(memInfo));
    assert(res == sizeof(memInfo));
    assert(pv == memInfo.BaseAddress);
    // pv sits one page above the reservation base: guard page layout.
    assert(pv == (PVoid)((Byte *)memInfo.AllocationBase + PAGESIZE));
    assert(MEM_COMMIT == memInfo.State);
    assert(PAGE_READWRITE == memInfo.Protect);

    // RegionSize covers the run of pages sharing pv's state, i.e. the
    // whole committed block (the trailing pages change state/protection).
    return (Int32)memInfo.RegionSize;
}

extern "C" void * memset(void *dest, int c, size_t n);
extern "C" void * __cdecl memcpy(void * pDest, const void * pSource, size_t cb);
extern "C" void* __cdecl memmove(void* pDest, const void* pSource, size_t count);
extern "C" int wcscmp (const wchar_t* string1, const wchar_t* string2);
extern "C" wchar_t* wcscpy (wchar_t* strDestination, const wchar_t* strSource);

//
// CPreBChkMalloc:
// (Previous Memory Bounds Checker Malloc Object)
// When reading or writing before the heap block,
// the system will catch the error right away!
//
// Theory:
// For each heap block, commit 1 or n physical pages,
// and the block occupies the part from the beginning.
// The preceding page is reserved.
// If reading or writing before the heap block,
// the address probably falls into that preceding page.
// The error is then detected immediately,
// since that page is only reserved, not committed.
//

// | 1 Page (Reserved, NOT Commited) |        1 ~ n Pages (Commited)       |
// +---------------------------------+----------------+--------------------+
// |              RESERVED           | User Content   | Not Used   | Header|
// +---------------------------------+----------------+--------------------+
// |                                 ^                                     |
//                                   |
//                                   |__Return Address For Users

// Allocates cb usable bytes with a reserved (uncommitted) guard page just
// below the returned address, so a read/write before the block faults.
// Returns the page-aligned start of the committed pages, or NULL.
PVoid CPreBChkMalloc::Alloc(size_t cb)
{
    Int32 cbCommit = RoundPages(cb + sizeof(BChkHeader));

    assert(cb >= 0);

    // Reserve guard page + data pages as one region.
    Byte * pReserve = (Byte *)VirtualAlloc(NULL,
                                           cbCommit + PAGESIZE,
                                           MEM_RESERVE,
                                           PAGE_NOACCESS);
    if (NULL == pReserve) {
        return NULL;
    }

    // Commit everything past the leading guard page.
    Byte * pCommit = (Byte *)VirtualAlloc(pReserve + PAGESIZE,
                                          cbCommit,
                                          MEM_COMMIT,
                                          PAGE_READWRITE);
    if (NULL == pCommit) {
        VirtualFree(pReserve, 0, MEM_FREE);
        return NULL;
    }

    // The header lives at the very end of the committed pages.
    BChkHeader * pHdr = (BChkHeader *)(pCommit + cbCommit) - 1;
    pHdr->pAllocPage = (PVoid)pReserve;
    pHdr->Size = (Int32)cb + sizeof(BChkHeader);

    // Track the total footprint of all spy allocations.
    EnterCriticalSection(&g_valveLock);
    g_totalMemSpySize += (cbCommit + PAGESIZE);
    LeaveCriticalSection(&g_valveLock);

    return (PVoid)pCommit;
}

// Resizes a block obtained from CPreBChkMalloc::Alloc.
// NULL pv degenerates to Alloc(cb); cb == 0 frees pv and returns NULL.
// A shrink is done in place (only the header's Size is reduced);
// a grow allocates a fresh block, copies the old contents, and frees pv.
PVoid CPreBChkMalloc::Realloc(PVoid pv, size_t cb)
{
    Byte * pCommitMemory = (Byte *)pv;
    PVoid  pUserMemory   = NULL;
    BChkHeader * pHeader = NULL;
    Int32 commitSize = 0;
    Int32 originalCb = 0;

    assert(HasRoundAddress(pv));
    assert(cb >= 0);

    if (NULL == pv) {
        return CPreBChkMalloc::Alloc(cb);
    }

    commitSize = GetCommitMemorySize(pv);
    pHeader = (BChkHeader *)(pCommitMemory + commitSize) - 1;
    assert(pHeader->pAllocPage == (pCommitMemory - PAGESIZE));
    // BUGFIX: was `commitSize == RoundPages(pHeader->Size)`.  After a
    // shrinking Realloc the header's Size is reduced while the committed
    // region keeps its original size, so strict equality fires falsely on
    // the next Realloc of the same block (the matching assert in Free was
    // already commented out for this reason).  The real invariant is that
    // the committed pages are large enough for the recorded block size.
    assert(commitSize >= RoundPages(pHeader->Size));
    originalCb = pHeader->Size - sizeof(BChkHeader);

    if (!cb) {
        // realloc(p, 0) semantics: free and return NULL.
        CPreBChkMalloc::Free(pv);
    }
    else if ((Int32)cb <= originalCb) {
        // Shrink in place; the committed pages are not decommitted.
        pHeader->Size = cb + sizeof(BChkHeader);
        pUserMemory = pv;
    }
    else {
        // Grow: allocate, copy the old user contents, release the old block.
        pUserMemory = CPreBChkMalloc::Alloc(cb);
        if (NULL == pUserMemory) {
            return NULL;
        }
        memcpy(pUserMemory, pv, originalCb);
        CPreBChkMalloc::Free(pv);
    }

    return pUserMemory;
}

// Releases a block obtained from CPreBChkMalloc::Alloc/Realloc.
// NULL is a no-op, mirroring free(NULL).
void CPreBChkMalloc::Free(PVoid pv)
{
    assert(HasRoundAddress(pv));

    if (NULL == pv) {
        return;
    }

    Byte * pBlock = (Byte *)pv;
    Int32 cbCommit = GetCommitMemorySize(pv);
    BChkHeader * pHdr = (BChkHeader *)(pBlock + cbCommit) - 1;

    // The reserved guard page sits directly below the committed block.
    assert(pHdr->pAllocPage == (pBlock - PAGESIZE));
    // NOTE: cbCommit == RoundPages(pHdr->Size) is NOT asserted here:
    // it can be violated after Realloc has shrunk the block in place.

    // Decommit the data pages, then release the whole reservation
    // (guard page + data pages).
    VirtualFree(pv, cbCommit, MEM_DECOMMIT);
    VirtualFree(pBlock - PAGESIZE, 0, MEM_FREE);

    EnterCriticalSection(&g_valveLock);
    g_totalMemSpySize -= (cbCommit + PAGESIZE);
    LeaveCriticalSection(&g_valveLock);
}

//
//
// CPostBChkMalloc:
// (Post Memory Bounds Checker Malloc Object)
// When reading or writing after the heap block,
// the system will catch the error right away!
//
// Theory:
// For each heap block, commit 1 or n physical pages,
// and the block occupies the tail of these pages.
// The following page is reserved.
// If reading or writing after the heap block,
// the address probably falls into that following page.
// The error is then detected immediately,
// since that page is only reserved, not committed.
//

// |        1 ~ n Pages (Commited)       | 1 Page (Reserved, NOT Commited) |
// +----------------+------+-------------+---------------------------------+
// | Not Used       |Header| User Content|              RESERVED           |
// +----------------+------+-------------+---------------------------------+
// |                       ^             |
//                         |
//                         |__Return Address For Users

// NOTE: It needs lots of memory! So it should be only used when necessary.

// Allocates cb usable bytes pushed right up against a reserved
// (uncommitted) guard page, so a read/write past the end of the block
// faults immediately.  Returns the user pointer, or NULL on failure.
PVoid CPostBChkMalloc::Alloc(size_t cb)
{
    assert(cb >= 0);
    cb = Round4Bytes(cb); // keep the returned pointer 4-byte aligned

    Int32 cbCommit = RoundPages(cb + sizeof(BChkHeader));

    // Reserve data pages plus one trailing guard page as a single region.
    Byte * pReserve = (Byte *)VirtualAlloc(NULL,
                                           cbCommit + PAGESIZE,
                                           MEM_RESERVE,
                                           PAGE_NOACCESS);
    if (NULL == pReserve) {
        return NULL;
    }

    // Commit only the leading pages; the last page stays reserved.
    Byte * pCommit = (Byte *)VirtualAlloc(pReserve,
                                          cbCommit,
                                          MEM_COMMIT,
                                          PAGE_READWRITE);
    if (NULL == pCommit) {
        VirtualFree(pReserve, 0, MEM_FREE);
        return NULL;
    }

    // User content ends exactly at the guard page; the header sits
    // immediately below the user pointer.
    PVoid pUser = pCommit + cbCommit - cb;
    BChkHeader * pHdr = (BChkHeader *)pUser - 1;
    pHdr->pAllocPage = pReserve;
    pHdr->Size = cb + sizeof(BChkHeader);

    // Track the total footprint of all spy allocations.
    EnterCriticalSection(&g_valveLock);
    g_totalMemSpySize += (cbCommit + PAGESIZE);
    LeaveCriticalSection(&g_valveLock);

    return pUser;
}

// Resizes a block obtained from CPostBChkMalloc::Alloc.
// NULL pv degenerates to Alloc(cb); cb == 0 frees pv and returns NULL.
// A shrink slides the contents toward the guard page so the end of the
// user data stays flush against it; a grow allocates a fresh block,
// copies the old contents, and frees pv.
// NOTE(review): the returned pointer usually differs from pv even on a
// shrink — callers must always use the returned value.
PVoid CPostBChkMalloc::Realloc(PVoid pv, size_t cb)
{
    PVoid pReserveMemory = NULL;
    PVoid pUserMemory    = NULL;
    BChkHeader * pHeader = NULL;
    Int32 originalCb = 0;

    assert(cb >= 0);
    cb = Round4Bytes(cb); // match Alloc's 4-byte rounding

    if (NULL == pv) {
        return CPostBChkMalloc::Alloc(cb);
    }

    // The header sits immediately below the user pointer.
    pHeader = (BChkHeader *)pv - 1;
    originalCb = pHeader->Size - sizeof(BChkHeader);
    pReserveMemory = pHeader->pAllocPage;

    assert(HasRoundAddress(pReserveMemory));
    // The end of the user data must be page-aligned (flush with the guard).
    assert(HasRoundAddress((Byte *)pv + originalCb));

    if (!cb) {
        // realloc(p, 0) semantics: free and return NULL.
        CPostBChkMalloc::Free(pv);
    }
    else if ((Int32)cb <= originalCb) {
        // Shrink in place: slide the kept bytes up so they end at the
        // guard page again, then rewrite the header just below the new
        // user pointer.  memmove first — the new header location overlaps
        // bytes of the old user area that have already been copied out.
        pUserMemory = (Byte *)pv + (originalCb - cb);
        memmove(pUserMemory, pv, cb);
        pHeader = (BChkHeader *)pUserMemory - 1;
        pHeader->pAllocPage = pReserveMemory;
        pHeader->Size = cb + sizeof(BChkHeader);
    }
    else {
        // Grow: allocate, copy the old user contents, release the old block.
        pUserMemory = CPostBChkMalloc::Alloc(cb);
        if (NULL == pUserMemory) {
            return NULL;
        }
        memcpy(pUserMemory, pv, originalCb);
        CPostBChkMalloc::Free(pv);
    }

    return pUserMemory;
}

// Releases a block obtained from CPostBChkMalloc::Alloc/Realloc.
// NULL is a no-op, mirroring free(NULL).
void CPostBChkMalloc::Free(PVoid pv)
{
    PVoid pReserveMemory = NULL;
    PVoid pCommitMemory  = NULL;
    BChkHeader * pHeader = NULL;
    MEMORY_BASIC_INFORMATION memInfo;
    SIZE_T res = 0;
    Int32 cb = 0;
    Int32 commitSize = 0;

    if (NULL == pv) return ;

    pHeader = (BChkHeader *)pv - 1;
    cb = pHeader->Size - sizeof(BChkHeader);
    // For the post checker the commit starts at the reservation base.
    pReserveMemory = (pCommitMemory = pHeader->pAllocPage);

    assert(HasRoundAddress(pReserveMemory));
    // The end of the user data must be page-aligned (flush with the guard).
    assert(HasRoundAddress((Byte *)pv + cb));

    // BUGFIX: this used commitSize = RoundPages(pHeader->Size), which is
    // stale after Realloc has shrunk the block in place: Alloc added the
    // ORIGINAL commit size to g_totalMemSpySize, so subtracting the
    // rounded shrunken size here leaked accounting (and decommitted too
    // little).  Query the VM system for the real committed size instead,
    // as CPreBChkMalloc::Free does via GetCommitMemorySize().
    memset((void *)&memInfo, 0, sizeof(memInfo));
    res = VirtualQuery(pCommitMemory, &memInfo, sizeof(memInfo));
    assert(res == sizeof(memInfo));
    assert(pCommitMemory == memInfo.BaseAddress);
    assert(MEM_COMMIT == memInfo.State);
    commitSize = (Int32)memInfo.RegionSize;
    assert(commitSize >= RoundPages(pHeader->Size));

    VirtualFree(pCommitMemory, commitSize, MEM_DECOMMIT);
    VirtualFree(pReserveMemory, 0, MEM_FREE);

    EnterCriticalSection(&g_valveLock);
    g_totalMemSpySize -= (commitSize + PAGESIZE);
    LeaveCriticalSection(&g_valveLock);

    return ;
}

#define MAGIC_MEMSPY_MALLOC   0x87345135
#define MAGIC_NORMAL_MALLOC 0x74334143

// Wraps a bounds-checking "spy" allocator: pMalloc handles a random
// fraction of allocations whose size falls in [minSpySize, maxSpySize],
// while total spy memory stays under valveSpySize.  Takes ownership of
// pMalloc (deleted in the destructor).
CHyperMalloc::CHyperMalloc(CMalloc * pMalloc,
                           Int32 minSpySize,
                           Int32 maxSpySize,
                           Int32 valveSpySize)
    : m_pSpyMalloc(pMalloc),
      m_minSpySize(minSpySize),
      m_maxSpySize(maxSpySize),
      m_valveSpySize(valveSpySize)
{
    // Guards the global g_totalMemSpySize counter.
    InitializeCriticalSection(&g_valveLock);
}

// Releases the owned spy allocator and tears down the valve lock.
CHyperMalloc::~CHyperMalloc()
{
    delete m_pSpyMalloc;
    DeleteCriticalSection(&g_valveLock);
}

// Allocates cb bytes, randomly routing eligible requests through the spy
// (bounds-checking) allocator.  Every block gets a 4-byte magic prefix
// recording which allocator produced it, so Realloc/Free can route the
// pointer back; the user sees the address just past that prefix.
PVoid CHyperMalloc::Alloc(size_t cb)
{
    size_t cbTotal = cb + 4;            // room for the magic prefix
    PVoid  pBlock  = NULL;
    Int32  chance  = Rand();            // 0..9: drives the spy probability

    bool outOfRange = (cb < (size_t)m_minSpySize) || (cb > (size_t)m_maxSpySize);
    bool overValve  = (g_totalMemSpySize > m_valveSpySize);
    // 20% of otherwise-eligible requests still go to the normal allocator.
    bool skipSpy    = (g_totalMemSpySize < m_valveSpySize) && (chance < 2);

    if (outOfRange || skipSpy || overValve) {
        pBlock = Aura_malloc(cbTotal);
        if (pBlock != NULL) {
            *(UInt32 *)pBlock = MAGIC_NORMAL_MALLOC;
        }
    }
    else {
        // The remaining 80%, while g_totalMemSpySize is below the valve.
        pBlock = m_pSpyMalloc->Alloc(cbTotal);
        if (pBlock != NULL) {
            *(UInt32 *)pBlock = MAGIC_MEMSPY_MALLOC;
        }
    }

    if (pBlock != NULL) {
        pBlock = (Byte *)pBlock + 4;    // hide the prefix from the caller
    }

    return pBlock;
}

// Resizes a block returned by CHyperMalloc::Alloc, dispatching to the
// allocator recorded in the 4-byte magic prefix below the user pointer.
// NULL pv degenerates to Alloc(cb); cb == 0 frees pv and returns NULL.
PVoid CHyperMalloc::Realloc(PVoid pv, size_t cb)
{
    PVoid  pHyperAddr = NULL;
    UInt32 magic = 0;
    PVoid  pMem = NULL;

    if (NULL == pv) {
        return CHyperMalloc::Alloc(cb);
    }

    pHyperAddr = (Byte *)pv - 4; // real start of the block (magic prefix)
    magic = *(UInt32 *)pHyperAddr;

    if (MAGIC_NORMAL_MALLOC == magic) {
        if (!cb) {
            Aura_free(pHyperAddr);
        }
        else {
            pMem = Aura_realloc(pHyperAddr, cb + 4);
            if (NULL != pMem) {
                *(UInt32 *)pMem = MAGIC_NORMAL_MALLOC;
            }
        }
    }
    else if (MAGIC_MEMSPY_MALLOC == magic) {
        if (!cb) {
            // BUGFIX: was m_pSpyMalloc->Free(pv).  The spy block starts at
            // pHyperAddr; passing pv freed an address 4 bytes inside the
            // block (the normal-malloc branch above and CHyperMalloc::Free
            // both correctly free pHyperAddr).
            m_pSpyMalloc->Free(pHyperAddr);
        }
        else {
            pMem = m_pSpyMalloc->Realloc(pHyperAddr, cb + 4);
            if (NULL != pMem) {
                *(UInt32 *)pMem = MAGIC_MEMSPY_MALLOC;
            }
        }
    }
    else {
        // Unknown magic: pointer was not produced by this allocator,
        // or the prefix was trampled by an underrun.
        assert(0);
    }

    if (NULL != pMem) {
        pMem = (Byte *)pMem + 4; // hide the prefix from the caller
    }
    return pMem;
}

// Frees a block returned by CHyperMalloc::Alloc/Realloc, dispatching to
// the allocator recorded in the 4-byte magic prefix below the user
// pointer.  NULL is a no-op.
void CHyperMalloc::Free(PVoid pv)
{
    if (NULL == pv) {
        return;
    }

    PVoid pRealAddr = (Byte *)pv - 4; // real start of the block
    UInt32 tag = *(UInt32 *)pRealAddr;

    switch (tag) {
    case MAGIC_NORMAL_MALLOC:
        Aura_free(pRealAddr);
        break;
    case MAGIC_MEMSPY_MALLOC:
        m_pSpyMalloc->Free(pRealAddr);
        break;
    default:
        // Unknown magic: foreign pointer or trampled prefix.
        assert(0);
        break;
    }
}
