#include <sys/mman.h>
#include <aura.h>
#include <private.h>
#include <linknode.h>

// Process-wide registry of every region mapped through this module.
static DLinkNode   s_virtualMemoryList;
// Critical section guarding all access to s_virtualMemoryList.
static Aura_Handle s_hVMListLock;

// Iterates the registry; ForEachDLinkNode binds each node to a local
// variable named `pVM` of type `struct VirtualMemory *`, which is why
// every function below declares (and shadows into) `pVM`.
#define TRAVERSE_VIRTUAL_MEMORY_LIST \
     ForEachDLinkNode(struct VirtualMemory *, pVM, &s_virtualMemoryList)

// True when `addr` lies inside the half-open range [start, start + size).
// Every argument and the whole expansion are parenthesized so that
// expression arguments (e.g. a `size` of `a | b`) keep their intended
// grouping instead of re-associating with the surrounding operators.
#define FALLINTO_AREA(addr, start, size) \
        (((addr) >= (start)) && ((addr) < ((start) + (size))))

// 4 KiB page size, hard-coded.  NOTE(review): assumes the platform page
// size is 4 KiB — TODO confirm (e.g. against sysconf(_SC_PAGESIZE)).
#define _PAGE_SIZE          (1u << 12)
// True when `va` is a multiple of the page size.
#define IS_PAGE_ALIGNED(va) (0 == ((va) & (_PAGE_SIZE -1)))

// Shorthand for entering/leaving the list critical section.
#define LOCK   START_LOCK(s_hVMListLock)
#define UNLOCK END_LOCK(s_hVMListLock)

// Bookkeeping record for one mapped region; linked onto
// s_virtualMemoryList.  (Inherits from DLinkNode, so this file is
// compiled as C++ despite its C-style layout.)
struct VirtualMemory : public DLinkNode
{
    Aura_Address          m_baseAddr;   // page-aligned start of the region
    Aura_MemorySize       m_memorySize; // region length in bytes
    Aura_MemoryMapFlags   m_mapFlags;   // flags the region was mapped with
    Aura_MemoryProtection m_proection;  // recorded protection (sic: field name is misspelled; used by several functions below, so left as-is)
};

// One-time module setup: empty the tracking list, then create the
// critical section that guards it.  Returns the critical-section
// creation result.
Aura_ECode VirtualMemory_Initialize()
{
    // The list must be usable before the lock exists; nothing else
    // runs concurrently at this point.
    s_virtualMemoryList.Initialize();

    Aura_ECode ec = AuraCriticalSection_Create(&s_hVMListLock);
    return ec;
}

// Module teardown: unmap every tracked region, free its bookkeeping
// node, then destroy the list lock.
// NOTE(review): no LOCK is taken here — presumably destruction runs
// single-threaded at shutdown; confirm with callers.
Aura_Void  VirtualMemory_Destroy()
{
    struct VirtualMemory * pVM = NULL;

    while(!s_virtualMemoryList.IsEmpty()) {
        pVM = (struct VirtualMemory *)s_virtualMemoryList.Next();
        // munmap() takes a size_t length; the previous (int) cast
        // truncated sizes >= 2 GiB on LP64 platforms.
        munmap((void *)pVM->m_baseAddr, (size_t)pVM->m_memorySize);
        pVM->Detach();
        Aura_free(pVM);
    }
    AuraCriticalSection_Destroy(s_hVMListLock);
}

// Maps a new anonymous region of `size` bytes (both vBaseAddr and size
// must be page-aligned) and records it on the registry.  On success the
// actual base address is written to *pBaseAddr.
// Returns AURA_E_ALREADY_EXIST if a fixed-address request collides with
// an existing tracked region, AURA_E_OUT_OF_MEMORY /
// AURA_E_NOT_ENOUGH_ADDRESS_SPACE on allocation or mmap failure.
AURA_API AuraVirtualMemory_Map(
        /* [in] */ Aura_Address vBaseAddr,
        /* [in] */ Aura_MemorySize size,
        /* [in] */ Aura_MemoryMapFlags flags,
        /* [in] */ Aura_MemoryProtection protect,
        /* [out] */ Aura_Address *pBaseAddr)
{
    struct VirtualMemory * pVM = NULL;
    Aura_ECode ec = AURA_NOERROR;
    void * pAddr  = NULL;
    int lxProtect = 0;
    int lxFlags   = 0;

#if defined(_arm)
    // Workaround: on ARM the static list's constructor runs at an
    // unexpected time (cause unknown), so (re)initialize lazily on
    // first use.
    static Aura_Bool s_bListInitialized = AURA_FALSE;
    if (!s_bListInitialized) {
        s_bListInitialized = AURA_TRUE;
        s_virtualMemoryList.Initialize();
    }
#endif

    assert(IS_PAGE_ALIGNED(vBaseAddr));
    assert(size > 0);
    assert(IS_PAGE_ALIGNED(size));
    assert(NULL != pBaseAddr);

    LOCK;
    if ((flags & Aura_MemoryMap_Fixed)
        && (vBaseAddr != 0)) {
        TRAVERSE_VIRTUAL_MEMORY_LIST {
            // Reject the fixed request if the new range
            // [vBaseAddr, vBaseAddr + size) intersects an existing
            // region at all.  The old test only checked whether the
            // START address fell inside a region, so a request that
            // began below a mapping but ran into it slipped through.
            if ((vBaseAddr < pVM->m_baseAddr + pVM->m_memorySize)
                && (pVM->m_baseAddr < vBaseAddr + size)) {
                UNLOCK;
                return AURA_E_ALREADY_EXIST;
            }
        }
    }

    pVM = (struct VirtualMemory *)Aura_malloc(sizeof(*pVM));
    if (NULL == pVM) {
        ec = AURA_E_OUT_OF_MEMORY;
        goto E_FAIL_EXIT;
    }
    lxFlags   = AuraMemoryMapFlagsConvertToLinux(flags);
    lxProtect = AuraMemoryProtectionConvertToLinux(protect);

    // fd == -1 / offset == 0: not file-backed.  NOTE(review): assumes
    // AuraMemoryMapFlagsConvertToLinux() sets MAP_ANONYMOUS — confirm
    // in the flag-conversion helper.
    pAddr = mmap((void *)vBaseAddr,
                (size_t)size,
                lxProtect,
                lxFlags,
                -1,
                0);
    if ((void *)MAP_FAILED == pAddr) {
        ec = GetLastAuraECode();
        // ENOMEM from mmap means address space, not heap, ran out.
        if (AURA_E_OUT_OF_MEMORY == ec) {
            ec = AURA_E_NOT_ENOUGH_ADDRESS_SPACE;
        }
        goto E_FAIL_EXIT;
    }
    pVM->m_baseAddr = (Aura_Address)pAddr;
    pVM->m_mapFlags = flags;
    pVM->m_memorySize = size;
    pVM->m_proection  = protect;
    pVM->Initialize();

    s_virtualMemoryList.InsertLast(pVM);
    UNLOCK;

    *pBaseAddr = (Aura_Address)pAddr;

    return AURA_NOERROR;

E_FAIL_EXIT:
    UNLOCK;
    if (NULL != pVM) {
        Aura_free(pVM);
    }
    return ec;
}

// Looks up the tracked region containing vAddress and reports its base
// address and/or size through the non-NULL out-parameters.  Returns
// AURA_E_DOES_NOT_EXIST when no tracked region contains the address.
AURA_API AuraVirtualMemory_Query(
        /* [in] */ Aura_Address vAddress,
        /* [out] */ Aura_Address * pBaseAddr,
        /* [out] */ Aura_MemorySize *pSize)
{
    struct VirtualMemory * pVM = NULL;
    Aura_ECode ec = AURA_E_DOES_NOT_EXIST;

    // The caller must want at least one of the two results.
    assert((NULL != pSize) || (NULL != pBaseAddr));

    LOCK;
    TRAVERSE_VIRTUAL_MEMORY_LIST {
        if (FALLINTO_AREA(vAddress,
                    pVM->m_baseAddr,
                    pVM->m_memorySize)) {
            if (NULL != pBaseAddr) {
                *pBaseAddr = pVM->m_baseAddr;
            }
            if (NULL != pSize) {
                *pSize = pVM->m_memorySize;
            }
            ec = AURA_NOERROR;
            goto done;
        }
    }

done:
    UNLOCK;
    return ec;
}

// Changes the protection of a previously mapped region.  The region is
// matched by address and `size` must equal the full region size
// (partial-region changes are not supported).
// Returns AURA_E_INVALID_ARGUMENT when no matching region exists or the
// size does not match; propagates the mprotect() error otherwise.
AURA_API AuraVirtualMemory_SetProtection(
        /* [in] */ Aura_Address vBaseAddr,
        /* [in] */ Aura_MemorySize size,
        /* [in] */ Aura_MemoryProtection protect)
{
    struct VirtualMemory * pVM = NULL;
    Aura_ECode ec = AURA_NOERROR;

    assert(size > 0);

    LOCK;
    TRAVERSE_VIRTUAL_MEMORY_LIST {
        if (FALLINTO_AREA(vBaseAddr,
                    pVM->m_baseAddr,
                    pVM->m_memorySize)) {
            if (size != pVM->m_memorySize) {
                goto E_FAIL_EXIT;
            }
            // Apply the change to the actual pages.  Previously only
            // the bookkeeping field was updated, so the kernel kept
            // whatever protection was set at mmap() time while
            // CheckProtection reported the new value.
            if (0 != mprotect((void *)pVM->m_baseAddr,
                        (size_t)pVM->m_memorySize,
                        AuraMemoryProtectionConvertToLinux(protect))) {
                ec = GetLastAuraECode();
                UNLOCK;
                return ec;
            }
            pVM->m_proection = protect;
            UNLOCK;

            return AURA_NOERROR;
        }
    }

E_FAIL_EXIT:
    UNLOCK;

    return AURA_E_INVALID_ARGUMENT;
}

// Reports through *pIsValid whether the tracked region containing
// vAddress was recorded with exactly `protect`.
// NOTE(review): returns AURA_E_ACCESS_DENIED when the address is not
// tracked — the sibling functions use AURA_E_DOES_NOT_EXIST; kept as-is
// since callers may depend on this code.
AURA_API AuraVirtualMemory_CheckProtection(
        /* [in] */ Aura_Address vAddress,
        /* [in] */ Aura_MemoryProtection protect,
        /* [out] */ Aura_Bool *pIsValid)
{
    struct VirtualMemory * pVM = NULL;
    Aura_ECode ec = AURA_E_ACCESS_DENIED;

    assert(NULL != pIsValid);

    LOCK;
    TRAVERSE_VIRTUAL_MEMORY_LIST {
        if (FALLINTO_AREA(vAddress,
                    pVM->m_baseAddr,
                    pVM->m_memorySize)) {
            *pIsValid = (pVM->m_proection == protect)
                        ? AURA_TRUE : AURA_FALSE;
            ec = AURA_NOERROR;
            goto done;
        }
    }

done:
    UNLOCK;
    return ec;
}

// Commits the page containing vAddress.  On this back-end there is no
// separate reserve/commit step, so success is simply "the address is
// inside a tracked region".
AURA_API AuraVirtualMemory_Commit(
        /* [in] */ Aura_Address vAddress)
{
    struct VirtualMemory * pVM = NULL;
    Aura_ECode ec = AURA_E_DOES_NOT_EXIST;

    assert(IS_PAGE_ALIGNED(vAddress));

    LOCK;
    TRAVERSE_VIRTUAL_MEMORY_LIST {
        if (FALLINTO_AREA(vAddress,
                    pVM->m_baseAddr,
                    pVM->m_memorySize)) {
            // Nothing further to do for commission on this platform.
            ec = AURA_NOERROR;
            goto done;
        }
    }

done:
    UNLOCK;
    return ec;
}

// Mapping physical/device memory into user space is not implemented on
// this back-end; always fails with AURA_E_NOT_SUPPORTED.
AURA_API AuraVirtualMemory_IORemap(
        /* [in]  */ Aura_Address virtualAddr,
        /* [in]  */ Aura_MemorySize size,
        /* [in]  */ Aura_Address physicalAddr,
        /* [in]  */ Aura_MemoryMapFlags flags,
        /* [in]  */ Aura_MemoryProtection protect,
        /* [out] */ Aura_Address *pVirtualAddr)
{
    // Silence unused-parameter warnings for the stub.
    (void)virtualAddr;
    (void)size;
    (void)physicalAddr;
    (void)flags;
    (void)protect;
    (void)pVirtualAddr;

    FixMe("Not Supported");
    return AURA_E_NOT_SUPPORTED;
}

// Unmaps a previously mapped region.  The region is matched by address
// and `size` must equal the full region size (partial unmap is not
// supported).  Returns AURA_E_INVALID_ARGUMENT when no matching region
// exists or the size does not match; propagates the munmap() error
// (leaving the region tracked) if the kernel call fails.
AURA_API AuraVirtualMemory_Unmap(
        /* [in]  */ Aura_Address vBaseAddr,
        /* [in]  */ Aura_MemorySize size)
{
    struct VirtualMemory * pVM = NULL;
    Aura_ECode ec = AURA_NOERROR;

    assert(size > 0);
    assert(IS_PAGE_ALIGNED(size));

    LOCK;
    TRAVERSE_VIRTUAL_MEMORY_LIST {
        if (FALLINTO_AREA(vBaseAddr,
                    pVM->m_baseAddr,
                    pVM->m_memorySize)) {
            if (size != pVM->m_memorySize) {
                goto E_FAIL_EXIT;
            }
            // munmap() takes a size_t length; the previous (int) cast
            // truncated sizes >= 2 GiB on LP64 platforms.  Its result
            // was also ignored, silently leaking the mapping on error.
            if (0 != munmap((void *)pVM->m_baseAddr,
                        (size_t)pVM->m_memorySize)) {
                ec = GetLastAuraECode();
                UNLOCK;
                return ec;
            }
            pVM->Detach();
            Aura_free(pVM);

            UNLOCK;

            return AURA_NOERROR;
        }
    }

E_FAIL_EXIT:
    UNLOCK;
    return AURA_E_INVALID_ARGUMENT;
}
