/*
 * Copyright (C) 2006-2020 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * The contents of this file may alternatively be used under the terms
 * of the Common Development and Distribution License Version 1.0
 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
 * VirtualBox OSE distribution, in which case the provisions of the
 * CDDL are applicable instead of those of the GPL.
 *
 * You may elect to license modified versions of this file under the
 * terms and conditions of either the GPL or the CDDL or both.
 */


/*********************************************************************************************************************************
*   Header Files                                                                                                                 *
*********************************************************************************************************************************/
#include "Ntifs.h"
#include "wddm/VBoxMPTypes.h"
#include "CommonDef.h"
#include "memobj.h"
#include "trace.h"
#include "process.h"
#include "internal-memobj.h"

#define NOREF(T) (void)T

/** Process-attach state used while tearing down user-space mappings in
 *  rtR0MemObjNativeFree (RTR0MEMOBJTYPE_MAPPING case). */
enum IMPERSONATE_STATUS
{
    Native,          /**< Running in the owning process context; no attach needed. */
    Impersonated,    /**< KeStackAttachProcess was called; must be balanced with KeUnstackDetachProcess. */
    NotImpersonated  /**< Foreign process could not be attached; the unmap is skipped. */
};

/* Pointers to NT kernel memory APIs, assigned in InitMemObj().  The
 * g_pfnrt* indirection mirrors the IPRT convention where APIs that may be
 * absent on older NT versions are resolved dynamically; here they are
 * simply taken from the import table. */
decltype(ExAllocatePoolWithTag)* g_pfnrtExAllocatePoolWithTag;
decltype(ExFreePoolWithTag)* g_pfnrtExFreePoolWithTag;
decltype(MmAllocatePagesForMdl)* g_pfnrtMmAllocatePagesForMdl;              /* page allocation backing an MDL */
decltype(MmAllocatePagesForMdlEx)* g_pfnrtMmAllocatePagesForMdlEx;          /* as above, with flags (contiguity etc.) */
decltype(MmFreePagesFromMdl)* g_pfnrtMmFreePagesFromMdl;                    /* counterpart of the two above */
decltype(MmMapLockedPagesSpecifyCache)* g_pfnrtMmMapLockedPagesSpecifyCache; /* map an MDL with cache control */
decltype(MmAllocateContiguousMemorySpecifyCache)* g_pfnrtMmAllocateContiguousMemorySpecifyCache; /* contiguous alloc */
decltype(MmUnsecureVirtualMemory)* g_pfnrtMmUnsecureVirtualMemory;

/**
 * Resolves the g_pfnrt* function pointers from the statically linked
 * kernel imports.  Must be called before any rtR0MemObjNative* function.
 */
void InitMemObj()
{
    g_pfnrtExAllocatePoolWithTag = ExAllocatePoolWithTag;
    g_pfnrtExFreePoolWithTag = ExFreePoolWithTag;
    g_pfnrtMmAllocatePagesForMdl = MmAllocatePagesForMdl;
    g_pfnrtMmAllocatePagesForMdlEx = MmAllocatePagesForMdlEx;
    g_pfnrtMmFreePagesFromMdl = MmFreePagesFromMdl;
    g_pfnrtMmMapLockedPagesSpecifyCache = MmMapLockedPagesSpecifyCache;
    g_pfnrtMmAllocateContiguousMemorySpecifyCache = MmAllocateContiguousMemorySpecifyCache;
    g_pfnrtMmUnsecureVirtualMemory = MmUnsecureVirtualMemory;
}

/*********************************************************************************************************************************
*   Defined Constants And Macros                                                                                                 *
*********************************************************************************************************************************/
/** Maximum number of bytes we try to lock down in one go.
 * This is supposed to have a limit right below 256MB, but this appears
 * to actually be much lower. The values here have been determined experimentally.
 */
# define MAX_LOCK_MEM_SIZE   (24*1024*1024) /* 24MB */

/* Newer WDK constants: */
#ifndef MM_ALLOCATE_REQUIRE_CONTIGUOUS_CHUNKS
# define MM_ALLOCATE_REQUIRE_CONTIGUOUS_CHUNKS 0x20
#endif
#ifndef MM_ALLOCATE_FAST_LARGE_PAGES
# define MM_ALLOCATE_FAST_LARGE_PAGES 0x40
#endif

#define IPRT_NT_POOL_TAG    'TRPI'
#define DECLHIDDEN(T) T
/*********************************************************************************************************************************
*   Structures and Typedefs                                                                                                      *
*********************************************************************************************************************************/
/**
 * The NT version of the memory object structure.
 */

typedef struct RTR0MEMOBJNT
{
    /** The core structure (must be first; callers cast between the two). */
    RTR0MEMOBJINTERNAL  Core;
    /** Set if the pages were obtained via MmAllocatePagesForMdl(Ex) and must
     *  therefore be released with MmFreePagesFromMdl + ExFreePool. */
    bool                fAllocatedPagesForMdl;
    /** Set if this is a sub-range mapping of the parent object (owns a
     *  partial MDL in apMdls[0]). */
    bool                fSubMapping;
    /** Pointer returned by MmSecureVirtualMemory, or NULL. */
    PVOID               pvSecureMem;
    /** The number of PMDLs (memory descriptor lists) in the array. */
    UINT32            cMdls;
    /** Array of MDL pointers. (variable size, at least one entry) */
    PMDL                apMdls[1];
} RTR0MEMOBJNT;
/** Pointer to the NT version of the memory object structure. */
typedef RTR0MEMOBJNT *PRTR0MEMOBJNT;


/**
 * Allocates @a cb bytes of kernel memory backed by an MDL and maps it into
 * the system address space.
 *
 * Requests up to _TEMP_LIMIT_MEM may be satisfied by discontiguous physical
 * pages; larger requests first try physically contiguous chunks, then fall
 * back to "prefer contiguous" if that fails.
 *
 * @returns IPRT status code.
 * @param   ppMem        Where to store the new memory object on success.
 * @param   cb           Number of bytes to allocate (<= 1GB).
 * @param   fExecutable  Whether the kernel mapping must be executable
 *                       (only honoured on AMD64/ARM64, see #if below).
 */
DECLHIDDEN(int) rtR0MemObjNativeAllocPage(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
{
    AssertMsgReturn(cb <= _1G, ("%#x\n", cb), VERR_OUT_OF_RANGE); /* for safe size_t -> ULONG */
    RT_NOREF(fExecutable); /* only referenced inside the arch-specific #if below */

    /*
     * Use MmAllocatePagesForMdlEx for all allocations.
     */
    if (   g_pfnrtMmAllocatePagesForMdlEx
        && g_pfnrtMmFreePagesFromMdl
        && g_pfnrtMmMapLockedPagesSpecifyCache)
    {
        PHYSICAL_ADDRESS Zero;
        Zero.QuadPart = 0;
        PHYSICAL_ADDRESS HighAddr;
        HighAddr.QuadPart = MAXLONGLONG;
        PMDL pMdl;
        /*
         * Sizes up to _TEMP_LIMIT_MEM are served by (possibly) discontiguous
         * physical pages; anything larger asks for physically contiguous
         * chunks.  The boundary keeps large allocations from grabbing all the
         * scattered pages, which would hurt other allocation scenarios.
         */
        if (cb <= _TEMP_LIMIT_MEM) {
            pMdl = MmAllocatePagesForMdlEx(Zero, HighAddr, Zero, cb, MmCached, MM_ALLOCATE_FULLY_REQUIRED);
        }
        else {
            pMdl = MmAllocatePagesForMdlEx(Zero, HighAddr, Zero, cb, MmCached, MM_ALLOCATE_FULLY_REQUIRED | MM_ALLOCATE_REQUIRE_CONTIGUOUS_CHUNKS);
        }
        if (!pMdl) {
            /* Retry without the hard contiguity requirement. */
            LOGI("Failed to MmAllocatePagesForMdlEx continuous memory: size %u\n", (unsigned)cb);
            pMdl = MmAllocatePagesForMdlEx(Zero, HighAddr, Zero, cb, MmCached, MM_ALLOCATE_FULLY_REQUIRED | MM_ALLOCATE_PREFER_CONTIGUOUS);
        }

        if (pMdl)
        {
            if (MmGetMdlByteCount(pMdl) >= cb)
            {
                __try
                {
                    void *pv = g_pfnrtMmMapLockedPagesSpecifyCache(pMdl, KernelMode, MmCached, NULL /* no base address */,
                                                                   FALSE /* no bug check on failure */, NormalPagePriority);
                    if (pv)
                    {
#if defined(_AMD64_) || defined(_ARM64_) //RT_ARCH_AMD64
                        if (fExecutable) {
                            MmProtectMdlSystemAddress(pMdl, PAGE_EXECUTE_READWRITE);
                        }
#endif

                        PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_PAGE, pv, cb);
                        if (pMemNt)
                        {
                            pMemNt->fAllocatedPagesForMdl = true;
                            pMemNt->cMdls = 1;
                            pMemNt->apMdls[0] = pMdl;
                            *ppMem = &pMemNt->Core;
                            return VINF_SUCCESS;
                        }
                        MmUnmapLockedPages(pv, pMdl);
                    }
                }
                __except(EXCEPTION_EXECUTE_HANDLER)
                {
# ifdef LOG_ENABLED
                    NTSTATUS rcNt = GetExceptionCode();
                    /* Note: was LOGE((...)) which passed rcNt as the format string. */
                    LOGE("rtR0MemObjNativeAllocPage: Exception Code %#x\n", rcNt);
# endif
                    /* nothing */
                }
            }
            g_pfnrtMmFreePagesFromMdl(pMdl);
            ExFreePool(pMdl);
        }
    }

    LOGE("MmAllocatePagesForMdlEx failed size=%u\n", (unsigned)cb);
    return VERR_NO_MEMORY;
}


/**
 * Allocates memory intended to live below 4GB.
 *
 * Strategy: 1) contiguous allocation (rtR0MemObjNativeAllocCont),
 * 2) plain page allocation (rtR0MemObjNativeAllocPage),
 * 3) MmAllocatePagesForMdlEx restricted to the low 4GB,
 * 4) contiguous allocation again as the last resort.
 *
 * NOTE(review): step 2 does not restrict the physical range to <4GB —
 * confirm that all callers of the "low" variant tolerate this.
 *
 * @returns IPRT status code.
 * @param   ppMem        Where to store the new memory object on success.
 * @param   cb           Number of bytes to allocate.
 * @param   fExecutable  Whether the mapping should be executable.
 */
DECLHIDDEN(int) rtR0MemObjNativeAllocLow(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
{
    int rc;

    /*
     * Try see if we get lucky with a contiguous chunk first...
     */
    rc = rtR0MemObjNativeAllocCont(ppMem, cb, fExecutable);
    if (RT_SUCCESS(rc)) {
        return rc;
    }
    *ppMem = NULL;
    LOGW("rtR0MemObjNativeAllocCont failed: cb=%u\n", (unsigned)cb);

    rc = rtR0MemObjNativeAllocPage(ppMem, cb, fExecutable);
    if (RT_SUCCESS(rc)) {
        return rc;
    }

    *ppMem = NULL;
    LOGE("rtR0MemObjNativeAllocPage failed: cb=%u\n", (unsigned)cb);

    /*
     * Use MmAllocatePagesForMdlEx to specify the range of physical addresses we wish to use.
     */
    if (   g_pfnrtMmAllocatePagesForMdlEx
        && g_pfnrtMmFreePagesFromMdl
        && g_pfnrtMmMapLockedPagesSpecifyCache)
    {
        PHYSICAL_ADDRESS Zero;
        Zero.QuadPart = 0;
        PHYSICAL_ADDRESS HighAddr;
        HighAddr.QuadPart = _4G - 1; /* hard upper bound: low 4GB only */
        PMDL pMdl = MmAllocatePagesForMdlEx(Zero, HighAddr, Zero, cb, MmCached, MM_ALLOCATE_FULLY_REQUIRED);
        if (pMdl)
        {
            if (MmGetMdlByteCount(pMdl) >= cb)
            {
                __try
                {
                    void *pv = g_pfnrtMmMapLockedPagesSpecifyCache(pMdl, KernelMode, MmCached, NULL /* no base address */,
                                                                   FALSE /* no bug check on failure */, NormalPagePriority);
                    if (pv)
                    {
                        PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_LOW, pv, cb);
                        if (pMemNt)
                        {
                            pMemNt->fAllocatedPagesForMdl = true;
                            pMemNt->cMdls = 1;
                            pMemNt->apMdls[0] = pMdl;
                            *ppMem = &pMemNt->Core;
                            return VINF_SUCCESS;
                        }
                        MmUnmapLockedPages(pv, pMdl);
                    }
                }
                __except(EXCEPTION_EXECUTE_HANDLER)
                {
# ifdef LOG_ENABLED
                    NTSTATUS rcNt = GetExceptionCode();
                    /* Note: was LOGE((...)) which passed rcNt as the format string. */
                    LOGE("rtR0MemObjNativeAllocLow: Exception Code %#x\n", rcNt);
# endif
                    /* nothing */
                }
            }
            g_pfnrtMmFreePagesFromMdl(pMdl);
            ExFreePool(pMdl);
        } else {
            LOGE("MmAllocatePagesForMdlEx failed");
        }
    }

    /*
     * Fall back on contiguous memory...
     */
    return rtR0MemObjNativeAllocCont(ppMem, cb, fExecutable);
}


/**
 * Internal worker for rtR0MemObjNativeAllocCont(), rtR0MemObjNativeAllocPhys()
 * and rtR0MemObjNativeAllocPhysNC() that takes a max physical address in addition
 * to what rtR0MemObjNativeAllocCont() does.
 *
 * @returns IPRT status code.
 * @param   ppMem           Where to store the pointer to the ring-0 memory object.
 * @param   cb              The size.
 * @param   fExecutable     Whether the mapping should be executable or not
 *                          (only honoured on AMD64/ARM64, see #if below).
 * @param   PhysHighest     The highest physical address for the pages in allocation.
 *                          NIL_RTHCPHYS means no restriction.
 * @param   uAlignment      The alignment of the physical memory to allocate.
 *                          Supported values are PAGE_SIZE, _2M, _4M and _1G.
 */
static int rtR0MemObjNativeAllocContEx(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable, RTHCPHYS PhysHighest,
                                       size_t uAlignment)
{
    /* NOTE(review): fExecutable is marked unreferenced here yet used below
     * under the AMD64/ARM64 #if; RT_NOREF merely silences the warning on
     * other architectures. */
    RT_NOREF(fExecutable);

    /*
     * Allocate the memory and create an MDL for it.
     */
    PHYSICAL_ADDRESS PhysAddrHighest;
    PhysAddrHighest.QuadPart = PhysHighest == NIL_RTHCPHYS ? MAXLONGLONG : PhysHighest;
    void *pv;
    if (g_pfnrtMmAllocateContiguousMemorySpecifyCache)
    {
        PHYSICAL_ADDRESS PhysAddrLowest, PhysAddrBoundary;
        PhysAddrLowest.QuadPart   = 0;
        /* A zero boundary means "no alignment constraint" to the NT API,
         * which is exactly the PAGE_SIZE case. */
        PhysAddrBoundary.QuadPart = (uAlignment == PAGE_SIZE) ? 0 : uAlignment;
        pv = g_pfnrtMmAllocateContiguousMemorySpecifyCache(cb, PhysAddrLowest, PhysAddrHighest, PhysAddrBoundary, MmCached);
    }
    else if (uAlignment == PAGE_SIZE)
        pv = MmAllocateContiguousMemory(cb, PhysAddrHighest);
    else
        return VERR_NOT_SUPPORTED; /* can't honour larger alignments without the Ex API */
    if (!pv)
        return VERR_NO_MEMORY;

    /* Describe the non-paged allocation with an MDL so the PFNs can be queried. */
    PMDL pMdl = IoAllocateMdl(pv, (ULONG)cb, FALSE, FALSE, NULL);
    if (pMdl)
    {
        MmBuildMdlForNonPagedPool(pMdl);
#if defined(_AMD64_) || defined(_ARM64_) // #ifdef RT_ARCH_AMD64
        if (fExecutable)
            MmProtectMdlSystemAddress(pMdl, PAGE_EXECUTE_READWRITE);
#endif

        PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_CONT, pv, cb);
        if (pMemNt)
        {
            /* First PFN gives the physical base of the contiguous range. */
            pMemNt->Core.u.Cont.Phys = (RTHCPHYS)*MmGetMdlPfnArray(pMdl) << PAGE_SHIFT;
            pMemNt->cMdls = 1;
            pMemNt->apMdls[0] = pMdl;
            *ppMem = &pMemNt->Core;
            return VINF_SUCCESS;
        }

        IoFreeMdl(pMdl);
    }
    MmFreeContiguousMemory(pv);
    return VERR_NO_MEMORY;
}


/**
 * Allocates physically contiguous, page-aligned memory below 4GB.
 * Thin wrapper over rtR0MemObjNativeAllocContEx().
 */
DECLHIDDEN(int) rtR0MemObjNativeAllocCont(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
{
    /* Low 4GB, plain page alignment. */
    return rtR0MemObjNativeAllocContEx(ppMem, cb, fExecutable, _4G - 1, PAGE_SIZE /* alignment */);
}


/**
 * Returns the host-physical address of page @a iPage within @a pMem.
 *
 * @returns The page's physical address, or NIL_RTHCPHYS when it cannot be
 *          determined for the object type.
 * @param   pMem    The memory object.
 * @param   iPage   Zero-based page index into the object.
 */
DECLHIDDEN(RTHCPHYS) rtR0MemObjNativeGetPagePhysAddr(PRTR0MEMOBJINTERNAL pMem, size_t iPage)
{
    PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)pMem;

    /* MDL-backed objects: read the PFN straight out of the MDL array. */
    if (pMemNt->cMdls != 0)
    {
        if (pMemNt->cMdls > 1)
        {
            /* Locked-down memory is split into MAX_LOCK_MEM_SIZE chunks,
               one MDL per chunk. */
            const size_t cPagesPerMdl = MAX_LOCK_MEM_SIZE >> PAGE_SHIFT;
            PPFN_NUMBER  paChunkPfns  = MmGetMdlPfnArray(pMemNt->apMdls[iPage / cPagesPerMdl]);
            return (RTHCPHYS)paChunkPfns[iPage % cPagesPerMdl] << PAGE_SHIFT;
        }

        PPFN_NUMBER paPfns = MmGetMdlPfnArray(pMemNt->apMdls[0]);
        return (RTHCPHYS)paPfns[iPage] << PAGE_SHIFT;
    }

    /* No MDLs: only mappings (via parent) and raw physical ranges resolve. */
    switch (pMemNt->Core.enmType)
    {
        case RTR0MEMOBJTYPE_MAPPING:
            return rtR0MemObjNativeGetPagePhysAddr(pMemNt->Core.uRel.Child.pParent, iPage);

        case RTR0MEMOBJTYPE_PHYS:
            return pMemNt->Core.u.Phys.PhysBase + (iPage << PAGE_SHIFT);

        default:
            /* PAGE, PHYS_NC, LOW, CONT, LOCK, RES_VIRT and anything else. */
            return NIL_RTHCPHYS;
    }
}


/**
 * Frees the native payload of a memory object: unmaps, unlocks, unsecures
 * and releases the MDL(s) and pages as appropriate for the object's type.
 *
 * Fixes vs. previous revision: removed a duplicated validity check and a
 * spurious "Invalid state" log that fired for the perfectly valid
 * sub-mapping state (cMdls == 1 && fSubMapping) in the MAPPING case.
 *
 * @returns VINF_SUCCESS, or VERR_INTERNAL_ERROR on inconsistent state.
 * @param   pMem    The memory object to free (caller frees the object itself).
 */
DECLHIDDEN(int) rtR0MemObjNativeFree(RTR0MEMOBJ pMem)
{
    PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)pMem;

    /*
     * Deal with it on a per type basis (just as a variation).
     */
    switch (pMemNt->Core.enmType)
    {
    case RTR0MEMOBJTYPE_LOW:
        if (pMemNt->fAllocatedPagesForMdl)
        {
            ASSERT(pMemNt->Core.pv && pMemNt->cMdls == 1 && pMemNt->apMdls[0]);
            MmUnmapLockedPages(pMemNt->Core.pv, pMemNt->apMdls[0]);
            pMemNt->Core.pv = NULL;
            if (pMemNt->pvSecureMem)
            {
                g_pfnrtMmUnsecureVirtualMemory(pMemNt->pvSecureMem);
                pMemNt->pvSecureMem = NULL;
            }

            g_pfnrtMmFreePagesFromMdl(pMemNt->apMdls[0]);
            ExFreePool(pMemNt->apMdls[0]);
            pMemNt->apMdls[0] = NULL;
            pMemNt->cMdls = 0;
            break;
        }
        AssertFailed();
        break;

    case RTR0MEMOBJTYPE_PAGE:
        ASSERT(pMemNt->Core.pv);
        if (pMemNt->fAllocatedPagesForMdl)
        {
            /* Allocated by rtR0MemObjNativeAllocPage: unmap, then free pages. */
            ASSERT(pMemNt->Core.pv && pMemNt->cMdls == 1 && pMemNt->apMdls[0]);
            ASSERT(pMemNt->pvSecureMem == NULL);
            MmUnmapLockedPages(pMemNt->Core.pv, pMemNt->apMdls[0]);
            g_pfnrtMmFreePagesFromMdl(pMemNt->apMdls[0]);
            ExFreePool(pMemNt->apMdls[0]);
        }
        else
        {
            /* Pool allocation described by an MDL. */
            if (g_pfnrtExFreePoolWithTag)
                g_pfnrtExFreePoolWithTag(pMemNt->Core.pv, IPRT_NT_POOL_TAG);
            else
                ExFreePool(pMemNt->Core.pv);

            ASSERT(pMemNt->cMdls == 1 && pMemNt->apMdls[0]);
            IoFreeMdl(pMemNt->apMdls[0]);
        }
        pMemNt->Core.pv = NULL;
        pMemNt->apMdls[0] = NULL;
        pMemNt->cMdls = 0;
        break;

    case RTR0MEMOBJTYPE_CONT:
        ASSERT(pMemNt->Core.pv);
        MmFreeContiguousMemory(pMemNt->Core.pv);
        pMemNt->Core.pv = NULL;

        ASSERT(pMemNt->cMdls == 1 && pMemNt->apMdls[0]);
        IoFreeMdl(pMemNt->apMdls[0]);
        pMemNt->apMdls[0] = NULL;
        pMemNt->cMdls = 0;
        break;

    case RTR0MEMOBJTYPE_PHYS:
        /* rtR0MemObjNativeEnterPhys? */
        if (!pMemNt->Core.u.Phys.fAllocated)
        {
            ASSERT(!pMemNt->fAllocatedPagesForMdl);
            /* Nothing to do here. */
            break;
        }
        RT_FALL_THRU();

    case RTR0MEMOBJTYPE_PHYS_NC:
        if (pMemNt->fAllocatedPagesForMdl)
        {
            g_pfnrtMmFreePagesFromMdl(pMemNt->apMdls[0]);
            ExFreePool(pMemNt->apMdls[0]);
            pMemNt->apMdls[0] = NULL;
            pMemNt->cMdls = 0;
            break;
        }
        AssertFailed();
        break;

    case RTR0MEMOBJTYPE_LOCK:
        if (pMemNt->pvSecureMem)
        {
            g_pfnrtMmUnsecureVirtualMemory(pMemNt->pvSecureMem);
            pMemNt->pvSecureMem = NULL;
        }
        /* Large lock-downs are split into multiple MDLs (MAX_LOCK_MEM_SIZE each). */
        for (UINT32 i = 0; i < pMemNt->cMdls; i++)
        {
            MmUnlockPages(pMemNt->apMdls[i]);
            IoFreeMdl(pMemNt->apMdls[i]);
            pMemNt->apMdls[i] = NULL;
        }
        break;

    case RTR0MEMOBJTYPE_RES_VIRT:
        /* Reserved virtual memory is not supported by this port. */
        return VERR_INTERNAL_ERROR;
        break;

    case RTR0MEMOBJTYPE_MAPPING:
    {
        PRTR0MEMOBJNT pMemNtParent = (PRTR0MEMOBJNT)pMemNt->Core.uRel.Child.pParent;
        if (!pMemNtParent || !pMemNt->Core.pv) {
            LOGE("invalid state for pMemNt\n");
            return VERR_INTERNAL_ERROR;
        }
        /* Valid states: plain mapping (no MDL) or sub-mapping (one partial MDL). */
        if (!((pMemNt->cMdls == 0 && !pMemNt->fSubMapping) || (pMemNt->cMdls == 1 && pMemNt->fSubMapping))) {
            LOGE("invalid state for pMemNt\n");
            return VERR_INTERNAL_ERROR;
        }
        ASSERT(pMemNtParent);
        ASSERT(pMemNt->Core.pv);
        ASSERT((pMemNt->cMdls == 0 && !pMemNt->fSubMapping) || (pMemNt->cMdls == 1 && pMemNt->fSubMapping));

        if (pMemNtParent->cMdls)
        {
            if (!(pMemNtParent->cMdls == 1 && pMemNtParent->apMdls[0])) {
                LOGE("invalid state for pMemNtParent\n");
                return VERR_INTERNAL_ERROR;
            }
            ASSERT(pMemNtParent->cMdls == 1 && pMemNtParent->apMdls[0]);

            IMPERSONATE_STATUS ImpersonateStatus = Native;
            KAPC_STATE State;

            /* A user mapping belonging to a foreign process must be unmapped
               in that process's context (if the process is still around). */
            if (pMemNt->Core.u.Mapping.R0Process != NIL_RTR0PROCESS &&
                pMemNt->Core.u.Mapping.R0Process != RTR0ProcHandleSelf())
            {
                LOGI("invalid pMemNt->Core.u.Mapping.R0Process\n");
                ImpersonateStatus = NotImpersonated;

                PEPROCESS R0Process = NULL;

                if (NT_SUCCESS(PsLookupProcessByProcessId((HANDLE)pMemNt->Core.u.Mapping.ProcessId, &R0Process))) {
                    if ((PEPROCESS)pMemNt->Core.u.Mapping.R0Process != R0Process) {
                        /* PID was recycled; the recorded process is gone. */
                        LOGE("PsLookupProcessByProcessId invalid R0Process: pid=%u\n", pMemNt->Core.u.Mapping.ProcessId);
                        ObDereferenceObject(R0Process);
                        return VERR_INTERNAL_ERROR;
                    }
                    if (PsGetProcessExitStatus((PEPROCESS)pMemNt->Core.u.Mapping.R0Process) == STATUS_PENDING)
                    {
                        /* Process is still alive: attach to it so the unmap
                           hits the right address space. */
                        LOGI("invalid R0Process pending, attach process\n");
                        KeStackAttachProcess((PRKPROCESS)pMemNt->Core.u.Mapping.R0Process, &State);
                        ImpersonateStatus = Impersonated;
                    }
                    ObDereferenceObject(R0Process);
                } else {
                    LOGE("PsLookupProcessByProcessId not find: pid=%u\n", pMemNt->Core.u.Mapping.ProcessId);
                }
            }
            if (ImpersonateStatus != NotImpersonated)
            {
                if (!pMemNt->cMdls)
                    MmUnmapLockedPages(pMemNt->Core.pv, pMemNtParent->apMdls[0]);
                else
                {
                    /* Sub-mapping: unmap and release our partial MDL. */
                    MmUnmapLockedPages(pMemNt->Core.pv, pMemNt->apMdls[0]);
                    IoFreeMdl(pMemNt->apMdls[0]);
                    pMemNt->apMdls[0] = NULL;
                }
            } else {
                /* Owning process already gone and not attachable; the mapping
                   died with its address space, so skip the unmap. */
                LOGE("Mdl without unmap: cmdls=%u, fSubMapping=%u, pid=%u\n",
                      pMemNt->cMdls, pMemNt->fSubMapping, pMemNt->Core.u.Mapping.ProcessId);
            }
            if (ImpersonateStatus == Impersonated)
            {
                KeUnstackDetachProcess(&State);
            }
        }
        else
        {
            /* Parent is a raw physical range mapped with MmMapIoSpace. */
            ASSERT(pMemNtParent->Core.enmType == RTR0MEMOBJTYPE_PHYS
                   && !pMemNtParent->Core.u.Phys.fAllocated);
            ASSERT(pMemNt->Core.u.Mapping.R0Process == NIL_RTR0PROCESS);
            ASSERT(!pMemNt->fSubMapping);
            MmUnmapIoSpace(pMemNt->Core.pv, pMemNt->Core.cb);
        }
        pMemNt->Core.pv = NULL;
        break;
    }

    default: {
        LOGE("invalid pMemNt->Core.enmType: %u\n", pMemNt->Core.enmType);
        return VERR_INTERNAL_ERROR;
    }
    }

    return VINF_SUCCESS;
}


/**
 * Internal worker for rtR0MemObjNativeMapKernel and rtR0MemObjNativeMapUser.
 *
 * Fixes vs. previous revision: no longer calls IoFreeMdl(NULL) when the
 * partial-MDL allocation fails, and the partial MDL is now freed on the
 * failure paths (it was leaked whenever the map attempt failed).
 *
 * @returns IPRT status code.
 * @param   ppMem       Where to store the memory object for the mapping.
 * @param   pMemToMap   The memory object to map.
 * @param   pvFixed     Where to map it. (void *)-1 if anywhere is fine.
 * @param   uAlignment  The alignment requirement for the mapping.
 * @param   fProt       The desired page protection for the mapping.
 * @param   R0Process   If NIL_RTR0PROCESS map into system (kernel) memory.
 *                      If not nil, it's the current process.
 * @param   offSub      Offset into @a pMemToMap to start mapping.
 * @param   cbSub       The number of bytes to map from @a pMapToMem.  0 if
 *                      we're to map everything. Non-zero if @a offSub is
 *                      non-zero.
 */
static int rtR0MemObjNtMap(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, void* pvFixed, size_t uAlignment,
    unsigned fProt, RTR0PROCESS R0Process, size_t offSub, size_t cbSub)
{
    int rc = VERR_MAP_FAILED;

    /*
     * Check that the specified alignment is supported.
     */
    if (uAlignment > PAGE_SIZE)
        return VERR_NOT_SUPPORTED;

    /*
     * There are two basic cases here, either we've got an MDL and can
     * map it using MmMapLockedPages, or we've got a contiguous physical
     * range (MMIO most likely) and can use MmMapIoSpace.
     */
    PRTR0MEMOBJNT pMemNtToMap = (PRTR0MEMOBJNT)pMemToMap;
    if (pMemNtToMap->cMdls)
    {
        /* don't attempt map locked regions with more than one mdl. */
        if (pMemNtToMap->cMdls != 1)
            return VERR_NOT_SUPPORTED;

        /* Need g_pfnrtMmMapLockedPagesSpecifyCache to map to a specific address. */
        if (pvFixed != (void*)-1 && g_pfnrtMmMapLockedPagesSpecifyCache == NULL)
            return VERR_NOT_SUPPORTED;

        /* we can't map anything to the first page, sorry. */
        if (pvFixed == 0)
            return VERR_NOT_SUPPORTED;

        /* only one system mapping for now - no time to figure out MDL restrictions right now. */
        if (pMemNtToMap->Core.uRel.Parent.cMappings
            && R0Process == NIL_RTR0PROCESS)
        {
            if (pMemNtToMap->Core.enmType != RTR0MEMOBJTYPE_PHYS_NC)
                return VERR_NOT_SUPPORTED;
            /* Refuse if any existing mapping is already a kernel one. */
            UINT32 iMapping = pMemNtToMap->Core.uRel.Parent.cMappings;
            while (iMapping-- > 0)
            {
                PRTR0MEMOBJNT pMapping = (PRTR0MEMOBJNT)pMemNtToMap->Core.uRel.Parent.papMappings[iMapping];
                if (pMapping->Core.enmType != RTR0MEMOBJTYPE_MAPPING
                    || pMapping->Core.u.Mapping.R0Process == NIL_RTR0PROCESS)
                    return VERR_NOT_SUPPORTED;
            }
        }

        /* Create a partial MDL if this is a sub-range request. */
        PMDL pMdl;
        if (!offSub && !cbSub)
            pMdl = pMemNtToMap->apMdls[0];
        else
        {
            pMdl = IoAllocateMdl(NULL, (ULONG)cbSub, FALSE, FALSE, NULL);
            if (!pMdl)
                return VERR_NO_MEMORY; /* was: IoFreeMdl(NULL), which is invalid */
            IoBuildPartialMdl(pMemNtToMap->apMdls[0], pMdl,
                (uint8_t*)MmGetMdlVirtualAddress(pMemNtToMap->apMdls[0]) + offSub, (ULONG)cbSub);
        }

        __try
        {
            /** @todo uAlignment */
            /** @todo How to set the protection on the pages? */
            void* pv;
            if (g_pfnrtMmMapLockedPagesSpecifyCache)
                pv = g_pfnrtMmMapLockedPagesSpecifyCache(pMdl,
                    R0Process == NIL_RTR0PROCESS ? KernelMode : UserMode,
                    MmCached,
                    pvFixed != (void*)-1 ? pvFixed : NULL,
                    FALSE /* no bug check on failure */,
                    NormalPagePriority);
            else
                pv = MmMapLockedPages(pMdl, R0Process == NIL_RTR0PROCESS ? KernelMode : UserMode);
            if (pv)
            {
                NOREF(fProt);

                /* Sub-mappings need an extra MDL slot for the partial MDL. */
                PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(!offSub && !cbSub
                    ? sizeof(*pMemNt) : RT_UOFFSETOF_DYN(RTR0MEMOBJNT, apMdls[1]),
                    RTR0MEMOBJTYPE_MAPPING, pv, pMemNtToMap->Core.cb);
                if (pMemNt)
                {
                    pMemNt->Core.u.Mapping.R0Process = R0Process;
                    pMemNt->Core.u.Mapping.ProcessId = RTProcSelf();
                    if (!offSub && !cbSub)
                        pMemNt->fSubMapping = false;
                    else
                    {
                        /* The new object takes ownership of the partial MDL. */
                        pMemNt->apMdls[0] = pMdl;
                        pMemNt->cMdls = 1;
                        pMemNt->fSubMapping = true;
                    }

                    *ppMem = &pMemNt->Core;
                    return VINF_SUCCESS;
                }

                rc = VERR_NO_MEMORY;
                MmUnmapLockedPages(pv, pMdl);
            }
        }
        __except (EXCEPTION_EXECUTE_HANDLER)
        {
#ifdef LOG_ENABLED
            NTSTATUS rcNt = GetExceptionCode();
            /* Note: was LOGE((...)) which passed rcNt as the format string. */
            LOGE("rtR0MemObjNtMap: Exception Code %#x\n", rcNt);
#endif

            /* nothing */
            rc = VERR_MAP_FAILED;
        }

        /* Failure: release the partial MDL if we created one (success paths
           return early, transferring ownership to the new object). */
        if (pMdl != pMemNtToMap->apMdls[0])
            IoFreeMdl(pMdl);
    }
    else
    {
        AssertReturn(pMemNtToMap->Core.enmType == RTR0MEMOBJTYPE_PHYS
            && !pMemNtToMap->Core.u.Phys.fAllocated, VERR_INTERNAL_ERROR);

        /* cannot map phys mem to user space (yet). */
        if (R0Process != NIL_RTR0PROCESS)
            return VERR_NOT_SUPPORTED;

        /* Cannot sub-map these (yet). */
        AssertMsgReturn(!offSub && !cbSub, ("%#zx %#zx\n", offSub, cbSub), VERR_NOT_SUPPORTED);


        /** @todo uAlignment */
        /** @todo How to set the protection on the pages? */
        PHYSICAL_ADDRESS Phys;
        Phys.QuadPart = pMemNtToMap->Core.u.Phys.PhysBase;
        void* pv = MmMapIoSpace(Phys, pMemNtToMap->Core.cb,
            pMemNtToMap->Core.u.Phys.uCachePolicy == RTMEM_CACHE_POLICY_MMIO ? MmNonCached : MmCached);
        if (pv)
        {
            PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_MAPPING, pv,
                pMemNtToMap->Core.cb);
            if (pMemNt)
            {
                pMemNt->Core.u.Mapping.R0Process = R0Process;
                pMemNt->Core.u.Mapping.ProcessId = RTProcSelf();
                *ppMem = &pMemNt->Core;
                return VINF_SUCCESS;
            }

            rc = VERR_NO_MEMORY;
            MmUnmapIoSpace(pv, pMemNtToMap->Core.cb);
        }
    }

    NOREF(uAlignment); NOREF(fProt);
    return rc;
}


/**
 * Maps @a pMemToMap into the calling user process.
 * Only mapping into the current process is supported.
 */
DECLHIDDEN(int) rtR0MemObjNativeMapUser(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, RTR3PTR R3PtrFixed, size_t uAlignment,
    unsigned fProt, RTR0PROCESS R0Process, size_t offSub, size_t cbSub)
{
    /* Reject foreign-process requests up front. */
    AssertReturn(R0Process == RTR0ProcHandleSelf(), VERR_NOT_SUPPORTED);

    void *pvFixed = (void *)R3PtrFixed;
    return rtR0MemObjNtMap(ppMem, pMemToMap, pvFixed, uAlignment, fProt, R0Process, offSub, cbSub);
}

/**
 * Maps @a pMemToMap into the kernel (system) address space.
 * Delegates to rtR0MemObjNtMap with NIL_RTR0PROCESS as the target.
 */
DECLHIDDEN(int) rtR0MemObjNativeMapKernel(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, void *pvFixed, size_t uAlignment,
                                          unsigned fProt, size_t offSub, size_t cbSub)
{
    return rtR0MemObjNtMap(ppMem, pMemToMap, pvFixed, uAlignment, fProt,
                           NIL_RTR0PROCESS /* kernel mapping */, offSub, cbSub);
}

/**
 * Allocates physically contiguous memory without creating a kernel mapping.
 *
 * Fixes vs. previous revision: the failure log now distinguishes a short MDL
 * from an rtR0MemObjNew failure (previously "mdl byte count failed" was
 * logged for both), and a pointless nested scope was removed.
 *
 * @returns IPRT status code.
 * @param   ppMem        Where to store the new memory object on success.
 * @param   cb           Number of bytes to allocate.
 * @param   PhysHighest  Highest acceptable physical address, or NIL_RTHCPHYS.
 * @param   uAlignment   Required physical alignment; only PAGE_SIZE is
 *                       supported by this path.
 */
DECLHIDDEN(int) rtR0MemObjNativeAllocPhys(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest, size_t uAlignment)
{
    /*
     * Try and see if we're lucky and get a contiguous chunk from MmAllocatePagesForMdl.
     *
     * This is preferable to using MmAllocateContiguousMemory because there are
     * a few situations where the memory shouldn't be mapped, like for instance
     * VT-x control memory. Since these are rather small allocations (one or
     * two pages) MmAllocatePagesForMdl will probably be able to satisfy the
     * request.
     *
     * If the allocation is big, the chances are *probably* not very good. The
     * current limit is kind of random...
     */
    if (   uAlignment == PAGE_SIZE
        && g_pfnrtMmAllocatePagesForMdlEx
        && g_pfnrtMmFreePagesFromMdl)
    {
        PHYSICAL_ADDRESS Zero;
        Zero.QuadPart = 0;
        PHYSICAL_ADDRESS HighAddr;
        HighAddr.QuadPart = PhysHighest == NIL_RTHCPHYS ? MAXLONGLONG : PhysHighest;
        PMDL pMdl = g_pfnrtMmAllocatePagesForMdlEx(Zero, HighAddr, Zero, cb, MmCached,
                                                   MM_ALLOCATE_FULLY_REQUIRED | MM_ALLOCATE_REQUIRE_CONTIGUOUS_CHUNKS);
        if (pMdl)
        {
            if (MmGetMdlByteCount(pMdl) >= cb)
            {
                PPFN_NUMBER paPfns = MmGetMdlPfnArray(pMdl);
                PRTR0MEMOBJNT pMemNt = (PRTR0MEMOBJNT)rtR0MemObjNew(sizeof(*pMemNt), RTR0MEMOBJTYPE_PHYS, NULL, cb);
                if (pMemNt)
                {
                    pMemNt->Core.fFlags |= RTR0MEMOBJ_FLAGS_ZERO_AT_ALLOC;
                    pMemNt->Core.u.Phys.fAllocated = true;
                    /* Contiguous, so the first PFN is the physical base. */
                    pMemNt->Core.u.Phys.PhysBase = (RTHCPHYS)paPfns[0] << PAGE_SHIFT;
                    pMemNt->fAllocatedPagesForMdl = true;
                    pMemNt->cMdls = 1;
                    pMemNt->apMdls[0] = pMdl;
                    *ppMem = &pMemNt->Core;
                    return VINF_SUCCESS;
                }
                LOGE("rtR0MemObjNew failed\n");
            }
            else
                LOGE("mdl byte count too small\n");
            g_pfnrtMmFreePagesFromMdl(pMdl);
            ExFreePool(pMdl);
        }
    }

    LOGE("MmAllocatePagesForMdlEx failed: size=%u\n", (unsigned)cb);
    return VERR_NO_MEMORY;
}
