/* A very fast user mode page allocator implementation enabling all sorts of
useful speed improvements for common malloc operations. (C) 2010 Niall Douglas.


Boost Software License - Version 1.0 - August 17th, 2003

Permission is hereby granted, free of charge, to any person or organization
obtaining a copy of the software and accompanying documentation covered by
this license (the "Software") to use, reproduce, display, distribute,
execute, and transmit the Software, and to prepare derivative works of the
Software, and to permit third-parties to whom the Software is furnished to
do so, all subject to the following:

The copyright notices in the Software and this entire statement, including
the above license grant, this restriction and the following disclaimer,
must be included in all copies of the Software, in whole or in part, and
all derivative works of the Software, unless such copies or derivative
works are solely in the form of machine-executable object code generated by
a source language processor.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT
SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE
FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
*/

#ifdef ENABLE_USERMODEPAGEALLOCATOR

#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable: 4101) /* unreferenced local variable */
#pragma warning(disable: 4189) /* local variable is initialized but not referenced */
#endif

/* This is how many free pages relative to used pages to keep around before
returning them to the system. It gets ignored if system free memory is
perceived to be tight. */
/*#define USERMODEPAGEALLOCATOR_FREEPAGECACHESIZE(usedpages, freepages) (usedpages)*/
#define USERMODEPAGEALLOCATOR_FREEPAGECACHESIZE(usedpages, freepages) ((size_t)-1)

/* This is how many subsequent free operations must happen since a page was
freed before it will be eligible to be returned to the system. It helps prevent
large amounts of memory getting repeatedly freed and reallocated. It gets ignored
if system free memory is perceived to be tight. */
#define USERMODEPAGEALLOCATOR_FREEPAGECACHEAGE(usedpages, freepages) 256

/* This is how many pages to immediately preload the free page cache with on
process startup. If you set it too high relative to the remaining system free
memory, it will get given back fairly rapidly. */
/*#define USERMODEPAGEALLOCATOR_FREEPAGECACHEPRELOAD(systemmemorypressure) ((512*1024*1024/4096))*/
#define USERMODEPAGEALLOCATOR_FREEPAGECACHEPRELOAD(systemmemorypressure) 0

/* This defines how frequently the system free memory state should be
checked, and it must be a power of two. */
#define USERMODEPAGEALLOCATOR_SYSTEMFREEMEMORYCHECKRATE 64

/* This turns on the storage of free page metadata directly in the page tables which
will halve the page table memory requirements. As on x86/x64 page frames start from
one going upwards sequentially and will never use the top bit, this ought to always
be safe. */
#if (defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))) || (defined(_MSC_VER) && (defined(_M_IX86) || defined(_M_X64)))
/*#define USERMODEPAGEALLOCATOR_USECOMPACTFREEPAGEINDICATOR*/
#endif

/* This puts the user mode page allocator into debug config which means that as
much buffering and caching is disabled as possible in order to best test the code. */
#ifdef DEBUG
#define USERMODEPAGEALLOCATOR_DEBUGCONFIG
#endif

/*
#if (defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__)))
#define PREFETCHCACHELINE(addr, rw, locality) __builtin_prefetch((const void *)(addr), (rw), (locality))
#define STRUCTUREALIGNMENT(alignment)         __attribute__ ((aligned(alignment)))
#elif (defined(_MSC_VER) && (defined(_M_IX86) || defined(_M_X64)))
#define PREFETCHCACHELINE(addr, rw, locality) _mm_prefetch((const char *)(addr), (locality))
#define STRUCTUREALIGNMENT(alignment)         __declspec(align(alignment))
#endif
*/

#ifndef PREFETCHCACHELINE
#define PREFETCHCACHELINE(addr, rw, locality)
#endif
#ifndef STRUCTUREALIGNMENT
#define STRUCTUREALIGNMENT(alignment)
#endif

#ifdef USERMODEPAGEALLOCATOR_DEBUGCONFIG
#undef USERMODEPAGEALLOCATOR_FREEPAGECACHESIZE
#define USERMODEPAGEALLOCATOR_FREEPAGECACHESIZE(usedpages, freepages) ((usedpages)/16)
#undef USERMODEPAGEALLOCATOR_FREEPAGECACHEAGE
#define USERMODEPAGEALLOCATOR_FREEPAGECACHEAGE(usedpages, freepages) 0
#undef USERMODEPAGEALLOCATOR_SYSTEMFREEMEMORYCHECKRATE
#define USERMODEPAGEALLOCATOR_SYSTEMFREEMEMORYCHECKRATE 1
#endif

#include "nedtries/nedtrie.h"
extern int OSHavePhysicalPageSupport(void);
extern void *userpage_malloc(size_t toallocate, unsigned flags);
extern int userpage_free(void *mem, size_t size);
extern void *userpage_realloc(void *mem, size_t oldsize, size_t newsize, int flags, unsigned flags2);
#if defined(WIN32) && defined(USERMODEPAGEALLOCATOR_DLL_EXPORTS)
extern void DebugPrint(const char *fmt, ...) THROWSPEC;
#else
#define DebugPrint printf
#endif

#ifndef PAGE_SIZE
#define PAGE_SIZE 4096
#endif
#ifndef USERPAGE_TOPDOWN
#define USERPAGE_TOPDOWN                   (M2_CUSTOM_FLAGS_BEGIN<<0)
#define USERPAGE_NOCOMMIT                  (M2_CUSTOM_FLAGS_BEGIN<<1)
#endif

#define REGION_ENTRY(type)                        NEDTRIE_ENTRY(type)
#define REGION_HEAD(name, type)                   NEDTRIE_HEAD(name, type)
#define REGION_INIT(treevar)                      NEDTRIE_INIT(treevar)
#define REGION_EMPTY(treevar)                     NEDTRIE_EMPTY(treevar)
#define REGION_GENERATE(proto, treetype, nodetype, link, cmpfunct) NEDTRIE_GENERATE(proto, treetype, nodetype, link, cmpfunct, NEDTRIE_NOBBLEZEROS(treetype))
#define REGION_INSERT(treetype, treevar, node)    NEDTRIE_INSERT(treetype, treevar, node)
#define REGION_REMOVE(treetype, treevar, node)    NEDTRIE_REMOVE(treetype, treevar, node)
#define REGION_FIND(treetype, treevar, node)      NEDTRIE_FIND(treetype, treevar, node)
#define REGION_EXACTFIND(treetype, treevar, node) NEDTRIE_EXACTFIND(treetype, treevar, node)
#define REGION_CFIND(treetype, treevar, node, rounds) NEDTRIE_CFIND(treetype, treevar, node, rounds)
#define REGION_MAX(treetype, treevar)             NEDTRIE_MAX(treetype, treevar)
#define REGION_MIN(treetype, treevar)             NEDTRIE_MIN(treetype, treevar)
#define REGION_NEXT(treetype, treevar, node)      NEDTRIE_NEXT(treetype, treevar, node)
#define REGION_PREV(treetype, treevar, node)      NEDTRIE_PREV(treetype, treevar, node)
#define REGION_FOREACH(var, treetype, treevar)    NEDTRIE_FOREACH(var, treetype, treevar)
#define REGION_HASNODEHEADER(treevar, node, link) NEDTRIE_HASNODEHEADER(treevar, node, link)

typedef struct RegionStorage_s RegionStorage_t;
typedef struct region_node_s region_node_t;
/* One contiguous region of address space. Each region lives simultaneously in
two nedtries (one keyed by start address, one keyed by length — see
regionkeyA()/regionkeyL() below) and in a doubly linked list ordered by
addition. */
struct region_node_s {
  RegionStorage_t *owner;            /* Storage block this node was allocated from */
  region_node_t *prev, *next;        /* Always keep owner, prev at top */
  REGION_ENTRY(region_node_s) linkA; /* by start addr */
  REGION_ENTRY(region_node_s) linkL; /* by length */
  void *start, *end;                 /* Region bounds; length is end - start (see regionkeyL) */
};
typedef struct regionA_tree_s regionA_tree_t;
REGION_HEAD(regionA_tree_s, region_node_s);
typedef struct regionL_tree_s regionL_tree_t;
REGION_HEAD(regionL_tree_s, region_node_s);

/* nedtrie key extractor: orders the allocated-region tree by start address. */
size_t regionkeyA(const region_node_t *RESTRICT r)
{
  const size_t startaddr=(size_t) r->start;
  return startaddr;
}
/* nedtrie key extractor: orders the free-region tree by region length. */
size_t regionkeyL(const region_node_t *RESTRICT r)
{
  const size_t regionlen=(size_t) r->end - (size_t) r->start;
  return regionlen;
}
REGION_GENERATE(static, regionA_tree_s, region_node_s, linkA, regionkeyA); /* generates the by-address tree operations */
REGION_GENERATE(static, regionL_tree_s, region_node_s, linkL, regionkeyL); /* generates the by-length tree operations */
typedef struct MemorySource_t MemorySource;
/* Region bookkeeping; two instances exist, 'lower' and 'upper'
   (presumably matching the upward/downward growing ends of the address
   space reservation — TODO confirm against the allocator proper). */
static struct MemorySource_t
{
  regionA_tree_t regiontreeA; /* The list of allocated regions, keyed by start addr */
  regionL_tree_t regiontreeL; /* The list of free regions, keyed by length */
  region_node_t *firstregion; /* The first region by order of addition */
  region_node_t *lastregion;  /* The last region by order of addition */
} lower, upper;

/* Handle for one OS-level address space reservation, threaded through all the
OS* primitives below. */
typedef struct OSAddressSpaceReservationData_t
{
  void *addr;    /* Base address of the reservation; zero means failure */
  void *data[2]; /* OS-specific extra state. On Windows when emulating physical
                    pages: [0] = file mapping handle, [1] = next fake page
                    frame counter (see OSObtainMemoryPages()). */
} OSAddressSpaceReservationData;
#ifndef WIN32
typedef size_t PageFrameType;

/* This function determines whether the host OS allows user mode physical memory
page mapping. */
static int OSDeterminePhysicalPageSupport(void)
{
  /* Generic (non-Windows) build: physical page mapping is never available. */
  return 0;
}

/* This function returns a simple true or false if the host OS allows user mode
physical page mapping */
int OSHavePhysicalPageSupport(void)
{
  /* Generic (non-Windows) build: always false. */
  return 0;
}

/* This function determines whether the host OS is currently short of memory.
The value is LINEAR between 0.0 (no pressure) and 1.0 (terrible pressure). */
static double OSSystemMemoryPressure(void)
{
  /* Generic (non-Windows) build: report no memory pressure at all. */
  return 0.0;
}

/* This function could ask the host OS for address space, or on embedded systems
it could simply parcel out space via moving a pointer. The second two void *
are some arbitrary extra data to be later passed to OSReleaseAddrSpace(). */
static OSAddressSpaceReservationData OSReserveAddrSpace(size_t space) { OSAddressSpaceReservationData asrd={0}; return asrd; } /* stub: addr==0 signals failure to callers */

/* This function returns address space previously allocated using
OSReserveAddrSpace(). It is guaranteed to exactly match what was previously
returned by that function. */
static int OSReleaseAddrSpace(OSAddressSpaceReservationData *data, size_t space) { return 0; } /* stub: always fails */

/* This function obtains physical memory pages, either by asking the host OS
or on embedded systems by simply pulling them from a free page ring list. */
static size_t OSObtainMemoryPages(PageFrameType *buffer, size_t number, OSAddressSpaceReservationData *data) { return 0; } /* stub: no pages available */

/* This function returns previously obtained physical memory pages. */
static size_t OSReleaseMemoryPages(PageFrameType *buffer, size_t number, OSAddressSpaceReservationData *data) { return 0; } /* stub: always fails */

/* This function causes the specified set of physical memory pages to be
mapped at the specified address. On an embedded system this would simply
modify the MMU and flush the appropriate TLB entries.
*/
static size_t OSRemapMemoryPagesOntoAddr(void *addr, size_t entries, PageFrameType *pageframes, OSAddressSpaceReservationData *data) { return 0; } /* stub: always fails */

/* This function causes the specified set of physical memory pages to be
mapped at the specified set of addresses. On an embedded system this would
simply modify the MMU and flush the appropriate TLB entries. It works like this:

for(size_t n=0; n<entries; n++, addrs++, pageframes++) {
  if(*pageframes)
    Map(*addrs, *pageframes);
  else
    Unmap(*addrs);
}
*/
static size_t OSRemapMemoryPagesOntoAddrs(void **addrs, size_t entries, PageFrameType *pageframes, OSAddressSpaceReservationData *data) { return 0; } /* stub: always fails */
#else
/* Cached result of the one-off physical page support probe; zero means
"not yet determined" (see OSDeterminePhysicalPageSupport() below). */
static enum {
  DISABLEEVERYTHING=1,      /* Compile-time config mismatch: allocator must not run */
  NOPHYSICALPAGESUPPORT=2,  /* This user/machine cannot lock pages in memory */
  HAVEPHYSICALPAGESUPPORT=4 /* AllocateUserPhysicalPages() works */
} PhysicalPageSupport;

#ifdef ENABLE_PHYSICALPAGEEMULATION
/* Windows has the curious problem of using 4Kb pages but requiring those pages
to be mapped at 64Kb aligned address. By far the easiest solution is to pretend
that we actually have 64Kb pages. */
#undef PAGE_SIZE
#define PAGE_SIZE 65536
typedef struct PageFrameType_t
{
  ULONG_PTR pages[16];
} PageFrameType;
#else
typedef ULONG_PTR PageFrameType;
#endif

/* Returns 1 for bad compile, 2 for no support on this machine/user,
4 for supported */
#pragma comment(lib, "advapi32.lib")
static int OSDeterminePhysicalPageSupport(void)
{
  if(!PhysicalPageSupport)
  { /* Quick test */
    PageFrameType pageframe;
    size_t no=sizeof(PageFrameType)/sizeof(ULONG_PTR);
    SYSTEM_INFO si={0};
		{
			HANDLE token;
			if(OpenProcessToken(GetCurrentProcess(), TOKEN_ADJUST_PRIVILEGES, &token))
			{
				TOKEN_PRIVILEGES privs={1};
				if(LookupPrivilegeValue(NULL, SE_LOCK_MEMORY_NAME, &privs.Privileges[0].Luid))
				{
					privs.Privileges[0].Attributes=SE_PRIVILEGE_ENABLED;
					if(!AdjustTokenPrivileges(token, FALSE, &privs, 0, NULL, NULL) || GetLastError()!=S_OK)
					{
					}
				}
				CloseHandle(token);
			}
		}
    if(AllocateUserPhysicalPages((HANDLE)(size_t)-1, (PULONG_PTR) &no, (PULONG_PTR) &pageframe))
    {
      FreeUserPhysicalPages((HANDLE)(size_t)-1, (PULONG_PTR) &no, (PULONG_PTR) &pageframe);
      PhysicalPageSupport=HAVEPHYSICALPAGESUPPORT;
      CreateEvent(NULL, FALSE, FALSE, __T("UserModePageAllocatorEnabled"));
    }
    else
    {
      PhysicalPageSupport=NOPHYSICALPAGESUPPORT;
      /*fprintf(stderr, "User Mode Page Allocator: Failed to allocate physical memory pages (does the user running this process have the right to lock pages in memory?). User Mode Page Allocator will not be used.\n");*/
      OutputDebugStringA("User Mode Page Allocator: Failed to allocate physical memory pages (does the user running this process have the right to lock pages in memory?). User Mode Page Allocator will not be used.\n");
      CreateEvent(NULL, FALSE, FALSE, __T("UserModePageAllocatorDisabled"));
    }
    GetSystemInfo(&si);
#ifdef ENABLE_PHYSICALPAGEEMULATION
    if(si.dwAllocationGranularity!=PAGE_SIZE)
    {
      assert(si.dwAllocationGranularity==PAGE_SIZE);
      fprintf(stderr, "User Mode Page Allocator: Allocation granularity is %u not %u. Please recompile with corrected PAGE_SIZE\n", si.dwAllocationGranularity, PAGE_SIZE);
      PhysicalPageSupport=DISABLEEVERYTHING;
    }
    if(si.dwAllocationGranularity/si.dwPageSize!=sizeof(PageFrameType)/sizeof(ULONG_PTR))
    {
      assert(si.dwAllocationGranularity/si.dwPageSize==sizeof(PageFrameType)/sizeof(ULONG_PTR));
      fprintf(stderr, "User Mode Page Allocator: Pages per PageFrameType is %u not %u. Please recompile with corrected PageFrameType definition\n", si.dwAllocationGranularity/si.dwPageSize, sizeof(PageFrameType)/sizeof(ULONG_PTR));
      PhysicalPageSupport=DISABLEEVERYTHING;
    }
#else
    if(si.dwPageSize!=PAGE_SIZE)
    {
      assert(si.dwPageSize==PAGE_SIZE);
      fprintf(stderr, "User Mode Page Allocator: Page size is %u not %u. Please recompile with corrected PAGE_SIZE\n", si.dwPageSize, PAGE_SIZE);
      PhysicalPageSupport=DISABLEEVERYTHING;
    }
#endif
  }
  return PhysicalPageSupport;
}
/* Reports whether user mode physical page mapping is fully supported,
lazily running the probe on first call. */
int OSHavePhysicalPageSupport(void)
{
  if(!PhysicalPageSupport)
    OSDeterminePhysicalPageSupport();
  return PhysicalPageSupport==HAVEPHYSICALPAGESUPPORT;
}
/* Maps Windows' 0-100 memory load figure onto the linear 0.0-1.0 pressure
scale used by this allocator; reports no pressure if the query fails. */
static double OSSystemMemoryPressure(void)
{
  MEMORYSTATUSEX ms={sizeof(MEMORYSTATUSEX)};
  if(GlobalMemoryStatusEx(&ms))
    return ms.dwMemoryLoad/100.0;
  return 0;
}
/* Reserves 'space' bytes of address space. Prefers a real MEM_PHYSICAL
reservation; when ENABLE_PHYSICALPAGEEMULATION is on, falls back to a
SEC_RESERVE file mapping plus per-granularity VirtualAlloc reservations.
Returns with .addr zero on failure. */
static OSAddressSpaceReservationData OSReserveAddrSpace(size_t space)
{
  OSAddressSpaceReservationData ret={0};
  if(!PhysicalPageSupport) OSDeterminePhysicalPageSupport();
  if(DISABLEEVERYTHING==PhysicalPageSupport) return ret;
  if(HAVEPHYSICALPAGESUPPORT==PhysicalPageSupport)
  {
    ret.addr=VirtualAlloc(NULL, space, MEM_RESERVE|MEM_PHYSICAL, PAGE_READWRITE);
  }
#ifdef ENABLE_PHYSICALPAGEEMULATION
  if(!ret.addr)
  { /* Emulate: back the space with a pagefile-backed SEC_RESERVE mapping */
    HANDLE fmh;
    fmh = CreateFileMapping(INVALID_HANDLE_VALUE, NULL, PAGE_READWRITE|SEC_RESERVE,
#if defined(_M_IA64) || defined(_M_X64) || defined(WIN64)
                            (DWORD)(space>>32),
#else
                            0,
#endif
                            (DWORD)(space&((DWORD)-1)), NULL);
    if(fmh)
    { /* This is breathtakingly inefficient, but win32 leaves us no choice :(
         At least this function is called very infrequently.
         Find a contiguous hole, release it, then re-reserve it one allocation
         granularity unit at a time so units can later be individually swapped
         for file mapping views. Retry from scratch if another thread steals
         part of the hole in between. */
      while((ret.addr=VirtualAlloc(NULL, space, MEM_RESERVE, PAGE_READWRITE)))
      {
        void *RESTRICT seg;
        VirtualFree(ret.addr, 0, MEM_RELEASE);
        for(seg=ret.addr; seg<(void*)((size_t) ret.addr + space); seg=(void *)((size_t) seg + Win32granularity))
        {
          if(!VirtualAlloc(seg, Win32granularity, MEM_RESERVE, PAGE_READWRITE))
            break;
        }
        if(seg==(void*)((size_t) ret.addr + space))
          break; /* Fully re-reserved */
        else
        { /* Raced: undo the partial reservation and try again */
          for(; seg>=ret.addr; seg=(void *)((size_t) seg - Win32granularity))
            VirtualFree(seg, 0, MEM_RELEASE);
        }
      }
      if(!ret.addr)
        CloseHandle(fmh);
      else
      {
        ret.data[0]=(void *) fmh;           /* marks this reservation as emulated */
        ret.data[1]=(void *)(size_t) 1;     /* first fake page frame number */
      }
    }
  }
#endif
  return ret;
}
/* Releases address space previously returned by OSReserveAddrSpace().
'space' must exactly match the original reservation size. */
static int OSReleaseAddrSpace(OSAddressSpaceReservationData *RESTRICT data, size_t space)
{
  if(!data->data[0])
    return VirtualFree(data->addr, 0, MEM_RELEASE);
#ifdef ENABLE_PHYSICALPAGEEMULATION
  else
  { /* Emulated reservation: close the file mapping and release each
       per-granularity reservation individually (mirrors OSReserveAddrSpace) */
    void *seg;
    CloseHandle((HANDLE)data->data[0]);
    for(seg=data->addr; seg<(void*)((size_t) data->addr + space); seg=(void *)((size_t) seg + Win32granularity))
      VirtualFree(seg, 0, MEM_RELEASE);
    return 1;
  }
#endif
  return 0;
}
/* Obtains 'number' physical memory pages into buffer. Returns the number of
PageFrameType units obtained, or zero on failure. */
static size_t OSObtainMemoryPages(PageFrameType *RESTRICT buffer, size_t number, OSAddressSpaceReservationData *RESTRICT data)
{
  if(!data->data[0])
  { /* Real physical pages from the kernel */
#ifdef ENABLE_PHYSICALPAGEEMULATION
    number*=sizeof(PageFrameType)/sizeof(ULONG_PTR); /* 64Kb units -> real 4Kb pages */
#endif
#if 1
    if(!AllocateUserPhysicalPages((HANDLE)(size_t)-1, (PULONG_PTR) &number, (PULONG_PTR) buffer))
    { /* 'number' now holds how many were actually allocated; give any back */
      if(number)
        FreeUserPhysicalPages((HANDLE)(size_t)-1, (PULONG_PTR) &number, (PULONG_PTR) buffer);
      return 0;
    }
#else
    { /* Page-at-a-time variant kept for debugging */
      size_t n;
      PageFrameType *RESTRICT bptr;
      for(n=0, bptr=buffer; n<number; n++, bptr++)
      {
        size_t no=1;
        if(!AllocateUserPhysicalPages((HANDLE)(size_t)-1, (PULONG_PTR) &no, (PULONG_PTR) bptr))
        { /* Unwind everything allocated so far */
          for(bptr--; bptr>=buffer; bptr--)
          {
            no=1;
            FreeUserPhysicalPages((HANDLE)(size_t)-1, (PULONG_PTR) &no, (PULONG_PTR) bptr);
          }
          return 0;
        }
      }
    }
#endif
#ifdef ENABLE_PHYSICALPAGEEMULATION
    number/=sizeof(PageFrameType)/sizeof(ULONG_PTR); /* back to 64Kb units */
#endif
    return number;
  }
#ifdef ENABLE_PHYSICALPAGEEMULATION
  else
  { /* Emulation: hand out sequential fake frame numbers; data->data[1] is the
       next-frame counter, initialized to 1 by OSReserveAddrSpace() */
    size_t n;
    ULONG_PTR *RESTRICT pf=(ULONG_PTR *RESTRICT) &data->data[1];
    for(n=0; n<number*(sizeof(PageFrameType)/sizeof(ULONG_PTR)); n++)
      ((ULONG_PTR *) buffer)[n]=(*pf)++;
    return number;
  }
#endif
  return 0;
}
/* Returns previously obtained physical memory pages to the system. Returns
the number of PageFrameType units released, or zero on failure (it always
fails for emulated reservations: fake frames are never reclaimed). */
static size_t OSReleaseMemoryPages(PageFrameType *RESTRICT buffer, size_t number, OSAddressSpaceReservationData *RESTRICT data)
{
  if(!data->data[0])
  {
#ifdef ENABLE_PHYSICALPAGEEMULATION
    number*=sizeof(PageFrameType)/sizeof(ULONG_PTR); /* 64Kb units -> real 4Kb pages */
#endif
#if 1
    if(!FreeUserPhysicalPages((HANDLE)(size_t)-1, (PULONG_PTR) &number, (PULONG_PTR) buffer)) return 0;
#else
    { /* Page-at-a-time variant kept for debugging */
      size_t n;
      PageFrameType *RESTRICT bptr;
      for(n=0, bptr=buffer; n<number; n++, bptr++)
      {
        size_t no=1;
        if(!FreeUserPhysicalPages((HANDLE)(size_t)-1, (PULONG_PTR) &no, (PULONG_PTR) bptr))
        {
          assert(0);
        }
      }
    }
#endif
#ifdef ENABLE_PHYSICALPAGEEMULATION
    number/=sizeof(PageFrameType)/sizeof(ULONG_PTR); /* back to 64Kb units */
#endif
#ifdef DEBUG
    /*for(n=0; n<number*(sizeof(PageFrameType)/sizeof(ULONG_PTR)); n++)
      ((ULONG_PTR *) buffer)[n]=0;*/
#endif
    return number;
  }
  /* Always fail if we are emulating physical pages */
  return 0;
}
/* Maps the given run of page frames contiguously starting at addr (passing
zeroed frames unmaps). Returns non-zero on success. */
static
__declspec(noinline)
size_t OSRemapMemoryPagesOntoAddr(void *addr, size_t entries, PageFrameType *RESTRICT pageframes, OSAddressSpaceReservationData *RESTRICT data)
{
  if(!data->data[0])
  { /* Native physical page support: one kernel call remaps the whole run */
    BOOL ret;
#ifdef ENABLE_PHYSICALPAGEEMULATION
    entries*=sizeof(PageFrameType)/sizeof(ULONG_PTR); /* 64Kb units -> real 4Kb pages */
#endif
#if 1
    ret=MapUserPhysicalPages(addr, entries, (PULONG_PTR) pageframes);
    if(!ret)
    {
      assert(ret);
    }
    return ret;
#else
    { /* Page-at-a-time variant kept for debugging */
      size_t n;
      PageFrameType *RESTRICT bptr;
      for(n=0, bptr=pageframes; n<entries; n++, bptr++, addr=(void *)((size_t) addr + PAGE_SIZE))
      {
        ret=MapUserPhysicalPages(addr, 1, pageframes ? bptr : NULL);
        if(!ret)
        {
          assert(ret);
          return 0;
        }
      }
      return 1;
    }
#endif
  }
#ifdef ENABLE_PHYSICALPAGEEMULATION
  else
  { /* File mapping emulation path.
       NOTE(review): this branch cannot compile as written and appears never to
       have been built: 'pageframe' is undeclared (presumably '*pageframes' was
       intended, which is itself a struct under ENABLE_PHYSICALPAGEEMULATION and
       so not directly testable), 'data' is a pointer so 'data.data[0]' should
       be 'data->data[0]', and 'pfa'/'pf' are unused. Needs reworking before
       this emulation path is enabled. */
    size_t n, ret=1;
    PageFrameType *RESTRICT pfa, *RESTRICT pf;
    for(n=0; n<entries; n++, addr=(void *)((size_t) addr + PAGE_SIZE), pageframes++)
    {
      if(*pageframe)
      {
        size_t filemappingoffset=PAGE_SIZE*((*pageframe)-1);
        /* Change reservation for next segment */
        if(!VirtualFree(addr, 0, MEM_RELEASE)) ret=0;
        if(!MapViewOfFileEx((HANDLE) data.data[0], FILE_MAP_ALL_ACCESS,
#if defined(_M_IA64) || defined(_M_X64) || defined(WIN64)
                           (DWORD)(filemappingoffset>>32),
#else
                           0,
#endif
                           (DWORD)(filemappingoffset & (DWORD)-1), PAGE_SIZE, addr)) ret=0;
      }
      else
      {
        if(!UnmapViewOfFile(addr)) ret=0;
        /* Rereserve */
        if(!VirtualAlloc(addr, PAGE_SIZE, MEM_RESERVE, PAGE_READWRITE)) ret=0;
      }
    }
    return ret;
  }
#endif
  return 0;
}
/* Scatter version of OSRemapMemoryPagesOntoAddr(): maps each page frame onto
its corresponding (possibly non-contiguous) address; a zero frame unmaps.
Returns non-zero on success. */
static
__declspec(noinline)
size_t OSRemapMemoryPagesOntoAddrs(void *RESTRICT *addrs, size_t entries, PageFrameType *RESTRICT pageframes, OSAddressSpaceReservationData *RESTRICT data)
{
#ifdef DEBUG
  /* Sanity check: no target address may be null */
  size_t n;
  void *RESTRICT *addr;
  PageFrameType *RESTRICT pf;
  assert(entries);
  for(addr=addrs, pf=pageframes, n=0; n<entries; n++, addr++, pf++)
  {
    assert(*addr);
    /*DebugPrint("Mapping page frame %p to %p\n", *pf, *addr);*/
  }
#endif
  if(!data->data[0])
  { /* Native physical page support: one kernel call remaps everything */
    BOOL ret;
#ifdef ENABLE_PHYSICALPAGEEMULATION
    entries*=sizeof(PageFrameType)/sizeof(ULONG_PTR); /* 64Kb units -> real 4Kb pages */
#endif
#if 1
    ret=MapUserPhysicalPagesScatter(addrs, entries, (PULONG_PTR) pageframes);
    if(!ret)
    {
      assert(ret);
    }
#else
    { /* Page-at-a-time variant kept for debugging */
      size_t n;
      void **aptr;
      PageFrameType *RESTRICT bptr;
      for(n=0, aptr=addrs, bptr=pageframes; n<entries; n++, aptr++, bptr++)
      {
        ret=MapUserPhysicalPages(*aptr, 1, *bptr ? bptr : NULL);
        if(!ret)
        {
          assert(ret);
          return 0;
        }
      }
      return 1;
    }
#endif
    return ret;
  }
#ifdef ENABLE_PHYSICALPAGEEMULATION
  else
  { /* File mapping emulation path.
       NOTE(review): like the same branch in OSRemapMemoryPagesOntoAddr(), this
       cannot compile as written: 'pageframe' is undeclared (presumably
       '*pageframes' was intended, itself a struct here), 'data' is a pointer
       so 'data.data[0]' should be 'data->data[0]', and 'pfa'/'pf' are unused.
       Needs reworking before this emulation path is enabled. */
    size_t n, ret=1;
    PageFrameType *RESTRICT pfa, *RESTRICT pf;
    for(n=0; n<entries; n++, addrs++, pageframes++)
    {
      if(*pageframe)
      {
        size_t filemappingoffset=PAGE_SIZE*((*pageframe)-1);
        /* Change reservation for next segment */
        if(!VirtualFree(*addrs, 0, MEM_RELEASE)) ret=0;
        if(!MapViewOfFileEx((HANDLE) data.data[0], FILE_MAP_ALL_ACCESS,
#if defined(_M_IA64) || defined(_M_X64) || defined(WIN64)
                           (DWORD)(filemappingoffset>>32),
#else
                           0,
#endif
                           (DWORD)(filemappingoffset & (DWORD)-1), PAGE_SIZE, *addrs)) ret=0;
      }
      else
      {
        if(!UnmapViewOfFile(*addrs)) ret=0;
        /* Rereserve */
        if(!VirtualAlloc(*addrs, PAGE_SIZE, MEM_RESERVE, PAGE_READWRITE)) ret=0;
      }
    }
    return ret;
  }
#endif
  return 0;
}

#endif

/* Maps an address reservation */
/* Maps an address reservation */
typedef struct FreePageNodeStorage_s FreePageNodeStorage_t;
typedef struct FreePageNode_t FreePageNode;
/* Tracks one free page; nodes form age-ordered doubly linked lists (the
clean and dirty lists in AddressSpaceReservation_s). */
struct FreePageNode_t
{
  FreePageNodeStorage_t *owner;       /* Storage block this node lives in */
  FreePageNode *older, *newer;        /* Always keep owner + older at top */
  void *freepage;                     /* Address of the free page */
  PageFrameType pageframe;            /* Physical frame backing the page */
  size_t dirty: 1;                    /* Set when the node sits on the dirty list */
  size_t age  :(8*sizeof(size_t)-1);  /* Operation count stamp; never exceeds opcount */
};
static FreePageNode *AllocateFPN(void);
static void FreeFPN(FreePageNode *node);

typedef struct AddressSpaceReservation_s AddressSpaceReservation_t;
#ifdef USERMODEPAGEALLOCATOR_USECOMPACTFREEPAGEINDICATOR
typedef union PageMapping_t PageMapping;
union PageMapping_t
#else
typedef struct PageMapping_t PageMapping;
struct PageMapping_t
#endif
{
  PageFrameType pageframe;      /* Actually the pageframe shifted left by one and the bottom bit always set */
                                /* (that encoding only applies to the compact union form) */
  FreePageNode *freepagenode;   /* Free page tracking node; see ISPAGEFREE() below */
};
/* Header placed at the very base of each reserved slab of address space.
The pagemapping table that follows covers every page of the slab, including
the pages holding this structure itself. */
static struct AddressSpaceReservation_s
{
  OSAddressSpaceReservationData OSreservedata; /* OS handle for this slab */
  AddressSpaceReservation_t *RESTRICT next;    /* Next slab in the chain */
  void *front, *frontptr;         /* Grows upward */
  void *back, *backptr;           /* Grows downward */
  size_t opcount;                 /* Operation counter; free page ages are stamped from it */
  FreePageNode *oldestclean, *newestclean;     /* Age-ordered clean free page list */
  FreePageNode *oldestdirty, *newestdirty;     /* Age-ordered dirty free page list */
  FreePageNode *nodestore;        /* Backing store for FreePageNodes -- TODO confirm */
  size_t freepages;               /* Number of pages on the free lists */
  size_t usedpages;               /* Doesn't include pages used to store this structure */
  PageMapping pagemapping[1];     /* Includes this structure */
} *RESTRICT addressspacereservation;
#ifdef USERMODEPAGEALLOCATOR_USECOMPACTFREEPAGEINDICATOR
/* Compact form: PageMapping is a union. A used page stores (frame<<1)|1 in
pageframe; a free page stores the FreePageNode pointer, whose bottom bit is
always clear, so the bottom bit distinguishes the two states. */
#define ISPAGEFREE(pagemapping)       (!((size_t)((pagemapping).pageframe) & 1))
#define PAGEFRAME(pagemapping)        (ISPAGEFREE(pagemapping) ? ((pagemapping).freepagenode)->pageframe : (PageFrameType)((size_t)((pagemapping).pageframe)>>1))
#define SETPAGEFRAME(pagemapping, pf) ((pagemapping).pageframe=!(pf) ? 0 : (PageFrameType)(((size_t)(pf)<<1)|1))
#define SETPAGEFREE(pagemapping, fpn) ((fpn)->pageframe=(PageFrameType)((size_t)((pagemapping).pageframe)>>1), (pagemapping).freepagenode=(fpn))
/* Fixed: the original definition was missing its closing parenthesis and
would not have compiled with the compact indicator enabled. The union form
needs no explicit freepagenode clear. */
#define SETPAGEUSED(pagemapping, pf)  (SETPAGEFRAME((pagemapping), (pf)))
#else
/* Expanded form: pageframe and freepagenode are separate struct fields; a
non-zero freepagenode marks the page as free. */
#define ISPAGEFREE(pagemapping)       (((pagemapping).freepagenode))
#define PAGEFRAME(pagemapping)        ((pagemapping).pageframe)
#define SETPAGEFRAME(pagemapping, pf) ((pagemapping).pageframe=(pf))
#define SETPAGEFREE(pagemapping, fpn) ((fpn)->pageframe=((pagemapping).pageframe), (pagemapping).freepagenode=(fpn))
#define SETPAGEUSED(pagemapping, pf)  (SETPAGEFRAME((pagemapping), (pf)), (pagemapping).freepagenode=0)
#endif


/* Debug-only consistency check of the clean and dirty free page lists:
every node must be marked free in the page mapping table, the list links
must agree in both directions, node ages must not exceed the global
operation count, and the total node count must equal addr->freepages.
Compiles to nothing under NDEBUG. */
static void ValidateFreePageLists(AddressSpaceReservation_t *RESTRICT addr)
{
#ifndef NDEBUG
#if 1
  FreePageNode *RESTRICT fpn;
  size_t count=0;
  PageMapping *RESTRICT pf;
  size_t n, freepagepfidx;
  /* Walk the clean list oldest to newest */
  for(fpn=addr->oldestclean; fpn; fpn=fpn->newer)
  {
    freepagepfidx=((size_t)fpn->freepage-(size_t)addr)/PAGE_SIZE;
    assert(ISPAGEFREE(addr->pagemapping[freepagepfidx]));
    assert(addr->pagemapping[freepagepfidx].freepagenode==fpn);
    assert((!fpn->older && addr->oldestclean==fpn) || fpn->older->newer==fpn);
    assert((!fpn->newer && addr->newestclean==fpn) || fpn->newer->older==fpn);
    assert(fpn->age<=addr->opcount);
    assert(!fpn->dirty);
    count++;
  }
  /* Walk the dirty list oldest to newest */
  for(fpn=addr->oldestdirty; fpn; fpn=fpn->newer)
  {
    freepagepfidx=((size_t)fpn->freepage-(size_t)addr)/PAGE_SIZE;
    assert(ISPAGEFREE(addr->pagemapping[freepagepfidx]));
    assert(addr->pagemapping[freepagepfidx].freepagenode==fpn);
    assert((!fpn->older && addr->oldestdirty==fpn) || fpn->older->newer==fpn);
    assert((!fpn->newer && addr->newestdirty==fpn) || fpn->newer->older==fpn);
    assert(fpn->age<=addr->opcount);
    assert(fpn->dirty);
    count++;
  }
  assert(count==addr->freepages);
#if 0
  /* Even slower cross-check, disabled: scan the page mapping table itself
  and verify each free page's node points back at the right slot */
  count=0;
  for(pf=addr->pagemapping, n=0; n<((size_t) addr->frontptr + 16*PAGE_SIZE - (size_t) addr); pf++, n+=PAGE_SIZE)
  {
    if(pf->pageframe && ISPAGEFREE(*pf))
    {
      fpn=pf->freepagenode;
      freepagepfidx=((size_t)fpn->freepage-(size_t)addr)/PAGE_SIZE;
      assert(addr->pagemapping+freepagepfidx==pf);
      assert((size_t)fpn->freepage-(size_t)addr==n);
      assert((!fpn->older && addr->oldestdirty==fpn) || fpn->older->newer==fpn);
      assert((!fpn->newer && addr->newestdirty==fpn) || fpn->newer->older==fpn);
      assert(fpn->age<=addr->opcount);
      count++;
    }
  }
  n=((size_t) addr->backptr - 16*PAGE_SIZE - (size_t) addr);
  for(pf=addr->pagemapping+n/PAGE_SIZE; n<((size_t) addr->back - (size_t) addr); pf++, n+=PAGE_SIZE)
  {
    if(pf->pageframe && ISPAGEFREE(*pf))
    {
      fpn=pf->freepagenode;
      freepagepfidx=((size_t)fpn->freepage-(size_t)addr)/PAGE_SIZE;
      assert(addr->pagemapping+freepagepfidx==pf);
      assert((size_t)fpn->freepage-(size_t)addr==n);
      assert((!fpn->older && addr->oldestdirty==fpn) || fpn->older->newer==fpn);
      assert((!fpn->newer && addr->newestdirty==fpn) || fpn->newer->older==fpn);
      assert(fpn->age<=addr->opcount);
      count++;
    }
  }
  assert(count==addr->freepages);
#endif
#endif
#endif
}
/* Debug-only check that the page mapping table agrees with what is actually
mapped by the MMU: remaps each recorded frame onto its own address and
optionally probes readability. Entirely disabled at present (#if 0) and
MSVC-only because of the probing techniques used. */
static void ValidatePageMappings(AddressSpaceReservation_t *RESTRICT addr)
{
#ifndef NDEBUG
#if 0
#ifdef _MSC_VER
  PageMapping *RESTRICT pf;
  size_t n;
  for(pf=addr->pagemapping, n=0; n<((size_t) addr->frontptr + 16*PAGE_SIZE - (size_t) addr); pf++, n+=PAGE_SIZE)
  {
    volatile size_t *RESTRICT pageaddr=(size_t *RESTRICT)((size_t) addr + n + 0x300/* For some odd reason Windows maps 0x2F0 extra bytes */);
    int faulted=0;
    PageFrameType t=pf->pageframe ? PAGEFRAME(*pf) : 0;
    if(pf->pageframe && n)
    { /* Verify that this is indeed a valid page frame */
      assert(OSRemapMemoryPagesOntoAddr((void *)((size_t) addr + n), 1, NULL, &addr->OSreservedata));
      assert(OSRemapMemoryPagesOntoAddr((void *)((size_t) addr + n), 1, &t, &addr->OSreservedata));
    }
#if 0
#if 1
    { /* Probe readability without risking a crash */
      char buffer[8];
      faulted=!ReadProcessMemory((HANDLE)(size_t)-1, (void *) pageaddr, buffer, 1, NULL);
    }
#else
    __try
    {
      *pageaddr;
    }
    __except(1)
    {
      faulted=1;
    }
#endif
    assert(faulted==!t);
#endif
  }
#endif
#endif
#endif
}





/* Reserves a large slab of address space (at least 1Gb, 4Gb on 64 bit,
halving on failure), installs the AddressSpaceReservation_t header at its
base and commits just enough pages to hold the page mapping table. Returns
zero if insufficient address space or memory is available. */
static AddressSpaceReservation_t *ReserveSpace(size_t space)
{
  const size_t RESERVEALWAYSLEAVEFREE=64*1024*1024; /* Windows goes seriously screwy if you take away all address space */
  OSAddressSpaceReservationData addrR={0};
  AddressSpaceReservation_t *RESTRICT addr=0;
  size_t pagemappingsize, n, pagesallocated=0;
  PageFrameType pagebuffer[256];
  if(space<(size_t)1<<30 /* 1Gb */)
  {
    space=(size_t)1<<30;
    if(8==sizeof(size_t)) space<<=2; /* Go for 4Gb chunks on 64 bit */
  }
  /* Keep halving the request until the OS grants it */
  while(space>=RESERVEALWAYSLEAVEFREE && !(addrR=OSReserveAddrSpace(space)).addr)
    space>>=1;
  if(space<RESERVEALWAYSLEAVEFREE)
    return 0;
  /* Size of the header plus one PageMapping per page, rounded up to whole pages */
  pagemappingsize=sizeof(AddressSpaceReservation_t)+sizeof(PageMapping)*((space/PAGE_SIZE)-2);
  pagemappingsize=(pagemappingsize+PAGE_SIZE-1) &~(PAGE_SIZE-1);
  pagemappingsize/=PAGE_SIZE;
  /* We now need pagemappingsize number of pages in order to store the mapping tables, but
  because this could be as much as 4Mb of stuff we'll need to do it in chunks to avoid
  breaking the stack. */
  for(n=0; n<pagemappingsize; n+=pagesallocated)
  {
    size_t torequest=sizeof(pagebuffer)/sizeof(PageFrameType);
    void *mapaddr=(void *)((size_t) addrR.addr + n*PAGE_SIZE);
    if(torequest>pagemappingsize-n) torequest=pagemappingsize-n;
    if(!(pagesallocated=OSObtainMemoryPages(pagebuffer, torequest, &addrR)))
      goto badexit;
    if(!OSRemapMemoryPagesOntoAddr(mapaddr, pagesallocated, pagebuffer, &addrR))
      goto badexit;
    if(!n)
    { /* This is the first run, so install AddressSpaceReservation */
      addr=(AddressSpaceReservation_t *RESTRICT) addrR.addr;
      addr->OSreservedata=addrR;
      addr->front=addr->frontptr=(void *)((size_t)addr+pagemappingsize*PAGE_SIZE);
      addr->back=addr->backptr=(void *)((size_t)addr+space);
    }
    /* Add these new pages to the page mappings. Because we are premapping in new pages,
    we are guaranteed to have memory already there ready for us. */
    for(torequest=0; torequest<pagesallocated; torequest++)
      SETPAGEFRAME(addr->pagemapping[n+torequest], pagebuffer[torequest]);
  }
  ValidatePageMappings(addr);
#ifdef DEBUG
   DebugPrint("*** Reserved address space from %p to %p (%luMb)\n", addr, addr->back, (unsigned long)((size_t)addr->back - (size_t) addr)/1024/1024);
#endif
  return addr;
badexit:
  /* Firstly throw away any just allocated pages */
  if(pagesallocated)
    OSReleaseMemoryPages(pagebuffer, pagesallocated, &addrR);
  if(addr)
  { /* Now return the n pages already mapped for the mapping tables, again in
    pagebuffer sized chunks. Fixed from the original: the chunk size was
    computed as m-n (which underflows as m<n) and pagebuffer was indexed with
    the absolute page index, overrunning the 256-entry buffer. */
    size_t m, o, torequest;
    for(m=0; m<n; m+=torequest)
    {
      torequest=sizeof(pagebuffer)/sizeof(PageFrameType);
      if(torequest>n-m) torequest=n-m;
      for(o=0; o<torequest; o++)
        pagebuffer[o]=PAGEFRAME(addr->pagemapping[m+o]);
      OSReleaseMemoryPages(pagebuffer, torequest, &addrR);
    }
  }
  OSReleaseAddrSpace(&addrR, space);
  return 0;
}
/* Walks the chain of address space reservations from its tail inwards,
releasing any reservation which no longer holds used pages. Returns 1 when
the reservation pointed to by _addr was released (and *_addr zeroed),
0 otherwise. */
static int CheckFreeAddressSpaces(AddressSpaceReservation_t *RESTRICT *RESTRICT _addr)
{
  AddressSpaceReservation_t *RESTRICT reservation=*_addr;
  size_t reservationlen;
  /* Only consider this reservation once everything after it is gone */
  if(reservation->next && !CheckFreeAddressSpaces(&reservation->next))
    return 0;
  assert(!reservation->next);
  if(reservation->usedpages!=0)
    return 0;
  reservationlen=(size_t)reservation->back-(size_t)reservation;
  assert(reservation->frontptr==reservation->front);
  assert(reservation->backptr==reservation->back);
  if(!OSReleaseAddrSpace(&reservation->OSreservedata, reservationlen))
    return 0;
  *_addr=0;
  return 1;
}
#ifdef USERMODEPAGEALLOCATOR_DEBUGCONFIG
#define REMAPMEMORYPAGESBLOCKSIZE 16
#else
#define REMAPMEMORYPAGESBLOCKSIZE 1024
#endif
/* A batch of queued page (de)mapping operations, so that expensive OS remap
calls can be issued for many pages at once rather than one page at a time. */
typedef struct STRUCTUREALIGNMENT(16) RemapMemoryPagesBlock_t
{
  void *addrs[REMAPMEMORYPAGESBLOCKSIZE];              /* Virtual addresses to (de)commit */
  PageFrameType pageframes[REMAPMEMORYPAGESBLOCKSIZE]; /* Physical page frames to map at addrs */
  size_t idx;                                          /* Number of entries currently queued */
} RemapMemoryPagesBlock;
/* Maps physical pages onto the virtual address range starting at freespaceaddr
whose page mapping entries run [start, end), drawing first from this
reservation's free page cache and then requesting fresh pages from the OS.
Demap/remap operations are batched through memtodecommit/memtocommit (which
the caller must flush afterwards). needclean prefers already-clean pages.
Returns the number of page mapping entries actually filled, which is less
than end-start only when the OS refuses to supply more pages. */
static size_t FillWithFreePages(AddressSpaceReservation_t *RESTRICT addr, RemapMemoryPagesBlock *RESTRICT memtodecommit, RemapMemoryPagesBlock *RESTRICT memtocommit, void *freespaceaddr, PageMapping *RESTRICT start, PageMapping *RESTRICT end, int needclean)
{
  size_t n, pages=end-start;
  PageMapping *RESTRICT pf=start;
  /* Phase one: consume cached free pages while any remain */
  for(n=0; n<pages && addr->freepages; n++, pf++, freespaceaddr=(void *)((size_t) freespaceaddr + PAGE_SIZE))
  {
    FreePageNode *RESTRICT fpn=0, *RESTRICT *RESTRICT fpnaddr=0, *RESTRICT *RESTRICT fpnnaddr=0;
    PageFrameType freepageframe;
    size_t freepagepfidx;
    assert(!pf->pageframe); /* Caller guarantees these entries are unmapped */
    /* Pick a source list: clean pages when asked, else dirty, else clean */
    if(needclean && addr->oldestclean)
    {
      assert(!addr->oldestclean->older);
      fpn=addr->oldestclean;
      fpnaddr=&addr->oldestclean;
      fpnnaddr=&addr->newestclean;
    }
    else if(addr->oldestdirty)
    {
      assert(!addr->oldestdirty->older);
      fpn=addr->oldestdirty;
      fpnaddr=&addr->oldestdirty;
      fpnnaddr=&addr->newestdirty;
    }
    else if(!needclean && addr->oldestclean)
    {
      assert(!addr->oldestclean->older);
      fpn=addr->oldestclean;
      fpnaddr=&addr->oldestclean;
      fpnnaddr=&addr->newestclean;
    }
    /* NOTE(review): if freepages>0 but both lists were empty, fpn would stay
    0 and the dereference below would fault — presumably freepages>0 implies
    a non-empty list; confirm that invariant. */
    /* Add to the list of pages to demap */
    memtodecommit->addrs[memtodecommit->idx]=fpn->freepage;
    /*memtodecommit->pageframes[memtodecommit->idx]=0;*/
    memtodecommit->idx++;
    /* Add to the list of pages to remap */
    freepagepfidx=((size_t)fpn->freepage-(size_t)addr)/PAGE_SIZE;
    freepageframe=fpn->pageframe;
    SETPAGEUSED(addr->pagemapping[freepagepfidx], 0);
    memtocommit->addrs[memtocommit->idx]=freespaceaddr;
    SETPAGEUSED(*pf, freepageframe);
    memtocommit->pageframes[memtocommit->idx]=freepageframe;
    memtocommit->idx++;
    /* Remove from free page lists */
    assert(!fpn->older);
    *fpnaddr=fpn->newer;
    if(*fpnaddr)
      (*fpnaddr)->older=0;
    else
      *fpnnaddr=0;
    addr->freepages--;
    addr->usedpages++;
#if MMAP_CLEARS
    /* Caller expects zeroed pages, so scrub a dirty one before handing it out */
    if(needclean && fpn->dirty)
      memset(fpn->freepage, 0, PAGE_SIZE);
#endif
    FreeFPN(fpn);
    ValidateFreePageLists(addr);

    /* Flush both batches once the commit batch is full */
    if(REMAPMEMORYPAGESBLOCKSIZE==memtocommit->idx)
    {
      if(memtodecommit->idx) OSRemapMemoryPagesOntoAddrs(memtodecommit->addrs, memtodecommit->idx, NULL, &addr->OSreservedata);
      OSRemapMemoryPagesOntoAddrs(memtocommit->addrs, memtocommit->idx, memtocommit->pageframes, &addr->OSreservedata);
      memtodecommit->idx=memtocommit->idx=0;
      ValidatePageMappings(addr);
    }
  }
  /* Allocate more pages if needed */
#ifdef DEBUG
	/* NOTE(review): the cast binds to pages only, so the subtraction happens
	in unsigned long arithmetic — verify the intended value is printed */
	if(pages-n>0) DebugPrint("Requesting %lu new pages from kernel\n", (unsigned long) pages-n);
#endif
  /* Phase two: ask the OS for whatever the cache could not supply */
  while(pages-n>0)
  {
    size_t newpagesnow=pages-n, newpagesobtained, m;
    if(newpagesnow>REMAPMEMORYPAGESBLOCKSIZE-memtocommit->idx) newpagesnow=REMAPMEMORYPAGESBLOCKSIZE-memtocommit->idx;
    newpagesobtained=OSObtainMemoryPages(&memtocommit->pageframes[memtocommit->idx], newpagesnow, &addr->OSreservedata);
    if(newpagesnow!=newpagesobtained)
    { /* Partial OS failure: hand back anything obtained and report shortfall */
      if(newpagesobtained) OSReleaseMemoryPages(&memtocommit->pageframes[memtocommit->idx], newpagesobtained, &addr->OSreservedata);
      return n;
    }
    /* Queue the fresh frames for mapping at their destination addresses */
    for(m=memtocommit->idx; m<memtocommit->idx+newpagesobtained; m++, pf++, freespaceaddr=(void *)((size_t) freespaceaddr + PAGE_SIZE))
    {
      memtocommit->addrs[m]=freespaceaddr;
      SETPAGEUSED(*pf, memtocommit->pageframes[m]);
    }
    memtocommit->idx+=newpagesobtained;
    n+=newpagesobtained;
    addr->usedpages+=newpagesobtained;
    if(REMAPMEMORYPAGESBLOCKSIZE==memtocommit->idx)
    {
      if(memtodecommit->idx) OSRemapMemoryPagesOntoAddrs(memtodecommit->addrs, memtodecommit->idx, NULL, &addr->OSreservedata);
      OSRemapMemoryPagesOntoAddrs(memtocommit->addrs, memtocommit->idx, memtocommit->pageframes, &addr->OSreservedata);
      memtodecommit->idx=memtocommit->idx=0;
      ValidatePageMappings(addr);
    }
  }
  return n;
}
/* Unlinks fpn from whichever free page list (dirty or clean) it currently
sits on, patching its neighbours and the list head/tail pointers. Returns
non-zero when the page must be wiped before reuse (it was found on the
dirty list or carries the dirty flag). Does not adjust addr->freepages or
free fpn itself; that is the caller's job. */
static int DetachFreePage(AddressSpaceReservation_t *RESTRICT addr, FreePageNode *RESTRICT fpn)
{
  int wipeall=0;
  FreePageNode *RESTRICT *RESTRICT prevnextaddr=0, *RESTRICT *RESTRICT nextprevaddr=0;
  /* Find the pointer that leads forward into fpn */
  if(fpn->older)
  {
    assert(fpn->older->newer==fpn);
    prevnextaddr=&fpn->older->newer;
  }
  else if(addr->oldestdirty==fpn)
  { /* Head of the dirty list, so the page is certainly dirty */
    assert(!fpn->older);
    prevnextaddr=&addr->oldestdirty;
    wipeall=1;
  }
  else if(addr->oldestclean==fpn)
  {
    assert(!fpn->older);
    prevnextaddr=&addr->oldestclean;
  }
  /* Find the pointer that leads backward into fpn */
  if(fpn->newer)
  {
    assert(fpn->newer->older==fpn);
    nextprevaddr=&fpn->newer->older;
  }
  else if(addr->newestdirty==fpn)
  { /* Tail of the dirty list, so again certainly dirty */
    assert(!fpn->newer);
    nextprevaddr=&addr->newestdirty;
    wipeall=1;
  }
  else if(addr->newestclean==fpn)
  {
    assert(!fpn->newer);
    nextprevaddr=&addr->newestclean;
  }
  /* An interior node reveals its dirtiness only via its flag */
  if(!wipeall) wipeall=(int)fpn->dirty;
  assert(prevnextaddr && nextprevaddr);
  *prevnextaddr=fpn->newer;
  *nextprevaddr=fpn->older;
  return wipeall;
}
static int ReleasePages(void *mem, size_t size, int dontfreeVA);
/* Commits size bytes of pages at mem (or picks an address when mem is 0)
within one of the address space reservations, extending the reservation
chain when none has room. flags may contain USERPAGE_TOPDOWN (allocate from
the back of the reservation) and USERPAGE_NOCOMMIT (claim address space but
map no pages). Returns the address of the block, or 0 on failure; aborts if
the very first reservation cannot be created. */
static void *AllocatePages(void *mem, size_t size, unsigned flags)
{
  AddressSpaceReservation_t *RESTRICT addr;
  /* Lazily create the first reservation */
  if(!addressspacereservation && !(addressspacereservation=ReserveSpace(0)))
  {
    DebugPrint("User Mode Page Allocator: Failed to allocate initial address space\n");
    abort();
  }
  for(addr=addressspacereservation; addr; addr=addr->next)
  {
    int fromback=(flags & USERPAGE_TOPDOWN);
    /* Either mem falls inside this reservation's front or back allocated area
    (the embedded assignments to fromback record which side it was found on),
    or mem is 0 and the gap between frontptr and backptr can hold the block */
    if((mem && ((mem>=addr->front && mem<addr->frontptr && !(fromback=0)) || (mem>=addr->backptr && mem<addr->back && (fromback=1))))
      || (!mem && ((size_t) addr->backptr - (size_t) addr->frontptr>=size)))
    {
      RemapMemoryPagesBlock memtodecommit, memtocommit;
      size_t n, sizeinpages=size/PAGE_SIZE;
      void *ret=mem ? mem : ((fromback) ? (void *)((size_t) addr->backptr - size) : addr->frontptr), *retptr;
      PageMapping *RESTRICT pagemappingsbase=addr->pagemapping+((size_t)ret-(size_t)addr)/PAGE_SIZE, *RESTRICT pagemappings;
      int needtofillwithfree=0;
      memtodecommit.idx=memtocommit.idx=0;
      if(!mem)
      { /* Fresh block: advance the appropriate VA watermark */
        if(fromback)
          addr->backptr=(void *)((size_t) addr->backptr - size);
        else
          addr->frontptr=(void *)((size_t) addr->frontptr + size);
#ifdef DEBUG
        DebugPrint("VA %s goes to %p within %p-%p\n", fromback ? "backptr" : "frontptr", fromback ? addr->backptr : addr->frontptr, addr->front, addr->back);
#endif
      }
      if(!(flags & USERPAGE_NOCOMMIT))
      { /* We leave memory still held by the application mapped at the addresses it was mapped at
        when freed and only nobble these when we need new pages. Hence between addresses ret and
        ret+size there may be a patchwork of already allocated regions, so what we do is to firstly
        delink any already mapped pages from the free page list and then to batch the filling in of
        the blank spots sixteen at a time. */
        pagemappings=pagemappingsbase;
        retptr=ret;
        /* Pass one: reclaim pages already mapped here but sitting on a free list */
        for(n=0; n<sizeinpages; n++, pagemappings++, retptr=(void *)((size_t)retptr + PAGE_SIZE))
        {
          if(pagemappings->pageframe && ISPAGEFREE(*pagemappings))
          {
            FreePageNode *RESTRICT fpn=pagemappings->freepagenode;
            int wipeall=DetachFreePage(addr, fpn);
            assert(fpn->freepage==retptr);
            SETPAGEUSED(*pagemappings, fpn->pageframe);
            addr->freepages--;
            addr->usedpages++;
#if MMAP_CLEARS
            if(fromback && wipeall)
              memset(retptr, 0, PAGE_SIZE);
#endif
            FreeFPN(fpn);
            ValidateFreePageLists(addr);
          }
          else
            needtofillwithfree=1; /* At least one entry is wholly unmapped */
        }
        if(needtofillwithfree)
        { /* Pass two: fill each contiguous run of unmapped entries in one call */
          pagemappings=pagemappingsbase;
          retptr=ret;
          for(n=0; n<sizeinpages; n++, pagemappings++, retptr=(void *)((size_t)retptr + PAGE_SIZE))
          {
            if(!pagemappings->pageframe)
            {
              void *emptyaddrstart=retptr;
              PageMapping *RESTRICT emptyframestart=pagemappings;
              size_t filled;
              /* Extend the run up to the next mapped entry or the end */
              for(; n<sizeinpages && !pagemappings->pageframe; n++, pagemappings++, retptr=(void *)((size_t)retptr + PAGE_SIZE));
              if((size_t)(pagemappings-emptyframestart)!=(filled=FillWithFreePages(addr, &memtodecommit, &memtocommit, emptyaddrstart, emptyframestart, pagemappings, fromback)))
              { /* We failed to allocate everything, so release */
                assert(0);
                ReleasePages(ret, size, 0);
                return 0;
              }
              if(n==sizeinpages) break;
            }
          }
          if(memtocommit.idx)
          { /* Flush whatever remains in the batched (de)mappings */
            if(memtodecommit.idx) OSRemapMemoryPagesOntoAddrs(memtodecommit.addrs, memtodecommit.idx, NULL, &addr->OSreservedata);
            OSRemapMemoryPagesOntoAddrs(memtocommit.addrs, memtocommit.idx, memtocommit.pageframes, &addr->OSreservedata);
            memtodecommit.idx=memtocommit.idx=0;
            ValidatePageMappings(addr);
          }
        }
      }
      return ret;
    }
    if(!addr->next)
    { /* No reservation fits: chain a new one sized to the next power of two above size */
      addr->next=ReserveSpace((size_t) 1<<(nedtriebitscanr(size-1)+1));
    }
  }
  return 0;
}
/* Looks up which address space reservation, if any, contains mem. When
fromback is non-null, *fromback is set to whether mem lives in the top-down
(back) region of that reservation. Returns 0 when mem lies within no known
reservation. */
static AddressSpaceReservation_t *RESTRICT AddressSpaceFromMem(int *RESTRICT fromback, void *mem)
{
  AddressSpaceReservation_t *RESTRICT reservation=addressspacereservation;
  while(reservation)
  {
    if(mem>=reservation->front && mem<reservation->back)
    {
      if(fromback)
        *fromback=(mem>=reservation->backptr);
      return reservation;
    }
    reservation=reservation->next;
  }
  return 0;
}
/* Classifies the page containing mem: returns 0 when the page is in use,
1 when it is mapped but sitting on a free list, and 2 when no page is
mapped there at all. */
static int IsPageFreeOrEmpty(AddressSpaceReservation_t *RESTRICT addr, void *mem)
{
  PageMapping *RESTRICT mapping=addr->pagemapping+((size_t)mem-(size_t)addr)/PAGE_SIZE;
  if(!mapping->pageframe)
    return 2;
  return ISPAGEFREE(*mapping) ? 1 : 0;
}
/* Releases up to pagestofree cached free pages from this reservation back to
the OS, of which pagesmustfree are released regardless of page age. Pages
are culled oldest-dirty first and decommitted REMAPMEMORYPAGESBLOCKSIZE at
a time; the decommitpages label below is entered both when a batch fills
and, via goto, when the loop decides to stop. On OS release failure the
whole batch is re-threaded onto the dirty list. Returns the count of pages
freed.
NOTE(review): the for's increment clause also executes on the terminating
iteration (after the goto path), so pagesfreed appears to overcount by
one — confirm whether callers compensate. */
static size_t TrimFreePagesFromAddr(AddressSpaceReservation_t *RESTRICT addr, size_t pagestofree, size_t pagesmustfree)
{
  size_t pagesfreed=0;
  int done=0;
  FreePageNode *RESTRICT fpn;
  RemapMemoryPagesBlock memtodecommit;
  memtodecommit.idx=0;
  /* Try to cull dirty before clean pages if poss */
  for(fpn=addr->oldestdirty; !done; fpn=addr->oldestdirty, pagesfreed++, pagestofree--, pagesmustfree=pagesmustfree ? pagesmustfree-1 : 0)
  {
    size_t freepagepfidx;
    /* Stop when out of pages or quota, or when the oldest page is still too
    young to evict (unless pages must be freed unconditionally) */
    if(!fpn || !pagestofree || (!pagesmustfree && addr->opcount-fpn->age<USERMODEPAGEALLOCATOR_FREEPAGECACHEAGE(addr->usedpages, addr->freepages)))
    {
      done=1;
#ifdef DEBUG
      /*printf("FREEING %u pages\n", pagesfreed);*/
#endif
      goto decommitpages;
    }
    assert(!fpn->older);
    /* Unlink the oldest dirty page from its list */
    addr->oldestdirty=fpn->newer;
    if(addr->oldestdirty)
      addr->oldestdirty->older=0;
    else
      addr->newestdirty=0;
    assert(!addr->oldestclean); /* clean pages not implemented yet */
    addr->freepages--;
    freepagepfidx=((size_t)fpn->freepage-(size_t)addr)/PAGE_SIZE;
    memtodecommit.addrs[memtodecommit.idx]=fpn->freepage;
    memtodecommit.pageframes[memtodecommit.idx]=fpn->pageframe;
    SETPAGEUSED(addr->pagemapping[freepagepfidx], 0);
    memtodecommit.idx++;
    FreeFPN(fpn);
    ValidateFreePageLists(addr);

    if(REMAPMEMORYPAGESBLOCKSIZE==memtodecommit.idx)
    { /* Not actually needed, and it interferes with physical page emulation.
      OSRemapMemoryPagesOntoAddrs(memtodecommit.addrs, memtodecommitidx, memtodecommit.pageframes, &addr->OSreservedata);*/
decommitpages:
      if(memtodecommit.idx)
      {
        if(memtodecommit.idx==OSReleaseMemoryPages(memtodecommit.pageframes, memtodecommit.idx, &addr->OSreservedata))
        {
          memtodecommit.idx=0;
          ValidatePageMappings(addr);
        }
        else
        { /* OS refused: put every page in the batch back on the dirty list */
          size_t n;
          for(n=0; n<memtodecommit.idx; n++)
          {
            fpn=AllocateFPN();
            fpn->freepage=memtodecommit.addrs[n];
            freepagepfidx=((size_t)fpn->freepage-(size_t)addr)/PAGE_SIZE;
            fpn->pageframe=memtodecommit.pageframes[n];
            SETPAGEFREE(addr->pagemapping[freepagepfidx], fpn);
            fpn->older=0;
            fpn->newer=addr->oldestdirty;
            if(addr->oldestdirty)
              addr->oldestdirty->older=fpn;
            else
              addr->newestdirty=fpn;
            addr->oldestdirty=fpn;
            addr->freepages++;
            ValidateFreePageLists(addr);
          }
          pagestofree+=memtodecommit.idx;
          done=1;
        }
      }
    }
  }
  return pagesfreed;
}
/* Trims up to pagestofree cached free pages across every address space
reservation, of which at least pagesmustfree are released unconditionally.
Returns the total number of pages reported freed. */
static size_t TrimFreePages(size_t pagestofree, size_t pagesmustfree)
{
  size_t freedsofar=0;
  AddressSpaceReservation_t *RESTRICT reservation=addressspacereservation;
  while(reservation && pagestofree)
  {
    size_t freedhere=TrimFreePagesFromAddr(reservation, pagestofree, pagesmustfree);
    /* Spread the mandatory quota across successive reservations */
    pagesmustfree=(freedhere>pagesmustfree) ? 0 : pagesmustfree-freedhere;
    pagestofree-=freedhere;
    freedsofar+=freedhere;
    reservation=reservation->next;
  }
  return freedsofar;
}
/* Returns v raised to the eighth power via repeated squaring: ((v^2)^2)^2.
Used by ReleasePages to sharply scale the system memory pressure reading so
that only readings close to 1.0 force significant page freeing. */
static double mypow8(double v)
{
  double squared=v*v;            /* v^2 */
  double fourth=squared*squared; /* v^4 */
  /* Was "fourth+fourth" (i.e. 2*v^4), which is not v^8 and exceeds 1.0 for
  pressure readings above ~0.84, overstating memory pressure. */
  return fourth*fourth;          /* v^8 */
}
/* Returns all mapped in-use pages in [mem, mem+size) to this reservation's
dirty free page cache (leaving them mapped at their addresses until needed
elsewhere), periodically trims cached pages back to the OS when the cache
grows large or system memory looks tight, and when permitted retracts the
VA watermark if the block sits at its edge. */
static int ReleasePages(void *mem, size_t size, int dontfreeVA)
{ /* Returns 1 if address space was freed */
  int fromback;
  AddressSpaceReservation_t *RESTRICT addr=AddressSpaceFromMem(&fromback, mem);
  if(addr)
  {
    size_t n, sizeinpages=size/PAGE_SIZE;
    void *mempage=mem;
    PageMapping *RESTRICT pagemappings=addr->pagemapping+((size_t)mem-(size_t)addr)/PAGE_SIZE;
    int dofreesystemmemorycheck=0;
    /* mem must lie in an allocated region, not the unallocated middle gap */
    if(mem>=addr->frontptr && mem<addr->backptr)
    {
      fprintf(stderr, "User Mode Page Allocator: Attempt to free memory in dead man's land\n");
      assert(0);
      abort();
    }
    /* Thread every still-mapped, in-use page onto the newest end of the dirty list */
    for(n=0; n<sizeinpages; n++, pagemappings++, mempage=(void *)((size_t)mempage + PAGE_SIZE))
    {
      if(pagemappings->pageframe && !ISPAGEFREE(*pagemappings))
      {
        FreePageNode *RESTRICT fpn=AllocateFPN();
        if(!fpn) break; /* Out of node storage: leave remaining pages marked used */
        fpn->freepage=mempage;
        SETPAGEFREE(*pagemappings, fpn);
        fpn->older=addr->newestdirty;
        fpn->newer=0;
        fpn->dirty=1;
        fpn->age=addr->opcount;
        if(addr->newestdirty)
        {
          assert(!addr->newestdirty->newer);
          addr->newestdirty->newer=fpn;
        }
        else
          addr->oldestdirty=fpn;
        addr->newestdirty=fpn;
        addr->freepages++;
        addr->usedpages--;
        ValidateFreePageLists(addr);
      }
    }
    /* Do I need to return memory to the system? Checked every
    SYSTEMFREEMEMORYCHECKRATE ops (the mask assumes a power-of-two rate), or
    whenever the free page cache exceeds its configured size */
    if((dofreesystemmemorycheck=!(addr->opcount++ & (USERMODEPAGEALLOCATOR_SYSTEMFREEMEMORYCHECKRATE-1))) || addr->freepages>USERMODEPAGEALLOCATOR_FREEPAGECACHESIZE(addr->usedpages, addr->freepages))
    {
      size_t pagestofree=USERMODEPAGEALLOCATOR_FREEPAGECACHESIZE(addr->usedpages, addr->freepages), pagesmustfree=0;
      /* NOTE(review): function-static cache of the last pressure reading,
      shared across all reservations and calls — not thread safe on its own */
      static double memorypressurescale=0;
      if(dofreesystemmemorycheck)
        memorypressurescale=mypow8(OSSystemMemoryPressure());
      pagestofree=pagestofree>addr->usedpages ? 0 : addr->usedpages-pagestofree;
      pagesmustfree=(size_t)(addr->freepages*memorypressurescale);
      pagestofree+=pagesmustfree;
#if 1
      /* Don't bother with the overhead of freeing memory unless you have a sizeable chunk to do at once */
      if(pagestofree>=REMAPMEMORYPAGESBLOCKSIZE)
        TrimFreePagesFromAddr(addr, pagestofree, pagesmustfree);
#endif
    }
    if(!dontfreeVA)
    { /* If the block abuts the front or back watermark, retract the watermark */
      if((size_t) addr->frontptr-size==(size_t) mem || (size_t) addr->backptr==(size_t) mem)
      {
        if(fromback)
          addr->backptr=(void *)((size_t) addr->backptr + size);
        else
          addr->frontptr=(void *)((size_t) addr->frontptr - size);
        /*if(!addr->pagesused)
            CheckFreeAddressSpaces(&addressspacereservation); */
        return 1;
      }
    }
    return 0;
  }
  return 0;
}

/* Flushes any queued demappings in a single OS call (mapping NULL over every
queued address) and resets the batch. No-op when the batch is empty. */
static INLINE void SwapPagesBatchDecommit(AddressSpaceReservation_t *RESTRICT addr, RemapMemoryPagesBlock *RESTRICT memtodecommit)
{
  if(!memtodecommit->idx)
    return;
  OSRemapMemoryPagesOntoAddrs(memtodecommit->addrs, memtodecommit->idx, NULL, &addr->OSreservedata);
  memtodecommit->idx=0;
}
/* Flushes the queued demappings first, then maps each queued page frame onto
its queued address in one OS call and resets the commit batch. */
static void SwapPagesBatchCommit(AddressSpaceReservation_t *RESTRICT addr, RemapMemoryPagesBlock *RESTRICT memtodecommit, RemapMemoryPagesBlock *RESTRICT memtocommit)
{
  SwapPagesBatchDecommit(addr, memtodecommit);
  if(!memtocommit->idx)
    return;
  OSRemapMemoryPagesOntoAddrs(memtocommit->addrs, memtocommit->idx, memtocommit->pageframes, &addr->OSreservedata);
  memtocommit->idx=0;
}
/* Exchanges the page mappings of [start, end) with those at dest, moving the
physical pages between the two virtual ranges without copying contents.
Demap/remap operations are batched. In its usage by userspace_realloc()
the source range is always fully in use (see the asserts below). Free/used
page counts are transferred between the two reservations at the end.
Always returns 1. */
static int SwapPages(void *dest, void *start, void *end)
{
  int destfromback;
  AddressSpaceReservation_t *destaddr=AddressSpaceFromMem(&destfromback, dest), *srcaddr=AddressSpaceFromMem(0, start);
  size_t pages, n, freepages=0, usedpages=0, destpfidx=((size_t)dest-(size_t)destaddr)/PAGE_SIZE, srcpfidx=((size_t)start-(size_t)srcaddr)/PAGE_SIZE;
  PageMapping *RESTRICT destpf, *RESTRICT srcpf;
  void *destptr, *srcptr;
  RemapMemoryPagesBlock memtodecommit, memtocommit;
  memtodecommit.idx=memtocommit.idx=0;

  pages=((size_t)end-(size_t)start)/PAGE_SIZE;
  destptr=dest; destpf=destaddr->pagemapping+destpfidx;
  srcptr=start;  srcpf=srcaddr->pagemapping+srcpfidx;
  for(n=0; n<pages; n++, destptr=(void *)((size_t) destptr + PAGE_SIZE), destpf++, srcptr=(void *)((size_t) srcptr + PAGE_SIZE), srcpf++)
  {
    PageMapping t;
    int srcIsFree=srcpf->pageframe && ISPAGEFREE(*srcpf);
    int destIsFree=destpf->pageframe && ISPAGEFREE(*destpf);
    /* Although strictly not necessary, in the context of usage by
    userspace_realloc() the pages from start to end must always be in use */
    assert(srcpf->pageframe);
    assert(!srcIsFree);
    /* Firstly we demap any existing page in either src or dest */
    if(srcpf->pageframe)
    {
      memtodecommit.addrs[memtodecommit.idx]=srcptr;
      if(REMAPMEMORYPAGESBLOCKSIZE==++memtodecommit.idx) SwapPagesBatchDecommit(srcaddr, &memtodecommit);
    }
    if(destpf->pageframe)
    {
      memtodecommit.addrs[memtodecommit.idx]=destptr;
      if(REMAPMEMORYPAGESBLOCKSIZE==++memtodecommit.idx) SwapPagesBatchDecommit(destaddr, &memtodecommit);
    }
    /* Next we swap page mappings */
    t=*destpf;
    *destpf=*srcpf;
    *srcpf=t;
    /* If one or both of the pages swapped were free, fix up their address */
    if((srcIsFree && destIsFree) || (!srcIsFree && !destIsFree))
    {
      /* Do nothing, as either it doesn't matter or pointers come out the same */
#ifndef NDEBUG
      /* NOTE(review): these asserts compare freepage against the reservation
      pointers (srcaddr/destaddr) rather than the page addresses
      (srcptr/destptr), which looks suspicious — confirm intent */
      if(srcIsFree)  { assert(srcpf->freepagenode->freepage==destaddr); }
      if(destIsFree) { assert(destpf->freepagenode->freepage==srcaddr); }
#endif
    }
    else
    {
      FreePageNode *RESTRICT fpn=srcIsFree ? srcpf->freepagenode : destpf->freepagenode;
      /* NOTE(review): newaddr is a reservation pointer, not the page's new
      virtual address — verify against the freepage invariant used elsewhere.
      (With srcIsFree asserted false above, this branch only runs when the
      destination page was free.) */
      void *newaddr=srcIsFree ? srcaddr : destaddr;
#ifndef NDEBUG
      if(srcIsFree)  { assert(srcpf->freepagenode->freepage==destaddr); }
      if(destIsFree) { assert(destpf->freepagenode->freepage==srcaddr); }
#endif
      fpn->freepage=newaddr;
    }
    /* Commit the changes */
    if(srcpf->pageframe)
    {
      memtocommit.addrs[memtocommit.idx]=srcptr;
      memtocommit.pageframes[memtocommit.idx]=PAGEFRAME(*srcpf);
      if(REMAPMEMORYPAGESBLOCKSIZE==++memtocommit.idx) SwapPagesBatchCommit(srcaddr, &memtodecommit, &memtocommit);
      if(srcIsFree)
        freepages--;
      else
        usedpages--;
    }
    if(destpf->pageframe)
    {
      memtocommit.addrs[memtocommit.idx]=destptr;
      memtocommit.pageframes[memtocommit.idx]=PAGEFRAME(*destpf);
      if(REMAPMEMORYPAGESBLOCKSIZE==++memtocommit.idx) SwapPagesBatchCommit(destaddr, &memtodecommit, &memtocommit);
      if(destIsFree)
        freepages++;
      else
        usedpages++;
    }
  }
  /* Flush anything still batched */
  SwapPagesBatchCommit(destaddr, &memtodecommit, &memtocommit);
  /* Transfer the net page accounting between the two reservations */
  srcaddr->freepages-=freepages;
  srcaddr->usedpages-=usedpages;
  destaddr->freepages+=freepages;
  destaddr->usedpages+=usedpages;
  ValidateFreePageLists(destaddr);
  ValidatePageMappings(destaddr);
  return 1;
}

/************************************************************************************/

#ifdef USERMODEPAGEALLOCATOR_DEBUGCONFIG
#define FREEPAGENODESTORAGESIZE (PAGE_SIZE*16)
#else
#define FREEPAGENODESTORAGESIZE (PAGE_SIZE*64)
#endif
#define FREEPAGENODESPERSTORAGE ((FREEPAGENODESTORAGESIZE-4*sizeof(void *))/sizeof(FreePageNode))
/* Slab storage for FreePageNode records. The first block is statically
allocated; further blocks are obtained via AllocatePages on demand and
chained through next. */
static struct FreePageNodeStorage_s
{
  size_t magic;                /* Set to "UMPAFRPN" once initialized */
  FreePageNodeStorage_t *next; /* Next storage block in the chain */
  FreePageNode *freefpns;      /* Free node list, singly linked via older */
  size_t freeitems;            /* Free node count (decremented by AllocateFPN) */
  FreePageNode freepagenodes[FREEPAGENODESPERSTORAGE]; /* The node pool itself */
} fpnstorage, *RESTRICT lastfpnstorage;

/* Returns a FreePageNodeStorage block with at least one free node, scanning
the chain first and extending it via AllocatePages when all blocks are
full. The goto into the if body lets the statically allocated first block
share the initialization code. Returns 0 if new storage cannot be
allocated. */
static FreePageNodeStorage_t *FindFreePageNodeStorage(void)
{
  int n;
  FreePageNodeStorage_t *RESTRICT fpns, *RESTRICT ofpns=0;
  if(!fpnstorage.magic)
  { /* First ever call: initialize the static block in place */
    fpns=&fpnstorage;
    goto initstorage;
  }
  for(fpns=&fpnstorage; fpns; fpns=fpns->next)
  {
    ofpns=fpns; /* Remember the tail for chaining a new block */
    if(fpns->freefpns)
      return fpns;
  }
  /* Need to extend */
  assert(sizeof(FreePageNodeStorage_t)<=FREEPAGENODESTORAGESIZE);
  if((ofpns->next=fpns=(FreePageNodeStorage_t *) AllocatePages(0, FREEPAGENODESTORAGESIZE, USERPAGE_TOPDOWN)))
  {
#ifdef DEBUG
    /*printf("FreePageNodeStorage extends %p to %p\n", fpns, ((size_t)fpns+FREEPAGENODESTORAGESIZE));*/
#endif
#if !MMAP_CLEARS
    memset(fpns, 0, FREEPAGENODESTORAGESIZE);
#endif
initstorage:
    fpns->magic=*(size_t *)"UMPAFRPN";
    /* Thread every node onto the free list, lowest index ending up first */
    for(n=FREEPAGENODESPERSTORAGE-1; n>=0; n--)
    {
      fpns->freepagenodes[n].owner=fpns;
      fpns->freepagenodes[n].older=fpns->freefpns;
      fpns->freefpns=&fpns->freepagenodes[n];
    }
    fpns->freeitems=FREEPAGENODESPERSTORAGE;
  }
  return fpns;
}
/* Pops a FreePageNode from the cached storage block, locating (or creating)
a storage block with spare nodes when the cache is exhausted. Returns 0 on
allocation failure. */
static INLINE FreePageNode *AllocateFPN(void)
{
  FreePageNode *RESTRICT node;
  /* Refresh the cached storage when it has no free nodes left */
  if(!lastfpnstorage || !lastfpnstorage->freefpns)
  {
    lastfpnstorage=FindFreePageNodeStorage();
    if(!lastfpnstorage)
      return 0;
  }
#ifdef DEBUG
  { /* Verify every storage block still carries its magic */
    FreePageNodeStorage_t *storage;
    for(storage=&fpnstorage; storage; storage=storage->next)
      assert(*(size_t *)"UMPAFRPN"==storage->magic);
  }
#endif
  assert(lastfpnstorage->freefpns);
  node=lastfpnstorage->freefpns;
  assert(node->owner==lastfpnstorage);
  lastfpnstorage->freefpns=node->older;
  node->older=0;
  lastfpnstorage->freeitems--;
  return node;
}
/* Recursively releases trailing FreePageNode storage blocks which are
entirely unused. Returns 1 when fpns itself has every node free (and
everything after it has been released), 0 otherwise. */
static int CheckFreeFPNs(FreePageNodeStorage_t *fpns)
{
  if(fpns->next)
  {
    if(!CheckFreeFPNs(fpns->next))
      return 0;
    assert(!fpns->next->next);
#ifdef DEBUG
    /*printf("FreePageNodeStorage releases %p to %p\n", fpns->next, ((size_t)(fpns->next)+FREEPAGENODESTORAGESIZE));*/
#endif
    ReleasePages(fpns->next, FREEPAGENODESTORAGESIZE, 0);
    fpns->next=0;
  }
  return FREEPAGENODESPERSTORAGE==fpns->freeitems;
}
/* Returns a FreePageNode to its owning storage block's free list, wiping
everything past its first two pointer fields so stale list/page state
cannot leak into the next allocation. */
static INLINE void FreeFPN(FreePageNode *node)
{
  FreePageNodeStorage_t *RESTRICT fpns=node->owner;
  assert(node>=fpns->freepagenodes && node<fpns->freepagenodes+FREEPAGENODESPERSTORAGE);
  /* Wipe all fields except the leading pair of pointer-sized fields */
  memset((void *)((size_t) node + 2*sizeof(void *)), 0, sizeof(FreePageNode)-2*sizeof(void *));
  node->older=fpns->freefpns;
  fpns->freefpns=node;
  /* Keep the free node count in step with AllocateFPN's decrement; previously
  the increment only existed inside the commented-out code below, so the
  count drifted downwards forever, breaking CheckFreeFPNs' fully-free test
  and diverging from the matching FreeRegionNode implementation. */
  fpns->freeitems++;
  assert(fpns->freeitems<=FREEPAGENODESPERSTORAGE);
  /* If I'm empty and there is another storage after me, check if we need to free storage */
  /*if(FREEPAGENODESPERSTORAGE==fpns->freeitems && fpns->next && FREEPAGENODESPERSTORAGE==fpns->next->freeitems)
    CheckFreeFPNs(&fpnstorage);*/
}


/************************************************************************************/

#ifdef USERMODEPAGEALLOCATOR_DEBUGCONFIG
#define REGIONSTORAGESIZE PAGE_SIZE
#else
#define REGIONSTORAGESIZE (PAGE_SIZE*4)
#endif
#define REGIONSPERSTORAGE ((REGIONSTORAGESIZE-4*sizeof(void *))/sizeof(region_node_t))
/* Slab storage for region_node_t records, the region-node analogue of
FreePageNodeStorage_s. The first block is statically allocated; further
blocks are obtained via AllocatePages on demand and chained through next. */
static struct RegionStorage_s
{
  size_t magic;                /* Set to "UMPARNST" once initialized */
  RegionStorage_t *next;       /* Next storage block in the chain */
  region_node_t *freeregions;  /* Free node list, singly linked via prev */
  size_t freeitems;            /* Count of nodes currently on freeregions */
  region_node_t regions[REGIONSPERSTORAGE]; /* The node pool itself */
} regionstorage, *RESTRICT lastregionstorage;

/* Returns a RegionStorage block with at least one free region node, scanning
the chain first and extending it via AllocatePages when all blocks are
full. The goto into the if body lets the statically allocated first block
share the initialization code. Returns 0 if new storage cannot be
allocated. */
static RegionStorage_t *FindFreeRegionNodeStorage(void)
{
  int n;
  RegionStorage_t *RESTRICT fpns, *RESTRICT ofpns=0;
  if(!regionstorage.magic)
  { /* First ever call: initialize the static block in place */
    fpns=&regionstorage;
    goto initstorage;
  }
  for(fpns=&regionstorage; fpns; fpns=fpns->next)
  {
    ofpns=fpns; /* Remember the tail for chaining a new block */
    if(fpns->freeregions)
      return fpns;
  }
  /* Need to extend */
  assert(sizeof(RegionStorage_t)<=REGIONSTORAGESIZE);
  if((ofpns->next=fpns=(RegionStorage_t *) AllocatePages(0, REGIONSTORAGESIZE, USERPAGE_TOPDOWN)))
  {
#ifdef DEBUG
    /*printf("RegionNodeStorage extends %p to %p\n", fpns, ((size_t)fpns+REGIONSTORAGESIZE));*/
#endif
#if !MMAP_CLEARS
    memset(fpns, 0, REGIONSTORAGESIZE);
#endif
initstorage:
    fpns->magic=*(size_t *)"UMPARNST";
    /* Thread every node onto the free list, lowest index ending up first */
    for(n=REGIONSPERSTORAGE-1; n>=0; n--)
    {
      fpns->regions[n].owner=fpns;
      fpns->regions[n].prev=fpns->freeregions;
      fpns->freeregions=&fpns->regions[n];
    }
    fpns->freeitems=REGIONSPERSTORAGE;
  }
  return fpns;
}
/* Pops a region_node_t from the cached storage block, locating (or creating)
a storage block with spare nodes when the cache is exhausted. Returns 0 on
allocation failure. */
static INLINE region_node_t *AllocateRegionNode(void)
{
  region_node_t *RESTRICT ret;
  if(!lastregionstorage || !lastregionstorage->freeregions)
  { /* Cached storage exhausted: find or create another */
    if(!(lastregionstorage=FindFreeRegionNodeStorage())) return 0;
  }
#ifdef DEBUG
  { /* Validate magic, free list bounds and free list length of every block */
    RegionStorage_t *t;
    for(t=&regionstorage; t; t=t->next)
    {
      region_node_t *r;
      size_t c=0;
      assert(*(size_t *)"UMPARNST"==t->magic);
      assert(!t->freeitems || (t->freeregions>=t->regions && t->freeregions<t->regions+REGIONSPERSTORAGE));
      for(r=t->freeregions; r; r=r->prev, c++)
      {
        assert(!r->next);
      }
      assert(c==t->freeitems);
    }
  }
#endif
  assert(lastregionstorage->freeregions);
  ret=lastregionstorage->freeregions;
  assert(ret->owner==lastregionstorage);
  assert(*(size_t *)"UMPARNST"==lastregionstorage->magic);
  assert(ret>=lastregionstorage->regions && ret<lastregionstorage->regions+REGIONSPERSTORAGE);
  lastregionstorage->freeregions=ret->prev;
  ret->prev=0;
  lastregionstorage->freeitems--;
  assert(lastregionstorage->freeitems<=REGIONSPERSTORAGE);
  return ret;
}
/* Recursively releases trailing region node storage blocks which are
entirely unused. Returns 1 when fpns itself has every node free (and
everything after it has been released), 0 otherwise. */
static int CheckFreeRegionNodeStorages(RegionStorage_t *fpns)
{
  if(fpns->next)
  {
    if(!CheckFreeRegionNodeStorages(fpns->next))
      return 0;
    assert(!fpns->next->next);
#ifdef DEBUG
    DebugPrint("RegionNodeStorage releases %p to %p\n", fpns->next, (void *)((size_t)(fpns->next)+REGIONSTORAGESIZE));
#endif
    ReleasePages(fpns->next, REGIONSTORAGESIZE, 0);
    fpns->next=0;
  }
  return REGIONSPERSTORAGE==fpns->freeitems;
}
/* Returns a region_node_t to its owning storage block's free list, wiping
everything past its first two pointer fields so stale links cannot leak
into the next allocation. */
static INLINE void FreeRegionNode(region_node_t *node)
{
  RegionStorage_t *RESTRICT fpns=node->owner;
#ifdef DEBUG
  { /* Validate magic, free list bounds and free list length of every block */
    RegionStorage_t *t;
    for(t=&regionstorage; t; t=t->next)
    {
      region_node_t *r;
      size_t c=0;
      assert(*(size_t *)"UMPARNST"==t->magic);
      assert(!t->freeitems || (t->freeregions>=t->regions && t->freeregions<t->regions+REGIONSPERSTORAGE));
      for(r=t->freeregions; r; r=r->prev, c++)
      {
        assert(!r->next);
      }
      assert(c==t->freeitems);
    }
  }
#endif
  assert(node>=fpns->regions && node<fpns->regions+REGIONSPERSTORAGE);
  assert(*(size_t *)"UMPARNST"==fpns->magic);
  /* Wipe all fields except the leading pair of pointer-sized fields */
  memset((void *)((size_t) node + 2*sizeof(void *)), 0, sizeof(region_node_t)-2*sizeof(void *));
  node->prev=fpns->freeregions;
  fpns->freeregions=node;
  fpns->freeitems++;
  assert(fpns->freeitems<=REGIONSPERSTORAGE);
  /* If I'm empty and there is another storage after me, check if we need to free storage */
  /*if(REGIONSPERSTORAGE==fpns->freeitems && fpns->next && REGIONSPERSTORAGE==fpns->next->freeitems)
    CheckFreeRegionNodeStorages(&regionstorage);*/
}



/************************************************************************************/

#if USE_LOCKS
static MLOCK_T userpagemutex;
#endif

/* Exhaustively audits the internal consistency of a MemorySource. Debug
builds only: the whole body compiles away when NDEBUG is defined. Verifies
that (a) every region in the free tree (regiontreeL) is also present in the
allocated tree (regiontreeA) and is never adjacent to another free region,
(b) per-page metadata for free regions points back at the correct address,
and (c) the prev/next region list is address-contiguous within each address
space reservation and agrees with both trees. */
static void userpage_validatestate(MemorySource *source)
{
#ifndef NDEBUG
#if 1
  region_node_t *RESTRICT r, *RESTRICT rfree;
  int fromback=0;
  /* Pass 1: walk every region currently marked free. */
  NEDTRIE_FOREACH(rfree, regionL_tree_s, &source->regiontreeL)
  {
    AddressSpaceReservation_t *RESTRICT addr=AddressSpaceFromMem(&fromback, rfree->start);
    /* pagepfidx = index of this region's first page within addr's page map */
    size_t n, pagepfidx=((size_t)rfree->start-(size_t)addr)/PAGE_SIZE;
    FreePageNode *RESTRICT fpn;
    assert(rfree->end>rfree->start);
    /* Ensure that every free block is also allocated */
    assert(REGION_HASNODEHEADER(regionA_tree_s, rfree, linkA));
    r=REGION_FIND(regionA_tree_s, &source->regiontreeA, rfree);
    assert(rfree==r);
    /* Ensure that every free block never has another free block preceding or postceding it
    (adjacent free blocks should have been consolidated at free time) */
    if(rfree->prev)
    {
      AddressSpaceReservation_t *RESTRICT addr2=AddressSpaceFromMem(&fromback, rfree->prev->start);
      assert(addr!=addr2 || !REGION_HASNODEHEADER(regionL_tree_s, rfree->prev, linkL));
    }
    if(rfree->next)
    {
      AddressSpaceReservation_t *RESTRICT addr2=AddressSpaceFromMem(&fromback, rfree->next->start);
      assert(addr!=addr2 || !REGION_HASNODEHEADER(regionL_tree_s, rfree->next, linkL));
    }
    /* Ensure that this free block lives in an address reservation */
    assert(addr);
    /* Ensure that any pages in this free block are definitely marked as free
    and that their free page nodes point back at the right address. */
    for(n=pagepfidx; n<((size_t)rfree->end-(size_t)addr)/PAGE_SIZE; n++)
    {
      if(addr->pagemapping[n].pageframe)
      {
        assert(ISPAGEFREE(addr->pagemapping[n]));
        fpn=addr->pagemapping[n].freepagenode;
        assert(fpn->freepage==(void *)((size_t)rfree->start+(n-pagepfidx)*PAGE_SIZE));
      }
    }
  }
  /* Pass 2: walk the whole region list and cross-check it against the trees. */
  if(!source->firstregion)
  {
    assert(!source->lastregion);
  }
  else
  {
#if 0
    /* The entry on HEAD must never be a free chunk */
    assert(!REGION_HASNODEHEADER(regionL_tree_s, source->lastregion, linkL));
#endif
    for(r=source->firstregion; r; r=r->next)
    {
      int mode;
      AddressSpaceReservation_t *RESTRICT addr=AddressSpaceFromMem(&fromback, r->start);
      size_t n, pagepfidx=((size_t)r->start-(size_t)addr)/PAGE_SIZE;
      assert(r->end>r->start);
      /* Ensure that this block lives in an address reservation */
      assert(addr);
      /* Every region lies wholly within either the front- or back-growing arena */
      assert((r->start>=addr->front && r->end<=addr->frontptr) || (r->start>=addr->backptr && r->end<=addr->back));
      /* Ensure every item is in the allocated list. */
      rfree=REGION_FIND(regionA_tree_s, &source->regiontreeA, r);
      assert(rfree==r);
      /* Ensure contiguity. */
      if(fromback)
      {
        assert(!source->firstregion->next);
        assert(!source->lastregion->prev);
      }
      else
      {
        assert(!source->firstregion->prev);
        assert(!source->lastregion->next);
      }
      /* Neighbouring regions within the same reservation must abut exactly;
      the head/tail list pointers must agree with the growth direction. */
      if(r->prev)
      {
        AddressSpaceReservation_t *RESTRICT addr2=AddressSpaceFromMem(&fromback, r->prev->start);
        assert(r->prev->next==r);
        if(addr==addr2)
        {
          assert(r->prev->end==r->start);
        }
      }
      else
      {
        assert(fromback ? source->lastregion==r : source->firstregion==r);
      }
      if(r->next)
      {
        AddressSpaceReservation_t *RESTRICT addr2=AddressSpaceFromMem(&fromback, r->next->start);
        assert(r->next->prev==r);
        if(addr==addr2)
        {
          assert(r->next->start==r->end);
        }
      }
      else
      {
        assert(fromback ? source->firstregion==r : source->lastregion==r);
      }
      /* Ensure that any pages in this block are contiguously allocated
      (allocated pages first, then only free/unmapped pages to the end). */
      mode=0;
      for(n=pagepfidx; n<((size_t)r->end-(size_t)addr)/PAGE_SIZE; n++)
      {
        if(!mode)
        { /* mode=0: Look for initial range of allocated pages */
          if(!n) { assert(!ISPAGEFREE(addr->pagemapping[n])); }
          if(ISPAGEFREE(addr->pagemapping[n]))
            mode=1;
        }
        else if(1==mode)
        { /* mode=1: All pages from now on must be free or not allocated */
          assert(!addr->pagemapping[n].pageframe || ISPAGEFREE(addr->pagemapping[n]));
        }
      }
    }
  }
#endif
#endif
}

/* Inserts newnode into the allocated-region tree and appends it to the
region list kept by source. The list direction depends on which end of the
address space the source grows from: back-growing sources link new tails
through prev, front-growing sources through next. In both cases newnode
becomes the new lastregion. */
static void AddRegionNode(MemorySource *source, region_node_t *RESTRICT newnode, int fromback)
{
  region_node_t *RESTRICT tail=source->lastregion;
  REGION_INSERT(regionA_tree_s, &source->regiontreeA, newnode);
  if(fromback)
  { /* Back-of-VA growth: new tail chains to the old tail via next */
    newnode->prev=0;
    newnode->next=tail;
    if(tail)
      tail->prev=newnode;
    else
      source->firstregion=newnode;
  }
  else
  { /* Front-of-VA growth: new tail chains to the old tail via prev */
    newnode->prev=tail;
    newnode->next=0;
    if(tail)
      tail->next=newnode;
    else
      source->firstregion=newnode;
  }
  source->lastregion=newnode;
}
/* Removes r from the allocated-region tree and unlinks it from the region
list. fromback selects the list growth direction (see AddRegionNode). r is
normally the current lastregion, but with more than one address space
reservation it can also be firstregion or an interior node. Does not free
r; the caller does that. */
static void RemoveRegionNode(MemorySource *source, region_node_t *RESTRICT r, int fromback)
{
  REGION_REMOVE(regionA_tree_s, &source->regiontreeA, r);
  if(r==source->lastregion)
  { /* Unlink from the tail end; the successor (if any) becomes the new tail */
    if(fromback)
    {
      source->lastregion=r->next;
      if(source->lastregion)
        source->lastregion->prev=0;
      else
        source->firstregion=0;
    }
    else
    {
      source->lastregion=r->prev;
      if(source->lastregion)
        source->lastregion->next=0;
      else
        source->firstregion=0;
    }
  }
  else
  { /* This can happen when there is more than one Address Space Reservation */
    if(r==source->firstregion)
    { /* Unlink from the head end */
      if(fromback)
      {
        source->firstregion=r->prev;
        if(source->firstregion)
          source->firstregion->next=0;
      }
      else
      {
        source->firstregion=r->next;
        if(source->firstregion)
          source->firstregion->prev=0;
      }
    }
    else
    { /* Interior node: splice neighbours around r */
      if(r->next)
        r->next->prev=r->prev;
      if(r->prev)
        r->prev->next=r->next;
    }
  }
}
static int HandleVANonContiguity(MemorySource *source, region_node_t *RESTRICT r, int fromback)
{ /* Occasionally either RegionStorage or FreePageNodeStorage extends or
  retracts themselves from back which appears to us as unexpected gaps in
  VA HEAD. We handle this by either inserting or removing dummy region
  nodes which pretend that the unexpected gap is an allocated region.
  Returns 1 on success, 0 if a dummy node could not be allocated. */
  AddressSpaceReservation_t *RESTRICT lastregionaddr, *RESTRICT raddr;
  /* BUGFIX: the assert used to come after source->lastregion->start was
  already dereferenced, making it useless as a guard. Check first. */
  assert(source->lastregion);
  lastregionaddr=AddressSpaceFromMem(NULL, source->lastregion->start);
  raddr=AddressSpaceFromMem(NULL, r->start);
  if(raddr==lastregionaddr)
  {
    assert(fromback);
    if(r->end<source->lastregion->start)
    { /* Storage extension, so insert a dummy allocated block. */
      region_node_t *RESTRICT dummy;
      if(!(dummy=AllocateRegionNode())) return 0;
      dummy->start=r->end;
      dummy->end=source->lastregion->start;
#ifdef DEBUG
      DebugPrint("Adding dummy node %p (%p - %p)\n", dummy, dummy->start, dummy->end);
#endif
      AddRegionNode(source, dummy, fromback);
    }
    else if(r->start>source->lastregion->end)
    { /* Storage retraction, so remove the previously inserted dummy block. */
      region_node_t *RESTRICT dummy=source->lastregion;
      /*assert(IsPageFreeOrEmpty(addr, dummy->start));*/
#ifdef DEBUG
      DebugPrint("Removing dummy node %p (%p - %p)\n", dummy, dummy->start, dummy->end);
#endif
      RemoveRegionNode(source, dummy, fromback);
      FreeRegionNode(dummy);
    }
  }
  return 1;
}
/* Special flags: USERPAGE_TOPDOWN causes the allocation to be made from the top down.
USERPAGE_NOCOMMIT causes no memory to be committed */
/* Allocates toallocate bytes of page memory from the lower (bottom-up) or
upper (top-down) MemorySource. Bits in M2_RESERVE_MASK optionally request a
larger address space reservation (a multiplier of toallocate, or an
absolute power of two) so a later realloc can extend in place. Returns the
allocation on success or MFAIL on failure. Thread safe when USE_LOCKS. */
void *userpage_malloc(size_t toallocate, unsigned flags)
{
  void *ret=0;
  region_node_t node, *RESTRICT r, *RESTRICT newnode=0;
  MemorySource *source=(flags & USERPAGE_TOPDOWN) ? &upper : &lower;
  /* Decode the optional reservation hint from bits 8+ of flags */
  unsigned mremapvalue = (flags & M2_RESERVE_MASK)>>8;
  size_t size = mremapvalue ? ((flags & M2_RESERVE_ISMULTIPLIER) ? toallocate*mremapvalue : (size_t)1<<mremapvalue) : toallocate;
  /* Never reserve less than is being allocated (also guards multiply/shift overflow) */
  if(size < toallocate)
    size = toallocate;
	/* First allocation ever: optionally preload the free page cache by
	allocating and immediately releasing a block of pages. */
	if(!addressspacereservation)
	{
		size_t topreload=USERMODEPAGEALLOCATOR_FREEPAGECACHEPRELOAD(OSSystemMemoryPressure())*PAGE_SIZE;
		if(topreload>size)
		{
#ifdef DEBUG
			DebugPrint("Preloading free page cache with %lu bytes ...\n", (unsigned long) topreload);
#endif
			ret=AllocatePages(0, topreload, 0);
			ReleasePages(ret, topreload, 0);
			ret=0;
		}
	}
  /* Firstly find out if there is a free slot of sufficient size and if so use that.
  If there isn't a sufficient free slot, extend the virtual address space */
  node.start=0;
  node.end=(void *)size;
#if USE_LOCKS
  ACQUIRE_LOCK(&userpagemutex);
#endif
  r=REGION_CFIND(regionL_tree_s, &source->regiontreeL, &node, 1);
  if(r)
  { /* Found a free region at least size bytes big */
    size_t rlen=(size_t) r->end - (size_t) r->start;
    if(rlen<size)
    { /* CFIND returned something too small: internal corruption, bail out hard */
      assert(rlen>=size);
      abort();
    }
    REGION_REMOVE(regionL_tree_s, &source->regiontreeL, r);
    memset(&r->linkL, 0, sizeof(r->linkL));
    if(rlen==size)
    { /* Don't need to split */
      ret=r->start;
      goto commitpages;
    }
    /* Split r with new start addr: r keeps the first size bytes, newnode
    takes the remainder and goes back into both trees as free space. */
    if(!(newnode=AllocateRegionNode()))
      goto mfail;
    ret=r->start;
    newnode->start=(void *)((size_t) r->start + size);
    newnode->end=r->end;
    r->end=newnode->start;
    newnode->prev=r;
    newnode->next=r->next;
    if(newnode->next) newnode->next->prev=newnode;
    else if(flags & USERPAGE_TOPDOWN)
    { /* r was the list head of a top-down source; the tail split replaces it */
      assert(r==source->firstregion);
      source->firstregion=newnode;
    }
    r->next=newnode;
    REGION_INSERT(regionA_tree_s, &source->regiontreeA, newnode);
    REGION_INSERT(regionL_tree_s, &source->regiontreeL, newnode);
    newnode=0;
  }
  else
  { /* Reserve sufficient new address space */
    if(!(newnode=AllocateRegionNode()))
      goto mfail;
    if((ret=AllocatePages(0, size, USERPAGE_NOCOMMIT|flags)))
    {
      newnode->start=ret;
      newnode->end=(void *)((size_t) ret + size);
      /* If the new reservation does not abut the current list tail, internal
      storage grew/shrank behind our back; patch with a dummy region. */
      if(source->lastregion && ((flags & USERPAGE_TOPDOWN) ? source->lastregion->start!=newnode->end : source->lastregion->end!=newnode->start))
      {
        if(!HandleVANonContiguity(source, newnode, flags & USERPAGE_TOPDOWN))
          goto mfail;
      }
      AddRegionNode(source, newnode, flags & USERPAGE_TOPDOWN);
      newnode=0;
    }
  }
commitpages:
  if(!ret) goto mfail;
  if(!(flags & USERPAGE_NOCOMMIT))
  { /* Commit only toallocate bytes; the rest of size stays reserved */
    if(!AllocatePages(ret, toallocate, 0))
      goto mfail;
#if 0
    else if(flags & USERPAGE_TOPDOWN)
    {
#ifdef DEBUG
      size_t n, *RESTRICT p;
      for(n=0, p=(size_t *) ret; n<toallocate; n+=sizeof(size_t), p++)
      { /* All memory returned from upper must always be zeroed */
        assert(!*p);
      }
#endif
    }
#endif
  }
  userpage_validatestate(source);
#if USE_LOCKS
  RELEASE_LOCK(&userpagemutex);
#endif
  return ret;
mfail:
  /* Roll back: return r to the free tree if we removed it, give back any
  address space taken, and recycle an unconsumed node.
  NOTE(review): if commit failed after a split, r has already been resized
  to exactly size bytes, so reinserting it frees only that portion — verify
  this matches what ReleasePages undoes below. */
  if(r)
  {
    REGION_INSERT(regionL_tree_s, &source->regiontreeL, r);
  }
  if(ret)
    ReleasePages(ret, size, 0);
  if(newnode)
    FreeRegionNode(newnode);
#if USE_LOCKS
  RELEASE_LOCK(&userpagemutex);
#endif
  return MFAIL;
}

/* Merges r's successor into r: the successor is removed from both the
allocated and free trees, r is extended to cover its span, the region list
is relinked around it and its node is recycled. The successor must exist
and (for front-growing sources) be exactly contiguous with r. */
static void ConsolidateNextIntoRegion(MemorySource *RESTRICT source, region_node_t *RESTRICT r, int fromback)
{
  region_node_t *RESTRICT victim=r->next;
  if(!fromback)
  { /* Front-of-VA lists are strictly address-contiguous */
    assert(victim->start==r->end);
    assert(victim->prev==r);
  }
  REGION_REMOVE(regionA_tree_s, &source->regiontreeA, victim);
  REGION_REMOVE(regionL_tree_s, &source->regiontreeL, victim);
  r->end=victim->end;
  r->next=victim->next;
  if(r->next)
    r->next->prev=r;
  if(fromback && source->firstregion==victim)
  { /* The victim was the head of a back-growing list; r takes its place */
    source->firstregion=r;
    assert(!r->next);
  }
  FreeRegionNode(victim);
}
/* Frees a region previously returned by userpage_malloc. The size argument
is actually unused and may be zero: the region's true extent comes from the
allocated-region tree. Consolidates with adjacent free regions, then either
returns the pages to the free cache or shrinks the VA reservation entirely.
Returns 0 on success, -1 if mem is unknown. */
int userpage_free(void *mem, size_t size)
{
  region_node_t node, *RESTRICT r=0;
  int fromback, prevIsFree, nextIsFree;
  MemorySource *source=0;
  AddressSpaceReservation_t *RESTRICT addr=0;
  node.start=mem;
  node.end=0; /* size may actually be quite a lot smaller than region size */
#if USE_LOCKS
  ACQUIRE_LOCK(&userpagemutex);
#endif
  if((addr=AddressSpaceFromMem(&fromback, mem)))
    source=fromback ? &upper : &lower;
  else
    goto fail;
  r=REGION_FIND(regionA_tree_s, &source->regiontreeA, &node);
  if(!r)
  { /* Block not found */
    assert(r);
    goto fail;
  }
  /* Sanity: r must not already be free, and its neighbours must be tracked */
  assert(!REGION_EXACTFIND(regionL_tree_s, &source->regiontreeL, r));
  if(r->next)
  {
    assert(REGION_FIND(regionA_tree_s, &source->regiontreeA, r->next));
  }
  if(r->prev)
  {
    assert(REGION_FIND(regionA_tree_s, &source->regiontreeA, r->prev));
  }
  /* Can I merge with adjacent free blocks? */
  prevIsFree=r->prev && REGION_HASNODEHEADER(regionL_tree_s, r->prev, linkL) && r->prev->end==r->start;
  nextIsFree=r->next && REGION_HASNODEHEADER(regionL_tree_s, r->next, linkL) && r->next->start==r->end;
  if(nextIsFree)
  { /* Consolidate into r */
    assert(REGION_EXACTFIND(regionL_tree_s, &source->regiontreeL, r->next));
    ConsolidateNextIntoRegion(source, r, fromback);
  }
  if(prevIsFree)
  { /* Consolidate into prev */
    region_node_t *RESTRICT t;
    assert(REGION_EXACTFIND(regionL_tree_s, &source->regiontreeL, r->prev));
    if(!fromback)
    {
      assert(r->prev->end==r->start);
      assert(r->prev->next==r);
    }
    /* prev stays out of the free tree for now; it is reinserted (covering
    the merged span) or released below. */
    REGION_REMOVE(regionA_tree_s, &source->regiontreeA, r);
    REGION_REMOVE(regionL_tree_s, &source->regiontreeL, r->prev);
    t=r->prev;
#if 0
    memset(&t->linkL, 0, sizeof(t->linkL)); /* Not actually necessary */
#endif
    t->end=r->end;
    t->next=r->next;
    if(t->next) t->next->prev=t;
    if(fromback && source->firstregion==r)
    {
      source->firstregion=t;
      assert(!t->next);
    }
    FreeRegionNode(r);
    r=t;
  }
  assert(r->end>r->start);
  if(!ReleasePages(r->start, (size_t)r->end - (size_t)r->start, 0))
  { /* We didn't shrink VA, so add newly freed region to free list */
    REGION_INSERT(regionL_tree_s, &source->regiontreeL, r);
    assert(REGION_FIND(regionA_tree_s, &source->regiontreeA, r));
  }
  else
  { /* We did shrink VA, so remove newly freed region from allocated list */
    if(source->lastregion && (fromback ? source->lastregion->end==r->start : source->lastregion->start==r->end))
      HandleVANonContiguity(source, r, fromback);
    RemoveRegionNode(source, r, fromback);
    FreeRegionNode(r);
  }
  userpage_validatestate(source);
#if USE_LOCKS
  RELEASE_LOCK(&userpagemutex);
#endif
  return 0;
fail:
#if USE_LOCKS
  RELEASE_LOCK(&userpagemutex);
#endif
  return -1;
}

/* Resizes a region previously returned by userpage_malloc. Tries, in order:
using slack already inside the region, consolidating a free successor,
extending the VA reservation at its front, and finally (if MREMAP_MAYMOVE
is set in flags) relocating via a fresh allocation and SwapPages. flags2 is
forwarded to AllocatePages/userpage_malloc. Returns the (possibly moved)
block or MFAIL. */
void *userpage_realloc(void *mem, size_t oldsize, size_t newsize, int flags, unsigned flags2)
{
  void *ret=0;
  region_node_t node, *RESTRICT r=0;
  int fromback;
  MemorySource *source=0;
  AddressSpaceReservation_t *RESTRICT addr=0;
  size_t regionsize;
  node.start=mem;
  node.end=(void *)((size_t) mem + newsize); /* not actually used for the search */
#if USE_LOCKS
  ACQUIRE_LOCK(&userpagemutex);
#endif
  if((addr=AddressSpaceFromMem(&fromback, mem)))
    source=fromback ? &upper : &lower;
  else
    goto mfail;
  r=REGION_FIND(regionA_tree_s, &source->regiontreeA, &node);
  if(!r)
  { /* Block not found */
    assert(r);
    goto mfail;
  }
  regionsize=(size_t) r->end - (size_t) r->start;
  if(node.end>r->end)
  { /* We ordinarily have the region's size available to us, but if he wants
    more then can we expand? */
    if(r->next && REGION_HASNODEHEADER(regionL_tree_s, r->next, linkL))
    { /* Successor is free: swallow it if it is big enough */
      if(node.end>r->next->end)
        goto relocate;
      /* Consolidate into r */
      ConsolidateNextIntoRegion(source, r, fromback);
      /* Fall through */
    }
    else if(r->end==addr->frontptr)
    { /* r sits at the very front of unreserved VA; try to push frontptr out.
      NOTE(review): this compares against frontptr only — presumably a
      back-growing (fromback) region can never satisfy r->end==frontptr, so
      top-down regions always relocate instead; confirm. */
      if((size_t)addr->backptr - (size_t) addr->frontptr < newsize-regionsize)
        goto relocate;
      /* Extend VA and r */
      r->end=addr->frontptr=(void *)((size_t) addr->frontptr + newsize - regionsize);
      /* Fall through */
    }
    else
      goto relocate;
  }
  assert((size_t)r->end-(size_t)r->start>=newsize);
  if(newsize>oldsize)
  { /* Ensure there are pages up to newsize, but you need to be careful if you're extending VA */
    void *newmemaddr=(void *)((size_t) mem + oldsize);
    size_t newmemsize=newsize-oldsize;
    assert((void *)((size_t) mem + newsize)<=r->end);
    if(!AllocatePages(newmemaddr, newmemsize, flags2))
      goto mfail;
  }
  else if(newsize<oldsize)
  { /* Free anything after newsize up to r->end */
    ReleasePages(node.end, (size_t) r->end - (size_t) node.end, 1);
  }
  userpage_validatestate(source);
#if USE_LOCKS
  RELEASE_LOCK(&userpagemutex);
#endif
  return mem;
relocate:
  /* In-place growth impossible: allocate fresh uncommitted space from the
  same end of VA, commit the tail, then move the old pages across. */
  assert(newsize>oldsize);
  if(!(flags & MREMAP_MAYMOVE)) goto mfail;
  if(MFAIL==(ret=userpage_malloc(newsize, flags2|USERPAGE_NOCOMMIT|(fromback ? USERPAGE_TOPDOWN : 0)))) goto mfail;
#ifdef DEBUG
  /*printf("Need to realloc from %p-%p to %p-%p\n", mem, (void *)((size_t) mem + oldsize), ret, (void *)((size_t) ret + newsize));*/
#endif
  /* Ensure there are pages from oldsize to newsize */
  if(!AllocatePages((void *)((size_t) ret + oldsize), newsize-oldsize, 0))
  {
    userpage_free(ret, newsize);
    goto mfail;
  }
#ifdef DEBUG
  /*printf("Allocated new pages between %p and %p\n", (void *)((size_t) ret + oldsize), (void *)((size_t) ret + newsize));
  printf("Swapping pages from %p-%p to %p-%p\n", mem, (void *)((size_t) mem + oldsize), ret, (void *)((size_t) ret + oldsize));*/
#endif
#if 0
  if(AllocatePages(ret, oldsize, 0))
    memcpy(ret, mem, oldsize);
  else
#else
  /* Remap the old pages into the new location rather than copying */
  if(!SwapPages(ret, mem, (void *)((size_t) mem + oldsize)))
#endif
  {
    userpage_free(ret, newsize);
    goto mfail;
  }
  userpage_free(mem, oldsize /* Actually is ignored in practice */);
  userpage_validatestate(source);
#if USE_LOCKS
  RELEASE_LOCK(&userpagemutex);
#endif
  return ret;
mfail:
#if USE_LOCKS
  RELEASE_LOCK(&userpagemutex);
#endif
  return MFAIL;
}

/* Commits size bytes of backing store at mem, which must lie inside one of
our address space reservations. Returns mem on success, MFAIL otherwise.
Thread safe when USE_LOCKS is set. */
void *userpage_commit(void *mem, size_t size)
{
  void *ret=MFAIL;
#if USE_LOCKS
  ACQUIRE_LOCK(&userpagemutex);
#endif
  /* Only commit addresses we actually manage */
  if(AddressSpaceFromMem(0, mem) && AllocatePages(mem, size, 0))
    ret=mem;
#if USE_LOCKS
  RELEASE_LOCK(&userpagemutex);
#endif
  return ret;
}

/* Decommits size bytes of backing store at mem. Returns 1 on success, 0 if
mem does not lie inside any of our address space reservations. Thread safe
when USE_LOCKS is set. */
int userpage_release(void *mem, size_t size)
{
  int ret=0;
#if USE_LOCKS
  ACQUIRE_LOCK(&userpagemutex);
#endif
  if(AddressSpaceFromMem(0, mem))
  {
    ReleasePages(mem, size, 1);
    ret=1;
  }
#if USE_LOCKS
  RELEASE_LOCK(&userpagemutex);
#endif
  return ret;
}

#ifdef _MSC_VER
#pragma warning(pop)
#endif
#endif
