#define USE_DL_PREFIX 1
#define USE_LOCKS 1
#define HAVE_MORECORE 0
#define NO_MALLINFO 1

#ifndef WIN32
#ifdef _WIN32
#define WIN32
#endif
#endif
#ifdef WIN32
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#define HAVE_MMAP 1
#define HAVE_MORECORE 0
#define LACKS_UNISTD_H
#define LACKS_SYS_PARAM_H
#define LACKS_SYS_MMAN_H
#define LACKS_STRING_H
#define LACKS_STRINGS_H
#define LACKS_SYS_TYPES_H
#define LACKS_ERRNO_H
#define MALLOC_FAILURE_ACTION
#define MMAP_CLEARS 0
#endif

#if defined(DARWIN) || defined(_DARWIN)
#ifndef HAVE_MORECORE
#define HAVE_MORECORE 0
#define HAVE_MMAP 1
#endif
#endif

#ifndef LACKS_SYS_TYPES_H
#include <sys/types.h>
#endif

#define MAX_SIZE_T (~(size_t)0)

#ifndef ONLY_MSPACES
#define ONLY_MSPACES 0
#endif
#ifndef MSPACES
#if ONLY_MSPACES
#define MSPACES 1
#else
#define MSPACES 0
#endif
#endif
#ifndef MALLOC_ALIGNMENT
/* Minimum alignment of returned chunks.  Fixed: the original definition
   ended in a stray semicolon, which made every arithmetic use of
   MALLOC_ALIGNMENT (e.g. CHUNK_ALIGN_MASK) a syntax error. */
#define MALLOC_ALIGNMENT ((size_t)8U)
#endif
#ifndef FOOTERS
#define FOOTERS 0
#endif
#ifndef ABORT
#define ABORT abort()
#endif
#ifndef ABORT_ON_ASSERT_FAILURE
#define ABORT_ON_ASSERT_FAILURE 1
#endif
#ifndef PROCEED_ON_ERROR
#define PROCEED_ON_ERROR 0
#endif
#ifndef USE_LOCKS
/* Fixed typo: the default was defined as USE_LOCKE, leaving USE_LOCKS
   undefined (and thus 0 in #if tests, but undeclared for plain uses). */
#define USE_LOCKS 0
#endif
#ifndef INSECURE
#define INSECURE 0
#endif
#ifndef HAVE_MMAP
#define HAVE_MMAP 1
#endif
#ifndef MMAP_CLEARS
#define MMAP_CLEARS 1
#endif
#ifndef HAVE_MREMAP
#ifdef linux
#define HAVE_MREMAP 1
#else
#define HAVE_MREMAP 0
#endif
#endif
#ifndef MALLOC_FAILURE_ACTION
/* Action taken when the system cannot supply more memory.
   Fixed typo: `errorno` -> `errno`. */
#define MALLOC_FAILURE_ACTION errno = ENOMEM
#endif
#ifndef HAVE_MORECORE
#if ONLY_MSPACES
#define HAVE_MORECORE 0
#else
#define HAVE_MORECORE 1
#endif
#endif
#if !HAVE_MORECORE
#define MORECORE_CONTIGUOUS 0
#else
#ifndef MORECORE
#define MORECORE sbrk
#endif
#ifndef MORECORE_CONTIGUOUS
#define MORECORE_CONTIGUOUS 1
#endif
#endif
#ifndef DEFAULT_GRANULARITY
#if MORECORE_CONTIGUOUS
#define DEFAULT_GRANULARITY (0)
#else
#define DEFAULT_GRANULARITY ((size_t)64U * (size_t)1024U)
#endif
#endif
#ifndef DEFAULT_TRIM_THRESHOLD
#ifndef MORECORE_CANNOT_TRIM
#define DEFAULT_TRIM_THRESHOLD ((size_t)2U * (size_t)1024U * (size_t)1024U)
#else
#define DEFAULT_TRIM_THRESHOLD MAX_SIZE_T
#endif
#endif
#ifndef DEFAULT_MMAP_THRESHOLD
#if HAVE_MMAP
#define DEFAULT_MMAP_THRESHOLD ((size_t)256U * (size_t)1024U)
#else
#define DEFAULT_MMAP_THRESHOLD MAX_SIZE_T
#endif
#endif
#ifndef USE_BUILTIN_FFS
#define USE_BUILTIN_FFS 0
#endif
#ifndef USE_DEV_RANDOM
#define USE_DEV_RANDOM 0
#endif
#ifndef NO_MALLINFO
#define NO_MALLINFO 0
#endif
#ifndef MALLINFO_FIELD_TYPE
#define MALLINFO_FIELD_TYPE size_t
#endif

#define M_TRIM_THRESHOLD (-1)
#define M_GRANULARITY (-2)
#define M_MMAP_THRESHOLD (-3)

#if !NO_MALLINFO
#ifdef HAVE_USR_INCLUDE_MALLOC_H
#include "/usr/include/malloc.h"
#else
/* SVID/XPG-style mallinfo record; fields use MALLINFO_FIELD_TYPE (size_t by
   default) instead of the historical int.  Fixed field typo: the mmapped
   space field is conventionally named hblkhd, not hblknd. */
struct mallinfo
{
	MALLINFO_FIELD_TYPE arena;    /* non-mmapped space allocated from system */
	MALLINFO_FIELD_TYPE ordblks;  /* number of free chunks */
	MALLINFO_FIELD_TYPE smblks;   /* unused in this malloc */
	MALLINFO_FIELD_TYPE hblks;    /* unused in this malloc */
	MALLINFO_FIELD_TYPE hblkhd;   /* space in mmapped regions */
	MALLINFO_FIELD_TYPE usmblks;  /* maximum total allocated space */
	MALLINFO_FIELD_TYPE fsmblks;  /* unused in this malloc */
	MALLINFO_FIELD_TYPE uordblks; /* total allocated space */
	MALLINFO_FIELD_TYPE fordblks; /* total free space */
	MALLINFO_FIELD_TYPE keepcost; /* top-most releasable space */
};
#endif
#endif

#ifdef __cplusplus
extern "C"
{
#endif

#if !ONLY_MSPACES
/* Allocate at least the requested number of bytes. */
void* dlmalloc(size_t);

/* Release memory previously obtained from a dl* allocation call. */
void dlfree(void*);

/* Allocate n elements of the given size, zero-filled. */
void* dlcalloc(size_t, size_t);

/* Resize (possibly moving) a previously allocated block. */
void* dlrealloc(void*, size_t);

/* Allocate with the given alignment (expected to be a power of two). */
void* dlmemalign(size_t, size_t);

/* Allocate page-aligned memory. */
void* dlvalloc(size_t);

/* Adjust a tunable parameter (M_TRIM_THRESHOLD etc.); nonzero on success. */
int dlmallopt(int, int);

/* Current number of bytes obtained from the system. */
size_t dlmalloc_footprint(void);

/* Peak number of bytes obtained from the system. */
size_t dlmalloc_max_footprint(void);

#if !NO_MALLINFO
/* Statistics snapshot in SVID mallinfo form. */
struct mallinfo dlmallinfo(void);
#endif

/* Batch-allocate many equal-sized chunks; pointers stored via the array. */
void** dlindependent_calloc(size_t, size_t, void**);

/* Batch-allocate chunks of individually listed sizes. */
void** dlindependent_comalloc(size_t, size_t, void**);

/* Allocate whole pages covering at least the requested size. */
void* dlpvalloc(size_t);

/* Try to give unused memory back to the system, keeping `pad` bytes spare. */
int dlmalloc_trim(size_t);

/* Usable size of the chunk backing mem (>= the size originally requested). */
size_t dlmalloc_usable_size(void*);

/* Print allocation statistics. */
void dlmalloc_stats(void);
#endif

#if MSPACES
/* An mspace is an opaque handle to an independent allocation arena. */
typedef void* mspace;

/* Create a new arena with at least `capacity` bytes (0 => default size);
   `locked` enables per-space locking.  Fixed typo: this was declared as
   create_space, but the dlmalloc API name (matching create_mspace_with_base
   below) is create_mspace. */
mspace create_mspace(size_t capacity, int locked);

/* Destroy the arena and release its memory; returns the bytes freed. */
size_t destroy_mspace(mspace msp);

/* Build an arena inside caller-supplied memory of the given capacity. */
mspace create_mspace_with_base(void* base, size_t capacity, int locked);

/* The mspace_* calls mirror the dl* entry points, scoped to one arena. */
void* mspace_malloc(mspace msp, size_t bytes);

void mspace_free(mspace msp, void* mem);

void* mspace_realloc(mspace msp, void* mem, size_t newsize);

void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size);

void* mspace_memalign(mspace msp, size_t alignment, size_t bytes);

void** mspace_independent_calloc(mspace msp, size_t n_elements,
	size_t elem_size, void* chunks[]);

void** mspace_independent_comalloc(mspace msp, size_t n_elements,
		size_t sizes[], void* chunks[]);

/* Current / peak bytes obtained from the system for this arena. */
size_t mspace_footprint(mspace msp);

size_t mspace_max_footprint(mspace msp);

#if !NO_MALLINFO
struct mallinfo mspace_mallinfo(mspace msp);
#endif

void mspace_malloc_stats(mspace msp);

int mspace_trim(mspace msp, size_t pad);

int mspace_mallopt(int, int);
#endif
#ifdef __cplusplus
}
#endif

#ifdef WIN32
#pragma warning(disable : 4146)
#endif

#include <stdio.h>

#ifndef LACKS_ERRNO_H
#include <errno.h>
#endif
#if FOOTERS
#include <time.h>
#endif
#ifndef LACKS_STDLIB_H
#include <stdlib.h>
#endif
#ifdef DEBUG
#if ABORT_ON_ASSERT_FAILURE
#define assert(x) if (!(x)) ABORT
#else
#include <assert.h>
#endif
#else
#define assert(x)
#endif
#ifndef LACKS_STRING_H
#include <string.h>
#endif
#if USE_BUILTIN_FFS
#ifndef LACKS_STRINGS_H
#include <strings.h>
#endif
#endif
#if HAVE_MMAP
#ifndef LACKS_SYS_MMAN_H
#include <sys/mman.h>
#endif
#ifndef LACKS_FCNTL_H
#include <fcntl.h>
#endif
#endif
#if HAVE_MORECORE
#ifndef LACKS_UNISTD_H
#include <unistd.h>
#else
#if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__)
extern void* sbrk(ptrdiff_t);
#endif
#endif
#endif

#ifndef WIN32
#ifndef malloc_getpagesize
# ifdef _SC_PAGESIZE
#   ifndef _SC_PAGE_SIZE
#     define _SC_PAGE_SIZE _SC_PAGESIZE
#   endif
# endif
# ifdef _SC_PAGE_SIZE
#   define malloc_getpagesize sysconf(_SC_PAGE_SIZE)
# else
#   if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE)
	  extern size_t getpagesize();
#     define malloc_getpagesize getpagesize()
#   else
#     ifdef WIN32
#       define malloc_getpagesize getpagesize()
#     else
#       ifndef LACKS_SYS_PARAM_H
#         include <sys/param.h>
#       endif
#       ifdef EXEC_PAGESIZE
#         define malloc_getpagesize EXEC_PAGESIZE
#       else
#         ifdef NBPG
#           ifndef CLSIZE
#             define malloc_getpagesize NBPG
#           else
#             define malloc_getpagesize (NBPG * CLSIZE)
#           endif
#         else
#           ifdef NBPC
#             define malloc_getpagesize NBPC
#           else
#             ifdef PAGESIZE
#               define malloc_getpagesize PAGESIZE
#             else
#               define malloc_getpagesize ((size_t)4096U)
#             endif
#           endif
#         endif
#       endif
#     endif
#   endif
# endif
#endif
#endif

#define SIZE_T_SIZE (sizeof(size_t))
#define SIZE_T_BITSIZE (sizeof(size_t) << 3)

#define SIZE_T_ZERO ((size_t)0)
#define SIZE_T_ONE ((size_t)1)
#define SIZE_T_TWO ((size_t)2)
#define TWO_SIZE_T_SIZES (SIZE_T_SIZE << 1)
#define FOUR_SIZE_T_SIZES (SIZE_T_SIZE << 2)
#define SIX_SIZE_T_SIZES (FOUR_SIZE_T_SIZES + TWO_SIZE_T_SIZES)
#define HALF_MAX_SIZE_T (MAX_SIZE_T / 2U)

#define CHUNK_ALIGN_MASK (MALLOC_ALIGNMENT - SIZE_T_ONE)

#define is_aligned(A) (((size_t)((A)) & (CHUNK_ALIGN_MASK)) == 0)

#define align_offset(A) \
	((((size_t)(A) & CHUNK_ALIGN_MASK) == 0) ? 0 : \
	((MALLOC_ALIGNMENT - ((size_t)(A) & CHUNK_ALIGN_MASK)) & CHUNK_ALIGN_MASK))

#define MFAIL ((void*)(MAX_SIZE_T))
#define CMFAIL ((char*)(MFAIL))

#if !HAVE_MMAP
#define IS_MMAPPED_BIT (SIZE_T_ZERO)
#define USE_MMAP_BIT (SIZE_T_ZERO)
#define CALL_MMAP(s) MFAIL
#define CALL_MUNMAP(a, s) (-1)
#define DIRECT_MMAP(s) MFAIL
#else
#define IS_MMAPPED_BIT (SIZE_T_ONE)
#define USE_MMAP_BIT (SIZE_T_ONE)

#ifndef WIN32
#define CALL_MUNMAP(a, s) munmap((a), (s))
#define MMAP_PROT (PROT_READ | PROT_WRITE | PROT_EXEC)
#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
#define MAP_ANONYMOUS MAP_ANON
#endif
#ifdef MAP_ANONYMOUS
#define MMAP_FLAGS (MAP_PRIVATE | MAP_ANONYMOUS)
#define CALL_MMAP(s) mmap(0, (s), MMAP_PROT, MMAP_FLAGS, -1, 0)
#else
#define MMAP_FLAGS (MAP_PRIVATE)
static int dev_zero_fd = -1;	/* cached fd for /dev/zero, opened lazily */
/* Without MAP_ANONYMOUS, map /dev/zero instead.  Fixed two defects:
   O_RDWR was misspelled `0_RDWR` (digit zero), and the expansion carried an
   extra unbalanced opening parenthesis, breaking every use site. */
#define CALL_MMAP(s) ((dev_zero_fd < 0) ? \
		(dev_zero_fd = open("/dev/zero", O_RDWR), \
		mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0)) : \
		mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0))
#endif

#define DIRECT_MMAP(s) CALL_MMAP(s)
#else

/* mmap emulation on Windows via VirtualAlloc.
   Returns MFAIL (not NULL) on failure, matching the Unix mmap convention.
   NOTE(review): PAGE_EXECUTE_READWRITE makes the memory executable —
   presumably intentional for this project; confirm against callers. */
static void* win32mmap(size_t size)
{
	void* ptr = VirtualAlloc(0, size, MEM_RESERVE | MEM_COMMIT, PAGE_EXECUTE_READWRITE);
	return (ptr != 0) ? ptr : MFAIL;
}

/* Like win32mmap, but used for large "direct" allocations; MEM_TOP_DOWN
   asks the OS to place the block at a high address, keeping it away from
   the normally growing heap.  Returns MFAIL on failure. */
static void* win32direct_mmap(size_t size)
{
	void* ptr = VirtualAlloc(0, size, MEM_RESERVE | MEM_COMMIT | MEM_TOP_DOWN,
					PAGE_EXECUTE_READWRITE);
	return (ptr != 0) ? ptr : MFAIL;
}

/* munmap emulation on Windows.  VirtualFree(MEM_RELEASE) must be issued
   once per allocation region, so walk the range with VirtualQuery and
   release each region in turn.  Returns 0 on success, -1 on any mismatch
   between the queried regions and the requested range. */
static int win32munmap(void* ptr, size_t size)
{
	MEMORY_BASIC_INFORMATION minfo;
	char* cptr = ptr;
	while(size)
	{
		if (VirtualQuery(cptr, &minfo, sizeof(minfo)) == 0)
			return -1;
		/* The region must start exactly at cptr and fit inside `size`. */
		if (minfo.BaseAddress != cptr || minfo.AllocationBase != cptr ||
			minfo.State != MEM_COMMIT || minfo.RegionSize > size)
			return -1;
		if (VirtualFree(cptr, 0, MEM_RELEASE) == 0)
			return -1;
		cptr += minfo.RegionSize;
		size -= minfo.RegionSize;
	}

	return 0;
}

#define CALL_MMAP(s) win32mmap(s)
#define CALL_MUNMAP(a, s) win32munmap((a), (s))
#define DIRECT_MMAP(s) win32direct_mmap(s)
#endif
#endif

#if HAVE_MMAP && HAVE_MREMAP
#define CALL_MREMAP(addr, osz, nsz, mv) mremap((addr), (osz), (nsz), (mv))
#else
#define CALL_MREMAP(addr, osz, nsz, mv) MFAIL
#endif

#if HAVE_MORECORE
#define CALL_MORECORE(s) MORECORE(s)
#else
#define CALL_MORECORE(s) MFAIL
#endif

#define USE_NONCONTIGUOUS_BIT (4U)

#define EXTERN_BIT (8U)

#if USE_LOCKS
#ifndef WIN32
#include <pthread.h>
#define MLOCK_T pthread_mutex_t
#define INITIAL_LOCK(l) pthread_mutex_init(l, NULL)
#define ACQUIRE_LOCK(l) pthread_mutex_lock(l)
#define RELEASE_LOCK(l) pthread_mutex_unlock(l)

#if HAVE_MORECORE
static MLOCK_T morecore_mutex = PTHREAD_MUTEX_INITIALIZER;
#endif

static MLOCK_T magic_init_mutex = PTHREAD_MUTEX_INITIALIZER;
#else
#define MLOCK_T long
/* Spin until the lock word transitions 0 -> 1 atomically; returns 0 once
   the lock is held.  Sleep(0) yields the rest of the time slice between
   attempts so the holder can make progress. */
static int win32_acquire_lock(MLOCK_T* sl)
{
	for(;;)
	{
#ifdef InterlockedCompareExchangePointer
		if (!InterlockedCompareExchange(sl, 1, 0))
			return 0;
#else
		/* Older SDKs only provide the void*-based intrinsic. */
		if (!InterlockedCompareExchange((void**)sl, (void*)1, (void*)0))
			return 0;
#endif
		Sleep(0);
	}
}

/* Release the spin lock by atomically storing 0 into the lock word. */
static void win32_release_lock(MLOCK_T* sl)
{
	InterlockedExchange(sl, 0);
}

#define INITIAL_LOCK(l) *(l) = 0;
#define ACQUIRE_LOCK(l) win32_acquire_lock(l)
#define RELEASE_LOCK(l) win32_release_lock(l)
/* Fixed typo: HAVE_MORECODE -> HAVE_MORECORE, so the morecore mutex is
   actually declared when sbrk-style allocation is enabled. */
#if HAVE_MORECORE
static MLOCK_T morecore_mutex;
#endif
static MLOCK_T magic_init_mutex;
#endif

#define USE_LOCK_BIT (2U)
#else
#define USE_LOCK_BIT (0U)
#define INITIAL_LOCK(l)
#endif

#if USE_LOCKS && HAVE_MORECORE
#define ACQUIRE_MORECORE_LOCK() ACQUIRE_LOCK(&morecore_mutex);
#define RELEASE_MORECORE_LOCK() RELEASE_LOCK(&morecore_mutex);
#else
#define ACQUIRE_MORECORE_LOCK()
#define RELEASE_MORECORE_LOCK()
#endif

#if USE_LOCKS
#define ACQUIRE_MAGIC_INIT_LOCK() ACQUIRE_LOCK(&magic_init_mutex);
#define RELEASE_MAGIC_INIT_LOCK() RELEASE_LOCK(&magic_init_mutex);
#else
#define ACQUIRE_MAGIC_INIT_LOCK()
#define RELEASE_MAGIC_INIT_LOCK()
#endif

/* Boundary-tag chunk header.  In-use chunks use only prev_foot/head; free
   chunks additionally link into a doubly-linked free list through fd/bk
   (the user payload area is reused for the links). */
struct malloc_chunk
{
	size_t prev_foot;	/* size of previous chunk (when that chunk is free) */
	size_t head;		/* size of this chunk plus PINUSE/CINUSE bits */
	struct malloc_chunk* fd;	/* forward free-list link (free chunks only) */
	struct malloc_chunk* bk;	/* backward free-list link (free chunks only) */
};

typedef struct malloc_chunk mchunk;
typedef struct malloc_chunk* mchunkptr;
typedef struct malloc_chunk* sbinptr;
typedef unsigned int bindex_t;
typedef unsigned int binmap_t;
typedef unsigned int flag_t;

#define MCHUNK_SIZE (sizeof(mchunk))

#if FOOTERS
#define CHUNK_OVERHEAD (TWO_SIZE_T_SIZES)
#else
#define CHUNK_OVERHEAD (SIZE_T_SIZE)
#endif

#define MMAP_CHUNK_OVERHEAD (TWO_SIZE_T_SIZES)
#define MMAP_FOOT_PAD (FOUR_SIZE_T_SIZES)

/* Smallest allocatable chunk: header size rounded up to the alignment.
   Fixed typo: CHUNK_ALIGH_MASK -> CHUNK_ALIGN_MASK. */
#define MIN_CHUNK_SIZE \
	((MCHUNK_SIZE + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)

#define chunk2mem(p) ((void*)((char*)(p) + TWO_SIZE_T_SIZES))
#define mem2chunk(mem) ((mchunkptr)((char*)(mem) - TWO_SIZE_T_SIZES))

#define align_as_chunk(A) (mchunkptr)((A) + align_offset(chunk2mem(A)))

#define MAX_REQUEST ((-MIN_CHUNK_SIZE) << 2)
#define MIN_REQUEST (MIN_CHUNK_SIZE - CHUNK_OVERHEAD - SIZE_T_ONE)

#define pad_request(req) \
	(((req) + CHUNK_OVERHEAD + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)

#define request2size(req) \
	(((req) < MIN_REQUEST) ? MIN_CHUNK_SIZE : pad_request(req))

#define PINUSE_BIT (SIZE_T_ONE)
#define CINUSE_BIT (SIZE_T_TWO)
#define INUSE_BITS (PINUSE_BIT | CINUSE_BIT)

#define FENCEPOST_HEAD (INUSE_BITS | SIZE_T_SIZE)

#define cinuse(p) ((p)->head & CINUSE_BIT)
#define pinuse(p) ((p)->head & PINUSE_BIT)
#define chunksize(p) ((p)->head & ~(INUSE_BITS))

#define clear_pinuse(p) ((p)->head &= ~PINUSE_BIT)
#define clear_cinuse(p) ((p)->head &= ~CINUSE_BIT)

#define chunk_plus_offset(p, s) ((mchunkptr)(((char*)(p)) + (s)))
#define chunk_minus_offset(p, s) ((mchunkptr)(((char*)(p)) - (s)))

#define next_chunk(p) ((mchunkptr)(((char*)(p)) + ((p)->head & ~INUSE_BITS)))
#define prev_chunk(p) ((mchunkptr)(((char*)(p)) - ((p)->prev_foot)))

#define next_pinuse(p) ((next_chunk(p)->head) & PINUSE_BIT)

#define get_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_foot)
#define set_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_foot = (s))

#define set_size_and_pinuse_of_free_chunk(p, s) \
	((p)->head = (s | PINUSE_BIT), set_foot(p, s))

#define set_free_with_pinuse(p, s, n) \
	(clear_pinuse(n), set_size_and_pinuse_of_free_chunk(p, s))

#define is_mmapped(p) \
	(!((p)->head & PINUSE_BIT) && ((p)->prev_foot & IS_MMAPPED_BIT))

#define overhead_for(p) \
	(is_mmapped(p) ? MMAP_CHUNK_OVERHEAD : CHUNK_OVERHEAD)

#if MMAP_CLEARS
#define calloc_must_clear(p) (!is_mmapped(p))
#else
#define calloc_must_clear(p) (1)
#endif

/* Header of a large free chunk kept in a bitwise-trie treebin.  The first
   four fields overlay struct malloc_chunk exactly, so a tree chunk can be
   viewed as an ordinary chunk; the extra fields form the trie linkage. */
struct malloc_tree_chunk
{
	size_t                    prev_foot;
    size_t                    head;
    struct malloc_tree_chunk* fd;	/* ring of same-sized chunks */
    struct malloc_tree_chunk* bk;

	struct malloc_tree_chunk* child[2];	/* trie children */
	struct malloc_tree_chunk* parent;	/* trie parent (or bin pointer) */
	bindex_t index;	/* treebin index this chunk belongs to */
};

typedef struct malloc_tree_chunk tchunk;
typedef struct malloc_tree_chunk* tchunkptr;
typedef struct malloc_tree_chunk* tbinptr;

/* First existing child of a tree node, preferring child[0].  Fixed: the
   expansion lacked outer parentheses and ended in a stray semicolon, which
   broke any use inside a larger expression. */
#define leftmost_child(t) (((t)->child[0] != 0) ? (t)->child[0] : (t)->child[1])

/* Describes one contiguous region of memory managed by a malloc_state.
   Segments form a singly-linked list headed by malloc_state.seg. */
struct malloc_segment
{
	char* base;		/* base address of the region */
	size_t size;		/* size of the region in bytes */
	struct malloc_segment* next;	/* next segment in list (0 at end) */
	flag_t sflags;	/* IS_MMAPPED_BIT and/or EXTERN_BIT */
};

#define is_mmapped_segment(S) ((S)->sflags & IS_MMAPPED_BIT)
#define is_extern_segment(S) ((S)->sflags & EXTERN_BIT)

typedef struct malloc_segment msegment;
typedef struct malloc_segment* msegmentptr;

#define NSMALLBINS (32U)
#define NTREEBINS (32U)
#define SMALLBIN_SHIFT (3U)
#define SMALLBIN_WIDTH (SIZE_T_ONE << SMALLBIN_SHIFT)
#define TREEBIN_SHIFT (8U)
#define MIN_LARGE_SIZE (SIZE_T_ONE << TREEBIN_SHIFT)
#define MAX_SMALL_SIZE (MIN_LARGE_SIZE - SIZE_T_ONE)
#define MAX_SMALL_REQUEST (MAX_SMALL_SIZE - CHUNK_ALIGN_MASK - CHUNK_OVERHEAD)

/* Complete state of one allocation space (the global one is _gm_). */
struct malloc_state
{
	binmap_t smallmap;	/* bitmap of non-empty smallbins */
	binmap_t treemap;	/* bitmap of non-empty treebins */
	size_t dvsize;		/* size of the designated-victim chunk */
	size_t topsize;		/* size of the top (wilderness) chunk */
	char* least_addr;	/* least address ever obtained from system */
	mchunkptr dv;		/* designated victim chunk */
	mchunkptr top;		/* top chunk; nonzero once initialized */
	size_t trim_check;	/* threshold used by should_trim() */
	size_t magic;		/* checked against mparams.magic (see ok_magic) */
	mchunkptr smallbins[(NSMALLBINS + 1) * 2];	/* paired heads of small free lists */
	tbinptr treebins[NTREEBINS];	/* roots of large-chunk tries */
	size_t footprint;	/* current bytes obtained from system */
	size_t max_footprint;	/* peak bytes obtained from system */
	flag_t mflags;		/* USE_LOCK_BIT / USE_MMAP_BIT / USE_NONCONTIGUOUS_BIT */
#if USE_LOCKS
	MLOCK_T mutex;		/* serializes access when locking is enabled */
#endif
	msegment seg;		/* head of the segment list */
};

typedef struct malloc_state* mstate;

/* Global tunables, filled in once by init_mparams(). */
struct malloc_params
{
	size_t magic;		/* nonzero after init; also used for footer checks */
	size_t page_size;	/* system page size (0 means not yet initialized) */
	size_t granularity;	/* unit for system memory requests */
	size_t mmap_threshold;	/* request size at which mmap is preferred */
	size_t trim_threshold;	/* excess size that makes trimming worthwhile */
	flag_t default_mflags;	/* initial mflags for new malloc_states */
};

static struct malloc_params mparams;

static struct malloc_state _gm_;
#define gm (&_gm_)
#define is_global(M) ((M) == &_gm_)
#define is_initialized(M) ((M)->top != 0)

#define use_lock(M) ((M)->mflags & USE_LOCK_BIT)
#define enable_lock(M) ((M)->mflags |= USE_LOCK_BIT)
#define disable_lock(M) ((M)->mflags &= ~USE_LOCK_BIT)

#define use_mmap(M) ((M)->mflags & USE_MMAP_BIT)
#define enable_mmap(M) ((M)->mflags |= USE_MMAP_BIT)
#define disable_mmap(M) ((M)->mflags &= ~USE_MMAP_BIT)

#define use_noncontiguous(M) ((M)->mflags & USE_NONCONTIGUOUS_BIT)
#define disable_contiguous(M) ((M)->mflags |= USE_NONCONTIGUOUS_BIT)

#define set_lock(M, L) \
	((M)->mflags = (L) ? \
	((M)->mflags | USE_LOCK_BIT) : \
	((M)->mflags & ~USE_LOCK_BIT))

#define page_align(S) \
	(((S) + (mparams.page_size)) & ~(mparams.page_size - SIZE_T_ONE))

#define granularity_align(S) \
	(((S) + (mparams.granularity)) & ~(mparams.granularity - SIZE_T_ONE))

#define is_page_aligned(S) \
	(((size_t)(S) & (mparams.page_size - SIZE_T_ONE)) == 0)
#define is_granularity_aligned(S) \
	(((size_t)(S) & (mparams.granularity - SIZE_T_ONE)) == 0)

#define segment_holds(S, A) \
	((char*)(A) >= S->base && (char*)(A) < S->base + S->size)

/* Return the segment of m whose [base, base+size) range contains addr,
   or 0 if no segment holds it. */
static msegmentptr segment_holding(mstate m, char* addr)
{
	msegmentptr seg = &m->seg;
	while (seg != 0)
	{
		if (addr >= seg->base && addr < seg->base + seg->size)
			return seg;
		seg = seg->next;
	}
	return 0;
}

/* Return nonzero if any segment record of m lies inside the memory range
   covered by segment ss (i.e. releasing ss would destroy a link).
   BUG FIX: the upper-bound test compared against sp->base instead of
   ss->base, so the probed range was wrong whenever sp != ss. */
static int has_segment_link(mstate m, msegmentptr ss)
{
	msegmentptr sp = &m->seg;
	for (;;)
	{
		if ((char*)sp >= ss->base && (char*)sp < ss->base + ss->size)
			return 1;
		if ((sp = sp->next) == 0)
			return 0;
	}
}

#ifndef MORECORE_CANNOT_TRIM
#define should_trim(M, s) ((s) > (M)->trim_check)
#else
#define should_trim(M, s) (0)
#endif

/* Overhead reserved at the top of each segment: alignment slack, a padded
   malloc_segment record, and one minimal chunk.  Fixed typo:
   sizeof(struct_malloc_segment) -> sizeof(struct malloc_segment). */
#define TOP_FOOT_SIZE \
	(align_offset(chunk2mem(0)) + pad_request(sizeof(struct malloc_segment)) + MIN_CHUNK_SIZE)

#if USE_LOCKS
#define GLOBALLY_INITIALIZE() (mparams.page_size == 0 && init_mparams())

#define PREACTION(M) ((GLOBALLY_INITIALIZE() || use_lock(M)) ? ACQUIRE_LOCK(&(M)->mutex) : 0)
#define POSTACTION(M) {if (use_lock(M)) RELEASE_LOCK(&(M)->mutex);}
#else
#ifndef PREACTION
#define PREACTION(M) (0)
#endif

#ifndef POSTACTION
#define POSTACTION(M)
#endif
#endif

#if PROCEED_ON_ERROR
static void reset_on_error(mstate m);

#define CORRUPTION_ERROR_ACTION(m) reset_on_error(m)
#define USAGE_ERROR_ACTION(m, p)
#else
#ifndef CORRUPTION_ERROR_ACTION
#define CORRUPTION_ERROR_ACTION(m) ABORT
#endif

#ifndef USAGE_ERROR_ACTION
#define USAGE_ERROR_ACTION(m, p) ABORT
#endif
#endif

#if !DEBUG
#define check_free_chunk(M, P)
#define check_inuse_chunk(M, P)
#define check_malloced_chunk(M, P, N)
#define check_mmapped_chunk(M, P)
#define check_malloc_state(M)
#define check_top_chunk(M, P)
#else
#define check_free_chunk(M, P) do_check_free_chunk(M, P)
#define check_inuse_chunk(M, P) do_check_inuse_chunk(M, P)
#define check_top_chunk(M, P) do_check_top_chunk(M, P)
#define check_malloced_chunk(M, P, N) do_check_malloced_chunk(M, P, N)
#define check_mmapped_chunk(M, P) do_check_mmapped_chunk(M, P)
#define check_malloc_state(M) do_check_malloc_state(M)

static void do_check_any_chunk(mstate m, mchunkptr p);
static void do_check_top_chunk(mstate m, mchunkptr p);
static void do_check_mmapped_chunk(mstate m, mchunkptr p);
static void do_check_inuse_chunk(mstate m, mchunkptr p);
static void do_check_free_chunk(mstate m, mchunkptr p);
static void do_check_malloced_chunk(mstate m, void* mem, size_t s);
static void do_check_tree(mstate m, tchunkptr t);
static void do_check_treebin(mstate m, bindex_t i);
static void do_check_smallbin(mstate m, bindex_t i);
static void do_check_malloc_state(mstate m);
static int bin_find(mstate m, mchunkptr x);
static size_t traverse_and_check(mstate m);
#endif

#define is_small(s) (((s) >> SMALLBIN_SHIFT) < NSMALLBINS)
#define small_index(s) ((s) >> SMALLBIN_SHIFT)
#define small_index2size(i) ((i) << SMALLBIN_SHIFT)
#define MIN_SMALL_INDEX (small_index(MIN_CHUNK_SIZE))

#define smallbin_at(M, i) ((sbinptr)((char*)&((M)->smallbins[(i) << 1])))
#define treebin_at(M, i) (&((M)->treebins[i]))

#if defined(__GNUC__) && defined(i386)
#define compute_tree_index(S, I) \
{ \
	size_t X = S >> TREEBIN_SHIFT; \
	if (X == 0) \
		I = 0; \
	else if (X > 0xFFFF) \
		I = NTREEBINS - 1; \
	else { \
		unsigned int K; \
		__asm__("bsrl %1,%0\n\t" : "=r" (K) : "rm"  (X)); \
		I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT - 1)) & 1))); \
	} \
}
#else
#define compute_tree_index(S, I) \
{ \
	size_t X = S >> TREEBIN_SHIFT; \
	if (X == 0) \
		I = 0; \
	else if (X > 0xFFFF) \
		I = NTREEBINS - 1; \
	else { \
		unsigned int Y = (unsigned int)X; \
		unsigned int N = ((Y - 0x100) >> 16) & 8; \
		unsigned int K = (((Y <<= N) - 0x1000) >> 16) & 4;\
		N += K;\
		N += K = (((Y <<= K) - 0x4000) >> 16) & 2;\
		K = 14 - N + ((Y <<= K) >> 15);\
		I = (K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1));\
	} \
}
#endif

#define bit_for_tree_index(i) \
   (i == NTREEBINS-1)? (SIZE_T_BITSIZE-1) : (((i) >> 1) + TREEBIN_SHIFT - 2)

#define leftshift_for_tree_index(i) \
	((i == NTREEBINS - 1) ? 0 : \
	 ((SIZE_T_BITSIZE - SIZE_T_ONE) - (((i) >> 1) + TREEBIN_SHIFT - 2)))

#define minsize_for_tree_index(i) \
	((SIZE_T_ONE << (((i) >> 1) + TREEBIN_SHIFT)) | \
	(((size_t)((i) & SIZE_T_ONE)) << (((i) >> 1) + TREEBIN_SHIFT - 1)))

#define idx2bit(i) ((binmap_t)(1) << (i))

#define mark_smallmap(M, i) ((M)->smallmap |= idx2bit(i))
#define clear_smallmap(M, i) ((M)->smallmap &= ~idx2bit(i))
#define smallmap_is_marked(M, i) ((M)->smallmap & idx2bit(i))

#define mark_treemap(M, i) ((M)->treemap |= idx2bit(i))
#define clear_treemap(M, i) ((M)->treemap &= ~idx2bit(i))
#define treemap_is_marked(M, i) ((M)->treemap & idx2bit(i))

#if defined(__GNUC__) && defined(i386)
#define compute_bit2idx(X, I) \
{ \
	unsigned int J;\
	__asm__("bsfl %1, %0\n\t" : "=r" (J) : "rm" (X)); \
	I = (bindex_t)J;\
}
#else
#if USE_BUILTIN_FFS
#define compute_bit2idx(X, I) I = ffs(X) - 1
#else
#define compute_bit2idx(X, I) \
{ \
	unsigned int Y = X - 1; \
	unsigned int K = Y >> (16 - 4) & 16; \
	unsigned int N = K;        Y >>= K;\
	N += K = Y >> (8-3) &  8;  Y >>= K;\
	N += K = Y >> (4-2) &  4;  Y >>= K;\
	N += K = Y >> (2-1) &  2;  Y >>= K;\
	N += K = Y >> (1-0) &  1;  Y >>= K;\
	I = (bindex_t)(N + Y);\
}
#endif
#endif

#define least_bit(x) ((x) & -(x))

#define left_bits(x) ((x << 1) | -(x << 1))

#define same_or_left_bits(x) ((x) | (-x))

#if !INSECURE
#define ok_address(M, a) ((char*)(a) >= (M)->least_addr)

#define ok_next(p, n) ((char*)p < (char*)(n))

#define ok_cinuse(p) cinuse(p)

#define ok_pinuse(p) pinuse(p)
#else
#define ok_address(M, a) (1)
#define ok_next(b, n) (1)
#define ok_cinuse(p) (1)
#define ok_pinuse(p) (1)
#endif

#if (FOOTERS && !INSECURE)
#define ok_magic(M) ((M)->magic == mparams.magic)
#else
#define	ok_magic(M) (1)
#endif

#if !INSECURE
#if defined(__GNUC__) && __GNUC__ >= 3
#define RTCHECK(e) __builtin_expect(e, 1)
#else
#define RTCHECK(e) (e)
#endif
#else
#define RTCHECK(e) (1)
#endif

#if !FOOTERS
#define mark_inuse_foot(M, p, s)

#define set_inuse(M, p, s) \
	((p)->head = (((p)->head & PINUSE_BIT) | s | CINUSE_BIT), \
	((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT)

#define set_inuse_and_pinuse(M, p, s) \
	((p)->head = (s | PINUSE_BIT | CINUSE_BIT), \
	((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT)

#define set_size_and_pinuse_of_inuse_chunk(M, p, s) \
	((p)->head = (s | PINUSE_BIT | CINUSE_BIT))

#else

#define mark_inuse_foot(M, p, s) \
	(((mchunkptr)((char*)(p) + (s)))->prev_foot = ((size_t)(M) ^ mparams.magic))

#define get_mstate_for(p) \
	((mstate)(((mchunkptr)((char*)(p) + \
		(chunksize(p))))->prev_foot ^ mparams.magic))

#define set_inuse(M, p, s) \
	((p)->head = (((p)->head & PINUSE_BIT) | s | CINUSE_BIT), \
	(((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT), \
	mark_inuse_foot(M, p, s))

#define set_inuse_and_pinuse(M, p, s) \
	((p)->head = (s | PINUSE_BIT | CINUSE_BIT), \
	(((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT), \
	mark_inuse_foot(M, p, s))

#define set_size_and_pinuse_of_inuse_chunk(M, p, s) \
	((p)->head = (s | PINUSE_BIT | CINUSE_BIT), \
	mark_inuse_foot(M, p, s))

#endif

/* One-time initialization of mparams (thresholds, page size, granularity,
   footer magic) plus global sanity checks of the configuration.
   Idempotent: does nothing once page_size is set.  Always returns 0 so
   GLOBALLY_INITIALIZE() evaluates false after initialization.
   BUG FIX: the /dev/urandom branch began `int ((fd = open(...` — the `if`
   keyword had been typed as `int`, and O_RDONLY was misspelled 0_RDONLY. */
static int init_mparams(void)
{
	if (mparams.page_size == 0)
	{
		size_t s;

		mparams.mmap_threshold = DEFAULT_MMAP_THRESHOLD;
		mparams.trim_threshold = DEFAULT_TRIM_THRESHOLD;
#if MORECORE_CONTIGUOUS
		mparams.default_mflags = USE_LOCK_BIT | USE_MMAP_BIT;
#else
		mparams.default_mflags = USE_LOCK_BIT | USE_MMAP_BIT | USE_NONCONTIGUOUS_BIT;
#endif

#if (FOOTERS && !INSECURE)
#if USE_DEV_RANDOM
		int fd;
		unsigned char buf[sizeof(size_t)];
		/* Seed the footer-check magic from /dev/urandom when available. */
		if ((fd = open("/dev/urandom", O_RDONLY)) >= 0 &&
			read(fd, buf, sizeof(buf)) == sizeof(buf))
		{
			s = *((size_t*)buf);
			close(fd);
		}
		else
#endif
		s = (size_t)(time(0) ^ (size_t)0x55555555U);

		/* Force a nonzero value that cannot collide with chunk flag bits. */
		s |= (size_t)8U;
		s &= ~(size_t)7U;
#else
		s = (size_t)0x58585858U;
#endif
		ACQUIRE_MAGIC_INIT_LOCK();
		if (mparams.magic == 0)
		{
			mparams.magic = s;
			INITIAL_LOCK(&gm->mutex);
			gm->mflags = mparams.default_mflags;
		}
		RELEASE_MAGIC_INIT_LOCK();

#ifndef WIN32
		mparams.page_size = malloc_getpagesize;
		mparams.granularity = ((DEFAULT_GRANULARITY != 0) ?
								DEFAULT_GRANULARITY : mparams.page_size);
#else
		SYSTEM_INFO system_info;
		GetSystemInfo(&system_info);
		mparams.page_size = system_info.dwPageSize;
		mparams.granularity = system_info.dwAllocationGranularity;
#endif

	/* Sanity-check the build configuration: pointer-sized size_t,
	   power-of-two alignment, granularity and page size. */
	if ((sizeof(size_t) != sizeof(char*)) ||
		(MAX_SIZE_T < MIN_CHUNK_SIZE) ||
		(sizeof(int) < 4) ||
		(MALLOC_ALIGNMENT < (size_t)8U) ||
		((MALLOC_ALIGNMENT    & (MALLOC_ALIGNMENT-SIZE_T_ONE)) != 0) ||
		((MCHUNK_SIZE & (MCHUNK_SIZE - SIZE_T_ONE)) != 0) ||
		((mparams.granularity & (mparams.granularity - SIZE_T_ONE)) != 0) ||
		((mparams.page_size & (mparams.page_size - SIZE_T_ONE)) != 0))
		ABORT;
	}
	return 0;
}

/* Backend for mallopt: update one tunable parameter.
   Returns 1 on success, 0 for an unknown parameter or invalid value. */
static int change_mparam(int param_number, int value)
{
	size_t v = (size_t)value;
	init_mparams();
	if (param_number == M_TRIM_THRESHOLD)
	{
		mparams.trim_threshold = v;
		return 1;
	}
	if (param_number == M_GRANULARITY)
	{
		/* Granularity must be a power of two no smaller than a page. */
		if (v >= mparams.page_size && ((v & (v - 1)) == 0))
		{
			mparams.granularity = v;
			return 1;
		}
		return 0;
	}
	if (param_number == M_MMAP_THRESHOLD)
	{
		mparams.mmap_threshold = v;
		return 1;
	}
	return 0;
}

#if DEBUG
/* DEBUG: minimal sanity for any chunk — payload alignment (or fencepost)
   and an address no lower than anything we ever got from the system. */
static void do_check_any_chunk(mstate m, mchunkptr p)
{
	assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
	assert(ok_address(m, p));
}

/* DEBUG: verify invariants of the top (wilderness) chunk: it lives in a
   known segment, its size matches m->topsize and the segment remainder,
   its predecessor is in use, and nothing follows it in use terms. */
static void do_check_top_chunk(mstate m, mchunkptr p)
{
	msegmentptr sp = segment_holding(m, (char*)p);
	size_t sz = chunksize(p);
	assert(sp != 0);
	assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
	assert(ok_address(m, p));
	assert(sz == m->topsize);
	assert(sz > 0);
	assert(sz == ((sp->base + sp->size) - (char*)p) - TOP_FOOT_SIZE);
	assert(pinuse(p));
	assert(!next_pinuse(p));
}

/* DEBUG: verify a directly-mmapped chunk: flagged as mmapped, large,
   page-aligned total length, and terminated by the fencepost pattern. */
static void do_check_mmapped_chunk(mstate m, mchunkptr p)
{
	size_t sz = chunksize(p);
	/* Total mapped length = chunk + leading offset + trailing foot pad. */
	size_t len = (sz + (p->prev_foot & ~IS_MMAPPED_BIT) + MMAP_FOOT_PAD);
	assert(is_mmapped(p));
	assert(use_mmap(m));
	assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
	assert(ok_address(m, p));
	assert(!is_small(sz));
	assert((len & (mparams.page_size - SIZE_T_ONE)) == 0);
	assert(chunk_plus_offset(p, sz)->head == FENCEPOST_HEAD);
	assert(chunk_plus_offset(p, sz + SIZE_T_SIZE)->head == 0);
}

/* DEBUG: verify an in-use chunk, delegating to the mmapped check when the
   chunk was obtained by direct mmap. */
static void do_check_inuse_chunk(mstate m, mchunkptr p)
{
	do_check_any_chunk(m, p);
	assert(cinuse(p));
	assert(next_pinuse(p));
	/* If our predecessor is free, its size links must lead back to us. */
	assert(is_mmapped(p) || pinuse(p) || next_chunk(prev_chunk(p)) == p);
	if (is_mmapped(p))
		do_check_mmapped_chunk(m, p);
}

/* DEBUG: verify a free chunk: not in use, properly sized and footed, and
   (unless it is dv or top) correctly linked into its free list. */
static void do_check_free_chunk(mstate m, mchunkptr p)
{
	size_t sz = p->head & ~(PINUSE_BIT | CINUSE_BIT);
	mchunkptr next = chunk_plus_offset(p, sz);
	do_check_any_chunk(m, p);
	assert(!cinuse(p));
	assert(!next_pinuse(p));
	assert(!is_mmapped(p));
	if (p != m->dv && p != m->top)
	{
		if (sz >= MIN_CHUNK_SIZE)
		{
			assert((sz & CHUNK_ALIGN_MASK) == 0);
			assert(is_aligned(chunk2mem(p)));
			assert(next->prev_foot == sz);
			assert(pinuse(p));
			/* No two free chunks may be adjacent (they would coalesce). */
			assert(next == m->top || cinuse(next));
			assert(p->fd->bk == p);
			assert(p->bk->fd == p);
		}
		else
			/* Sub-minimum remainders are bare size_t markers. */
			assert(sz == SIZE_T_SIZE);
	}
}

/* DEBUG: verify a chunk just returned to a user for request size s:
   in use, aligned, at least as large as requested, and (unless mmapped)
   not wastefully larger than request + MIN_CHUNK_SIZE. */
static void do_check_malloced_chunk(mstate m, void* mem, size_t s)
{
	if (mem != 0)
	{
		mchunkptr p = mem2chunk(mem);
		size_t sz = p->head & ~(PINUSE_BIT | CINUSE_BIT);
		do_check_inuse_chunk(m, p);
		assert((sz & CHUNK_ALIGN_MASK) == 0);
		assert(sz >= MIN_CHUNK_SIZE);
		assert(sz >= s);
		assert(is_mmapped(p) || sz < (s + MIN_CHUNK_SIZE));
	}
}

/* DEBUG: recursively verify the treebin trie rooted at t: every node on a
   same-size ring shares index/size and is free and correctly ring-linked;
   exactly one node per ring (the head) is linked into the trie; children
   point back to their parent and sizes are consistent with the bin index.
   BUG FIX: the minsize assert below was missing a closing parenthesis,
   which made the file fail to compile whenever DEBUG was defined. */
static void do_check_tree(mstate m, tchunkptr t)
{
	tchunkptr head = 0;
	tchunkptr u = t;
	bindex_t tindex = t->index;
	size_t tsize = chunksize(t);
	bindex_t idx;
	compute_tree_index(tsize, idx);
	assert(tindex == idx);
	assert(tsize >= MIN_LARGE_SIZE);
	assert(tsize >= minsize_for_tree_index(idx));
	assert((idx == NTREEBINS - 1) || (tsize < minsize_for_tree_index(idx + 1)));

	do
	{
		/* Ring invariants: same index/size, free, doubly linked. */
		do_check_any_chunk(m, ((mchunkptr)u));
		assert(u->index == tindex);
		assert(chunksize(u) == tsize);
		assert(!cinuse(u));
		assert(!next_pinuse(u));
		assert(u->fd->bk == u);
		assert(u->bk->fd == u);
		if (u->parent == 0)
		{
			assert(u->child[0] == 0);
			assert(u->child[1] == 0);
		}
		else
		{
			/* Only one node per ring may carry the trie linkage. */
			assert(head == 0);
			head = u;
			assert(u->parent != u);
			assert(u->parent->child[0] == u ||
				u->parent->child[1] == u ||
				*((tbinptr*)(u->parent)) == u);
			if (u->child[0] != 0)
			{
				assert(u->child[0]->parent == u);
				assert(u->child[0] != u);
				do_check_tree(m, u->child[0]);
			}
			if (u->child[1] != 0)
			{
				assert(u->child[1]->parent == u);
				assert(u->child[1] != u);
				do_check_tree(m, u->child[1]);
			}
			if (u->child[0] != 0 && u->child[1] != 0)
			{
				assert(chunksize(u->child[0]) < chunksize(u->child[1]));
			}
		}
		u = u->fd;
	} while (u != t);
	assert(head != 0);
}


#endif
