Dataset columns (value ranges as reported by the dataset viewer):
repo: string, lengths 1-152
file: string, lengths 14-221
code: string, lengths 501-25k
file_length: int64, 501-25k
avg_line_length: float64, 20-99.5
max_line_length: int64, 21-134
extension_type: string, 2 classes

Each record below is listed as repo, file, code, then its file_length, avg_line_length, max_line_length, and extension_type.

repo: null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/jemalloc/src/rtree.c
#define JEMALLOC_RTREE_C_
#include "jemalloc/internal/jemalloc_internal.h"

static unsigned
hmin(unsigned ha, unsigned hb)
{
    return (ha < hb ? ha : hb);
}

/* Only the most significant bits of keys passed to rtree_[gs]et() are used. */
bool
rtree_new(rtree_t *rtree, unsigned bits, rtree_node_alloc_t *alloc,
    rtree_node_dalloc_t *dalloc)
{
    unsigned bits_in_leaf, height, i;

    assert(RTREE_HEIGHT_MAX == ((ZU(1) << (LG_SIZEOF_PTR+3)) /
        RTREE_BITS_PER_LEVEL));
    assert(bits > 0 && bits <= (sizeof(uintptr_t) << 3));

    bits_in_leaf = (bits % RTREE_BITS_PER_LEVEL) == 0 ? RTREE_BITS_PER_LEVEL
        : (bits % RTREE_BITS_PER_LEVEL);
    if (bits > bits_in_leaf) {
        height = 1 + (bits - bits_in_leaf) / RTREE_BITS_PER_LEVEL;
        if ((height-1) * RTREE_BITS_PER_LEVEL + bits_in_leaf != bits)
            height++;
    } else
        height = 1;
    assert((height-1) * RTREE_BITS_PER_LEVEL + bits_in_leaf == bits);

    rtree->alloc = alloc;
    rtree->dalloc = dalloc;
    rtree->height = height;

    /* Root level. */
    rtree->levels[0].subtree = NULL;
    rtree->levels[0].bits = (height > 1) ? RTREE_BITS_PER_LEVEL :
        bits_in_leaf;
    rtree->levels[0].cumbits = rtree->levels[0].bits;
    /* Interior levels. */
    for (i = 1; i < height-1; i++) {
        rtree->levels[i].subtree = NULL;
        rtree->levels[i].bits = RTREE_BITS_PER_LEVEL;
        rtree->levels[i].cumbits = rtree->levels[i-1].cumbits +
            RTREE_BITS_PER_LEVEL;
    }
    /* Leaf level. */
    if (height > 1) {
        rtree->levels[height-1].subtree = NULL;
        rtree->levels[height-1].bits = bits_in_leaf;
        rtree->levels[height-1].cumbits = bits;
    }

    /* Compute lookup table to be used by rtree_start_level(). */
    for (i = 0; i < RTREE_HEIGHT_MAX; i++) {
        rtree->start_level[i] = hmin(RTREE_HEIGHT_MAX - 1 - i, height -
            1);
    }

    return (false);
}

static void
rtree_delete_subtree(rtree_t *rtree, rtree_node_elm_t *node, unsigned level)
{
    if (level + 1 < rtree->height) {
        size_t nchildren, i;

        nchildren = ZU(1) << rtree->levels[level].bits;
        for (i = 0; i < nchildren; i++) {
            rtree_node_elm_t *child = node[i].child;
            if (child != NULL)
                rtree_delete_subtree(rtree, child, level + 1);
        }
    }
    rtree->dalloc(node);
}

void
rtree_delete(rtree_t *rtree)
{
    unsigned i;

    for (i = 0; i < rtree->height; i++) {
        rtree_node_elm_t *subtree = rtree->levels[i].subtree;

        if (subtree != NULL)
            rtree_delete_subtree(rtree, subtree, i);
    }
}

static rtree_node_elm_t *
rtree_node_init(rtree_t *rtree, unsigned level, rtree_node_elm_t **elmp)
{
    rtree_node_elm_t *node;

    if (atomic_cas_p((void **)elmp, NULL, RTREE_NODE_INITIALIZING)) {
        spin_t spinner;

        /*
         * Another thread is already in the process of initializing.
         * Spin-wait until initialization is complete.
         */
        spin_init(&spinner);
        do {
            spin_adaptive(&spinner);
            node = atomic_read_p((void **)elmp);
        } while (node == RTREE_NODE_INITIALIZING);
    } else {
        node = rtree->alloc(ZU(1) << rtree->levels[level].bits);
        if (node == NULL)
            return (NULL);
        atomic_write_p((void **)elmp, node);
    }

    return (node);
}

rtree_node_elm_t *
rtree_subtree_read_hard(rtree_t *rtree, unsigned level)
{
    return (rtree_node_init(rtree, level, &rtree->levels[level].subtree));
}

rtree_node_elm_t *
rtree_child_read_hard(rtree_t *rtree, rtree_node_elm_t *elm, unsigned level)
{
    return (rtree_node_init(rtree, level+1, &elm->child));
}
file_length: 3,324 | avg_line_length: 24 | max_line_length: 79 | extension_type: c

repo: null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/jemalloc/src/huge.c
#define JEMALLOC_HUGE_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/

static extent_node_t *
huge_node_get(const void *ptr)
{
    extent_node_t *node;

    node = chunk_lookup(ptr, true);
    assert(!extent_node_achunk_get(node));

    return (node);
}

static bool
huge_node_set(tsdn_t *tsdn, const void *ptr, extent_node_t *node)
{
    assert(extent_node_addr_get(node) == ptr);
    assert(!extent_node_achunk_get(node));
    return (chunk_register(tsdn, ptr, node));
}

static void
huge_node_reset(tsdn_t *tsdn, const void *ptr, extent_node_t *node)
{
    bool err;

    err = huge_node_set(tsdn, ptr, node);
    assert(!err);
}

static void
huge_node_unset(const void *ptr, const extent_node_t *node)
{
    chunk_deregister(ptr, node);
}

void *
huge_malloc(tsdn_t *tsdn, arena_t *arena, size_t usize, bool zero)
{
    assert(usize == s2u(usize));

    return (huge_palloc(tsdn, arena, usize, chunksize, zero));
}

void *
huge_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
    bool zero)
{
    void *ret;
    size_t ausize;
    arena_t *iarena;
    extent_node_t *node;
    size_t sn;
    bool is_zeroed;

    /* Allocate one or more contiguous chunks for this request. */
    assert(!tsdn_null(tsdn) || arena != NULL);

    ausize = sa2u(usize, alignment);
    if (unlikely(ausize == 0 || ausize > HUGE_MAXCLASS))
        return (NULL);
    assert(ausize >= chunksize);

    /* Allocate an extent node with which to track the chunk. */
    iarena = (!tsdn_null(tsdn)) ? arena_ichoose(tsdn_tsd(tsdn), NULL) :
        a0get();
    node = ipallocztm(tsdn, CACHELINE_CEILING(sizeof(extent_node_t)),
        CACHELINE, false, NULL, true, iarena);
    if (node == NULL)
        return (NULL);

    /*
     * Copy zero into is_zeroed and pass the copy to chunk_alloc(), so that
     * it is possible to make correct junk/zero fill decisions below.
     */
    is_zeroed = zero;
    if (likely(!tsdn_null(tsdn)))
        arena = arena_choose(tsdn_tsd(tsdn), arena);
    if (unlikely(arena == NULL) || (ret = arena_chunk_alloc_huge(tsdn,
        arena, usize, alignment, &sn, &is_zeroed)) == NULL) {
        idalloctm(tsdn, node, NULL, true, true);
        return (NULL);
    }

    extent_node_init(node, arena, ret, usize, sn, is_zeroed, true);

    if (huge_node_set(tsdn, ret, node)) {
        arena_chunk_dalloc_huge(tsdn, arena, ret, usize, sn);
        idalloctm(tsdn, node, NULL, true, true);
        return (NULL);
    }

    /* Insert node into huge. */
    malloc_mutex_lock(tsdn, &arena->huge_mtx);
    ql_elm_new(node, ql_link);
    ql_tail_insert(&arena->huge, node, ql_link);
    malloc_mutex_unlock(tsdn, &arena->huge_mtx);

    if (zero || (config_fill && unlikely(opt_zero))) {
        if (!is_zeroed)
            memset(ret, 0, usize);
    } else if (config_fill && unlikely(opt_junk_alloc))
        memset(ret, JEMALLOC_ALLOC_JUNK, usize);

    arena_decay_tick(tsdn, arena);
    return (ret);
}

#ifdef JEMALLOC_JET
#undef huge_dalloc_junk
#define huge_dalloc_junk JEMALLOC_N(huge_dalloc_junk_impl)
#endif
static void
huge_dalloc_junk(void *ptr, size_t usize)
{
    if (config_fill && have_dss && unlikely(opt_junk_free)) {
        /*
         * Only bother junk filling if the chunk isn't about to be
         * unmapped.
         */
        if (!config_munmap || (have_dss && chunk_in_dss(ptr)))
            memset(ptr, JEMALLOC_FREE_JUNK, usize);
    }
}
#ifdef JEMALLOC_JET
#undef huge_dalloc_junk
#define huge_dalloc_junk JEMALLOC_N(huge_dalloc_junk)
huge_dalloc_junk_t *huge_dalloc_junk = JEMALLOC_N(huge_dalloc_junk_impl);
#endif

static void
huge_ralloc_no_move_similar(tsdn_t *tsdn, void *ptr, size_t oldsize,
    size_t usize_min, size_t usize_max, bool zero)
{
    size_t usize, usize_next;
    extent_node_t *node;
    arena_t *arena;
    chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
    bool pre_zeroed, post_zeroed;

    /* Increase usize to incorporate extra. */
    for (usize = usize_min; usize < usize_max && (usize_next = s2u(usize+1))
        <= oldsize; usize = usize_next)
        ; /* Do nothing. */

    if (oldsize == usize)
        return;

    node = huge_node_get(ptr);
    arena = extent_node_arena_get(node);
    pre_zeroed = extent_node_zeroed_get(node);

    /* Fill if necessary (shrinking). */
    if (oldsize > usize) {
        size_t sdiff = oldsize - usize;
        if (config_fill && unlikely(opt_junk_free)) {
            memset((void *)((uintptr_t)ptr + usize),
                JEMALLOC_FREE_JUNK, sdiff);
            post_zeroed = false;
        } else {
            post_zeroed = !chunk_purge_wrapper(tsdn, arena,
                &chunk_hooks, ptr, CHUNK_CEILING(oldsize), usize,
                sdiff);
        }
    } else
        post_zeroed = pre_zeroed;

    malloc_mutex_lock(tsdn, &arena->huge_mtx);
    /* Update the size of the huge allocation. */
    huge_node_unset(ptr, node);
    assert(extent_node_size_get(node) != usize);
    extent_node_size_set(node, usize);
    huge_node_reset(tsdn, ptr, node);
    /* Update zeroed. */
    extent_node_zeroed_set(node, post_zeroed);
    malloc_mutex_unlock(tsdn, &arena->huge_mtx);

    arena_chunk_ralloc_huge_similar(tsdn, arena, ptr, oldsize, usize);

    /* Fill if necessary (growing). */
    if (oldsize < usize) {
        if (zero || (config_fill && unlikely(opt_zero))) {
            if (!pre_zeroed) {
                memset((void *)((uintptr_t)ptr + oldsize), 0,
                    usize - oldsize);
            }
        } else if (config_fill && unlikely(opt_junk_alloc)) {
            memset((void *)((uintptr_t)ptr + oldsize),
                JEMALLOC_ALLOC_JUNK, usize - oldsize);
        }
    }
}

static bool
huge_ralloc_no_move_shrink(tsdn_t *tsdn, void *ptr, size_t oldsize,
    size_t usize)
{
    extent_node_t *node;
    arena_t *arena;
    chunk_hooks_t chunk_hooks;
    size_t cdiff;
    bool pre_zeroed, post_zeroed;

    node = huge_node_get(ptr);
    arena = extent_node_arena_get(node);
    pre_zeroed = extent_node_zeroed_get(node);
    chunk_hooks = chunk_hooks_get(tsdn, arena);

    assert(oldsize > usize);

    /* Split excess chunks. */
    cdiff = CHUNK_CEILING(oldsize) - CHUNK_CEILING(usize);
    if (cdiff != 0 && chunk_hooks.split(ptr, CHUNK_CEILING(oldsize),
        CHUNK_CEILING(usize), cdiff, true, arena->ind))
        return (true);

    if (oldsize > usize) {
        size_t sdiff = oldsize - usize;
        if (config_fill && unlikely(opt_junk_free)) {
            huge_dalloc_junk((void *)((uintptr_t)ptr + usize),
                sdiff);
            post_zeroed = false;
        } else {
            post_zeroed = !chunk_purge_wrapper(tsdn, arena,
                &chunk_hooks, CHUNK_ADDR2BASE((uintptr_t)ptr +
                usize), CHUNK_CEILING(oldsize),
                CHUNK_ADDR2OFFSET((uintptr_t)ptr + usize), sdiff);
        }
    } else
        post_zeroed = pre_zeroed;

    malloc_mutex_lock(tsdn, &arena->huge_mtx);
    /* Update the size of the huge allocation. */
    huge_node_unset(ptr, node);
    extent_node_size_set(node, usize);
    huge_node_reset(tsdn, ptr, node);
    /* Update zeroed. */
    extent_node_zeroed_set(node, post_zeroed);
    malloc_mutex_unlock(tsdn, &arena->huge_mtx);

    /* Zap the excess chunks. */
    arena_chunk_ralloc_huge_shrink(tsdn, arena, ptr, oldsize, usize,
        extent_node_sn_get(node));

    return (false);
}

static bool
huge_ralloc_no_move_expand(tsdn_t *tsdn, void *ptr, size_t oldsize,
    size_t usize, bool zero)
{
    extent_node_t *node;
    arena_t *arena;
    bool is_zeroed_subchunk, is_zeroed_chunk;

    node = huge_node_get(ptr);
    arena = extent_node_arena_get(node);
    malloc_mutex_lock(tsdn, &arena->huge_mtx);
    is_zeroed_subchunk = extent_node_zeroed_get(node);
    malloc_mutex_unlock(tsdn, &arena->huge_mtx);

    /*
     * Use is_zeroed_chunk to detect whether the trailing memory is zeroed,
     * update extent's zeroed field, and zero as necessary.
     */
    is_zeroed_chunk = false;
    if (arena_chunk_ralloc_huge_expand(tsdn, arena, ptr, oldsize, usize,
        &is_zeroed_chunk))
        return (true);

    malloc_mutex_lock(tsdn, &arena->huge_mtx);
    huge_node_unset(ptr, node);
    extent_node_size_set(node, usize);
    extent_node_zeroed_set(node, extent_node_zeroed_get(node) &&
        is_zeroed_chunk);
    huge_node_reset(tsdn, ptr, node);
    malloc_mutex_unlock(tsdn, &arena->huge_mtx);

    if (zero || (config_fill && unlikely(opt_zero))) {
        if (!is_zeroed_subchunk) {
            memset((void *)((uintptr_t)ptr + oldsize), 0,
                CHUNK_CEILING(oldsize) - oldsize);
        }
        if (!is_zeroed_chunk) {
            memset((void *)((uintptr_t)ptr +
                CHUNK_CEILING(oldsize)), 0, usize -
                CHUNK_CEILING(oldsize));
        }
    } else if (config_fill && unlikely(opt_junk_alloc)) {
        memset((void *)((uintptr_t)ptr + oldsize), JEMALLOC_ALLOC_JUNK,
            usize - oldsize);
    }

    return (false);
}

bool
huge_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t usize_min,
    size_t usize_max, bool zero)
{
    assert(s2u(oldsize) == oldsize);
    /* The following should have been caught by callers. */
    assert(usize_min > 0 && usize_max <= HUGE_MAXCLASS);

    /* Both allocations must be huge to avoid a move. */
    if (oldsize < chunksize || usize_max < chunksize)
        return (true);

    if (CHUNK_CEILING(usize_max) > CHUNK_CEILING(oldsize)) {
        /* Attempt to expand the allocation in-place. */
        if (!huge_ralloc_no_move_expand(tsdn, ptr, oldsize, usize_max,
            zero)) {
            arena_decay_tick(tsdn, huge_aalloc(ptr));
            return (false);
        }
        /* Try again, this time with usize_min. */
        if (usize_min < usize_max && CHUNK_CEILING(usize_min) >
            CHUNK_CEILING(oldsize) && huge_ralloc_no_move_expand(tsdn,
            ptr, oldsize, usize_min, zero)) {
            arena_decay_tick(tsdn, huge_aalloc(ptr));
            return (false);
        }
    }

    /*
     * Avoid moving the allocation if the existing chunk size accommodates
     * the new size.
     */
    if (CHUNK_CEILING(oldsize) >= CHUNK_CEILING(usize_min)
        && CHUNK_CEILING(oldsize) <= CHUNK_CEILING(usize_max)) {
        huge_ralloc_no_move_similar(tsdn, ptr, oldsize, usize_min,
            usize_max, zero);
        arena_decay_tick(tsdn, huge_aalloc(ptr));
        return (false);
    }

    /* Attempt to shrink the allocation in-place. */
    if (CHUNK_CEILING(oldsize) > CHUNK_CEILING(usize_max)) {
        if (!huge_ralloc_no_move_shrink(tsdn, ptr, oldsize,
            usize_max)) {
            arena_decay_tick(tsdn, huge_aalloc(ptr));
            return (false);
        }
    }
    return (true);
}

static void *
huge_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize,
    size_t alignment, bool zero)
{
    if (alignment <= chunksize)
        return (huge_malloc(tsdn, arena, usize, zero));
    return (huge_palloc(tsdn, arena, usize, alignment, zero));
}

void *
huge_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize,
    size_t usize, size_t alignment, bool zero, tcache_t *tcache)
{
    void *ret;
    size_t copysize;

    /* The following should have been caught by callers. */
    assert(usize > 0 && usize <= HUGE_MAXCLASS);

    /* Try to avoid moving the allocation. */
    if (!huge_ralloc_no_move(tsd_tsdn(tsd), ptr, oldsize, usize, usize,
        zero))
        return (ptr);

    /*
     * usize and oldsize are different enough that we need to use a
     * different size class.  In that case, fall back to allocating new
     * space and copying.
     */
    ret = huge_ralloc_move_helper(tsd_tsdn(tsd), arena, usize, alignment,
        zero);
    if (ret == NULL)
        return (NULL);

    copysize = (usize < oldsize) ? usize : oldsize;
    memcpy(ret, ptr, copysize);
    isqalloc(tsd, ptr, oldsize, tcache, true);
    return (ret);
}

void
huge_dalloc(tsdn_t *tsdn, void *ptr)
{
    extent_node_t *node;
    arena_t *arena;

    node = huge_node_get(ptr);
    arena = extent_node_arena_get(node);
    huge_node_unset(ptr, node);
    malloc_mutex_lock(tsdn, &arena->huge_mtx);
    ql_remove(&arena->huge, node, ql_link);
    malloc_mutex_unlock(tsdn, &arena->huge_mtx);

    huge_dalloc_junk(extent_node_addr_get(node),
        extent_node_size_get(node));
    arena_chunk_dalloc_huge(tsdn, extent_node_arena_get(node),
        extent_node_addr_get(node), extent_node_size_get(node),
        extent_node_sn_get(node));
    idalloctm(tsdn, node, NULL, true, true);

    arena_decay_tick(tsdn, arena);
}

arena_t *
huge_aalloc(const void *ptr)
{
    return (extent_node_arena_get(huge_node_get(ptr)));
}

size_t
huge_salloc(tsdn_t *tsdn, const void *ptr)
{
    size_t size;
    extent_node_t *node;
    arena_t *arena;

    node = huge_node_get(ptr);
    arena = extent_node_arena_get(node);
    malloc_mutex_lock(tsdn, &arena->huge_mtx);
    size = extent_node_size_get(node);
    malloc_mutex_unlock(tsdn, &arena->huge_mtx);

    return (size);
}

prof_tctx_t *
huge_prof_tctx_get(tsdn_t *tsdn, const void *ptr)
{
    prof_tctx_t *tctx;
    extent_node_t *node;
    arena_t *arena;

    node = huge_node_get(ptr);
    arena = extent_node_arena_get(node);
    malloc_mutex_lock(tsdn, &arena->huge_mtx);
    tctx = extent_node_prof_tctx_get(node);
    malloc_mutex_unlock(tsdn, &arena->huge_mtx);

    return (tctx);
}

void
huge_prof_tctx_set(tsdn_t *tsdn, const void *ptr, prof_tctx_t *tctx)
{
    extent_node_t *node;
    arena_t *arena;

    node = huge_node_get(ptr);
    arena = extent_node_arena_get(node);
    malloc_mutex_lock(tsdn, &arena->huge_mtx);
    extent_node_prof_tctx_set(node, tctx);
    malloc_mutex_unlock(tsdn, &arena->huge_mtx);
}

void
huge_prof_tctx_reset(tsdn_t *tsdn, const void *ptr)
{
    huge_prof_tctx_set(tsdn, ptr, (prof_tctx_t *)(uintptr_t)1U);
}
file_length: 12,682 | avg_line_length: 25.533473 | max_line_length: 80 | extension_type: c

repo: null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/jemalloc/src/tcache.c
#define JEMALLOC_TCACHE_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

bool opt_tcache = true;
ssize_t opt_lg_tcache_max = LG_TCACHE_MAXCLASS_DEFAULT;

tcache_bin_info_t *tcache_bin_info;
static unsigned stack_nelms; /* Total stack elms per tcache. */

unsigned nhbins;
size_t tcache_maxclass;

tcaches_t *tcaches;

/* Index of first element within tcaches that has never been used. */
static unsigned tcaches_past;

/* Head of singly linked list tracking available tcaches elements. */
static tcaches_t *tcaches_avail;

/******************************************************************************/

size_t
tcache_salloc(tsdn_t *tsdn, const void *ptr)
{
    return (arena_salloc(tsdn, ptr, false));
}

void
tcache_event_hard(tsd_t *tsd, tcache_t *tcache)
{
    szind_t binind = tcache->next_gc_bin;
    tcache_bin_t *tbin = &tcache->tbins[binind];
    tcache_bin_info_t *tbin_info = &tcache_bin_info[binind];

    if (tbin->low_water > 0) {
        /*
         * Flush (ceiling) 3/4 of the objects below the low water mark.
         */
        if (binind < NBINS) {
            tcache_bin_flush_small(tsd, tcache, tbin, binind,
                tbin->ncached - tbin->low_water + (tbin->low_water
                >> 2));
        } else {
            tcache_bin_flush_large(tsd, tbin, binind, tbin->ncached
                - tbin->low_water + (tbin->low_water >> 2), tcache);
        }
        /*
         * Reduce fill count by 2X.  Limit lg_fill_div such that the
         * fill count is always at least 1.
         */
        if ((tbin_info->ncached_max >> (tbin->lg_fill_div+1)) >= 1)
            tbin->lg_fill_div++;
    } else if (tbin->low_water < 0) {
        /*
         * Increase fill count by 2X.  Make sure lg_fill_div stays
         * greater than 0.
         */
        if (tbin->lg_fill_div > 1)
            tbin->lg_fill_div--;
    }
    tbin->low_water = tbin->ncached;

    tcache->next_gc_bin++;
    if (tcache->next_gc_bin == nhbins)
        tcache->next_gc_bin = 0;
}

void *
tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
    tcache_bin_t *tbin, szind_t binind, bool *tcache_success)
{
    void *ret;

    arena_tcache_fill_small(tsdn, arena, tbin, binind, config_prof ?
        tcache->prof_accumbytes : 0);
    if (config_prof)
        tcache->prof_accumbytes = 0;
    ret = tcache_alloc_easy(tbin, tcache_success);

    return (ret);
}

void
tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
    szind_t binind, unsigned rem)
{
    arena_t *arena;
    void *ptr;
    unsigned i, nflush, ndeferred;
    bool merged_stats = false;

    assert(binind < NBINS);
    assert(rem <= tbin->ncached);

    arena = arena_choose(tsd, NULL);
    assert(arena != NULL);
    for (nflush = tbin->ncached - rem; nflush > 0; nflush = ndeferred) {
        /* Lock the arena bin associated with the first object. */
        arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(
            *(tbin->avail - 1));
        arena_t *bin_arena = extent_node_arena_get(&chunk->node);
        arena_bin_t *bin = &bin_arena->bins[binind];

        if (config_prof && bin_arena == arena) {
            if (arena_prof_accum(tsd_tsdn(tsd), arena,
                tcache->prof_accumbytes))
                prof_idump(tsd_tsdn(tsd));
            tcache->prof_accumbytes = 0;
        }

        malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
        if (config_stats && bin_arena == arena) {
            assert(!merged_stats);
            merged_stats = true;
            bin->stats.nflushes++;
            bin->stats.nrequests += tbin->tstats.nrequests;
            tbin->tstats.nrequests = 0;
        }
        ndeferred = 0;
        for (i = 0; i < nflush; i++) {
            ptr = *(tbin->avail - 1 - i);
            assert(ptr != NULL);
            chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
            if (extent_node_arena_get(&chunk->node) == bin_arena) {
                size_t pageind = ((uintptr_t)ptr -
                    (uintptr_t)chunk) >> LG_PAGE;
                arena_chunk_map_bits_t *bitselm =
                    arena_bitselm_get_mutable(chunk, pageind);
                arena_dalloc_bin_junked_locked(tsd_tsdn(tsd),
                    bin_arena, chunk, ptr, bitselm);
            } else {
                /*
                 * This object was allocated via a different
                 * arena bin than the one that is currently
                 * locked.  Stash the object, so that it can be
                 * handled in a future pass.
                 */
                *(tbin->avail - 1 - ndeferred) = ptr;
                ndeferred++;
            }
        }
        malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
        arena_decay_ticks(tsd_tsdn(tsd), bin_arena, nflush - ndeferred);
    }
    if (config_stats && !merged_stats) {
        /*
         * The flush loop didn't happen to flush to this thread's
         * arena, so the stats didn't get merged.  Manually do so now.
         */
        arena_bin_t *bin = &arena->bins[binind];
        malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
        bin->stats.nflushes++;
        bin->stats.nrequests += tbin->tstats.nrequests;
        tbin->tstats.nrequests = 0;
        malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
    }

    memmove(tbin->avail - rem, tbin->avail - tbin->ncached, rem *
        sizeof(void *));
    tbin->ncached = rem;
    if ((int)tbin->ncached < tbin->low_water)
        tbin->low_water = tbin->ncached;
}

void
tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
    unsigned rem, tcache_t *tcache)
{
    arena_t *arena;
    void *ptr;
    unsigned i, nflush, ndeferred;
    bool merged_stats = false;

    assert(binind < nhbins);
    assert(rem <= tbin->ncached);

    arena = arena_choose(tsd, NULL);
    assert(arena != NULL);
    for (nflush = tbin->ncached - rem; nflush > 0; nflush = ndeferred) {
        /* Lock the arena associated with the first object. */
        arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(
            *(tbin->avail - 1));
        arena_t *locked_arena = extent_node_arena_get(&chunk->node);
        UNUSED bool idump;

        if (config_prof)
            idump = false;
        malloc_mutex_lock(tsd_tsdn(tsd), &locked_arena->lock);
        if ((config_prof || config_stats) && locked_arena == arena) {
            if (config_prof) {
                idump = arena_prof_accum_locked(arena,
                    tcache->prof_accumbytes);
                tcache->prof_accumbytes = 0;
            }
            if (config_stats) {
                merged_stats = true;
                arena->stats.nrequests_large +=
                    tbin->tstats.nrequests;
                arena->stats.lstats[binind - NBINS].nrequests +=
                    tbin->tstats.nrequests;
                tbin->tstats.nrequests = 0;
            }
        }
        ndeferred = 0;
        for (i = 0; i < nflush; i++) {
            ptr = *(tbin->avail - 1 - i);
            assert(ptr != NULL);
            chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
            if (extent_node_arena_get(&chunk->node) ==
                locked_arena) {
                arena_dalloc_large_junked_locked(tsd_tsdn(tsd),
                    locked_arena, chunk, ptr);
            } else {
                /*
                 * This object was allocated via a different
                 * arena than the one that is currently locked.
                 * Stash the object, so that it can be handled
                 * in a future pass.
                 */
                *(tbin->avail - 1 - ndeferred) = ptr;
                ndeferred++;
            }
        }
        malloc_mutex_unlock(tsd_tsdn(tsd), &locked_arena->lock);
        if (config_prof && idump)
            prof_idump(tsd_tsdn(tsd));
        arena_decay_ticks(tsd_tsdn(tsd), locked_arena, nflush -
            ndeferred);
    }
    if (config_stats && !merged_stats) {
        /*
         * The flush loop didn't happen to flush to this thread's
         * arena, so the stats didn't get merged.  Manually do so now.
         */
        malloc_mutex_lock(tsd_tsdn(tsd), &arena->lock);
        arena->stats.nrequests_large += tbin->tstats.nrequests;
        arena->stats.lstats[binind - NBINS].nrequests +=
            tbin->tstats.nrequests;
        tbin->tstats.nrequests = 0;
        malloc_mutex_unlock(tsd_tsdn(tsd), &arena->lock);
    }

    memmove(tbin->avail - rem, tbin->avail - tbin->ncached, rem *
        sizeof(void *));
    tbin->ncached = rem;
    if ((int)tbin->ncached < tbin->low_water)
        tbin->low_water = tbin->ncached;
}

static void
tcache_arena_associate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena)
{
    if (config_stats) {
        /* Link into list of extant tcaches. */
        malloc_mutex_lock(tsdn, &arena->lock);
        ql_elm_new(tcache, link);
        ql_tail_insert(&arena->tcache_ql, tcache, link);
        malloc_mutex_unlock(tsdn, &arena->lock);
    }
}

static void
tcache_arena_dissociate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena)
{
    if (config_stats) {
        /* Unlink from list of extant tcaches. */
        malloc_mutex_lock(tsdn, &arena->lock);
        if (config_debug) {
            bool in_ql = false;
            tcache_t *iter;
            ql_foreach(iter, &arena->tcache_ql, link) {
                if (iter == tcache) {
                    in_ql = true;
                    break;
                }
            }
            assert(in_ql);
        }
        ql_remove(&arena->tcache_ql, tcache, link);
        tcache_stats_merge(tsdn, tcache, arena);
        malloc_mutex_unlock(tsdn, &arena->lock);
    }
}

void
tcache_arena_reassociate(tsdn_t *tsdn, tcache_t *tcache, arena_t *oldarena,
    arena_t *newarena)
{
    tcache_arena_dissociate(tsdn, tcache, oldarena);
    tcache_arena_associate(tsdn, tcache, newarena);
}

tcache_t *
tcache_get_hard(tsd_t *tsd)
{
    arena_t *arena;

    if (!tcache_enabled_get()) {
        if (tsd_nominal(tsd))
            tcache_enabled_set(false); /* Memoize. */
        return (NULL);
    }
    arena = arena_choose(tsd, NULL);
    if (unlikely(arena == NULL))
        return (NULL);
    return (tcache_create(tsd_tsdn(tsd), arena));
}

tcache_t *
tcache_create(tsdn_t *tsdn, arena_t *arena)
{
    tcache_t *tcache;
    size_t size, stack_offset;
    unsigned i;

    size = offsetof(tcache_t, tbins) + (sizeof(tcache_bin_t) * nhbins);
    /* Naturally align the pointer stacks. */
    size = PTR_CEILING(size);
    stack_offset = size;
    size += stack_nelms * sizeof(void *);
    /* Avoid false cacheline sharing. */
    size = sa2u(size, CACHELINE);

    tcache = ipallocztm(tsdn, size, CACHELINE, true, NULL, true,
        arena_get(TSDN_NULL, 0, true));
    if (tcache == NULL)
        return (NULL);

    tcache_arena_associate(tsdn, tcache, arena);

    ticker_init(&tcache->gc_ticker, TCACHE_GC_INCR);

    assert((TCACHE_NSLOTS_SMALL_MAX & 1U) == 0);
    for (i = 0; i < nhbins; i++) {
        tcache->tbins[i].lg_fill_div = 1;
        stack_offset += tcache_bin_info[i].ncached_max * sizeof(void *);
        /*
         * avail points past the available space.  Allocations will
         * access the slots toward higher addresses (for the benefit of
         * prefetch).
         */
        tcache->tbins[i].avail = (void **)((uintptr_t)tcache +
            (uintptr_t)stack_offset);
    }

    return (tcache);
}

static void
tcache_destroy(tsd_t *tsd, tcache_t *tcache)
{
    arena_t *arena;
    unsigned i;

    arena = arena_choose(tsd, NULL);
    tcache_arena_dissociate(tsd_tsdn(tsd), tcache, arena);

    for (i = 0; i < NBINS; i++) {
        tcache_bin_t *tbin = &tcache->tbins[i];
        tcache_bin_flush_small(tsd, tcache, tbin, i, 0);

        if (config_stats && tbin->tstats.nrequests != 0) {
            arena_bin_t *bin = &arena->bins[i];
            malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
            bin->stats.nrequests += tbin->tstats.nrequests;
            malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
        }
    }

    for (; i < nhbins; i++) {
        tcache_bin_t *tbin = &tcache->tbins[i];
        tcache_bin_flush_large(tsd, tbin, i, 0, tcache);

        if (config_stats && tbin->tstats.nrequests != 0) {
            malloc_mutex_lock(tsd_tsdn(tsd), &arena->lock);
            arena->stats.nrequests_large += tbin->tstats.nrequests;
            arena->stats.lstats[i - NBINS].nrequests +=
                tbin->tstats.nrequests;
            malloc_mutex_unlock(tsd_tsdn(tsd), &arena->lock);
        }
    }

    if (config_prof && tcache->prof_accumbytes > 0 &&
        arena_prof_accum(tsd_tsdn(tsd), arena, tcache->prof_accumbytes))
        prof_idump(tsd_tsdn(tsd));

    idalloctm(tsd_tsdn(tsd), tcache, NULL, true, true);
}

void
tcache_cleanup(tsd_t *tsd)
{
    tcache_t *tcache;

    if (!config_tcache)
        return;

    if ((tcache = tsd_tcache_get(tsd)) != NULL) {
        tcache_destroy(tsd, tcache);
        tsd_tcache_set(tsd, NULL);
    }
}

void
tcache_enabled_cleanup(tsd_t *tsd)
{
    /* Do nothing. */
}

void
tcache_stats_merge(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena)
{
    unsigned i;

    cassert(config_stats);

    malloc_mutex_assert_owner(tsdn, &arena->lock);

    /* Merge and reset tcache stats. */
    for (i = 0; i < NBINS; i++) {
        arena_bin_t *bin = &arena->bins[i];
        tcache_bin_t *tbin = &tcache->tbins[i];
        malloc_mutex_lock(tsdn, &bin->lock);
        bin->stats.nrequests += tbin->tstats.nrequests;
        malloc_mutex_unlock(tsdn, &bin->lock);
        tbin->tstats.nrequests = 0;
    }

    for (; i < nhbins; i++) {
        malloc_large_stats_t *lstats = &arena->stats.lstats[i - NBINS];
        tcache_bin_t *tbin = &tcache->tbins[i];
        arena->stats.nrequests_large += tbin->tstats.nrequests;
        lstats->nrequests += tbin->tstats.nrequests;
        tbin->tstats.nrequests = 0;
    }
}

bool
tcaches_create(tsd_t *tsd, unsigned *r_ind)
{
    arena_t *arena;
    tcache_t *tcache;
    tcaches_t *elm;

    if (tcaches == NULL) {
        tcaches = base_alloc(tsd_tsdn(tsd), sizeof(tcache_t *) *
            (MALLOCX_TCACHE_MAX+1));
        if (tcaches == NULL)
            return (true);
    }

    if (tcaches_avail == NULL && tcaches_past > MALLOCX_TCACHE_MAX)
        return (true);
    arena = arena_ichoose(tsd, NULL);
    if (unlikely(arena == NULL))
        return (true);
    tcache = tcache_create(tsd_tsdn(tsd), arena);
    if (tcache == NULL)
        return (true);

    if (tcaches_avail != NULL) {
        elm = tcaches_avail;
        tcaches_avail = tcaches_avail->next;
        elm->tcache = tcache;
        *r_ind = (unsigned)(elm - tcaches);
    } else {
        elm = &tcaches[tcaches_past];
        elm->tcache = tcache;
        *r_ind = tcaches_past;
        tcaches_past++;
    }

    return (false);
}

static void
tcaches_elm_flush(tsd_t *tsd, tcaches_t *elm)
{
    if (elm->tcache == NULL)
        return;
    tcache_destroy(tsd, elm->tcache);
    elm->tcache = NULL;
}

void
tcaches_flush(tsd_t *tsd, unsigned ind)
{
    tcaches_elm_flush(tsd, &tcaches[ind]);
}

void
tcaches_destroy(tsd_t *tsd, unsigned ind)
{
    tcaches_t *elm = &tcaches[ind];

    tcaches_elm_flush(tsd, elm);
    elm->next = tcaches_avail;
    tcaches_avail = elm;
}

bool
tcache_boot(tsdn_t *tsdn)
{
    unsigned i;

    /*
     * If necessary, clamp opt_lg_tcache_max, now that large_maxclass is
     * known.
     */
    if (opt_lg_tcache_max < 0 || (ZU(1) << opt_lg_tcache_max) <
        SMALL_MAXCLASS)
        tcache_maxclass = SMALL_MAXCLASS;
    else if ((ZU(1) << opt_lg_tcache_max) > large_maxclass)
        tcache_maxclass = large_maxclass;
    else
        tcache_maxclass = (ZU(1) << opt_lg_tcache_max);

    nhbins = size2index(tcache_maxclass) + 1;

    /* Initialize tcache_bin_info. */
    tcache_bin_info = (tcache_bin_info_t *)base_alloc(tsdn, nhbins *
        sizeof(tcache_bin_info_t));
    if (tcache_bin_info == NULL)
        return (true);
    stack_nelms = 0;
    for (i = 0; i < NBINS; i++) {
        if ((arena_bin_info[i].nregs << 1) <= TCACHE_NSLOTS_SMALL_MIN) {
            tcache_bin_info[i].ncached_max =
                TCACHE_NSLOTS_SMALL_MIN;
        } else if ((arena_bin_info[i].nregs << 1) <=
            TCACHE_NSLOTS_SMALL_MAX) {
            tcache_bin_info[i].ncached_max =
                (arena_bin_info[i].nregs << 1);
        } else {
            tcache_bin_info[i].ncached_max =
                TCACHE_NSLOTS_SMALL_MAX;
        }
        stack_nelms += tcache_bin_info[i].ncached_max;
    }
    for (; i < nhbins; i++) {
        tcache_bin_info[i].ncached_max = TCACHE_NSLOTS_LARGE;
        stack_nelms += tcache_bin_info[i].ncached_max;
    }

    return (false);
}
file_length: 14,530 | avg_line_length: 25.134892 | max_line_length: 80 | extension_type: c

repo: null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/jemalloc/src/chunk.c
#define JEMALLOC_CHUNK_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

const char *opt_dss = DSS_DEFAULT;
size_t opt_lg_chunk = 0;

/* Used exclusively for gdump triggering. */
static size_t curchunks;
static size_t highchunks;

rtree_t chunks_rtree;

/* Various chunk-related settings. */
size_t chunksize;
size_t chunksize_mask; /* (chunksize - 1). */
size_t chunk_npages;

static void *chunk_alloc_default(void *new_addr, size_t size,
    size_t alignment, bool *zero, bool *commit, unsigned arena_ind);
static bool chunk_dalloc_default(void *chunk, size_t size, bool committed,
    unsigned arena_ind);
static bool chunk_commit_default(void *chunk, size_t size, size_t offset,
    size_t length, unsigned arena_ind);
static bool chunk_decommit_default(void *chunk, size_t size, size_t offset,
    size_t length, unsigned arena_ind);
static bool chunk_purge_default(void *chunk, size_t size, size_t offset,
    size_t length, unsigned arena_ind);
static bool chunk_split_default(void *chunk, size_t size, size_t size_a,
    size_t size_b, bool committed, unsigned arena_ind);
static bool chunk_merge_default(void *chunk_a, size_t size_a, void *chunk_b,
    size_t size_b, bool committed, unsigned arena_ind);

const chunk_hooks_t chunk_hooks_default = {
    chunk_alloc_default,
    chunk_dalloc_default,
    chunk_commit_default,
    chunk_decommit_default,
    chunk_purge_default,
    chunk_split_default,
    chunk_merge_default
};

/******************************************************************************/
/*
 * Function prototypes for static functions that are referenced prior to
 * definition.
 */

static void chunk_record(tsdn_t *tsdn, arena_t *arena,
    chunk_hooks_t *chunk_hooks, extent_tree_t *chunks_szsnad,
    extent_tree_t *chunks_ad, bool cache, void *chunk, size_t size, size_t sn,
    bool zeroed, bool committed);

/******************************************************************************/

static chunk_hooks_t
chunk_hooks_get_locked(arena_t *arena)
{
    return (arena->chunk_hooks);
}

chunk_hooks_t
chunk_hooks_get(tsdn_t *tsdn, arena_t *arena)
{
    chunk_hooks_t chunk_hooks;

    malloc_mutex_lock(tsdn, &arena->chunks_mtx);
    chunk_hooks = chunk_hooks_get_locked(arena);
    malloc_mutex_unlock(tsdn, &arena->chunks_mtx);

    return (chunk_hooks);
}

chunk_hooks_t
chunk_hooks_set(tsdn_t *tsdn, arena_t *arena, const chunk_hooks_t *chunk_hooks)
{
    chunk_hooks_t old_chunk_hooks;

    malloc_mutex_lock(tsdn, &arena->chunks_mtx);
    old_chunk_hooks = arena->chunk_hooks;
    /*
     * Copy each field atomically so that it is impossible for readers to
     * see partially updated pointers.  There are places where readers only
     * need one hook function pointer (therefore no need to copy the
     * entirety of arena->chunk_hooks), and stale reads do not affect
     * correctness, so they perform unlocked reads.
     */
#define ATOMIC_COPY_HOOK(n) do {                \
    union {                                     \
        chunk_##n##_t **n;                      \
        void **v;                               \
    } u;                                        \
    u.n = &arena->chunk_hooks.n;                \
    atomic_write_p(u.v, chunk_hooks->n);        \
} while (0)
    ATOMIC_COPY_HOOK(alloc);
    ATOMIC_COPY_HOOK(dalloc);
    ATOMIC_COPY_HOOK(commit);
    ATOMIC_COPY_HOOK(decommit);
    ATOMIC_COPY_HOOK(purge);
    ATOMIC_COPY_HOOK(split);
    ATOMIC_COPY_HOOK(merge);
#undef ATOMIC_COPY_HOOK
    malloc_mutex_unlock(tsdn, &arena->chunks_mtx);

    return (old_chunk_hooks);
}

static void
chunk_hooks_assure_initialized_impl(tsdn_t *tsdn, arena_t *arena,
    chunk_hooks_t *chunk_hooks, bool locked)
{
    static const chunk_hooks_t uninitialized_hooks =
        CHUNK_HOOKS_INITIALIZER;

    if (memcmp(chunk_hooks, &uninitialized_hooks, sizeof(chunk_hooks_t)) ==
        0) {
        *chunk_hooks = locked ? chunk_hooks_get_locked(arena) :
            chunk_hooks_get(tsdn, arena);
    }
}

static void
chunk_hooks_assure_initialized_locked(tsdn_t *tsdn, arena_t *arena,
    chunk_hooks_t *chunk_hooks)
{
    chunk_hooks_assure_initialized_impl(tsdn, arena, chunk_hooks, true);
}

static void
chunk_hooks_assure_initialized(tsdn_t *tsdn, arena_t *arena,
    chunk_hooks_t *chunk_hooks)
{
    chunk_hooks_assure_initialized_impl(tsdn, arena, chunk_hooks, false);
}

bool
chunk_register(tsdn_t *tsdn, const void *chunk, const extent_node_t *node)
{
    assert(extent_node_addr_get(node) == chunk);

    if (rtree_set(&chunks_rtree, (uintptr_t)chunk, node))
        return (true);
    if (config_prof && opt_prof) {
        size_t size = extent_node_size_get(node);
        size_t nadd = (size == 0) ? 1 : size / chunksize;
        size_t cur = atomic_add_z(&curchunks, nadd);
        size_t high = atomic_read_z(&highchunks);
        while (cur > high && atomic_cas_z(&highchunks, high, cur)) {
            /*
             * Don't refresh cur, because it may have decreased
             * since this thread lost the highchunks update race.
             */
            high = atomic_read_z(&highchunks);
        }
        if (cur > high && prof_gdump_get_unlocked())
            prof_gdump(tsdn);
    }

    return (false);
}

void
chunk_deregister(const void *chunk, const extent_node_t *node)
{
    bool err;

    err = rtree_set(&chunks_rtree, (uintptr_t)chunk, NULL);
    assert(!err);
    if (config_prof && opt_prof) {
        size_t size = extent_node_size_get(node);
        size_t nsub = (size == 0) ? 1 : size / chunksize;
        assert(atomic_read_z(&curchunks) >= nsub);
        atomic_sub_z(&curchunks, nsub);
    }
}

/*
 * Do first-best-fit chunk selection, i.e. select the oldest/lowest chunk that
 * best fits.
 */
static extent_node_t *
chunk_first_best_fit(arena_t *arena, extent_tree_t *chunks_szsnad, size_t size)
{
    extent_node_t key;

    assert(size == CHUNK_CEILING(size));

    extent_node_init(&key, arena, NULL, size, 0, false, false);
    return (extent_tree_szsnad_nsearch(chunks_szsnad, &key));
}

static void *
chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
    extent_tree_t *chunks_szsnad, extent_tree_t *chunks_ad, bool cache,
    void *new_addr, size_t size, size_t alignment, size_t *sn, bool *zero,
    bool *commit, bool dalloc_node)
{
    void *ret;
    extent_node_t *node;
    size_t alloc_size, leadsize, trailsize;
    bool zeroed, committed;

    assert(CHUNK_CEILING(size) == size);
    assert(alignment > 0);
    assert(new_addr == NULL || alignment == chunksize);
    assert(CHUNK_ADDR2BASE(new_addr) == new_addr);
    /*
     * Cached chunks use the node linkage embedded in their headers, in
     * which case dalloc_node is true, and new_addr is non-NULL because
     * we're operating on a specific chunk.
     */
    assert(dalloc_node || new_addr != NULL);

    alloc_size = size + CHUNK_CEILING(alignment) - chunksize;
    /* Beware size_t wrap-around. */
    if (alloc_size < size)
        return (NULL);
    malloc_mutex_lock(tsdn, &arena->chunks_mtx);
    chunk_hooks_assure_initialized_locked(tsdn, arena, chunk_hooks);
    if (new_addr != NULL) {
        extent_node_t key;
        extent_node_init(&key, arena, new_addr, alloc_size, 0, false,
            false);
        node = extent_tree_ad_search(chunks_ad, &key);
    } else {
        node = chunk_first_best_fit(arena, chunks_szsnad,
            alloc_size);
    }
    if (node == NULL || (new_addr != NULL && extent_node_size_get(node) <
        size)) {
        malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
        return (NULL);
    }
    leadsize = ALIGNMENT_CEILING((uintptr_t)extent_node_addr_get(node),
        alignment) - (uintptr_t)extent_node_addr_get(node);
    assert(new_addr == NULL || leadsize == 0);
    assert(extent_node_size_get(node) >= leadsize + size);
    trailsize = extent_node_size_get(node) - leadsize - size;
    ret = (void *)((uintptr_t)extent_node_addr_get(node) + leadsize);
    *sn = extent_node_sn_get(node);
    zeroed = extent_node_zeroed_get(node);
    if (zeroed)
        *zero = true;
    committed = extent_node_committed_get(node);
    if (committed)
        *commit = true;
    /* Split the lead. */
    if (leadsize != 0 &&
        chunk_hooks->split(extent_node_addr_get(node),
        extent_node_size_get(node), leadsize, size, false, arena->ind)) {
        malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
        return (NULL);
    }
    /* Remove node from the tree. */
    extent_tree_szsnad_remove(chunks_szsnad, node);
    extent_tree_ad_remove(chunks_ad, node);
    arena_chunk_cache_maybe_remove(arena, node, cache);
    if (leadsize != 0) {
        /* Insert the leading space as a smaller chunk. */
        extent_node_size_set(node, leadsize);
        extent_tree_szsnad_insert(chunks_szsnad, node);
        extent_tree_ad_insert(chunks_ad, node);
        arena_chunk_cache_maybe_insert(arena, node, cache);
        node = NULL;
    }
    if (trailsize != 0) {
        /* Split the trail. */
        if (chunk_hooks->split(ret, size + trailsize, size,
            trailsize, false, arena->ind)) {
            if (dalloc_node && node != NULL)
                arena_node_dalloc(tsdn, arena, node);
            malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
            chunk_record(tsdn, arena, chunk_hooks, chunks_szsnad,
                chunks_ad, cache, ret, size + trailsize, *sn,
                zeroed, committed);
            return (NULL);
        }
        /* Insert the trailing space as a smaller chunk. */
        if (node == NULL) {
            node = arena_node_alloc(tsdn, arena);
            if (node == NULL) {
                malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
                chunk_record(tsdn, arena, chunk_hooks,
                    chunks_szsnad, chunks_ad, cache, ret, size
                    + trailsize, *sn, zeroed, committed);
                return (NULL);
            }
        }
        extent_node_init(node, arena, (void *)((uintptr_t)(ret) + size),
            trailsize, *sn, zeroed, committed);
        extent_tree_szsnad_insert(chunks_szsnad, node);
        extent_tree_ad_insert(chunks_ad, node);
        arena_chunk_cache_maybe_insert(arena, node, cache);
        node = NULL;
    }
    if (!committed && chunk_hooks->commit(ret, size, 0, size, arena->ind)) {
        malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
        chunk_record(tsdn, arena, chunk_hooks, chunks_szsnad, chunks_ad,
            cache, ret, size, *sn, zeroed, committed);
        return (NULL);
    }
    malloc_mutex_unlock(tsdn, &arena->chunks_mtx);

    assert(dalloc_node || node != NULL);
    if (dalloc_node && node != NULL)
        arena_node_dalloc(tsdn, arena, node);
    if (*zero) {
        if (!zeroed)
            memset(ret, 0, size);
        else if (config_debug) {
            size_t i;
            size_t *p = (size_t *)(uintptr_t)ret;

            for (i = 0; i < size / sizeof(size_t); i++)
                assert(p[i] == 0);
        }
        if (config_valgrind)
            JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ret, size);
    }
    return (ret);
}

/*
 * If the caller specifies (!*zero), it is still possible to receive zeroed
 * memory, in which case *zero is toggled to true.  arena_chunk_alloc() takes
 * advantage of this to avoid demanding zeroed chunks, but taking advantage of
 * them if they are returned.
 */
static void *
chunk_alloc_core(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
    size_t alignment, bool *zero, bool *commit, dss_prec_t dss_prec)
{
    void *ret;

    assert(size != 0);
    assert((size & chunksize_mask) == 0);
    assert(alignment != 0);
    assert((alignment & chunksize_mask) == 0);

    /* "primary" dss. */
    if (have_dss && dss_prec == dss_prec_primary && (ret =
        chunk_alloc_dss(tsdn, arena, new_addr, size, alignment, zero,
        commit)) != NULL)
        return (ret);
    /* mmap. */
    if ((ret = chunk_alloc_mmap(new_addr, size, alignment, zero, commit))
        != NULL)
        return (ret);
    /* "secondary" dss. */
    if (have_dss && dss_prec == dss_prec_secondary && (ret =
        chunk_alloc_dss(tsdn, arena, new_addr, size, alignment, zero,
        commit)) != NULL)
        return (ret);

    /* All strategies for allocation failed. */
    return (NULL);
}

void *
chunk_alloc_base(size_t size)
{
    void *ret;
    bool zero, commit;

    /*
     * Directly call chunk_alloc_mmap() rather than chunk_alloc_core()
     * because it's critical that chunk_alloc_base() return untouched
     * demand-zeroed virtual memory.
     */
    zero = true;
    commit = true;
    ret = chunk_alloc_mmap(NULL, size, chunksize, &zero, &commit);
    if (ret == NULL)
        return (NULL);
    if (config_valgrind)
        JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);

    return (ret);
}

void *
chunk_alloc_cache(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
    void *new_addr, size_t size, size_t alignment, size_t *sn, bool *zero,
    bool *commit, bool dalloc_node)
{
    void *ret;

    assert(size != 0);
    assert((size & chunksize_mask) == 0);
    assert(alignment != 0);
    assert((alignment & chunksize_mask) == 0);

    ret = chunk_recycle(tsdn, arena, chunk_hooks,
        &arena->chunks_szsnad_cached, &arena->chunks_ad_cached, true,
        new_addr, size, alignment, sn, zero, commit, dalloc_node);
    if (ret == NULL)
        return (NULL);
    if (config_valgrind)
        JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
    return (ret);
}

static arena_t *
chunk_arena_get(tsdn_t *tsdn, unsigned arena_ind)
{
    arena_t *arena;

    arena = arena_get(tsdn, arena_ind, false);
    /*
     * The arena we're allocating on behalf of must have been initialized
     * already.
     */
    assert(arena != NULL);
    return (arena);
}

static void *
chunk_alloc_default_impl(tsdn_t *tsdn, arena_t *arena, void *new_addr,
    size_t size, size_t alignment, bool *zero, bool *commit)
{
    void *ret;

    ret = chunk_alloc_core(tsdn, arena, new_addr, size, alignment, zero,
        commit, arena->dss_prec);
    if (ret == NULL)
        return (NULL);
    if (config_valgrind)
        JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size);

    return (ret);
}

static void *
chunk_alloc_default(void *new_addr, size_t size, size_t alignment, bool *zero,
    bool *commit, unsigned arena_ind)
{
    tsdn_t *tsdn;
    arena_t *arena;

    tsdn = tsdn_fetch();
    arena = chunk_arena_get(tsdn, arena_ind);

    return (chunk_alloc_default_impl(tsdn, arena, new_addr, size, alignment,
        zero, commit));
}

static void *
chunk_alloc_retained(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
    void *new_addr, size_t size, size_t alignment, size_t *sn, bool *zero,
    bool *commit)
{
    void *ret;

    assert(size != 0);
    assert((size & chunksize_mask) == 0);
    assert(alignment != 0);
    assert((alignment & chunksize_mask) == 0);

    ret = chunk_recycle(tsdn, arena, chunk_hooks,
        &arena->chunks_szsnad_retained, &arena->chunks_ad_retained, false,
        new_addr, size, alignment, sn, zero, commit, true);

    if (config_stats && ret != NULL)
        arena->stats.retained -= size;

    return (ret);
}

void *
chunk_alloc_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
    void *new_addr, size_t size, size_t alignment, size_t *sn, bool *zero,
    bool *commit)
{
    void *ret;

    chunk_hooks_assure_initialized(tsdn, arena, chunk_hooks);

    ret = chunk_alloc_retained(tsdn, arena, chunk_hooks, new_addr, size,
        alignment, sn, zero, commit);
    if (ret == NULL) {
        if (chunk_hooks->alloc == chunk_alloc_default) {
            /* Call directly to propagate tsdn. */
            ret = chunk_alloc_default_impl(tsdn, arena, new_addr,
                size, alignment, zero, commit);
        } else {
            ret = chunk_hooks->alloc(new_addr, size, alignment,
                zero, commit, arena->ind);
        }

        if (ret == NULL)
            return (NULL);

        *sn = arena_extent_sn_next(arena);

        if (config_valgrind && chunk_hooks->alloc !=
            chunk_alloc_default)
            JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, chunksize);
    }

    return (ret);
}

static void
chunk_record(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
    extent_tree_t *chunks_szsnad, extent_tree_t *chunks_ad, bool cache,
    void *chunk, size_t size, size_t sn, bool zeroed, bool committed)
{
    bool unzeroed;
    extent_node_t *node, *prev;
    extent_node_t key;

    assert(!cache || !zeroed);
    unzeroed = cache || !zeroed;
    JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(chunk, size);

    malloc_mutex_lock(tsdn, &arena->chunks_mtx);
    chunk_hooks_assure_initialized_locked(tsdn, arena, chunk_hooks);
    extent_node_init(&key, arena, (void *)((uintptr_t)chunk + size), 0, 0,
        false, false);
    node = extent_tree_ad_nsearch(chunks_ad, &key);
    /* Try to coalesce forward. */
    if (node != NULL && extent_node_addr_get(node) ==
        extent_node_addr_get(&key) && extent_node_committed_get(node) ==
        committed && !chunk_hooks->merge(chunk, size,
        extent_node_addr_get(node), extent_node_size_get(node), false,
        arena->ind)) {
        /*
         * Coalesce chunk with the following address range.  This does
         * not change the position within chunks_ad, so only
         * remove/insert from/into chunks_szsnad.
         */
        extent_tree_szsnad_remove(chunks_szsnad, node);
        arena_chunk_cache_maybe_remove(arena, node, cache);
        extent_node_addr_set(node, chunk);
        extent_node_size_set(node, size + extent_node_size_get(node));
        if (sn < extent_node_sn_get(node))
            extent_node_sn_set(node, sn);
        extent_node_zeroed_set(node, extent_node_zeroed_get(node) &&
            !unzeroed);
        extent_tree_szsnad_insert(chunks_szsnad, node);
        arena_chunk_cache_maybe_insert(arena, node, cache);
    } else {
        /* Coalescing forward failed, so insert a new node. */
        node = arena_node_alloc(tsdn, arena);
        if (node == NULL) {
            /*
             * Node allocation failed, which is an exceedingly
             * unlikely failure.  Leak chunk after making sure its
             * pages have already been purged, so that this is only
             * a virtual memory leak.
             */
            if (cache) {
                chunk_purge_wrapper(tsdn, arena, chunk_hooks,
                    chunk, size, 0, size);
            }
            goto label_return;
        }
        extent_node_init(node, arena, chunk, size, sn, !unzeroed,
            committed);
        extent_tree_ad_insert(chunks_ad, node);
        extent_tree_szsnad_insert(chunks_szsnad, node);
        arena_chunk_cache_maybe_insert(arena, node, cache);
    }

    /* Try to coalesce backward. */
    prev = extent_tree_ad_prev(chunks_ad, node);
    if (prev != NULL && (void *)((uintptr_t)extent_node_addr_get(prev) +
        extent_node_size_get(prev)) == chunk &&
        extent_node_committed_get(prev) == committed &&
        !chunk_hooks->merge(extent_node_addr_get(prev),
        extent_node_size_get(prev), chunk, size, false, arena->ind)) {
        /*
         * Coalesce chunk with the previous address range.  This does
         * not change the position within chunks_ad, so only
         * remove/insert node from/into chunks_szsnad.
         */
        extent_tree_szsnad_remove(chunks_szsnad, prev);
        extent_tree_ad_remove(chunks_ad, prev);
        arena_chunk_cache_maybe_remove(arena, prev, cache);
        extent_tree_szsnad_remove(chunks_szsnad, node);
        arena_chunk_cache_maybe_remove(arena, node, cache);
        extent_node_addr_set(node, extent_node_addr_get(prev));
        extent_node_size_set(node, extent_node_size_get(prev) +
            extent_node_size_get(node));
        if (extent_node_sn_get(prev) < extent_node_sn_get(node))
            extent_node_sn_set(node, extent_node_sn_get(prev));
        extent_node_zeroed_set(node, extent_node_zeroed_get(prev) &&
            extent_node_zeroed_get(node));
        extent_tree_szsnad_insert(chunks_szsnad, node);
        arena_chunk_cache_maybe_insert(arena, node, cache);

        arena_node_dalloc(tsdn, arena, prev);
    }

label_return:
    malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
}

void
chunk_dalloc_cache(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
    void *chunk, size_t size, size_t sn, bool committed)
{
    assert(chunk != NULL);
    assert(CHUNK_ADDR2BASE(chunk) == chunk);
    assert(size != 0);
    assert((size & chunksize_mask) == 0);

    chunk_record(tsdn, arena, chunk_hooks, &arena->chunks_szsnad_cached,
        &arena->chunks_ad_cached, true, chunk, size, sn, false,
        committed);
    arena_maybe_purge(tsdn, arena);
}

static bool
chunk_dalloc_default_impl(void *chunk, size_t size)
{
    if (!have_dss || !chunk_in_dss(chunk))
        return (chunk_dalloc_mmap(chunk, size));
    return (true);
}

static bool
chunk_dalloc_default(void *chunk, size_t size, bool committed,
    unsigned arena_ind)
{
    return (chunk_dalloc_default_impl(chunk, size));
}

void
chunk_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
    void *chunk, size_t size, size_t sn, bool zeroed, bool committed)
{
    bool err;

    assert(chunk != NULL);
    assert(CHUNK_ADDR2BASE(chunk) == chunk);
    assert(size != 0);
    assert((size & chunksize_mask) == 0);

    chunk_hooks_assure_initialized(tsdn, arena, chunk_hooks);
    /* Try to deallocate. */
    if (chunk_hooks->dalloc == chunk_dalloc_default) {
        /* Call directly to propagate tsdn. */
        err = chunk_dalloc_default_impl(chunk, size);
    } else
        err = chunk_hooks->dalloc(chunk, size, committed, arena->ind);

    if (!err)
        return;
    /* Try to decommit; purge if that fails. */
    if (committed) {
        committed = chunk_hooks->decommit(chunk, size, 0, size,
            arena->ind);
    }
    zeroed = !committed || !chunk_hooks->purge(chunk, size, 0, size,
        arena->ind);
    chunk_record(tsdn, arena, chunk_hooks, &arena->chunks_szsnad_retained,
        &arena->chunks_ad_retained, false, chunk, size, sn, zeroed,
        committed);

    if (config_stats)
        arena->stats.retained += size;
}

static bool
chunk_commit_default(void *chunk, size_t size, size_t offset, size_t length,
    unsigned arena_ind)
{
    return (pages_commit((void *)((uintptr_t)chunk + (uintptr_t)offset),
        length));
}

static bool
chunk_decommit_default(void *chunk, size_t size, size_t offset, size_t length,
    unsigned arena_ind)
{
    return (pages_decommit((void *)((uintptr_t)chunk + (uintptr_t)offset),
        length));
}

static bool
chunk_purge_default(void *chunk, size_t size, size_t offset, size_t length,
    unsigned arena_ind)
{
    assert(chunk != NULL);
    assert(CHUNK_ADDR2BASE(chunk) == chunk);
    assert((offset & PAGE_MASK) == 0);
    assert(length != 0);
    assert((length & PAGE_MASK) == 0);

    return (pages_purge((void *)((uintptr_t)chunk + (uintptr_t)offset),
        length));
}

bool
chunk_purge_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
    void *chunk, size_t size, size_t offset, size_t length)
{
    chunk_hooks_assure_initialized(tsdn, arena, chunk_hooks);
    return (chunk_hooks->purge(chunk, size, offset, length, arena->ind));
}

static bool
chunk_split_default(void *chunk, size_t size, size_t size_a, size_t size_b,
    bool committed, unsigned arena_ind)
{
    if (!maps_coalesce)
        return (true);
    return (false);
}

static bool
chunk_merge_default_impl(void *chunk_a, void *chunk_b)
{
    if (!maps_coalesce)
        return (true);
    if (have_dss && !chunk_dss_mergeable(chunk_a, chunk_b))
        return (true);

    return (false);
}

static bool
chunk_merge_default(void *chunk_a, size_t size_a, void *chunk_b, size_t size_b,
    bool committed, unsigned arena_ind)
{
    return (chunk_merge_default_impl(chunk_a, chunk_b));
}

static rtree_node_elm_t *
chunks_rtree_node_alloc(size_t nelms)
{
    return ((rtree_node_elm_t *)base_alloc(TSDN_NULL, nelms *
        sizeof(rtree_node_elm_t)));
}

bool
chunk_boot(void)
{
#ifdef _WIN32
    SYSTEM_INFO info;
    GetSystemInfo(&info);

    /*
     * Verify actual page size is equal to or an integral multiple of
     * configured page size.
     */
    if (info.dwPageSize & ((1U << LG_PAGE) - 1))
        return (true);

    /*
     * Configure chunksize (if not set) to match granularity (usually 64K),
     * so pages_map will always take fast path.
     */
    if (!opt_lg_chunk) {
        opt_lg_chunk = ffs_u((unsigned)info.dwAllocationGranularity)
            - 1;
    }
#else
    if (!opt_lg_chunk)
        opt_lg_chunk = LG_CHUNK_DEFAULT;
#endif

    /* Set variables according to the value of opt_lg_chunk. */
    chunksize = (ZU(1) << opt_lg_chunk);
    assert(chunksize >= PAGE);
    chunksize_mask = chunksize - 1;
    chunk_npages = (chunksize >> LG_PAGE);

    if (have_dss)
        chunk_dss_boot();
    if (rtree_new(&chunks_rtree, (unsigned)((ZU(1) << (LG_SIZEOF_PTR+3)) -
        opt_lg_chunk), chunks_rtree_node_alloc, NULL))
        return (true);

    return (false);
}
file_length: 23,043 | avg_line_length: 27.949749 | max_line_length: 80 | extension_type: c

repo: null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/jemalloc/src/chunk_mmap.c
#define JEMALLOC_CHUNK_MMAP_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/

static void *
chunk_alloc_mmap_slow(size_t size, size_t alignment, bool *zero, bool *commit)
{
    void *ret;
    size_t alloc_size;

    alloc_size = size + alignment - PAGE;
    /* Beware size_t wrap-around. */
    if (alloc_size < size)
        return (NULL);
    do {
        void *pages;
        size_t leadsize;
        pages = pages_map(NULL, alloc_size, commit);
        if (pages == NULL)
            return (NULL);
        leadsize = ALIGNMENT_CEILING((uintptr_t)pages, alignment) -
            (uintptr_t)pages;
        ret = pages_trim(pages, alloc_size, leadsize, size, commit);
    } while (ret == NULL);

    assert(ret != NULL);
    *zero = true;
    return (ret);
}

void *
chunk_alloc_mmap(void *new_addr, size_t size, size_t alignment, bool *zero,
    bool *commit)
{
    void *ret;
    size_t offset;

    /*
     * Ideally, there would be a way to specify alignment to mmap() (like
     * NetBSD has), but in the absence of such a feature, we have to work
     * hard to efficiently create aligned mappings.  The reliable, but
     * slow method is to create a mapping that is over-sized, then trim the
     * excess.  However, that always results in one or two calls to
     * pages_unmap().
     *
     * Optimistically try mapping precisely the right amount before falling
     * back to the slow method, with the expectation that the optimistic
     * approach works most of the time.
     */

    assert(alignment != 0);
    assert((alignment & chunksize_mask) == 0);

    ret = pages_map(new_addr, size, commit);
    if (ret == NULL || ret == new_addr)
        return (ret);
    assert(new_addr == NULL);
    offset = ALIGNMENT_ADDR2OFFSET(ret, alignment);
    if (offset != 0) {
        pages_unmap(ret, size);
        return (chunk_alloc_mmap_slow(size, alignment, zero, commit));
    }

    assert(ret != NULL);
    *zero = true;
    return (ret);
}

bool
chunk_dalloc_mmap(void *chunk, size_t size)
{
    if (config_munmap)
        pages_unmap(chunk, size);

    return (!config_munmap);
}
file_length: 1,993 | avg_line_length: 24.240506 | max_line_length: 80 | extension_type: c

repo: null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/jemalloc/src/quarantine.c
#define JEMALLOC_QUARANTINE_C_
#include "jemalloc/internal/jemalloc_internal.h"

/*
 * Quarantine pointers close to NULL are used to encode state information that
 * is used for cleaning up during thread shutdown.
 */
#define QUARANTINE_STATE_REINCARNATED	((quarantine_t *)(uintptr_t)1)
#define QUARANTINE_STATE_PURGATORY	((quarantine_t *)(uintptr_t)2)
#define QUARANTINE_STATE_MAX		QUARANTINE_STATE_PURGATORY

/******************************************************************************/
/* Function prototypes for non-inline static functions. */

static quarantine_t *quarantine_grow(tsd_t *tsd, quarantine_t *quarantine);
static void quarantine_drain_one(tsdn_t *tsdn, quarantine_t *quarantine);
static void quarantine_drain(tsdn_t *tsdn, quarantine_t *quarantine,
    size_t upper_bound);

/******************************************************************************/

static quarantine_t *
quarantine_init(tsdn_t *tsdn, size_t lg_maxobjs)
{
    quarantine_t *quarantine;
    size_t size;

    size = offsetof(quarantine_t, objs) + ((ZU(1) << lg_maxobjs) *
        sizeof(quarantine_obj_t));
    quarantine = (quarantine_t *)iallocztm(tsdn, size, size2index(size),
        false, NULL, true, arena_get(TSDN_NULL, 0, true), true);
    if (quarantine == NULL)
        return (NULL);
    quarantine->curbytes = 0;
    quarantine->curobjs = 0;
    quarantine->first = 0;
    quarantine->lg_maxobjs = lg_maxobjs;

    return (quarantine);
}

void
quarantine_alloc_hook_work(tsd_t *tsd)
{
    quarantine_t *quarantine;

    if (!tsd_nominal(tsd))
        return;

    quarantine = quarantine_init(tsd_tsdn(tsd), LG_MAXOBJS_INIT);
    /*
     * Check again whether quarantine has been initialized, because
     * quarantine_init() may have triggered recursive initialization.
     */
    if (tsd_quarantine_get(tsd) == NULL)
        tsd_quarantine_set(tsd, quarantine);
    else
        idalloctm(tsd_tsdn(tsd), quarantine, NULL, true, true);
}

static quarantine_t *
quarantine_grow(tsd_t *tsd, quarantine_t *quarantine)
{
    quarantine_t *ret;

    ret = quarantine_init(tsd_tsdn(tsd), quarantine->lg_maxobjs + 1);
    if (ret == NULL) {
        quarantine_drain_one(tsd_tsdn(tsd), quarantine);
        return (quarantine);
    }

    ret->curbytes = quarantine->curbytes;
    ret->curobjs = quarantine->curobjs;
    if (quarantine->first + quarantine->curobjs <= (ZU(1) <<
        quarantine->lg_maxobjs)) {
        /* objs ring buffer data are contiguous. */
        memcpy(ret->objs, &quarantine->objs[quarantine->first],
            quarantine->curobjs * sizeof(quarantine_obj_t));
    } else {
        /* objs ring buffer data wrap around. */
        size_t ncopy_a = (ZU(1) << quarantine->lg_maxobjs) -
            quarantine->first;
        size_t ncopy_b = quarantine->curobjs - ncopy_a;

        memcpy(ret->objs, &quarantine->objs[quarantine->first],
            ncopy_a * sizeof(quarantine_obj_t));
        memcpy(&ret->objs[ncopy_a], quarantine->objs, ncopy_b *
            sizeof(quarantine_obj_t));
    }
    idalloctm(tsd_tsdn(tsd), quarantine, NULL, true, true);

    tsd_quarantine_set(tsd, ret);
    return (ret);
}

static void
quarantine_drain_one(tsdn_t *tsdn, quarantine_t *quarantine)
{
    quarantine_obj_t *obj = &quarantine->objs[quarantine->first];
    assert(obj->usize == isalloc(tsdn, obj->ptr, config_prof));
    idalloctm(tsdn, obj->ptr, NULL, false, true);
    quarantine->curbytes -= obj->usize;
    quarantine->curobjs--;
    quarantine->first = (quarantine->first + 1) & ((ZU(1) <<
        quarantine->lg_maxobjs) - 1);
}

static void
quarantine_drain(tsdn_t *tsdn, quarantine_t *quarantine, size_t upper_bound)
{
    while (quarantine->curbytes > upper_bound && quarantine->curobjs > 0)
        quarantine_drain_one(tsdn, quarantine);
}

void
quarantine(tsd_t *tsd, void *ptr)
{
    quarantine_t *quarantine;
    size_t usize = isalloc(tsd_tsdn(tsd), ptr, config_prof);

    cassert(config_fill);
    assert(opt_quarantine);

    if ((quarantine = tsd_quarantine_get(tsd)) == NULL) {
        idalloctm(tsd_tsdn(tsd), ptr, NULL, false, true);
        return;
    }
    /*
     * Drain one or more objects if the quarantine size limit would be
     * exceeded by appending ptr.
     */
    if (quarantine->curbytes + usize > opt_quarantine) {
        size_t upper_bound = (opt_quarantine >= usize) ? opt_quarantine
            - usize : 0;
        quarantine_drain(tsd_tsdn(tsd), quarantine, upper_bound);
    }
    /* Grow the quarantine ring buffer if it's full. */
    if (quarantine->curobjs == (ZU(1) << quarantine->lg_maxobjs))
        quarantine = quarantine_grow(tsd, quarantine);
    /* quarantine_grow() must free a slot if it fails to grow. */
    assert(quarantine->curobjs < (ZU(1) << quarantine->lg_maxobjs));
    /* Append ptr if its size doesn't exceed the quarantine size. */
    if (quarantine->curbytes + usize <= opt_quarantine) {
        size_t offset = (quarantine->first + quarantine->curobjs) &
            ((ZU(1) << quarantine->lg_maxobjs) - 1);
        quarantine_obj_t *obj = &quarantine->objs[offset];
        obj->ptr = ptr;
        obj->usize = usize;
        quarantine->curbytes += usize;
        quarantine->curobjs++;
        if (config_fill && unlikely(opt_junk_free)) {
            /*
             * Only do redzone validation if Valgrind isn't in
             * operation.
             */
            if ((!config_valgrind || likely(!in_valgrind))
                && usize <= SMALL_MAXCLASS)
                arena_quarantine_junk_small(ptr, usize);
            else
                memset(ptr, JEMALLOC_FREE_JUNK, usize);
        }
    } else {
        assert(quarantine->curbytes == 0);
        idalloctm(tsd_tsdn(tsd), ptr, NULL, false, true);
    }
}

void
quarantine_cleanup(tsd_t *tsd)
{
    quarantine_t *quarantine;

    if (!config_fill)
        return;

    quarantine = tsd_quarantine_get(tsd);
    if (quarantine != NULL) {
        quarantine_drain(tsd_tsdn(tsd), quarantine, 0);
        idalloctm(tsd_tsdn(tsd), quarantine, NULL, true, true);
        tsd_quarantine_set(tsd, NULL);
    }
}
5,560
29.222826
80
c
null
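The wraparound branch in quarantine_grow() above is the standard two-memcpy copy out of a power-of-two ring buffer: when the live region runs past the end of the array, copy the tail segment first and the wrapped head second. A minimal standalone sketch of the same logic (plain C; the function name and int payload are hypothetical, not jemalloc's types):

#include <assert.h>
#include <stddef.h>
#include <string.h>

/*
 * Copy `count` live elements, starting at index `first`, out of a ring
 * buffer of power-of-two capacity `cap` into a flat destination array.
 */
static void
ring_copy_out(int *dst, const int *ring, size_t cap, size_t first,
    size_t count)
{
	assert((cap & (cap - 1)) == 0 && count <= cap && first < cap);
	if (first + count <= cap) {
		/* Live region is contiguous. */
		memcpy(dst, &ring[first], count * sizeof(int));
	} else {
		/* Live region wraps: tail segment, then wrapped head. */
		size_t ncopy_a = cap - first;
		size_t ncopy_b = count - ncopy_a;
		memcpy(dst, &ring[first], ncopy_a * sizeof(int));
		memcpy(&dst[ncopy_a], ring, ncopy_b * sizeof(int));
	}
}

The power-of-two capacity is also what lets quarantine_drain_one() advance `first` with a mask rather than a modulo.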
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/jemalloc/src/mutex.c
#define JEMALLOC_MUTEX_C_ #include "jemalloc/internal/jemalloc_internal.h" #if defined(JEMALLOC_LAZY_LOCK) && !defined(_WIN32) #include <dlfcn.h> #endif #ifndef _CRT_SPINCOUNT #define _CRT_SPINCOUNT 4000 #endif /******************************************************************************/ /* Data. */ #ifdef JEMALLOC_LAZY_LOCK bool isthreaded = false; #endif #ifdef JEMALLOC_MUTEX_INIT_CB static bool postpone_init = true; static malloc_mutex_t *postponed_mutexes = NULL; #endif #if defined(JEMALLOC_LAZY_LOCK) && !defined(_WIN32) static void pthread_create_once(void); #endif /******************************************************************************/ /* * We intercept pthread_create() calls in order to toggle isthreaded if the * process goes multi-threaded. */ #if defined(JEMALLOC_LAZY_LOCK) && !defined(_WIN32) static int (*pthread_create_fptr)(pthread_t *__restrict, const pthread_attr_t *, void *(*)(void *), void *__restrict); static void pthread_create_once(void) { pthread_create_fptr = dlsym(RTLD_NEXT, "pthread_create"); if (pthread_create_fptr == NULL) { malloc_write("<jemalloc>: Error in dlsym(RTLD_NEXT, " "\"pthread_create\")\n"); abort(); } isthreaded = true; } JEMALLOC_EXPORT int pthread_create(pthread_t *__restrict thread, const pthread_attr_t *__restrict attr, void *(*start_routine)(void *), void *__restrict arg) { static pthread_once_t once_control = PTHREAD_ONCE_INIT; pthread_once(&once_control, pthread_create_once); return (pthread_create_fptr(thread, attr, start_routine, arg)); } #endif /******************************************************************************/ #ifdef JEMALLOC_MUTEX_INIT_CB JEMALLOC_EXPORT int _pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex, void *(calloc_cb)(size_t, size_t)); #endif bool malloc_mutex_init(malloc_mutex_t *mutex, const char *name, witness_rank_t rank) { #ifdef _WIN32 # if _WIN32_WINNT >= 0x0600 InitializeSRWLock(&mutex->lock); # else if (!InitializeCriticalSectionAndSpinCount(&mutex->lock, _CRT_SPINCOUNT)) return (true); # endif #elif (defined(JEMALLOC_OS_UNFAIR_LOCK)) mutex->lock = OS_UNFAIR_LOCK_INIT; #elif (defined(JEMALLOC_OSSPIN)) mutex->lock = 0; #elif (defined(JEMALLOC_MUTEX_INIT_CB)) if (postpone_init) { mutex->postponed_next = postponed_mutexes; postponed_mutexes = mutex; } else { if (_pthread_mutex_init_calloc_cb(&mutex->lock, bootstrap_calloc) != 0) return (true); } #else pthread_mutexattr_t attr; if (pthread_mutexattr_init(&attr) != 0) return (true); pthread_mutexattr_settype(&attr, MALLOC_MUTEX_TYPE); if (pthread_mutex_init(&mutex->lock, &attr) != 0) { pthread_mutexattr_destroy(&attr); return (true); } pthread_mutexattr_destroy(&attr); #endif if (config_debug) witness_init(&mutex->witness, name, rank, NULL); return (false); } void malloc_mutex_prefork(tsdn_t *tsdn, malloc_mutex_t *mutex) { malloc_mutex_lock(tsdn, mutex); } void malloc_mutex_postfork_parent(tsdn_t *tsdn, malloc_mutex_t *mutex) { malloc_mutex_unlock(tsdn, mutex); } void malloc_mutex_postfork_child(tsdn_t *tsdn, malloc_mutex_t *mutex) { #ifdef JEMALLOC_MUTEX_INIT_CB malloc_mutex_unlock(tsdn, mutex); #else if (malloc_mutex_init(mutex, mutex->witness.name, mutex->witness.rank)) { malloc_printf("<jemalloc>: Error re-initializing mutex in " "child\n"); if (opt_abort) abort(); } #endif } bool malloc_mutex_boot(void) { #ifdef JEMALLOC_MUTEX_INIT_CB postpone_init = false; while (postponed_mutexes != NULL) { if (_pthread_mutex_init_calloc_cb(&postponed_mutexes->lock, bootstrap_calloc) != 0) return (true); postponed_mutexes = postponed_mutexes->postponed_next; } 
#endif return (false); }
3,729
22.459119
80
c
null
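mutex.c flips isthreaded lazily by interposing on pthread_create() with dlsym(RTLD_NEXT, ...). A stripped-down standalone interposer showing the same trick (a Linux/glibc sketch meant to be built as a preloaded shared object; jemalloc's version additionally serializes the lookup with pthread_once):

#define _GNU_SOURCE
#include <dlfcn.h>
#include <pthread.h>
#include <stdlib.h>

typedef int (*pthread_create_fn)(pthread_t *, const pthread_attr_t *,
    void *(*)(void *), void *);

static pthread_create_fn real_pthread_create;
static volatile int went_multithreaded = 0;

int
pthread_create(pthread_t *thread, const pthread_attr_t *attr,
    void *(*start_routine)(void *), void *arg)
{
	if (real_pthread_create == NULL) {
		/* Resolve the next (real) definition in link order. */
		real_pthread_create = (pthread_create_fn)dlsym(RTLD_NEXT,
		    "pthread_create");
		if (real_pthread_create == NULL)
			abort();
	}
	/* This is where jemalloc's wrapper sets isthreaded = true. */
	went_multithreaded = 1;
	return (real_pthread_create(thread, attr, start_routine, arg));
}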
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/jemalloc/test/unit/pack.c
#include "test/jemalloc_test.h" const char *malloc_conf = /* Use smallest possible chunk size. */ "lg_chunk:0" /* Immediately purge to minimize fragmentation. */ ",lg_dirty_mult:-1" ",decay_time:-1" ; /* * Size class that is a divisor of the page size, ideally 4+ regions per run. */ #if LG_PAGE <= 14 #define SZ (ZU(1) << (LG_PAGE - 2)) #else #define SZ 4096 #endif /* * Number of chunks to consume at high water mark. Should be at least 2 so that * if mmap()ed memory grows downward, downward growth of mmap()ed memory is * tested. */ #define NCHUNKS 8 static unsigned binind_compute(void) { size_t sz; unsigned nbins, i; sz = sizeof(nbins); assert_d_eq(mallctl("arenas.nbins", (void *)&nbins, &sz, NULL, 0), 0, "Unexpected mallctl failure"); for (i = 0; i < nbins; i++) { size_t mib[4]; size_t miblen = sizeof(mib)/sizeof(size_t); size_t size; assert_d_eq(mallctlnametomib("arenas.bin.0.size", mib, &miblen), 0, "Unexpected mallctlnametomb failure"); mib[2] = (size_t)i; sz = sizeof(size); assert_d_eq(mallctlbymib(mib, miblen, (void *)&size, &sz, NULL, 0), 0, "Unexpected mallctlbymib failure"); if (size == SZ) return (i); } test_fail("Unable to compute nregs_per_run"); return (0); } static size_t nregs_per_run_compute(void) { uint32_t nregs; size_t sz; unsigned binind = binind_compute(); size_t mib[4]; size_t miblen = sizeof(mib)/sizeof(size_t); assert_d_eq(mallctlnametomib("arenas.bin.0.nregs", mib, &miblen), 0, "Unexpected mallctlnametomb failure"); mib[2] = (size_t)binind; sz = sizeof(nregs); assert_d_eq(mallctlbymib(mib, miblen, (void *)&nregs, &sz, NULL, 0), 0, "Unexpected mallctlbymib failure"); return (nregs); } static size_t npages_per_run_compute(void) { size_t sz; unsigned binind = binind_compute(); size_t mib[4]; size_t miblen = sizeof(mib)/sizeof(size_t); size_t run_size; assert_d_eq(mallctlnametomib("arenas.bin.0.run_size", mib, &miblen), 0, "Unexpected mallctlnametomb failure"); mib[2] = (size_t)binind; sz = sizeof(run_size); assert_d_eq(mallctlbymib(mib, miblen, (void *)&run_size, &sz, NULL, 0), 0, "Unexpected mallctlbymib failure"); return (run_size >> LG_PAGE); } static size_t npages_per_chunk_compute(void) { return ((chunksize >> LG_PAGE) - map_bias); } static size_t nruns_per_chunk_compute(void) { return (npages_per_chunk_compute() / npages_per_run_compute()); } static unsigned arenas_extend_mallctl(void) { unsigned arena_ind; size_t sz; sz = sizeof(arena_ind); assert_d_eq(mallctl("arenas.extend", (void *)&arena_ind, &sz, NULL, 0), 0, "Error in arenas.extend"); return (arena_ind); } static void arena_reset_mallctl(unsigned arena_ind) { size_t mib[3]; size_t miblen = sizeof(mib)/sizeof(size_t); assert_d_eq(mallctlnametomib("arena.0.reset", mib, &miblen), 0, "Unexpected mallctlnametomib() failure"); mib[1] = (size_t)arena_ind; assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0, "Unexpected mallctlbymib() failure"); } TEST_BEGIN(test_pack) { unsigned arena_ind = arenas_extend_mallctl(); size_t nregs_per_run = nregs_per_run_compute(); size_t nruns_per_chunk = nruns_per_chunk_compute(); size_t nruns = nruns_per_chunk * NCHUNKS; size_t nregs = nregs_per_run * nruns; VARIABLE_ARRAY(void *, ptrs, nregs); size_t i, j, offset; /* Fill matrix. 
*/ for (i = offset = 0; i < nruns; i++) { for (j = 0; j < nregs_per_run; j++) { void *p = mallocx(SZ, MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE); assert_ptr_not_null(p, "Unexpected mallocx(%zu, MALLOCX_ARENA(%u) |" " MALLOCX_TCACHE_NONE) failure, run=%zu, reg=%zu", SZ, arena_ind, i, j); ptrs[(i * nregs_per_run) + j] = p; } } /* * Free all but one region of each run, but rotate which region is * preserved, so that subsequent allocations exercise the within-run * layout policy. */ offset = 0; for (i = offset = 0; i < nruns; i++, offset = (offset + 1) % nregs_per_run) { for (j = 0; j < nregs_per_run; j++) { void *p = ptrs[(i * nregs_per_run) + j]; if (offset == j) continue; dallocx(p, MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE); } } /* * Logically refill matrix, skipping preserved regions and verifying * that the matrix is unmodified. */ offset = 0; for (i = offset = 0; i < nruns; i++, offset = (offset + 1) % nregs_per_run) { for (j = 0; j < nregs_per_run; j++) { void *p; if (offset == j) continue; p = mallocx(SZ, MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE); assert_ptr_eq(p, ptrs[(i * nregs_per_run) + j], "Unexpected refill discrepancy, run=%zu, reg=%zu\n", i, j); } } /* Clean up. */ arena_reset_mallctl(arena_ind); } TEST_END int main(void) { return (test( test_pack)); }
4,849
22.429952
80
c
null
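pack.c reaches every knob through jemalloc's public mallctl namespace, leaning on the mallctlnametomib()/mallctlbymib() pair: translate the dotted name once, then patch an index into the MIB for each query. A hedged usage sketch against the public API (assumes jemalloc's installed header; the four-bin loop bound is arbitrary):

#include <stdio.h>
#include <jemalloc/jemalloc.h>

int
main(void)
{
	size_t mib[4];
	size_t miblen = sizeof(mib) / sizeof(size_t);
	size_t sz, size;
	unsigned i;

	/* Translate once; mib[2] is the bin-index placeholder. */
	if (mallctlnametomib("arenas.bin.0.size", mib, &miblen) != 0)
		return (1);
	for (i = 0; i < 4; i++) {
		mib[2] = (size_t)i;
		sz = sizeof(size);
		if (mallctlbymib(mib, miblen, &size, &sz, NULL, 0) != 0)
			return (1);
		printf("bin %u: region size %zu\n", i, size);
	}
	return (0);
}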
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/jemalloc/test/unit/prof_thread_name.c
#include "test/jemalloc_test.h" #ifdef JEMALLOC_PROF const char *malloc_conf = "prof:true,prof_active:false"; #endif static void mallctl_thread_name_get_impl(const char *thread_name_expected, const char *func, int line) { const char *thread_name_old; size_t sz; sz = sizeof(thread_name_old); assert_d_eq(mallctl("thread.prof.name", (void *)&thread_name_old, &sz, NULL, 0), 0, "%s():%d: Unexpected mallctl failure reading thread.prof.name", func, line); assert_str_eq(thread_name_old, thread_name_expected, "%s():%d: Unexpected thread.prof.name value", func, line); } #define mallctl_thread_name_get(a) \ mallctl_thread_name_get_impl(a, __func__, __LINE__) static void mallctl_thread_name_set_impl(const char *thread_name, const char *func, int line) { assert_d_eq(mallctl("thread.prof.name", NULL, NULL, (void *)&thread_name, sizeof(thread_name)), 0, "%s():%d: Unexpected mallctl failure reading thread.prof.name", func, line); mallctl_thread_name_get_impl(thread_name, func, line); } #define mallctl_thread_name_set(a) \ mallctl_thread_name_set_impl(a, __func__, __LINE__) TEST_BEGIN(test_prof_thread_name_validation) { const char *thread_name; test_skip_if(!config_prof); mallctl_thread_name_get(""); mallctl_thread_name_set("hi there"); /* NULL input shouldn't be allowed. */ thread_name = NULL; assert_d_eq(mallctl("thread.prof.name", NULL, NULL, (void *)&thread_name, sizeof(thread_name)), EFAULT, "Unexpected mallctl result writing \"%s\" to thread.prof.name", thread_name); /* '\n' shouldn't be allowed. */ thread_name = "hi\nthere"; assert_d_eq(mallctl("thread.prof.name", NULL, NULL, (void *)&thread_name, sizeof(thread_name)), EFAULT, "Unexpected mallctl result writing \"%s\" to thread.prof.name", thread_name); /* Simultaneous read/write shouldn't be allowed. */ { const char *thread_name_old; size_t sz; sz = sizeof(thread_name_old); assert_d_eq(mallctl("thread.prof.name", (void *)&thread_name_old, &sz, (void *)&thread_name, sizeof(thread_name)), EPERM, "Unexpected mallctl result writing \"%s\" to " "thread.prof.name", thread_name); } mallctl_thread_name_set(""); } TEST_END #define NTHREADS 4 #define NRESET 25 static void * thd_start(void *varg) { unsigned thd_ind = *(unsigned *)varg; char thread_name[16] = ""; unsigned i; malloc_snprintf(thread_name, sizeof(thread_name), "thread %u", thd_ind); mallctl_thread_name_get(""); mallctl_thread_name_set(thread_name); for (i = 0; i < NRESET; i++) { assert_d_eq(mallctl("prof.reset", NULL, NULL, NULL, 0), 0, "Unexpected error while resetting heap profile data"); mallctl_thread_name_get(thread_name); } mallctl_thread_name_set(thread_name); mallctl_thread_name_set(""); return (NULL); } TEST_BEGIN(test_prof_thread_name_threaded) { thd_t thds[NTHREADS]; unsigned thd_args[NTHREADS]; unsigned i; test_skip_if(!config_prof); for (i = 0; i < NTHREADS; i++) { thd_args[i] = i; thd_create(&thds[i], thd_start, (void *)&thd_args[i]); } for (i = 0; i < NTHREADS; i++) thd_join(thds[i], NULL); } TEST_END #undef NTHREADS #undef NRESET int main(void) { return (test( test_prof_thread_name_validation, test_prof_thread_name_threaded)); }
3,300
24.007576
80
c
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/jemalloc/test/unit/arena_reset.c
#include "test/jemalloc_test.h" #ifdef JEMALLOC_PROF const char *malloc_conf = "prof:true,lg_prof_sample:0"; #endif static unsigned get_nsizes_impl(const char *cmd) { unsigned ret; size_t z; z = sizeof(unsigned); assert_d_eq(mallctl(cmd, (void *)&ret, &z, NULL, 0), 0, "Unexpected mallctl(\"%s\", ...) failure", cmd); return (ret); } static unsigned get_nsmall(void) { return (get_nsizes_impl("arenas.nbins")); } static unsigned get_nlarge(void) { return (get_nsizes_impl("arenas.nlruns")); } static unsigned get_nhuge(void) { return (get_nsizes_impl("arenas.nhchunks")); } static size_t get_size_impl(const char *cmd, size_t ind) { size_t ret; size_t z; size_t mib[4]; size_t miblen = 4; z = sizeof(size_t); assert_d_eq(mallctlnametomib(cmd, mib, &miblen), 0, "Unexpected mallctlnametomib(\"%s\", ...) failure", cmd); mib[2] = ind; z = sizeof(size_t); assert_d_eq(mallctlbymib(mib, miblen, (void *)&ret, &z, NULL, 0), 0, "Unexpected mallctlbymib([\"%s\", %zu], ...) failure", cmd, ind); return (ret); } static size_t get_small_size(size_t ind) { return (get_size_impl("arenas.bin.0.size", ind)); } static size_t get_large_size(size_t ind) { return (get_size_impl("arenas.lrun.0.size", ind)); } static size_t get_huge_size(size_t ind) { return (get_size_impl("arenas.hchunk.0.size", ind)); } TEST_BEGIN(test_arena_reset) { #define NHUGE 4 unsigned arena_ind, nsmall, nlarge, nhuge, nptrs, i; size_t sz, miblen; void **ptrs; int flags; size_t mib[3]; tsdn_t *tsdn; test_skip_if((config_valgrind && unlikely(in_valgrind)) || (config_fill && unlikely(opt_quarantine))); sz = sizeof(unsigned); assert_d_eq(mallctl("arenas.extend", (void *)&arena_ind, &sz, NULL, 0), 0, "Unexpected mallctl() failure"); flags = MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE; nsmall = get_nsmall(); nlarge = get_nlarge(); nhuge = get_nhuge() > NHUGE ? NHUGE : get_nhuge(); nptrs = nsmall + nlarge + nhuge; ptrs = (void **)malloc(nptrs * sizeof(void *)); assert_ptr_not_null(ptrs, "Unexpected malloc() failure"); /* Allocate objects with a wide range of sizes. */ for (i = 0; i < nsmall; i++) { sz = get_small_size(i); ptrs[i] = mallocx(sz, flags); assert_ptr_not_null(ptrs[i], "Unexpected mallocx(%zu, %#x) failure", sz, flags); } for (i = 0; i < nlarge; i++) { sz = get_large_size(i); ptrs[nsmall + i] = mallocx(sz, flags); assert_ptr_not_null(ptrs[i], "Unexpected mallocx(%zu, %#x) failure", sz, flags); } for (i = 0; i < nhuge; i++) { sz = get_huge_size(i); ptrs[nsmall + nlarge + i] = mallocx(sz, flags); assert_ptr_not_null(ptrs[i], "Unexpected mallocx(%zu, %#x) failure", sz, flags); } tsdn = tsdn_fetch(); /* Verify allocations. */ for (i = 0; i < nptrs; i++) { assert_zu_gt(ivsalloc(tsdn, ptrs[i], false), 0, "Allocation should have queryable size"); } /* Reset. */ miblen = sizeof(mib)/sizeof(size_t); assert_d_eq(mallctlnametomib("arena.0.reset", mib, &miblen), 0, "Unexpected mallctlnametomib() failure"); mib[1] = (size_t)arena_ind; assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0, "Unexpected mallctlbymib() failure"); /* Verify allocations no longer exist. */ for (i = 0; i < nptrs; i++) { assert_zu_eq(ivsalloc(tsdn, ptrs[i], false), 0, "Allocation should no longer exist"); } free(ptrs); } TEST_END int main(void) { return (test( test_arena_reset)); }
3,442
20.51875
73
c
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/jemalloc/test/unit/witness.c
#include "test/jemalloc_test.h" static witness_lock_error_t *witness_lock_error_orig; static witness_owner_error_t *witness_owner_error_orig; static witness_not_owner_error_t *witness_not_owner_error_orig; static witness_lockless_error_t *witness_lockless_error_orig; static bool saw_lock_error; static bool saw_owner_error; static bool saw_not_owner_error; static bool saw_lockless_error; static void witness_lock_error_intercept(const witness_list_t *witnesses, const witness_t *witness) { saw_lock_error = true; } static void witness_owner_error_intercept(const witness_t *witness) { saw_owner_error = true; } static void witness_not_owner_error_intercept(const witness_t *witness) { saw_not_owner_error = true; } static void witness_lockless_error_intercept(const witness_list_t *witnesses) { saw_lockless_error = true; } static int witness_comp(const witness_t *a, const witness_t *b) { assert_u_eq(a->rank, b->rank, "Witnesses should have equal rank"); return (strcmp(a->name, b->name)); } static int witness_comp_reverse(const witness_t *a, const witness_t *b) { assert_u_eq(a->rank, b->rank, "Witnesses should have equal rank"); return (-strcmp(a->name, b->name)); } TEST_BEGIN(test_witness) { witness_t a, b; tsdn_t *tsdn; test_skip_if(!config_debug); tsdn = tsdn_fetch(); witness_assert_lockless(tsdn); witness_init(&a, "a", 1, NULL); witness_assert_not_owner(tsdn, &a); witness_lock(tsdn, &a); witness_assert_owner(tsdn, &a); witness_init(&b, "b", 2, NULL); witness_assert_not_owner(tsdn, &b); witness_lock(tsdn, &b); witness_assert_owner(tsdn, &b); witness_unlock(tsdn, &a); witness_unlock(tsdn, &b); witness_assert_lockless(tsdn); } TEST_END TEST_BEGIN(test_witness_comp) { witness_t a, b, c, d; tsdn_t *tsdn; test_skip_if(!config_debug); tsdn = tsdn_fetch(); witness_assert_lockless(tsdn); witness_init(&a, "a", 1, witness_comp); witness_assert_not_owner(tsdn, &a); witness_lock(tsdn, &a); witness_assert_owner(tsdn, &a); witness_init(&b, "b", 1, witness_comp); witness_assert_not_owner(tsdn, &b); witness_lock(tsdn, &b); witness_assert_owner(tsdn, &b); witness_unlock(tsdn, &b); witness_lock_error_orig = witness_lock_error; witness_lock_error = witness_lock_error_intercept; saw_lock_error = false; witness_init(&c, "c", 1, witness_comp_reverse); witness_assert_not_owner(tsdn, &c); assert_false(saw_lock_error, "Unexpected witness lock error"); witness_lock(tsdn, &c); assert_true(saw_lock_error, "Expected witness lock error"); witness_unlock(tsdn, &c); saw_lock_error = false; witness_init(&d, "d", 1, NULL); witness_assert_not_owner(tsdn, &d); assert_false(saw_lock_error, "Unexpected witness lock error"); witness_lock(tsdn, &d); assert_true(saw_lock_error, "Expected witness lock error"); witness_unlock(tsdn, &d); witness_unlock(tsdn, &a); witness_assert_lockless(tsdn); witness_lock_error = witness_lock_error_orig; } TEST_END TEST_BEGIN(test_witness_reversal) { witness_t a, b; tsdn_t *tsdn; test_skip_if(!config_debug); witness_lock_error_orig = witness_lock_error; witness_lock_error = witness_lock_error_intercept; saw_lock_error = false; tsdn = tsdn_fetch(); witness_assert_lockless(tsdn); witness_init(&a, "a", 1, NULL); witness_init(&b, "b", 2, NULL); witness_lock(tsdn, &b); assert_false(saw_lock_error, "Unexpected witness lock error"); witness_lock(tsdn, &a); assert_true(saw_lock_error, "Expected witness lock error"); witness_unlock(tsdn, &a); witness_unlock(tsdn, &b); witness_assert_lockless(tsdn); witness_lock_error = witness_lock_error_orig; } TEST_END TEST_BEGIN(test_witness_recursive) { witness_t a; tsdn_t 
*tsdn; test_skip_if(!config_debug); witness_not_owner_error_orig = witness_not_owner_error; witness_not_owner_error = witness_not_owner_error_intercept; saw_not_owner_error = false; witness_lock_error_orig = witness_lock_error; witness_lock_error = witness_lock_error_intercept; saw_lock_error = false; tsdn = tsdn_fetch(); witness_assert_lockless(tsdn); witness_init(&a, "a", 1, NULL); witness_lock(tsdn, &a); assert_false(saw_lock_error, "Unexpected witness lock error"); assert_false(saw_not_owner_error, "Unexpected witness not owner error"); witness_lock(tsdn, &a); assert_true(saw_lock_error, "Expected witness lock error"); assert_true(saw_not_owner_error, "Expected witness not owner error"); witness_unlock(tsdn, &a); witness_assert_lockless(tsdn); witness_not_owner_error = witness_not_owner_error_orig; witness_lock_error = witness_lock_error_orig; } TEST_END TEST_BEGIN(test_witness_unlock_not_owned) { witness_t a; tsdn_t *tsdn; test_skip_if(!config_debug); witness_owner_error_orig = witness_owner_error; witness_owner_error = witness_owner_error_intercept; saw_owner_error = false; tsdn = tsdn_fetch(); witness_assert_lockless(tsdn); witness_init(&a, "a", 1, NULL); assert_false(saw_owner_error, "Unexpected owner error"); witness_unlock(tsdn, &a); assert_true(saw_owner_error, "Expected owner error"); witness_assert_lockless(tsdn); witness_owner_error = witness_owner_error_orig; } TEST_END TEST_BEGIN(test_witness_lockful) { witness_t a; tsdn_t *tsdn; test_skip_if(!config_debug); witness_lockless_error_orig = witness_lockless_error; witness_lockless_error = witness_lockless_error_intercept; saw_lockless_error = false; tsdn = tsdn_fetch(); witness_assert_lockless(tsdn); witness_init(&a, "a", 1, NULL); assert_false(saw_lockless_error, "Unexpected lockless error"); witness_assert_lockless(tsdn); witness_lock(tsdn, &a); witness_assert_lockless(tsdn); assert_true(saw_lockless_error, "Expected lockless error"); witness_unlock(tsdn, &a); witness_assert_lockless(tsdn); witness_lockless_error = witness_lockless_error_orig; } TEST_END int main(void) { return (test( test_witness, test_witness_comp, test_witness_reversal, test_witness_recursive, test_witness_unlock_not_owned, test_witness_lockful)); }
6,010
20.544803
73
c
null
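The witness tests above encode jemalloc's lock-order discipline: every lock carries a rank, and acquisition order must follow increasing rank (equal ranks defer to an optional comparator, which the sketch below ignores). A toy per-thread checker of that core invariant (hypothetical, C11 _Thread_local):

#include <assert.h>
#include <stddef.h>

#define MAX_HELD 16

/* Per-thread stack of ranks for currently held locks. */
static _Thread_local unsigned held_ranks[MAX_HELD];
static _Thread_local size_t nheld = 0;

static void
witness_lock_check(unsigned rank)
{
	/* Ranks must strictly increase along any acquisition path. */
	assert(nheld == 0 || rank > held_ranks[nheld - 1]);
	assert(nheld < MAX_HELD);
	held_ranks[nheld++] = rank;
}

static void
witness_unlock_check(unsigned rank)
{
	/* Simplification: require LIFO release order. */
	assert(nheld > 0 && held_ranks[nheld - 1] == rank);
	nheld--;
}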
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/jemalloc/test/unit/rb.c
#include "test/jemalloc_test.h" #define rbtn_black_height(a_type, a_field, a_rbt, r_height) do { \ a_type *rbp_bh_t; \ for (rbp_bh_t = (a_rbt)->rbt_root, (r_height) = 0; \ rbp_bh_t != NULL; \ rbp_bh_t = rbtn_left_get(a_type, a_field, rbp_bh_t)) { \ if (!rbtn_red_get(a_type, a_field, rbp_bh_t)) { \ (r_height)++; \ } \ } \ } while (0) typedef struct node_s node_t; struct node_s { #define NODE_MAGIC 0x9823af7e uint32_t magic; rb_node(node_t) link; uint64_t key; }; static int node_cmp(const node_t *a, const node_t *b) { int ret; assert_u32_eq(a->magic, NODE_MAGIC, "Bad magic"); assert_u32_eq(b->magic, NODE_MAGIC, "Bad magic"); ret = (a->key > b->key) - (a->key < b->key); if (ret == 0) { /* * Duplicates are not allowed in the tree, so force an * arbitrary ordering for non-identical items with equal keys. */ ret = (((uintptr_t)a) > ((uintptr_t)b)) - (((uintptr_t)a) < ((uintptr_t)b)); } return (ret); } typedef rb_tree(node_t) tree_t; rb_gen(static, tree_, tree_t, node_t, link, node_cmp); TEST_BEGIN(test_rb_empty) { tree_t tree; node_t key; tree_new(&tree); assert_true(tree_empty(&tree), "Tree should be empty"); assert_ptr_null(tree_first(&tree), "Unexpected node"); assert_ptr_null(tree_last(&tree), "Unexpected node"); key.key = 0; key.magic = NODE_MAGIC; assert_ptr_null(tree_search(&tree, &key), "Unexpected node"); key.key = 0; key.magic = NODE_MAGIC; assert_ptr_null(tree_nsearch(&tree, &key), "Unexpected node"); key.key = 0; key.magic = NODE_MAGIC; assert_ptr_null(tree_psearch(&tree, &key), "Unexpected node"); } TEST_END static unsigned tree_recurse(node_t *node, unsigned black_height, unsigned black_depth) { unsigned ret = 0; node_t *left_node; node_t *right_node; if (node == NULL) return (ret); left_node = rbtn_left_get(node_t, link, node); right_node = rbtn_right_get(node_t, link, node); if (!rbtn_red_get(node_t, link, node)) black_depth++; /* Red nodes must be interleaved with black nodes. */ if (rbtn_red_get(node_t, link, node)) { if (left_node != NULL) assert_false(rbtn_red_get(node_t, link, left_node), "Node should be black"); if (right_node != NULL) assert_false(rbtn_red_get(node_t, link, right_node), "Node should be black"); } /* Self. */ assert_u32_eq(node->magic, NODE_MAGIC, "Bad magic"); /* Left subtree. */ if (left_node != NULL) ret += tree_recurse(left_node, black_height, black_depth); else ret += (black_depth != black_height); /* Right subtree. */ if (right_node != NULL) ret += tree_recurse(right_node, black_height, black_depth); else ret += (black_depth != black_height); return (ret); } static node_t * tree_iterate_cb(tree_t *tree, node_t *node, void *data) { unsigned *i = (unsigned *)data; node_t *search_node; assert_u32_eq(node->magic, NODE_MAGIC, "Bad magic"); /* Test rb_search(). */ search_node = tree_search(tree, node); assert_ptr_eq(search_node, node, "tree_search() returned unexpected node"); /* Test rb_nsearch(). */ search_node = tree_nsearch(tree, node); assert_ptr_eq(search_node, node, "tree_nsearch() returned unexpected node"); /* Test rb_psearch(). 
*/ search_node = tree_psearch(tree, node); assert_ptr_eq(search_node, node, "tree_psearch() returned unexpected node"); (*i)++; return (NULL); } static unsigned tree_iterate(tree_t *tree) { unsigned i; i = 0; tree_iter(tree, NULL, tree_iterate_cb, (void *)&i); return (i); } static unsigned tree_iterate_reverse(tree_t *tree) { unsigned i; i = 0; tree_reverse_iter(tree, NULL, tree_iterate_cb, (void *)&i); return (i); } static void node_remove(tree_t *tree, node_t *node, unsigned nnodes) { node_t *search_node; unsigned black_height, imbalances; tree_remove(tree, node); /* Test rb_nsearch(). */ search_node = tree_nsearch(tree, node); if (search_node != NULL) { assert_u64_ge(search_node->key, node->key, "Key ordering error"); } /* Test rb_psearch(). */ search_node = tree_psearch(tree, node); if (search_node != NULL) { assert_u64_le(search_node->key, node->key, "Key ordering error"); } node->magic = 0; rbtn_black_height(node_t, link, tree, black_height); imbalances = tree_recurse(tree->rbt_root, black_height, 0); assert_u_eq(imbalances, 0, "Tree is unbalanced"); assert_u_eq(tree_iterate(tree), nnodes-1, "Unexpected node iteration count"); assert_u_eq(tree_iterate_reverse(tree), nnodes-1, "Unexpected node iteration count"); } static node_t * remove_iterate_cb(tree_t *tree, node_t *node, void *data) { unsigned *nnodes = (unsigned *)data; node_t *ret = tree_next(tree, node); node_remove(tree, node, *nnodes); return (ret); } static node_t * remove_reverse_iterate_cb(tree_t *tree, node_t *node, void *data) { unsigned *nnodes = (unsigned *)data; node_t *ret = tree_prev(tree, node); node_remove(tree, node, *nnodes); return (ret); } static void destroy_cb(node_t *node, void *data) { unsigned *nnodes = (unsigned *)data; assert_u_gt(*nnodes, 0, "Destruction removed too many nodes"); (*nnodes)--; } TEST_BEGIN(test_rb_random) { #define NNODES 25 #define NBAGS 250 #define SEED 42 sfmt_t *sfmt; uint64_t bag[NNODES]; tree_t tree; node_t nodes[NNODES]; unsigned i, j, k, black_height, imbalances; sfmt = init_gen_rand(SEED); for (i = 0; i < NBAGS; i++) { switch (i) { case 0: /* Insert in order. */ for (j = 0; j < NNODES; j++) bag[j] = j; break; case 1: /* Insert in reverse order. */ for (j = 0; j < NNODES; j++) bag[j] = NNODES - j - 1; break; default: for (j = 0; j < NNODES; j++) bag[j] = gen_rand64_range(sfmt, NNODES); } for (j = 1; j <= NNODES; j++) { /* Initialize tree and nodes. */ tree_new(&tree); for (k = 0; k < j; k++) { nodes[k].magic = NODE_MAGIC; nodes[k].key = bag[k]; } /* Insert nodes. */ for (k = 0; k < j; k++) { tree_insert(&tree, &nodes[k]); rbtn_black_height(node_t, link, &tree, black_height); imbalances = tree_recurse(tree.rbt_root, black_height, 0); assert_u_eq(imbalances, 0, "Tree is unbalanced"); assert_u_eq(tree_iterate(&tree), k+1, "Unexpected node iteration count"); assert_u_eq(tree_iterate_reverse(&tree), k+1, "Unexpected node iteration count"); assert_false(tree_empty(&tree), "Tree should not be empty"); assert_ptr_not_null(tree_first(&tree), "Tree should not be empty"); assert_ptr_not_null(tree_last(&tree), "Tree should not be empty"); tree_next(&tree, &nodes[k]); tree_prev(&tree, &nodes[k]); } /* Remove nodes. 
*/ switch (i % 5) { case 0: for (k = 0; k < j; k++) node_remove(&tree, &nodes[k], j - k); break; case 1: for (k = j; k > 0; k--) node_remove(&tree, &nodes[k-1], k); break; case 2: { node_t *start; unsigned nnodes = j; start = NULL; do { start = tree_iter(&tree, start, remove_iterate_cb, (void *)&nnodes); nnodes--; } while (start != NULL); assert_u_eq(nnodes, 0, "Removal terminated early"); break; } case 3: { node_t *start; unsigned nnodes = j; start = NULL; do { start = tree_reverse_iter(&tree, start, remove_reverse_iterate_cb, (void *)&nnodes); nnodes--; } while (start != NULL); assert_u_eq(nnodes, 0, "Removal terminated early"); break; } case 4: { unsigned nnodes = j; tree_destroy(&tree, destroy_cb, &nnodes); assert_u_eq(nnodes, 0, "Destruction terminated early"); break; } default: not_reached(); } } } fini_gen_rand(sfmt); #undef NNODES #undef NBAGS #undef SEED } TEST_END int main(void) { return (test( test_rb_empty, test_rb_random)); }
7,865
21.157746
71
c
null
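tree_recurse() above counts per-path imbalances to validate the two red-black invariants: a red node never has a red child, and every root-to-leaf path crosses the same number of black nodes. The same check in isolation, over a toy node type (hypothetical; returns the subtree's black height, or -1 on any violation):

#include <stdbool.h>
#include <stddef.h>

typedef struct rbnode_s {
	struct rbnode_s *left, *right;
	bool red;
} rbnode_t;

static int
rb_check(const rbnode_t *node)
{
	int lh, rh;

	if (node == NULL)
		return (1);	/* NULL leaves count as black. */
	if (node->red && ((node->left != NULL && node->left->red) ||
	    (node->right != NULL && node->right->red)))
		return (-1);	/* Red node with a red child. */
	lh = rb_check(node->left);
	rh = rb_check(node->right);
	if (lh < 0 || rh < 0 || lh != rh)
		return (-1);	/* Black heights disagree. */
	return (lh + (node->red ? 0 : 1));
}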
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/jemalloc/test/unit/util.c
#include "test/jemalloc_test.h" #define TEST_POW2_CEIL(t, suf, pri) do { \ unsigned i, pow2; \ t x; \ \ assert_##suf##_eq(pow2_ceil_##suf(0), 0, "Unexpected result"); \ \ for (i = 0; i < sizeof(t) * 8; i++) { \ assert_##suf##_eq(pow2_ceil_##suf(((t)1) << i), ((t)1) \ << i, "Unexpected result"); \ } \ \ for (i = 2; i < sizeof(t) * 8; i++) { \ assert_##suf##_eq(pow2_ceil_##suf((((t)1) << i) - 1), \ ((t)1) << i, "Unexpected result"); \ } \ \ for (i = 0; i < sizeof(t) * 8 - 1; i++) { \ assert_##suf##_eq(pow2_ceil_##suf((((t)1) << i) + 1), \ ((t)1) << (i+1), "Unexpected result"); \ } \ \ for (pow2 = 1; pow2 < 25; pow2++) { \ for (x = (((t)1) << (pow2-1)) + 1; x <= ((t)1) << pow2; \ x++) { \ assert_##suf##_eq(pow2_ceil_##suf(x), \ ((t)1) << pow2, \ "Unexpected result, x=%"pri, x); \ } \ } \ } while (0) TEST_BEGIN(test_pow2_ceil_u64) { TEST_POW2_CEIL(uint64_t, u64, FMTu64); } TEST_END TEST_BEGIN(test_pow2_ceil_u32) { TEST_POW2_CEIL(uint32_t, u32, FMTu32); } TEST_END TEST_BEGIN(test_pow2_ceil_zu) { TEST_POW2_CEIL(size_t, zu, "zu"); } TEST_END TEST_BEGIN(test_malloc_strtoumax_no_endptr) { int err; set_errno(0); assert_ju_eq(malloc_strtoumax("0", NULL, 0), 0, "Unexpected result"); err = get_errno(); assert_d_eq(err, 0, "Unexpected failure"); } TEST_END TEST_BEGIN(test_malloc_strtoumax) { struct test_s { const char *input; const char *expected_remainder; int base; int expected_errno; const char *expected_errno_name; uintmax_t expected_x; }; #define ERR(e) e, #e #define KUMAX(x) ((uintmax_t)x##ULL) #define KSMAX(x) ((uintmax_t)(intmax_t)x##LL) struct test_s tests[] = { {"0", "0", -1, ERR(EINVAL), UINTMAX_MAX}, {"0", "0", 1, ERR(EINVAL), UINTMAX_MAX}, {"0", "0", 37, ERR(EINVAL), UINTMAX_MAX}, {"", "", 0, ERR(EINVAL), UINTMAX_MAX}, {"+", "+", 0, ERR(EINVAL), UINTMAX_MAX}, {"++3", "++3", 0, ERR(EINVAL), UINTMAX_MAX}, {"-", "-", 0, ERR(EINVAL), UINTMAX_MAX}, {"42", "", 0, ERR(0), KUMAX(42)}, {"+42", "", 0, ERR(0), KUMAX(42)}, {"-42", "", 0, ERR(0), KSMAX(-42)}, {"042", "", 0, ERR(0), KUMAX(042)}, {"+042", "", 0, ERR(0), KUMAX(042)}, {"-042", "", 0, ERR(0), KSMAX(-042)}, {"0x42", "", 0, ERR(0), KUMAX(0x42)}, {"+0x42", "", 0, ERR(0), KUMAX(0x42)}, {"-0x42", "", 0, ERR(0), KSMAX(-0x42)}, {"0", "", 0, ERR(0), KUMAX(0)}, {"1", "", 0, ERR(0), KUMAX(1)}, {"42", "", 0, ERR(0), KUMAX(42)}, {" 42", "", 0, ERR(0), KUMAX(42)}, {"42 ", " ", 0, ERR(0), KUMAX(42)}, {"0x", "x", 0, ERR(0), KUMAX(0)}, {"42x", "x", 0, ERR(0), KUMAX(42)}, {"07", "", 0, ERR(0), KUMAX(7)}, {"010", "", 0, ERR(0), KUMAX(8)}, {"08", "8", 0, ERR(0), KUMAX(0)}, {"0_", "_", 0, ERR(0), KUMAX(0)}, {"0x", "x", 0, ERR(0), KUMAX(0)}, {"0X", "X", 0, ERR(0), KUMAX(0)}, {"0xg", "xg", 0, ERR(0), KUMAX(0)}, {"0XA", "", 0, ERR(0), KUMAX(10)}, {"010", "", 10, ERR(0), KUMAX(10)}, {"0x3", "x3", 10, ERR(0), KUMAX(0)}, {"12", "2", 2, ERR(0), KUMAX(1)}, {"78", "8", 8, ERR(0), KUMAX(7)}, {"9a", "a", 10, ERR(0), KUMAX(9)}, {"9A", "A", 10, ERR(0), KUMAX(9)}, {"fg", "g", 16, ERR(0), KUMAX(15)}, {"FG", "G", 16, ERR(0), KUMAX(15)}, {"0xfg", "g", 16, ERR(0), KUMAX(15)}, {"0XFG", "G", 16, ERR(0), KUMAX(15)}, {"z_", "_", 36, ERR(0), KUMAX(35)}, {"Z_", "_", 36, ERR(0), KUMAX(35)} }; #undef ERR #undef KUMAX #undef KSMAX unsigned i; for (i = 0; i < sizeof(tests)/sizeof(struct test_s); i++) { struct test_s *test = &tests[i]; int err; uintmax_t result; char *remainder; set_errno(0); result = malloc_strtoumax(test->input, &remainder, test->base); err = get_errno(); assert_d_eq(err, test->expected_errno, "Expected errno %s for \"%s\", base %d", test->expected_errno_name, 
test->input, test->base); assert_str_eq(remainder, test->expected_remainder, "Unexpected remainder for \"%s\", base %d", test->input, test->base); if (err == 0) { assert_ju_eq(result, test->expected_x, "Unexpected result for \"%s\", base %d", test->input, test->base); } } } TEST_END TEST_BEGIN(test_malloc_snprintf_truncated) { #define BUFLEN 15 char buf[BUFLEN]; size_t result; size_t len; #define TEST(expected_str_untruncated, ...) do { \ result = malloc_snprintf(buf, len, __VA_ARGS__); \ assert_d_eq(strncmp(buf, expected_str_untruncated, len-1), 0, \ "Unexpected string inequality (\"%s\" vs \"%s\")", \ buf, expected_str_untruncated); \ assert_zu_eq(result, strlen(expected_str_untruncated), \ "Unexpected result"); \ } while (0) for (len = 1; len < BUFLEN; len++) { TEST("012346789", "012346789"); TEST("a0123b", "a%sb", "0123"); TEST("a01234567", "a%s%s", "0123", "4567"); TEST("a0123 ", "a%-6s", "0123"); TEST("a 0123", "a%6s", "0123"); TEST("a 012", "a%6.3s", "0123"); TEST("a 012", "a%*.*s", 6, 3, "0123"); TEST("a 123b", "a% db", 123); TEST("a123b", "a%-db", 123); TEST("a-123b", "a%-db", -123); TEST("a+123b", "a%+db", 123); } #undef BUFLEN #undef TEST } TEST_END TEST_BEGIN(test_malloc_snprintf) { #define BUFLEN 128 char buf[BUFLEN]; size_t result; #define TEST(expected_str, ...) do { \ result = malloc_snprintf(buf, sizeof(buf), __VA_ARGS__); \ assert_str_eq(buf, expected_str, "Unexpected output"); \ assert_zu_eq(result, strlen(expected_str), "Unexpected result");\ } while (0) TEST("hello", "hello"); TEST("50%, 100%", "50%%, %d%%", 100); TEST("a0123b", "a%sb", "0123"); TEST("a 0123b", "a%5sb", "0123"); TEST("a 0123b", "a%*sb", 5, "0123"); TEST("a0123 b", "a%-5sb", "0123"); TEST("a0123b", "a%*sb", -1, "0123"); TEST("a0123 b", "a%*sb", -5, "0123"); TEST("a0123 b", "a%-*sb", -5, "0123"); TEST("a012b", "a%.3sb", "0123"); TEST("a012b", "a%.*sb", 3, "0123"); TEST("a0123b", "a%.*sb", -3, "0123"); TEST("a 012b", "a%5.3sb", "0123"); TEST("a 012b", "a%5.*sb", 3, "0123"); TEST("a 012b", "a%*.3sb", 5, "0123"); TEST("a 012b", "a%*.*sb", 5, 3, "0123"); TEST("a 0123b", "a%*.*sb", 5, -3, "0123"); TEST("_abcd_", "_%x_", 0xabcd); TEST("_0xabcd_", "_%#x_", 0xabcd); TEST("_1234_", "_%o_", 01234); TEST("_01234_", "_%#o_", 01234); TEST("_1234_", "_%u_", 1234); TEST("_1234_", "_%d_", 1234); TEST("_ 1234_", "_% d_", 1234); TEST("_+1234_", "_%+d_", 1234); TEST("_-1234_", "_%d_", -1234); TEST("_-1234_", "_% d_", -1234); TEST("_-1234_", "_%+d_", -1234); TEST("_-1234_", "_%d_", -1234); TEST("_1234_", "_%d_", 1234); TEST("_-1234_", "_%i_", -1234); TEST("_1234_", "_%i_", 1234); TEST("_01234_", "_%#o_", 01234); TEST("_1234_", "_%u_", 1234); TEST("_0x1234abc_", "_%#x_", 0x1234abc); TEST("_0X1234ABC_", "_%#X_", 0x1234abc); TEST("_c_", "_%c_", 'c'); TEST("_string_", "_%s_", "string"); TEST("_0x42_", "_%p_", ((void *)0x42)); TEST("_-1234_", "_%ld_", ((long)-1234)); TEST("_1234_", "_%ld_", ((long)1234)); TEST("_-1234_", "_%li_", ((long)-1234)); TEST("_1234_", "_%li_", ((long)1234)); TEST("_01234_", "_%#lo_", ((long)01234)); TEST("_1234_", "_%lu_", ((long)1234)); TEST("_0x1234abc_", "_%#lx_", ((long)0x1234abc)); TEST("_0X1234ABC_", "_%#lX_", ((long)0x1234ABC)); TEST("_-1234_", "_%lld_", ((long long)-1234)); TEST("_1234_", "_%lld_", ((long long)1234)); TEST("_-1234_", "_%lli_", ((long long)-1234)); TEST("_1234_", "_%lli_", ((long long)1234)); TEST("_01234_", "_%#llo_", ((long long)01234)); TEST("_1234_", "_%llu_", ((long long)1234)); TEST("_0x1234abc_", "_%#llx_", ((long long)0x1234abc)); TEST("_0X1234ABC_", "_%#llX_", ((long 
long)0x1234ABC)); TEST("_-1234_", "_%qd_", ((long long)-1234)); TEST("_1234_", "_%qd_", ((long long)1234)); TEST("_-1234_", "_%qi_", ((long long)-1234)); TEST("_1234_", "_%qi_", ((long long)1234)); TEST("_01234_", "_%#qo_", ((long long)01234)); TEST("_1234_", "_%qu_", ((long long)1234)); TEST("_0x1234abc_", "_%#qx_", ((long long)0x1234abc)); TEST("_0X1234ABC_", "_%#qX_", ((long long)0x1234ABC)); TEST("_-1234_", "_%jd_", ((intmax_t)-1234)); TEST("_1234_", "_%jd_", ((intmax_t)1234)); TEST("_-1234_", "_%ji_", ((intmax_t)-1234)); TEST("_1234_", "_%ji_", ((intmax_t)1234)); TEST("_01234_", "_%#jo_", ((intmax_t)01234)); TEST("_1234_", "_%ju_", ((intmax_t)1234)); TEST("_0x1234abc_", "_%#jx_", ((intmax_t)0x1234abc)); TEST("_0X1234ABC_", "_%#jX_", ((intmax_t)0x1234ABC)); TEST("_1234_", "_%td_", ((ptrdiff_t)1234)); TEST("_-1234_", "_%td_", ((ptrdiff_t)-1234)); TEST("_1234_", "_%ti_", ((ptrdiff_t)1234)); TEST("_-1234_", "_%ti_", ((ptrdiff_t)-1234)); TEST("_-1234_", "_%zd_", ((ssize_t)-1234)); TEST("_1234_", "_%zd_", ((ssize_t)1234)); TEST("_-1234_", "_%zi_", ((ssize_t)-1234)); TEST("_1234_", "_%zi_", ((ssize_t)1234)); TEST("_01234_", "_%#zo_", ((ssize_t)01234)); TEST("_1234_", "_%zu_", ((ssize_t)1234)); TEST("_0x1234abc_", "_%#zx_", ((ssize_t)0x1234abc)); TEST("_0X1234ABC_", "_%#zX_", ((ssize_t)0x1234ABC)); #undef BUFLEN } TEST_END int main(void) { return (test( test_pow2_ceil_u64, test_pow2_ceil_u32, test_pow2_ceil_zu, test_malloc_strtoumax_no_endptr, test_malloc_strtoumax, test_malloc_snprintf_truncated, test_malloc_snprintf)); }
9,319
28.125
70
c
null
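TEST_POW2_CEIL pins down the classic branch-free round-up-to-a-power-of-two: decrement, smear the highest set bit all the way right, then increment. A standalone 64-bit version of the operation under test (the unsigned wraparound is what makes the 0 -> 0 case in the test's first assertion fall out naturally):

#include <stdint.h>

static uint64_t
pow2_ceil_u64(uint64_t x)
{
	x--;		/* Powers of two must map to themselves. */
	x |= x >> 1;	/* Propagate the highest set bit rightward... */
	x |= x >> 2;
	x |= x >> 4;
	x |= x >> 8;
	x |= x >> 16;
	x |= x >> 32;	/* ...until all lower bits are set. */
	x++;		/* All-ones plus one is the next power of two. */
	return (x);
}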
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/jemalloc/test/unit/prng.c
#include "test/jemalloc_test.h" static void test_prng_lg_range_u32(bool atomic) { uint32_t sa, sb, ra, rb; unsigned lg_range; sa = 42; ra = prng_lg_range_u32(&sa, 32, atomic); sa = 42; rb = prng_lg_range_u32(&sa, 32, atomic); assert_u32_eq(ra, rb, "Repeated generation should produce repeated results"); sb = 42; rb = prng_lg_range_u32(&sb, 32, atomic); assert_u32_eq(ra, rb, "Equivalent generation should produce equivalent results"); sa = 42; ra = prng_lg_range_u32(&sa, 32, atomic); rb = prng_lg_range_u32(&sa, 32, atomic); assert_u32_ne(ra, rb, "Full-width results must not immediately repeat"); sa = 42; ra = prng_lg_range_u32(&sa, 32, atomic); for (lg_range = 31; lg_range > 0; lg_range--) { sb = 42; rb = prng_lg_range_u32(&sb, lg_range, atomic); assert_u32_eq((rb & (UINT32_C(0xffffffff) << lg_range)), 0, "High order bits should be 0, lg_range=%u", lg_range); assert_u32_eq(rb, (ra >> (32 - lg_range)), "Expected high order bits of full-width result, " "lg_range=%u", lg_range); } } static void test_prng_lg_range_u64(void) { uint64_t sa, sb, ra, rb; unsigned lg_range; sa = 42; ra = prng_lg_range_u64(&sa, 64); sa = 42; rb = prng_lg_range_u64(&sa, 64); assert_u64_eq(ra, rb, "Repeated generation should produce repeated results"); sb = 42; rb = prng_lg_range_u64(&sb, 64); assert_u64_eq(ra, rb, "Equivalent generation should produce equivalent results"); sa = 42; ra = prng_lg_range_u64(&sa, 64); rb = prng_lg_range_u64(&sa, 64); assert_u64_ne(ra, rb, "Full-width results must not immediately repeat"); sa = 42; ra = prng_lg_range_u64(&sa, 64); for (lg_range = 63; lg_range > 0; lg_range--) { sb = 42; rb = prng_lg_range_u64(&sb, lg_range); assert_u64_eq((rb & (UINT64_C(0xffffffffffffffff) << lg_range)), 0, "High order bits should be 0, lg_range=%u", lg_range); assert_u64_eq(rb, (ra >> (64 - lg_range)), "Expected high order bits of full-width result, " "lg_range=%u", lg_range); } } static void test_prng_lg_range_zu(bool atomic) { size_t sa, sb, ra, rb; unsigned lg_range; sa = 42; ra = prng_lg_range_zu(&sa, ZU(1) << (3 + LG_SIZEOF_PTR), atomic); sa = 42; rb = prng_lg_range_zu(&sa, ZU(1) << (3 + LG_SIZEOF_PTR), atomic); assert_zu_eq(ra, rb, "Repeated generation should produce repeated results"); sb = 42; rb = prng_lg_range_zu(&sb, ZU(1) << (3 + LG_SIZEOF_PTR), atomic); assert_zu_eq(ra, rb, "Equivalent generation should produce equivalent results"); sa = 42; ra = prng_lg_range_zu(&sa, ZU(1) << (3 + LG_SIZEOF_PTR), atomic); rb = prng_lg_range_zu(&sa, ZU(1) << (3 + LG_SIZEOF_PTR), atomic); assert_zu_ne(ra, rb, "Full-width results must not immediately repeat"); sa = 42; ra = prng_lg_range_zu(&sa, ZU(1) << (3 + LG_SIZEOF_PTR), atomic); for (lg_range = (ZU(1) << (3 + LG_SIZEOF_PTR)) - 1; lg_range > 0; lg_range--) { sb = 42; rb = prng_lg_range_zu(&sb, lg_range, atomic); assert_zu_eq((rb & (SIZE_T_MAX << lg_range)), 0, "High order bits should be 0, lg_range=%u", lg_range); assert_zu_eq(rb, (ra >> ((ZU(1) << (3 + LG_SIZEOF_PTR)) - lg_range)), "Expected high order bits of full-width " "result, lg_range=%u", lg_range); } } TEST_BEGIN(test_prng_lg_range_u32_nonatomic) { test_prng_lg_range_u32(false); } TEST_END TEST_BEGIN(test_prng_lg_range_u32_atomic) { test_prng_lg_range_u32(true); } TEST_END TEST_BEGIN(test_prng_lg_range_u64_nonatomic) { test_prng_lg_range_u64(); } TEST_END TEST_BEGIN(test_prng_lg_range_zu_nonatomic) { test_prng_lg_range_zu(false); } TEST_END TEST_BEGIN(test_prng_lg_range_zu_atomic) { test_prng_lg_range_zu(true); } TEST_END static void test_prng_range_u32(bool atomic) { uint32_t range; #define 
MAX_RANGE 10000000 #define RANGE_STEP 97 #define NREPS 10 for (range = 2; range < MAX_RANGE; range += RANGE_STEP) { uint32_t s; unsigned rep; s = range; for (rep = 0; rep < NREPS; rep++) { uint32_t r = prng_range_u32(&s, range, atomic); assert_u32_lt(r, range, "Out of range"); } } } static void test_prng_range_u64(void) { uint64_t range; #define MAX_RANGE 10000000 #define RANGE_STEP 97 #define NREPS 10 for (range = 2; range < MAX_RANGE; range += RANGE_STEP) { uint64_t s; unsigned rep; s = range; for (rep = 0; rep < NREPS; rep++) { uint64_t r = prng_range_u64(&s, range); assert_u64_lt(r, range, "Out of range"); } } } static void test_prng_range_zu(bool atomic) { size_t range; #define MAX_RANGE 10000000 #define RANGE_STEP 97 #define NREPS 10 for (range = 2; range < MAX_RANGE; range += RANGE_STEP) { size_t s; unsigned rep; s = range; for (rep = 0; rep < NREPS; rep++) { size_t r = prng_range_zu(&s, range, atomic); assert_zu_lt(r, range, "Out of range"); } } } TEST_BEGIN(test_prng_range_u32_nonatomic) { test_prng_range_u32(false); } TEST_END TEST_BEGIN(test_prng_range_u32_atomic) { test_prng_range_u32(true); } TEST_END TEST_BEGIN(test_prng_range_u64_nonatomic) { test_prng_range_u64(); } TEST_END TEST_BEGIN(test_prng_range_zu_nonatomic) { test_prng_range_zu(false); } TEST_END TEST_BEGIN(test_prng_range_zu_atomic) { test_prng_range_zu(true); } TEST_END int main(void) { return (test( test_prng_lg_range_u32_nonatomic, test_prng_lg_range_u32_atomic, test_prng_lg_range_u64_nonatomic, test_prng_lg_range_zu_nonatomic, test_prng_lg_range_zu_atomic, test_prng_range_u32_nonatomic, test_prng_range_u32_atomic, test_prng_range_u64_nonatomic, test_prng_range_zu_nonatomic, test_prng_range_zu_atomic)); }
5,611
20.257576
66
c
null
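The *_range_* tests only assert r < range, but the technique underneath is worth spelling out: draw ceil(lg(range)) high-order bits from the generator and reject draws >= range, which yields a uniform result with no modulo bias. A self-contained sketch (the xorshift generator is illustrative, not jemalloc's; assumes a nonzero seed and 2 <= range):

#include <stdint.h>

static uint64_t
xorshift64(uint64_t *state)
{
	uint64_t x = *state;

	x ^= x << 13;
	x ^= x >> 7;
	x ^= x << 17;
	return (*state = x);
}

/* Uniform value in [0, range) via rejection sampling. */
static uint64_t
rand_range(uint64_t *state, uint64_t range)
{
	unsigned bits = 0;
	uint64_t r;

	while ((range - 1) >> bits != 0)
		bits++;		/* bits = ceil(log2(range)). */
	do {
		/* Keep only the bits needed to cover the range. */
		r = xorshift64(state) >> (64 - bits);
	} while (r >= range);	/* Reject to avoid bias. */
	return (r);
}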
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/jemalloc/test/unit/stats.c
#include "test/jemalloc_test.h" TEST_BEGIN(test_stats_summary) { size_t *cactive; size_t sz, allocated, active, resident, mapped; int expected = config_stats ? 0 : ENOENT; sz = sizeof(cactive); assert_d_eq(mallctl("stats.cactive", (void *)&cactive, &sz, NULL, 0), expected, "Unexpected mallctl() result"); sz = sizeof(size_t); assert_d_eq(mallctl("stats.allocated", (void *)&allocated, &sz, NULL, 0), expected, "Unexpected mallctl() result"); assert_d_eq(mallctl("stats.active", (void *)&active, &sz, NULL, 0), expected, "Unexpected mallctl() result"); assert_d_eq(mallctl("stats.resident", (void *)&resident, &sz, NULL, 0), expected, "Unexpected mallctl() result"); assert_d_eq(mallctl("stats.mapped", (void *)&mapped, &sz, NULL, 0), expected, "Unexpected mallctl() result"); if (config_stats) { assert_zu_le(active, *cactive, "active should be no larger than cactive"); assert_zu_le(allocated, active, "allocated should be no larger than active"); assert_zu_lt(active, resident, "active should be less than resident"); assert_zu_lt(active, mapped, "active should be less than mapped"); } } TEST_END TEST_BEGIN(test_stats_huge) { void *p; uint64_t epoch; size_t allocated; uint64_t nmalloc, ndalloc, nrequests; size_t sz; int expected = config_stats ? 0 : ENOENT; p = mallocx(large_maxclass+1, 0); assert_ptr_not_null(p, "Unexpected mallocx() failure"); assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)), 0, "Unexpected mallctl() failure"); sz = sizeof(size_t); assert_d_eq(mallctl("stats.arenas.0.huge.allocated", (void *)&allocated, &sz, NULL, 0), expected, "Unexpected mallctl() result"); sz = sizeof(uint64_t); assert_d_eq(mallctl("stats.arenas.0.huge.nmalloc", (void *)&nmalloc, &sz, NULL, 0), expected, "Unexpected mallctl() result"); assert_d_eq(mallctl("stats.arenas.0.huge.ndalloc", (void *)&ndalloc, &sz, NULL, 0), expected, "Unexpected mallctl() result"); assert_d_eq(mallctl("stats.arenas.0.huge.nrequests", (void *)&nrequests, &sz, NULL, 0), expected, "Unexpected mallctl() result"); if (config_stats) { assert_zu_gt(allocated, 0, "allocated should be greater than zero"); assert_u64_ge(nmalloc, ndalloc, "nmalloc should be at least as large as ndalloc"); assert_u64_le(nmalloc, nrequests, "nmalloc should no larger than nrequests"); } dallocx(p, 0); } TEST_END TEST_BEGIN(test_stats_arenas_summary) { unsigned arena; void *little, *large, *huge; uint64_t epoch; size_t sz; int expected = config_stats ? 
0 : ENOENT; size_t mapped; uint64_t npurge, nmadvise, purged; arena = 0; assert_d_eq(mallctl("thread.arena", NULL, NULL, (void *)&arena, sizeof(arena)), 0, "Unexpected mallctl() failure"); little = mallocx(SMALL_MAXCLASS, 0); assert_ptr_not_null(little, "Unexpected mallocx() failure"); large = mallocx(large_maxclass, 0); assert_ptr_not_null(large, "Unexpected mallocx() failure"); huge = mallocx(chunksize, 0); assert_ptr_not_null(huge, "Unexpected mallocx() failure"); dallocx(little, 0); dallocx(large, 0); dallocx(huge, 0); assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0, "Unexpected mallctl() failure"); assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)), 0, "Unexpected mallctl() failure"); sz = sizeof(size_t); assert_d_eq(mallctl("stats.arenas.0.mapped", (void *)&mapped, &sz, NULL, 0), expected, "Unexpected mallctl() result"); sz = sizeof(uint64_t); assert_d_eq(mallctl("stats.arenas.0.npurge", (void *)&npurge, &sz, NULL, 0), expected, "Unexpected mallctl() result"); assert_d_eq(mallctl("stats.arenas.0.nmadvise", (void *)&nmadvise, &sz, NULL, 0), expected, "Unexpected mallctl() result"); assert_d_eq(mallctl("stats.arenas.0.purged", (void *)&purged, &sz, NULL, 0), expected, "Unexpected mallctl() result"); if (config_stats) { assert_u64_gt(npurge, 0, "At least one purge should have occurred"); assert_u64_le(nmadvise, purged, "nmadvise should be no greater than purged"); } } TEST_END void * thd_start(void *arg) { return (NULL); } static void no_lazy_lock(void) { thd_t thd; thd_create(&thd, thd_start, NULL); thd_join(thd, NULL); } TEST_BEGIN(test_stats_arenas_small) { unsigned arena; void *p; size_t sz, allocated; uint64_t epoch, nmalloc, ndalloc, nrequests; int expected = config_stats ? 0 : ENOENT; no_lazy_lock(); /* Lazy locking would dodge tcache testing. */ arena = 0; assert_d_eq(mallctl("thread.arena", NULL, NULL, (void *)&arena, sizeof(arena)), 0, "Unexpected mallctl() failure"); p = mallocx(SMALL_MAXCLASS, 0); assert_ptr_not_null(p, "Unexpected mallocx() failure"); assert_d_eq(mallctl("thread.tcache.flush", NULL, NULL, NULL, 0), config_tcache ? 0 : ENOENT, "Unexpected mallctl() result"); assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)), 0, "Unexpected mallctl() failure"); sz = sizeof(size_t); assert_d_eq(mallctl("stats.arenas.0.small.allocated", (void *)&allocated, &sz, NULL, 0), expected, "Unexpected mallctl() result"); sz = sizeof(uint64_t); assert_d_eq(mallctl("stats.arenas.0.small.nmalloc", (void *)&nmalloc, &sz, NULL, 0), expected, "Unexpected mallctl() result"); assert_d_eq(mallctl("stats.arenas.0.small.ndalloc", (void *)&ndalloc, &sz, NULL, 0), expected, "Unexpected mallctl() result"); assert_d_eq(mallctl("stats.arenas.0.small.nrequests", (void *)&nrequests, &sz, NULL, 0), expected, "Unexpected mallctl() result"); if (config_stats) { assert_zu_gt(allocated, 0, "allocated should be greater than zero"); assert_u64_gt(nmalloc, 0, "nmalloc should be greater than zero"); assert_u64_ge(nmalloc, ndalloc, "nmalloc should be at least as large as ndalloc"); assert_u64_gt(nrequests, 0, "nrequests should be greater than zero"); } dallocx(p, 0); } TEST_END TEST_BEGIN(test_stats_arenas_large) { unsigned arena; void *p; size_t sz, allocated; uint64_t epoch, nmalloc, ndalloc, nrequests; int expected = config_stats ? 
0 : ENOENT; arena = 0; assert_d_eq(mallctl("thread.arena", NULL, NULL, (void *)&arena, sizeof(arena)), 0, "Unexpected mallctl() failure"); p = mallocx(large_maxclass, 0); assert_ptr_not_null(p, "Unexpected mallocx() failure"); assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)), 0, "Unexpected mallctl() failure"); sz = sizeof(size_t); assert_d_eq(mallctl("stats.arenas.0.large.allocated", (void *)&allocated, &sz, NULL, 0), expected, "Unexpected mallctl() result"); sz = sizeof(uint64_t); assert_d_eq(mallctl("stats.arenas.0.large.nmalloc", (void *)&nmalloc, &sz, NULL, 0), expected, "Unexpected mallctl() result"); assert_d_eq(mallctl("stats.arenas.0.large.ndalloc", (void *)&ndalloc, &sz, NULL, 0), expected, "Unexpected mallctl() result"); assert_d_eq(mallctl("stats.arenas.0.large.nrequests", (void *)&nrequests, &sz, NULL, 0), expected, "Unexpected mallctl() result"); if (config_stats) { assert_zu_gt(allocated, 0, "allocated should be greater than zero"); assert_u64_gt(nmalloc, 0, "nmalloc should be greater than zero"); assert_u64_ge(nmalloc, ndalloc, "nmalloc should be at least as large as ndalloc"); assert_u64_gt(nrequests, 0, "nrequests should be greater than zero"); } dallocx(p, 0); } TEST_END TEST_BEGIN(test_stats_arenas_huge) { unsigned arena; void *p; size_t sz, allocated; uint64_t epoch, nmalloc, ndalloc; int expected = config_stats ? 0 : ENOENT; arena = 0; assert_d_eq(mallctl("thread.arena", NULL, NULL, (void *)&arena, sizeof(arena)), 0, "Unexpected mallctl() failure"); p = mallocx(chunksize, 0); assert_ptr_not_null(p, "Unexpected mallocx() failure"); assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)), 0, "Unexpected mallctl() failure"); sz = sizeof(size_t); assert_d_eq(mallctl("stats.arenas.0.huge.allocated", (void *)&allocated, &sz, NULL, 0), expected, "Unexpected mallctl() result"); sz = sizeof(uint64_t); assert_d_eq(mallctl("stats.arenas.0.huge.nmalloc", (void *)&nmalloc, &sz, NULL, 0), expected, "Unexpected mallctl() result"); assert_d_eq(mallctl("stats.arenas.0.huge.ndalloc", (void *)&ndalloc, &sz, NULL, 0), expected, "Unexpected mallctl() result"); if (config_stats) { assert_zu_gt(allocated, 0, "allocated should be greater than zero"); assert_u64_gt(nmalloc, 0, "nmalloc should be greater than zero"); assert_u64_ge(nmalloc, ndalloc, "nmalloc should be at least as large as ndalloc"); } dallocx(p, 0); } TEST_END TEST_BEGIN(test_stats_arenas_bins) { unsigned arena; void *p; size_t sz, curruns, curregs; uint64_t epoch, nmalloc, ndalloc, nrequests, nfills, nflushes; uint64_t nruns, nreruns; int expected = config_stats ? 0 : ENOENT; arena = 0; assert_d_eq(mallctl("thread.arena", NULL, NULL, (void *)&arena, sizeof(arena)), 0, "Unexpected mallctl() failure"); p = mallocx(arena_bin_info[0].reg_size, 0); assert_ptr_not_null(p, "Unexpected mallocx() failure"); assert_d_eq(mallctl("thread.tcache.flush", NULL, NULL, NULL, 0), config_tcache ? 
0 : ENOENT, "Unexpected mallctl() result"); assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)), 0, "Unexpected mallctl() failure"); sz = sizeof(uint64_t); assert_d_eq(mallctl("stats.arenas.0.bins.0.nmalloc", (void *)&nmalloc, &sz, NULL, 0), expected, "Unexpected mallctl() result"); assert_d_eq(mallctl("stats.arenas.0.bins.0.ndalloc", (void *)&ndalloc, &sz, NULL, 0), expected, "Unexpected mallctl() result"); assert_d_eq(mallctl("stats.arenas.0.bins.0.nrequests", (void *)&nrequests, &sz, NULL, 0), expected, "Unexpected mallctl() result"); sz = sizeof(size_t); assert_d_eq(mallctl("stats.arenas.0.bins.0.curregs", (void *)&curregs, &sz, NULL, 0), expected, "Unexpected mallctl() result"); sz = sizeof(uint64_t); assert_d_eq(mallctl("stats.arenas.0.bins.0.nfills", (void *)&nfills, &sz, NULL, 0), config_tcache ? expected : ENOENT, "Unexpected mallctl() result"); assert_d_eq(mallctl("stats.arenas.0.bins.0.nflushes", (void *)&nflushes, &sz, NULL, 0), config_tcache ? expected : ENOENT, "Unexpected mallctl() result"); assert_d_eq(mallctl("stats.arenas.0.bins.0.nruns", (void *)&nruns, &sz, NULL, 0), expected, "Unexpected mallctl() result"); assert_d_eq(mallctl("stats.arenas.0.bins.0.nreruns", (void *)&nreruns, &sz, NULL, 0), expected, "Unexpected mallctl() result"); sz = sizeof(size_t); assert_d_eq(mallctl("stats.arenas.0.bins.0.curruns", (void *)&curruns, &sz, NULL, 0), expected, "Unexpected mallctl() result"); if (config_stats) { assert_u64_gt(nmalloc, 0, "nmalloc should be greater than zero"); assert_u64_ge(nmalloc, ndalloc, "nmalloc should be at least as large as ndalloc"); assert_u64_gt(nrequests, 0, "nrequests should be greater than zero"); assert_zu_gt(curregs, 0, "allocated should be greater than zero"); if (config_tcache) { assert_u64_gt(nfills, 0, "At least one fill should have occurred"); assert_u64_gt(nflushes, 0, "At least one flush should have occurred"); } assert_u64_gt(nruns, 0, "At least one run should have been allocated"); assert_zu_gt(curruns, 0, "At least one run should be currently allocated"); } dallocx(p, 0); } TEST_END TEST_BEGIN(test_stats_arenas_lruns) { unsigned arena; void *p; uint64_t epoch, nmalloc, ndalloc, nrequests; size_t curruns, sz; int expected = config_stats ? 
0 : ENOENT; arena = 0; assert_d_eq(mallctl("thread.arena", NULL, NULL, (void *)&arena, sizeof(arena)), 0, "Unexpected mallctl() failure"); p = mallocx(LARGE_MINCLASS, 0); assert_ptr_not_null(p, "Unexpected mallocx() failure"); assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)), 0, "Unexpected mallctl() failure"); sz = sizeof(uint64_t); assert_d_eq(mallctl("stats.arenas.0.lruns.0.nmalloc", (void *)&nmalloc, &sz, NULL, 0), expected, "Unexpected mallctl() result"); assert_d_eq(mallctl("stats.arenas.0.lruns.0.ndalloc", (void *)&ndalloc, &sz, NULL, 0), expected, "Unexpected mallctl() result"); assert_d_eq(mallctl("stats.arenas.0.lruns.0.nrequests", (void *)&nrequests, &sz, NULL, 0), expected, "Unexpected mallctl() result"); sz = sizeof(size_t); assert_d_eq(mallctl("stats.arenas.0.lruns.0.curruns", (void *)&curruns, &sz, NULL, 0), expected, "Unexpected mallctl() result"); if (config_stats) { assert_u64_gt(nmalloc, 0, "nmalloc should be greater than zero"); assert_u64_ge(nmalloc, ndalloc, "nmalloc should be at least as large as ndalloc"); assert_u64_gt(nrequests, 0, "nrequests should be greater than zero"); assert_u64_gt(curruns, 0, "At least one run should be currently allocated"); } dallocx(p, 0); } TEST_END TEST_BEGIN(test_stats_arenas_hchunks) { unsigned arena; void *p; uint64_t epoch, nmalloc, ndalloc; size_t curhchunks, sz; int expected = config_stats ? 0 : ENOENT; arena = 0; assert_d_eq(mallctl("thread.arena", NULL, NULL, (void *)&arena, sizeof(arena)), 0, "Unexpected mallctl() failure"); p = mallocx(chunksize, 0); assert_ptr_not_null(p, "Unexpected mallocx() failure"); assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)), 0, "Unexpected mallctl() failure"); sz = sizeof(uint64_t); assert_d_eq(mallctl("stats.arenas.0.hchunks.0.nmalloc", (void *)&nmalloc, &sz, NULL, 0), expected, "Unexpected mallctl() result"); assert_d_eq(mallctl("stats.arenas.0.hchunks.0.ndalloc", (void *)&ndalloc, &sz, NULL, 0), expected, "Unexpected mallctl() result"); sz = sizeof(size_t); assert_d_eq(mallctl("stats.arenas.0.hchunks.0.curhchunks", (void *)&curhchunks, &sz, NULL, 0), expected, "Unexpected mallctl() result"); if (config_stats) { assert_u64_gt(nmalloc, 0, "nmalloc should be greater than zero"); assert_u64_ge(nmalloc, ndalloc, "nmalloc should be at least as large as ndalloc"); assert_u64_gt(curhchunks, 0, "At least one chunk should be currently allocated"); } dallocx(p, 0); } TEST_END int main(void) { return (test( test_stats_summary, test_stats_huge, test_stats_arenas_summary, test_stats_arenas_small, test_stats_arenas_large, test_stats_arenas_huge, test_stats_arenas_bins, test_stats_arenas_lruns, test_stats_arenas_hchunks)); }
14,583
30.912473
73
c
null
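Every test above writes "epoch" before reading stats.* because jemalloc serves statistics from a cached snapshot that refreshes only when the epoch is advanced. The documented refresh-then-read pattern, using only the public API:

#include <stdio.h>
#include <jemalloc/jemalloc.h>

int
main(void)
{
	uint64_t epoch = 1;
	size_t sz, allocated, active;

	/* Writing any value to "epoch" refreshes the snapshot. */
	if (mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)) != 0)
		return (1);
	sz = sizeof(size_t);
	mallctl("stats.allocated", &allocated, &sz, NULL, 0);
	mallctl("stats.active", &active, &sz, NULL, 0);
	printf("allocated=%zu active=%zu\n", allocated, active);
	return (0);
}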
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/jemalloc/test/unit/nstime.c
#include "test/jemalloc_test.h" #define BILLION UINT64_C(1000000000) TEST_BEGIN(test_nstime_init) { nstime_t nst; nstime_init(&nst, 42000000043); assert_u64_eq(nstime_ns(&nst), 42000000043, "ns incorrectly read"); assert_u64_eq(nstime_sec(&nst), 42, "sec incorrectly read"); assert_u64_eq(nstime_nsec(&nst), 43, "nsec incorrectly read"); } TEST_END TEST_BEGIN(test_nstime_init2) { nstime_t nst; nstime_init2(&nst, 42, 43); assert_u64_eq(nstime_sec(&nst), 42, "sec incorrectly read"); assert_u64_eq(nstime_nsec(&nst), 43, "nsec incorrectly read"); } TEST_END TEST_BEGIN(test_nstime_copy) { nstime_t nsta, nstb; nstime_init2(&nsta, 42, 43); nstime_init(&nstb, 0); nstime_copy(&nstb, &nsta); assert_u64_eq(nstime_sec(&nstb), 42, "sec incorrectly copied"); assert_u64_eq(nstime_nsec(&nstb), 43, "nsec incorrectly copied"); } TEST_END TEST_BEGIN(test_nstime_compare) { nstime_t nsta, nstb; nstime_init2(&nsta, 42, 43); nstime_copy(&nstb, &nsta); assert_d_eq(nstime_compare(&nsta, &nstb), 0, "Times should be equal"); assert_d_eq(nstime_compare(&nstb, &nsta), 0, "Times should be equal"); nstime_init2(&nstb, 42, 42); assert_d_eq(nstime_compare(&nsta, &nstb), 1, "nsta should be greater than nstb"); assert_d_eq(nstime_compare(&nstb, &nsta), -1, "nstb should be less than nsta"); nstime_init2(&nstb, 42, 44); assert_d_eq(nstime_compare(&nsta, &nstb), -1, "nsta should be less than nstb"); assert_d_eq(nstime_compare(&nstb, &nsta), 1, "nstb should be greater than nsta"); nstime_init2(&nstb, 41, BILLION - 1); assert_d_eq(nstime_compare(&nsta, &nstb), 1, "nsta should be greater than nstb"); assert_d_eq(nstime_compare(&nstb, &nsta), -1, "nstb should be less than nsta"); nstime_init2(&nstb, 43, 0); assert_d_eq(nstime_compare(&nsta, &nstb), -1, "nsta should be less than nstb"); assert_d_eq(nstime_compare(&nstb, &nsta), 1, "nstb should be greater than nsta"); } TEST_END TEST_BEGIN(test_nstime_add) { nstime_t nsta, nstb; nstime_init2(&nsta, 42, 43); nstime_copy(&nstb, &nsta); nstime_add(&nsta, &nstb); nstime_init2(&nstb, 84, 86); assert_d_eq(nstime_compare(&nsta, &nstb), 0, "Incorrect addition result"); nstime_init2(&nsta, 42, BILLION - 1); nstime_copy(&nstb, &nsta); nstime_add(&nsta, &nstb); nstime_init2(&nstb, 85, BILLION - 2); assert_d_eq(nstime_compare(&nsta, &nstb), 0, "Incorrect addition result"); } TEST_END TEST_BEGIN(test_nstime_subtract) { nstime_t nsta, nstb; nstime_init2(&nsta, 42, 43); nstime_copy(&nstb, &nsta); nstime_subtract(&nsta, &nstb); nstime_init(&nstb, 0); assert_d_eq(nstime_compare(&nsta, &nstb), 0, "Incorrect subtraction result"); nstime_init2(&nsta, 42, 43); nstime_init2(&nstb, 41, 44); nstime_subtract(&nsta, &nstb); nstime_init2(&nstb, 0, BILLION - 1); assert_d_eq(nstime_compare(&nsta, &nstb), 0, "Incorrect subtraction result"); } TEST_END TEST_BEGIN(test_nstime_imultiply) { nstime_t nsta, nstb; nstime_init2(&nsta, 42, 43); nstime_imultiply(&nsta, 10); nstime_init2(&nstb, 420, 430); assert_d_eq(nstime_compare(&nsta, &nstb), 0, "Incorrect multiplication result"); nstime_init2(&nsta, 42, 666666666); nstime_imultiply(&nsta, 3); nstime_init2(&nstb, 127, 999999998); assert_d_eq(nstime_compare(&nsta, &nstb), 0, "Incorrect multiplication result"); } TEST_END TEST_BEGIN(test_nstime_idivide) { nstime_t nsta, nstb; nstime_init2(&nsta, 42, 43); nstime_copy(&nstb, &nsta); nstime_imultiply(&nsta, 10); nstime_idivide(&nsta, 10); assert_d_eq(nstime_compare(&nsta, &nstb), 0, "Incorrect division result"); nstime_init2(&nsta, 42, 666666666); nstime_copy(&nstb, &nsta); nstime_imultiply(&nsta, 3); nstime_idivide(&nsta, 
3); assert_d_eq(nstime_compare(&nsta, &nstb), 0, "Incorrect division result"); } TEST_END TEST_BEGIN(test_nstime_divide) { nstime_t nsta, nstb, nstc; nstime_init2(&nsta, 42, 43); nstime_copy(&nstb, &nsta); nstime_imultiply(&nsta, 10); assert_u64_eq(nstime_divide(&nsta, &nstb), 10, "Incorrect division result"); nstime_init2(&nsta, 42, 43); nstime_copy(&nstb, &nsta); nstime_imultiply(&nsta, 10); nstime_init(&nstc, 1); nstime_add(&nsta, &nstc); assert_u64_eq(nstime_divide(&nsta, &nstb), 10, "Incorrect division result"); nstime_init2(&nsta, 42, 43); nstime_copy(&nstb, &nsta); nstime_imultiply(&nsta, 10); nstime_init(&nstc, 1); nstime_subtract(&nsta, &nstc); assert_u64_eq(nstime_divide(&nsta, &nstb), 9, "Incorrect division result"); } TEST_END TEST_BEGIN(test_nstime_monotonic) { nstime_monotonic(); } TEST_END TEST_BEGIN(test_nstime_update) { nstime_t nst; nstime_init(&nst, 0); assert_false(nstime_update(&nst), "Basic time update failed."); /* Only Rip Van Winkle sleeps this long. */ { nstime_t addend; nstime_init2(&addend, 631152000, 0); nstime_add(&nst, &addend); } { nstime_t nst0; nstime_copy(&nst0, &nst); assert_true(nstime_update(&nst), "Update should detect time roll-back."); assert_d_eq(nstime_compare(&nst, &nst0), 0, "Time should not have been modified"); } } TEST_END int main(void) { return (test( test_nstime_init, test_nstime_init2, test_nstime_copy, test_nstime_compare, test_nstime_add, test_nstime_subtract, test_nstime_imultiply, test_nstime_idivide, test_nstime_divide, test_nstime_monotonic, test_nstime_update)); }
5,414
22.75
71
c
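The nstime tests above pivot on nanosecond carry across the seconds boundary (see the 42,BILLION-1 cases in test_nstime_add). A minimal sketch of that arithmetic, written in the same test-harness style; all names come from the test itself, and the 42.5 s / 0.75 s operands are arbitrary:

#include "test/jemalloc_test.h"

/* 42.5 s + 0.75 s must carry one second: 43.25 s, with nsec kept below 10^9. */
static void
nstime_carry_sketch(void)
{
	nstime_t a, b;

	nstime_init2(&a, 42, 500000000);
	nstime_init2(&b, 0, 750000000);
	nstime_add(&a, &b);
	assert_u64_eq(nstime_sec(&a), 43, "Expected carry into sec");
	assert_u64_eq(nstime_nsec(&a), 250000000, "nsec should wrap below 1e9");
}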
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/jemalloc/test/unit/atomic.c
#include "test/jemalloc_test.h" #define TEST_STRUCT(p, t) \ struct p##_test_s { \ t accum0; \ t x; \ t s; \ }; \ typedef struct p##_test_s p##_test_t; #define TEST_BODY(p, t, tc, ta, FMT) do { \ const p##_test_t tests[] = { \ {(t)-1, (t)-1, (t)-2}, \ {(t)-1, (t) 0, (t)-2}, \ {(t)-1, (t) 1, (t)-2}, \ \ {(t) 0, (t)-1, (t)-2}, \ {(t) 0, (t) 0, (t)-2}, \ {(t) 0, (t) 1, (t)-2}, \ \ {(t) 1, (t)-1, (t)-2}, \ {(t) 1, (t) 0, (t)-2}, \ {(t) 1, (t) 1, (t)-2}, \ \ {(t)0, (t)-(1 << 22), (t)-2}, \ {(t)0, (t)(1 << 22), (t)-2}, \ {(t)(1 << 22), (t)-(1 << 22), (t)-2}, \ {(t)(1 << 22), (t)(1 << 22), (t)-2} \ }; \ unsigned i; \ \ for (i = 0; i < sizeof(tests)/sizeof(p##_test_t); i++) { \ bool err; \ t accum = tests[i].accum0; \ assert_##ta##_eq(atomic_read_##p(&accum), \ tests[i].accum0, \ "Erroneous read, i=%u", i); \ \ assert_##ta##_eq(atomic_add_##p(&accum, tests[i].x), \ (t)((tc)tests[i].accum0 + (tc)tests[i].x), \ "i=%u, accum=%"FMT", x=%"FMT, \ i, tests[i].accum0, tests[i].x); \ assert_##ta##_eq(atomic_read_##p(&accum), accum, \ "Erroneous add, i=%u", i); \ \ accum = tests[i].accum0; \ assert_##ta##_eq(atomic_sub_##p(&accum, tests[i].x), \ (t)((tc)tests[i].accum0 - (tc)tests[i].x), \ "i=%u, accum=%"FMT", x=%"FMT, \ i, tests[i].accum0, tests[i].x); \ assert_##ta##_eq(atomic_read_##p(&accum), accum, \ "Erroneous sub, i=%u", i); \ \ accum = tests[i].accum0; \ err = atomic_cas_##p(&accum, tests[i].x, tests[i].s); \ assert_b_eq(err, tests[i].accum0 != tests[i].x, \ "Erroneous cas success/failure result"); \ assert_##ta##_eq(accum, err ? tests[i].accum0 : \ tests[i].s, "Erroneous cas effect, i=%u", i); \ \ accum = tests[i].accum0; \ atomic_write_##p(&accum, tests[i].s); \ assert_##ta##_eq(accum, tests[i].s, \ "Erroneous write, i=%u", i); \ } \ } while (0) TEST_STRUCT(uint64, uint64_t) TEST_BEGIN(test_atomic_uint64) { #if !(LG_SIZEOF_PTR == 3 || LG_SIZEOF_INT == 3) test_skip("64-bit atomic operations not supported"); #else TEST_BODY(uint64, uint64_t, uint64_t, u64, FMTx64); #endif } TEST_END TEST_STRUCT(uint32, uint32_t) TEST_BEGIN(test_atomic_uint32) { TEST_BODY(uint32, uint32_t, uint32_t, u32, "#"FMTx32); } TEST_END TEST_STRUCT(p, void *) TEST_BEGIN(test_atomic_p) { TEST_BODY(p, void *, uintptr_t, ptr, "p"); } TEST_END TEST_STRUCT(z, size_t) TEST_BEGIN(test_atomic_z) { TEST_BODY(z, size_t, size_t, zu, "#zx"); } TEST_END TEST_STRUCT(u, unsigned) TEST_BEGIN(test_atomic_u) { TEST_BODY(u, unsigned, unsigned, u, "#x"); } TEST_END int main(void) { return (test( test_atomic_uint64, test_atomic_uint32, test_atomic_p, test_atomic_z, test_atomic_u)); }
2,991
23.325203
59
c
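One subtlety the TEST_BODY macro above encodes: atomic_cas_*() returns false on a successful swap and true on failure, leaving the target untouched on failure (that is what assert_b_eq(err, tests[i].accum0 != tests[i].x) checks). A sketch of that convention in isolation, assuming the same internal atomic API:

#include "test/jemalloc_test.h"

static void
cas_convention_sketch(void)
{
	size_t v = 1;

	assert_false(atomic_cas_z(&v, 1, 2), "Swap of matching value succeeds");
	assert_true(atomic_cas_z(&v, 1, 3), "Mismatch fails, v is unchanged");
	assert_zu_eq(atomic_read_z(&v), 2, "v retains the swapped-in value");
}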
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/jemalloc/test/unit/bitmap.c
#include "test/jemalloc_test.h" TEST_BEGIN(test_bitmap_size) { size_t i, prev_size; prev_size = 0; for (i = 1; i <= BITMAP_MAXBITS; i++) { bitmap_info_t binfo; size_t size; bitmap_info_init(&binfo, i); size = bitmap_size(&binfo); assert_true(size >= prev_size, "Bitmap size is smaller than expected"); prev_size = size; } } TEST_END TEST_BEGIN(test_bitmap_init) { size_t i; for (i = 1; i <= BITMAP_MAXBITS; i++) { bitmap_info_t binfo; bitmap_info_init(&binfo, i); { size_t j; bitmap_t *bitmap = (bitmap_t *)malloc( bitmap_size(&binfo)); bitmap_init(bitmap, &binfo); for (j = 0; j < i; j++) { assert_false(bitmap_get(bitmap, &binfo, j), "Bit should be unset"); } free(bitmap); } } } TEST_END TEST_BEGIN(test_bitmap_set) { size_t i; for (i = 1; i <= BITMAP_MAXBITS; i++) { bitmap_info_t binfo; bitmap_info_init(&binfo, i); { size_t j; bitmap_t *bitmap = (bitmap_t *)malloc( bitmap_size(&binfo)); bitmap_init(bitmap, &binfo); for (j = 0; j < i; j++) bitmap_set(bitmap, &binfo, j); assert_true(bitmap_full(bitmap, &binfo), "All bits should be set"); free(bitmap); } } } TEST_END TEST_BEGIN(test_bitmap_unset) { size_t i; for (i = 1; i <= BITMAP_MAXBITS; i++) { bitmap_info_t binfo; bitmap_info_init(&binfo, i); { size_t j; bitmap_t *bitmap = (bitmap_t *)malloc( bitmap_size(&binfo)); bitmap_init(bitmap, &binfo); for (j = 0; j < i; j++) bitmap_set(bitmap, &binfo, j); assert_true(bitmap_full(bitmap, &binfo), "All bits should be set"); for (j = 0; j < i; j++) bitmap_unset(bitmap, &binfo, j); for (j = 0; j < i; j++) bitmap_set(bitmap, &binfo, j); assert_true(bitmap_full(bitmap, &binfo), "All bits should be set"); free(bitmap); } } } TEST_END TEST_BEGIN(test_bitmap_sfu) { size_t i; for (i = 1; i <= BITMAP_MAXBITS; i++) { bitmap_info_t binfo; bitmap_info_init(&binfo, i); { size_t j; bitmap_t *bitmap = (bitmap_t *)malloc( bitmap_size(&binfo)); bitmap_init(bitmap, &binfo); /* Iteratively set bits starting at the beginning. */ for (j = 0; j < i; j++) { assert_zd_eq(bitmap_sfu(bitmap, &binfo), j, "First unset bit should be just after " "previous first unset bit"); } assert_true(bitmap_full(bitmap, &binfo), "All bits should be set"); /* * Iteratively unset bits starting at the end, and * verify that bitmap_sfu() reaches the unset bits. */ for (j = i - 1; j < i; j--) { /* (i..0] */ bitmap_unset(bitmap, &binfo, j); assert_zd_eq(bitmap_sfu(bitmap, &binfo), j, "First unset bit should the bit previously " "unset"); bitmap_unset(bitmap, &binfo, j); } assert_false(bitmap_get(bitmap, &binfo, 0), "Bit should be unset"); /* * Iteratively set bits starting at the beginning, and * verify that bitmap_sfu() looks past them. */ for (j = 1; j < i; j++) { bitmap_set(bitmap, &binfo, j - 1); assert_zd_eq(bitmap_sfu(bitmap, &binfo), j, "First unset bit should be just after the " "bit previously set"); bitmap_unset(bitmap, &binfo, j); } assert_zd_eq(bitmap_sfu(bitmap, &binfo), i - 1, "First unset bit should be the last bit"); assert_true(bitmap_full(bitmap, &binfo), "All bits should be set"); free(bitmap); } } } TEST_END int main(void) { return (test( test_bitmap_size, test_bitmap_init, test_bitmap_set, test_bitmap_unset, test_bitmap_sfu)); }
3,574
20.79878
57
c
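bitmap_sfu() ("set first unset") both finds and claims a bit in one call, which is why test_bitmap_sfu can loop over it without an explicit bitmap_set(). A sketch of the slot-allocation idiom the tests exercise; the 100-bit size is an arbitrary assumption:

#include "test/jemalloc_test.h"

static void
bitmap_slot_sketch(void)
{
	bitmap_info_t binfo;
	bitmap_t *bitmap;
	size_t slot;

	bitmap_info_init(&binfo, 100);	/* 100 bits: arbitrary choice */
	bitmap = (bitmap_t *)malloc(bitmap_size(&binfo));
	if (bitmap == NULL)
		return;
	bitmap_init(bitmap, &binfo);
	slot = bitmap_sfu(bitmap, &binfo);	/* returns 0 and sets bit 0 */
	bitmap_unset(bitmap, &binfo, slot);	/* hand the slot back */
	free(bitmap);
}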
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/jemalloc/test/unit/junk.c
#include "test/jemalloc_test.h" #ifdef JEMALLOC_FILL # ifndef JEMALLOC_TEST_JUNK_OPT # define JEMALLOC_TEST_JUNK_OPT "junk:true" # endif const char *malloc_conf = "abort:false,zero:false,redzone:true,quarantine:0," JEMALLOC_TEST_JUNK_OPT; #endif static arena_dalloc_junk_small_t *arena_dalloc_junk_small_orig; static arena_dalloc_junk_large_t *arena_dalloc_junk_large_orig; static huge_dalloc_junk_t *huge_dalloc_junk_orig; static void *watch_for_junking; static bool saw_junking; static void watch_junking(void *p) { watch_for_junking = p; saw_junking = false; } static void arena_dalloc_junk_small_intercept(void *ptr, arena_bin_info_t *bin_info) { size_t i; arena_dalloc_junk_small_orig(ptr, bin_info); for (i = 0; i < bin_info->reg_size; i++) { assert_u_eq(((uint8_t *)ptr)[i], JEMALLOC_FREE_JUNK, "Missing junk fill for byte %zu/%zu of deallocated region", i, bin_info->reg_size); } if (ptr == watch_for_junking) saw_junking = true; } static void arena_dalloc_junk_large_intercept(void *ptr, size_t usize) { size_t i; arena_dalloc_junk_large_orig(ptr, usize); for (i = 0; i < usize; i++) { assert_u_eq(((uint8_t *)ptr)[i], JEMALLOC_FREE_JUNK, "Missing junk fill for byte %zu/%zu of deallocated region", i, usize); } if (ptr == watch_for_junking) saw_junking = true; } static void huge_dalloc_junk_intercept(void *ptr, size_t usize) { huge_dalloc_junk_orig(ptr, usize); /* * The conditions under which junk filling actually occurs are nuanced * enough that it doesn't make sense to duplicate the decision logic in * test code, so don't actually check that the region is junk-filled. */ if (ptr == watch_for_junking) saw_junking = true; } static void test_junk(size_t sz_min, size_t sz_max) { uint8_t *s; size_t sz_prev, sz, i; if (opt_junk_free) { arena_dalloc_junk_small_orig = arena_dalloc_junk_small; arena_dalloc_junk_small = arena_dalloc_junk_small_intercept; arena_dalloc_junk_large_orig = arena_dalloc_junk_large; arena_dalloc_junk_large = arena_dalloc_junk_large_intercept; huge_dalloc_junk_orig = huge_dalloc_junk; huge_dalloc_junk = huge_dalloc_junk_intercept; } sz_prev = 0; s = (uint8_t *)mallocx(sz_min, 0); assert_ptr_not_null((void *)s, "Unexpected mallocx() failure"); for (sz = sallocx(s, 0); sz <= sz_max; sz_prev = sz, sz = sallocx(s, 0)) { if (sz_prev > 0) { assert_u_eq(s[0], 'a', "Previously allocated byte %zu/%zu is corrupted", ZU(0), sz_prev); assert_u_eq(s[sz_prev-1], 'a', "Previously allocated byte %zu/%zu is corrupted", sz_prev-1, sz_prev); } for (i = sz_prev; i < sz; i++) { if (opt_junk_alloc) { assert_u_eq(s[i], JEMALLOC_ALLOC_JUNK, "Newly allocated byte %zu/%zu isn't " "junk-filled", i, sz); } s[i] = 'a'; } if (xallocx(s, sz+1, 0, 0) == sz) { watch_junking(s); s = (uint8_t *)rallocx(s, sz+1, 0); assert_ptr_not_null((void *)s, "Unexpected rallocx() failure"); assert_true(!opt_junk_free || saw_junking, "Expected region of size %zu to be junk-filled", sz); } } watch_junking(s); dallocx(s, 0); assert_true(!opt_junk_free || saw_junking, "Expected region of size %zu to be junk-filled", sz); if (opt_junk_free) { arena_dalloc_junk_small = arena_dalloc_junk_small_orig; arena_dalloc_junk_large = arena_dalloc_junk_large_orig; huge_dalloc_junk = huge_dalloc_junk_orig; } } TEST_BEGIN(test_junk_small) { test_skip_if(!config_fill); test_junk(1, SMALL_MAXCLASS-1); } TEST_END TEST_BEGIN(test_junk_large) { test_skip_if(!config_fill); test_junk(SMALL_MAXCLASS+1, large_maxclass); } TEST_END TEST_BEGIN(test_junk_huge) { test_skip_if(!config_fill); test_junk(large_maxclass+1, chunksize*2); } TEST_END 
arena_ralloc_junk_large_t *arena_ralloc_junk_large_orig; static void *most_recently_trimmed; static size_t shrink_size(size_t size) { size_t shrink_size; for (shrink_size = size - 1; nallocx(shrink_size, 0) == size; shrink_size--) ; /* Do nothing. */ return (shrink_size); } static void arena_ralloc_junk_large_intercept(void *ptr, size_t old_usize, size_t usize) { arena_ralloc_junk_large_orig(ptr, old_usize, usize); assert_zu_eq(old_usize, large_maxclass, "Unexpected old_usize"); assert_zu_eq(usize, shrink_size(large_maxclass), "Unexpected usize"); most_recently_trimmed = ptr; } TEST_BEGIN(test_junk_large_ralloc_shrink) { void *p1, *p2; p1 = mallocx(large_maxclass, 0); assert_ptr_not_null(p1, "Unexpected mallocx() failure"); arena_ralloc_junk_large_orig = arena_ralloc_junk_large; arena_ralloc_junk_large = arena_ralloc_junk_large_intercept; p2 = rallocx(p1, shrink_size(large_maxclass), 0); assert_ptr_eq(p1, p2, "Unexpected move during shrink"); arena_ralloc_junk_large = arena_ralloc_junk_large_orig; assert_ptr_eq(most_recently_trimmed, p1, "Expected trimmed portion of region to be junk-filled"); } TEST_END static bool detected_redzone_corruption; static void arena_redzone_corruption_replacement(void *ptr, size_t usize, bool after, size_t offset, uint8_t byte) { detected_redzone_corruption = true; } TEST_BEGIN(test_junk_redzone) { char *s; arena_redzone_corruption_t *arena_redzone_corruption_orig; test_skip_if(!config_fill); test_skip_if(!opt_junk_alloc || !opt_junk_free); arena_redzone_corruption_orig = arena_redzone_corruption; arena_redzone_corruption = arena_redzone_corruption_replacement; /* Test underflow. */ detected_redzone_corruption = false; s = (char *)mallocx(1, 0); assert_ptr_not_null((void *)s, "Unexpected mallocx() failure"); s[-1] = 0xbb; dallocx(s, 0); assert_true(detected_redzone_corruption, "Did not detect redzone corruption"); /* Test overflow. */ detected_redzone_corruption = false; s = (char *)mallocx(1, 0); assert_ptr_not_null((void *)s, "Unexpected mallocx() failure"); s[sallocx(s, 0)] = 0xbb; dallocx(s, 0); assert_true(detected_redzone_corruption, "Did not detect redzone corruption"); arena_redzone_corruption = arena_redzone_corruption_orig; } TEST_END int main(void) { return (test( test_junk_small, test_junk_large, test_junk_huge, test_junk_large_ralloc_shrink, test_junk_redzone)); }
6,244
23.586614
79
c
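The junk tests only bite when jemalloc is built with --enable-fill and run with junk filling enabled (hence the malloc_conf string at the top of the file). A sketch of the allocation-side invariant test_junk() scans for; the concrete fill values behind JEMALLOC_ALLOC_JUNK/JEMALLOC_FREE_JUNK (0xa5/0x5a in stock builds) are an assumption, not stated in this file:

#include "test/jemalloc_test.h"

static void
junk_fill_sketch(void)
{
	uint8_t *p = (uint8_t *)mallocx(64, 0);
	size_t i;

	assert_ptr_not_null((void *)p, "Unexpected mallocx() failure");
	if (config_fill && opt_junk_alloc) {
		for (i = 0; i < 64; i++) {
			assert_u_eq(p[i], JEMALLOC_ALLOC_JUNK,
			    "Byte %zu should carry the alloc-junk pattern", i);
		}
	}
	dallocx(p, 0);
}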
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/jemalloc/test/unit/ckh.c
#include "test/jemalloc_test.h" TEST_BEGIN(test_new_delete) { tsd_t *tsd; ckh_t ckh; tsd = tsd_fetch(); assert_false(ckh_new(tsd, &ckh, 2, ckh_string_hash, ckh_string_keycomp), "Unexpected ckh_new() error"); ckh_delete(tsd, &ckh); assert_false(ckh_new(tsd, &ckh, 3, ckh_pointer_hash, ckh_pointer_keycomp), "Unexpected ckh_new() error"); ckh_delete(tsd, &ckh); } TEST_END TEST_BEGIN(test_count_insert_search_remove) { tsd_t *tsd; ckh_t ckh; const char *strs[] = { "a string", "A string", "a string.", "A string." }; const char *missing = "A string not in the hash table."; size_t i; tsd = tsd_fetch(); assert_false(ckh_new(tsd, &ckh, 2, ckh_string_hash, ckh_string_keycomp), "Unexpected ckh_new() error"); assert_zu_eq(ckh_count(&ckh), 0, "ckh_count() should return %zu, but it returned %zu", ZU(0), ckh_count(&ckh)); /* Insert. */ for (i = 0; i < sizeof(strs)/sizeof(const char *); i++) { ckh_insert(tsd, &ckh, strs[i], strs[i]); assert_zu_eq(ckh_count(&ckh), i+1, "ckh_count() should return %zu, but it returned %zu", i+1, ckh_count(&ckh)); } /* Search. */ for (i = 0; i < sizeof(strs)/sizeof(const char *); i++) { union { void *p; const char *s; } k, v; void **kp, **vp; const char *ks, *vs; kp = (i & 1) ? &k.p : NULL; vp = (i & 2) ? &v.p : NULL; k.p = NULL; v.p = NULL; assert_false(ckh_search(&ckh, strs[i], kp, vp), "Unexpected ckh_search() error"); ks = (i & 1) ? strs[i] : (const char *)NULL; vs = (i & 2) ? strs[i] : (const char *)NULL; assert_ptr_eq((void *)ks, (void *)k.s, "Key mismatch, i=%zu", i); assert_ptr_eq((void *)vs, (void *)v.s, "Value mismatch, i=%zu", i); } assert_true(ckh_search(&ckh, missing, NULL, NULL), "Unexpected ckh_search() success"); /* Remove. */ for (i = 0; i < sizeof(strs)/sizeof(const char *); i++) { union { void *p; const char *s; } k, v; void **kp, **vp; const char *ks, *vs; kp = (i & 1) ? &k.p : NULL; vp = (i & 2) ? &v.p : NULL; k.p = NULL; v.p = NULL; assert_false(ckh_remove(tsd, &ckh, strs[i], kp, vp), "Unexpected ckh_remove() error"); ks = (i & 1) ? strs[i] : (const char *)NULL; vs = (i & 2) ? 
strs[i] : (const char *)NULL; assert_ptr_eq((void *)ks, (void *)k.s, "Key mismatch, i=%zu", i); assert_ptr_eq((void *)vs, (void *)v.s, "Value mismatch, i=%zu", i); assert_zu_eq(ckh_count(&ckh), sizeof(strs)/sizeof(const char *) - i - 1, "ckh_count() should return %zu, but it returned %zu", sizeof(strs)/sizeof(const char *) - i - 1, ckh_count(&ckh)); } ckh_delete(tsd, &ckh); } TEST_END TEST_BEGIN(test_insert_iter_remove) { #define NITEMS ZU(1000) tsd_t *tsd; ckh_t ckh; void **p[NITEMS]; void *q, *r; size_t i; tsd = tsd_fetch(); assert_false(ckh_new(tsd, &ckh, 2, ckh_pointer_hash, ckh_pointer_keycomp), "Unexpected ckh_new() error"); for (i = 0; i < NITEMS; i++) { p[i] = mallocx(i+1, 0); assert_ptr_not_null(p[i], "Unexpected mallocx() failure"); } for (i = 0; i < NITEMS; i++) { size_t j; for (j = i; j < NITEMS; j++) { assert_false(ckh_insert(tsd, &ckh, p[j], p[j]), "Unexpected ckh_insert() failure"); assert_false(ckh_search(&ckh, p[j], &q, &r), "Unexpected ckh_search() failure"); assert_ptr_eq(p[j], q, "Key pointer mismatch"); assert_ptr_eq(p[j], r, "Value pointer mismatch"); } assert_zu_eq(ckh_count(&ckh), NITEMS, "ckh_count() should return %zu, but it returned %zu", NITEMS, ckh_count(&ckh)); for (j = i + 1; j < NITEMS; j++) { assert_false(ckh_search(&ckh, p[j], NULL, NULL), "Unexpected ckh_search() failure"); assert_false(ckh_remove(tsd, &ckh, p[j], &q, &r), "Unexpected ckh_remove() failure"); assert_ptr_eq(p[j], q, "Key pointer mismatch"); assert_ptr_eq(p[j], r, "Value pointer mismatch"); assert_true(ckh_search(&ckh, p[j], NULL, NULL), "Unexpected ckh_search() success"); assert_true(ckh_remove(tsd, &ckh, p[j], &q, &r), "Unexpected ckh_remove() success"); } { bool seen[NITEMS]; size_t tabind; memset(seen, 0, sizeof(seen)); for (tabind = 0; !ckh_iter(&ckh, &tabind, &q, &r);) { size_t k; assert_ptr_eq(q, r, "Key and val not equal"); for (k = 0; k < NITEMS; k++) { if (p[k] == q) { assert_false(seen[k], "Item %zu already seen", k); seen[k] = true; break; } } } for (j = 0; j < i + 1; j++) assert_true(seen[j], "Item %zu not seen", j); for (; j < NITEMS; j++) assert_false(seen[j], "Item %zu seen", j); } } for (i = 0; i < NITEMS; i++) { assert_false(ckh_search(&ckh, p[i], NULL, NULL), "Unexpected ckh_search() failure"); assert_false(ckh_remove(tsd, &ckh, p[i], &q, &r), "Unexpected ckh_remove() failure"); assert_ptr_eq(p[i], q, "Key pointer mismatch"); assert_ptr_eq(p[i], r, "Value pointer mismatch"); assert_true(ckh_search(&ckh, p[i], NULL, NULL), "Unexpected ckh_search() success"); assert_true(ckh_remove(tsd, &ckh, p[i], &q, &r), "Unexpected ckh_remove() success"); dallocx(p[i], 0); } assert_zu_eq(ckh_count(&ckh), 0, "ckh_count() should return %zu, but it returned %zu", ZU(0), ckh_count(&ckh)); ckh_delete(tsd, &ckh); #undef NITEMS } TEST_END int main(void) { return (test( test_new_delete, test_count_insert_search_remove, test_insert_iter_remove)); }
5,467
24.432558
65
c
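Like ckh_insert() and ckh_remove(), ckh_search() follows a false-on-success convention, which is why the test wraps hits in assert_false() and misses in assert_true(). A small usage sketch under that convention, assuming the same cuckoo-hash API:

#include "test/jemalloc_test.h"

static void
ckh_usage_sketch(tsd_t *tsd)
{
	ckh_t ckh;
	void *val;

	if (ckh_new(tsd, &ckh, 2, ckh_string_hash, ckh_string_keycomp))
		return;	/* allocation failure */
	if (!ckh_insert(tsd, &ckh, "key", "value") &&
	    !ckh_search(&ckh, "key", NULL, &val)) {
		/* val now points at "value"; false means success. */
	}
	ckh_delete(tsd, &ckh);
}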
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/jemalloc/test/unit/run_quantize.c
#include "test/jemalloc_test.h" TEST_BEGIN(test_small_run_size) { unsigned nbins, i; size_t sz, run_size; size_t mib[4]; size_t miblen = sizeof(mib) / sizeof(size_t); /* * Iterate over all small size classes, get their run sizes, and verify * that the quantized size is the same as the run size. */ sz = sizeof(unsigned); assert_d_eq(mallctl("arenas.nbins", (void *)&nbins, &sz, NULL, 0), 0, "Unexpected mallctl failure"); assert_d_eq(mallctlnametomib("arenas.bin.0.run_size", mib, &miblen), 0, "Unexpected mallctlnametomib failure"); for (i = 0; i < nbins; i++) { mib[2] = i; sz = sizeof(size_t); assert_d_eq(mallctlbymib(mib, miblen, (void *)&run_size, &sz, NULL, 0), 0, "Unexpected mallctlbymib failure"); assert_zu_eq(run_size, run_quantize_floor(run_size), "Small run quantization should be a no-op (run_size=%zu)", run_size); assert_zu_eq(run_size, run_quantize_ceil(run_size), "Small run quantization should be a no-op (run_size=%zu)", run_size); } } TEST_END TEST_BEGIN(test_large_run_size) { bool cache_oblivious; unsigned nlruns, i; size_t sz, run_size_prev, ceil_prev; size_t mib[4]; size_t miblen = sizeof(mib) / sizeof(size_t); /* * Iterate over all large size classes, get their run sizes, and verify * that the quantized size is the same as the run size. */ sz = sizeof(bool); assert_d_eq(mallctl("config.cache_oblivious", (void *)&cache_oblivious, &sz, NULL, 0), 0, "Unexpected mallctl failure"); sz = sizeof(unsigned); assert_d_eq(mallctl("arenas.nlruns", (void *)&nlruns, &sz, NULL, 0), 0, "Unexpected mallctl failure"); assert_d_eq(mallctlnametomib("arenas.lrun.0.size", mib, &miblen), 0, "Unexpected mallctlnametomib failure"); for (i = 0; i < nlruns; i++) { size_t lrun_size, run_size, floor, ceil; mib[2] = i; sz = sizeof(size_t); assert_d_eq(mallctlbymib(mib, miblen, (void *)&lrun_size, &sz, NULL, 0), 0, "Unexpected mallctlbymib failure"); run_size = cache_oblivious ? lrun_size + PAGE : lrun_size; floor = run_quantize_floor(run_size); ceil = run_quantize_ceil(run_size); assert_zu_eq(run_size, floor, "Large run quantization should be a no-op for precise " "size (lrun_size=%zu, run_size=%zu)", lrun_size, run_size); assert_zu_eq(run_size, ceil, "Large run quantization should be a no-op for precise " "size (lrun_size=%zu, run_size=%zu)", lrun_size, run_size); if (i > 0) { assert_zu_eq(run_size_prev, run_quantize_floor(run_size - PAGE), "Floor should be a precise size"); if (run_size_prev < ceil_prev) { assert_zu_eq(ceil_prev, run_size, "Ceiling should be a precise size " "(run_size_prev=%zu, ceil_prev=%zu, " "run_size=%zu)", run_size_prev, ceil_prev, run_size); } } run_size_prev = floor; ceil_prev = run_quantize_ceil(run_size + PAGE); } } TEST_END TEST_BEGIN(test_monotonic) { unsigned nbins, nlruns, i; size_t sz, floor_prev, ceil_prev; /* * Iterate over all run sizes and verify that * run_quantize_{floor,ceil}() are monotonic. 
*/ sz = sizeof(unsigned); assert_d_eq(mallctl("arenas.nbins", (void *)&nbins, &sz, NULL, 0), 0, "Unexpected mallctl failure"); sz = sizeof(unsigned); assert_d_eq(mallctl("arenas.nlruns", (void *)&nlruns, &sz, NULL, 0), 0, "Unexpected mallctl failure"); floor_prev = 0; ceil_prev = 0; for (i = 1; i <= chunksize >> LG_PAGE; i++) { size_t run_size, floor, ceil; run_size = i << LG_PAGE; floor = run_quantize_floor(run_size); ceil = run_quantize_ceil(run_size); assert_zu_le(floor, run_size, "Floor should be <= (floor=%zu, run_size=%zu, ceil=%zu)", floor, run_size, ceil); assert_zu_ge(ceil, run_size, "Ceiling should be >= (floor=%zu, run_size=%zu, ceil=%zu)", floor, run_size, ceil); assert_zu_le(floor_prev, floor, "Floor should be monotonic " "(floor_prev=%zu, floor=%zu, run_size=%zu, ceil=%zu)", floor_prev, floor, run_size, ceil); assert_zu_le(ceil_prev, ceil, "Ceiling should be monotonic " "(floor=%zu, run_size=%zu, ceil_prev=%zu, ceil=%zu)", floor, run_size, ceil_prev, ceil); floor_prev = floor; ceil_prev = ceil; } } TEST_END int main(void) { return (test( test_small_run_size, test_large_run_size, test_monotonic)); }
4,340
27.94
72
c
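test_monotonic reduces to a few point-wise invariants per run size. Restated for a single size (the multiplier 5 is arbitrary; the no-op property of quantizing an already-precise size is the one the first two tests check):

#include "test/jemalloc_test.h"

static void
quantize_invariants_sketch(void)
{
	size_t run_size = 5 << LG_PAGE;
	size_t floor = run_quantize_floor(run_size);
	size_t ceil = run_quantize_ceil(run_size);

	assert_zu_le(floor, run_size, "Floor may not exceed the input");
	assert_zu_ge(ceil, run_size, "Ceiling may not undershoot the input");
	assert_zu_eq(run_quantize_floor(floor), floor,
	    "Quantizing a quantized size should be a no-op");
}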
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/jemalloc/test/unit/math.c
#include "test/jemalloc_test.h" #define MAX_REL_ERR 1.0e-9 #define MAX_ABS_ERR 1.0e-9 #include <float.h> #ifdef __PGI #undef INFINITY #endif #ifndef INFINITY #define INFINITY (DBL_MAX + DBL_MAX) #endif static bool double_eq_rel(double a, double b, double max_rel_err, double max_abs_err) { double rel_err; if (fabs(a - b) < max_abs_err) return (true); rel_err = (fabs(b) > fabs(a)) ? fabs((a-b)/b) : fabs((a-b)/a); return (rel_err < max_rel_err); } static uint64_t factorial(unsigned x) { uint64_t ret = 1; unsigned i; for (i = 2; i <= x; i++) ret *= (uint64_t)i; return (ret); } TEST_BEGIN(test_ln_gamma_factorial) { unsigned x; /* exp(ln_gamma(x)) == (x-1)! for integer x. */ for (x = 1; x <= 21; x++) { assert_true(double_eq_rel(exp(ln_gamma(x)), (double)factorial(x-1), MAX_REL_ERR, MAX_ABS_ERR), "Incorrect factorial result for x=%u", x); } } TEST_END /* Expected ln_gamma([0.0..100.0] increment=0.25). */ static const double ln_gamma_misc_expected[] = { INFINITY, 1.28802252469807743, 0.57236494292470008, 0.20328095143129538, 0.00000000000000000, -0.09827183642181320, -0.12078223763524518, -0.08440112102048555, 0.00000000000000000, 0.12487171489239651, 0.28468287047291918, 0.47521466691493719, 0.69314718055994529, 0.93580193110872523, 1.20097360234707429, 1.48681557859341718, 1.79175946922805496, 2.11445692745037128, 2.45373657084244234, 2.80857141857573644, 3.17805383034794575, 3.56137591038669710, 3.95781396761871651, 4.36671603662228680, 4.78749174278204581, 5.21960398699022932, 5.66256205985714178, 6.11591589143154568, 6.57925121201010121, 7.05218545073853953, 7.53436423675873268, 8.02545839631598312, 8.52516136106541467, 9.03318691960512332, 9.54926725730099690, 10.07315123968123949, 10.60460290274525086, 11.14340011995171231, 11.68933342079726856, 12.24220494005076176, 12.80182748008146909, 13.36802367147604720, 13.94062521940376342, 14.51947222506051816, 15.10441257307551943, 15.69530137706046524, 16.29200047656724237, 16.89437797963419285, 17.50230784587389010, 18.11566950571089407, 18.73434751193644843, 19.35823122022435427, 19.98721449566188468, 20.62119544270163018, 21.26007615624470048, 21.90376249182879320, 22.55216385312342098, 23.20519299513386002, 23.86276584168908954, 24.52480131594137802, 25.19122118273868338, 25.86194990184851861, 26.53691449111561340, 27.21604439872720604, 27.89927138384089389, 28.58652940490193828, 29.27775451504081516, 29.97288476399884871, 30.67186010608067548, 31.37462231367769050, 32.08111489594735843, 32.79128302226991565, 33.50507345013689076, 34.22243445715505317, 34.94331577687681545, 35.66766853819134298, 36.39544520803305261, 37.12659953718355865, 37.86108650896109395, 38.59886229060776230, 39.33988418719949465, 40.08411059791735198, 40.83150097453079752, 41.58201578195490100, 42.33561646075348506, 43.09226539146988699, 43.85192586067515208, 44.61456202863158893, 45.38013889847690052, 46.14862228684032885, 46.91997879580877395, 47.69417578616628361, 48.47118135183522014, 49.25096429545256882, 50.03349410501914463, 50.81874093156324790, 51.60667556776436982, 52.39726942748592364, 53.19049452616926743, 53.98632346204390586, 54.78472939811231157, 55.58568604486942633, 56.38916764371992940, 57.19514895105859864, 58.00360522298051080, 58.81451220059079787, 59.62784609588432261, 60.44358357816834371, 61.26170176100199427, 62.08217818962842927, 62.90499082887649962, 63.73011805151035958, 64.55753862700632340, 65.38723171073768015, 66.21917683354901385, 67.05335389170279825, 67.88974313718154008, 68.72832516833013017, 69.56908092082363737, 
70.41199165894616385, 71.25703896716800045, 72.10420474200799390, 72.95347118416940191, 73.80482079093779646, 74.65823634883015814, 75.51370092648485866, 76.37119786778275454, 77.23071078519033961, 78.09222355331530707, 78.95572030266725960, 79.82118541361435859, 80.68860351052903468, 81.55795945611502873, 82.42923834590904164, 83.30242550295004378, 84.17750647261028973, 85.05446701758152983, 85.93329311301090456, 86.81397094178107920, 87.69648688992882057, 88.58082754219766741, 89.46697967771913795, 90.35493026581838194, 91.24466646193963015, 92.13617560368709292, 93.02944520697742803, 93.92446296229978486, 94.82121673107967297, 95.71969454214321615, 96.61988458827809723, 97.52177522288820910, 98.42535495673848800, 99.33061245478741341, 100.23753653310367895, 101.14611615586458981, 102.05634043243354370, 102.96819861451382394, 103.88168009337621811, 104.79677439715833032, 105.71347118823287303, 106.63176026064346047, 107.55163153760463501, 108.47307506906540198, 109.39608102933323153, 110.32063971475740516, 111.24674154146920557, 112.17437704317786995, 113.10353686902013237, 114.03421178146170689, 114.96639265424990128, 115.90007047041454769, 116.83523632031698014, 117.77188139974506953, 118.70999700805310795, 119.64957454634490830, 120.59060551569974962, 121.53308151543865279, 122.47699424143097247, 123.42233548443955726, 124.36909712850338394, 125.31727114935689826, 126.26684961288492559, 127.21782467361175861, 128.17018857322420899, 129.12393363912724453, 130.07905228303084755, 131.03553699956862033, 131.99338036494577864, 132.95257503561629164, 133.91311374698926784, 134.87498931216194364, 135.83819462068046846, 136.80272263732638294, 137.76856640092901785, 138.73571902320256299, 139.70417368760718091, 140.67392364823425055, 141.64496222871400732, 142.61728282114600574, 143.59087888505104047, 144.56574394634486680, 145.54187159633210058, 146.51925549072063859, 147.49788934865566148, 148.47776695177302031, 149.45888214327129617, 150.44122882700193600, 151.42480096657754984, 152.40959258449737490, 153.39559776128982094, 154.38281063467164245, 155.37122539872302696, 156.36083630307879844, 157.35163765213474107, 158.34362380426921391, 159.33678917107920370, 160.33112821663092973, 161.32663545672428995, 162.32330545817117695, 163.32113283808695314, 164.32011226319519892, 165.32023844914485267, 166.32150615984036790, 167.32391020678358018, 168.32744544842768164, 169.33210678954270634, 170.33788918059275375, 171.34478761712384198, 172.35279713916281707, 173.36191283062726143, 174.37212981874515094, 175.38344327348534080, 176.39584840699734514, 177.40934047306160437, 178.42391476654847793, 179.43956662288721304, 180.45629141754378111, 181.47408456550741107, 182.49294152078630304, 183.51285777591152737, 184.53382886144947861, 185.55585034552262869, 186.57891783333786861, 187.60302696672312095, 188.62817342367162610, 189.65435291789341932, 190.68156119837468054, 191.70979404894376330, 192.73904728784492590, 193.76931676731820176, 194.80059837318714244, 195.83288802445184729, 196.86618167288995096, 197.90047530266301123, 198.93576492992946214, 199.97204660246373464, 201.00931639928148797, 202.04757043027063901, 203.08680483582807597, 204.12701578650228385, 205.16819948264117102, 206.21035215404597807, 207.25347005962987623, 208.29754948708190909, 209.34258675253678916, 210.38857820024875878, 211.43552020227099320, 212.48340915813977858, 213.53224149456323744, 214.58201366511514152, 215.63272214993284592, 216.68436345542014010, 217.73693411395422004, 218.79043068359703739, 219.84484974781133815, 
220.90018791517996988, 221.95644181913033322, 223.01360811766215875, 224.07168349307951871, 225.13066465172661879, 226.19054832372759734, 227.25133126272962159, 228.31301024565024704, 229.37558207242807384, 230.43904356577689896, 231.50339157094342113, 232.56862295546847008, 233.63473460895144740, 234.70172344281823484, 235.76958639009222907, 236.83832040516844586, 237.90792246359117712, 238.97838956183431947, 240.04971871708477238, 241.12190696702904802, 242.19495136964280846, 243.26884900298270509, 244.34359696498191283, 245.41919237324782443, 246.49563236486270057, 247.57291409618682110, 248.65103474266476269, 249.72999149863338175, 250.80978157713354904, 251.89040220972316320, 252.97185064629374551, 254.05412415488834199, 255.13722002152300661, 256.22113555000953511, 257.30586806178126835, 258.39141489572085675, 259.47777340799029844, 260.56494097186322279, 261.65291497755913497, 262.74169283208021852, 263.83127195904967266, 264.92164979855277807, 266.01282380697938379, 267.10479145686849733, 268.19755023675537586, 269.29109765101975427, 270.38543121973674488, 271.48054847852881721, 272.57644697842033565, 273.67312428569374561, 274.77057798174683967, 275.86880566295326389, 276.96780494052313770, 278.06757344036617496, 279.16810880295668085, 280.26940868320008349, 281.37147075030043197, 282.47429268763045229, 283.57787219260217171, 284.68220697654078322, 285.78729476455760050, 286.89313329542699194, 287.99972032146268930, 289.10705360839756395, 290.21513093526289140, 291.32395009427028754, 292.43350889069523646, 293.54380514276073200, 294.65483668152336350, 295.76660135076059532, 296.87909700685889902, 297.99232151870342022, 299.10627276756946458, 300.22094864701409733, 301.33634706277030091, 302.45246593264130297, 303.56930318639643929, 304.68685676566872189, 305.80512462385280514, 306.92410472600477078, 308.04379504874236773, 309.16419358014690033, 310.28529831966631036, 311.40710727801865687, 312.52961847709792664, 313.65282994987899201, 314.77673974032603610, 315.90134590329950015, 317.02664650446632777, 318.15263962020929966, 319.27932333753892635, 320.40669575400545455, 321.53475497761127144, 322.66349912672620803, 323.79292633000159185, 324.92303472628691452, 326.05382246454587403, 327.18528770377525916, 328.31742861292224234, 329.45024337080525356, 330.58373016603343331, 331.71788719692847280, 332.85271267144611329, 333.98820480709991898, 335.12436183088397001, 336.26118197919845443, 337.39866349777429377, 338.53680464159958774, 339.67560367484657036, 340.81505887079896411, 341.95516851178109619, 343.09593088908627578, 344.23734430290727460, 345.37940706226686416, 346.52211748494903532, 347.66547389743118401, 348.80947463481720661, 349.95411804077025408, 351.09940246744753267, 352.24532627543504759, 353.39188783368263103, 354.53908551944078908, 355.68691771819692349, 356.83538282361303118, 357.98447923746385868, 359.13420536957539753 }; TEST_BEGIN(test_ln_gamma_misc) { unsigned i; for (i = 1; i < sizeof(ln_gamma_misc_expected)/sizeof(double); i++) { double x = (double)i * 0.25; assert_true(double_eq_rel(ln_gamma(x), ln_gamma_misc_expected[i], MAX_REL_ERR, MAX_ABS_ERR), "Incorrect ln_gamma result for i=%u", i); } } TEST_END /* Expected pt_norm([0.01..0.99] increment=0.01). 
*/ static const double pt_norm_expected[] = { -INFINITY, -2.32634787404084076, -2.05374891063182252, -1.88079360815125085, -1.75068607125216946, -1.64485362695147264, -1.55477359459685305, -1.47579102817917063, -1.40507156030963221, -1.34075503369021654, -1.28155156554460081, -1.22652812003661049, -1.17498679206608991, -1.12639112903880045, -1.08031934081495606, -1.03643338949378938, -0.99445788320975281, -0.95416525314619416, -0.91536508784281390, -0.87789629505122846, -0.84162123357291418, -0.80642124701824025, -0.77219321418868492, -0.73884684918521371, -0.70630256284008752, -0.67448975019608171, -0.64334540539291685, -0.61281299101662701, -0.58284150727121620, -0.55338471955567281, -0.52440051270804067, -0.49585034734745320, -0.46769879911450812, -0.43991316567323380, -0.41246312944140462, -0.38532046640756751, -0.35845879325119373, -0.33185334643681652, -0.30548078809939738, -0.27931903444745404, -0.25334710313579978, -0.22754497664114931, -0.20189347914185077, -0.17637416478086135, -0.15096921549677725, -0.12566134685507399, -0.10043372051146975, -0.07526986209982976, -0.05015358346473352, -0.02506890825871106, 0.00000000000000000, 0.02506890825871106, 0.05015358346473366, 0.07526986209982990, 0.10043372051146990, 0.12566134685507413, 0.15096921549677739, 0.17637416478086146, 0.20189347914185105, 0.22754497664114931, 0.25334710313579978, 0.27931903444745404, 0.30548078809939738, 0.33185334643681652, 0.35845879325119373, 0.38532046640756762, 0.41246312944140484, 0.43991316567323391, 0.46769879911450835, 0.49585034734745348, 0.52440051270804111, 0.55338471955567303, 0.58284150727121620, 0.61281299101662701, 0.64334540539291685, 0.67448975019608171, 0.70630256284008752, 0.73884684918521371, 0.77219321418868492, 0.80642124701824036, 0.84162123357291441, 0.87789629505122879, 0.91536508784281423, 0.95416525314619460, 0.99445788320975348, 1.03643338949378938, 1.08031934081495606, 1.12639112903880045, 1.17498679206608991, 1.22652812003661049, 1.28155156554460081, 1.34075503369021654, 1.40507156030963265, 1.47579102817917085, 1.55477359459685394, 1.64485362695147308, 1.75068607125217102, 1.88079360815125041, 2.05374891063182208, 2.32634787404084076 }; TEST_BEGIN(test_pt_norm) { unsigned i; for (i = 1; i < sizeof(pt_norm_expected)/sizeof(double); i++) { double p = (double)i * 0.01; assert_true(double_eq_rel(pt_norm(p), pt_norm_expected[i], MAX_REL_ERR, MAX_ABS_ERR), "Incorrect pt_norm result for i=%u", i); } } TEST_END /* * Expected pt_chi2(p=[0.01..0.99] increment=0.07, * df={0.1, 1.1, 10.1, 100.1, 1000.1}). 
*/ static const double pt_chi2_df[] = {0.1, 1.1, 10.1, 100.1, 1000.1}; static const double pt_chi2_expected[] = { 1.168926411457320e-40, 1.347680397072034e-22, 3.886980416666260e-17, 8.245951724356564e-14, 2.068936347497604e-11, 1.562561743309233e-09, 5.459543043426564e-08, 1.114775688149252e-06, 1.532101202364371e-05, 1.553884683726585e-04, 1.239396954915939e-03, 8.153872320255721e-03, 4.631183739647523e-02, 2.473187311701327e-01, 2.175254800183617e+00, 0.0003729887888876379, 0.0164409238228929513, 0.0521523015190650113, 0.1064701372271216612, 0.1800913735793082115, 0.2748704281195626931, 0.3939246282787986497, 0.5420727552260817816, 0.7267265822221973259, 0.9596554296000253670, 1.2607440376386165326, 1.6671185084541604304, 2.2604828984738705167, 3.2868613342148607082, 6.9298574921692139839, 2.606673548632508, 4.602913725294877, 5.646152813924212, 6.488971315540869, 7.249823275816285, 7.977314231410841, 8.700354939944047, 9.441728024225892, 10.224338321374127, 11.076435368801061, 12.039320937038386, 13.183878752697167, 14.657791935084575, 16.885728216339373, 23.361991680031817, 70.14844087392152, 80.92379498849355, 85.53325420085891, 88.94433120715347, 91.83732712857017, 94.46719943606301, 96.96896479994635, 99.43412843510363, 101.94074719829733, 104.57228644307247, 107.43900093448734, 110.71844673417287, 114.76616819871325, 120.57422505959563, 135.92318818757556, 899.0072447849649, 937.9271278858220, 953.8117189560207, 965.3079371501154, 974.8974061207954, 983.4936235182347, 991.5691170518946, 999.4334123954690, 1007.3391826856553, 1015.5445154999951, 1024.3777075619569, 1034.3538789836223, 1046.4872561869577, 1063.5717461999654, 1107.0741966053859 }; TEST_BEGIN(test_pt_chi2) { unsigned i, j; unsigned e = 0; for (i = 0; i < sizeof(pt_chi2_df)/sizeof(double); i++) { double df = pt_chi2_df[i]; double ln_gamma_df = ln_gamma(df * 0.5); for (j = 1; j < 100; j += 7) { double p = (double)j * 0.01; assert_true(double_eq_rel(pt_chi2(p, df, ln_gamma_df), pt_chi2_expected[e], MAX_REL_ERR, MAX_ABS_ERR), "Incorrect pt_chi2 result for i=%u, j=%u", i, j); e++; } } } TEST_END /* * Expected pt_gamma(p=[0.1..0.99] increment=0.07, * shape=[0.5..3.0] increment=0.5). 
*/ static const double pt_gamma_shape[] = {0.5, 1.0, 1.5, 2.0, 2.5, 3.0}; static const double pt_gamma_expected[] = { 7.854392895485103e-05, 5.043466107888016e-03, 1.788288957794883e-02, 3.900956150232906e-02, 6.913847560638034e-02, 1.093710833465766e-01, 1.613412523825817e-01, 2.274682115597864e-01, 3.114117323127083e-01, 4.189466220207417e-01, 5.598106789059246e-01, 7.521856146202706e-01, 1.036125427911119e+00, 1.532450860038180e+00, 3.317448300510606e+00, 0.01005033585350144, 0.08338160893905107, 0.16251892949777497, 0.24846135929849966, 0.34249030894677596, 0.44628710262841947, 0.56211891815354142, 0.69314718055994529, 0.84397007029452920, 1.02165124753198167, 1.23787435600161766, 1.51412773262977574, 1.89711998488588196, 2.52572864430825783, 4.60517018598809091, 0.05741590094955853, 0.24747378084860744, 0.39888572212236084, 0.54394139997444901, 0.69048812513915159, 0.84311389861296104, 1.00580622221479898, 1.18298694218766931, 1.38038096305861213, 1.60627736383027453, 1.87396970522337947, 2.20749220408081070, 2.65852391865854942, 3.37934630984842244, 5.67243336507218476, 0.1485547402532659, 0.4657458011640391, 0.6832386130709406, 0.8794297834672100, 1.0700752852474524, 1.2629614217350744, 1.4638400448580779, 1.6783469900166610, 1.9132338090606940, 2.1778589228618777, 2.4868823970010991, 2.8664695666264195, 3.3724415436062114, 4.1682658512758071, 6.6383520679938108, 0.2771490383641385, 0.7195001279643727, 0.9969081732265243, 1.2383497880608061, 1.4675206597269927, 1.6953064251816552, 1.9291243435606809, 2.1757300955477641, 2.4428032131216391, 2.7406534569230616, 3.0851445039665513, 3.5043101122033367, 4.0575997065264637, 4.9182956424675286, 7.5431362346944937, 0.4360451650782932, 0.9983600902486267, 1.3306365880734528, 1.6129750834753802, 1.8767241606994294, 2.1357032436097660, 2.3988853336865565, 2.6740603137235603, 2.9697561737517959, 3.2971457713883265, 3.6731795898504660, 4.1275751617770631, 4.7230515633946677, 5.6417477865306020, 8.4059469148854635 }; TEST_BEGIN(test_pt_gamma_shape) { unsigned i, j; unsigned e = 0; for (i = 0; i < sizeof(pt_gamma_shape)/sizeof(double); i++) { double shape = pt_gamma_shape[i]; double ln_gamma_shape = ln_gamma(shape); for (j = 1; j < 100; j += 7) { double p = (double)j * 0.01; assert_true(double_eq_rel(pt_gamma(p, shape, 1.0, ln_gamma_shape), pt_gamma_expected[e], MAX_REL_ERR, MAX_ABS_ERR), "Incorrect pt_gamma result for i=%u, j=%u", i, j); e++; } } } TEST_END TEST_BEGIN(test_pt_gamma_scale) { double shape = 1.0; double ln_gamma_shape = ln_gamma(shape); assert_true(double_eq_rel( pt_gamma(0.5, shape, 1.0, ln_gamma_shape) * 10.0, pt_gamma(0.5, shape, 10.0, ln_gamma_shape), MAX_REL_ERR, MAX_ABS_ERR), "Scale should be trivially equivalent to external multiplication"); } TEST_END int main(void) { return (test( test_ln_gamma_factorial, test_ln_gamma_misc, test_pt_norm, test_pt_chi2, test_pt_gamma_shape, test_pt_gamma_scale)); }
18,485
45.330827
73
c
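In math notation, test_ln_gamma_factorial checks the classical identity behind the table-driven tests, and double_eq_rel implements a combined absolute/relative tolerance (dividing by the larger-magnitude operand), both with the 1.0e-9 bounds defined at the top of the file:

\[ \Gamma(x) = (x-1)! \quad \text{for integer } x \ge 1, \qquad \text{hence} \qquad e^{\ln\Gamma(x)} = (x-1)! \]

\[ a \approx b \iff |a-b| < 10^{-9} \ \text{ or } \ \frac{|a-b|}{\max(|a|,\,|b|)} < 10^{-9} \]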
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/jemalloc/test/unit/ql.c
#include "test/jemalloc_test.h" /* Number of ring entries, in [2..26]. */ #define NENTRIES 9 typedef struct list_s list_t; typedef ql_head(list_t) list_head_t; struct list_s { ql_elm(list_t) link; char id; }; static void test_empty_list(list_head_t *head) { list_t *t; unsigned i; assert_ptr_null(ql_first(head), "Unexpected element for empty list"); assert_ptr_null(ql_last(head, link), "Unexpected element for empty list"); i = 0; ql_foreach(t, head, link) { i++; } assert_u_eq(i, 0, "Unexpected element for empty list"); i = 0; ql_reverse_foreach(t, head, link) { i++; } assert_u_eq(i, 0, "Unexpected element for empty list"); } TEST_BEGIN(test_ql_empty) { list_head_t head; ql_new(&head); test_empty_list(&head); } TEST_END static void init_entries(list_t *entries, unsigned nentries) { unsigned i; for (i = 0; i < nentries; i++) { entries[i].id = 'a' + i; ql_elm_new(&entries[i], link); } } static void test_entries_list(list_head_t *head, list_t *entries, unsigned nentries) { list_t *t; unsigned i; assert_c_eq(ql_first(head)->id, entries[0].id, "Element id mismatch"); assert_c_eq(ql_last(head, link)->id, entries[nentries-1].id, "Element id mismatch"); i = 0; ql_foreach(t, head, link) { assert_c_eq(t->id, entries[i].id, "Element id mismatch"); i++; } i = 0; ql_reverse_foreach(t, head, link) { assert_c_eq(t->id, entries[nentries-i-1].id, "Element id mismatch"); i++; } for (i = 0; i < nentries-1; i++) { t = ql_next(head, &entries[i], link); assert_c_eq(t->id, entries[i+1].id, "Element id mismatch"); } assert_ptr_null(ql_next(head, &entries[nentries-1], link), "Unexpected element"); assert_ptr_null(ql_prev(head, &entries[0], link), "Unexpected element"); for (i = 1; i < nentries; i++) { t = ql_prev(head, &entries[i], link); assert_c_eq(t->id, entries[i-1].id, "Element id mismatch"); } } TEST_BEGIN(test_ql_tail_insert) { list_head_t head; list_t entries[NENTRIES]; unsigned i; ql_new(&head); init_entries(entries, sizeof(entries)/sizeof(list_t)); for (i = 0; i < NENTRIES; i++) ql_tail_insert(&head, &entries[i], link); test_entries_list(&head, entries, NENTRIES); } TEST_END TEST_BEGIN(test_ql_tail_remove) { list_head_t head; list_t entries[NENTRIES]; unsigned i; ql_new(&head); init_entries(entries, sizeof(entries)/sizeof(list_t)); for (i = 0; i < NENTRIES; i++) ql_tail_insert(&head, &entries[i], link); for (i = 0; i < NENTRIES; i++) { test_entries_list(&head, entries, NENTRIES-i); ql_tail_remove(&head, list_t, link); } test_empty_list(&head); } TEST_END TEST_BEGIN(test_ql_head_insert) { list_head_t head; list_t entries[NENTRIES]; unsigned i; ql_new(&head); init_entries(entries, sizeof(entries)/sizeof(list_t)); for (i = 0; i < NENTRIES; i++) ql_head_insert(&head, &entries[NENTRIES-i-1], link); test_entries_list(&head, entries, NENTRIES); } TEST_END TEST_BEGIN(test_ql_head_remove) { list_head_t head; list_t entries[NENTRIES]; unsigned i; ql_new(&head); init_entries(entries, sizeof(entries)/sizeof(list_t)); for (i = 0; i < NENTRIES; i++) ql_head_insert(&head, &entries[NENTRIES-i-1], link); for (i = 0; i < NENTRIES; i++) { test_entries_list(&head, &entries[i], NENTRIES-i); ql_head_remove(&head, list_t, link); } test_empty_list(&head); } TEST_END TEST_BEGIN(test_ql_insert) { list_head_t head; list_t entries[8]; list_t *a, *b, *c, *d, *e, *f, *g, *h; ql_new(&head); init_entries(entries, sizeof(entries)/sizeof(list_t)); a = &entries[0]; b = &entries[1]; c = &entries[2]; d = &entries[3]; e = &entries[4]; f = &entries[5]; g = &entries[6]; h = &entries[7]; /* * ql_remove(), ql_before_insert(), and ql_after_insert() 
are used * internally by other macros that are already tested, so there's no * need to test them completely. However, insertion/deletion from the * middle of lists is not otherwise tested; do so here. */ ql_tail_insert(&head, f, link); ql_before_insert(&head, f, b, link); ql_before_insert(&head, f, c, link); ql_after_insert(f, h, link); ql_after_insert(f, g, link); ql_before_insert(&head, b, a, link); ql_after_insert(c, d, link); ql_before_insert(&head, f, e, link); test_entries_list(&head, entries, sizeof(entries)/sizeof(list_t)); } TEST_END int main(void) { return (test( test_ql_empty, test_ql_tail_insert, test_ql_tail_remove, test_ql_head_insert, test_ql_head_remove, test_ql_insert)); }
4,483
20.352381
73
c
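The ql_* macros implement an intrusive list: the link lives inside the element, as in list_s above. A minimal sketch of declaring and walking such a list; widget_t is a made-up name for illustration:

#include "test/jemalloc_test.h"

typedef struct widget_s widget_t;
struct widget_s {
	ql_elm(widget_t) link;	/* intrusive link field */
	int val;
};
typedef ql_head(widget_t) widget_head_t;

static void
ql_usage_sketch(void)
{
	widget_head_t head;
	widget_t a, b, *t;

	ql_new(&head);
	ql_elm_new(&a, link);
	ql_elm_new(&b, link);
	ql_tail_insert(&head, &a, link);
	ql_tail_insert(&head, &b, link);
	ql_foreach(t, &head, link) {
		/* visits &a, then &b */
	}
}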
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/jemalloc/test/unit/mallctl.c
#include "test/jemalloc_test.h" TEST_BEGIN(test_mallctl_errors) { uint64_t epoch; size_t sz; assert_d_eq(mallctl("no_such_name", NULL, NULL, NULL, 0), ENOENT, "mallctl() should return ENOENT for non-existent names"); assert_d_eq(mallctl("version", NULL, NULL, "0.0.0", strlen("0.0.0")), EPERM, "mallctl() should return EPERM on attempt to write " "read-only value"); assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)-1), EINVAL, "mallctl() should return EINVAL for input size mismatch"); assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)+1), EINVAL, "mallctl() should return EINVAL for input size mismatch"); sz = sizeof(epoch)-1; assert_d_eq(mallctl("epoch", (void *)&epoch, &sz, NULL, 0), EINVAL, "mallctl() should return EINVAL for output size mismatch"); sz = sizeof(epoch)+1; assert_d_eq(mallctl("epoch", (void *)&epoch, &sz, NULL, 0), EINVAL, "mallctl() should return EINVAL for output size mismatch"); } TEST_END TEST_BEGIN(test_mallctlnametomib_errors) { size_t mib[1]; size_t miblen; miblen = sizeof(mib)/sizeof(size_t); assert_d_eq(mallctlnametomib("no_such_name", mib, &miblen), ENOENT, "mallctlnametomib() should return ENOENT for non-existent names"); } TEST_END TEST_BEGIN(test_mallctlbymib_errors) { uint64_t epoch; size_t sz; size_t mib[1]; size_t miblen; miblen = sizeof(mib)/sizeof(size_t); assert_d_eq(mallctlnametomib("version", mib, &miblen), 0, "Unexpected mallctlnametomib() failure"); assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, "0.0.0", strlen("0.0.0")), EPERM, "mallctl() should return EPERM on " "attempt to write read-only value"); miblen = sizeof(mib)/sizeof(size_t); assert_d_eq(mallctlnametomib("epoch", mib, &miblen), 0, "Unexpected mallctlnametomib() failure"); assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, (void *)&epoch, sizeof(epoch)-1), EINVAL, "mallctlbymib() should return EINVAL for input size mismatch"); assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, (void *)&epoch, sizeof(epoch)+1), EINVAL, "mallctlbymib() should return EINVAL for input size mismatch"); sz = sizeof(epoch)-1; assert_d_eq(mallctlbymib(mib, miblen, (void *)&epoch, &sz, NULL, 0), EINVAL, "mallctlbymib() should return EINVAL for output size mismatch"); sz = sizeof(epoch)+1; assert_d_eq(mallctlbymib(mib, miblen, (void *)&epoch, &sz, NULL, 0), EINVAL, "mallctlbymib() should return EINVAL for output size mismatch"); } TEST_END TEST_BEGIN(test_mallctl_read_write) { uint64_t old_epoch, new_epoch; size_t sz = sizeof(old_epoch); /* Blind. */ assert_d_eq(mallctl("epoch", NULL, NULL, NULL, 0), 0, "Unexpected mallctl() failure"); assert_zu_eq(sz, sizeof(old_epoch), "Unexpected output size"); /* Read. */ assert_d_eq(mallctl("epoch", (void *)&old_epoch, &sz, NULL, 0), 0, "Unexpected mallctl() failure"); assert_zu_eq(sz, sizeof(old_epoch), "Unexpected output size"); /* Write. */ assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&new_epoch, sizeof(new_epoch)), 0, "Unexpected mallctl() failure"); assert_zu_eq(sz, sizeof(old_epoch), "Unexpected output size"); /* Read+write. 
*/ assert_d_eq(mallctl("epoch", (void *)&old_epoch, &sz, (void *)&new_epoch, sizeof(new_epoch)), 0, "Unexpected mallctl() failure"); assert_zu_eq(sz, sizeof(old_epoch), "Unexpected output size"); } TEST_END TEST_BEGIN(test_mallctlnametomib_short_mib) { size_t mib[4]; size_t miblen; miblen = 3; mib[3] = 42; assert_d_eq(mallctlnametomib("arenas.bin.0.nregs", mib, &miblen), 0, "Unexpected mallctlnametomib() failure"); assert_zu_eq(miblen, 3, "Unexpected mib output length"); assert_zu_eq(mib[3], 42, "mallctlnametomib() wrote past the end of the input mib"); } TEST_END TEST_BEGIN(test_mallctl_config) { #define TEST_MALLCTL_CONFIG(config, t) do { \ t oldval; \ size_t sz = sizeof(oldval); \ assert_d_eq(mallctl("config."#config, (void *)&oldval, &sz, \ NULL, 0), 0, "Unexpected mallctl() failure"); \ assert_b_eq(oldval, config_##config, "Incorrect config value"); \ assert_zu_eq(sz, sizeof(oldval), "Unexpected output size"); \ } while (0) TEST_MALLCTL_CONFIG(cache_oblivious, bool); TEST_MALLCTL_CONFIG(debug, bool); TEST_MALLCTL_CONFIG(fill, bool); TEST_MALLCTL_CONFIG(lazy_lock, bool); TEST_MALLCTL_CONFIG(malloc_conf, const char *); TEST_MALLCTL_CONFIG(munmap, bool); TEST_MALLCTL_CONFIG(prof, bool); TEST_MALLCTL_CONFIG(prof_libgcc, bool); TEST_MALLCTL_CONFIG(prof_libunwind, bool); TEST_MALLCTL_CONFIG(stats, bool); TEST_MALLCTL_CONFIG(tcache, bool); TEST_MALLCTL_CONFIG(tls, bool); TEST_MALLCTL_CONFIG(utrace, bool); TEST_MALLCTL_CONFIG(valgrind, bool); TEST_MALLCTL_CONFIG(xmalloc, bool); #undef TEST_MALLCTL_CONFIG } TEST_END TEST_BEGIN(test_mallctl_opt) { bool config_always = true; #define TEST_MALLCTL_OPT(t, opt, config) do { \ t oldval; \ size_t sz = sizeof(oldval); \ int expected = config_##config ? 0 : ENOENT; \ int result = mallctl("opt."#opt, (void *)&oldval, &sz, NULL, \ 0); \ assert_d_eq(result, expected, \ "Unexpected mallctl() result for opt."#opt); \ assert_zu_eq(sz, sizeof(oldval), "Unexpected output size"); \ } while (0) TEST_MALLCTL_OPT(bool, abort, always); TEST_MALLCTL_OPT(size_t, lg_chunk, always); TEST_MALLCTL_OPT(const char *, dss, always); TEST_MALLCTL_OPT(unsigned, narenas, always); TEST_MALLCTL_OPT(const char *, purge, always); TEST_MALLCTL_OPT(ssize_t, lg_dirty_mult, always); TEST_MALLCTL_OPT(ssize_t, decay_time, always); TEST_MALLCTL_OPT(bool, stats_print, always); TEST_MALLCTL_OPT(const char *, junk, fill); TEST_MALLCTL_OPT(size_t, quarantine, fill); TEST_MALLCTL_OPT(bool, redzone, fill); TEST_MALLCTL_OPT(bool, zero, fill); TEST_MALLCTL_OPT(bool, utrace, utrace); TEST_MALLCTL_OPT(bool, xmalloc, xmalloc); TEST_MALLCTL_OPT(bool, tcache, tcache); TEST_MALLCTL_OPT(size_t, lg_tcache_max, tcache); TEST_MALLCTL_OPT(bool, prof, prof); TEST_MALLCTL_OPT(const char *, prof_prefix, prof); TEST_MALLCTL_OPT(bool, prof_active, prof); TEST_MALLCTL_OPT(ssize_t, lg_prof_sample, prof); TEST_MALLCTL_OPT(bool, prof_accum, prof); TEST_MALLCTL_OPT(ssize_t, lg_prof_interval, prof); TEST_MALLCTL_OPT(bool, prof_gdump, prof); TEST_MALLCTL_OPT(bool, prof_final, prof); TEST_MALLCTL_OPT(bool, prof_leak, prof); #undef TEST_MALLCTL_OPT } TEST_END TEST_BEGIN(test_manpage_example) { unsigned nbins, i; size_t mib[4]; size_t len, miblen; len = sizeof(nbins); assert_d_eq(mallctl("arenas.nbins", (void *)&nbins, &len, NULL, 0), 0, "Unexpected mallctl() failure"); miblen = 4; assert_d_eq(mallctlnametomib("arenas.bin.0.size", mib, &miblen), 0, "Unexpected mallctlnametomib() failure"); for (i = 0; i < nbins; i++) { size_t bin_size; mib[2] = i; len = sizeof(bin_size); assert_d_eq(mallctlbymib(mib, miblen, (void 
*)&bin_size, &len, NULL, 0), 0, "Unexpected mallctlbymib() failure"); /* Do something with bin_size... */ } } TEST_END TEST_BEGIN(test_tcache_none) { void *p0, *q, *p1; test_skip_if(!config_tcache); /* Allocate p and q. */ p0 = mallocx(42, 0); assert_ptr_not_null(p0, "Unexpected mallocx() failure"); q = mallocx(42, 0); assert_ptr_not_null(q, "Unexpected mallocx() failure"); /* Deallocate p and q, but bypass the tcache for q. */ dallocx(p0, 0); dallocx(q, MALLOCX_TCACHE_NONE); /* Make sure that tcache-based allocation returns p, not q. */ p1 = mallocx(42, 0); assert_ptr_not_null(p1, "Unexpected mallocx() failure"); assert_ptr_eq(p0, p1, "Expected tcache to allocate cached region"); /* Clean up. */ dallocx(p1, MALLOCX_TCACHE_NONE); } TEST_END TEST_BEGIN(test_tcache) { #define NTCACHES 10 unsigned tis[NTCACHES]; void *ps[NTCACHES]; void *qs[NTCACHES]; unsigned i; size_t sz, psz, qsz; test_skip_if(!config_tcache); psz = 42; qsz = nallocx(psz, 0) + 1; /* Create tcaches. */ for (i = 0; i < NTCACHES; i++) { sz = sizeof(unsigned); assert_d_eq(mallctl("tcache.create", (void *)&tis[i], &sz, NULL, 0), 0, "Unexpected mallctl() failure, i=%u", i); } /* Exercise tcache ID recycling. */ for (i = 0; i < NTCACHES; i++) { assert_d_eq(mallctl("tcache.destroy", NULL, NULL, (void *)&tis[i], sizeof(unsigned)), 0, "Unexpected mallctl() failure, i=%u", i); } for (i = 0; i < NTCACHES; i++) { sz = sizeof(unsigned); assert_d_eq(mallctl("tcache.create", (void *)&tis[i], &sz, NULL, 0), 0, "Unexpected mallctl() failure, i=%u", i); } /* Flush empty tcaches. */ for (i = 0; i < NTCACHES; i++) { assert_d_eq(mallctl("tcache.flush", NULL, NULL, (void *)&tis[i], sizeof(unsigned)), 0, "Unexpected mallctl() failure, i=%u", i); } /* Cache some allocations. */ for (i = 0; i < NTCACHES; i++) { ps[i] = mallocx(psz, MALLOCX_TCACHE(tis[i])); assert_ptr_not_null(ps[i], "Unexpected mallocx() failure, i=%u", i); dallocx(ps[i], MALLOCX_TCACHE(tis[i])); qs[i] = mallocx(qsz, MALLOCX_TCACHE(tis[i])); assert_ptr_not_null(qs[i], "Unexpected mallocx() failure, i=%u", i); dallocx(qs[i], MALLOCX_TCACHE(tis[i])); } /* Verify that tcaches allocate cached regions. */ for (i = 0; i < NTCACHES; i++) { void *p0 = ps[i]; ps[i] = mallocx(psz, MALLOCX_TCACHE(tis[i])); assert_ptr_not_null(ps[i], "Unexpected mallocx() failure, i=%u", i); assert_ptr_eq(ps[i], p0, "Expected mallocx() to allocate cached region, i=%u", i); } /* Verify that reallocation uses cached regions. */ for (i = 0; i < NTCACHES; i++) { void *q0 = qs[i]; qs[i] = rallocx(ps[i], qsz, MALLOCX_TCACHE(tis[i])); assert_ptr_not_null(qs[i], "Unexpected rallocx() failure, i=%u", i); assert_ptr_eq(qs[i], q0, "Expected rallocx() to allocate cached region, i=%u", i); /* Avoid undefined behavior in case of test failure. */ if (qs[i] == NULL) qs[i] = ps[i]; } for (i = 0; i < NTCACHES; i++) dallocx(qs[i], MALLOCX_TCACHE(tis[i])); /* Flush some non-empty tcaches. */ for (i = 0; i < NTCACHES/2; i++) { assert_d_eq(mallctl("tcache.flush", NULL, NULL, (void *)&tis[i], sizeof(unsigned)), 0, "Unexpected mallctl() failure, i=%u", i); } /* Destroy tcaches. 
*/ for (i = 0; i < NTCACHES; i++) { assert_d_eq(mallctl("tcache.destroy", NULL, NULL, (void *)&tis[i], sizeof(unsigned)), 0, "Unexpected mallctl() failure, i=%u", i); } } TEST_END TEST_BEGIN(test_thread_arena) { unsigned arena_old, arena_new, narenas; size_t sz = sizeof(unsigned); assert_d_eq(mallctl("arenas.narenas", (void *)&narenas, &sz, NULL, 0), 0, "Unexpected mallctl() failure"); assert_u_eq(narenas, opt_narenas, "Number of arenas incorrect"); arena_new = narenas - 1; assert_d_eq(mallctl("thread.arena", (void *)&arena_old, &sz, (void *)&arena_new, sizeof(unsigned)), 0, "Unexpected mallctl() failure"); arena_new = 0; assert_d_eq(mallctl("thread.arena", (void *)&arena_old, &sz, (void *)&arena_new, sizeof(unsigned)), 0, "Unexpected mallctl() failure"); } TEST_END TEST_BEGIN(test_arena_i_lg_dirty_mult) { ssize_t lg_dirty_mult, orig_lg_dirty_mult, prev_lg_dirty_mult; size_t sz = sizeof(ssize_t); test_skip_if(opt_purge != purge_mode_ratio); assert_d_eq(mallctl("arena.0.lg_dirty_mult", (void *)&orig_lg_dirty_mult, &sz, NULL, 0), 0, "Unexpected mallctl() failure"); lg_dirty_mult = -2; assert_d_eq(mallctl("arena.0.lg_dirty_mult", NULL, NULL, (void *)&lg_dirty_mult, sizeof(ssize_t)), EFAULT, "Unexpected mallctl() success"); lg_dirty_mult = (sizeof(size_t) << 3); assert_d_eq(mallctl("arena.0.lg_dirty_mult", NULL, NULL, (void *)&lg_dirty_mult, sizeof(ssize_t)), EFAULT, "Unexpected mallctl() success"); for (prev_lg_dirty_mult = orig_lg_dirty_mult, lg_dirty_mult = -1; lg_dirty_mult < (ssize_t)(sizeof(size_t) << 3); prev_lg_dirty_mult = lg_dirty_mult, lg_dirty_mult++) { ssize_t old_lg_dirty_mult; assert_d_eq(mallctl("arena.0.lg_dirty_mult", (void *)&old_lg_dirty_mult, &sz, (void *)&lg_dirty_mult, sizeof(ssize_t)), 0, "Unexpected mallctl() failure"); assert_zd_eq(old_lg_dirty_mult, prev_lg_dirty_mult, "Unexpected old arena.0.lg_dirty_mult"); } } TEST_END TEST_BEGIN(test_arena_i_decay_time) { ssize_t decay_time, orig_decay_time, prev_decay_time; size_t sz = sizeof(ssize_t); test_skip_if(opt_purge != purge_mode_decay); assert_d_eq(mallctl("arena.0.decay_time", (void *)&orig_decay_time, &sz, NULL, 0), 0, "Unexpected mallctl() failure"); decay_time = -2; assert_d_eq(mallctl("arena.0.decay_time", NULL, NULL, (void *)&decay_time, sizeof(ssize_t)), EFAULT, "Unexpected mallctl() success"); decay_time = 0x7fffffff; assert_d_eq(mallctl("arena.0.decay_time", NULL, NULL, (void *)&decay_time, sizeof(ssize_t)), 0, "Unexpected mallctl() failure"); for (prev_decay_time = decay_time, decay_time = -1; decay_time < 20; prev_decay_time = decay_time, decay_time++) { ssize_t old_decay_time; assert_d_eq(mallctl("arena.0.decay_time", (void *)&old_decay_time, &sz, (void *)&decay_time, sizeof(ssize_t)), 0, "Unexpected mallctl() failure"); assert_zd_eq(old_decay_time, prev_decay_time, "Unexpected old arena.0.decay_time"); } } TEST_END TEST_BEGIN(test_arena_i_purge) { unsigned narenas; size_t sz = sizeof(unsigned); size_t mib[3]; size_t miblen = 3; assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0, "Unexpected mallctl() failure"); assert_d_eq(mallctl("arenas.narenas", (void *)&narenas, &sz, NULL, 0), 0, "Unexpected mallctl() failure"); assert_d_eq(mallctlnametomib("arena.0.purge", mib, &miblen), 0, "Unexpected mallctlnametomib() failure"); mib[1] = narenas; assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0, "Unexpected mallctlbymib() failure"); } TEST_END TEST_BEGIN(test_arena_i_decay) { unsigned narenas; size_t sz = sizeof(unsigned); size_t mib[3]; size_t miblen = 3; assert_d_eq(mallctl("arena.0.decay", 
NULL, NULL, NULL, 0), 0, "Unexpected mallctl() failure"); assert_d_eq(mallctl("arenas.narenas", (void *)&narenas, &sz, NULL, 0), 0, "Unexpected mallctl() failure"); assert_d_eq(mallctlnametomib("arena.0.decay", mib, &miblen), 0, "Unexpected mallctlnametomib() failure"); mib[1] = narenas; assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0, "Unexpected mallctlbymib() failure"); } TEST_END TEST_BEGIN(test_arena_i_dss) { const char *dss_prec_old, *dss_prec_new; size_t sz = sizeof(dss_prec_old); size_t mib[3]; size_t miblen; miblen = sizeof(mib)/sizeof(size_t); assert_d_eq(mallctlnametomib("arena.0.dss", mib, &miblen), 0, "Unexpected mallctlnametomib() error"); dss_prec_new = "disabled"; assert_d_eq(mallctlbymib(mib, miblen, (void *)&dss_prec_old, &sz, (void *)&dss_prec_new, sizeof(dss_prec_new)), 0, "Unexpected mallctl() failure"); assert_str_ne(dss_prec_old, "primary", "Unexpected default for dss precedence"); assert_d_eq(mallctlbymib(mib, miblen, (void *)&dss_prec_new, &sz, (void *)&dss_prec_old, sizeof(dss_prec_old)), 0, "Unexpected mallctl() failure"); assert_d_eq(mallctlbymib(mib, miblen, (void *)&dss_prec_old, &sz, NULL, 0), 0, "Unexpected mallctl() failure"); assert_str_ne(dss_prec_old, "primary", "Unexpected value for dss precedence"); mib[1] = narenas_total_get(); dss_prec_new = "disabled"; assert_d_eq(mallctlbymib(mib, miblen, (void *)&dss_prec_old, &sz, (void *)&dss_prec_new, sizeof(dss_prec_new)), 0, "Unexpected mallctl() failure"); assert_str_ne(dss_prec_old, "primary", "Unexpected default for dss precedence"); assert_d_eq(mallctlbymib(mib, miblen, (void *)&dss_prec_new, &sz, (void *)&dss_prec_old, sizeof(dss_prec_new)), 0, "Unexpected mallctl() failure"); assert_d_eq(mallctlbymib(mib, miblen, (void *)&dss_prec_old, &sz, NULL, 0), 0, "Unexpected mallctl() failure"); assert_str_ne(dss_prec_old, "primary", "Unexpected value for dss precedence"); } TEST_END TEST_BEGIN(test_arenas_initialized) { unsigned narenas; size_t sz = sizeof(narenas); assert_d_eq(mallctl("arenas.narenas", (void *)&narenas, &sz, NULL, 0), 0, "Unexpected mallctl() failure"); { VARIABLE_ARRAY(bool, initialized, narenas); sz = narenas * sizeof(bool); assert_d_eq(mallctl("arenas.initialized", (void *)initialized, &sz, NULL, 0), 0, "Unexpected mallctl() failure"); } } TEST_END TEST_BEGIN(test_arenas_lg_dirty_mult) { ssize_t lg_dirty_mult, orig_lg_dirty_mult, prev_lg_dirty_mult; size_t sz = sizeof(ssize_t); test_skip_if(opt_purge != purge_mode_ratio); assert_d_eq(mallctl("arenas.lg_dirty_mult", (void *)&orig_lg_dirty_mult, &sz, NULL, 0), 0, "Unexpected mallctl() failure"); lg_dirty_mult = -2; assert_d_eq(mallctl("arenas.lg_dirty_mult", NULL, NULL, (void *)&lg_dirty_mult, sizeof(ssize_t)), EFAULT, "Unexpected mallctl() success"); lg_dirty_mult = (sizeof(size_t) << 3); assert_d_eq(mallctl("arenas.lg_dirty_mult", NULL, NULL, (void *)&lg_dirty_mult, sizeof(ssize_t)), EFAULT, "Unexpected mallctl() success"); for (prev_lg_dirty_mult = orig_lg_dirty_mult, lg_dirty_mult = -1; lg_dirty_mult < (ssize_t)(sizeof(size_t) << 3); prev_lg_dirty_mult = lg_dirty_mult, lg_dirty_mult++) { ssize_t old_lg_dirty_mult; assert_d_eq(mallctl("arenas.lg_dirty_mult", (void *)&old_lg_dirty_mult, &sz, (void *)&lg_dirty_mult, sizeof(ssize_t)), 0, "Unexpected mallctl() failure"); assert_zd_eq(old_lg_dirty_mult, prev_lg_dirty_mult, "Unexpected old arenas.lg_dirty_mult"); } } TEST_END TEST_BEGIN(test_arenas_decay_time) { ssize_t decay_time, orig_decay_time, prev_decay_time; size_t sz = sizeof(ssize_t); test_skip_if(opt_purge != 
purge_mode_decay); assert_d_eq(mallctl("arenas.decay_time", (void *)&orig_decay_time, &sz, NULL, 0), 0, "Unexpected mallctl() failure"); decay_time = -2; assert_d_eq(mallctl("arenas.decay_time", NULL, NULL, (void *)&decay_time, sizeof(ssize_t)), EFAULT, "Unexpected mallctl() success"); decay_time = 0x7fffffff; assert_d_eq(mallctl("arenas.decay_time", NULL, NULL, (void *)&decay_time, sizeof(ssize_t)), 0, "Expected mallctl() failure"); for (prev_decay_time = decay_time, decay_time = -1; decay_time < 20; prev_decay_time = decay_time, decay_time++) { ssize_t old_decay_time; assert_d_eq(mallctl("arenas.decay_time", (void *)&old_decay_time, &sz, (void *)&decay_time, sizeof(ssize_t)), 0, "Unexpected mallctl() failure"); assert_zd_eq(old_decay_time, prev_decay_time, "Unexpected old arenas.decay_time"); } } TEST_END TEST_BEGIN(test_arenas_constants) { #define TEST_ARENAS_CONSTANT(t, name, expected) do { \ t name; \ size_t sz = sizeof(t); \ assert_d_eq(mallctl("arenas."#name, (void *)&name, &sz, NULL, \ 0), 0, "Unexpected mallctl() failure"); \ assert_zu_eq(name, expected, "Incorrect "#name" size"); \ } while (0) TEST_ARENAS_CONSTANT(size_t, quantum, QUANTUM); TEST_ARENAS_CONSTANT(size_t, page, PAGE); TEST_ARENAS_CONSTANT(unsigned, nbins, NBINS); TEST_ARENAS_CONSTANT(unsigned, nlruns, nlclasses); TEST_ARENAS_CONSTANT(unsigned, nhchunks, nhclasses); #undef TEST_ARENAS_CONSTANT } TEST_END TEST_BEGIN(test_arenas_bin_constants) { #define TEST_ARENAS_BIN_CONSTANT(t, name, expected) do { \ t name; \ size_t sz = sizeof(t); \ assert_d_eq(mallctl("arenas.bin.0."#name, (void *)&name, &sz, \ NULL, 0), 0, "Unexpected mallctl() failure"); \ assert_zu_eq(name, expected, "Incorrect "#name" size"); \ } while (0) TEST_ARENAS_BIN_CONSTANT(size_t, size, arena_bin_info[0].reg_size); TEST_ARENAS_BIN_CONSTANT(uint32_t, nregs, arena_bin_info[0].nregs); TEST_ARENAS_BIN_CONSTANT(size_t, run_size, arena_bin_info[0].run_size); #undef TEST_ARENAS_BIN_CONSTANT } TEST_END TEST_BEGIN(test_arenas_lrun_constants) { #define TEST_ARENAS_LRUN_CONSTANT(t, name, expected) do { \ t name; \ size_t sz = sizeof(t); \ assert_d_eq(mallctl("arenas.lrun.0."#name, (void *)&name, &sz, \ NULL, 0), 0, "Unexpected mallctl() failure"); \ assert_zu_eq(name, expected, "Incorrect "#name" size"); \ } while (0) TEST_ARENAS_LRUN_CONSTANT(size_t, size, LARGE_MINCLASS); #undef TEST_ARENAS_LRUN_CONSTANT } TEST_END TEST_BEGIN(test_arenas_hchunk_constants) { #define TEST_ARENAS_HCHUNK_CONSTANT(t, name, expected) do { \ t name; \ size_t sz = sizeof(t); \ assert_d_eq(mallctl("arenas.hchunk.0."#name, (void *)&name, \ &sz, NULL, 0), 0, "Unexpected mallctl() failure"); \ assert_zu_eq(name, expected, "Incorrect "#name" size"); \ } while (0) TEST_ARENAS_HCHUNK_CONSTANT(size_t, size, chunksize); #undef TEST_ARENAS_HCHUNK_CONSTANT } TEST_END TEST_BEGIN(test_arenas_extend) { unsigned narenas_before, arena, narenas_after; size_t sz = sizeof(unsigned); assert_d_eq(mallctl("arenas.narenas", (void *)&narenas_before, &sz, NULL, 0), 0, "Unexpected mallctl() failure"); assert_d_eq(mallctl("arenas.extend", (void *)&arena, &sz, NULL, 0), 0, "Unexpected mallctl() failure"); assert_d_eq(mallctl("arenas.narenas", (void *)&narenas_after, &sz, NULL, 0), 0, "Unexpected mallctl() failure"); assert_u_eq(narenas_before+1, narenas_after, "Unexpected number of arenas before versus after extension"); assert_u_eq(arena, narenas_after-1, "Unexpected arena index"); } TEST_END TEST_BEGIN(test_stats_arenas) { #define TEST_STATS_ARENAS(t, name) do { \ t name; \ size_t sz = sizeof(t); \ 
assert_d_eq(mallctl("stats.arenas.0."#name, (void *)&name, &sz, \ NULL, 0), 0, "Unexpected mallctl() failure"); \ } while (0) TEST_STATS_ARENAS(unsigned, nthreads); TEST_STATS_ARENAS(const char *, dss); TEST_STATS_ARENAS(ssize_t, lg_dirty_mult); TEST_STATS_ARENAS(ssize_t, decay_time); TEST_STATS_ARENAS(size_t, pactive); TEST_STATS_ARENAS(size_t, pdirty); #undef TEST_STATS_ARENAS } TEST_END int main(void) { return (test( test_mallctl_errors, test_mallctlnametomib_errors, test_mallctlbymib_errors, test_mallctl_read_write, test_mallctlnametomib_short_mib, test_mallctl_config, test_mallctl_opt, test_manpage_example, test_tcache_none, test_tcache, test_thread_arena, test_arena_i_lg_dirty_mult, test_arena_i_decay_time, test_arena_i_purge, test_arena_i_decay, test_arena_i_dss, test_arenas_initialized, test_arenas_lg_dirty_mult, test_arenas_decay_time, test_arenas_constants, test_arenas_bin_constants, test_arenas_lrun_constants, test_arenas_hchunk_constants, test_arenas_extend, test_stats_arenas)); }
22,753
29.542282
73
c
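The mallctl tests in the record above all follow one calling convention: oldp/oldlenp receive the current value, newp/newlen supply a new one, and either side may be NULL. A minimal standalone sketch of that convention, assuming jemalloc is installed with its default unprefixed API ("arenas.narenas" and "epoch" are real control names used elsewhere in this repository's tests):

#include <stdio.h>
#include <stdint.h>
#include <jemalloc/jemalloc.h>

int
main(void)
{
	/* Read-only: pass oldp/oldlenp, leave newp NULL. */
	unsigned narenas;
	size_t sz = sizeof(narenas);
	if (mallctl("arenas.narenas", &narenas, &sz, NULL, 0) != 0) {
		fprintf(stderr, "reading arenas.narenas failed\n");
		return (1);
	}
	printf("narenas: %u\n", narenas);

	/* Write-only: pass newp/newlen, leave oldp NULL.  Writing "epoch"
	 * refreshes the statistics snapshot that stats.* reads observe. */
	uint64_t epoch = 1;
	if (mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch)) != 0) {
		fprintf(stderr, "writing epoch failed\n");
		return (1);
	}
	return (0);
}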
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/jemalloc/test/unit/rtree.c
#include "test/jemalloc_test.h" static rtree_node_elm_t * node_alloc(size_t nelms) { return ((rtree_node_elm_t *)calloc(nelms, sizeof(rtree_node_elm_t))); } static void node_dalloc(rtree_node_elm_t *node) { free(node); } TEST_BEGIN(test_rtree_get_empty) { unsigned i; for (i = 1; i <= (sizeof(uintptr_t) << 3); i++) { rtree_t rtree; assert_false(rtree_new(&rtree, i, node_alloc, node_dalloc), "Unexpected rtree_new() failure"); assert_ptr_null(rtree_get(&rtree, 0, false), "rtree_get() should return NULL for empty tree"); rtree_delete(&rtree); } } TEST_END TEST_BEGIN(test_rtree_extrema) { unsigned i; extent_node_t node_a, node_b; for (i = 1; i <= (sizeof(uintptr_t) << 3); i++) { rtree_t rtree; assert_false(rtree_new(&rtree, i, node_alloc, node_dalloc), "Unexpected rtree_new() failure"); assert_false(rtree_set(&rtree, 0, &node_a), "Unexpected rtree_set() failure"); assert_ptr_eq(rtree_get(&rtree, 0, true), &node_a, "rtree_get() should return previously set value"); assert_false(rtree_set(&rtree, ~((uintptr_t)0), &node_b), "Unexpected rtree_set() failure"); assert_ptr_eq(rtree_get(&rtree, ~((uintptr_t)0), true), &node_b, "rtree_get() should return previously set value"); rtree_delete(&rtree); } } TEST_END TEST_BEGIN(test_rtree_bits) { unsigned i, j, k; for (i = 1; i < (sizeof(uintptr_t) << 3); i++) { uintptr_t keys[] = {0, 1, (((uintptr_t)1) << (sizeof(uintptr_t)*8-i)) - 1}; extent_node_t node; rtree_t rtree; assert_false(rtree_new(&rtree, i, node_alloc, node_dalloc), "Unexpected rtree_new() failure"); for (j = 0; j < sizeof(keys)/sizeof(uintptr_t); j++) { assert_false(rtree_set(&rtree, keys[j], &node), "Unexpected rtree_set() failure"); for (k = 0; k < sizeof(keys)/sizeof(uintptr_t); k++) { assert_ptr_eq(rtree_get(&rtree, keys[k], true), &node, "rtree_get() should return " "previously set value and ignore " "insignificant key bits; i=%u, j=%u, k=%u, " "set key=%#"FMTxPTR", get key=%#"FMTxPTR, i, j, k, keys[j], keys[k]); } assert_ptr_null(rtree_get(&rtree, (((uintptr_t)1) << (sizeof(uintptr_t)*8-i)), false), "Only leftmost rtree leaf should be set; " "i=%u, j=%u", i, j); assert_false(rtree_set(&rtree, keys[j], NULL), "Unexpected rtree_set() failure"); } rtree_delete(&rtree); } } TEST_END TEST_BEGIN(test_rtree_random) { unsigned i; sfmt_t *sfmt; #define NSET 16 #define SEED 42 sfmt = init_gen_rand(SEED); for (i = 1; i <= (sizeof(uintptr_t) << 3); i++) { uintptr_t keys[NSET]; extent_node_t node; unsigned j; rtree_t rtree; assert_false(rtree_new(&rtree, i, node_alloc, node_dalloc), "Unexpected rtree_new() failure"); for (j = 0; j < NSET; j++) { keys[j] = (uintptr_t)gen_rand64(sfmt); assert_false(rtree_set(&rtree, keys[j], &node), "Unexpected rtree_set() failure"); assert_ptr_eq(rtree_get(&rtree, keys[j], true), &node, "rtree_get() should return previously set value"); } for (j = 0; j < NSET; j++) { assert_ptr_eq(rtree_get(&rtree, keys[j], true), &node, "rtree_get() should return previously set value"); } for (j = 0; j < NSET; j++) { assert_false(rtree_set(&rtree, keys[j], NULL), "Unexpected rtree_set() failure"); assert_ptr_null(rtree_get(&rtree, keys[j], true), "rtree_get() should return previously set value"); } for (j = 0; j < NSET; j++) { assert_ptr_null(rtree_get(&rtree, keys[j], true), "rtree_get() should return previously set value"); } rtree_delete(&rtree); } fini_gen_rand(sfmt); #undef NSET #undef SEED } TEST_END int main(void) { return (test( test_rtree_get_empty, test_rtree_extrema, test_rtree_bits, test_rtree_random)); }
3,831
24.210526
70
c
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/jemalloc/test/unit/decay.c
#include "test/jemalloc_test.h" const char *malloc_conf = "purge:decay,decay_time:1"; static nstime_monotonic_t *nstime_monotonic_orig; static nstime_update_t *nstime_update_orig; static unsigned nupdates_mock; static nstime_t time_mock; static bool monotonic_mock; static bool nstime_monotonic_mock(void) { return (monotonic_mock); } static bool nstime_update_mock(nstime_t *time) { nupdates_mock++; if (monotonic_mock) nstime_copy(time, &time_mock); return (!monotonic_mock); } TEST_BEGIN(test_decay_ticks) { ticker_t *decay_ticker; unsigned tick0, tick1; size_t sz, huge0, large0; void *p; test_skip_if(opt_purge != purge_mode_decay); decay_ticker = decay_ticker_get(tsd_fetch(), 0); assert_ptr_not_null(decay_ticker, "Unexpected failure getting decay ticker"); sz = sizeof(size_t); assert_d_eq(mallctl("arenas.hchunk.0.size", (void *)&huge0, &sz, NULL, 0), 0, "Unexpected mallctl failure"); assert_d_eq(mallctl("arenas.lrun.0.size", (void *)&large0, &sz, NULL, 0), 0, "Unexpected mallctl failure"); /* * Test the standard APIs using a huge size class, since we can't * control tcache interactions (except by completely disabling tcache * for the entire test program). */ /* malloc(). */ tick0 = ticker_read(decay_ticker); p = malloc(huge0); assert_ptr_not_null(p, "Unexpected malloc() failure"); tick1 = ticker_read(decay_ticker); assert_u32_ne(tick1, tick0, "Expected ticker to tick during malloc()"); /* free(). */ tick0 = ticker_read(decay_ticker); free(p); tick1 = ticker_read(decay_ticker); assert_u32_ne(tick1, tick0, "Expected ticker to tick during free()"); /* calloc(). */ tick0 = ticker_read(decay_ticker); p = calloc(1, huge0); assert_ptr_not_null(p, "Unexpected calloc() failure"); tick1 = ticker_read(decay_ticker); assert_u32_ne(tick1, tick0, "Expected ticker to tick during calloc()"); free(p); /* posix_memalign(). */ tick0 = ticker_read(decay_ticker); assert_d_eq(posix_memalign(&p, sizeof(size_t), huge0), 0, "Unexpected posix_memalign() failure"); tick1 = ticker_read(decay_ticker); assert_u32_ne(tick1, tick0, "Expected ticker to tick during posix_memalign()"); free(p); /* aligned_alloc(). */ tick0 = ticker_read(decay_ticker); p = aligned_alloc(sizeof(size_t), huge0); assert_ptr_not_null(p, "Unexpected aligned_alloc() failure"); tick1 = ticker_read(decay_ticker); assert_u32_ne(tick1, tick0, "Expected ticker to tick during aligned_alloc()"); free(p); /* realloc(). */ /* Allocate. */ tick0 = ticker_read(decay_ticker); p = realloc(NULL, huge0); assert_ptr_not_null(p, "Unexpected realloc() failure"); tick1 = ticker_read(decay_ticker); assert_u32_ne(tick1, tick0, "Expected ticker to tick during realloc()"); /* Reallocate. */ tick0 = ticker_read(decay_ticker); p = realloc(p, huge0); assert_ptr_not_null(p, "Unexpected realloc() failure"); tick1 = ticker_read(decay_ticker); assert_u32_ne(tick1, tick0, "Expected ticker to tick during realloc()"); /* Deallocate. */ tick0 = ticker_read(decay_ticker); realloc(p, 0); tick1 = ticker_read(decay_ticker); assert_u32_ne(tick1, tick0, "Expected ticker to tick during realloc()"); /* * Test the *allocx() APIs using huge, large, and small size classes, * with tcache explicitly disabled. */ { unsigned i; size_t allocx_sizes[3]; allocx_sizes[0] = huge0; allocx_sizes[1] = large0; allocx_sizes[2] = 1; for (i = 0; i < sizeof(allocx_sizes) / sizeof(size_t); i++) { sz = allocx_sizes[i]; /* mallocx(). 
*/ tick0 = ticker_read(decay_ticker); p = mallocx(sz, MALLOCX_TCACHE_NONE); assert_ptr_not_null(p, "Unexpected mallocx() failure"); tick1 = ticker_read(decay_ticker); assert_u32_ne(tick1, tick0, "Expected ticker to tick during mallocx() (sz=%zu)", sz); /* rallocx(). */ tick0 = ticker_read(decay_ticker); p = rallocx(p, sz, MALLOCX_TCACHE_NONE); assert_ptr_not_null(p, "Unexpected rallocx() failure"); tick1 = ticker_read(decay_ticker); assert_u32_ne(tick1, tick0, "Expected ticker to tick during rallocx() (sz=%zu)", sz); /* xallocx(). */ tick0 = ticker_read(decay_ticker); xallocx(p, sz, 0, MALLOCX_TCACHE_NONE); tick1 = ticker_read(decay_ticker); assert_u32_ne(tick1, tick0, "Expected ticker to tick during xallocx() (sz=%zu)", sz); /* dallocx(). */ tick0 = ticker_read(decay_ticker); dallocx(p, MALLOCX_TCACHE_NONE); tick1 = ticker_read(decay_ticker); assert_u32_ne(tick1, tick0, "Expected ticker to tick during dallocx() (sz=%zu)", sz); /* sdallocx(). */ p = mallocx(sz, MALLOCX_TCACHE_NONE); assert_ptr_not_null(p, "Unexpected mallocx() failure"); tick0 = ticker_read(decay_ticker); sdallocx(p, sz, MALLOCX_TCACHE_NONE); tick1 = ticker_read(decay_ticker); assert_u32_ne(tick1, tick0, "Expected ticker to tick during sdallocx() " "(sz=%zu)", sz); } } /* * Test tcache fill/flush interactions for large and small size classes, * using an explicit tcache. */ if (config_tcache) { unsigned tcache_ind, i; size_t tcache_sizes[2]; tcache_sizes[0] = large0; tcache_sizes[1] = 1; sz = sizeof(unsigned); assert_d_eq(mallctl("tcache.create", (void *)&tcache_ind, &sz, NULL, 0), 0, "Unexpected mallctl failure"); for (i = 0; i < sizeof(tcache_sizes) / sizeof(size_t); i++) { sz = tcache_sizes[i]; /* tcache fill. */ tick0 = ticker_read(decay_ticker); p = mallocx(sz, MALLOCX_TCACHE(tcache_ind)); assert_ptr_not_null(p, "Unexpected mallocx() failure"); tick1 = ticker_read(decay_ticker); assert_u32_ne(tick1, tick0, "Expected ticker to tick during tcache fill " "(sz=%zu)", sz); /* tcache flush. */ dallocx(p, MALLOCX_TCACHE(tcache_ind)); tick0 = ticker_read(decay_ticker); assert_d_eq(mallctl("tcache.flush", NULL, NULL, (void *)&tcache_ind, sizeof(unsigned)), 0, "Unexpected mallctl failure"); tick1 = ticker_read(decay_ticker); assert_u32_ne(tick1, tick0, "Expected ticker to tick during tcache flush " "(sz=%zu)", sz); } } } TEST_END TEST_BEGIN(test_decay_ticker) { #define NPS 1024 int flags = (MALLOCX_ARENA(0) | MALLOCX_TCACHE_NONE); void *ps[NPS]; uint64_t epoch; uint64_t npurge0 = 0; uint64_t npurge1 = 0; size_t sz, large; unsigned i, nupdates0; nstime_t time, decay_time, deadline; test_skip_if(opt_purge != purge_mode_decay); /* * Allocate a bunch of large objects, pause the clock, deallocate the * objects, restore the clock, then [md]allocx() in a tight loop to * verify the ticker triggers purging. */ if (config_tcache) { size_t tcache_max; sz = sizeof(size_t); assert_d_eq(mallctl("arenas.tcache_max", (void *)&tcache_max, &sz, NULL, 0), 0, "Unexpected mallctl failure"); large = nallocx(tcache_max + 1, flags); } else { sz = sizeof(size_t); assert_d_eq(mallctl("arenas.lrun.0.size", (void *)&large, &sz, NULL, 0), 0, "Unexpected mallctl failure"); } assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0, "Unexpected mallctl failure"); assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(uint64_t)), 0, "Unexpected mallctl failure"); sz = sizeof(uint64_t); assert_d_eq(mallctl("stats.arenas.0.npurge", (void *)&npurge0, &sz, NULL, 0), config_stats ? 
0 : ENOENT, "Unexpected mallctl result"); for (i = 0; i < NPS; i++) { ps[i] = mallocx(large, flags); assert_ptr_not_null(ps[i], "Unexpected mallocx() failure"); } nupdates_mock = 0; nstime_init(&time_mock, 0); nstime_update(&time_mock); monotonic_mock = true; nstime_monotonic_orig = nstime_monotonic; nstime_update_orig = nstime_update; nstime_monotonic = nstime_monotonic_mock; nstime_update = nstime_update_mock; for (i = 0; i < NPS; i++) { dallocx(ps[i], flags); nupdates0 = nupdates_mock; assert_d_eq(mallctl("arena.0.decay", NULL, NULL, NULL, 0), 0, "Unexpected arena.0.decay failure"); assert_u_gt(nupdates_mock, nupdates0, "Expected nstime_update() to be called"); } nstime_monotonic = nstime_monotonic_orig; nstime_update = nstime_update_orig; nstime_init(&time, 0); nstime_update(&time); nstime_init2(&decay_time, opt_decay_time, 0); nstime_copy(&deadline, &time); nstime_add(&deadline, &decay_time); do { for (i = 0; i < DECAY_NTICKS_PER_UPDATE / 2; i++) { void *p = mallocx(1, flags); assert_ptr_not_null(p, "Unexpected mallocx() failure"); dallocx(p, flags); } assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(uint64_t)), 0, "Unexpected mallctl failure"); sz = sizeof(uint64_t); assert_d_eq(mallctl("stats.arenas.0.npurge", (void *)&npurge1, &sz, NULL, 0), config_stats ? 0 : ENOENT, "Unexpected mallctl result"); nstime_update(&time); } while (nstime_compare(&time, &deadline) <= 0 && npurge1 == npurge0); if (config_stats) assert_u64_gt(npurge1, npurge0, "Expected purging to occur"); #undef NPS } TEST_END TEST_BEGIN(test_decay_nonmonotonic) { #define NPS (SMOOTHSTEP_NSTEPS + 1) int flags = (MALLOCX_ARENA(0) | MALLOCX_TCACHE_NONE); void *ps[NPS]; uint64_t epoch; uint64_t npurge0 = 0; uint64_t npurge1 = 0; size_t sz, large0; unsigned i, nupdates0; test_skip_if(opt_purge != purge_mode_decay); sz = sizeof(size_t); assert_d_eq(mallctl("arenas.lrun.0.size", (void *)&large0, &sz, NULL, 0), 0, "Unexpected mallctl failure"); assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0, "Unexpected mallctl failure"); assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(uint64_t)), 0, "Unexpected mallctl failure"); sz = sizeof(uint64_t); assert_d_eq(mallctl("stats.arenas.0.npurge", (void *)&npurge0, &sz, NULL, 0), config_stats ? 0 : ENOENT, "Unexpected mallctl result"); nupdates_mock = 0; nstime_init(&time_mock, 0); nstime_update(&time_mock); monotonic_mock = false; nstime_monotonic_orig = nstime_monotonic; nstime_update_orig = nstime_update; nstime_monotonic = nstime_monotonic_mock; nstime_update = nstime_update_mock; for (i = 0; i < NPS; i++) { ps[i] = mallocx(large0, flags); assert_ptr_not_null(ps[i], "Unexpected mallocx() failure"); } for (i = 0; i < NPS; i++) { dallocx(ps[i], flags); nupdates0 = nupdates_mock; assert_d_eq(mallctl("arena.0.decay", NULL, NULL, NULL, 0), 0, "Unexpected arena.0.decay failure"); assert_u_gt(nupdates_mock, nupdates0, "Expected nstime_update() to be called"); } assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(uint64_t)), 0, "Unexpected mallctl failure"); sz = sizeof(uint64_t); assert_d_eq(mallctl("stats.arenas.0.npurge", (void *)&npurge1, &sz, NULL, 0), config_stats ? 0 : ENOENT, "Unexpected mallctl result"); if (config_stats) assert_u64_eq(npurge0, npurge1, "Unexpected purging occurred"); nstime_monotonic = nstime_monotonic_orig; nstime_update = nstime_update_orig; #undef NPS } TEST_END int main(void) { return (test( test_decay_ticks, test_decay_ticker, test_decay_nonmonotonic)); }
11,060
28.496
73
c
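test_decay_ticker and test_decay_nonmonotonic above take control of time by saving the nstime_monotonic/nstime_update function pointers, installing mocks, and restoring the originals afterwards. A self-contained sketch of that function-pointer seam pattern; the names here (now_fn_t, now_impl, deadline_passed) are hypothetical stand-ins, not jemalloc API:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

typedef uint64_t now_fn_t(void);

/* Production clock; the seam lets tests replace it. */
static uint64_t now_real(void) { return (0); /* e.g. clock_gettime() */ }
static now_fn_t *now_impl = now_real;

/* Mock state, mirroring nupdates_mock/time_mock above. */
static uint64_t mock_now;
static unsigned mock_calls;
static uint64_t now_mock(void) { mock_calls++; return (mock_now); }

/* Code under test always calls through the pointer. */
static bool
deadline_passed(uint64_t deadline)
{
	return (now_impl() >= deadline);
}

int
main(void)
{
	now_fn_t *orig = now_impl;

	now_impl = now_mock;		/* Install the mock... */
	mock_now = 100;
	assert(deadline_passed(99));
	assert(!deadline_passed(101));
	assert(mock_calls == 2);	/* ...and prove the seam was hit. */
	now_impl = orig;		/* Restore, as the tests above do. */
	return (0);
}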
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/jemalloc/test/unit/ph.c
#include "test/jemalloc_test.h" typedef struct node_s node_t; struct node_s { #define NODE_MAGIC 0x9823af7e uint32_t magic; phn(node_t) link; uint64_t key; }; static int node_cmp(const node_t *a, const node_t *b) { int ret; ret = (a->key > b->key) - (a->key < b->key); if (ret == 0) { /* * Duplicates are not allowed in the heap, so force an * arbitrary ordering for non-identical items with equal keys. */ ret = (((uintptr_t)a) > ((uintptr_t)b)) - (((uintptr_t)a) < ((uintptr_t)b)); } return (ret); } static int node_cmp_magic(const node_t *a, const node_t *b) { assert_u32_eq(a->magic, NODE_MAGIC, "Bad magic"); assert_u32_eq(b->magic, NODE_MAGIC, "Bad magic"); return (node_cmp(a, b)); } typedef ph(node_t) heap_t; ph_gen(static, heap_, heap_t, node_t, link, node_cmp_magic); static void node_print(const node_t *node, unsigned depth) { unsigned i; node_t *leftmost_child, *sibling; for (i = 0; i < depth; i++) malloc_printf("\t"); malloc_printf("%2"FMTu64"\n", node->key); leftmost_child = phn_lchild_get(node_t, link, node); if (leftmost_child == NULL) return; node_print(leftmost_child, depth + 1); for (sibling = phn_next_get(node_t, link, leftmost_child); sibling != NULL; sibling = phn_next_get(node_t, link, sibling)) { node_print(sibling, depth + 1); } } static void heap_print(const heap_t *heap) { node_t *auxelm; malloc_printf("vvv heap %p vvv\n", heap); if (heap->ph_root == NULL) goto label_return; node_print(heap->ph_root, 0); for (auxelm = phn_next_get(node_t, link, heap->ph_root); auxelm != NULL; auxelm = phn_next_get(node_t, link, auxelm)) { assert_ptr_eq(phn_next_get(node_t, link, phn_prev_get(node_t, link, auxelm)), auxelm, "auxelm's prev doesn't link to auxelm"); node_print(auxelm, 0); } label_return: malloc_printf("^^^ heap %p ^^^\n", heap); } static unsigned node_validate(const node_t *node, const node_t *parent) { unsigned nnodes = 1; node_t *leftmost_child, *sibling; if (parent != NULL) { assert_d_ge(node_cmp_magic(node, parent), 0, "Child is less than parent"); } leftmost_child = phn_lchild_get(node_t, link, node); if (leftmost_child == NULL) return (nnodes); assert_ptr_eq((void *)phn_prev_get(node_t, link, leftmost_child), (void *)node, "Leftmost child does not link to node"); nnodes += node_validate(leftmost_child, node); for (sibling = phn_next_get(node_t, link, leftmost_child); sibling != NULL; sibling = phn_next_get(node_t, link, sibling)) { assert_ptr_eq(phn_next_get(node_t, link, phn_prev_get(node_t, link, sibling)), sibling, "sibling's prev doesn't link to sibling"); nnodes += node_validate(sibling, node); } return (nnodes); } static unsigned heap_validate(const heap_t *heap) { unsigned nnodes = 0; node_t *auxelm; if (heap->ph_root == NULL) goto label_return; nnodes += node_validate(heap->ph_root, NULL); for (auxelm = phn_next_get(node_t, link, heap->ph_root); auxelm != NULL; auxelm = phn_next_get(node_t, link, auxelm)) { assert_ptr_eq(phn_next_get(node_t, link, phn_prev_get(node_t, link, auxelm)), auxelm, "auxelm's prev doesn't link to auxelm"); nnodes += node_validate(auxelm, NULL); } label_return: if (false) heap_print(heap); return (nnodes); } TEST_BEGIN(test_ph_empty) { heap_t heap; heap_new(&heap); assert_true(heap_empty(&heap), "Heap should be empty"); assert_ptr_null(heap_first(&heap), "Unexpected node"); } TEST_END static void node_remove(heap_t *heap, node_t *node) { heap_remove(heap, node); node->magic = 0; } static node_t * node_remove_first(heap_t *heap) { node_t *node = heap_remove_first(heap); node->magic = 0; return (node); } TEST_BEGIN(test_ph_random) { 
#define NNODES 25 #define NBAGS 250 #define SEED 42 sfmt_t *sfmt; uint64_t bag[NNODES]; heap_t heap; node_t nodes[NNODES]; unsigned i, j, k; sfmt = init_gen_rand(SEED); for (i = 0; i < NBAGS; i++) { switch (i) { case 0: /* Insert in order. */ for (j = 0; j < NNODES; j++) bag[j] = j; break; case 1: /* Insert in reverse order. */ for (j = 0; j < NNODES; j++) bag[j] = NNODES - j - 1; break; default: for (j = 0; j < NNODES; j++) bag[j] = gen_rand64_range(sfmt, NNODES); } for (j = 1; j <= NNODES; j++) { /* Initialize heap and nodes. */ heap_new(&heap); assert_u_eq(heap_validate(&heap), 0, "Incorrect node count"); for (k = 0; k < j; k++) { nodes[k].magic = NODE_MAGIC; nodes[k].key = bag[k]; } /* Insert nodes. */ for (k = 0; k < j; k++) { heap_insert(&heap, &nodes[k]); if (i % 13 == 12) { /* Trigger merging. */ assert_ptr_not_null(heap_first(&heap), "Heap should not be empty"); } assert_u_eq(heap_validate(&heap), k + 1, "Incorrect node count"); } assert_false(heap_empty(&heap), "Heap should not be empty"); /* Remove nodes. */ switch (i % 4) { case 0: for (k = 0; k < j; k++) { assert_u_eq(heap_validate(&heap), j - k, "Incorrect node count"); node_remove(&heap, &nodes[k]); assert_u_eq(heap_validate(&heap), j - k - 1, "Incorrect node count"); } break; case 1: for (k = j; k > 0; k--) { node_remove(&heap, &nodes[k-1]); assert_u_eq(heap_validate(&heap), k - 1, "Incorrect node count"); } break; case 2: { node_t *prev = NULL; for (k = 0; k < j; k++) { node_t *node = node_remove_first(&heap); assert_u_eq(heap_validate(&heap), j - k - 1, "Incorrect node count"); if (prev != NULL) { assert_d_ge(node_cmp(node, prev), 0, "Bad removal order"); } prev = node; } break; } case 3: { node_t *prev = NULL; for (k = 0; k < j; k++) { node_t *node = heap_first(&heap); assert_u_eq(heap_validate(&heap), j - k, "Incorrect node count"); if (prev != NULL) { assert_d_ge(node_cmp(node, prev), 0, "Bad removal order"); } node_remove(&heap, node); assert_u_eq(heap_validate(&heap), j - k - 1, "Incorrect node count"); prev = node; } break; } default: not_reached(); } assert_ptr_null(heap_first(&heap), "Heap should be empty"); assert_true(heap_empty(&heap), "Heap should be empty"); } } fini_gen_rand(sfmt); #undef NNODES #undef SEED } TEST_END int main(void) { return (test( test_ph_empty, test_ph_random)); }
6,510
21.37457
73
c
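node_cmp above computes its three-way result as (a->key > b->key) - (a->key < b->key) and then breaks ties on node address so equal keys still get a stable total order. The subtraction-free idiom is worth isolating: the naive a - b three-way compare truncates or wraps for wide unsigned types, while this form always yields exactly -1, 0, or 1.

#include <assert.h>
#include <stdint.h>

/* Branch-light three-way compare; never overflows. */
static int
cmp_u64(uint64_t a, uint64_t b)
{
	return ((a > b) - (a < b));
}

int
main(void)
{
	assert(cmp_u64(1, 2) == -1);
	assert(cmp_u64(2, 2) == 0);
	/* (int)(a - b) would wrap here; the idiom stays correct. */
	assert(cmp_u64(UINT64_MAX, 0) == 1);
	return (0);
}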
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/jemalloc/test/unit/prof_reset.c
#include "test/jemalloc_test.h" #ifdef JEMALLOC_PROF const char *malloc_conf = "prof:true,prof_active:false,lg_prof_sample:0"; #endif static int prof_dump_open_intercept(bool propagate_err, const char *filename) { int fd; fd = open("/dev/null", O_WRONLY); assert_d_ne(fd, -1, "Unexpected open() failure"); return (fd); } static void set_prof_active(bool active) { assert_d_eq(mallctl("prof.active", NULL, NULL, (void *)&active, sizeof(active)), 0, "Unexpected mallctl failure"); } static size_t get_lg_prof_sample(void) { size_t lg_prof_sample; size_t sz = sizeof(size_t); assert_d_eq(mallctl("prof.lg_sample", (void *)&lg_prof_sample, &sz, NULL, 0), 0, "Unexpected mallctl failure while reading profiling sample rate"); return (lg_prof_sample); } static void do_prof_reset(size_t lg_prof_sample) { assert_d_eq(mallctl("prof.reset", NULL, NULL, (void *)&lg_prof_sample, sizeof(size_t)), 0, "Unexpected mallctl failure while resetting profile data"); assert_zu_eq(lg_prof_sample, get_lg_prof_sample(), "Expected profile sample rate change"); } TEST_BEGIN(test_prof_reset_basic) { size_t lg_prof_sample_orig, lg_prof_sample, lg_prof_sample_next; size_t sz; unsigned i; test_skip_if(!config_prof); sz = sizeof(size_t); assert_d_eq(mallctl("opt.lg_prof_sample", (void *)&lg_prof_sample_orig, &sz, NULL, 0), 0, "Unexpected mallctl failure while reading profiling sample rate"); assert_zu_eq(lg_prof_sample_orig, 0, "Unexpected profiling sample rate"); lg_prof_sample = get_lg_prof_sample(); assert_zu_eq(lg_prof_sample_orig, lg_prof_sample, "Unexpected disagreement between \"opt.lg_prof_sample\" and " "\"prof.lg_sample\""); /* Test simple resets. */ for (i = 0; i < 2; i++) { assert_d_eq(mallctl("prof.reset", NULL, NULL, NULL, 0), 0, "Unexpected mallctl failure while resetting profile data"); lg_prof_sample = get_lg_prof_sample(); assert_zu_eq(lg_prof_sample_orig, lg_prof_sample, "Unexpected profile sample rate change"); } /* Test resets with prof.lg_sample changes. */ lg_prof_sample_next = 1; for (i = 0; i < 2; i++) { do_prof_reset(lg_prof_sample_next); lg_prof_sample = get_lg_prof_sample(); assert_zu_eq(lg_prof_sample, lg_prof_sample_next, "Expected profile sample rate change"); lg_prof_sample_next = lg_prof_sample_orig; } /* Make sure the test code restored prof.lg_sample. 
*/ lg_prof_sample = get_lg_prof_sample(); assert_zu_eq(lg_prof_sample_orig, lg_prof_sample, "Unexpected disagreement between \"opt.lg_prof_sample\" and " "\"prof.lg_sample\""); } TEST_END bool prof_dump_header_intercepted = false; prof_cnt_t cnt_all_copy = {0, 0, 0, 0}; static bool prof_dump_header_intercept(tsdn_t *tsdn, bool propagate_err, const prof_cnt_t *cnt_all) { prof_dump_header_intercepted = true; memcpy(&cnt_all_copy, cnt_all, sizeof(prof_cnt_t)); return (false); } TEST_BEGIN(test_prof_reset_cleanup) { void *p; prof_dump_header_t *prof_dump_header_orig; test_skip_if(!config_prof); set_prof_active(true); assert_zu_eq(prof_bt_count(), 0, "Expected 0 backtraces"); p = mallocx(1, 0); assert_ptr_not_null(p, "Unexpected mallocx() failure"); assert_zu_eq(prof_bt_count(), 1, "Expected 1 backtrace"); prof_dump_header_orig = prof_dump_header; prof_dump_header = prof_dump_header_intercept; assert_false(prof_dump_header_intercepted, "Unexpected intercept"); assert_d_eq(mallctl("prof.dump", NULL, NULL, NULL, 0), 0, "Unexpected error while dumping heap profile"); assert_true(prof_dump_header_intercepted, "Expected intercept"); assert_u64_eq(cnt_all_copy.curobjs, 1, "Expected 1 allocation"); assert_d_eq(mallctl("prof.reset", NULL, NULL, NULL, 0), 0, "Unexpected error while resetting heap profile data"); assert_d_eq(mallctl("prof.dump", NULL, NULL, NULL, 0), 0, "Unexpected error while dumping heap profile"); assert_u64_eq(cnt_all_copy.curobjs, 0, "Expected 0 allocations"); assert_zu_eq(prof_bt_count(), 1, "Expected 1 backtrace"); prof_dump_header = prof_dump_header_orig; dallocx(p, 0); assert_zu_eq(prof_bt_count(), 0, "Expected 0 backtraces"); set_prof_active(false); } TEST_END #define NTHREADS 4 #define NALLOCS_PER_THREAD (1U << 13) #define OBJ_RING_BUF_COUNT 1531 #define RESET_INTERVAL (1U << 10) #define DUMP_INTERVAL 3677 static void * thd_start(void *varg) { unsigned thd_ind = *(unsigned *)varg; unsigned i; void *objs[OBJ_RING_BUF_COUNT]; memset(objs, 0, sizeof(objs)); for (i = 0; i < NALLOCS_PER_THREAD; i++) { if (i % RESET_INTERVAL == 0) { assert_d_eq(mallctl("prof.reset", NULL, NULL, NULL, 0), 0, "Unexpected error while resetting heap profile " "data"); } if (i % DUMP_INTERVAL == 0) { assert_d_eq(mallctl("prof.dump", NULL, NULL, NULL, 0), 0, "Unexpected error while dumping heap profile"); } { void **pp = &objs[i % OBJ_RING_BUF_COUNT]; if (*pp != NULL) { dallocx(*pp, 0); *pp = NULL; } *pp = btalloc(1, thd_ind*NALLOCS_PER_THREAD + i); assert_ptr_not_null(*pp, "Unexpected btalloc() failure"); } } /* Clean up any remaining objects. 
*/ for (i = 0; i < OBJ_RING_BUF_COUNT; i++) { void **pp = &objs[i % OBJ_RING_BUF_COUNT]; if (*pp != NULL) { dallocx(*pp, 0); *pp = NULL; } } return (NULL); } TEST_BEGIN(test_prof_reset) { size_t lg_prof_sample_orig; thd_t thds[NTHREADS]; unsigned thd_args[NTHREADS]; unsigned i; size_t bt_count, tdata_count; test_skip_if(!config_prof); bt_count = prof_bt_count(); assert_zu_eq(bt_count, 0, "Unexpected pre-existing tdata structures"); tdata_count = prof_tdata_count(); lg_prof_sample_orig = get_lg_prof_sample(); do_prof_reset(5); set_prof_active(true); for (i = 0; i < NTHREADS; i++) { thd_args[i] = i; thd_create(&thds[i], thd_start, (void *)&thd_args[i]); } for (i = 0; i < NTHREADS; i++) thd_join(thds[i], NULL); assert_zu_eq(prof_bt_count(), bt_count, "Unexpected bactrace count change"); assert_zu_eq(prof_tdata_count(), tdata_count, "Unexpected remaining tdata structures"); set_prof_active(false); do_prof_reset(lg_prof_sample_orig); } TEST_END #undef NTHREADS #undef NALLOCS_PER_THREAD #undef OBJ_RING_BUF_COUNT #undef RESET_INTERVAL #undef DUMP_INTERVAL /* Test sampling at the same allocation site across resets. */ #define NITER 10 TEST_BEGIN(test_xallocx) { size_t lg_prof_sample_orig; unsigned i; void *ptrs[NITER]; test_skip_if(!config_prof); lg_prof_sample_orig = get_lg_prof_sample(); set_prof_active(true); /* Reset profiling. */ do_prof_reset(0); for (i = 0; i < NITER; i++) { void *p; size_t sz, nsz; /* Reset profiling. */ do_prof_reset(0); /* Allocate small object (which will be promoted). */ p = ptrs[i] = mallocx(1, 0); assert_ptr_not_null(p, "Unexpected mallocx() failure"); /* Reset profiling. */ do_prof_reset(0); /* Perform successful xallocx(). */ sz = sallocx(p, 0); assert_zu_eq(xallocx(p, sz, 0, 0), sz, "Unexpected xallocx() failure"); /* Perform unsuccessful xallocx(). */ nsz = nallocx(sz+1, 0); assert_zu_eq(xallocx(p, nsz, 0, 0), sz, "Unexpected xallocx() success"); } for (i = 0; i < NITER; i++) { /* dallocx. */ dallocx(ptrs[i], 0); } set_prof_active(false); do_prof_reset(lg_prof_sample_orig); } TEST_END #undef NITER int main(void) { /* Intercept dumping prior to running any tests. */ prof_dump_open = prof_dump_open_intercept; return (test( test_prof_reset_basic, test_prof_reset_cleanup, test_prof_reset, test_xallocx)); }
7,580
23.855738
72
c
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/jemalloc/test/unit/hash.c
/* * This file is based on code that is part of SMHasher * (https://code.google.com/p/smhasher/), and is subject to the MIT license * (http://www.opensource.org/licenses/mit-license.php). Both email addresses * associated with the source code's revision history belong to Austin Appleby, * and the revision history ranges from 2010 to 2012. Therefore the copyright * and license are here taken to be: * * Copyright (c) 2010-2012 Austin Appleby * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ #include "test/jemalloc_test.h" typedef enum { hash_variant_x86_32, hash_variant_x86_128, hash_variant_x64_128 } hash_variant_t; static int hash_variant_bits(hash_variant_t variant) { switch (variant) { case hash_variant_x86_32: return (32); case hash_variant_x86_128: return (128); case hash_variant_x64_128: return (128); default: not_reached(); } } static const char * hash_variant_string(hash_variant_t variant) { switch (variant) { case hash_variant_x86_32: return ("hash_x86_32"); case hash_variant_x86_128: return ("hash_x86_128"); case hash_variant_x64_128: return ("hash_x64_128"); default: not_reached(); } } #define KEY_SIZE 256 static void hash_variant_verify_key(hash_variant_t variant, uint8_t *key) { const int hashbytes = hash_variant_bits(variant) / 8; const int hashes_size = hashbytes * 256; VARIABLE_ARRAY(uint8_t, hashes, hashes_size); VARIABLE_ARRAY(uint8_t, final, hashbytes); unsigned i; uint32_t computed, expected; memset(key, 0, KEY_SIZE); memset(hashes, 0, hashes_size); memset(final, 0, hashbytes); /* * Hash keys of the form {0}, {0,1}, {0,1,2}, ..., {0,1,...,255} as the * seed. */ for (i = 0; i < 256; i++) { key[i] = (uint8_t)i; switch (variant) { case hash_variant_x86_32: { uint32_t out; out = hash_x86_32(key, i, 256-i); memcpy(&hashes[i*hashbytes], &out, hashbytes); break; } case hash_variant_x86_128: { uint64_t out[2]; hash_x86_128(key, i, 256-i, out); memcpy(&hashes[i*hashbytes], out, hashbytes); break; } case hash_variant_x64_128: { uint64_t out[2]; hash_x64_128(key, i, 256-i, out); memcpy(&hashes[i*hashbytes], out, hashbytes); break; } default: not_reached(); } } /* Hash the result array. 
*/ switch (variant) { case hash_variant_x86_32: { uint32_t out = hash_x86_32(hashes, hashes_size, 0); memcpy(final, &out, sizeof(out)); break; } case hash_variant_x86_128: { uint64_t out[2]; hash_x86_128(hashes, hashes_size, 0, out); memcpy(final, out, sizeof(out)); break; } case hash_variant_x64_128: { uint64_t out[2]; hash_x64_128(hashes, hashes_size, 0, out); memcpy(final, out, sizeof(out)); break; } default: not_reached(); } computed = (final[0] << 0) | (final[1] << 8) | (final[2] << 16) | (final[3] << 24); switch (variant) { #ifdef JEMALLOC_BIG_ENDIAN case hash_variant_x86_32: expected = 0x6213303eU; break; case hash_variant_x86_128: expected = 0x266820caU; break; case hash_variant_x64_128: expected = 0xcc622b6fU; break; #else case hash_variant_x86_32: expected = 0xb0f57ee3U; break; case hash_variant_x86_128: expected = 0xb3ece62aU; break; case hash_variant_x64_128: expected = 0x6384ba69U; break; #endif default: not_reached(); } assert_u32_eq(computed, expected, "Hash mismatch for %s(): expected %#x but got %#x", hash_variant_string(variant), expected, computed); } static void hash_variant_verify(hash_variant_t variant) { #define MAX_ALIGN 16 uint8_t key[KEY_SIZE + (MAX_ALIGN - 1)]; unsigned i; for (i = 0; i < MAX_ALIGN; i++) hash_variant_verify_key(variant, &key[i]); #undef MAX_ALIGN } #undef KEY_SIZE TEST_BEGIN(test_hash_x86_32) { hash_variant_verify(hash_variant_x86_32); } TEST_END TEST_BEGIN(test_hash_x86_128) { hash_variant_verify(hash_variant_x86_128); } TEST_END TEST_BEGIN(test_hash_x64_128) { hash_variant_verify(hash_variant_x64_128); } TEST_END int main(void) { return (test( test_hash_x86_32, test_hash_x86_128, test_hash_x64_128)); }
5,031
26.053763
80
c
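hash_variant_verify_key above implements the SMHasher verification scheme: hash the key prefixes {0}, {0,1}, ..., {0..255} with length-dependent seeds, concatenate the 256 digests, then hash that buffer down to a single word, so one expected constant exercises every key length and seed. A sketch of the same scheme with a toy seeded FNV-1a standing in for hash_x86_32() (the constant it prints is only meaningful for this toy function):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Toy 32-bit seeded FNV-1a; a stand-in for the real hash under test. */
static uint32_t
fnv1a32(const uint8_t *data, size_t len, uint32_t seed)
{
	uint32_t h = 2166136261u ^ seed;
	size_t i;

	for (i = 0; i < len; i++) {
		h ^= data[i];
		h *= 16777619u;
	}
	return (h);
}

int
main(void)
{
	uint8_t key[256], hashes[256 * 4];
	unsigned i;

	for (i = 0; i < 256; i++) {
		key[i] = (uint8_t)i;
		/* Hash the prefix of length i with seed 256-i, exactly as in
		 * hash_variant_verify_key() above. */
		uint32_t out = fnv1a32(key, i, 256 - i);
		memcpy(&hashes[i * 4], &out, 4);
	}
	/* Collapse all 256 digests into one checkable constant. */
	printf("verification word: %#x\n",
	    fnv1a32(hashes, sizeof(hashes), 0));
	return (0);
}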
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/jemalloc/test/unit/smoothstep.c
#include "test/jemalloc_test.h" static const uint64_t smoothstep_tab[] = { #define STEP(step, h, x, y) \ h, SMOOTHSTEP #undef STEP }; TEST_BEGIN(test_smoothstep_integral) { uint64_t sum, min, max; unsigned i; /* * The integral of smoothstep in the [0..1] range equals 1/2. Verify * that the fixed point representation's integral is no more than * rounding error distant from 1/2. Regarding rounding, each table * element is rounded down to the nearest fixed point value, so the * integral may be off by as much as SMOOTHSTEP_NSTEPS ulps. */ sum = 0; for (i = 0; i < SMOOTHSTEP_NSTEPS; i++) sum += smoothstep_tab[i]; max = (KQU(1) << (SMOOTHSTEP_BFP-1)) * (SMOOTHSTEP_NSTEPS+1); min = max - SMOOTHSTEP_NSTEPS; assert_u64_ge(sum, min, "Integral too small, even accounting for truncation"); assert_u64_le(sum, max, "Integral exceeds 1/2"); if (false) { malloc_printf("%"FMTu64" ulps under 1/2 (limit %d)\n", max - sum, SMOOTHSTEP_NSTEPS); } } TEST_END TEST_BEGIN(test_smoothstep_monotonic) { uint64_t prev_h; unsigned i; /* * The smoothstep function is monotonic in [0..1], i.e. its slope is * non-negative. In practice we want to parametrize table generation * such that piecewise slope is greater than zero, but do not require * that here. */ prev_h = 0; for (i = 0; i < SMOOTHSTEP_NSTEPS; i++) { uint64_t h = smoothstep_tab[i]; assert_u64_ge(h, prev_h, "Piecewise non-monotonic, i=%u", i); prev_h = h; } assert_u64_eq(smoothstep_tab[SMOOTHSTEP_NSTEPS-1], (KQU(1) << SMOOTHSTEP_BFP), "Last step must equal 1"); } TEST_END TEST_BEGIN(test_smoothstep_slope) { uint64_t prev_h, prev_delta; unsigned i; /* * The smoothstep slope strictly increases until x=0.5, and then * strictly decreases until x=1.0. Verify the slightly weaker * requirement of monotonicity, so that inadequate table precision does * not cause false test failures. */ prev_h = 0; prev_delta = 0; for (i = 0; i < SMOOTHSTEP_NSTEPS / 2 + SMOOTHSTEP_NSTEPS % 2; i++) { uint64_t h = smoothstep_tab[i]; uint64_t delta = h - prev_h; assert_u64_ge(delta, prev_delta, "Slope must monotonically increase in 0.0 <= x <= 0.5, " "i=%u", i); prev_h = h; prev_delta = delta; } prev_h = KQU(1) << SMOOTHSTEP_BFP; prev_delta = 0; for (i = SMOOTHSTEP_NSTEPS-1; i >= SMOOTHSTEP_NSTEPS / 2; i--) { uint64_t h = smoothstep_tab[i]; uint64_t delta = prev_h - h; assert_u64_ge(delta, prev_delta, "Slope must monotonically decrease in 0.5 <= x <= 1.0, " "i=%u", i); prev_h = h; prev_delta = delta; } } TEST_END int main(void) { return (test( test_smoothstep_integral, test_smoothstep_monotonic, test_smoothstep_slope)); }
2,728
24.504673
72
c
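test_smoothstep_integral above depends on the analytic fact that smoothstep integrates to exactly 1/2 over [0, 1]. Assuming the widely used "smootherstep" polynomial y = 6x^5 - 15x^4 + 10x^3 (jemalloc generates its fixed-point table from this family), the exact integral is 6/6 - 15/5 + 10/4 = 1/2, which a quick midpoint-rule check confirms:

#include <stdio.h>

static double
smootherstep(double x)
{
	return (x * x * x * (x * (x * 6.0 - 15.0) + 10.0));
}

int
main(void)
{
	const int n = 1 << 20;
	double sum = 0.0;
	int i;

	for (i = 0; i < n; i++)
		sum += smootherstep((i + 0.5) / n);	/* midpoint rule */
	printf("integral ~= %.9f (expect 0.5)\n", sum / n);
	return (0);
}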
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/jemalloc/test/unit/prof_gdump.c
#include "test/jemalloc_test.h" #ifdef JEMALLOC_PROF const char *malloc_conf = "prof:true,prof_active:false,prof_gdump:true"; #endif static bool did_prof_dump_open; static int prof_dump_open_intercept(bool propagate_err, const char *filename) { int fd; did_prof_dump_open = true; fd = open("/dev/null", O_WRONLY); assert_d_ne(fd, -1, "Unexpected open() failure"); return (fd); } TEST_BEGIN(test_gdump) { bool active, gdump, gdump_old; void *p, *q, *r, *s; size_t sz; test_skip_if(!config_prof); active = true; assert_d_eq(mallctl("prof.active", NULL, NULL, (void *)&active, sizeof(active)), 0, "Unexpected mallctl failure while activating profiling"); prof_dump_open = prof_dump_open_intercept; did_prof_dump_open = false; p = mallocx(chunksize, 0); assert_ptr_not_null(p, "Unexpected mallocx() failure"); assert_true(did_prof_dump_open, "Expected a profile dump"); did_prof_dump_open = false; q = mallocx(chunksize, 0); assert_ptr_not_null(q, "Unexpected mallocx() failure"); assert_true(did_prof_dump_open, "Expected a profile dump"); gdump = false; sz = sizeof(gdump_old); assert_d_eq(mallctl("prof.gdump", (void *)&gdump_old, &sz, (void *)&gdump, sizeof(gdump)), 0, "Unexpected mallctl failure while disabling prof.gdump"); assert(gdump_old); did_prof_dump_open = false; r = mallocx(chunksize, 0); assert_ptr_not_null(q, "Unexpected mallocx() failure"); assert_false(did_prof_dump_open, "Unexpected profile dump"); gdump = true; sz = sizeof(gdump_old); assert_d_eq(mallctl("prof.gdump", (void *)&gdump_old, &sz, (void *)&gdump, sizeof(gdump)), 0, "Unexpected mallctl failure while enabling prof.gdump"); assert(!gdump_old); did_prof_dump_open = false; s = mallocx(chunksize, 0); assert_ptr_not_null(q, "Unexpected mallocx() failure"); assert_true(did_prof_dump_open, "Expected a profile dump"); dallocx(p, 0); dallocx(q, 0); dallocx(r, 0); dallocx(s, 0); } TEST_END int main(void) { return (test( test_gdump)); }
2,010
23.228916
72
c
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/jemalloc/test/unit/prof_active.c
#include "test/jemalloc_test.h" #ifdef JEMALLOC_PROF const char *malloc_conf = "prof:true,prof_thread_active_init:false,lg_prof_sample:0"; #endif static void mallctl_bool_get(const char *name, bool expected, const char *func, int line) { bool old; size_t sz; sz = sizeof(old); assert_d_eq(mallctl(name, (void *)&old, &sz, NULL, 0), 0, "%s():%d: Unexpected mallctl failure reading %s", func, line, name); assert_b_eq(old, expected, "%s():%d: Unexpected %s value", func, line, name); } static void mallctl_bool_set(const char *name, bool old_expected, bool val_new, const char *func, int line) { bool old; size_t sz; sz = sizeof(old); assert_d_eq(mallctl(name, (void *)&old, &sz, (void *)&val_new, sizeof(val_new)), 0, "%s():%d: Unexpected mallctl failure reading/writing %s", func, line, name); assert_b_eq(old, old_expected, "%s():%d: Unexpected %s value", func, line, name); } static void mallctl_prof_active_get_impl(bool prof_active_old_expected, const char *func, int line) { mallctl_bool_get("prof.active", prof_active_old_expected, func, line); } #define mallctl_prof_active_get(a) \ mallctl_prof_active_get_impl(a, __func__, __LINE__) static void mallctl_prof_active_set_impl(bool prof_active_old_expected, bool prof_active_new, const char *func, int line) { mallctl_bool_set("prof.active", prof_active_old_expected, prof_active_new, func, line); } #define mallctl_prof_active_set(a, b) \ mallctl_prof_active_set_impl(a, b, __func__, __LINE__) static void mallctl_thread_prof_active_get_impl(bool thread_prof_active_old_expected, const char *func, int line) { mallctl_bool_get("thread.prof.active", thread_prof_active_old_expected, func, line); } #define mallctl_thread_prof_active_get(a) \ mallctl_thread_prof_active_get_impl(a, __func__, __LINE__) static void mallctl_thread_prof_active_set_impl(bool thread_prof_active_old_expected, bool thread_prof_active_new, const char *func, int line) { mallctl_bool_set("thread.prof.active", thread_prof_active_old_expected, thread_prof_active_new, func, line); } #define mallctl_thread_prof_active_set(a, b) \ mallctl_thread_prof_active_set_impl(a, b, __func__, __LINE__) static void prof_sampling_probe_impl(bool expect_sample, const char *func, int line) { void *p; size_t expected_backtraces = expect_sample ? 1 : 0; assert_zu_eq(prof_bt_count(), 0, "%s():%d: Expected 0 backtraces", func, line); p = mallocx(1, 0); assert_ptr_not_null(p, "Unexpected mallocx() failure"); assert_zu_eq(prof_bt_count(), expected_backtraces, "%s():%d: Unexpected backtrace count", func, line); dallocx(p, 0); } #define prof_sampling_probe(a) \ prof_sampling_probe_impl(a, __func__, __LINE__) TEST_BEGIN(test_prof_active) { test_skip_if(!config_prof); mallctl_prof_active_get(true); mallctl_thread_prof_active_get(false); mallctl_prof_active_set(true, true); mallctl_thread_prof_active_set(false, false); /* prof.active, !thread.prof.active. */ prof_sampling_probe(false); mallctl_prof_active_set(true, false); mallctl_thread_prof_active_set(false, false); /* !prof.active, !thread.prof.active. */ prof_sampling_probe(false); mallctl_prof_active_set(false, false); mallctl_thread_prof_active_set(false, true); /* !prof.active, thread.prof.active. */ prof_sampling_probe(false); mallctl_prof_active_set(false, true); mallctl_thread_prof_active_set(true, true); /* prof.active, thread.prof.active. */ prof_sampling_probe(true); /* Restore settings. */ mallctl_prof_active_set(true, true); mallctl_thread_prof_active_set(true, false); } TEST_END int main(void) { return (test( test_prof_active)); }
3,706
25.862319
77
c
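The mallctl_bool_get/set helpers above thread __func__ and __LINE__ through from macro wrappers so that a failing assertion reports the test line that called the helper rather than the helper itself. The pattern in isolation:

#include <stdio.h>

static void
check_impl(int ok, const char *func, int line)
{
	if (!ok)
		fprintf(stderr, "%s():%d: check failed\n", func, line);
}
/* The macro captures the *caller's* location, not check_impl's. */
#define	check(ok)	check_impl((ok), __func__, __LINE__)

int
main(void)
{
	check(1 + 1 == 2);	/* Passes silently. */
	check(1 + 1 == 3);	/* Reports main() and this line number. */
	return (0);
}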
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/jemalloc/test/unit/quarantine.c
#include "test/jemalloc_test.h" #define QUARANTINE_SIZE 8192 #define STRINGIFY_HELPER(x) #x #define STRINGIFY(x) STRINGIFY_HELPER(x) #ifdef JEMALLOC_FILL const char *malloc_conf = "abort:false,junk:true,redzone:true,quarantine:" STRINGIFY(QUARANTINE_SIZE); #endif void quarantine_clear(void) { void *p; p = mallocx(QUARANTINE_SIZE*2, 0); assert_ptr_not_null(p, "Unexpected mallocx() failure"); dallocx(p, 0); } TEST_BEGIN(test_quarantine) { #define SZ ZU(256) #define NQUARANTINED (QUARANTINE_SIZE/SZ) void *quarantined[NQUARANTINED+1]; size_t i, j; test_skip_if(!config_fill); assert_zu_eq(nallocx(SZ, 0), SZ, "SZ=%zu does not precisely equal a size class", SZ); quarantine_clear(); /* * Allocate enough regions to completely fill the quarantine, plus one * more. The last iteration occurs with a completely full quarantine, * but no regions should be drained from the quarantine until the last * deallocation occurs. Therefore no region recycling should occur * until after this loop completes. */ for (i = 0; i < NQUARANTINED+1; i++) { void *p = mallocx(SZ, 0); assert_ptr_not_null(p, "Unexpected mallocx() failure"); quarantined[i] = p; dallocx(p, 0); for (j = 0; j < i; j++) { assert_ptr_ne(p, quarantined[j], "Quarantined region recycled too early; " "i=%zu, j=%zu", i, j); } } #undef NQUARANTINED #undef SZ } TEST_END static bool detected_redzone_corruption; static void arena_redzone_corruption_replacement(void *ptr, size_t usize, bool after, size_t offset, uint8_t byte) { detected_redzone_corruption = true; } TEST_BEGIN(test_quarantine_redzone) { char *s; arena_redzone_corruption_t *arena_redzone_corruption_orig; test_skip_if(!config_fill); arena_redzone_corruption_orig = arena_redzone_corruption; arena_redzone_corruption = arena_redzone_corruption_replacement; /* Test underflow. */ detected_redzone_corruption = false; s = (char *)mallocx(1, 0); assert_ptr_not_null((void *)s, "Unexpected mallocx() failure"); s[-1] = 0xbb; dallocx(s, 0); assert_true(detected_redzone_corruption, "Did not detect redzone corruption"); /* Test overflow. */ detected_redzone_corruption = false; s = (char *)mallocx(1, 0); assert_ptr_not_null((void *)s, "Unexpected mallocx() failure"); s[sallocx(s, 0)] = 0xbb; dallocx(s, 0); assert_true(detected_redzone_corruption, "Did not detect redzone corruption"); arena_redzone_corruption = arena_redzone_corruption_orig; } TEST_END int main(void) { return (test( test_quarantine, test_quarantine_redzone)); }
2,583
22.706422
74
c
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/jemalloc/test/unit/ticker.c
#include "test/jemalloc_test.h" TEST_BEGIN(test_ticker_tick) { #define NREPS 2 #define NTICKS 3 ticker_t ticker; int32_t i, j; ticker_init(&ticker, NTICKS); for (i = 0; i < NREPS; i++) { for (j = 0; j < NTICKS; j++) { assert_u_eq(ticker_read(&ticker), NTICKS - j, "Unexpected ticker value (i=%d, j=%d)", i, j); assert_false(ticker_tick(&ticker), "Unexpected ticker fire (i=%d, j=%d)", i, j); } assert_u32_eq(ticker_read(&ticker), 0, "Expected ticker depletion"); assert_true(ticker_tick(&ticker), "Expected ticker fire (i=%d)", i); assert_u32_eq(ticker_read(&ticker), NTICKS, "Expected ticker reset"); } #undef NTICKS } TEST_END TEST_BEGIN(test_ticker_ticks) { #define NTICKS 3 ticker_t ticker; ticker_init(&ticker, NTICKS); assert_u_eq(ticker_read(&ticker), NTICKS, "Unexpected ticker value"); assert_false(ticker_ticks(&ticker, NTICKS), "Unexpected ticker fire"); assert_u_eq(ticker_read(&ticker), 0, "Unexpected ticker value"); assert_true(ticker_ticks(&ticker, NTICKS), "Expected ticker fire"); assert_u_eq(ticker_read(&ticker), NTICKS, "Unexpected ticker value"); assert_true(ticker_ticks(&ticker, NTICKS + 1), "Expected ticker fire"); assert_u_eq(ticker_read(&ticker), NTICKS, "Unexpected ticker value"); #undef NTICKS } TEST_END TEST_BEGIN(test_ticker_copy) { #define NTICKS 3 ticker_t ta, tb; ticker_init(&ta, NTICKS); ticker_copy(&tb, &ta); assert_u_eq(ticker_read(&tb), NTICKS, "Unexpected ticker value"); assert_true(ticker_ticks(&tb, NTICKS + 1), "Expected ticker fire"); assert_u_eq(ticker_read(&tb), NTICKS, "Unexpected ticker value"); ticker_tick(&ta); ticker_copy(&tb, &ta); assert_u_eq(ticker_read(&tb), NTICKS - 1, "Unexpected ticker value"); assert_true(ticker_ticks(&tb, NTICKS), "Expected ticker fire"); assert_u_eq(ticker_read(&tb), NTICKS, "Unexpected ticker value"); #undef NTICKS } TEST_END int main(void) { return (test( test_ticker_tick, test_ticker_ticks, test_ticker_copy)); }
2,006
25.064935
72
c
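The ticker exercised above is a simple countdown with rearm-on-fire semantics: reads never mutate, and a tick of n either decrements the remaining budget or, once it is exhausted, "fires" and resets to nticks. A from-scratch sketch (field and function names are ours, not jemalloc's) that satisfies the same assertions as the tests above:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

typedef struct {
	int32_t tick;	/* Remaining budget. */
	int32_t nticks;	/* Rearm value. */
} my_ticker_t;

static void
my_ticker_init(my_ticker_t *t, int32_t nticks)
{
	t->tick = nticks;
	t->nticks = nticks;
}

static bool
my_ticker_ticks(my_ticker_t *t, int32_t n)
{
	if (t->tick < n) {
		t->tick = t->nticks;	/* Fire and rearm. */
		return (true);
	}
	t->tick -= n;
	return (false);
}

int
main(void)
{
	my_ticker_t t;
	int i;

	my_ticker_init(&t, 3);
	for (i = 0; i < 3; i++)
		assert(!my_ticker_ticks(&t, 1));	/* 3 -> 2 -> 1 -> 0 */
	assert(t.tick == 0);
	assert(my_ticker_ticks(&t, 1));	/* Depleted: fires... */
	assert(t.tick == 3);		/* ...and rearms to nticks. */
	return (0);
}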
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/jemalloc/test/unit/size_classes.c
#include "test/jemalloc_test.h" static size_t get_max_size_class(void) { unsigned nhchunks; size_t mib[4]; size_t sz, miblen, max_size_class; sz = sizeof(unsigned); assert_d_eq(mallctl("arenas.nhchunks", (void *)&nhchunks, &sz, NULL, 0), 0, "Unexpected mallctl() error"); miblen = sizeof(mib) / sizeof(size_t); assert_d_eq(mallctlnametomib("arenas.hchunk.0.size", mib, &miblen), 0, "Unexpected mallctlnametomib() error"); mib[2] = nhchunks - 1; sz = sizeof(size_t); assert_d_eq(mallctlbymib(mib, miblen, (void *)&max_size_class, &sz, NULL, 0), 0, "Unexpected mallctlbymib() error"); return (max_size_class); } TEST_BEGIN(test_size_classes) { size_t size_class, max_size_class; szind_t index, max_index; max_size_class = get_max_size_class(); max_index = size2index(max_size_class); for (index = 0, size_class = index2size(index); index < max_index || size_class < max_size_class; index++, size_class = index2size(index)) { assert_true(index < max_index, "Loop conditionals should be equivalent; index=%u, " "size_class=%zu (%#zx)", index, size_class, size_class); assert_true(size_class < max_size_class, "Loop conditionals should be equivalent; index=%u, " "size_class=%zu (%#zx)", index, size_class, size_class); assert_u_eq(index, size2index(size_class), "size2index() does not reverse index2size(): index=%u -->" " size_class=%zu --> index=%u --> size_class=%zu", index, size_class, size2index(size_class), index2size(size2index(size_class))); assert_zu_eq(size_class, index2size(size2index(size_class)), "index2size() does not reverse size2index(): index=%u -->" " size_class=%zu --> index=%u --> size_class=%zu", index, size_class, size2index(size_class), index2size(size2index(size_class))); assert_u_eq(index+1, size2index(size_class+1), "Next size_class does not round up properly"); assert_zu_eq(size_class, (index > 0) ? 
s2u(index2size(index-1)+1) : s2u(1), "s2u() does not round up to size class"); assert_zu_eq(size_class, s2u(size_class-1), "s2u() does not round up to size class"); assert_zu_eq(size_class, s2u(size_class), "s2u() does not compute same size class"); assert_zu_eq(s2u(size_class+1), index2size(index+1), "s2u() does not round up to next size class"); } assert_u_eq(index, size2index(index2size(index)), "size2index() does not reverse index2size()"); assert_zu_eq(max_size_class, index2size(size2index(max_size_class)), "index2size() does not reverse size2index()"); assert_zu_eq(size_class, s2u(index2size(index-1)+1), "s2u() does not round up to size class"); assert_zu_eq(size_class, s2u(size_class-1), "s2u() does not round up to size class"); assert_zu_eq(size_class, s2u(size_class), "s2u() does not compute same size class"); } TEST_END TEST_BEGIN(test_psize_classes) { size_t size_class, max_size_class; pszind_t pind, max_pind; max_size_class = get_max_size_class(); max_pind = psz2ind(max_size_class); for (pind = 0, size_class = pind2sz(pind); pind < max_pind || size_class < max_size_class; pind++, size_class = pind2sz(pind)) { assert_true(pind < max_pind, "Loop conditionals should be equivalent; pind=%u, " "size_class=%zu (%#zx)", pind, size_class, size_class); assert_true(size_class < max_size_class, "Loop conditionals should be equivalent; pind=%u, " "size_class=%zu (%#zx)", pind, size_class, size_class); assert_u_eq(pind, psz2ind(size_class), "psz2ind() does not reverse pind2sz(): pind=%u -->" " size_class=%zu --> pind=%u --> size_class=%zu", pind, size_class, psz2ind(size_class), pind2sz(psz2ind(size_class))); assert_zu_eq(size_class, pind2sz(psz2ind(size_class)), "pind2sz() does not reverse psz2ind(): pind=%u -->" " size_class=%zu --> pind=%u --> size_class=%zu", pind, size_class, psz2ind(size_class), pind2sz(psz2ind(size_class))); assert_u_eq(pind+1, psz2ind(size_class+1), "Next size_class does not round up properly"); assert_zu_eq(size_class, (pind > 0) ? 
psz2u(pind2sz(pind-1)+1) : psz2u(1), "psz2u() does not round up to size class"); assert_zu_eq(size_class, psz2u(size_class-1), "psz2u() does not round up to size class"); assert_zu_eq(size_class, psz2u(size_class), "psz2u() does not compute same size class"); assert_zu_eq(psz2u(size_class+1), pind2sz(pind+1), "psz2u() does not round up to next size class"); } assert_u_eq(pind, psz2ind(pind2sz(pind)), "psz2ind() does not reverse pind2sz()"); assert_zu_eq(max_size_class, pind2sz(psz2ind(max_size_class)), "pind2sz() does not reverse psz2ind()"); assert_zu_eq(size_class, psz2u(pind2sz(pind-1)+1), "psz2u() does not round up to size class"); assert_zu_eq(size_class, psz2u(size_class-1), "psz2u() does not round up to size class"); assert_zu_eq(size_class, psz2u(size_class), "psz2u() does not compute same size class"); } TEST_END TEST_BEGIN(test_overflow) { size_t max_size_class; max_size_class = get_max_size_class(); assert_u_eq(size2index(max_size_class+1), NSIZES, "size2index() should return NSIZES on overflow"); assert_u_eq(size2index(ZU(PTRDIFF_MAX)+1), NSIZES, "size2index() should return NSIZES on overflow"); assert_u_eq(size2index(SIZE_T_MAX), NSIZES, "size2index() should return NSIZES on overflow"); assert_zu_eq(s2u(max_size_class+1), 0, "s2u() should return 0 for unsupported size"); assert_zu_eq(s2u(ZU(PTRDIFF_MAX)+1), 0, "s2u() should return 0 for unsupported size"); assert_zu_eq(s2u(SIZE_T_MAX), 0, "s2u() should return 0 on overflow"); assert_u_eq(psz2ind(max_size_class+1), NPSIZES, "psz2ind() should return NPSIZES on overflow"); assert_u_eq(psz2ind(ZU(PTRDIFF_MAX)+1), NPSIZES, "psz2ind() should return NPSIZES on overflow"); assert_u_eq(psz2ind(SIZE_T_MAX), NPSIZES, "psz2ind() should return NPSIZES on overflow"); assert_zu_eq(psz2u(max_size_class+1), 0, "psz2u() should return 0 for unsupported size"); assert_zu_eq(psz2u(ZU(PTRDIFF_MAX)+1), 0, "psz2u() should return 0 for unsupported size"); assert_zu_eq(psz2u(SIZE_T_MAX), 0, "psz2u() should return 0 on overflow"); } TEST_END int main(void) { return (test( test_size_classes, test_psize_classes, test_overflow)); }
6,414
33.675676
73
c
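The size-class round-trip that test_size_classes checks with the internal size2index()/index2size() pair can also be observed through the public API: nallocx() rounds a request up to its size class, so rounding an already-rounded size is a fixed point. A minimal sketch, assuming jemalloc is installed and linked (cc demo.c -ljemalloc) without a je_ symbol prefix:

#include <stddef.h>
#include <stdio.h>
#include <jemalloc/jemalloc.h>

int main(void) {
    /* nallocx() is idempotent: rounding a size to its class, then
     * rounding the result again, changes nothing. */
    size_t reqs[] = {1, 42, 4095, 4097, 3 * 1024 * 1024};
    size_t i;
    for (i = 0; i < sizeof(reqs) / sizeof(reqs[0]); i++) {
        size_t cls = nallocx(reqs[i], 0);   /* round up to size class */
        printf("request=%zu -> class=%zu, re-rounded=%zu\n",
            reqs[i], cls, nallocx(cls, 0));
    }
    return 0;
}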
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/jemalloc/test/src/SFMT.c
/* * This file derives from SFMT 1.3.3 * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was * released under the terms of the following license: * * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima * University. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of the Hiroshima University nor the names of * its contributors may be used to endorse or promote products * derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /** * @file SFMT.c * @brief SIMD oriented Fast Mersenne Twister(SFMT) * * @author Mutsuo Saito (Hiroshima University) * @author Makoto Matsumoto (Hiroshima University) * * Copyright (C) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima * University. All rights reserved. * * The new BSD License is applied to this software, see LICENSE.txt */ #define SFMT_C_ #include "test/jemalloc_test.h" #include "test/SFMT-params.h" #if defined(JEMALLOC_BIG_ENDIAN) && !defined(BIG_ENDIAN64) #define BIG_ENDIAN64 1 #endif #if defined(__BIG_ENDIAN__) && !defined(__amd64) && !defined(BIG_ENDIAN64) #define BIG_ENDIAN64 1 #endif #if defined(HAVE_ALTIVEC) && !defined(BIG_ENDIAN64) #define BIG_ENDIAN64 1 #endif #if defined(ONLY64) && !defined(BIG_ENDIAN64) #if defined(__GNUC__) #error "-DONLY64 must be specified with -DBIG_ENDIAN64" #endif #undef ONLY64 #endif /*------------------------------------------------------ 128-bit SIMD data type for Altivec, SSE2 or standard C ------------------------------------------------------*/ #if defined(HAVE_ALTIVEC) /** 128-bit data structure */ union W128_T { vector unsigned int s; uint32_t u[4]; }; /** 128-bit data type */ typedef union W128_T w128_t; #elif defined(HAVE_SSE2) /** 128-bit data structure */ union W128_T { __m128i si; uint32_t u[4]; }; /** 128-bit data type */ typedef union W128_T w128_t; #else /** 128-bit data structure */ struct W128_T { uint32_t u[4]; }; /** 128-bit data type */ typedef struct W128_T w128_t; #endif struct sfmt_s { /** the 128-bit internal state array */ w128_t sfmt[N]; /** index counter to the 32-bit internal state array */ int idx; /** a flag: it is 0 if and only if the internal state is not yet * initialized. 
*/ int initialized; }; /*-------------------------------------- FILE GLOBAL VARIABLES internal state, index counter and flag --------------------------------------*/ /** a parity check vector which certificate the period of 2^{MEXP} */ static uint32_t parity[4] = {PARITY1, PARITY2, PARITY3, PARITY4}; /*---------------- STATIC FUNCTIONS ----------------*/ JEMALLOC_INLINE_C int idxof(int i); #if (!defined(HAVE_ALTIVEC)) && (!defined(HAVE_SSE2)) JEMALLOC_INLINE_C void rshift128(w128_t *out, w128_t const *in, int shift); JEMALLOC_INLINE_C void lshift128(w128_t *out, w128_t const *in, int shift); #endif JEMALLOC_INLINE_C void gen_rand_all(sfmt_t *ctx); JEMALLOC_INLINE_C void gen_rand_array(sfmt_t *ctx, w128_t *array, int size); JEMALLOC_INLINE_C uint32_t func1(uint32_t x); JEMALLOC_INLINE_C uint32_t func2(uint32_t x); static void period_certification(sfmt_t *ctx); #if defined(BIG_ENDIAN64) && !defined(ONLY64) JEMALLOC_INLINE_C void swap(w128_t *array, int size); #endif #if defined(HAVE_ALTIVEC) #include "test/SFMT-alti.h" #elif defined(HAVE_SSE2) #include "test/SFMT-sse2.h" #endif /** * This function simulate a 64-bit index of LITTLE ENDIAN * in BIG ENDIAN machine. */ #ifdef ONLY64 JEMALLOC_INLINE_C int idxof(int i) { return i ^ 1; } #else JEMALLOC_INLINE_C int idxof(int i) { return i; } #endif /** * This function simulates SIMD 128-bit right shift by the standard C. * The 128-bit integer given in in is shifted by (shift * 8) bits. * This function simulates the LITTLE ENDIAN SIMD. * @param out the output of this function * @param in the 128-bit data to be shifted * @param shift the shift value */ #if (!defined(HAVE_ALTIVEC)) && (!defined(HAVE_SSE2)) #ifdef ONLY64 JEMALLOC_INLINE_C void rshift128(w128_t *out, w128_t const *in, int shift) { uint64_t th, tl, oh, ol; th = ((uint64_t)in->u[2] << 32) | ((uint64_t)in->u[3]); tl = ((uint64_t)in->u[0] << 32) | ((uint64_t)in->u[1]); oh = th >> (shift * 8); ol = tl >> (shift * 8); ol |= th << (64 - shift * 8); out->u[0] = (uint32_t)(ol >> 32); out->u[1] = (uint32_t)ol; out->u[2] = (uint32_t)(oh >> 32); out->u[3] = (uint32_t)oh; } #else JEMALLOC_INLINE_C void rshift128(w128_t *out, w128_t const *in, int shift) { uint64_t th, tl, oh, ol; th = ((uint64_t)in->u[3] << 32) | ((uint64_t)in->u[2]); tl = ((uint64_t)in->u[1] << 32) | ((uint64_t)in->u[0]); oh = th >> (shift * 8); ol = tl >> (shift * 8); ol |= th << (64 - shift * 8); out->u[1] = (uint32_t)(ol >> 32); out->u[0] = (uint32_t)ol; out->u[3] = (uint32_t)(oh >> 32); out->u[2] = (uint32_t)oh; } #endif /** * This function simulates SIMD 128-bit left shift by the standard C. * The 128-bit integer given in in is shifted by (shift * 8) bits. * This function simulates the LITTLE ENDIAN SIMD. 
* @param out the output of this function * @param in the 128-bit data to be shifted * @param shift the shift value */ #ifdef ONLY64 JEMALLOC_INLINE_C void lshift128(w128_t *out, w128_t const *in, int shift) { uint64_t th, tl, oh, ol; th = ((uint64_t)in->u[2] << 32) | ((uint64_t)in->u[3]); tl = ((uint64_t)in->u[0] << 32) | ((uint64_t)in->u[1]); oh = th << (shift * 8); ol = tl << (shift * 8); oh |= tl >> (64 - shift * 8); out->u[0] = (uint32_t)(ol >> 32); out->u[1] = (uint32_t)ol; out->u[2] = (uint32_t)(oh >> 32); out->u[3] = (uint32_t)oh; } #else JEMALLOC_INLINE_C void lshift128(w128_t *out, w128_t const *in, int shift) { uint64_t th, tl, oh, ol; th = ((uint64_t)in->u[3] << 32) | ((uint64_t)in->u[2]); tl = ((uint64_t)in->u[1] << 32) | ((uint64_t)in->u[0]); oh = th << (shift * 8); ol = tl << (shift * 8); oh |= tl >> (64 - shift * 8); out->u[1] = (uint32_t)(ol >> 32); out->u[0] = (uint32_t)ol; out->u[3] = (uint32_t)(oh >> 32); out->u[2] = (uint32_t)oh; } #endif #endif /** * This function represents the recursion formula. * @param r output * @param a a 128-bit part of the internal state array * @param b a 128-bit part of the internal state array * @param c a 128-bit part of the internal state array * @param d a 128-bit part of the internal state array */ #if (!defined(HAVE_ALTIVEC)) && (!defined(HAVE_SSE2)) #ifdef ONLY64 JEMALLOC_INLINE_C void do_recursion(w128_t *r, w128_t *a, w128_t *b, w128_t *c, w128_t *d) { w128_t x; w128_t y; lshift128(&x, a, SL2); rshift128(&y, c, SR2); r->u[0] = a->u[0] ^ x.u[0] ^ ((b->u[0] >> SR1) & MSK2) ^ y.u[0] ^ (d->u[0] << SL1); r->u[1] = a->u[1] ^ x.u[1] ^ ((b->u[1] >> SR1) & MSK1) ^ y.u[1] ^ (d->u[1] << SL1); r->u[2] = a->u[2] ^ x.u[2] ^ ((b->u[2] >> SR1) & MSK4) ^ y.u[2] ^ (d->u[2] << SL1); r->u[3] = a->u[3] ^ x.u[3] ^ ((b->u[3] >> SR1) & MSK3) ^ y.u[3] ^ (d->u[3] << SL1); } #else JEMALLOC_INLINE_C void do_recursion(w128_t *r, w128_t *a, w128_t *b, w128_t *c, w128_t *d) { w128_t x; w128_t y; lshift128(&x, a, SL2); rshift128(&y, c, SR2); r->u[0] = a->u[0] ^ x.u[0] ^ ((b->u[0] >> SR1) & MSK1) ^ y.u[0] ^ (d->u[0] << SL1); r->u[1] = a->u[1] ^ x.u[1] ^ ((b->u[1] >> SR1) & MSK2) ^ y.u[1] ^ (d->u[1] << SL1); r->u[2] = a->u[2] ^ x.u[2] ^ ((b->u[2] >> SR1) & MSK3) ^ y.u[2] ^ (d->u[2] << SL1); r->u[3] = a->u[3] ^ x.u[3] ^ ((b->u[3] >> SR1) & MSK4) ^ y.u[3] ^ (d->u[3] << SL1); } #endif #endif #if (!defined(HAVE_ALTIVEC)) && (!defined(HAVE_SSE2)) /** * This function fills the internal state array with pseudorandom * integers. */ JEMALLOC_INLINE_C void gen_rand_all(sfmt_t *ctx) { int i; w128_t *r1, *r2; r1 = &ctx->sfmt[N - 2]; r2 = &ctx->sfmt[N - 1]; for (i = 0; i < N - POS1; i++) { do_recursion(&ctx->sfmt[i], &ctx->sfmt[i], &ctx->sfmt[i + POS1], r1, r2); r1 = r2; r2 = &ctx->sfmt[i]; } for (; i < N; i++) { do_recursion(&ctx->sfmt[i], &ctx->sfmt[i], &ctx->sfmt[i + POS1 - N], r1, r2); r1 = r2; r2 = &ctx->sfmt[i]; } } /** * This function fills the user-specified array with pseudorandom * integers. * * @param array an 128-bit array to be filled by pseudorandom numbers. * @param size number of 128-bit pseudorandom numbers to be generated. 
*/ JEMALLOC_INLINE_C void gen_rand_array(sfmt_t *ctx, w128_t *array, int size) { int i, j; w128_t *r1, *r2; r1 = &ctx->sfmt[N - 2]; r2 = &ctx->sfmt[N - 1]; for (i = 0; i < N - POS1; i++) { do_recursion(&array[i], &ctx->sfmt[i], &ctx->sfmt[i + POS1], r1, r2); r1 = r2; r2 = &array[i]; } for (; i < N; i++) { do_recursion(&array[i], &ctx->sfmt[i], &array[i + POS1 - N], r1, r2); r1 = r2; r2 = &array[i]; } for (; i < size - N; i++) { do_recursion(&array[i], &array[i - N], &array[i + POS1 - N], r1, r2); r1 = r2; r2 = &array[i]; } for (j = 0; j < 2 * N - size; j++) { ctx->sfmt[j] = array[j + size - N]; } for (; i < size; i++, j++) { do_recursion(&array[i], &array[i - N], &array[i + POS1 - N], r1, r2); r1 = r2; r2 = &array[i]; ctx->sfmt[j] = array[i]; } } #endif #if defined(BIG_ENDIAN64) && !defined(ONLY64) && !defined(HAVE_ALTIVEC) JEMALLOC_INLINE_C void swap(w128_t *array, int size) { int i; uint32_t x, y; for (i = 0; i < size; i++) { x = array[i].u[0]; y = array[i].u[2]; array[i].u[0] = array[i].u[1]; array[i].u[2] = array[i].u[3]; array[i].u[1] = x; array[i].u[3] = y; } } #endif /** * This function represents a function used in the initialization * by init_by_array * @param x 32-bit integer * @return 32-bit integer */ static uint32_t func1(uint32_t x) { return (x ^ (x >> 27)) * (uint32_t)1664525UL; } /** * This function represents a function used in the initialization * by init_by_array * @param x 32-bit integer * @return 32-bit integer */ static uint32_t func2(uint32_t x) { return (x ^ (x >> 27)) * (uint32_t)1566083941UL; } /** * This function certificate the period of 2^{MEXP} */ static void period_certification(sfmt_t *ctx) { int inner = 0; int i, j; uint32_t work; uint32_t *psfmt32 = &ctx->sfmt[0].u[0]; for (i = 0; i < 4; i++) inner ^= psfmt32[idxof(i)] & parity[i]; for (i = 16; i > 0; i >>= 1) inner ^= inner >> i; inner &= 1; /* check OK */ if (inner == 1) { return; } /* check NG, and modification */ for (i = 0; i < 4; i++) { work = 1; for (j = 0; j < 32; j++) { if ((work & parity[i]) != 0) { psfmt32[idxof(i)] ^= work; return; } work = work << 1; } } } /*---------------- PUBLIC FUNCTIONS ----------------*/ /** * This function returns the identification string. * The string shows the word size, the Mersenne exponent, * and all parameters of this generator. */ const char *get_idstring(void) { return IDSTR; } /** * This function returns the minimum size of array used for \b * fill_array32() function. * @return minimum size of array used for fill_array32() function. */ int get_min_array_size32(void) { return N32; } /** * This function returns the minimum size of array used for \b * fill_array64() function. * @return minimum size of array used for fill_array64() function. */ int get_min_array_size64(void) { return N64; } #ifndef ONLY64 /** * This function generates and returns 32-bit pseudorandom number. * init_gen_rand or init_by_array must be called before this function. * @return 32-bit pseudorandom number */ uint32_t gen_rand32(sfmt_t *ctx) { uint32_t r; uint32_t *psfmt32 = &ctx->sfmt[0].u[0]; assert(ctx->initialized); if (ctx->idx >= N32) { gen_rand_all(ctx); ctx->idx = 0; } r = psfmt32[ctx->idx++]; return r; } /* Generate a random integer in [0..limit). */ uint32_t gen_rand32_range(sfmt_t *ctx, uint32_t limit) { uint32_t ret, above; above = 0xffffffffU - (0xffffffffU % limit); while (1) { ret = gen_rand32(ctx); if (ret < above) { ret %= limit; break; } } return ret; } #endif /** * This function generates and returns 64-bit pseudorandom number. 
* init_gen_rand or init_by_array must be called before this function. * The function gen_rand64 should not be called after gen_rand32, * unless an initialization is again executed. * @return 64-bit pseudorandom number */ uint64_t gen_rand64(sfmt_t *ctx) { #if defined(BIG_ENDIAN64) && !defined(ONLY64) uint32_t r1, r2; uint32_t *psfmt32 = &ctx->sfmt[0].u[0]; #else uint64_t r; uint64_t *psfmt64 = (uint64_t *)&ctx->sfmt[0].u[0]; #endif assert(ctx->initialized); assert(ctx->idx % 2 == 0); if (ctx->idx >= N32) { gen_rand_all(ctx); ctx->idx = 0; } #if defined(BIG_ENDIAN64) && !defined(ONLY64) r1 = psfmt32[ctx->idx]; r2 = psfmt32[ctx->idx + 1]; ctx->idx += 2; return ((uint64_t)r2 << 32) | r1; #else r = psfmt64[ctx->idx / 2]; ctx->idx += 2; return r; #endif } /* Generate a random integer in [0..limit). */ uint64_t gen_rand64_range(sfmt_t *ctx, uint64_t limit) { uint64_t ret, above; above = KQU(0xffffffffffffffff) - (KQU(0xffffffffffffffff) % limit); while (1) { ret = gen_rand64(ctx); if (ret < above) { ret %= limit; break; } } return ret; } #ifndef ONLY64 /** * This function generates pseudorandom 32-bit integers in the * specified array[] by one call. The number of pseudorandom integers * is specified by the argument size, which must be at least 624 and a * multiple of four. The generation by this function is much faster * than the following gen_rand function. * * For initialization, init_gen_rand or init_by_array must be called * before the first call of this function. This function can not be * used after calling gen_rand function, without initialization. * * @param array an array where pseudorandom 32-bit integers are filled * by this function. The pointer to the array must be \b "aligned" * (namely, must be a multiple of 16) in the SIMD version, since it * refers to the address of a 128-bit integer. In the standard C * version, the pointer is arbitrary. * * @param size the number of 32-bit pseudorandom integers to be * generated. size must be a multiple of 4, and greater than or equal * to (MEXP / 128 + 1) * 4. * * @note \b memalign or \b posix_memalign is available to get aligned * memory. Mac OSX doesn't have these functions, but \b malloc of OSX * returns the pointer to the aligned memory block. */ void fill_array32(sfmt_t *ctx, uint32_t *array, int size) { assert(ctx->initialized); assert(ctx->idx == N32); assert(size % 4 == 0); assert(size >= N32); gen_rand_array(ctx, (w128_t *)array, size / 4); ctx->idx = N32; } #endif /** * This function generates pseudorandom 64-bit integers in the * specified array[] by one call. The number of pseudorandom integers * is specified by the argument size, which must be at least 312 and a * multiple of two. The generation by this function is much faster * than the following gen_rand function. * * For initialization, init_gen_rand or init_by_array must be called * before the first call of this function. This function can not be * used after calling gen_rand function, without initialization. * * @param array an array where pseudorandom 64-bit integers are filled * by this function. The pointer to the array must be "aligned" * (namely, must be a multiple of 16) in the SIMD version, since it * refers to the address of a 128-bit integer. In the standard C * version, the pointer is arbitrary. * * @param size the number of 64-bit pseudorandom integers to be * generated. size must be a multiple of 2, and greater than or equal * to (MEXP / 128 + 1) * 2 * * @note \b memalign or \b posix_memalign is available to get aligned * memory. 
Mac OSX doesn't have these functions, but \b malloc of OSX * returns the pointer to the aligned memory block. */ void fill_array64(sfmt_t *ctx, uint64_t *array, int size) { assert(ctx->initialized); assert(ctx->idx == N32); assert(size % 2 == 0); assert(size >= N64); gen_rand_array(ctx, (w128_t *)array, size / 2); ctx->idx = N32; #if defined(BIG_ENDIAN64) && !defined(ONLY64) swap((w128_t *)array, size /2); #endif } /** * This function initializes the internal state array with a 32-bit * integer seed. * * @param seed a 32-bit integer used as the seed. */ sfmt_t *init_gen_rand(uint32_t seed) { void *p; sfmt_t *ctx; int i; uint32_t *psfmt32; if (posix_memalign(&p, sizeof(w128_t), sizeof(sfmt_t)) != 0) { return NULL; } ctx = (sfmt_t *)p; psfmt32 = &ctx->sfmt[0].u[0]; psfmt32[idxof(0)] = seed; for (i = 1; i < N32; i++) { psfmt32[idxof(i)] = 1812433253UL * (psfmt32[idxof(i - 1)] ^ (psfmt32[idxof(i - 1)] >> 30)) + i; } ctx->idx = N32; period_certification(ctx); ctx->initialized = 1; return ctx; } /** * This function initializes the internal state array, * with an array of 32-bit integers used as the seeds * @param init_key the array of 32-bit integers, used as a seed. * @param key_length the length of init_key. */ sfmt_t *init_by_array(uint32_t *init_key, int key_length) { void *p; sfmt_t *ctx; int i, j, count; uint32_t r; int lag; int mid; int size = N * 4; uint32_t *psfmt32; if (posix_memalign(&p, sizeof(w128_t), sizeof(sfmt_t)) != 0) { return NULL; } ctx = (sfmt_t *)p; psfmt32 = &ctx->sfmt[0].u[0]; if (size >= 623) { lag = 11; } else if (size >= 68) { lag = 7; } else if (size >= 39) { lag = 5; } else { lag = 3; } mid = (size - lag) / 2; memset(ctx->sfmt, 0x8b, sizeof(ctx->sfmt)); if (key_length + 1 > N32) { count = key_length + 1; } else { count = N32; } r = func1(psfmt32[idxof(0)] ^ psfmt32[idxof(mid)] ^ psfmt32[idxof(N32 - 1)]); psfmt32[idxof(mid)] += r; r += key_length; psfmt32[idxof(mid + lag)] += r; psfmt32[idxof(0)] = r; count--; for (i = 1, j = 0; (j < count) && (j < key_length); j++) { r = func1(psfmt32[idxof(i)] ^ psfmt32[idxof((i + mid) % N32)] ^ psfmt32[idxof((i + N32 - 1) % N32)]); psfmt32[idxof((i + mid) % N32)] += r; r += init_key[j] + i; psfmt32[idxof((i + mid + lag) % N32)] += r; psfmt32[idxof(i)] = r; i = (i + 1) % N32; } for (; j < count; j++) { r = func1(psfmt32[idxof(i)] ^ psfmt32[idxof((i + mid) % N32)] ^ psfmt32[idxof((i + N32 - 1) % N32)]); psfmt32[idxof((i + mid) % N32)] += r; r += i; psfmt32[idxof((i + mid + lag) % N32)] += r; psfmt32[idxof(i)] = r; i = (i + 1) % N32; } for (j = 0; j < N32; j++) { r = func2(psfmt32[idxof(i)] + psfmt32[idxof((i + mid) % N32)] + psfmt32[idxof((i + N32 - 1) % N32)]); psfmt32[idxof((i + mid) % N32)] ^= r; r -= i; psfmt32[idxof((i + mid + lag) % N32)] ^= r; psfmt32[idxof(i)] = r; i = (i + 1) % N32; } ctx->idx = N32; period_certification(ctx); ctx->initialized = 1; return ctx; } void fini_gen_rand(sfmt_t *ctx) { assert(ctx != NULL); ctx->initialized = 0; free(ctx); }
20,695
27.744444
79
c
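A minimal usage sketch for the SFMT generator defined above. This assumes it is compiled inside the jemalloc test tree, where test/jemalloc_test.h supplies the sfmt_t declarations, and a build in which gen_rand32() is available (i.e. ONLY64 is not defined); the seed value is arbitrary:

#include <stdio.h>
#include "test/jemalloc_test.h"

int main(void) {
    sfmt_t *ctx = init_gen_rand(12345);   /* seed with any 32-bit value */
    int i;
    if (ctx == NULL)
        return 1;
    for (i = 0; i < 4; i++)
        printf("raw: %u\n", gen_rand32(ctx));
    /* gen_rand32_range() uses rejection sampling, so the bounded draw
     * is unbiased. */
    printf("in [0..100): %u\n", gen_rand32_range(ctx, 100));
    fini_gen_rand(ctx);   /* clears the initialized flag and frees ctx */
    return 0;
}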
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/jemalloc/test/integration/allocated.c
#include "test/jemalloc_test.h" static const bool config_stats = #ifdef JEMALLOC_STATS true #else false #endif ; void * thd_start(void *arg) { int err; void *p; uint64_t a0, a1, d0, d1; uint64_t *ap0, *ap1, *dp0, *dp1; size_t sz, usize; sz = sizeof(a0); if ((err = mallctl("thread.allocated", (void *)&a0, &sz, NULL, 0))) { if (err == ENOENT) goto label_ENOENT; test_fail("%s(): Error in mallctl(): %s", __func__, strerror(err)); } sz = sizeof(ap0); if ((err = mallctl("thread.allocatedp", (void *)&ap0, &sz, NULL, 0))) { if (err == ENOENT) goto label_ENOENT; test_fail("%s(): Error in mallctl(): %s", __func__, strerror(err)); } assert_u64_eq(*ap0, a0, "\"thread.allocatedp\" should provide a pointer to internal " "storage"); sz = sizeof(d0); if ((err = mallctl("thread.deallocated", (void *)&d0, &sz, NULL, 0))) { if (err == ENOENT) goto label_ENOENT; test_fail("%s(): Error in mallctl(): %s", __func__, strerror(err)); } sz = sizeof(dp0); if ((err = mallctl("thread.deallocatedp", (void *)&dp0, &sz, NULL, 0))) { if (err == ENOENT) goto label_ENOENT; test_fail("%s(): Error in mallctl(): %s", __func__, strerror(err)); } assert_u64_eq(*dp0, d0, "\"thread.deallocatedp\" should provide a pointer to internal " "storage"); p = malloc(1); assert_ptr_not_null(p, "Unexpected malloc() error"); sz = sizeof(a1); mallctl("thread.allocated", (void *)&a1, &sz, NULL, 0); sz = sizeof(ap1); mallctl("thread.allocatedp", (void *)&ap1, &sz, NULL, 0); assert_u64_eq(*ap1, a1, "Dereferenced \"thread.allocatedp\" value should equal " "\"thread.allocated\" value"); assert_ptr_eq(ap0, ap1, "Pointer returned by \"thread.allocatedp\" should not change"); usize = malloc_usable_size(p); assert_u64_le(a0 + usize, a1, "Allocated memory counter should increase by at least the amount " "explicitly allocated"); free(p); sz = sizeof(d1); mallctl("thread.deallocated", (void *)&d1, &sz, NULL, 0); sz = sizeof(dp1); mallctl("thread.deallocatedp", (void *)&dp1, &sz, NULL, 0); assert_u64_eq(*dp1, d1, "Dereferenced \"thread.deallocatedp\" value should equal " "\"thread.deallocated\" value"); assert_ptr_eq(dp0, dp1, "Pointer returned by \"thread.deallocatedp\" should not change"); assert_u64_le(d0 + usize, d1, "Deallocated memory counter should increase by at least the amount " "explicitly deallocated"); return (NULL); label_ENOENT: assert_false(config_stats, "ENOENT should only be returned if stats are disabled"); test_skip("\"thread.allocated\" mallctl not available"); return (NULL); } TEST_BEGIN(test_main_thread) { thd_start(NULL); } TEST_END TEST_BEGIN(test_subthread) { thd_t thd; thd_create(&thd, thd_start, NULL); thd_join(thd, NULL); } TEST_END int main(void) { /* Run tests multiple times to check for bad interactions. */ return (test( test_main_thread, test_subthread, test_main_thread, test_subthread, test_main_thread)); }
3,058
23.086614
73
c
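Outside the test harness, the same per-thread counter can be read through the public mallctl() interface. A sketch, assuming a stats-enabled (--enable-stats), unprefixed jemalloc build; "thread.allocated" is the mallctl name exercised by the test above:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <jemalloc/jemalloc.h>

int main(void) {
    uint64_t a0, a1;
    size_t sz = sizeof(uint64_t);
    void *p;

    /* ENOENT here means jemalloc was built without --enable-stats. */
    if (mallctl("thread.allocated", (void *)&a0, &sz, NULL, 0) != 0)
        return 1;
    p = malloc(1000);
    sz = sizeof(uint64_t);
    mallctl("thread.allocated", (void *)&a1, &sz, NULL, 0);
    printf("a 1000-byte request grew the counter by %llu bytes\n",
        (unsigned long long)(a1 - a0));
    free(p);
    return 0;
}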
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/jemalloc/test/integration/xallocx.c
#include "test/jemalloc_test.h" #ifdef JEMALLOC_FILL const char *malloc_conf = "junk:false"; #endif /* * Use a separate arena for xallocx() extension/contraction tests so that * internal allocation e.g. by heap profiling can't interpose allocations where * xallocx() would ordinarily be able to extend. */ static unsigned arena_ind(void) { static unsigned ind = 0; if (ind == 0) { size_t sz = sizeof(ind); assert_d_eq(mallctl("arenas.extend", (void *)&ind, &sz, NULL, 0), 0, "Unexpected mallctl failure creating arena"); } return (ind); } TEST_BEGIN(test_same_size) { void *p; size_t sz, tsz; p = mallocx(42, 0); assert_ptr_not_null(p, "Unexpected mallocx() error"); sz = sallocx(p, 0); tsz = xallocx(p, sz, 0, 0); assert_zu_eq(tsz, sz, "Unexpected size change: %zu --> %zu", sz, tsz); dallocx(p, 0); } TEST_END TEST_BEGIN(test_extra_no_move) { void *p; size_t sz, tsz; p = mallocx(42, 0); assert_ptr_not_null(p, "Unexpected mallocx() error"); sz = sallocx(p, 0); tsz = xallocx(p, sz, sz-42, 0); assert_zu_eq(tsz, sz, "Unexpected size change: %zu --> %zu", sz, tsz); dallocx(p, 0); } TEST_END TEST_BEGIN(test_no_move_fail) { void *p; size_t sz, tsz; p = mallocx(42, 0); assert_ptr_not_null(p, "Unexpected mallocx() error"); sz = sallocx(p, 0); tsz = xallocx(p, sz + 5, 0, 0); assert_zu_eq(tsz, sz, "Unexpected size change: %zu --> %zu", sz, tsz); dallocx(p, 0); } TEST_END static unsigned get_nsizes_impl(const char *cmd) { unsigned ret; size_t z; z = sizeof(unsigned); assert_d_eq(mallctl(cmd, (void *)&ret, &z, NULL, 0), 0, "Unexpected mallctl(\"%s\", ...) failure", cmd); return (ret); } static unsigned get_nsmall(void) { return (get_nsizes_impl("arenas.nbins")); } static unsigned get_nlarge(void) { return (get_nsizes_impl("arenas.nlruns")); } static unsigned get_nhuge(void) { return (get_nsizes_impl("arenas.nhchunks")); } static size_t get_size_impl(const char *cmd, size_t ind) { size_t ret; size_t z; size_t mib[4]; size_t miblen = 4; z = sizeof(size_t); assert_d_eq(mallctlnametomib(cmd, mib, &miblen), 0, "Unexpected mallctlnametomib(\"%s\", ...) failure", cmd); mib[2] = ind; z = sizeof(size_t); assert_d_eq(mallctlbymib(mib, miblen, (void *)&ret, &z, NULL, 0), 0, "Unexpected mallctlbymib([\"%s\", %zu], ...) failure", cmd, ind); return (ret); } static size_t get_small_size(size_t ind) { return (get_size_impl("arenas.bin.0.size", ind)); } static size_t get_large_size(size_t ind) { return (get_size_impl("arenas.lrun.0.size", ind)); } static size_t get_huge_size(size_t ind) { return (get_size_impl("arenas.hchunk.0.size", ind)); } TEST_BEGIN(test_size) { size_t small0, hugemax; void *p; /* Get size classes. */ small0 = get_small_size(0); hugemax = get_huge_size(get_nhuge()-1); p = mallocx(small0, 0); assert_ptr_not_null(p, "Unexpected mallocx() error"); /* Test smallest supported size. */ assert_zu_eq(xallocx(p, 1, 0, 0), small0, "Unexpected xallocx() behavior"); /* Test largest supported size. */ assert_zu_le(xallocx(p, hugemax, 0, 0), hugemax, "Unexpected xallocx() behavior"); /* Test size overflow. */ assert_zu_le(xallocx(p, hugemax+1, 0, 0), hugemax, "Unexpected xallocx() behavior"); assert_zu_le(xallocx(p, SIZE_T_MAX, 0, 0), hugemax, "Unexpected xallocx() behavior"); dallocx(p, 0); } TEST_END TEST_BEGIN(test_size_extra_overflow) { size_t small0, hugemax; void *p; /* Get size classes. */ small0 = get_small_size(0); hugemax = get_huge_size(get_nhuge()-1); p = mallocx(small0, 0); assert_ptr_not_null(p, "Unexpected mallocx() error"); /* Test overflows that can be resolved by clamping extra. 
*/ assert_zu_le(xallocx(p, hugemax-1, 2, 0), hugemax, "Unexpected xallocx() behavior"); assert_zu_le(xallocx(p, hugemax, 1, 0), hugemax, "Unexpected xallocx() behavior"); /* Test overflow such that hugemax-size underflows. */ assert_zu_le(xallocx(p, hugemax+1, 2, 0), hugemax, "Unexpected xallocx() behavior"); assert_zu_le(xallocx(p, hugemax+2, 3, 0), hugemax, "Unexpected xallocx() behavior"); assert_zu_le(xallocx(p, SIZE_T_MAX-2, 2, 0), hugemax, "Unexpected xallocx() behavior"); assert_zu_le(xallocx(p, SIZE_T_MAX-1, 1, 0), hugemax, "Unexpected xallocx() behavior"); dallocx(p, 0); } TEST_END TEST_BEGIN(test_extra_small) { size_t small0, small1, hugemax; void *p; /* Get size classes. */ small0 = get_small_size(0); small1 = get_small_size(1); hugemax = get_huge_size(get_nhuge()-1); p = mallocx(small0, 0); assert_ptr_not_null(p, "Unexpected mallocx() error"); assert_zu_eq(xallocx(p, small1, 0, 0), small0, "Unexpected xallocx() behavior"); assert_zu_eq(xallocx(p, small1, 0, 0), small0, "Unexpected xallocx() behavior"); assert_zu_eq(xallocx(p, small0, small1 - small0, 0), small0, "Unexpected xallocx() behavior"); /* Test size+extra overflow. */ assert_zu_eq(xallocx(p, small0, hugemax - small0 + 1, 0), small0, "Unexpected xallocx() behavior"); assert_zu_eq(xallocx(p, small0, SIZE_T_MAX - small0, 0), small0, "Unexpected xallocx() behavior"); dallocx(p, 0); } TEST_END TEST_BEGIN(test_extra_large) { int flags = MALLOCX_ARENA(arena_ind()); size_t smallmax, large0, large1, large2, huge0, hugemax; void *p; /* Get size classes. */ smallmax = get_small_size(get_nsmall()-1); large0 = get_large_size(0); large1 = get_large_size(1); large2 = get_large_size(2); huge0 = get_huge_size(0); hugemax = get_huge_size(get_nhuge()-1); p = mallocx(large2, flags); assert_ptr_not_null(p, "Unexpected mallocx() error"); assert_zu_eq(xallocx(p, large2, 0, flags), large2, "Unexpected xallocx() behavior"); /* Test size decrease with zero extra. */ assert_zu_eq(xallocx(p, large0, 0, flags), large0, "Unexpected xallocx() behavior"); assert_zu_eq(xallocx(p, smallmax, 0, flags), large0, "Unexpected xallocx() behavior"); assert_zu_eq(xallocx(p, large2, 0, flags), large2, "Unexpected xallocx() behavior"); /* Test size decrease with non-zero extra. */ assert_zu_eq(xallocx(p, large0, large2 - large0, flags), large2, "Unexpected xallocx() behavior"); assert_zu_eq(xallocx(p, large1, large2 - large1, flags), large2, "Unexpected xallocx() behavior"); assert_zu_eq(xallocx(p, large0, large1 - large0, flags), large1, "Unexpected xallocx() behavior"); assert_zu_eq(xallocx(p, smallmax, large0 - smallmax, flags), large0, "Unexpected xallocx() behavior"); assert_zu_eq(xallocx(p, large0, 0, flags), large0, "Unexpected xallocx() behavior"); /* Test size increase with zero extra. */ assert_zu_eq(xallocx(p, large2, 0, flags), large2, "Unexpected xallocx() behavior"); assert_zu_eq(xallocx(p, huge0, 0, flags), large2, "Unexpected xallocx() behavior"); assert_zu_eq(xallocx(p, large0, 0, flags), large0, "Unexpected xallocx() behavior"); /* Test size increase with non-zero extra. */ assert_zu_lt(xallocx(p, large0, huge0 - large0, flags), huge0, "Unexpected xallocx() behavior"); assert_zu_eq(xallocx(p, large0, 0, flags), large0, "Unexpected xallocx() behavior"); /* Test size increase with non-zero extra. */ assert_zu_eq(xallocx(p, large0, large2 - large0, flags), large2, "Unexpected xallocx() behavior"); assert_zu_eq(xallocx(p, large2, 0, flags), large2, "Unexpected xallocx() behavior"); /* Test size+extra overflow. 
*/ assert_zu_lt(xallocx(p, large2, hugemax - large2 + 1, flags), huge0, "Unexpected xallocx() behavior"); dallocx(p, flags); } TEST_END TEST_BEGIN(test_extra_huge) { int flags = MALLOCX_ARENA(arena_ind()); size_t largemax, huge1, huge2, huge3, hugemax; void *p; /* Get size classes. */ largemax = get_large_size(get_nlarge()-1); huge1 = get_huge_size(1); huge2 = get_huge_size(2); huge3 = get_huge_size(3); hugemax = get_huge_size(get_nhuge()-1); p = mallocx(huge3, flags); assert_ptr_not_null(p, "Unexpected mallocx() error"); assert_zu_eq(xallocx(p, huge3, 0, flags), huge3, "Unexpected xallocx() behavior"); /* Test size decrease with zero extra. */ assert_zu_ge(xallocx(p, huge1, 0, flags), huge1, "Unexpected xallocx() behavior"); assert_zu_ge(xallocx(p, largemax, 0, flags), huge1, "Unexpected xallocx() behavior"); assert_zu_eq(xallocx(p, huge3, 0, flags), huge3, "Unexpected xallocx() behavior"); /* Test size decrease with non-zero extra. */ assert_zu_eq(xallocx(p, huge1, huge3 - huge1, flags), huge3, "Unexpected xallocx() behavior"); assert_zu_eq(xallocx(p, huge2, huge3 - huge2, flags), huge3, "Unexpected xallocx() behavior"); assert_zu_eq(xallocx(p, huge1, huge2 - huge1, flags), huge2, "Unexpected xallocx() behavior"); assert_zu_ge(xallocx(p, largemax, huge1 - largemax, flags), huge1, "Unexpected xallocx() behavior"); assert_zu_ge(xallocx(p, huge1, 0, flags), huge1, "Unexpected xallocx() behavior"); /* Test size increase with zero extra. */ assert_zu_le(xallocx(p, huge3, 0, flags), huge3, "Unexpected xallocx() behavior"); assert_zu_le(xallocx(p, hugemax+1, 0, flags), huge3, "Unexpected xallocx() behavior"); assert_zu_ge(xallocx(p, huge1, 0, flags), huge1, "Unexpected xallocx() behavior"); /* Test size increase with non-zero extra. */ assert_zu_le(xallocx(p, huge1, SIZE_T_MAX - huge1, flags), hugemax, "Unexpected xallocx() behavior"); assert_zu_ge(xallocx(p, huge1, 0, flags), huge1, "Unexpected xallocx() behavior"); /* Test size increase with non-zero extra. */ assert_zu_le(xallocx(p, huge1, huge3 - huge1, flags), huge3, "Unexpected xallocx() behavior"); assert_zu_eq(xallocx(p, huge3, 0, flags), huge3, "Unexpected xallocx() behavior"); /* Test size+extra overflow. */ assert_zu_le(xallocx(p, huge3, hugemax - huge3 + 1, flags), hugemax, "Unexpected xallocx() behavior"); dallocx(p, flags); } TEST_END static void print_filled_extents(const void *p, uint8_t c, size_t len) { const uint8_t *pc = (const uint8_t *)p; size_t i, range0; uint8_t c0; malloc_printf(" p=%p, c=%#x, len=%zu:", p, c, len); range0 = 0; c0 = pc[0]; for (i = 0; i < len; i++) { if (pc[i] != c0) { malloc_printf(" %#x[%zu..%zu)", c0, range0, i); range0 = i; c0 = pc[i]; } } malloc_printf(" %#x[%zu..%zu)\n", c0, range0, i); } static bool validate_fill(const void *p, uint8_t c, size_t offset, size_t len) { const uint8_t *pc = (const uint8_t *)p; bool err; size_t i; for (i = offset, err = false; i < offset+len; i++) { if (pc[i] != c) err = true; } if (err) print_filled_extents(p, c, offset + len); return (err); } static void test_zero(size_t szmin, size_t szmax) { int flags = MALLOCX_ARENA(arena_ind()) | MALLOCX_ZERO; size_t sz, nsz; void *p; #define FILL_BYTE 0x7aU sz = szmax; p = mallocx(sz, flags); assert_ptr_not_null(p, "Unexpected mallocx() error"); assert_false(validate_fill(p, 0x00, 0, sz), "Memory not filled: sz=%zu", sz); /* * Fill with non-zero so that non-debug builds are more likely to detect * errors. 
*/ memset(p, FILL_BYTE, sz); assert_false(validate_fill(p, FILL_BYTE, 0, sz), "Memory not filled: sz=%zu", sz); /* Shrink in place so that we can expect growing in place to succeed. */ sz = szmin; assert_zu_eq(xallocx(p, sz, 0, flags), sz, "Unexpected xallocx() error"); assert_false(validate_fill(p, FILL_BYTE, 0, sz), "Memory not filled: sz=%zu", sz); for (sz = szmin; sz < szmax; sz = nsz) { nsz = nallocx(sz+1, flags); assert_zu_eq(xallocx(p, sz+1, 0, flags), nsz, "Unexpected xallocx() failure"); assert_false(validate_fill(p, FILL_BYTE, 0, sz), "Memory not filled: sz=%zu", sz); assert_false(validate_fill(p, 0x00, sz, nsz-sz), "Memory not filled: sz=%zu, nsz-sz=%zu", sz, nsz-sz); memset((void *)((uintptr_t)p + sz), FILL_BYTE, nsz-sz); assert_false(validate_fill(p, FILL_BYTE, 0, nsz), "Memory not filled: nsz=%zu", nsz); } dallocx(p, flags); } TEST_BEGIN(test_zero_large) { size_t large0, largemax; /* Get size classes. */ large0 = get_large_size(0); largemax = get_large_size(get_nlarge()-1); test_zero(large0, largemax); } TEST_END TEST_BEGIN(test_zero_huge) { size_t huge0, huge1; /* Get size classes. */ huge0 = get_huge_size(0); huge1 = get_huge_size(1); test_zero(huge1, huge0 * 2); } TEST_END int main(void) { return (test( test_same_size, test_extra_no_move, test_no_move_fail, test_size, test_size_extra_overflow, test_extra_small, test_extra_large, test_extra_huge, test_zero_large, test_zero_huge)); }
12,608
24.319277
79
c
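The in-place contract that the tests above rely on can be seen in isolation: xallocx() never moves the allocation, and its return value is the real size afterwards, equal to the old size when no resize was possible. A minimal sketch against the public API (unprefixed build assumed):

#include <stdio.h>
#include <jemalloc/jemalloc.h>

int main(void) {
    void *p = mallocx(4096, 0);
    size_t sz, tsz;
    if (p == NULL)
        return 1;
    sz = sallocx(p, 0);                 /* current real size */
    tsz = xallocx(p, sz * 2, 0, 0);     /* attempt in-place growth */
    printf("before=%zu after=%zu (%s)\n", sz, tsz,
        tsz >= sz * 2 ? "grew in place" : "no in-place growth");
    dallocx(p, 0);                      /* p is still valid either way */
    return 0;
}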
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/jemalloc/test/integration/mallocx.c
#include "test/jemalloc_test.h" #ifdef JEMALLOC_FILL const char *malloc_conf = "junk:false"; #endif static unsigned get_nsizes_impl(const char *cmd) { unsigned ret; size_t z; z = sizeof(unsigned); assert_d_eq(mallctl(cmd, (void *)&ret, &z, NULL, 0), 0, "Unexpected mallctl(\"%s\", ...) failure", cmd); return (ret); } static unsigned get_nhuge(void) { return (get_nsizes_impl("arenas.nhchunks")); } static size_t get_size_impl(const char *cmd, size_t ind) { size_t ret; size_t z; size_t mib[4]; size_t miblen = 4; z = sizeof(size_t); assert_d_eq(mallctlnametomib(cmd, mib, &miblen), 0, "Unexpected mallctlnametomib(\"%s\", ...) failure", cmd); mib[2] = ind; z = sizeof(size_t); assert_d_eq(mallctlbymib(mib, miblen, (void *)&ret, &z, NULL, 0), 0, "Unexpected mallctlbymib([\"%s\", %zu], ...) failure", cmd, ind); return (ret); } static size_t get_huge_size(size_t ind) { return (get_size_impl("arenas.hchunk.0.size", ind)); } /* * On systems which can't merge extents, tests that call this function generate * a lot of dirty memory very quickly. Purging between cycles mitigates * potential OOM on e.g. 32-bit Windows. */ static void purge(void) { assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0, "Unexpected mallctl error"); } TEST_BEGIN(test_overflow) { size_t hugemax; hugemax = get_huge_size(get_nhuge()-1); assert_ptr_null(mallocx(hugemax+1, 0), "Expected OOM for mallocx(size=%#zx, 0)", hugemax+1); assert_ptr_null(mallocx(ZU(PTRDIFF_MAX)+1, 0), "Expected OOM for mallocx(size=%#zx, 0)", ZU(PTRDIFF_MAX)+1); assert_ptr_null(mallocx(SIZE_T_MAX, 0), "Expected OOM for mallocx(size=%#zx, 0)", SIZE_T_MAX); assert_ptr_null(mallocx(1, MALLOCX_ALIGN(ZU(PTRDIFF_MAX)+1)), "Expected OOM for mallocx(size=1, MALLOCX_ALIGN(%#zx))", ZU(PTRDIFF_MAX)+1); } TEST_END TEST_BEGIN(test_oom) { size_t hugemax; bool oom; void *ptrs[3]; unsigned i; /* * It should be impossible to allocate three objects that each consume * nearly half the virtual address space. 
*/ hugemax = get_huge_size(get_nhuge()-1); oom = false; for (i = 0; i < sizeof(ptrs) / sizeof(void *); i++) { ptrs[i] = mallocx(hugemax, 0); if (ptrs[i] == NULL) oom = true; } assert_true(oom, "Expected OOM during series of calls to mallocx(size=%zu, 0)", hugemax); for (i = 0; i < sizeof(ptrs) / sizeof(void *); i++) { if (ptrs[i] != NULL) dallocx(ptrs[i], 0); } purge(); #if LG_SIZEOF_PTR == 3 assert_ptr_null(mallocx(0x8000000000000000ULL, MALLOCX_ALIGN(0x8000000000000000ULL)), "Expected OOM for mallocx()"); assert_ptr_null(mallocx(0x8000000000000000ULL, MALLOCX_ALIGN(0x80000000)), "Expected OOM for mallocx()"); #else assert_ptr_null(mallocx(0x80000000UL, MALLOCX_ALIGN(0x80000000UL)), "Expected OOM for mallocx()"); #endif } TEST_END TEST_BEGIN(test_basic) { #define MAXSZ (((size_t)1) << 23) size_t sz; for (sz = 1; sz < MAXSZ; sz = nallocx(sz, 0) + 1) { size_t nsz, rsz; void *p; nsz = nallocx(sz, 0); assert_zu_ne(nsz, 0, "Unexpected nallocx() error"); p = mallocx(sz, 0); assert_ptr_not_null(p, "Unexpected mallocx(size=%zx, flags=0) error", sz); rsz = sallocx(p, 0); assert_zu_ge(rsz, sz, "Real size smaller than expected"); assert_zu_eq(nsz, rsz, "nallocx()/sallocx() size mismatch"); dallocx(p, 0); p = mallocx(sz, 0); assert_ptr_not_null(p, "Unexpected mallocx(size=%zx, flags=0) error", sz); dallocx(p, 0); nsz = nallocx(sz, MALLOCX_ZERO); assert_zu_ne(nsz, 0, "Unexpected nallocx() error"); p = mallocx(sz, MALLOCX_ZERO); assert_ptr_not_null(p, "Unexpected mallocx(size=%zx, flags=MALLOCX_ZERO) error", nsz); rsz = sallocx(p, 0); assert_zu_eq(nsz, rsz, "nallocx()/sallocx() rsize mismatch"); dallocx(p, 0); purge(); } #undef MAXSZ } TEST_END TEST_BEGIN(test_alignment_and_size) { #define MAXALIGN (((size_t)1) << 23) #define NITER 4 size_t nsz, rsz, sz, alignment, total; unsigned i; void *ps[NITER]; for (i = 0; i < NITER; i++) ps[i] = NULL; for (alignment = 8; alignment <= MAXALIGN; alignment <<= 1) { total = 0; for (sz = 1; sz < 3 * alignment && sz < (1U << 31); sz += (alignment >> (LG_SIZEOF_PTR-1)) - 1) { for (i = 0; i < NITER; i++) { nsz = nallocx(sz, MALLOCX_ALIGN(alignment) | MALLOCX_ZERO); assert_zu_ne(nsz, 0, "nallocx() error for alignment=%zu, " "size=%zu (%#zx)", alignment, sz, sz); ps[i] = mallocx(sz, MALLOCX_ALIGN(alignment) | MALLOCX_ZERO); assert_ptr_not_null(ps[i], "mallocx() error for alignment=%zu, " "size=%zu (%#zx)", alignment, sz, sz); rsz = sallocx(ps[i], 0); assert_zu_ge(rsz, sz, "Real size smaller than expected for " "alignment=%zu, size=%zu", alignment, sz); assert_zu_eq(nsz, rsz, "nallocx()/sallocx() size mismatch for " "alignment=%zu, size=%zu", alignment, sz); assert_ptr_null( (void *)((uintptr_t)ps[i] & (alignment-1)), "%p inadequately aligned for" " alignment=%zu, size=%zu", ps[i], alignment, sz); total += rsz; if (total >= (MAXALIGN << 1)) break; } for (i = 0; i < NITER; i++) { if (ps[i] != NULL) { dallocx(ps[i], 0); ps[i] = NULL; } } } purge(); } #undef MAXALIGN #undef NITER } TEST_END int main(void) { return (test( test_overflow, test_oom, test_basic, test_alignment_and_size)); }
5,506
22.434043
79
c
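The MALLOCX_ALIGN and MALLOCX_ZERO flags combined in test_alignment_and_size can be exercised directly. A minimal sketch (unprefixed jemalloc build assumed); sallocx() reports the rounded-up real size:

#include <stdint.h>
#include <stdio.h>
#include <jemalloc/jemalloc.h>

int main(void) {
    /* Request 100 zeroed bytes aligned to 4 KiB. */
    void *p = mallocx(100, MALLOCX_ALIGN(4096) | MALLOCX_ZERO);
    if (p == NULL)
        return 1;
    printf("p=%p aligned=%s real=%zu\n", p,
        ((uintptr_t)p & 4095) == 0 ? "yes" : "no", sallocx(p, 0));
    dallocx(p, 0);
    return 0;
}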
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/jemalloc/test/integration/rallocx.c
#include "test/jemalloc_test.h" static unsigned get_nsizes_impl(const char *cmd) { unsigned ret; size_t z; z = sizeof(unsigned); assert_d_eq(mallctl(cmd, (void *)&ret, &z, NULL, 0), 0, "Unexpected mallctl(\"%s\", ...) failure", cmd); return (ret); } static unsigned get_nhuge(void) { return (get_nsizes_impl("arenas.nhchunks")); } static size_t get_size_impl(const char *cmd, size_t ind) { size_t ret; size_t z; size_t mib[4]; size_t miblen = 4; z = sizeof(size_t); assert_d_eq(mallctlnametomib(cmd, mib, &miblen), 0, "Unexpected mallctlnametomib(\"%s\", ...) failure", cmd); mib[2] = ind; z = sizeof(size_t); assert_d_eq(mallctlbymib(mib, miblen, (void *)&ret, &z, NULL, 0), 0, "Unexpected mallctlbymib([\"%s\", %zu], ...) failure", cmd, ind); return (ret); } static size_t get_huge_size(size_t ind) { return (get_size_impl("arenas.hchunk.0.size", ind)); } TEST_BEGIN(test_grow_and_shrink) { void *p, *q; size_t tsz; #define NCYCLES 3 unsigned i, j; #define NSZS 2500 size_t szs[NSZS]; #define MAXSZ ZU(12 * 1024 * 1024) p = mallocx(1, 0); assert_ptr_not_null(p, "Unexpected mallocx() error"); szs[0] = sallocx(p, 0); for (i = 0; i < NCYCLES; i++) { for (j = 1; j < NSZS && szs[j-1] < MAXSZ; j++) { q = rallocx(p, szs[j-1]+1, 0); assert_ptr_not_null(q, "Unexpected rallocx() error for size=%zu-->%zu", szs[j-1], szs[j-1]+1); szs[j] = sallocx(q, 0); assert_zu_ne(szs[j], szs[j-1]+1, "Expected size to be at least: %zu", szs[j-1]+1); p = q; } for (j--; j > 0; j--) { q = rallocx(p, szs[j-1], 0); assert_ptr_not_null(q, "Unexpected rallocx() error for size=%zu-->%zu", szs[j], szs[j-1]); tsz = sallocx(q, 0); assert_zu_eq(tsz, szs[j-1], "Expected size=%zu, got size=%zu", szs[j-1], tsz); p = q; } } dallocx(p, 0); #undef MAXSZ #undef NSZS #undef NCYCLES } TEST_END static bool validate_fill(const void *p, uint8_t c, size_t offset, size_t len) { bool ret = false; const uint8_t *buf = (const uint8_t *)p; size_t i; for (i = 0; i < len; i++) { uint8_t b = buf[offset+i]; if (b != c) { test_fail("Allocation at %p (len=%zu) contains %#x " "rather than %#x at offset %zu", p, len, b, c, offset+i); ret = true; } } return (ret); } TEST_BEGIN(test_zero) { void *p, *q; size_t psz, qsz, i, j; size_t start_sizes[] = {1, 3*1024, 63*1024, 4095*1024}; #define FILL_BYTE 0xaaU #define RANGE 2048 for (i = 0; i < sizeof(start_sizes)/sizeof(size_t); i++) { size_t start_size = start_sizes[i]; p = mallocx(start_size, MALLOCX_ZERO); assert_ptr_not_null(p, "Unexpected mallocx() error"); psz = sallocx(p, 0); assert_false(validate_fill(p, 0, 0, psz), "Expected zeroed memory"); memset(p, FILL_BYTE, psz); assert_false(validate_fill(p, FILL_BYTE, 0, psz), "Expected filled memory"); for (j = 1; j < RANGE; j++) { q = rallocx(p, start_size+j, MALLOCX_ZERO); assert_ptr_not_null(q, "Unexpected rallocx() error"); qsz = sallocx(q, 0); if (q != p || qsz != psz) { assert_false(validate_fill(q, FILL_BYTE, 0, psz), "Expected filled memory"); assert_false(validate_fill(q, 0, psz, qsz-psz), "Expected zeroed memory"); } if (psz != qsz) { memset((void *)((uintptr_t)q+psz), FILL_BYTE, qsz-psz); psz = qsz; } p = q; } assert_false(validate_fill(p, FILL_BYTE, 0, psz), "Expected filled memory"); dallocx(p, 0); } #undef FILL_BYTE } TEST_END TEST_BEGIN(test_align) { void *p, *q; size_t align; #define MAX_ALIGN (ZU(1) << 25) align = ZU(1); p = mallocx(1, MALLOCX_ALIGN(align)); assert_ptr_not_null(p, "Unexpected mallocx() error"); for (align <<= 1; align <= MAX_ALIGN; align <<= 1) { q = rallocx(p, 1, MALLOCX_ALIGN(align)); assert_ptr_not_null(q, "Unexpected rallocx() error for 
align=%zu", align); assert_ptr_null( (void *)((uintptr_t)q & (align-1)), "%p inadequately aligned for align=%zu", q, align); p = q; } dallocx(p, 0); #undef MAX_ALIGN } TEST_END TEST_BEGIN(test_lg_align_and_zero) { void *p, *q; unsigned lg_align; size_t sz; #define MAX_LG_ALIGN 25 #define MAX_VALIDATE (ZU(1) << 22) lg_align = 0; p = mallocx(1, MALLOCX_LG_ALIGN(lg_align)|MALLOCX_ZERO); assert_ptr_not_null(p, "Unexpected mallocx() error"); for (lg_align++; lg_align <= MAX_LG_ALIGN; lg_align++) { q = rallocx(p, 1, MALLOCX_LG_ALIGN(lg_align)|MALLOCX_ZERO); assert_ptr_not_null(q, "Unexpected rallocx() error for lg_align=%u", lg_align); assert_ptr_null( (void *)((uintptr_t)q & ((ZU(1) << lg_align)-1)), "%p inadequately aligned for lg_align=%u", q, lg_align); sz = sallocx(q, 0); if ((sz << 1) <= MAX_VALIDATE) { assert_false(validate_fill(q, 0, 0, sz), "Expected zeroed memory"); } else { assert_false(validate_fill(q, 0, 0, MAX_VALIDATE), "Expected zeroed memory"); assert_false(validate_fill( (void *)((uintptr_t)q+sz-MAX_VALIDATE), 0, 0, MAX_VALIDATE), "Expected zeroed memory"); } p = q; } dallocx(p, 0); #undef MAX_VALIDATE #undef MAX_LG_ALIGN } TEST_END TEST_BEGIN(test_overflow) { size_t hugemax; void *p; hugemax = get_huge_size(get_nhuge()-1); p = mallocx(1, 0); assert_ptr_not_null(p, "Unexpected mallocx() failure"); assert_ptr_null(rallocx(p, hugemax+1, 0), "Expected OOM for rallocx(p, size=%#zx, 0)", hugemax+1); assert_ptr_null(rallocx(p, ZU(PTRDIFF_MAX)+1, 0), "Expected OOM for rallocx(p, size=%#zx, 0)", ZU(PTRDIFF_MAX)+1); assert_ptr_null(rallocx(p, SIZE_T_MAX, 0), "Expected OOM for rallocx(p, size=%#zx, 0)", SIZE_T_MAX); assert_ptr_null(rallocx(p, 1, MALLOCX_ALIGN(ZU(PTRDIFF_MAX)+1)), "Expected OOM for rallocx(p, size=1, MALLOCX_ALIGN(%#zx))", ZU(PTRDIFF_MAX)+1); dallocx(p, 0); } TEST_END int main(void) { return (test( test_grow_and_shrink, test_zero, test_align, test_lg_align_and_zero, test_overflow)); }
5,973
21.976923
73
c
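test_zero above encodes a useful guarantee: rallocx() with MALLOCX_ZERO returns any newly usable trailing bytes zeroed, which plain realloc() does not promise. A minimal sketch (unprefixed build assumed; the 64-byte size class keeps the old/new boundary exactly at offset 64):

#include <stdio.h>
#include <string.h>
#include <jemalloc/jemalloc.h>

int main(void) {
    void *p = mallocx(64, MALLOCX_ZERO), *q;
    if (p == NULL)
        return 1;
    memset(p, 0xaa, 64);                  /* dirty the old extent */
    q = rallocx(p, 1024, MALLOCX_ZERO);   /* grow; trailing bytes zeroed */
    if (q == NULL) {
        dallocx(p, 0);
        return 1;
    }
    printf("old byte=%#x, first grown byte=%#x\n",
        ((unsigned char *)q)[0], ((unsigned char *)q)[64]);
    dallocx(q, 0);
    return 0;
}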
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/jemalloc/test/integration/thread_tcache_enabled.c
#include "test/jemalloc_test.h" static const bool config_tcache = #ifdef JEMALLOC_TCACHE true #else false #endif ; void * thd_start(void *arg) { int err; size_t sz; bool e0, e1; sz = sizeof(bool); if ((err = mallctl("thread.tcache.enabled", (void *)&e0, &sz, NULL, 0))) { if (err == ENOENT) { assert_false(config_tcache, "ENOENT should only be returned if tcache is " "disabled"); } goto label_ENOENT; } if (e0) { e1 = false; assert_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz, (void *)&e1, sz), 0, "Unexpected mallctl() error"); assert_true(e0, "tcache should be enabled"); } e1 = true; assert_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz, (void *)&e1, sz), 0, "Unexpected mallctl() error"); assert_false(e0, "tcache should be disabled"); e1 = true; assert_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz, (void *)&e1, sz), 0, "Unexpected mallctl() error"); assert_true(e0, "tcache should be enabled"); e1 = false; assert_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz, (void *)&e1, sz), 0, "Unexpected mallctl() error"); assert_true(e0, "tcache should be enabled"); e1 = false; assert_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz, (void *)&e1, sz), 0, "Unexpected mallctl() error"); assert_false(e0, "tcache should be disabled"); free(malloc(1)); e1 = true; assert_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz, (void *)&e1, sz), 0, "Unexpected mallctl() error"); assert_false(e0, "tcache should be disabled"); free(malloc(1)); e1 = true; assert_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz, (void *)&e1, sz), 0, "Unexpected mallctl() error"); assert_true(e0, "tcache should be enabled"); free(malloc(1)); e1 = false; assert_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz, (void *)&e1, sz), 0, "Unexpected mallctl() error"); assert_true(e0, "tcache should be enabled"); free(malloc(1)); e1 = false; assert_d_eq(mallctl("thread.tcache.enabled", (void *)&e0, &sz, (void *)&e1, sz), 0, "Unexpected mallctl() error"); assert_false(e0, "tcache should be disabled"); free(malloc(1)); return (NULL); label_ENOENT: test_skip("\"thread.tcache.enabled\" mallctl not available"); return (NULL); } TEST_BEGIN(test_main_thread) { thd_start(NULL); } TEST_END TEST_BEGIN(test_subthread) { thd_t thd; thd_create(&thd, thd_start, NULL); thd_join(thd, NULL); } TEST_END int main(void) { /* Run tests multiple times to check for bad interactions. */ return (test( test_main_thread, test_subthread, test_main_thread, test_subthread, test_main_thread)); }
2,692
22.417391
68
c
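The read-modify pattern repeated throughout thd_start() is worth isolating: a single mallctl() call returns the previous tcache state through oldp while installing a new state through newp. A sketch, assuming an unprefixed build with tcache support compiled in:

#include <stdbool.h>
#include <stdio.h>
#include <jemalloc/jemalloc.h>

int main(void) {
    bool was, want = false;
    size_t sz = sizeof(bool);

    /* Disable this thread's tcache and capture the prior state;
     * ENOENT means tcache support was compiled out. */
    if (mallctl("thread.tcache.enabled", (void *)&was, &sz,
        (void *)&want, sz) != 0)
        return 1;
    printf("tcache was %s; now disabled for this thread\n",
        was ? "enabled" : "disabled");
    return 0;
}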
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/jemalloc/test/integration/chunk.c
#include "test/jemalloc_test.h" #ifdef JEMALLOC_FILL const char *malloc_conf = "junk:false"; #endif static chunk_hooks_t orig_hooks; static chunk_hooks_t old_hooks; static bool do_dalloc = true; static bool do_decommit; static bool did_alloc; static bool did_dalloc; static bool did_commit; static bool did_decommit; static bool did_purge; static bool did_split; static bool did_merge; #if 0 # define TRACE_HOOK(fmt, ...) malloc_printf(fmt, __VA_ARGS__) #else # define TRACE_HOOK(fmt, ...) #endif void * chunk_alloc(void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit, unsigned arena_ind) { TRACE_HOOK("%s(new_addr=%p, size=%zu, alignment=%zu, *zero=%s, " "*commit=%s, arena_ind=%u)\n", __func__, new_addr, size, alignment, *zero ? "true" : "false", *commit ? "true" : "false", arena_ind); did_alloc = true; return (old_hooks.alloc(new_addr, size, alignment, zero, commit, arena_ind)); } bool chunk_dalloc(void *chunk, size_t size, bool committed, unsigned arena_ind) { TRACE_HOOK("%s(chunk=%p, size=%zu, committed=%s, arena_ind=%u)\n", __func__, chunk, size, committed ? "true" : "false", arena_ind); did_dalloc = true; if (!do_dalloc) return (true); return (old_hooks.dalloc(chunk, size, committed, arena_ind)); } bool chunk_commit(void *chunk, size_t size, size_t offset, size_t length, unsigned arena_ind) { bool err; TRACE_HOOK("%s(chunk=%p, size=%zu, offset=%zu, length=%zu, " "arena_ind=%u)\n", __func__, chunk, size, offset, length, arena_ind); err = old_hooks.commit(chunk, size, offset, length, arena_ind); did_commit = !err; return (err); } bool chunk_decommit(void *chunk, size_t size, size_t offset, size_t length, unsigned arena_ind) { bool err; TRACE_HOOK("%s(chunk=%p, size=%zu, offset=%zu, length=%zu, " "arena_ind=%u)\n", __func__, chunk, size, offset, length, arena_ind); if (!do_decommit) return (true); err = old_hooks.decommit(chunk, size, offset, length, arena_ind); did_decommit = !err; return (err); } bool chunk_purge(void *chunk, size_t size, size_t offset, size_t length, unsigned arena_ind) { TRACE_HOOK("%s(chunk=%p, size=%zu, offset=%zu, length=%zu " "arena_ind=%u)\n", __func__, chunk, size, offset, length, arena_ind); did_purge = true; return (old_hooks.purge(chunk, size, offset, length, arena_ind)); } bool chunk_split(void *chunk, size_t size, size_t size_a, size_t size_b, bool committed, unsigned arena_ind) { TRACE_HOOK("%s(chunk=%p, size=%zu, size_a=%zu, size_b=%zu, " "committed=%s, arena_ind=%u)\n", __func__, chunk, size, size_a, size_b, committed ? "true" : "false", arena_ind); did_split = true; return (old_hooks.split(chunk, size, size_a, size_b, committed, arena_ind)); } bool chunk_merge(void *chunk_a, size_t size_a, void *chunk_b, size_t size_b, bool committed, unsigned arena_ind) { TRACE_HOOK("%s(chunk_a=%p, size_a=%zu, chunk_b=%p size_b=%zu, " "committed=%s, arena_ind=%u)\n", __func__, chunk_a, size_a, chunk_b, size_b, committed ? 
"true" : "false", arena_ind); did_merge = true; return (old_hooks.merge(chunk_a, size_a, chunk_b, size_b, committed, arena_ind)); } TEST_BEGIN(test_chunk) { void *p; size_t old_size, new_size, large0, large1, huge0, huge1, huge2, sz; unsigned arena_ind; int flags; size_t hooks_mib[3], purge_mib[3]; size_t hooks_miblen, purge_miblen; chunk_hooks_t new_hooks = { chunk_alloc, chunk_dalloc, chunk_commit, chunk_decommit, chunk_purge, chunk_split, chunk_merge }; bool xallocx_success_a, xallocx_success_b, xallocx_success_c; sz = sizeof(unsigned); assert_d_eq(mallctl("arenas.extend", (void *)&arena_ind, &sz, NULL, 0), 0, "Unexpected mallctl() failure"); flags = MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE; /* Install custom chunk hooks. */ hooks_miblen = sizeof(hooks_mib)/sizeof(size_t); assert_d_eq(mallctlnametomib("arena.0.chunk_hooks", hooks_mib, &hooks_miblen), 0, "Unexpected mallctlnametomib() failure"); hooks_mib[1] = (size_t)arena_ind; old_size = sizeof(chunk_hooks_t); new_size = sizeof(chunk_hooks_t); assert_d_eq(mallctlbymib(hooks_mib, hooks_miblen, (void *)&old_hooks, &old_size, (void *)&new_hooks, new_size), 0, "Unexpected chunk_hooks error"); orig_hooks = old_hooks; assert_ptr_ne(old_hooks.alloc, chunk_alloc, "Unexpected alloc error"); assert_ptr_ne(old_hooks.dalloc, chunk_dalloc, "Unexpected dalloc error"); assert_ptr_ne(old_hooks.commit, chunk_commit, "Unexpected commit error"); assert_ptr_ne(old_hooks.decommit, chunk_decommit, "Unexpected decommit error"); assert_ptr_ne(old_hooks.purge, chunk_purge, "Unexpected purge error"); assert_ptr_ne(old_hooks.split, chunk_split, "Unexpected split error"); assert_ptr_ne(old_hooks.merge, chunk_merge, "Unexpected merge error"); /* Get large size classes. */ sz = sizeof(size_t); assert_d_eq(mallctl("arenas.lrun.0.size", (void *)&large0, &sz, NULL, 0), 0, "Unexpected arenas.lrun.0.size failure"); assert_d_eq(mallctl("arenas.lrun.1.size", (void *)&large1, &sz, NULL, 0), 0, "Unexpected arenas.lrun.1.size failure"); /* Get huge size classes. */ assert_d_eq(mallctl("arenas.hchunk.0.size", (void *)&huge0, &sz, NULL, 0), 0, "Unexpected arenas.hchunk.0.size failure"); assert_d_eq(mallctl("arenas.hchunk.1.size", (void *)&huge1, &sz, NULL, 0), 0, "Unexpected arenas.hchunk.1.size failure"); assert_d_eq(mallctl("arenas.hchunk.2.size", (void *)&huge2, &sz, NULL, 0), 0, "Unexpected arenas.hchunk.2.size failure"); /* Test dalloc/decommit/purge cascade. */ purge_miblen = sizeof(purge_mib)/sizeof(size_t); assert_d_eq(mallctlnametomib("arena.0.purge", purge_mib, &purge_miblen), 0, "Unexpected mallctlnametomib() failure"); purge_mib[1] = (size_t)arena_ind; do_dalloc = false; do_decommit = false; p = mallocx(huge0 * 2, flags); assert_ptr_not_null(p, "Unexpected mallocx() error"); did_dalloc = false; did_decommit = false; did_purge = false; did_split = false; xallocx_success_a = (xallocx(p, huge0, 0, flags) == huge0); assert_d_eq(mallctlbymib(purge_mib, purge_miblen, NULL, NULL, NULL, 0), 0, "Unexpected arena.%u.purge error", arena_ind); if (xallocx_success_a) { assert_true(did_dalloc, "Expected dalloc"); assert_false(did_decommit, "Unexpected decommit"); assert_true(did_purge, "Expected purge"); } assert_true(did_split, "Expected split"); dallocx(p, flags); do_dalloc = true; /* Test decommit/commit and observe split/merge. 
*/ do_dalloc = false; do_decommit = true; p = mallocx(huge0 * 2, flags); assert_ptr_not_null(p, "Unexpected mallocx() error"); did_decommit = false; did_commit = false; did_split = false; did_merge = false; xallocx_success_b = (xallocx(p, huge0, 0, flags) == huge0); assert_d_eq(mallctlbymib(purge_mib, purge_miblen, NULL, NULL, NULL, 0), 0, "Unexpected arena.%u.purge error", arena_ind); if (xallocx_success_b) assert_true(did_split, "Expected split"); xallocx_success_c = (xallocx(p, huge0 * 2, 0, flags) == huge0 * 2); assert_b_eq(did_decommit, did_commit, "Expected decommit/commit match"); if (xallocx_success_b && xallocx_success_c) assert_true(did_merge, "Expected merge"); dallocx(p, flags); do_dalloc = true; do_decommit = false; /* Test purge for partial-chunk huge allocations. */ if (huge0 * 2 > huge2) { /* * There are at least four size classes per doubling, so a * successful xallocx() from size=huge2 to size=huge1 is * guaranteed to leave trailing purgeable memory. */ p = mallocx(huge2, flags); assert_ptr_not_null(p, "Unexpected mallocx() error"); did_purge = false; assert_zu_eq(xallocx(p, huge1, 0, flags), huge1, "Unexpected xallocx() failure"); assert_true(did_purge, "Expected purge"); dallocx(p, flags); } /* Test decommit for large allocations. */ do_decommit = true; p = mallocx(large1, flags); assert_ptr_not_null(p, "Unexpected mallocx() error"); assert_d_eq(mallctlbymib(purge_mib, purge_miblen, NULL, NULL, NULL, 0), 0, "Unexpected arena.%u.purge error", arena_ind); did_decommit = false; assert_zu_eq(xallocx(p, large0, 0, flags), large0, "Unexpected xallocx() failure"); assert_d_eq(mallctlbymib(purge_mib, purge_miblen, NULL, NULL, NULL, 0), 0, "Unexpected arena.%u.purge error", arena_ind); did_commit = false; assert_zu_eq(xallocx(p, large1, 0, flags), large1, "Unexpected xallocx() failure"); assert_b_eq(did_decommit, did_commit, "Expected decommit/commit match"); dallocx(p, flags); do_decommit = false; /* Make sure non-huge allocation succeeds. */ p = mallocx(42, flags); assert_ptr_not_null(p, "Unexpected mallocx() error"); dallocx(p, flags); /* Restore chunk hooks. */ assert_d_eq(mallctlbymib(hooks_mib, hooks_miblen, NULL, NULL, (void *)&old_hooks, new_size), 0, "Unexpected chunk_hooks error"); assert_d_eq(mallctlbymib(hooks_mib, hooks_miblen, (void *)&old_hooks, &old_size, NULL, 0), 0, "Unexpected chunk_hooks error"); assert_ptr_eq(old_hooks.alloc, orig_hooks.alloc, "Unexpected alloc error"); assert_ptr_eq(old_hooks.dalloc, orig_hooks.dalloc, "Unexpected dalloc error"); assert_ptr_eq(old_hooks.commit, orig_hooks.commit, "Unexpected commit error"); assert_ptr_eq(old_hooks.decommit, orig_hooks.decommit, "Unexpected decommit error"); assert_ptr_eq(old_hooks.purge, orig_hooks.purge, "Unexpected purge error"); assert_ptr_eq(old_hooks.split, orig_hooks.split, "Unexpected split error"); assert_ptr_eq(old_hooks.merge, orig_hooks.merge, "Unexpected merge error"); } TEST_END int main(void) { return (test(test_chunk)); }
9,660
31.749153
74
c
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/jemalloc/test/integration/aligned_alloc.c
#include "test/jemalloc_test.h" #define CHUNK 0x400000 #define MAXALIGN (((size_t)1) << 23) /* * On systems which can't merge extents, tests that call this function generate * a lot of dirty memory very quickly. Purging between cycles mitigates * potential OOM on e.g. 32-bit Windows. */ static void purge(void) { assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0, "Unexpected mallctl error"); } TEST_BEGIN(test_alignment_errors) { size_t alignment; void *p; alignment = 0; set_errno(0); p = aligned_alloc(alignment, 1); assert_false(p != NULL || get_errno() != EINVAL, "Expected error for invalid alignment %zu", alignment); for (alignment = sizeof(size_t); alignment < MAXALIGN; alignment <<= 1) { set_errno(0); p = aligned_alloc(alignment + 1, 1); assert_false(p != NULL || get_errno() != EINVAL, "Expected error for invalid alignment %zu", alignment + 1); } } TEST_END TEST_BEGIN(test_oom_errors) { size_t alignment, size; void *p; #if LG_SIZEOF_PTR == 3 alignment = UINT64_C(0x8000000000000000); size = UINT64_C(0x8000000000000000); #else alignment = 0x80000000LU; size = 0x80000000LU; #endif set_errno(0); p = aligned_alloc(alignment, size); assert_false(p != NULL || get_errno() != ENOMEM, "Expected error for aligned_alloc(%zu, %zu)", alignment, size); #if LG_SIZEOF_PTR == 3 alignment = UINT64_C(0x4000000000000000); size = UINT64_C(0xc000000000000001); #else alignment = 0x40000000LU; size = 0xc0000001LU; #endif set_errno(0); p = aligned_alloc(alignment, size); assert_false(p != NULL || get_errno() != ENOMEM, "Expected error for aligned_alloc(%zu, %zu)", alignment, size); alignment = 0x10LU; #if LG_SIZEOF_PTR == 3 size = UINT64_C(0xfffffffffffffff0); #else size = 0xfffffff0LU; #endif set_errno(0); p = aligned_alloc(alignment, size); assert_false(p != NULL || get_errno() != ENOMEM, "Expected error for aligned_alloc(&p, %zu, %zu)", alignment, size); } TEST_END TEST_BEGIN(test_alignment_and_size) { #define NITER 4 size_t alignment, size, total; unsigned i; void *ps[NITER]; for (i = 0; i < NITER; i++) ps[i] = NULL; for (alignment = 8; alignment <= MAXALIGN; alignment <<= 1) { total = 0; for (size = 1; size < 3 * alignment && size < (1U << 31); size += (alignment >> (LG_SIZEOF_PTR-1)) - 1) { for (i = 0; i < NITER; i++) { ps[i] = aligned_alloc(alignment, size); if (ps[i] == NULL) { char buf[BUFERROR_BUF]; buferror(get_errno(), buf, sizeof(buf)); test_fail( "Error for alignment=%zu, " "size=%zu (%#zx): %s", alignment, size, size, buf); } total += malloc_usable_size(ps[i]); if (total >= (MAXALIGN << 1)) break; } for (i = 0; i < NITER; i++) { if (ps[i] != NULL) { free(ps[i]); ps[i] = NULL; } } } purge(); } #undef NITER } TEST_END int main(void) { return (test( test_alignment_errors, test_oom_errors, test_alignment_and_size)); }
3,053
20.814286
79
c
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/jemalloc/test/integration/posix_memalign.c
#include "test/jemalloc_test.h" #define CHUNK 0x400000 #define MAXALIGN (((size_t)1) << 23) /* * On systems which can't merge extents, tests that call this function generate * a lot of dirty memory very quickly. Purging between cycles mitigates * potential OOM on e.g. 32-bit Windows. */ static void purge(void) { assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0, "Unexpected mallctl error"); } TEST_BEGIN(test_alignment_errors) { size_t alignment; void *p; for (alignment = 0; alignment < sizeof(void *); alignment++) { assert_d_eq(posix_memalign(&p, alignment, 1), EINVAL, "Expected error for invalid alignment %zu", alignment); } for (alignment = sizeof(size_t); alignment < MAXALIGN; alignment <<= 1) { assert_d_ne(posix_memalign(&p, alignment + 1, 1), 0, "Expected error for invalid alignment %zu", alignment + 1); } } TEST_END TEST_BEGIN(test_oom_errors) { size_t alignment, size; void *p; #if LG_SIZEOF_PTR == 3 alignment = UINT64_C(0x8000000000000000); size = UINT64_C(0x8000000000000000); #else alignment = 0x80000000LU; size = 0x80000000LU; #endif assert_d_ne(posix_memalign(&p, alignment, size), 0, "Expected error for posix_memalign(&p, %zu, %zu)", alignment, size); #if LG_SIZEOF_PTR == 3 alignment = UINT64_C(0x4000000000000000); size = UINT64_C(0xc000000000000001); #else alignment = 0x40000000LU; size = 0xc0000001LU; #endif assert_d_ne(posix_memalign(&p, alignment, size), 0, "Expected error for posix_memalign(&p, %zu, %zu)", alignment, size); alignment = 0x10LU; #if LG_SIZEOF_PTR == 3 size = UINT64_C(0xfffffffffffffff0); #else size = 0xfffffff0LU; #endif assert_d_ne(posix_memalign(&p, alignment, size), 0, "Expected error for posix_memalign(&p, %zu, %zu)", alignment, size); } TEST_END TEST_BEGIN(test_alignment_and_size) { #define NITER 4 size_t alignment, size, total; unsigned i; int err; void *ps[NITER]; for (i = 0; i < NITER; i++) ps[i] = NULL; for (alignment = 8; alignment <= MAXALIGN; alignment <<= 1) { total = 0; for (size = 1; size < 3 * alignment && size < (1U << 31); size += (alignment >> (LG_SIZEOF_PTR-1)) - 1) { for (i = 0; i < NITER; i++) { err = posix_memalign(&ps[i], alignment, size); if (err) { char buf[BUFERROR_BUF]; buferror(get_errno(), buf, sizeof(buf)); test_fail( "Error for alignment=%zu, " "size=%zu (%#zx): %s", alignment, size, size, buf); } total += malloc_usable_size(ps[i]); if (total >= (MAXALIGN << 1)) break; } for (i = 0; i < NITER; i++) { if (ps[i] != NULL) { free(ps[i]); ps[i] = NULL; } } } purge(); } #undef NITER } TEST_END int main(void) { return (test( test_alignment_errors, test_oom_errors, test_alignment_and_size)); }
2,896
20.619403
79
c
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/jemalloc/test/integration/overflow.c
#include "test/jemalloc_test.h" TEST_BEGIN(test_overflow) { unsigned nhchunks; size_t mib[4]; size_t sz, miblen, max_size_class; void *p; sz = sizeof(unsigned); assert_d_eq(mallctl("arenas.nhchunks", (void *)&nhchunks, &sz, NULL, 0), 0, "Unexpected mallctl() error"); miblen = sizeof(mib) / sizeof(size_t); assert_d_eq(mallctlnametomib("arenas.hchunk.0.size", mib, &miblen), 0, "Unexpected mallctlnametomib() error"); mib[2] = nhchunks - 1; sz = sizeof(size_t); assert_d_eq(mallctlbymib(mib, miblen, (void *)&max_size_class, &sz, NULL, 0), 0, "Unexpected mallctlbymib() error"); assert_ptr_null(malloc(max_size_class + 1), "Expected OOM due to over-sized allocation request"); assert_ptr_null(malloc(SIZE_T_MAX), "Expected OOM due to over-sized allocation request"); assert_ptr_null(calloc(1, max_size_class + 1), "Expected OOM due to over-sized allocation request"); assert_ptr_null(calloc(1, SIZE_T_MAX), "Expected OOM due to over-sized allocation request"); p = malloc(1); assert_ptr_not_null(p, "Unexpected malloc() OOM"); assert_ptr_null(realloc(p, max_size_class + 1), "Expected OOM due to over-sized allocation request"); assert_ptr_null(realloc(p, SIZE_T_MAX), "Expected OOM due to over-sized allocation request"); free(p); } TEST_END int main(void) { return (test( test_overflow)); }
1,373
26.48
73
c
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/jemalloc/test/include/test/SFMT-params.h
/* * This file derives from SFMT 1.3.3 * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was * released under the terms of the following license: * * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima * University. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of the Hiroshima University nor the names of * its contributors may be used to endorse or promote products * derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef SFMT_PARAMS_H #define SFMT_PARAMS_H #if !defined(MEXP) #ifdef __GNUC__ #warning "MEXP is not defined. I assume MEXP is 19937." #endif #define MEXP 19937 #endif /*----------------- BASIC DEFINITIONS -----------------*/ /** Mersenne Exponent. The period of the sequence * is a multiple of 2^MEXP-1. * #define MEXP 19937 */ /** SFMT generator has an internal state array of 128-bit integers, * and N is its size. */ #define N (MEXP / 128 + 1) /** N32 is the size of internal state array when regarded as an array * of 32-bit integers.*/ #define N32 (N * 4) /** N64 is the size of internal state array when regarded as an array * of 64-bit integers.*/ #define N64 (N * 2) /*---------------------- the parameters of SFMT following definitions are in paramsXXXX.h file. ----------------------*/ /** the pick up position of the array. #define POS1 122 */ /** the parameter of shift left as four 32-bit registers. #define SL1 18 */ /** the parameter of shift left as one 128-bit register. * The 128-bit integer is shifted by (SL2 * 8) bits. #define SL2 1 */ /** the parameter of shift right as four 32-bit registers. #define SR1 11 */ /** the parameter of shift right as one 128-bit register. * The 128-bit integer is shifted by (SL2 * 8) bits. #define SR2 1 */ /** A bitmask, used in the recursion. These parameters are introduced * to break symmetry of SIMD. #define MSK1 0xdfffffefU #define MSK2 0xddfecb7fU #define MSK3 0xbffaffffU #define MSK4 0xbffffff6U */ /** These definitions are part of a 128-bit period certification vector. 
#define PARITY1 0x00000001U #define PARITY2 0x00000000U #define PARITY3 0x00000000U #define PARITY4 0xc98e126aU */ #if MEXP == 607 #include "test/SFMT-params607.h" #elif MEXP == 1279 #include "test/SFMT-params1279.h" #elif MEXP == 2281 #include "test/SFMT-params2281.h" #elif MEXP == 4253 #include "test/SFMT-params4253.h" #elif MEXP == 11213 #include "test/SFMT-params11213.h" #elif MEXP == 19937 #include "test/SFMT-params19937.h" #elif MEXP == 44497 #include "test/SFMT-params44497.h" #elif MEXP == 86243 #include "test/SFMT-params86243.h" #elif MEXP == 132049 #include "test/SFMT-params132049.h" #elif MEXP == 216091 #include "test/SFMT-params216091.h" #else #ifdef __GNUC__ #error "MEXP is not valid." #undef MEXP #else #undef MEXP #endif #endif #endif /* SFMT_PARAMS_H */
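/*
 * Worked instance (added for clarity): with the default MEXP of 19937,
 * N = 19937 / 128 + 1 = 156 128-bit words, hence N32 = 624 and N64 = 312 --
 * the same 624-word state size as the classic MT19937 generator.
 */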
4,286
31.233083
79
h
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/jemalloc/test/include/test/SFMT-params4253.h
/* * This file derives from SFMT 1.3.3 * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was * released under the terms of the following license: * * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima * University. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of the Hiroshima University nor the names of * its contributors may be used to endorse or promote products * derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef SFMT_PARAMS4253_H #define SFMT_PARAMS4253_H #define POS1 17 #define SL1 20 #define SL2 1 #define SR1 7 #define SR2 1 #define MSK1 0x9f7bffffU #define MSK2 0x9fffff5fU #define MSK3 0x3efffffbU #define MSK4 0xfffff7bbU #define PARITY1 0xa8000001U #define PARITY2 0xaf5390a3U #define PARITY3 0xb740b3f8U #define PARITY4 0x6c11486dU /* PARAMETERS FOR ALTIVEC */ #if defined(__APPLE__) /* For OSX */ #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1) #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1) #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4) #define ALTI_MSK64 \ (vector unsigned int)(MSK2, MSK1, MSK4, MSK3) #define ALTI_SL2_PERM \ (vector unsigned char)(1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8) #define ALTI_SL2_PERM64 \ (vector unsigned char)(1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0) #define ALTI_SR2_PERM \ (vector unsigned char)(7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14) #define ALTI_SR2_PERM64 \ (vector unsigned char)(15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14) #else /* For OTHER OSs(Linux?) */ #define ALTI_SL1 {SL1, SL1, SL1, SL1} #define ALTI_SR1 {SR1, SR1, SR1, SR1} #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4} #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3} #define ALTI_SL2_PERM {1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8} #define ALTI_SL2_PERM64 {1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0} #define ALTI_SR2_PERM {7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14} #define ALTI_SR2_PERM64 {15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14} #endif /* For OSX */ #define IDSTR "SFMT-4253:17-20-1-7-1:9f7bffff-9fffff5f-3efffffb-fffff7bb" #endif /* SFMT_PARAMS4253_H */
3,552
42.329268
79
h
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/jemalloc/test/include/test/SFMT-params607.h
/* * This file derives from SFMT 1.3.3 * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was * released under the terms of the following license: * * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima * University. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of the Hiroshima University nor the names of * its contributors may be used to endorse or promote products * derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef SFMT_PARAMS607_H #define SFMT_PARAMS607_H #define POS1 2 #define SL1 15 #define SL2 3 #define SR1 13 #define SR2 3 #define MSK1 0xfdff37ffU #define MSK2 0xef7f3f7dU #define MSK3 0xff777b7dU #define MSK4 0x7ff7fb2fU #define PARITY1 0x00000001U #define PARITY2 0x00000000U #define PARITY3 0x00000000U #define PARITY4 0x5986f054U /* PARAMETERS FOR ALTIVEC */ #if defined(__APPLE__) /* For OSX */ #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1) #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1) #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4) #define ALTI_MSK64 \ (vector unsigned int)(MSK2, MSK1, MSK4, MSK3) #define ALTI_SL2_PERM \ (vector unsigned char)(3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10) #define ALTI_SL2_PERM64 \ (vector unsigned char)(3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2) #define ALTI_SR2_PERM \ (vector unsigned char)(5,6,7,0,9,10,11,4,13,14,15,8,19,19,19,12) #define ALTI_SR2_PERM64 \ (vector unsigned char)(13,14,15,0,1,2,3,4,19,19,19,8,9,10,11,12) #else /* For OTHER OSs(Linux?) */ #define ALTI_SL1 {SL1, SL1, SL1, SL1} #define ALTI_SR1 {SR1, SR1, SR1, SR1} #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4} #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3} #define ALTI_SL2_PERM {3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10} #define ALTI_SL2_PERM64 {3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2} #define ALTI_SR2_PERM {5,6,7,0,9,10,11,4,13,14,15,8,19,19,19,12} #define ALTI_SR2_PERM64 {13,14,15,0,1,2,3,4,19,19,19,8,9,10,11,12} #endif /* For OSX */ #define IDSTR "SFMT-607:2-15-3-13-3:fdff37ff-ef7f3f7d-ff777b7d-7ff7fb2f" #endif /* SFMT_PARAMS607_H */
3,558
42.402439
79
h
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/jemalloc/test/include/test/SFMT-params216091.h
/* * This file derives from SFMT 1.3.3 * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was * released under the terms of the following license: * * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima * University. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of the Hiroshima University nor the names of * its contributors may be used to endorse or promote products * derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef SFMT_PARAMS216091_H #define SFMT_PARAMS216091_H #define POS1 627 #define SL1 11 #define SL2 3 #define SR1 10 #define SR2 1 #define MSK1 0xbff7bff7U #define MSK2 0xbfffffffU #define MSK3 0xbffffa7fU #define MSK4 0xffddfbfbU #define PARITY1 0xf8000001U #define PARITY2 0x89e80709U #define PARITY3 0x3bd2b64bU #define PARITY4 0x0c64b1e4U /* PARAMETERS FOR ALTIVEC */ #if defined(__APPLE__) /* For OSX */ #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1) #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1) #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4) #define ALTI_MSK64 \ (vector unsigned int)(MSK2, MSK1, MSK4, MSK3) #define ALTI_SL2_PERM \ (vector unsigned char)(3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10) #define ALTI_SL2_PERM64 \ (vector unsigned char)(3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2) #define ALTI_SR2_PERM \ (vector unsigned char)(7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14) #define ALTI_SR2_PERM64 \ (vector unsigned char)(15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14) #else /* For OTHER OSs(Linux?) */ #define ALTI_SL1 {SL1, SL1, SL1, SL1} #define ALTI_SR1 {SR1, SR1, SR1, SR1} #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4} #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3} #define ALTI_SL2_PERM {3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10} #define ALTI_SL2_PERM64 {3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2} #define ALTI_SR2_PERM {7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14} #define ALTI_SR2_PERM64 {15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14} #endif /* For OSX */ #define IDSTR "SFMT-216091:627-11-3-10-1:bff7bff7-bfffffff-bffffa7f-ffddfbfb" #endif /* SFMT_PARAMS216091_H */
3,566
42.5
79
h
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/jemalloc/test/include/test/mq.h
void mq_nanosleep(unsigned ns); /* * Simple templated message queue implementation that relies on only mutexes for * synchronization (which reduces portability issues). Given the following * setup: * * typedef struct mq_msg_s mq_msg_t; * struct mq_msg_s { * mq_msg(mq_msg_t) link; * [message data] * }; * mq_gen(, mq_, mq_t, mq_msg_t, link) * * The API is as follows: * * bool mq_init(mq_t *mq); * void mq_fini(mq_t *mq); * unsigned mq_count(mq_t *mq); * mq_msg_t *mq_tryget(mq_t *mq); * mq_msg_t *mq_get(mq_t *mq); * void mq_put(mq_t *mq, mq_msg_t *msg); * * The message queue linkage embedded in each message is to be treated as * externally opaque (no need to initialize or clean up externally). mq_fini() * does not perform any cleanup of messages, since it knows nothing of their * payloads. */ #define mq_msg(a_mq_msg_type) ql_elm(a_mq_msg_type) #define mq_gen(a_attr, a_prefix, a_mq_type, a_mq_msg_type, a_field) \ typedef struct { \ mtx_t lock; \ ql_head(a_mq_msg_type) msgs; \ unsigned count; \ } a_mq_type; \ a_attr bool \ a_prefix##init(a_mq_type *mq) { \ \ if (mtx_init(&mq->lock)) \ return (true); \ ql_new(&mq->msgs); \ mq->count = 0; \ return (false); \ } \ a_attr void \ a_prefix##fini(a_mq_type *mq) \ { \ \ mtx_fini(&mq->lock); \ } \ a_attr unsigned \ a_prefix##count(a_mq_type *mq) \ { \ unsigned count; \ \ mtx_lock(&mq->lock); \ count = mq->count; \ mtx_unlock(&mq->lock); \ return (count); \ } \ a_attr a_mq_msg_type * \ a_prefix##tryget(a_mq_type *mq) \ { \ a_mq_msg_type *msg; \ \ mtx_lock(&mq->lock); \ msg = ql_first(&mq->msgs); \ if (msg != NULL) { \ ql_head_remove(&mq->msgs, a_mq_msg_type, a_field); \ mq->count--; \ } \ mtx_unlock(&mq->lock); \ return (msg); \ } \ a_attr a_mq_msg_type * \ a_prefix##get(a_mq_type *mq) \ { \ a_mq_msg_type *msg; \ unsigned ns; \ \ msg = a_prefix##tryget(mq); \ if (msg != NULL) \ return (msg); \ \ ns = 1; \ while (true) { \ mq_nanosleep(ns); \ msg = a_prefix##tryget(mq); \ if (msg != NULL) \ return (msg); \ if (ns < 1000*1000*1000) { \ /* Double sleep time, up to max 1 second. */ \ ns <<= 1; \ if (ns > 1000*1000*1000) \ ns = 1000*1000*1000; \ } \ } \ } \ a_attr void \ a_prefix##put(a_mq_type *mq, a_mq_msg_type *msg) \ { \ \ mtx_lock(&mq->lock); \ ql_elm_new(msg, a_field); \ ql_tail_insert(&mq->msgs, msg, a_field); \ mq->count++; \ mtx_unlock(&mq->lock); \ }
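/*
 * Example instantiation (added; not part of the original header). The type
 * and function names below are arbitrary; mq_gen() expands to a complete
 * queue implementation for the given message type, following the API
 * documented above.
 */
#if 0 /* sketch only */
typedef struct my_msg_s my_msg_t;
struct my_msg_s {
	mq_msg(my_msg_t)	link;
	int			payload;
};
mq_gen(static, my_mq_, my_mq_t, my_msg_t, link)

static void
example(void)
{
	my_mq_t mq;
	my_msg_t msg;

	my_mq_init(&mq);
	msg.payload = 42;
	my_mq_put(&mq, &msg);
	assert(my_mq_count(&mq) == 1);
	my_mq_get(&mq);	/* Returns &msg. */
	my_mq_fini(&mq);
}
#endif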
2,902
25.390909
80
h
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/jemalloc/test/include/test/btalloc.h
/* btalloc() provides a mechanism for allocating via permuted backtraces. */ void *btalloc(size_t size, unsigned bits); #define btalloc_n_proto(n) \ void *btalloc_##n(size_t size, unsigned bits); btalloc_n_proto(0) btalloc_n_proto(1) #define btalloc_n_gen(n) \ void * \ btalloc_##n(size_t size, unsigned bits) \ { \ void *p; \ \ if (bits == 0) \ p = mallocx(size, 0); \ else { \ switch (bits & 0x1U) { \ case 0: \ p = (btalloc_0(size, bits >> 1)); \ break; \ case 1: \ p = (btalloc_1(size, bits >> 1)); \ break; \ default: not_reached(); \ } \ } \ /* Intentionally sabotage tail call optimization. */ \ assert_ptr_not_null(p, "Unexpected mallocx() failure"); \ return (p); \ }
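/*
 * Usage sketch (added): each bit consumed from `bits` selects one of two
 * otherwise identical call sites, so equal-sized allocations can be given
 * many distinct backtraces -- useful when exercising heap profiling.
 */
#if 0 /* sketch only */
void *p0 = btalloc(42, 0x0U);	/* Allocated directly via mallocx(). */
void *p1 = btalloc(42, 0x5U);	/* Same size, permuted call path 1,0,1. */
#endif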
825
24.8125
76
h
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/jemalloc/test/include/test/SFMT-params1279.h
/* * This file derives from SFMT 1.3.3 * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was * released under the terms of the following license: * * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima * University. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of the Hiroshima University nor the names of * its contributors may be used to endorse or promote products * derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef SFMT_PARAMS1279_H #define SFMT_PARAMS1279_H #define POS1 7 #define SL1 14 #define SL2 3 #define SR1 5 #define SR2 1 #define MSK1 0xf7fefffdU #define MSK2 0x7fefcfffU #define MSK3 0xaff3ef3fU #define MSK4 0xb5ffff7fU #define PARITY1 0x00000001U #define PARITY2 0x00000000U #define PARITY3 0x00000000U #define PARITY4 0x20000000U /* PARAMETERS FOR ALTIVEC */ #if defined(__APPLE__) /* For OSX */ #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1) #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1) #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4) #define ALTI_MSK64 \ (vector unsigned int)(MSK2, MSK1, MSK4, MSK3) #define ALTI_SL2_PERM \ (vector unsigned char)(3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10) #define ALTI_SL2_PERM64 \ (vector unsigned char)(3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2) #define ALTI_SR2_PERM \ (vector unsigned char)(7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14) #define ALTI_SR2_PERM64 \ (vector unsigned char)(15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14) #else /* For OTHER OSs(Linux?) */ #define ALTI_SL1 {SL1, SL1, SL1, SL1} #define ALTI_SR1 {SR1, SR1, SR1, SR1} #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4} #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3} #define ALTI_SL2_PERM {3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10} #define ALTI_SL2_PERM64 {3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2} #define ALTI_SR2_PERM {7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14} #define ALTI_SR2_PERM64 {15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14} #endif /* For OSX */ #define IDSTR "SFMT-1279:7-14-3-5-1:f7fefffd-7fefcfff-aff3ef3f-b5ffff7f" #endif /* SFMT_PARAMS1279_H */
3,552
42.329268
79
h
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/jemalloc/test/include/test/SFMT-params11213.h
/* * This file derives from SFMT 1.3.3 * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was * released under the terms of the following license: * * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima * University. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of the Hiroshima University nor the names of * its contributors may be used to endorse or promote products * derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef SFMT_PARAMS11213_H #define SFMT_PARAMS11213_H #define POS1 68 #define SL1 14 #define SL2 3 #define SR1 7 #define SR2 3 #define MSK1 0xeffff7fbU #define MSK2 0xffffffefU #define MSK3 0xdfdfbfffU #define MSK4 0x7fffdbfdU #define PARITY1 0x00000001U #define PARITY2 0x00000000U #define PARITY3 0xe8148000U #define PARITY4 0xd0c7afa3U /* PARAMETERS FOR ALTIVEC */ #if defined(__APPLE__) /* For OSX */ #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1) #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1) #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4) #define ALTI_MSK64 \ (vector unsigned int)(MSK2, MSK1, MSK4, MSK3) #define ALTI_SL2_PERM \ (vector unsigned char)(3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10) #define ALTI_SL2_PERM64 \ (vector unsigned char)(3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2) #define ALTI_SR2_PERM \ (vector unsigned char)(5,6,7,0,9,10,11,4,13,14,15,8,19,19,19,12) #define ALTI_SR2_PERM64 \ (vector unsigned char)(13,14,15,0,1,2,3,4,19,19,19,8,9,10,11,12) #else /* For OTHER OSs(Linux?) */ #define ALTI_SL1 {SL1, SL1, SL1, SL1} #define ALTI_SR1 {SR1, SR1, SR1, SR1} #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4} #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3} #define ALTI_SL2_PERM {3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10} #define ALTI_SL2_PERM64 {3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2} #define ALTI_SR2_PERM {5,6,7,0,9,10,11,4,13,14,15,8,19,19,19,12} #define ALTI_SR2_PERM64 {13,14,15,0,1,2,3,4,19,19,19,8,9,10,11,12} #endif /* For OSX */ #define IDSTR "SFMT-11213:68-14-3-7-3:effff7fb-ffffffef-dfdfbfff-7fffdbfd" #endif /* SFMT_PARAMS11213_H */
3,566
42.5
79
h
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/jemalloc/test/include/test/SFMT-sse2.h
/* * This file derives from SFMT 1.3.3 * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was * released under the terms of the following license: * * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima * University. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of the Hiroshima University nor the names of * its contributors may be used to endorse or promote products * derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /** * @file SFMT-sse2.h * @brief SIMD oriented Fast Mersenne Twister (SFMT) for Intel SSE2 * * @author Mutsuo Saito (Hiroshima University) * @author Makoto Matsumoto (Hiroshima University) * * @note We assume LITTLE ENDIAN in this file * * Copyright (C) 2006, 2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima * University. All rights reserved. * * The new BSD License is applied to this software, see LICENSE.txt */ #ifndef SFMT_SSE2_H #define SFMT_SSE2_H /** * This function represents the recursion formula. * @param a a 128-bit part of the internal state array * @param b a 128-bit part of the internal state array * @param c a 128-bit part of the internal state array * @param d a 128-bit part of the internal state array * @param mask 128-bit mask * @return output */ JEMALLOC_ALWAYS_INLINE __m128i mm_recursion(__m128i *a, __m128i *b, __m128i c, __m128i d, __m128i mask) { __m128i v, x, y, z; x = _mm_load_si128(a); y = _mm_srli_epi32(*b, SR1); z = _mm_srli_si128(c, SR2); v = _mm_slli_epi32(d, SL1); z = _mm_xor_si128(z, x); z = _mm_xor_si128(z, v); x = _mm_slli_si128(x, SL2); y = _mm_and_si128(y, mask); z = _mm_xor_si128(z, x); z = _mm_xor_si128(z, y); return z; } /** * This function fills the internal state array with pseudorandom * integers. 
*/ JEMALLOC_INLINE void gen_rand_all(sfmt_t *ctx) { int i; __m128i r, r1, r2, mask; mask = _mm_set_epi32(MSK4, MSK3, MSK2, MSK1); r1 = _mm_load_si128(&ctx->sfmt[N - 2].si); r2 = _mm_load_si128(&ctx->sfmt[N - 1].si); for (i = 0; i < N - POS1; i++) { r = mm_recursion(&ctx->sfmt[i].si, &ctx->sfmt[i + POS1].si, r1, r2, mask); _mm_store_si128(&ctx->sfmt[i].si, r); r1 = r2; r2 = r; } for (; i < N; i++) { r = mm_recursion(&ctx->sfmt[i].si, &ctx->sfmt[i + POS1 - N].si, r1, r2, mask); _mm_store_si128(&ctx->sfmt[i].si, r); r1 = r2; r2 = r; } } /** * This function fills the user-specified array with pseudorandom * integers. * * @param array a 128-bit array to be filled by pseudorandom numbers. * @param size number of 128-bit pseudorandom numbers to be generated. */ JEMALLOC_INLINE void gen_rand_array(sfmt_t *ctx, w128_t *array, int size) { int i, j; __m128i r, r1, r2, mask; mask = _mm_set_epi32(MSK4, MSK3, MSK2, MSK1); r1 = _mm_load_si128(&ctx->sfmt[N - 2].si); r2 = _mm_load_si128(&ctx->sfmt[N - 1].si); for (i = 0; i < N - POS1; i++) { r = mm_recursion(&ctx->sfmt[i].si, &ctx->sfmt[i + POS1].si, r1, r2, mask); _mm_store_si128(&array[i].si, r); r1 = r2; r2 = r; } for (; i < N; i++) { r = mm_recursion(&ctx->sfmt[i].si, &array[i + POS1 - N].si, r1, r2, mask); _mm_store_si128(&array[i].si, r); r1 = r2; r2 = r; } /* main loop */ for (; i < size - N; i++) { r = mm_recursion(&array[i - N].si, &array[i + POS1 - N].si, r1, r2, mask); _mm_store_si128(&array[i].si, r); r1 = r2; r2 = r; } for (j = 0; j < 2 * N - size; j++) { r = _mm_load_si128(&array[j + size - N].si); _mm_store_si128(&ctx->sfmt[j].si, r); } for (; i < size; i++) { r = mm_recursion(&array[i - N].si, &array[i + POS1 - N].si, r1, r2, mask); _mm_store_si128(&array[i].si, r); _mm_store_si128(&ctx->sfmt[j++].si, r); r1 = r2; r2 = r; } } #endif
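/*
 * Worked equation (added): in scalar terms, mm_recursion() computes, for
 * 128-bit inputs a, b, c, d,
 *
 *   r = a ^ (a <<_128 8*SL2) ^ ((b >>_32 SR1) & mask)
 *         ^ (c >>_128 8*SR2) ^ (d <<_32 SL1)
 *
 * where <<_128 / >>_128 shift the whole 128-bit word at byte granularity
 * (_mm_slli_si128 / _mm_srli_si128) and <<_32 / >>_32 act independently on
 * each 32-bit lane (_mm_slli_epi32 / _mm_srli_epi32). This is the standard
 * SFMT recursion from the reference implementation.
 */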
5,215
32.012658
79
h
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/jemalloc/test/include/test/math.h
#ifndef JEMALLOC_ENABLE_INLINE double ln_gamma(double x); double i_gamma(double x, double p, double ln_gamma_p); double pt_norm(double p); double pt_chi2(double p, double df, double ln_gamma_df_2); double pt_gamma(double p, double shape, double scale, double ln_gamma_shape); #endif #if (defined(JEMALLOC_ENABLE_INLINE) || defined(MATH_C_)) /* * Compute the natural log of Gamma(x), accurate to 10 decimal places. * * This implementation is based on: * * Pike, M.C., I.D. Hill (1966) Algorithm 291: Logarithm of Gamma function * [S14]. Communications of the ACM 9(9):684. */ JEMALLOC_INLINE double ln_gamma(double x) { double f, z; assert(x > 0.0); if (x < 7.0) { f = 1.0; z = x; while (z < 7.0) { f *= z; z += 1.0; } x = z; f = -log(f); } else f = 0.0; z = 1.0 / (x * x); return (f + (x-0.5) * log(x) - x + 0.918938533204673 + (((-0.000595238095238 * z + 0.000793650793651) * z - 0.002777777777778) * z + 0.083333333333333) / x); } /* * Compute the incomplete Gamma ratio for [0..x], where p is the shape * parameter, and ln_gamma_p is ln_gamma(p). * * This implementation is based on: * * Bhattacharjee, G.P. (1970) Algorithm AS 32: The incomplete Gamma integral. * Applied Statistics 19:285-287. */ JEMALLOC_INLINE double i_gamma(double x, double p, double ln_gamma_p) { double acu, factor, oflo, gin, term, rn, a, b, an, dif; double pn[6]; unsigned i; assert(p > 0.0); assert(x >= 0.0); if (x == 0.0) return (0.0); acu = 1.0e-10; oflo = 1.0e30; gin = 0.0; factor = exp(p * log(x) - x - ln_gamma_p); if (x <= 1.0 || x < p) { /* Calculation by series expansion. */ gin = 1.0; term = 1.0; rn = p; while (true) { rn += 1.0; term *= x / rn; gin += term; if (term <= acu) { gin *= factor / p; return (gin); } } } else { /* Calculation by continued fraction. */ a = 1.0 - p; b = a + x + 1.0; term = 0.0; pn[0] = 1.0; pn[1] = x; pn[2] = x + 1.0; pn[3] = x * b; gin = pn[2] / pn[3]; while (true) { a += 1.0; b += 2.0; term += 1.0; an = a * term; for (i = 0; i < 2; i++) pn[i+4] = b * pn[i+2] - an * pn[i]; if (pn[5] != 0.0) { rn = pn[4] / pn[5]; dif = fabs(gin - rn); if (dif <= acu && dif <= acu * rn) { gin = 1.0 - factor * gin; return (gin); } gin = rn; } for (i = 0; i < 4; i++) pn[i] = pn[i+2]; if (fabs(pn[4]) >= oflo) { for (i = 0; i < 4; i++) pn[i] /= oflo; } } } } /* * Given a value p in [0..1] of the lower tail area of the normal distribution, * compute the limit on the definite integral from [-inf..z] that satisfies p, * accurate to 16 decimal places. * * This implementation is based on: * * Wichura, M.J. (1988) Algorithm AS 241: The percentage points of the normal * distribution. Applied Statistics 37(3):477-484. */ JEMALLOC_INLINE double pt_norm(double p) { double q, r, ret; assert(p > 0.0 && p < 1.0); q = p - 0.5; if (fabs(q) <= 0.425) { /* p close to 1/2. */ r = 0.180625 - q * q; return (q * (((((((2.5090809287301226727e3 * r + 3.3430575583588128105e4) * r + 6.7265770927008700853e4) * r + 4.5921953931549871457e4) * r + 1.3731693765509461125e4) * r + 1.9715909503065514427e3) * r + 1.3314166789178437745e2) * r + 3.3871328727963666080e0) / (((((((5.2264952788528545610e3 * r + 2.8729085735721942674e4) * r + 3.9307895800092710610e4) * r + 2.1213794301586595867e4) * r + 5.3941960214247511077e3) * r + 6.8718700749205790830e2) * r + 4.2313330701600911252e1) * r + 1.0)); } else { if (q < 0.0) r = p; else r = 1.0 - p; assert(r > 0.0); r = sqrt(-log(r)); if (r <= 5.0) { /* p neither close to 1/2 nor 0 or 1. 
*/ r -= 1.6; ret = ((((((((7.74545014278341407640e-4 * r + 2.27238449892691845833e-2) * r + 2.41780725177450611770e-1) * r + 1.27045825245236838258e0) * r + 3.64784832476320460504e0) * r + 5.76949722146069140550e0) * r + 4.63033784615654529590e0) * r + 1.42343711074968357734e0) / (((((((1.05075007164441684324e-9 * r + 5.47593808499534494600e-4) * r + 1.51986665636164571966e-2) * r + 1.48103976427480074590e-1) * r + 6.89767334985100004550e-1) * r + 1.67638483018380384940e0) * r + 2.05319162663775882187e0) * r + 1.0)); } else { /* p near 0 or 1. */ r -= 5.0; ret = ((((((((2.01033439929228813265e-7 * r + 2.71155556874348757815e-5) * r + 1.24266094738807843860e-3) * r + 2.65321895265761230930e-2) * r + 2.96560571828504891230e-1) * r + 1.78482653991729133580e0) * r + 5.46378491116411436990e0) * r + 6.65790464350110377720e0) / (((((((2.04426310338993978564e-15 * r + 1.42151175831644588870e-7) * r + 1.84631831751005468180e-5) * r + 7.86869131145613259100e-4) * r + 1.48753612908506148525e-2) * r + 1.36929880922735805310e-1) * r + 5.99832206555887937690e-1) * r + 1.0)); } if (q < 0.0) ret = -ret; return (ret); } } /* * Given a value p in [0..1] of the lower tail area of the Chi^2 distribution * with df degrees of freedom, where ln_gamma_df_2 is ln_gamma(df/2.0), compute * the upper limit on the definite integral from [0..z] that satisfies p, * accurate to 12 decimal places. * * This implementation is based on: * * Best, D.J., D.E. Roberts (1975) Algorithm AS 91: The percentage points of * the Chi^2 distribution. Applied Statistics 24(3):385-388. * * Shea, B.L. (1991) Algorithm AS R85: A remark on AS 91: The percentage * points of the Chi^2 distribution. Applied Statistics 40(1):233-235. */ JEMALLOC_INLINE double pt_chi2(double p, double df, double ln_gamma_df_2) { double e, aa, xx, c, ch, a, q, p1, p2, t, x, b, s1, s2, s3, s4, s5, s6; unsigned i; assert(p >= 0.0 && p < 1.0); assert(df > 0.0); e = 5.0e-7; aa = 0.6931471805; xx = 0.5 * df; c = xx - 1.0; if (df < -1.24 * log(p)) { /* Starting approximation for small Chi^2. */ ch = pow(p * xx * exp(ln_gamma_df_2 + xx * aa), 1.0 / xx); if (ch - e < 0.0) return (ch); } else { if (df > 0.32) { x = pt_norm(p); /* * Starting approximation using Wilson and Hilferty * estimate. */ p1 = 0.222222 / df; ch = df * pow(x * sqrt(p1) + 1.0 - p1, 3.0); /* Starting approximation for p tending to 1. */ if (ch > 2.2 * df + 6.0) { ch = -2.0 * (log(1.0 - p) - c * log(0.5 * ch) + ln_gamma_df_2); } } else { ch = 0.4; a = log(1.0 - p); while (true) { q = ch; p1 = 1.0 + ch * (4.67 + ch); p2 = ch * (6.73 + ch * (6.66 + ch)); t = -0.5 + (4.67 + 2.0 * ch) / p1 - (6.73 + ch * (13.32 + 3.0 * ch)) / p2; ch -= (1.0 - exp(a + ln_gamma_df_2 + 0.5 * ch + c * aa) * p2 / p1) / t; if (fabs(q / ch - 1.0) - 0.01 <= 0.0) break; } } } for (i = 0; i < 20; i++) { /* Calculation of seven-term Taylor series. 
*/ q = ch; p1 = 0.5 * ch; if (p1 < 0.0) return (-1.0); p2 = p - i_gamma(p1, xx, ln_gamma_df_2); t = p2 * exp(xx * aa + ln_gamma_df_2 + p1 - c * log(ch)); b = t / ch; a = 0.5 * t - b * c; s1 = (210.0 + a * (140.0 + a * (105.0 + a * (84.0 + a * (70.0 + 60.0 * a))))) / 420.0; s2 = (420.0 + a * (735.0 + a * (966.0 + a * (1141.0 + 1278.0 * a)))) / 2520.0; s3 = (210.0 + a * (462.0 + a * (707.0 + 932.0 * a))) / 2520.0; s4 = (252.0 + a * (672.0 + 1182.0 * a) + c * (294.0 + a * (889.0 + 1740.0 * a))) / 5040.0; s5 = (84.0 + 264.0 * a + c * (175.0 + 606.0 * a)) / 2520.0; s6 = (120.0 + c * (346.0 + 127.0 * c)) / 5040.0; ch += t * (1.0 + 0.5 * t * s1 - b * c * (s1 - b * (s2 - b * (s3 - b * (s4 - b * (s5 - b * s6)))))); if (fabs(q / ch - 1.0) <= e) break; } return (ch); } /* * Given a value p in [0..1] and Gamma distribution shape and scale parameters, * compute the upper limit on the definite integral from [0..z] that satisfies * p. */ JEMALLOC_INLINE double pt_gamma(double p, double shape, double scale, double ln_gamma_shape) { return (pt_chi2(p, shape * 2.0, ln_gamma_shape) * 0.5 * scale); } #endif
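/*
 * Worked example (added): the 99th percentile of the Chi^2 distribution
 * with df = 2 degrees of freedom. ln_gamma(df/2.0) = ln_gamma(1.0) = 0.0,
 * and the expected result is -2*ln(0.01) ~= 9.21034, since Chi^2 with two
 * degrees of freedom is the exponential distribution with rate 1/2.
 */
#if 0 /* sketch only */
double crit = pt_chi2(0.99, 2.0, ln_gamma(1.0));	/* ~9.21034 */
#endif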
8,172
25.195513
79
h
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/jemalloc/test/include/test/mtx.h
/* * mtx is a slightly simplified version of malloc_mutex. This code duplication * is unfortunate, but there are allocator bootstrapping considerations that * would leak into the test infrastructure if malloc_mutex were used directly * in tests. */ typedef struct { #ifdef _WIN32 CRITICAL_SECTION lock; #elif (defined(JEMALLOC_OS_UNFAIR_LOCK)) os_unfair_lock lock; #elif (defined(JEMALLOC_OSSPIN)) OSSpinLock lock; #else pthread_mutex_t lock; #endif } mtx_t; bool mtx_init(mtx_t *mtx); void mtx_fini(mtx_t *mtx); void mtx_lock(mtx_t *mtx); void mtx_unlock(mtx_t *mtx);
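/*
 * Usage sketch (added): mtx_t follows the usual init/lock/unlock/fini
 * lifecycle; like malloc_mutex_init(), mtx_init() returns true on failure.
 */
#if 0 /* sketch only */
mtx_t mtx;
if (mtx_init(&mtx))
	abort();	/* Initialization failed. */
mtx_lock(&mtx);
/* ... critical section ... */
mtx_unlock(&mtx);
mtx_fini(&mtx);
#endif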
584
23.375
79
h
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/jemalloc/test/include/test/SFMT-params2281.h
/* * This file derives from SFMT 1.3.3 * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was * released under the terms of the following license: * * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima * University. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of the Hiroshima University nor the names of * its contributors may be used to endorse or promote products * derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef SFMT_PARAMS2281_H #define SFMT_PARAMS2281_H #define POS1 12 #define SL1 19 #define SL2 1 #define SR1 5 #define SR2 1 #define MSK1 0xbff7ffbfU #define MSK2 0xfdfffffeU #define MSK3 0xf7ffef7fU #define MSK4 0xf2f7cbbfU #define PARITY1 0x00000001U #define PARITY2 0x00000000U #define PARITY3 0x00000000U #define PARITY4 0x41dfa600U /* PARAMETERS FOR ALTIVEC */ #if defined(__APPLE__) /* For OSX */ #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1) #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1) #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4) #define ALTI_MSK64 \ (vector unsigned int)(MSK2, MSK1, MSK4, MSK3) #define ALTI_SL2_PERM \ (vector unsigned char)(1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8) #define ALTI_SL2_PERM64 \ (vector unsigned char)(1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0) #define ALTI_SR2_PERM \ (vector unsigned char)(7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14) #define ALTI_SR2_PERM64 \ (vector unsigned char)(15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14) #else /* For OTHER OSs(Linux?) */ #define ALTI_SL1 {SL1, SL1, SL1, SL1} #define ALTI_SR1 {SR1, SR1, SR1, SR1} #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4} #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3} #define ALTI_SL2_PERM {1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8} #define ALTI_SL2_PERM64 {1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0} #define ALTI_SR2_PERM {7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14} #define ALTI_SR2_PERM64 {15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14} #endif /* For OSX */ #define IDSTR "SFMT-2281:12-19-1-5-1:bff7ffbf-fdfffffe-f7ffef7f-f2f7cbbf" #endif /* SFMT_PARAMS2281_H */
3,552
42.329268
79
h
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/jemalloc/test/include/test/SFMT-params19937.h
/* * This file derives from SFMT 1.3.3 * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was * released under the terms of the following license: * * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima * University. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of the Hiroshima University nor the names of * its contributors may be used to endorse or promote products * derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef SFMT_PARAMS19937_H #define SFMT_PARAMS19937_H #define POS1 122 #define SL1 18 #define SL2 1 #define SR1 11 #define SR2 1 #define MSK1 0xdfffffefU #define MSK2 0xddfecb7fU #define MSK3 0xbffaffffU #define MSK4 0xbffffff6U #define PARITY1 0x00000001U #define PARITY2 0x00000000U #define PARITY3 0x00000000U #define PARITY4 0x13c9e684U /* PARAMETERS FOR ALTIVEC */ #if defined(__APPLE__) /* For OSX */ #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1) #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1) #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4) #define ALTI_MSK64 \ (vector unsigned int)(MSK2, MSK1, MSK4, MSK3) #define ALTI_SL2_PERM \ (vector unsigned char)(1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8) #define ALTI_SL2_PERM64 \ (vector unsigned char)(1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0) #define ALTI_SR2_PERM \ (vector unsigned char)(7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14) #define ALTI_SR2_PERM64 \ (vector unsigned char)(15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14) #else /* For OTHER OSs(Linux?) */ #define ALTI_SL1 {SL1, SL1, SL1, SL1} #define ALTI_SR1 {SR1, SR1, SR1, SR1} #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4} #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3} #define ALTI_SL2_PERM {1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8} #define ALTI_SL2_PERM64 {1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0} #define ALTI_SR2_PERM {7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14} #define ALTI_SR2_PERM64 {15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14} #endif /* For OSX */ #define IDSTR "SFMT-19937:122-18-1-11-1:dfffffef-ddfecb7f-bffaffff-bffffff6" #endif /* SFMT_PARAMS19937_H */
3,560
42.426829
79
h
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/jemalloc/test/include/test/test.h
#define ASSERT_BUFSIZE 256 #define assert_cmp(t, a, b, cmp, neg_cmp, pri, ...) do { \ t a_ = (a); \ t b_ = (b); \ if (!(a_ cmp b_)) { \ char prefix[ASSERT_BUFSIZE]; \ char message[ASSERT_BUFSIZE]; \ malloc_snprintf(prefix, sizeof(prefix), \ "%s:%s:%d: Failed assertion: " \ "(%s) "#cmp" (%s) --> " \ "%"pri" "#neg_cmp" %"pri": ", \ __func__, __FILE__, __LINE__, \ #a, #b, a_, b_); \ malloc_snprintf(message, sizeof(message), __VA_ARGS__); \ p_test_fail(prefix, message); \ } \ } while (0) #define assert_ptr_eq(a, b, ...) assert_cmp(void *, a, b, ==, \ !=, "p", __VA_ARGS__) #define assert_ptr_ne(a, b, ...) assert_cmp(void *, a, b, !=, \ ==, "p", __VA_ARGS__) #define assert_ptr_null(a, ...) assert_cmp(void *, a, NULL, ==, \ !=, "p", __VA_ARGS__) #define assert_ptr_not_null(a, ...) assert_cmp(void *, a, NULL, !=, \ ==, "p", __VA_ARGS__) #define assert_c_eq(a, b, ...) assert_cmp(char, a, b, ==, !=, "c", __VA_ARGS__) #define assert_c_ne(a, b, ...) assert_cmp(char, a, b, !=, ==, "c", __VA_ARGS__) #define assert_c_lt(a, b, ...) assert_cmp(char, a, b, <, >=, "c", __VA_ARGS__) #define assert_c_le(a, b, ...) assert_cmp(char, a, b, <=, >, "c", __VA_ARGS__) #define assert_c_ge(a, b, ...) assert_cmp(char, a, b, >=, <, "c", __VA_ARGS__) #define assert_c_gt(a, b, ...) assert_cmp(char, a, b, >, <=, "c", __VA_ARGS__) #define assert_x_eq(a, b, ...) assert_cmp(int, a, b, ==, !=, "#x", __VA_ARGS__) #define assert_x_ne(a, b, ...) assert_cmp(int, a, b, !=, ==, "#x", __VA_ARGS__) #define assert_x_lt(a, b, ...) assert_cmp(int, a, b, <, >=, "#x", __VA_ARGS__) #define assert_x_le(a, b, ...) assert_cmp(int, a, b, <=, >, "#x", __VA_ARGS__) #define assert_x_ge(a, b, ...) assert_cmp(int, a, b, >=, <, "#x", __VA_ARGS__) #define assert_x_gt(a, b, ...) assert_cmp(int, a, b, >, <=, "#x", __VA_ARGS__) #define assert_d_eq(a, b, ...) assert_cmp(int, a, b, ==, !=, "d", __VA_ARGS__) #define assert_d_ne(a, b, ...) assert_cmp(int, a, b, !=, ==, "d", __VA_ARGS__) #define assert_d_lt(a, b, ...) assert_cmp(int, a, b, <, >=, "d", __VA_ARGS__) #define assert_d_le(a, b, ...) assert_cmp(int, a, b, <=, >, "d", __VA_ARGS__) #define assert_d_ge(a, b, ...) assert_cmp(int, a, b, >=, <, "d", __VA_ARGS__) #define assert_d_gt(a, b, ...) assert_cmp(int, a, b, >, <=, "d", __VA_ARGS__) #define assert_u_eq(a, b, ...) assert_cmp(int, a, b, ==, !=, "u", __VA_ARGS__) #define assert_u_ne(a, b, ...) assert_cmp(int, a, b, !=, ==, "u", __VA_ARGS__) #define assert_u_lt(a, b, ...) assert_cmp(int, a, b, <, >=, "u", __VA_ARGS__) #define assert_u_le(a, b, ...) assert_cmp(int, a, b, <=, >, "u", __VA_ARGS__) #define assert_u_ge(a, b, ...) assert_cmp(int, a, b, >=, <, "u", __VA_ARGS__) #define assert_u_gt(a, b, ...) assert_cmp(int, a, b, >, <=, "u", __VA_ARGS__) #define assert_ld_eq(a, b, ...) assert_cmp(long, a, b, ==, \ !=, "ld", __VA_ARGS__) #define assert_ld_ne(a, b, ...) assert_cmp(long, a, b, !=, \ ==, "ld", __VA_ARGS__) #define assert_ld_lt(a, b, ...) assert_cmp(long, a, b, <, \ >=, "ld", __VA_ARGS__) #define assert_ld_le(a, b, ...) assert_cmp(long, a, b, <=, \ >, "ld", __VA_ARGS__) #define assert_ld_ge(a, b, ...) assert_cmp(long, a, b, >=, \ <, "ld", __VA_ARGS__) #define assert_ld_gt(a, b, ...) assert_cmp(long, a, b, >, \ <=, "ld", __VA_ARGS__) #define assert_lu_eq(a, b, ...) assert_cmp(unsigned long, \ a, b, ==, !=, "lu", __VA_ARGS__) #define assert_lu_ne(a, b, ...) assert_cmp(unsigned long, \ a, b, !=, ==, "lu", __VA_ARGS__) #define assert_lu_lt(a, b, ...) assert_cmp(unsigned long, \ a, b, <, >=, "lu", __VA_ARGS__) #define assert_lu_le(a, b, ...) 
assert_cmp(unsigned long, \ a, b, <=, >, "lu", __VA_ARGS__) #define assert_lu_ge(a, b, ...) assert_cmp(unsigned long, \ a, b, >=, <, "lu", __VA_ARGS__) #define assert_lu_gt(a, b, ...) assert_cmp(unsigned long, \ a, b, >, <=, "lu", __VA_ARGS__) #define assert_qd_eq(a, b, ...) assert_cmp(long long, a, b, ==, \ !=, "qd", __VA_ARGS__) #define assert_qd_ne(a, b, ...) assert_cmp(long long, a, b, !=, \ ==, "qd", __VA_ARGS__) #define assert_qd_lt(a, b, ...) assert_cmp(long long, a, b, <, \ >=, "qd", __VA_ARGS__) #define assert_qd_le(a, b, ...) assert_cmp(long long, a, b, <=, \ >, "qd", __VA_ARGS__) #define assert_qd_ge(a, b, ...) assert_cmp(long long, a, b, >=, \ <, "qd", __VA_ARGS__) #define assert_qd_gt(a, b, ...) assert_cmp(long long, a, b, >, \ <=, "qd", __VA_ARGS__) #define assert_qu_eq(a, b, ...) assert_cmp(unsigned long long, \ a, b, ==, !=, "qu", __VA_ARGS__) #define assert_qu_ne(a, b, ...) assert_cmp(unsigned long long, \ a, b, !=, ==, "qu", __VA_ARGS__) #define assert_qu_lt(a, b, ...) assert_cmp(unsigned long long, \ a, b, <, >=, "qu", __VA_ARGS__) #define assert_qu_le(a, b, ...) assert_cmp(unsigned long long, \ a, b, <=, >, "qu", __VA_ARGS__) #define assert_qu_ge(a, b, ...) assert_cmp(unsigned long long, \ a, b, >=, <, "qu", __VA_ARGS__) #define assert_qu_gt(a, b, ...) assert_cmp(unsigned long long, \ a, b, >, <=, "qu", __VA_ARGS__) #define assert_jd_eq(a, b, ...) assert_cmp(intmax_t, a, b, ==, \ !=, "jd", __VA_ARGS__) #define assert_jd_ne(a, b, ...) assert_cmp(intmax_t, a, b, !=, \ ==, "jd", __VA_ARGS__) #define assert_jd_lt(a, b, ...) assert_cmp(intmax_t, a, b, <, \ >=, "jd", __VA_ARGS__) #define assert_jd_le(a, b, ...) assert_cmp(intmax_t, a, b, <=, \ >, "jd", __VA_ARGS__) #define assert_jd_ge(a, b, ...) assert_cmp(intmax_t, a, b, >=, \ <, "jd", __VA_ARGS__) #define assert_jd_gt(a, b, ...) assert_cmp(intmax_t, a, b, >, \ <=, "jd", __VA_ARGS__) #define assert_ju_eq(a, b, ...) assert_cmp(uintmax_t, a, b, ==, \ !=, "ju", __VA_ARGS__) #define assert_ju_ne(a, b, ...) assert_cmp(uintmax_t, a, b, !=, \ ==, "ju", __VA_ARGS__) #define assert_ju_lt(a, b, ...) assert_cmp(uintmax_t, a, b, <, \ >=, "ju", __VA_ARGS__) #define assert_ju_le(a, b, ...) assert_cmp(uintmax_t, a, b, <=, \ >, "ju", __VA_ARGS__) #define assert_ju_ge(a, b, ...) assert_cmp(uintmax_t, a, b, >=, \ <, "ju", __VA_ARGS__) #define assert_ju_gt(a, b, ...) assert_cmp(uintmax_t, a, b, >, \ <=, "ju", __VA_ARGS__) #define assert_zd_eq(a, b, ...) assert_cmp(ssize_t, a, b, ==, \ !=, "zd", __VA_ARGS__) #define assert_zd_ne(a, b, ...) assert_cmp(ssize_t, a, b, !=, \ ==, "zd", __VA_ARGS__) #define assert_zd_lt(a, b, ...) assert_cmp(ssize_t, a, b, <, \ >=, "zd", __VA_ARGS__) #define assert_zd_le(a, b, ...) assert_cmp(ssize_t, a, b, <=, \ >, "zd", __VA_ARGS__) #define assert_zd_ge(a, b, ...) assert_cmp(ssize_t, a, b, >=, \ <, "zd", __VA_ARGS__) #define assert_zd_gt(a, b, ...) assert_cmp(ssize_t, a, b, >, \ <=, "zd", __VA_ARGS__) #define assert_zu_eq(a, b, ...) assert_cmp(size_t, a, b, ==, \ !=, "zu", __VA_ARGS__) #define assert_zu_ne(a, b, ...) assert_cmp(size_t, a, b, !=, \ ==, "zu", __VA_ARGS__) #define assert_zu_lt(a, b, ...) assert_cmp(size_t, a, b, <, \ >=, "zu", __VA_ARGS__) #define assert_zu_le(a, b, ...) assert_cmp(size_t, a, b, <=, \ >, "zu", __VA_ARGS__) #define assert_zu_ge(a, b, ...) assert_cmp(size_t, a, b, >=, \ <, "zu", __VA_ARGS__) #define assert_zu_gt(a, b, ...) assert_cmp(size_t, a, b, >, \ <=, "zu", __VA_ARGS__) #define assert_d32_eq(a, b, ...) 
assert_cmp(int32_t, a, b, ==, \ !=, FMTd32, __VA_ARGS__) #define assert_d32_ne(a, b, ...) assert_cmp(int32_t, a, b, !=, \ ==, FMTd32, __VA_ARGS__) #define assert_d32_lt(a, b, ...) assert_cmp(int32_t, a, b, <, \ >=, FMTd32, __VA_ARGS__) #define assert_d32_le(a, b, ...) assert_cmp(int32_t, a, b, <=, \ >, FMTd32, __VA_ARGS__) #define assert_d32_ge(a, b, ...) assert_cmp(int32_t, a, b, >=, \ <, FMTd32, __VA_ARGS__) #define assert_d32_gt(a, b, ...) assert_cmp(int32_t, a, b, >, \ <=, FMTd32, __VA_ARGS__) #define assert_u32_eq(a, b, ...) assert_cmp(uint32_t, a, b, ==, \ !=, FMTu32, __VA_ARGS__) #define assert_u32_ne(a, b, ...) assert_cmp(uint32_t, a, b, !=, \ ==, FMTu32, __VA_ARGS__) #define assert_u32_lt(a, b, ...) assert_cmp(uint32_t, a, b, <, \ >=, FMTu32, __VA_ARGS__) #define assert_u32_le(a, b, ...) assert_cmp(uint32_t, a, b, <=, \ >, FMTu32, __VA_ARGS__) #define assert_u32_ge(a, b, ...) assert_cmp(uint32_t, a, b, >=, \ <, FMTu32, __VA_ARGS__) #define assert_u32_gt(a, b, ...) assert_cmp(uint32_t, a, b, >, \ <=, FMTu32, __VA_ARGS__) #define assert_d64_eq(a, b, ...) assert_cmp(int64_t, a, b, ==, \ !=, FMTd64, __VA_ARGS__) #define assert_d64_ne(a, b, ...) assert_cmp(int64_t, a, b, !=, \ ==, FMTd64, __VA_ARGS__) #define assert_d64_lt(a, b, ...) assert_cmp(int64_t, a, b, <, \ >=, FMTd64, __VA_ARGS__) #define assert_d64_le(a, b, ...) assert_cmp(int64_t, a, b, <=, \ >, FMTd64, __VA_ARGS__) #define assert_d64_ge(a, b, ...) assert_cmp(int64_t, a, b, >=, \ <, FMTd64, __VA_ARGS__) #define assert_d64_gt(a, b, ...) assert_cmp(int64_t, a, b, >, \ <=, FMTd64, __VA_ARGS__) #define assert_u64_eq(a, b, ...) assert_cmp(uint64_t, a, b, ==, \ !=, FMTu64, __VA_ARGS__) #define assert_u64_ne(a, b, ...) assert_cmp(uint64_t, a, b, !=, \ ==, FMTu64, __VA_ARGS__) #define assert_u64_lt(a, b, ...) assert_cmp(uint64_t, a, b, <, \ >=, FMTu64, __VA_ARGS__) #define assert_u64_le(a, b, ...) assert_cmp(uint64_t, a, b, <=, \ >, FMTu64, __VA_ARGS__) #define assert_u64_ge(a, b, ...) assert_cmp(uint64_t, a, b, >=, \ <, FMTu64, __VA_ARGS__) #define assert_u64_gt(a, b, ...) assert_cmp(uint64_t, a, b, >, \ <=, FMTu64, __VA_ARGS__) #define assert_b_eq(a, b, ...) do { \ bool a_ = (a); \ bool b_ = (b); \ if (!(a_ == b_)) { \ char prefix[ASSERT_BUFSIZE]; \ char message[ASSERT_BUFSIZE]; \ malloc_snprintf(prefix, sizeof(prefix), \ "%s:%s:%d: Failed assertion: " \ "(%s) == (%s) --> %s != %s: ", \ __func__, __FILE__, __LINE__, \ #a, #b, a_ ? "true" : "false", \ b_ ? "true" : "false"); \ malloc_snprintf(message, sizeof(message), __VA_ARGS__); \ p_test_fail(prefix, message); \ } \ } while (0) #define assert_b_ne(a, b, ...) do { \ bool a_ = (a); \ bool b_ = (b); \ if (!(a_ != b_)) { \ char prefix[ASSERT_BUFSIZE]; \ char message[ASSERT_BUFSIZE]; \ malloc_snprintf(prefix, sizeof(prefix), \ "%s:%s:%d: Failed assertion: " \ "(%s) != (%s) --> %s == %s: ", \ __func__, __FILE__, __LINE__, \ #a, #b, a_ ? "true" : "false", \ b_ ? "true" : "false"); \ malloc_snprintf(message, sizeof(message), __VA_ARGS__); \ p_test_fail(prefix, message); \ } \ } while (0) #define assert_true(a, ...) assert_b_eq(a, true, __VA_ARGS__) #define assert_false(a, ...) assert_b_eq(a, false, __VA_ARGS__) #define assert_str_eq(a, b, ...) 
do { \ if (strcmp((a), (b))) { \ char prefix[ASSERT_BUFSIZE]; \ char message[ASSERT_BUFSIZE]; \ malloc_snprintf(prefix, sizeof(prefix), \ "%s:%s:%d: Failed assertion: " \ "(%s) same as (%s) --> " \ "\"%s\" differs from \"%s\": ", \ __func__, __FILE__, __LINE__, #a, #b, a, b); \ malloc_snprintf(message, sizeof(message), __VA_ARGS__); \ p_test_fail(prefix, message); \ } \ } while (0) #define assert_str_ne(a, b, ...) do { \ if (!strcmp((a), (b))) { \ char prefix[ASSERT_BUFSIZE]; \ char message[ASSERT_BUFSIZE]; \ malloc_snprintf(prefix, sizeof(prefix), \ "%s:%s:%d: Failed assertion: " \ "(%s) differs from (%s) --> " \ "\"%s\" same as \"%s\": ", \ __func__, __FILE__, __LINE__, #a, #b, a, b); \ malloc_snprintf(message, sizeof(message), __VA_ARGS__); \ p_test_fail(prefix, message); \ } \ } while (0) #define assert_not_reached(...) do { \ char prefix[ASSERT_BUFSIZE]; \ char message[ASSERT_BUFSIZE]; \ malloc_snprintf(prefix, sizeof(prefix), \ "%s:%s:%d: Unreachable code reached: ", \ __func__, __FILE__, __LINE__); \ malloc_snprintf(message, sizeof(message), __VA_ARGS__); \ p_test_fail(prefix, message); \ } while (0) /* * If this enum changes, corresponding changes in test/test.sh.in are also * necessary. */ typedef enum { test_status_pass = 0, test_status_skip = 1, test_status_fail = 2, test_status_count = 3 } test_status_t; typedef void (test_t)(void); #define TEST_BEGIN(f) \ static void \ f(void) \ { \ p_test_init(#f); #define TEST_END \ goto label_test_end; \ label_test_end: \ p_test_fini(); \ } #define test(...) \ p_test(__VA_ARGS__, NULL) #define test_no_malloc_init(...) \ p_test_no_malloc_init(__VA_ARGS__, NULL) #define test_skip_if(e) do { \ if (e) { \ test_skip("%s:%s:%d: Test skipped: (%s)", \ __func__, __FILE__, __LINE__, #e); \ goto label_test_end; \ } \ } while (0) void test_skip(const char *format, ...) JEMALLOC_FORMAT_PRINTF(1, 2); void test_fail(const char *format, ...) JEMALLOC_FORMAT_PRINTF(1, 2); /* For private use by macros. */ test_status_t p_test(test_t *t, ...); test_status_t p_test_no_malloc_init(test_t *t, ...); void p_test_init(const char *name); void p_test_fini(void); void p_test_fail(const char *prefix, const char *message);
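A brief, hypothetical usage sketch (not part of the file above): one unit test built from TEST_BEGIN/TEST_END and the typed assertion macros, plus a main() that runs it through the test() harness. The test name and values are invented for illustration.

/* Hypothetical example test; assumes this header and the p_test_* harness. */
TEST_BEGIN(test_example_arith)
{
	size_t a = 41;

	assert_zu_eq(a + 1, 42, "Unexpected sum: %zu", a + 1);
	assert_zu_lt(a, 100, "Expected a < 100, got %zu", a);
	assert_ptr_not_null(&a, "A stack address cannot be NULL");
	test_skip_if(sizeof(void *) < 8); /* Skips the rest on 32-bit. */
}
TEST_END

int
main(void)
{
	/* test() expands to p_test(..., NULL) and returns a test_status_t. */
	return (test(test_example_arith));
}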
13,310
38.853293
79
h
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/jemalloc/test/include/test/SFMT.h
/* * This file derives from SFMT 1.3.3 * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was * released under the terms of the following license: * * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima * University. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of the Hiroshima University nor the names of * its contributors may be used to endorse or promote products * derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /** * @file SFMT.h * * @brief SIMD oriented Fast Mersenne Twister(SFMT) pseudorandom * number generator * * @author Mutsuo Saito (Hiroshima University) * @author Makoto Matsumoto (Hiroshima University) * * Copyright (C) 2006, 2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima * University. All rights reserved. * * The new BSD License is applied to this software. * see LICENSE.txt * * @note We assume that your system has inttypes.h. If your system * doesn't have inttypes.h, you have to typedef uint32_t and uint64_t, * and you have to define PRIu64 and PRIx64 in this file as follows: * @verbatim typedef unsigned int uint32_t typedef unsigned long long uint64_t #define PRIu64 "llu" #define PRIx64 "llx" @endverbatim * uint32_t must be exactly 32-bit unsigned integer type (no more, no * less), and uint64_t must be exactly 64-bit unsigned integer type. * PRIu64 and PRIx64 are used for printf function to print 64-bit * unsigned int and 64-bit unsigned int in hexadecimal format. 
*/ #ifndef SFMT_H #define SFMT_H typedef struct sfmt_s sfmt_t; uint32_t gen_rand32(sfmt_t *ctx); uint32_t gen_rand32_range(sfmt_t *ctx, uint32_t limit); uint64_t gen_rand64(sfmt_t *ctx); uint64_t gen_rand64_range(sfmt_t *ctx, uint64_t limit); void fill_array32(sfmt_t *ctx, uint32_t *array, int size); void fill_array64(sfmt_t *ctx, uint64_t *array, int size); sfmt_t *init_gen_rand(uint32_t seed); sfmt_t *init_by_array(uint32_t *init_key, int key_length); void fini_gen_rand(sfmt_t *ctx); const char *get_idstring(void); int get_min_array_size32(void); int get_min_array_size64(void); #ifndef JEMALLOC_ENABLE_INLINE double to_real1(uint32_t v); double genrand_real1(sfmt_t *ctx); double to_real2(uint32_t v); double genrand_real2(sfmt_t *ctx); double to_real3(uint32_t v); double genrand_real3(sfmt_t *ctx); double to_res53(uint64_t v); double to_res53_mix(uint32_t x, uint32_t y); double genrand_res53(sfmt_t *ctx); double genrand_res53_mix(sfmt_t *ctx); #endif #if (defined(JEMALLOC_ENABLE_INLINE) || defined(SFMT_C_)) /* These real versions are due to Isaku Wada */ /** generates a random number on [0,1]-real-interval */ JEMALLOC_INLINE double to_real1(uint32_t v) { return v * (1.0/4294967295.0); /* divided by 2^32-1 */ } /** generates a random number on [0,1]-real-interval */ JEMALLOC_INLINE double genrand_real1(sfmt_t *ctx) { return to_real1(gen_rand32(ctx)); } /** generates a random number on [0,1)-real-interval */ JEMALLOC_INLINE double to_real2(uint32_t v) { return v * (1.0/4294967296.0); /* divided by 2^32 */ } /** generates a random number on [0,1)-real-interval */ JEMALLOC_INLINE double genrand_real2(sfmt_t *ctx) { return to_real2(gen_rand32(ctx)); } /** generates a random number on (0,1)-real-interval */ JEMALLOC_INLINE double to_real3(uint32_t v) { return (((double)v) + 0.5)*(1.0/4294967296.0); /* divided by 2^32 */ } /** generates a random number on (0,1)-real-interval */ JEMALLOC_INLINE double genrand_real3(sfmt_t *ctx) { return to_real3(gen_rand32(ctx)); } /** These real versions are due to Isaku Wada */ /** generates a random number on [0,1) with 53-bit resolution */ JEMALLOC_INLINE double to_res53(uint64_t v) { return v * (1.0/18446744073709551616.0L); } /** generates a random number on [0,1) with 53-bit resolution from two * 32-bit integers */ JEMALLOC_INLINE double to_res53_mix(uint32_t x, uint32_t y) { return to_res53(x | ((uint64_t)y << 32)); } /** generates a random number on [0,1) with 53-bit resolution */ JEMALLOC_INLINE double genrand_res53(sfmt_t *ctx) { return to_res53(gen_rand64(ctx)); } /** generates a random number on [0,1) with 53-bit resolution using 32-bit integers. */ JEMALLOC_INLINE double genrand_res53_mix(sfmt_t *ctx) { uint32_t x, y; x = gen_rand32(ctx); y = gen_rand32(ctx); return to_res53_mix(x, y); } #endif #endif
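A hedged usage sketch for the API declared above; the include path and seed are illustrative. It respects the upstream SFMT caveat that 64-bit draws should not follow 32-bit draws without reseeding, so the 64-bit call comes first.

#include <stdio.h>
#include <stdint.h>
#include "test/SFMT.h" /* Assumed include path within the test tree. */

int
main(void)
{
	sfmt_t *ctx = init_gen_rand(12345); /* Seed with a 32-bit value. */
	uint64_t r64 = gen_rand64_range(ctx, 1000); /* In [0, 1000). */
	uint32_t r32 = gen_rand32(ctx);
	double d = genrand_real2(ctx); /* In [0, 1); inline path only. */

	printf("%s: %llu %u %f\n", get_idstring(),
	    (unsigned long long)r64, r32, d);
	fini_gen_rand(ctx);
	return (0);
}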
5,805
32.755814
79
h
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/jemalloc/test/include/test/SFMT-params44497.h
/* * This file derives from SFMT 1.3.3 * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was * released under the terms of the following license: * * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima * University. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of the Hiroshima University nor the names of * its contributors may be used to endorse or promote products * derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef SFMT_PARAMS44497_H #define SFMT_PARAMS44497_H #define POS1 330 #define SL1 5 #define SL2 3 #define SR1 9 #define SR2 3 #define MSK1 0xeffffffbU #define MSK2 0xdfbebfffU #define MSK3 0xbfbf7befU #define MSK4 0x9ffd7bffU #define PARITY1 0x00000001U #define PARITY2 0x00000000U #define PARITY3 0xa3ac4000U #define PARITY4 0xecc1327aU /* PARAMETERS FOR ALTIVEC */ #if defined(__APPLE__) /* For OSX */ #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1) #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1) #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4) #define ALTI_MSK64 \ (vector unsigned int)(MSK2, MSK1, MSK4, MSK3) #define ALTI_SL2_PERM \ (vector unsigned char)(3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10) #define ALTI_SL2_PERM64 \ (vector unsigned char)(3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2) #define ALTI_SR2_PERM \ (vector unsigned char)(5,6,7,0,9,10,11,4,13,14,15,8,19,19,19,12) #define ALTI_SR2_PERM64 \ (vector unsigned char)(13,14,15,0,1,2,3,4,19,19,19,8,9,10,11,12) #else /* For OTHER OSs(Linux?) */ #define ALTI_SL1 {SL1, SL1, SL1, SL1} #define ALTI_SR1 {SR1, SR1, SR1, SR1} #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4} #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3} #define ALTI_SL2_PERM {3,21,21,21,7,0,1,2,11,4,5,6,15,8,9,10} #define ALTI_SL2_PERM64 {3,4,5,6,7,29,29,29,11,12,13,14,15,0,1,2} #define ALTI_SR2_PERM {5,6,7,0,9,10,11,4,13,14,15,8,19,19,19,12} #define ALTI_SR2_PERM64 {13,14,15,0,1,2,3,4,19,19,19,8,9,10,11,12} #endif /* For OSX */ #define IDSTR "SFMT-44497:330-5-3-9-3:effffffb-dfbebfff-bfbf7bef-9ffd7bff" #endif /* SFMT_PARAMS44497_H */
3,566
42.5
79
h
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/jemalloc/test/include/test/SFMT-alti.h
/* * This file derives from SFMT 1.3.3 * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was * released under the terms of the following license: * * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima * University. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of the Hiroshima University nor the names of * its contributors may be used to endorse or promote products * derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /** * @file SFMT-alti.h * * @brief SIMD oriented Fast Mersenne Twister(SFMT) * pseudorandom number generator * * @author Mutsuo Saito (Hiroshima University) * @author Makoto Matsumoto (Hiroshima University) * * Copyright (C) 2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima * University. All rights reserved. * * The new BSD License is applied to this software. * see LICENSE.txt */ #ifndef SFMT_ALTI_H #define SFMT_ALTI_H /** * This function represents the recursion formula in AltiVec and BIG ENDIAN. * @param a a 128-bit part of the internal state array * @param b a 128-bit part of the internal state array * @param c a 128-bit part of the internal state array * @param d a 128-bit part of the internal state array * @return output */ JEMALLOC_ALWAYS_INLINE vector unsigned int vec_recursion(vector unsigned int a, vector unsigned int b, vector unsigned int c, vector unsigned int d) { const vector unsigned int sl1 = ALTI_SL1; const vector unsigned int sr1 = ALTI_SR1; #ifdef ONLY64 const vector unsigned int mask = ALTI_MSK64; const vector unsigned char perm_sl = ALTI_SL2_PERM64; const vector unsigned char perm_sr = ALTI_SR2_PERM64; #else const vector unsigned int mask = ALTI_MSK; const vector unsigned char perm_sl = ALTI_SL2_PERM; const vector unsigned char perm_sr = ALTI_SR2_PERM; #endif vector unsigned int v, w, x, y, z; x = vec_perm(a, (vector unsigned int)perm_sl, perm_sl); v = a; y = vec_sr(b, sr1); z = vec_perm(c, (vector unsigned int)perm_sr, perm_sr); w = vec_sl(d, sl1); z = vec_xor(z, w); y = vec_and(y, mask); v = vec_xor(v, x); z = vec_xor(z, y); z = vec_xor(z, v); return z; } /** * This function fills the internal state array with pseudorandom * integers.
*/ JEMALLOC_INLINE void gen_rand_all(sfmt_t *ctx) { int i; vector unsigned int r, r1, r2; r1 = ctx->sfmt[N - 2].s; r2 = ctx->sfmt[N - 1].s; for (i = 0; i < N - POS1; i++) { r = vec_recursion(ctx->sfmt[i].s, ctx->sfmt[i + POS1].s, r1, r2); ctx->sfmt[i].s = r; r1 = r2; r2 = r; } for (; i < N; i++) { r = vec_recursion(ctx->sfmt[i].s, ctx->sfmt[i + POS1 - N].s, r1, r2); ctx->sfmt[i].s = r; r1 = r2; r2 = r; } } /** * This function fills the user-specified array with pseudorandom * integers. * * @param array a 128-bit array to be filled by pseudorandom numbers. * @param size number of 128-bit pseudorandom numbers to be generated. */ JEMALLOC_INLINE void gen_rand_array(sfmt_t *ctx, w128_t *array, int size) { int i, j; vector unsigned int r, r1, r2; r1 = ctx->sfmt[N - 2].s; r2 = ctx->sfmt[N - 1].s; for (i = 0; i < N - POS1; i++) { r = vec_recursion(ctx->sfmt[i].s, ctx->sfmt[i + POS1].s, r1, r2); array[i].s = r; r1 = r2; r2 = r; } for (; i < N; i++) { r = vec_recursion(ctx->sfmt[i].s, array[i + POS1 - N].s, r1, r2); array[i].s = r; r1 = r2; r2 = r; } /* main loop */ for (; i < size - N; i++) { r = vec_recursion(array[i - N].s, array[i + POS1 - N].s, r1, r2); array[i].s = r; r1 = r2; r2 = r; } for (j = 0; j < 2 * N - size; j++) { ctx->sfmt[j].s = array[j + size - N].s; } for (; i < size; i++) { r = vec_recursion(array[i - N].s, array[i + POS1 - N].s, r1, r2); array[i].s = r; ctx->sfmt[j++].s = r; r1 = r2; r2 = r; } } #ifndef ONLY64 #if defined(__APPLE__) #define ALTI_SWAP (vector unsigned char) \ (4, 5, 6, 7, 0, 1, 2, 3, 12, 13, 14, 15, 8, 9, 10, 11) #else #define ALTI_SWAP {4, 5, 6, 7, 0, 1, 2, 3, 12, 13, 14, 15, 8, 9, 10, 11} #endif /** * This function swaps the high and low 32 bits of the 64-bit integers in a * user-specified array. * * @param array a 128-bit array to be swapped. * @param size size of 128-bit array. */ JEMALLOC_INLINE void swap(w128_t *array, int size) { int i; const vector unsigned char perm = ALTI_SWAP; for (i = 0; i < size; i++) { array[i].s = vec_perm(array[i].s, (vector unsigned int)perm, perm); } } #endif #endif
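For reference, a hedged scalar sketch of the recursion that vec_recursion() vectorizes, following the portable (non-SIMD, little-endian) SFMT code path; the w128 layout and helper names here are assumptions for illustration, not part of this header.

#include <stdint.h>

typedef struct { uint32_t u[4]; } w128_sketch_t; /* Assumed portable lane. */

/* Shift a whole 128-bit value left by (shift * 8) bits. */
static void
lshift128_sketch(w128_sketch_t *out, const w128_sketch_t *in, int shift)
{
	uint64_t th = ((uint64_t)in->u[3] << 32) | in->u[2];
	uint64_t tl = ((uint64_t)in->u[1] << 32) | in->u[0];
	uint64_t oh = (th << (shift * 8)) | (tl >> (64 - shift * 8));
	uint64_t ol = tl << (shift * 8);

	out->u[0] = (uint32_t)ol; out->u[1] = (uint32_t)(ol >> 32);
	out->u[2] = (uint32_t)oh; out->u[3] = (uint32_t)(oh >> 32);
}

/* Shift a whole 128-bit value right by (shift * 8) bits. */
static void
rshift128_sketch(w128_sketch_t *out, const w128_sketch_t *in, int shift)
{
	uint64_t th = ((uint64_t)in->u[3] << 32) | in->u[2];
	uint64_t tl = ((uint64_t)in->u[1] << 32) | in->u[0];
	uint64_t oh = th >> (shift * 8);
	uint64_t ol = (tl >> (shift * 8)) | (th << (64 - shift * 8));

	out->u[0] = (uint32_t)ol; out->u[1] = (uint32_t)(ol >> 32);
	out->u[2] = (uint32_t)oh; out->u[3] = (uint32_t)(oh >> 32);
}

/* r = a ^ (a << SL2 bytes) ^ ((b >> SR1) & MSK) ^ (c >> SR2 bytes) ^ (d << SL1). */
static void
do_recursion_sketch(w128_sketch_t *r, const w128_sketch_t *a,
    const w128_sketch_t *b, const w128_sketch_t *c, const w128_sketch_t *d)
{
	w128_sketch_t x, y;

	lshift128_sketch(&x, a, SL2);
	rshift128_sketch(&y, c, SR2);
	r->u[0] = a->u[0] ^ x.u[0] ^ ((b->u[0] >> SR1) & MSK1) ^ y.u[0] ^ (d->u[0] << SL1);
	r->u[1] = a->u[1] ^ x.u[1] ^ ((b->u[1] >> SR1) & MSK2) ^ y.u[1] ^ (d->u[1] << SL1);
	r->u[2] = a->u[2] ^ x.u[2] ^ ((b->u[2] >> SR1) & MSK3) ^ y.u[2] ^ (d->u[2] << SL1);
	r->u[3] = a->u[3] ^ x.u[3] ^ ((b->u[3] >> SR1) & MSK4) ^ y.u[3] ^ (d->u[3] << SL1);
}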
5,921
30.668449
79
h
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/jemalloc/test/include/test/SFMT-params86243.h
/* * This file derives from SFMT 1.3.3 * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was * released under the terms of the following license: * * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima * University. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of the Hiroshima University nor the names of * its contributors may be used to endorse or promote products * derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef SFMT_PARAMS86243_H #define SFMT_PARAMS86243_H #define POS1 366 #define SL1 6 #define SL2 7 #define SR1 19 #define SR2 1 #define MSK1 0xfdbffbffU #define MSK2 0xbff7ff3fU #define MSK3 0xfd77efffU #define MSK4 0xbf9ff3ffU #define PARITY1 0x00000001U #define PARITY2 0x00000000U #define PARITY3 0x00000000U #define PARITY4 0xe9528d85U /* PARAMETERS FOR ALTIVEC */ #if defined(__APPLE__) /* For OSX */ #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1) #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1) #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4) #define ALTI_MSK64 \ (vector unsigned int)(MSK2, MSK1, MSK4, MSK3) #define ALTI_SL2_PERM \ (vector unsigned char)(25,25,25,25,3,25,25,25,7,0,1,2,11,4,5,6) #define ALTI_SL2_PERM64 \ (vector unsigned char)(7,25,25,25,25,25,25,25,15,0,1,2,3,4,5,6) #define ALTI_SR2_PERM \ (vector unsigned char)(7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14) #define ALTI_SR2_PERM64 \ (vector unsigned char)(15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14) #else /* For OTHER OSs(Linux?) */ #define ALTI_SL1 {SL1, SL1, SL1, SL1} #define ALTI_SR1 {SR1, SR1, SR1, SR1} #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4} #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3} #define ALTI_SL2_PERM {25,25,25,25,3,25,25,25,7,0,1,2,11,4,5,6} #define ALTI_SL2_PERM64 {7,25,25,25,25,25,25,25,15,0,1,2,3,4,5,6} #define ALTI_SR2_PERM {7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14} #define ALTI_SR2_PERM64 {15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14} #endif /* For OSX */ #define IDSTR "SFMT-86243:366-6-7-19-1:fdbffbff-bff7ff3f-fd77efff-bf9ff3ff" #endif /* SFMT_PARAMS86243_H */
3,564
42.47561
79
h
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/jemalloc/test/include/test/SFMT-params132049.h
/* * This file derives from SFMT 1.3.3 * (http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/SFMT/index.html), which was * released under the terms of the following license: * * Copyright (c) 2006,2007 Mutsuo Saito, Makoto Matsumoto and Hiroshima * University. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided * with the distribution. * * Neither the name of the Hiroshima University nor the names of * its contributors may be used to endorse or promote products * derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef SFMT_PARAMS132049_H #define SFMT_PARAMS132049_H #define POS1 110 #define SL1 19 #define SL2 1 #define SR1 21 #define SR2 1 #define MSK1 0xffffbb5fU #define MSK2 0xfb6ebf95U #define MSK3 0xfffefffaU #define MSK4 0xcff77fffU #define PARITY1 0x00000001U #define PARITY2 0x00000000U #define PARITY3 0xcb520000U #define PARITY4 0xc7e91c7dU /* PARAMETERS FOR ALTIVEC */ #if defined(__APPLE__) /* For OSX */ #define ALTI_SL1 (vector unsigned int)(SL1, SL1, SL1, SL1) #define ALTI_SR1 (vector unsigned int)(SR1, SR1, SR1, SR1) #define ALTI_MSK (vector unsigned int)(MSK1, MSK2, MSK3, MSK4) #define ALTI_MSK64 \ (vector unsigned int)(MSK2, MSK1, MSK4, MSK3) #define ALTI_SL2_PERM \ (vector unsigned char)(1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8) #define ALTI_SL2_PERM64 \ (vector unsigned char)(1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0) #define ALTI_SR2_PERM \ (vector unsigned char)(7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14) #define ALTI_SR2_PERM64 \ (vector unsigned char)(15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14) #else /* For OTHER OSs(Linux?) */ #define ALTI_SL1 {SL1, SL1, SL1, SL1} #define ALTI_SR1 {SR1, SR1, SR1, SR1} #define ALTI_MSK {MSK1, MSK2, MSK3, MSK4} #define ALTI_MSK64 {MSK2, MSK1, MSK4, MSK3} #define ALTI_SL2_PERM {1,2,3,23,5,6,7,0,9,10,11,4,13,14,15,8} #define ALTI_SL2_PERM64 {1,2,3,4,5,6,7,31,9,10,11,12,13,14,15,0} #define ALTI_SR2_PERM {7,0,1,2,11,4,5,6,15,8,9,10,17,12,13,14} #define ALTI_SR2_PERM64 {15,0,1,2,3,4,5,6,17,8,9,10,11,12,13,14} #endif /* For OSX */ #define IDSTR "SFMT-132049:110-19-1-21-1:ffffbb5f-fb6ebf95-fffefffa-cff77fff" #endif /* SFMT_PARAMS132049_H */
3,564
42.47561
79
h
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/jemalloc/include/msvc_compat/C99/stdint.h
// ISO C9x compliant stdint.h for Microsoft Visual Studio // Based on ISO/IEC 9899:TC2 Committee draft (May 6, 2005) WG14/N1124 // // Copyright (c) 2006-2008 Alexander Chemeris // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // 2. Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // // 3. The name of the author may be used to endorse or promote products // derived from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED // WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF // MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO // EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; // OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR // OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF // ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // /////////////////////////////////////////////////////////////////////////////// #ifndef _MSC_VER // [ #error "Use this header only with Microsoft Visual C++ compilers!" #endif // _MSC_VER ] #ifndef _MSC_STDINT_H_ // [ #define _MSC_STDINT_H_ #if _MSC_VER > 1000 #pragma once #endif #include <limits.h> // For Visual Studio 6 in C++ mode and for many Visual Studio versions when // compiling for ARM we should wrap <wchar.h> include with 'extern "C++" {}' // or the compiler gives many errors like this: // error C2733: second C linkage of overloaded function 'wmemchr' not allowed #ifdef __cplusplus extern "C" { #endif # include <wchar.h> #ifdef __cplusplus } #endif // Define _W64 macros to mark types changing their size, like intptr_t. #ifndef _W64 # if !defined(__midl) && (defined(_X86_) || defined(_M_IX86)) && _MSC_VER >= 1300 # define _W64 __w64 # else # define _W64 # endif #endif // 7.18.1 Integer types // 7.18.1.1 Exact-width integer types // Visual Studio 6 and Embedded Visual C++ 4 don't // realize that, e.g. char has the same size as __int8, // so we give up on __intX for them.
#if (_MSC_VER < 1300) typedef signed char int8_t; typedef signed short int16_t; typedef signed int int32_t; typedef unsigned char uint8_t; typedef unsigned short uint16_t; typedef unsigned int uint32_t; #else typedef signed __int8 int8_t; typedef signed __int16 int16_t; typedef signed __int32 int32_t; typedef unsigned __int8 uint8_t; typedef unsigned __int16 uint16_t; typedef unsigned __int32 uint32_t; #endif typedef signed __int64 int64_t; typedef unsigned __int64 uint64_t; // 7.18.1.2 Minimum-width integer types typedef int8_t int_least8_t; typedef int16_t int_least16_t; typedef int32_t int_least32_t; typedef int64_t int_least64_t; typedef uint8_t uint_least8_t; typedef uint16_t uint_least16_t; typedef uint32_t uint_least32_t; typedef uint64_t uint_least64_t; // 7.18.1.3 Fastest minimum-width integer types typedef int8_t int_fast8_t; typedef int16_t int_fast16_t; typedef int32_t int_fast32_t; typedef int64_t int_fast64_t; typedef uint8_t uint_fast8_t; typedef uint16_t uint_fast16_t; typedef uint32_t uint_fast32_t; typedef uint64_t uint_fast64_t; // 7.18.1.4 Integer types capable of holding object pointers #ifdef _WIN64 // [ typedef signed __int64 intptr_t; typedef unsigned __int64 uintptr_t; #else // _WIN64 ][ typedef _W64 signed int intptr_t; typedef _W64 unsigned int uintptr_t; #endif // _WIN64 ] // 7.18.1.5 Greatest-width integer types typedef int64_t intmax_t; typedef uint64_t uintmax_t; // 7.18.2 Limits of specified-width integer types #if !defined(__cplusplus) || defined(__STDC_LIMIT_MACROS) // [ See footnote 220 at page 257 and footnote 221 at page 259 // 7.18.2.1 Limits of exact-width integer types #define INT8_MIN ((int8_t)_I8_MIN) #define INT8_MAX _I8_MAX #define INT16_MIN ((int16_t)_I16_MIN) #define INT16_MAX _I16_MAX #define INT32_MIN ((int32_t)_I32_MIN) #define INT32_MAX _I32_MAX #define INT64_MIN ((int64_t)_I64_MIN) #define INT64_MAX _I64_MAX #define UINT8_MAX _UI8_MAX #define UINT16_MAX _UI16_MAX #define UINT32_MAX _UI32_MAX #define UINT64_MAX _UI64_MAX // 7.18.2.2 Limits of minimum-width integer types #define INT_LEAST8_MIN INT8_MIN #define INT_LEAST8_MAX INT8_MAX #define INT_LEAST16_MIN INT16_MIN #define INT_LEAST16_MAX INT16_MAX #define INT_LEAST32_MIN INT32_MIN #define INT_LEAST32_MAX INT32_MAX #define INT_LEAST64_MIN INT64_MIN #define INT_LEAST64_MAX INT64_MAX #define UINT_LEAST8_MAX UINT8_MAX #define UINT_LEAST16_MAX UINT16_MAX #define UINT_LEAST32_MAX UINT32_MAX #define UINT_LEAST64_MAX UINT64_MAX // 7.18.2.3 Limits of fastest minimum-width integer types #define INT_FAST8_MIN INT8_MIN #define INT_FAST8_MAX INT8_MAX #define INT_FAST16_MIN INT16_MIN #define INT_FAST16_MAX INT16_MAX #define INT_FAST32_MIN INT32_MIN #define INT_FAST32_MAX INT32_MAX #define INT_FAST64_MIN INT64_MIN #define INT_FAST64_MAX INT64_MAX #define UINT_FAST8_MAX UINT8_MAX #define UINT_FAST16_MAX UINT16_MAX #define UINT_FAST32_MAX UINT32_MAX #define UINT_FAST64_MAX UINT64_MAX // 7.18.2.4 Limits of integer types capable of holding object pointers #ifdef _WIN64 // [ # define INTPTR_MIN INT64_MIN # define INTPTR_MAX INT64_MAX # define UINTPTR_MAX UINT64_MAX #else // _WIN64 ][ # define INTPTR_MIN INT32_MIN # define INTPTR_MAX INT32_MAX # define UINTPTR_MAX UINT32_MAX #endif // _WIN64 ] // 7.18.2.5 Limits of greatest-width integer types #define INTMAX_MIN INT64_MIN #define INTMAX_MAX INT64_MAX #define UINTMAX_MAX UINT64_MAX // 7.18.3 Limits of other integer types #ifdef _WIN64 // [ # define PTRDIFF_MIN _I64_MIN # define PTRDIFF_MAX _I64_MAX #else // _WIN64 ][ # define PTRDIFF_MIN _I32_MIN # define 
PTRDIFF_MAX _I32_MAX #endif // _WIN64 ] #define SIG_ATOMIC_MIN INT_MIN #define SIG_ATOMIC_MAX INT_MAX #ifndef SIZE_MAX // [ # ifdef _WIN64 // [ # define SIZE_MAX _UI64_MAX # else // _WIN64 ][ # define SIZE_MAX _UI32_MAX # endif // _WIN64 ] #endif // SIZE_MAX ] // WCHAR_MIN and WCHAR_MAX are also defined in <wchar.h> #ifndef WCHAR_MIN // [ # define WCHAR_MIN 0 #endif // WCHAR_MIN ] #ifndef WCHAR_MAX // [ # define WCHAR_MAX _UI16_MAX #endif // WCHAR_MAX ] #define WINT_MIN 0 #define WINT_MAX _UI16_MAX #endif // __STDC_LIMIT_MACROS ] // 7.18.4 Limits of other integer types #if !defined(__cplusplus) || defined(__STDC_CONSTANT_MACROS) // [ See footnote 224 at page 260 // 7.18.4.1 Macros for minimum-width integer constants #define INT8_C(val) val##i8 #define INT16_C(val) val##i16 #define INT32_C(val) val##i32 #define INT64_C(val) val##i64 #define UINT8_C(val) val##ui8 #define UINT16_C(val) val##ui16 #define UINT32_C(val) val##ui32 #define UINT64_C(val) val##ui64 // 7.18.4.2 Macros for greatest-width integer constants #define INTMAX_C INT64_C #define UINTMAX_C UINT64_C #endif // __STDC_CONSTANT_MACROS ] #endif // _MSC_STDINT_H_ ]
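A small, hypothetical usage sketch of what the shim above enables on pre-C99 MSVC; the values are illustrative, and very old MSVC runtimes may need %I64u in place of %llu.

#include <stdio.h>
#include <stdint.h> /* Resolves to this shim under old MSVC. */

int
main(void)
{
	uint64_t big = UINT64_C(0x123456789abcdef0); /* Expands to ...ui64. */
	intptr_t p = (intptr_t)&big; /* Pointer-sized on Win32 and Win64. */

	if (big > UINT32_MAX) {
		printf("needs 64 bits; SIZE_MAX = %llu\n",
		    (unsigned long long)SIZE_MAX);
	}
	(void)p;
	return (0);
}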
7,728
30.165323
122
h
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/jemalloc/include/jemalloc/internal/mutex.h
/******************************************************************************/ #ifdef JEMALLOC_H_TYPES typedef struct malloc_mutex_s malloc_mutex_t; #ifdef _WIN32 # define MALLOC_MUTEX_INITIALIZER #elif (defined(JEMALLOC_OS_UNFAIR_LOCK)) # define MALLOC_MUTEX_INITIALIZER \ {OS_UNFAIR_LOCK_INIT, WITNESS_INITIALIZER(WITNESS_RANK_OMIT)} #elif (defined(JEMALLOC_OSSPIN)) # define MALLOC_MUTEX_INITIALIZER {0, WITNESS_INITIALIZER(WITNESS_RANK_OMIT)} #elif (defined(JEMALLOC_MUTEX_INIT_CB)) # define MALLOC_MUTEX_INITIALIZER \ {PTHREAD_MUTEX_INITIALIZER, NULL, WITNESS_INITIALIZER(WITNESS_RANK_OMIT)} #else # if (defined(JEMALLOC_HAVE_PTHREAD_MUTEX_ADAPTIVE_NP) && \ defined(PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP)) # define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_ADAPTIVE_NP # define MALLOC_MUTEX_INITIALIZER \ {PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP, \ WITNESS_INITIALIZER(WITNESS_RANK_OMIT)} # else # define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_DEFAULT # define MALLOC_MUTEX_INITIALIZER \ {PTHREAD_MUTEX_INITIALIZER, WITNESS_INITIALIZER(WITNESS_RANK_OMIT)} # endif #endif #endif /* JEMALLOC_H_TYPES */ /******************************************************************************/ #ifdef JEMALLOC_H_STRUCTS struct malloc_mutex_s { #ifdef _WIN32 # if _WIN32_WINNT >= 0x0600 SRWLOCK lock; # else CRITICAL_SECTION lock; # endif #elif (defined(JEMALLOC_OS_UNFAIR_LOCK)) os_unfair_lock lock; #elif (defined(JEMALLOC_OSSPIN)) OSSpinLock lock; #elif (defined(JEMALLOC_MUTEX_INIT_CB)) pthread_mutex_t lock; malloc_mutex_t *postponed_next; #else pthread_mutex_t lock; #endif witness_t witness; }; #endif /* JEMALLOC_H_STRUCTS */ /******************************************************************************/ #ifdef JEMALLOC_H_EXTERNS #ifdef JEMALLOC_LAZY_LOCK extern bool isthreaded; #else # undef isthreaded /* Undo private_namespace.h definition. 
*/ # define isthreaded true #endif bool malloc_mutex_init(malloc_mutex_t *mutex, const char *name, witness_rank_t rank); void malloc_mutex_prefork(tsdn_t *tsdn, malloc_mutex_t *mutex); void malloc_mutex_postfork_parent(tsdn_t *tsdn, malloc_mutex_t *mutex); void malloc_mutex_postfork_child(tsdn_t *tsdn, malloc_mutex_t *mutex); bool malloc_mutex_boot(void); #endif /* JEMALLOC_H_EXTERNS */ /******************************************************************************/ #ifdef JEMALLOC_H_INLINES #ifndef JEMALLOC_ENABLE_INLINE void malloc_mutex_lock(tsdn_t *tsdn, malloc_mutex_t *mutex); void malloc_mutex_unlock(tsdn_t *tsdn, malloc_mutex_t *mutex); void malloc_mutex_assert_owner(tsdn_t *tsdn, malloc_mutex_t *mutex); void malloc_mutex_assert_not_owner(tsdn_t *tsdn, malloc_mutex_t *mutex); #endif #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_MUTEX_C_)) JEMALLOC_INLINE void malloc_mutex_lock(tsdn_t *tsdn, malloc_mutex_t *mutex) { if (isthreaded) { witness_assert_not_owner(tsdn, &mutex->witness); #ifdef _WIN32 # if _WIN32_WINNT >= 0x0600 AcquireSRWLockExclusive(&mutex->lock); # else EnterCriticalSection(&mutex->lock); # endif #elif (defined(JEMALLOC_OS_UNFAIR_LOCK)) os_unfair_lock_lock(&mutex->lock); #elif (defined(JEMALLOC_OSSPIN)) OSSpinLockLock(&mutex->lock); #else pthread_mutex_lock(&mutex->lock); #endif witness_lock(tsdn, &mutex->witness); } } JEMALLOC_INLINE void malloc_mutex_unlock(tsdn_t *tsdn, malloc_mutex_t *mutex) { if (isthreaded) { witness_unlock(tsdn, &mutex->witness); #ifdef _WIN32 # if _WIN32_WINNT >= 0x0600 ReleaseSRWLockExclusive(&mutex->lock); # else LeaveCriticalSection(&mutex->lock); # endif #elif (defined(JEMALLOC_OS_UNFAIR_LOCK)) os_unfair_lock_unlock(&mutex->lock); #elif (defined(JEMALLOC_OSSPIN)) OSSpinLockUnlock(&mutex->lock); #else pthread_mutex_unlock(&mutex->lock); #endif } } JEMALLOC_INLINE void malloc_mutex_assert_owner(tsdn_t *tsdn, malloc_mutex_t *mutex) { if (isthreaded) witness_assert_owner(tsdn, &mutex->witness); } JEMALLOC_INLINE void malloc_mutex_assert_not_owner(tsdn_t *tsdn, malloc_mutex_t *mutex) { if (isthreaded) witness_assert_not_owner(tsdn, &mutex->witness); } #endif #endif /* JEMALLOC_H_INLINES */ /******************************************************************************/
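A hedged, jemalloc-internal-style sketch of how this mutex API is typically used; the mutex name, rank choice, and callers are hypothetical, and the tsdn would normally come from tsdn_fetch().

static malloc_mutex_t stats_mtx; /* Hypothetical module-local mutex. */

static bool
stats_boot(void)
{
	/* Returns false on success, true on error (jemalloc convention). */
	return (malloc_mutex_init(&stats_mtx, "stats", WITNESS_RANK_LEAF));
}

static void
stats_update(tsdn_t *tsdn, uint64_t *counter)
{
	malloc_mutex_lock(tsdn, &stats_mtx); /* Witness-checked acquire. */
	(*counter)++;
	malloc_mutex_unlock(tsdn, &stats_mtx); /* Witness-checked release. */
}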
4,264
27.817568
80
h
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/jemalloc/include/jemalloc/internal/ctl.h
/******************************************************************************/ #ifdef JEMALLOC_H_TYPES typedef struct ctl_node_s ctl_node_t; typedef struct ctl_named_node_s ctl_named_node_t; typedef struct ctl_indexed_node_s ctl_indexed_node_t; typedef struct ctl_arena_stats_s ctl_arena_stats_t; typedef struct ctl_stats_s ctl_stats_t; #endif /* JEMALLOC_H_TYPES */ /******************************************************************************/ #ifdef JEMALLOC_H_STRUCTS struct ctl_node_s { bool named; }; struct ctl_named_node_s { struct ctl_node_s node; const char *name; /* If (nchildren == 0), this is a terminal node. */ unsigned nchildren; const ctl_node_t *children; int (*ctl)(tsd_t *, const size_t *, size_t, void *, size_t *, void *, size_t); }; struct ctl_indexed_node_s { struct ctl_node_s node; const ctl_named_node_t *(*index)(tsdn_t *, const size_t *, size_t, size_t); }; struct ctl_arena_stats_s { bool initialized; unsigned nthreads; const char *dss; ssize_t lg_dirty_mult; ssize_t decay_time; size_t pactive; size_t pdirty; /* The remainder are only populated if config_stats is true. */ arena_stats_t astats; /* Aggregate stats for small size classes, based on bin stats. */ size_t allocated_small; uint64_t nmalloc_small; uint64_t ndalloc_small; uint64_t nrequests_small; malloc_bin_stats_t bstats[NBINS]; malloc_large_stats_t *lstats; /* nlclasses elements. */ malloc_huge_stats_t *hstats; /* nhclasses elements. */ }; struct ctl_stats_s { size_t allocated; size_t active; size_t metadata; size_t resident; size_t mapped; size_t retained; unsigned narenas; ctl_arena_stats_t *arenas; /* (narenas + 1) elements. */ }; #endif /* JEMALLOC_H_STRUCTS */ /******************************************************************************/ #ifdef JEMALLOC_H_EXTERNS int ctl_byname(tsd_t *tsd, const char *name, void *oldp, size_t *oldlenp, void *newp, size_t newlen); int ctl_nametomib(tsdn_t *tsdn, const char *name, size_t *mibp, size_t *miblenp); int ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen); bool ctl_boot(void); void ctl_prefork(tsdn_t *tsdn); void ctl_postfork_parent(tsdn_t *tsdn); void ctl_postfork_child(tsdn_t *tsdn); #define xmallctl(name, oldp, oldlenp, newp, newlen) do { \ if (je_mallctl(name, oldp, oldlenp, newp, newlen) \ != 0) { \ malloc_printf( \ "<jemalloc>: Failure in xmallctl(\"%s\", ...)\n", \ name); \ abort(); \ } \ } while (0) #define xmallctlnametomib(name, mibp, miblenp) do { \ if (je_mallctlnametomib(name, mibp, miblenp) != 0) { \ malloc_printf("<jemalloc>: Failure in " \ "xmallctlnametomib(\"%s\", ...)\n", name); \ abort(); \ } \ } while (0) #define xmallctlbymib(mib, miblen, oldp, oldlenp, newp, newlen) do { \ if (je_mallctlbymib(mib, miblen, oldp, oldlenp, newp, \ newlen) != 0) { \ malloc_write( \ "<jemalloc>: Failure in xmallctlbymib()\n"); \ abort(); \ } \ } while (0) #endif /* JEMALLOC_H_EXTERNS */ /******************************************************************************/ #ifdef JEMALLOC_H_INLINES #endif /* JEMALLOC_H_INLINES */ /******************************************************************************/
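A hedged sketch of the abort-on-failure wrappers above in use; "epoch" and "stats.allocated" are standard jemalloc mallctl names, while the helper itself is hypothetical.

static size_t
read_allocated(void)
{
	uint64_t epoch = 1;
	size_t sz, allocated;

	/* Refresh the stats snapshot, then read the aggregate counter. */
	sz = sizeof(epoch);
	xmallctl("epoch", &epoch, &sz, &epoch, sz);
	sz = sizeof(allocated);
	xmallctl("stats.allocated", &allocated, &sz, NULL, 0);
	return (allocated);
}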
3,389
27.487395
80
h
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/jemalloc/include/jemalloc/internal/ql.h
/* List definitions. */ #define ql_head(a_type) \ struct { \ a_type *qlh_first; \ } #define ql_head_initializer(a_head) {NULL} #define ql_elm(a_type) qr(a_type) /* List functions. */ #define ql_new(a_head) do { \ (a_head)->qlh_first = NULL; \ } while (0) #define ql_elm_new(a_elm, a_field) qr_new((a_elm), a_field) #define ql_first(a_head) ((a_head)->qlh_first) #define ql_last(a_head, a_field) \ ((ql_first(a_head) != NULL) \ ? qr_prev(ql_first(a_head), a_field) : NULL) #define ql_next(a_head, a_elm, a_field) \ ((ql_last(a_head, a_field) != (a_elm)) \ ? qr_next((a_elm), a_field) : NULL) #define ql_prev(a_head, a_elm, a_field) \ ((ql_first(a_head) != (a_elm)) ? qr_prev((a_elm), a_field) \ : NULL) #define ql_before_insert(a_head, a_qlelm, a_elm, a_field) do { \ qr_before_insert((a_qlelm), (a_elm), a_field); \ if (ql_first(a_head) == (a_qlelm)) { \ ql_first(a_head) = (a_elm); \ } \ } while (0) #define ql_after_insert(a_qlelm, a_elm, a_field) \ qr_after_insert((a_qlelm), (a_elm), a_field) #define ql_head_insert(a_head, a_elm, a_field) do { \ if (ql_first(a_head) != NULL) { \ qr_before_insert(ql_first(a_head), (a_elm), a_field); \ } \ ql_first(a_head) = (a_elm); \ } while (0) #define ql_tail_insert(a_head, a_elm, a_field) do { \ if (ql_first(a_head) != NULL) { \ qr_before_insert(ql_first(a_head), (a_elm), a_field); \ } \ ql_first(a_head) = qr_next((a_elm), a_field); \ } while (0) #define ql_remove(a_head, a_elm, a_field) do { \ if (ql_first(a_head) == (a_elm)) { \ ql_first(a_head) = qr_next(ql_first(a_head), a_field); \ } \ if (ql_first(a_head) != (a_elm)) { \ qr_remove((a_elm), a_field); \ } else { \ ql_first(a_head) = NULL; \ } \ } while (0) #define ql_head_remove(a_head, a_type, a_field) do { \ a_type *t = ql_first(a_head); \ ql_remove((a_head), t, a_field); \ } while (0) #define ql_tail_remove(a_head, a_type, a_field) do { \ a_type *t = ql_last(a_head, a_field); \ ql_remove((a_head), t, a_field); \ } while (0) #define ql_foreach(a_var, a_head, a_field) \ qr_foreach((a_var), ql_first(a_head), a_field) #define ql_reverse_foreach(a_var, a_head, a_field) \ qr_reverse_foreach((a_var), ql_first(a_head), a_field)
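A self-contained, hypothetical sketch of the list macros above; widget_t and the field name "link" are invented for illustration.

typedef struct widget_s widget_t;
struct widget_s {
	int id;
	ql_elm(widget_t) link; /* Embedded linkage, no separate node type. */
};

static void
widget_list_demo(void)
{
	ql_head(widget_t) head = ql_head_initializer(head);
	widget_t a, b, *w;

	a.id = 1;
	b.id = 2;
	ql_elm_new(&a, link);
	ql_elm_new(&b, link);
	ql_tail_insert(&head, &a, link);
	ql_tail_insert(&head, &b, link); /* List is now a, b. */
	ql_foreach(w, &head, link) {
		/* Visits a (id 1), then b (id 2). */
	}
	ql_remove(&head, &a, link); /* b is now both first and last. */
}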
2,369
27.902439
65
h
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/jemalloc/include/jemalloc/internal/nstime.h
/******************************************************************************/ #ifdef JEMALLOC_H_TYPES typedef struct nstime_s nstime_t; /* Maximum supported number of seconds (~584 years). */ #define NSTIME_SEC_MAX KQU(18446744072) #endif /* JEMALLOC_H_TYPES */ /******************************************************************************/ #ifdef JEMALLOC_H_STRUCTS struct nstime_s { uint64_t ns; }; #endif /* JEMALLOC_H_STRUCTS */ /******************************************************************************/ #ifdef JEMALLOC_H_EXTERNS void nstime_init(nstime_t *time, uint64_t ns); void nstime_init2(nstime_t *time, uint64_t sec, uint64_t nsec); uint64_t nstime_ns(const nstime_t *time); uint64_t nstime_sec(const nstime_t *time); uint64_t nstime_nsec(const nstime_t *time); void nstime_copy(nstime_t *time, const nstime_t *source); int nstime_compare(const nstime_t *a, const nstime_t *b); void nstime_add(nstime_t *time, const nstime_t *addend); void nstime_subtract(nstime_t *time, const nstime_t *subtrahend); void nstime_imultiply(nstime_t *time, uint64_t multiplier); void nstime_idivide(nstime_t *time, uint64_t divisor); uint64_t nstime_divide(const nstime_t *time, const nstime_t *divisor); #ifdef JEMALLOC_JET typedef bool (nstime_monotonic_t)(void); extern nstime_monotonic_t *nstime_monotonic; typedef bool (nstime_update_t)(nstime_t *); extern nstime_update_t *nstime_update; #else bool nstime_monotonic(void); bool nstime_update(nstime_t *time); #endif #endif /* JEMALLOC_H_EXTERNS */ /******************************************************************************/ #ifdef JEMALLOC_H_INLINES #endif /* JEMALLOC_H_INLINES */ /******************************************************************************/
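A hedged sketch of the interval arithmetic this API supports; elapsed_ticks() is a hypothetical helper, and tick_len is assumed to have been set up with nstime_init2().

static uint64_t
elapsed_ticks(const nstime_t *start, const nstime_t *tick_len)
{
	nstime_t now;

	nstime_copy(&now, start);
	nstime_update(&now); /* Advance to the current time if monotonic. */
	nstime_subtract(&now, start); /* now -= start. */
	return (nstime_divide(&now, tick_len)); /* Whole ticks elapsed. */
}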
1,738
34.489796
80
h
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/jemalloc/include/jemalloc/internal/witness.h
/******************************************************************************/ #ifdef JEMALLOC_H_TYPES typedef struct witness_s witness_t; typedef unsigned witness_rank_t; typedef ql_head(witness_t) witness_list_t; typedef int witness_comp_t (const witness_t *, const witness_t *); /* * Lock ranks. Witnesses with rank WITNESS_RANK_OMIT are completely ignored by * the witness machinery. */ #define WITNESS_RANK_OMIT 0U #define WITNESS_RANK_INIT 1U #define WITNESS_RANK_CTL 1U #define WITNESS_RANK_ARENAS 2U #define WITNESS_RANK_PROF_DUMP 3U #define WITNESS_RANK_PROF_BT2GCTX 4U #define WITNESS_RANK_PROF_TDATAS 5U #define WITNESS_RANK_PROF_TDATA 6U #define WITNESS_RANK_PROF_GCTX 7U #define WITNESS_RANK_ARENA 8U #define WITNESS_RANK_ARENA_CHUNKS 9U #define WITNESS_RANK_ARENA_NODE_CACHE 10 #define WITNESS_RANK_BASE 11U #define WITNESS_RANK_LEAF 0xffffffffU #define WITNESS_RANK_ARENA_BIN WITNESS_RANK_LEAF #define WITNESS_RANK_ARENA_HUGE WITNESS_RANK_LEAF #define WITNESS_RANK_DSS WITNESS_RANK_LEAF #define WITNESS_RANK_PROF_ACTIVE WITNESS_RANK_LEAF #define WITNESS_RANK_PROF_DUMP_SEQ WITNESS_RANK_LEAF #define WITNESS_RANK_PROF_GDUMP WITNESS_RANK_LEAF #define WITNESS_RANK_PROF_NEXT_THR_UID WITNESS_RANK_LEAF #define WITNESS_RANK_PROF_THREAD_ACTIVE_INIT WITNESS_RANK_LEAF #define WITNESS_INITIALIZER(rank) {"initializer", rank, NULL, {NULL, NULL}} #endif /* JEMALLOC_H_TYPES */ /******************************************************************************/ #ifdef JEMALLOC_H_STRUCTS struct witness_s { /* Name, used for printing lock order reversal messages. */ const char *name; /* * Witness rank, where 0 is lowest and UINT_MAX is highest. Witnesses * must be acquired in order of increasing rank. */ witness_rank_t rank; /* * If two witnesses are of equal rank and they have the same comp * function pointer, it is called as a last attempt to differentiate * between witnesses of equal rank. */ witness_comp_t *comp; /* Linkage for thread's currently owned locks.
*/ ql_elm(witness_t) link; }; #endif /* JEMALLOC_H_STRUCTS */ /******************************************************************************/ #ifdef JEMALLOC_H_EXTERNS void witness_init(witness_t *witness, const char *name, witness_rank_t rank, witness_comp_t *comp); #ifdef JEMALLOC_JET typedef void (witness_lock_error_t)(const witness_list_t *, const witness_t *); extern witness_lock_error_t *witness_lock_error; #else void witness_lock_error(const witness_list_t *witnesses, const witness_t *witness); #endif #ifdef JEMALLOC_JET typedef void (witness_owner_error_t)(const witness_t *); extern witness_owner_error_t *witness_owner_error; #else void witness_owner_error(const witness_t *witness); #endif #ifdef JEMALLOC_JET typedef void (witness_not_owner_error_t)(const witness_t *); extern witness_not_owner_error_t *witness_not_owner_error; #else void witness_not_owner_error(const witness_t *witness); #endif #ifdef JEMALLOC_JET typedef void (witness_lockless_error_t)(const witness_list_t *); extern witness_lockless_error_t *witness_lockless_error; #else void witness_lockless_error(const witness_list_t *witnesses); #endif void witnesses_cleanup(tsd_t *tsd); void witness_fork_cleanup(tsd_t *tsd); void witness_prefork(tsd_t *tsd); void witness_postfork_parent(tsd_t *tsd); void witness_postfork_child(tsd_t *tsd); #endif /* JEMALLOC_H_EXTERNS */ /******************************************************************************/ #ifdef JEMALLOC_H_INLINES #ifndef JEMALLOC_ENABLE_INLINE bool witness_owner(tsd_t *tsd, const witness_t *witness); void witness_assert_owner(tsdn_t *tsdn, const witness_t *witness); void witness_assert_not_owner(tsdn_t *tsdn, const witness_t *witness); void witness_assert_lockless(tsdn_t *tsdn); void witness_lock(tsdn_t *tsdn, witness_t *witness); void witness_unlock(tsdn_t *tsdn, witness_t *witness); #endif #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_MUTEX_C_)) JEMALLOC_INLINE bool witness_owner(tsd_t *tsd, const witness_t *witness) { witness_list_t *witnesses; witness_t *w; witnesses = tsd_witnessesp_get(tsd); ql_foreach(w, witnesses, link) { if (w == witness) return (true); } return (false); } JEMALLOC_INLINE void witness_assert_owner(tsdn_t *tsdn, const witness_t *witness) { tsd_t *tsd; if (!config_debug) return; if (tsdn_null(tsdn)) return; tsd = tsdn_tsd(tsdn); if (witness->rank == WITNESS_RANK_OMIT) return; if (witness_owner(tsd, witness)) return; witness_owner_error(witness); } JEMALLOC_INLINE void witness_assert_not_owner(tsdn_t *tsdn, const witness_t *witness) { tsd_t *tsd; witness_list_t *witnesses; witness_t *w; if (!config_debug) return; if (tsdn_null(tsdn)) return; tsd = tsdn_tsd(tsdn); if (witness->rank == WITNESS_RANK_OMIT) return; witnesses = tsd_witnessesp_get(tsd); ql_foreach(w, witnesses, link) { if (w == witness) witness_not_owner_error(witness); } } JEMALLOC_INLINE void witness_assert_lockless(tsdn_t *tsdn) { tsd_t *tsd; witness_list_t *witnesses; witness_t *w; if (!config_debug) return; if (tsdn_null(tsdn)) return; tsd = tsdn_tsd(tsdn); witnesses = tsd_witnessesp_get(tsd); w = ql_last(witnesses, link); if (w != NULL) witness_lockless_error(witnesses); } JEMALLOC_INLINE void witness_lock(tsdn_t *tsdn, witness_t *witness) { tsd_t *tsd; witness_list_t *witnesses; witness_t *w; if (!config_debug) return; if (tsdn_null(tsdn)) return; tsd = tsdn_tsd(tsdn); if (witness->rank == WITNESS_RANK_OMIT) return; witness_assert_not_owner(tsdn, witness); witnesses = tsd_witnessesp_get(tsd); w = ql_last(witnesses, link); if (w == NULL) { /* No other locks; do 
nothing. */ } else if (tsd_witness_fork_get(tsd) && w->rank <= witness->rank) { /* Forking, and relaxed ranking satisfied. */ } else if (w->rank > witness->rank) { /* Not forking, rank order reversal. */ witness_lock_error(witnesses, witness); } else if (w->rank == witness->rank && (w->comp == NULL || w->comp != witness->comp || w->comp(w, witness) > 0)) { /* * Missing/incompatible comparison function, or comparison * function indicates rank order reversal. */ witness_lock_error(witnesses, witness); } ql_elm_new(witness, link); ql_tail_insert(witnesses, witness, link); } JEMALLOC_INLINE void witness_unlock(tsdn_t *tsdn, witness_t *witness) { tsd_t *tsd; witness_list_t *witnesses; if (!config_debug) return; if (tsdn_null(tsdn)) return; tsd = tsdn_tsd(tsdn); if (witness->rank == WITNESS_RANK_OMIT) return; /* * Check whether owner before removal, rather than relying on * witness_assert_owner() to abort, so that unit tests can test this * function's failure mode without causing undefined behavior. */ if (witness_owner(tsd, witness)) { witnesses = tsd_witnessesp_get(tsd); ql_remove(witnesses, witness, link); } else witness_assert_owner(tsdn, witness); } #endif #endif /* JEMALLOC_H_INLINES */ /******************************************************************************/
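A hedged sketch of rank-ordered acquisition under the API above; the witnesses and ranks are hypothetical, and the checks are only active in a config_debug build with a non-NULL tsdn.

static witness_t w_outer, w_inner; /* Hypothetical witnesses. */

static void
witness_order_demo(tsdn_t *tsdn)
{
	witness_init(&w_outer, "outer", (witness_rank_t)1U, NULL);
	witness_init(&w_inner, "inner", (witness_rank_t)2U, NULL);

	witness_lock(tsdn, &w_outer); /* Rank 1 first... */
	witness_lock(tsdn, &w_inner); /* ...then rank 2: legal ordering. */
	witness_assert_owner(tsdn, &w_inner);
	/* Locking w_outer after w_inner would call witness_lock_error(). */
	witness_unlock(tsdn, &w_inner);
	witness_unlock(tsdn, &w_outer);
	witness_assert_lockless(tsdn);
}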
7,051
25.411985
80
h
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/jemalloc/include/jemalloc/internal/qr.h
/* Ring definitions. */ #define qr(a_type) \ struct { \ a_type *qre_next; \ a_type *qre_prev; \ } /* Ring functions. */ #define qr_new(a_qr, a_field) do { \ (a_qr)->a_field.qre_next = (a_qr); \ (a_qr)->a_field.qre_prev = (a_qr); \ } while (0) #define qr_next(a_qr, a_field) ((a_qr)->a_field.qre_next) #define qr_prev(a_qr, a_field) ((a_qr)->a_field.qre_prev) #define qr_before_insert(a_qrelm, a_qr, a_field) do { \ (a_qr)->a_field.qre_prev = (a_qrelm)->a_field.qre_prev; \ (a_qr)->a_field.qre_next = (a_qrelm); \ (a_qr)->a_field.qre_prev->a_field.qre_next = (a_qr); \ (a_qrelm)->a_field.qre_prev = (a_qr); \ } while (0) #define qr_after_insert(a_qrelm, a_qr, a_field) \ do \ { \ (a_qr)->a_field.qre_next = (a_qrelm)->a_field.qre_next; \ (a_qr)->a_field.qre_prev = (a_qrelm); \ (a_qr)->a_field.qre_next->a_field.qre_prev = (a_qr); \ (a_qrelm)->a_field.qre_next = (a_qr); \ } while (0) #define qr_meld(a_qr_a, a_qr_b, a_field) do { \ void *t; \ (a_qr_a)->a_field.qre_prev->a_field.qre_next = (a_qr_b); \ (a_qr_b)->a_field.qre_prev->a_field.qre_next = (a_qr_a); \ t = (a_qr_a)->a_field.qre_prev; \ (a_qr_a)->a_field.qre_prev = (a_qr_b)->a_field.qre_prev; \ (a_qr_b)->a_field.qre_prev = t; \ } while (0) /* * qr_meld() and qr_split() are functionally equivalent, so there's no need to * have two copies of the code. */ #define qr_split(a_qr_a, a_qr_b, a_field) \ qr_meld((a_qr_a), (a_qr_b), a_field) #define qr_remove(a_qr, a_field) do { \ (a_qr)->a_field.qre_prev->a_field.qre_next \ = (a_qr)->a_field.qre_next; \ (a_qr)->a_field.qre_next->a_field.qre_prev \ = (a_qr)->a_field.qre_prev; \ (a_qr)->a_field.qre_next = (a_qr); \ (a_qr)->a_field.qre_prev = (a_qr); \ } while (0) #define qr_foreach(var, a_qr, a_field) \ for ((var) = (a_qr); \ (var) != NULL; \ (var) = (((var)->a_field.qre_next != (a_qr)) \ ? (var)->a_field.qre_next : NULL)) #define qr_reverse_foreach(var, a_qr, a_field) \ for ((var) = ((a_qr) != NULL) ? qr_prev(a_qr, a_field) : NULL; \ (var) != NULL; \ (var) = (((var) != (a_qr)) \ ? (var)->a_field.qre_prev : NULL))
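A self-contained, hypothetical sketch of the ring macros above; unlike ql.h there is no head sentinel, so any element serves as the entry point.

typedef struct node_s node_t;
struct node_s {
	int val;
	qr(node_t) link;
};

static void
ring_demo(void)
{
	node_t a, b, c, *n;

	a.val = 1; b.val = 2; c.val = 3;
	qr_new(&a, link); /* Singleton ring: a <-> a. */
	qr_after_insert(&a, &b, link); /* Ring is a, b. */
	qr_before_insert(&a, &c, link); /* c precedes a: ring is a, b, c. */
	qr_foreach(n, &a, link) {
		/* Visits a, b, c, then stops after one full cycle. */
	}
	qr_remove(&b, link); /* b becomes a singleton again. */
}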
2,259
31.285714
78
h
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/jemalloc/include/jemalloc/internal/spin.h
/******************************************************************************/ #ifdef JEMALLOC_H_TYPES typedef struct spin_s spin_t; #endif /* JEMALLOC_H_TYPES */ /******************************************************************************/ #ifdef JEMALLOC_H_STRUCTS struct spin_s { unsigned iteration; }; #endif /* JEMALLOC_H_STRUCTS */ /******************************************************************************/ #ifdef JEMALLOC_H_EXTERNS #endif /* JEMALLOC_H_EXTERNS */ /******************************************************************************/ #ifdef JEMALLOC_H_INLINES #ifndef JEMALLOC_ENABLE_INLINE void spin_init(spin_t *spin); void spin_adaptive(spin_t *spin); #endif #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_SPIN_C_)) JEMALLOC_INLINE void spin_init(spin_t *spin) { spin->iteration = 0; } JEMALLOC_INLINE void spin_adaptive(spin_t *spin) { volatile uint64_t i; for (i = 0; i < (KQU(1) << spin->iteration); i++) CPU_SPINWAIT; if (spin->iteration < 63) spin->iteration++; } #endif #endif /* JEMALLOC_H_INLINES */ /******************************************************************************/
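A hedged sketch of the exponential-backoff busy-wait these inlines provide; the flag-polling helper is hypothetical.

static void
wait_until_ready(volatile unsigned *ready)
{
	spin_t spinner;

	spin_init(&spinner);
	while (*ready == 0) {
		/* Each call roughly doubles the busy-wait, capped at 2^63. */
		spin_adaptive(&spinner);
	}
}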
1,154
21.211538
80
h
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/jemalloc/include/jemalloc/internal/smoothstep.h
/* * This file was generated by the following command: * sh smoothstep.sh smoother 200 24 3 15 */ /******************************************************************************/ #ifdef JEMALLOC_H_TYPES /* * This header defines a precomputed table based on the smoothstep family of * sigmoidal curves (https://en.wikipedia.org/wiki/Smoothstep) that grow from 0 * to 1 in 0 <= x <= 1. The table is stored as integer fixed point values so * that floating point math can be avoided. * * 3 2 * smoothstep(x) = -2x + 3x * * 5 4 3 * smootherstep(x) = 6x - 15x + 10x * * 7 6 5 4 * smootheststep(x) = -20x + 70x - 84x + 35x */ #define SMOOTHSTEP_VARIANT "smoother" #define SMOOTHSTEP_NSTEPS 200 #define SMOOTHSTEP_BFP 24 #define SMOOTHSTEP \ /* STEP(step, h, x, y) */ \ STEP( 1, UINT64_C(0x0000000000000014), 0.005, 0.000001240643750) \ STEP( 2, UINT64_C(0x00000000000000a5), 0.010, 0.000009850600000) \ STEP( 3, UINT64_C(0x0000000000000229), 0.015, 0.000032995181250) \ STEP( 4, UINT64_C(0x0000000000000516), 0.020, 0.000077619200000) \ STEP( 5, UINT64_C(0x00000000000009dc), 0.025, 0.000150449218750) \ STEP( 6, UINT64_C(0x00000000000010e8), 0.030, 0.000257995800000) \ STEP( 7, UINT64_C(0x0000000000001aa4), 0.035, 0.000406555756250) \ STEP( 8, UINT64_C(0x0000000000002777), 0.040, 0.000602214400000) \ STEP( 9, UINT64_C(0x00000000000037c2), 0.045, 0.000850847793750) \ STEP( 10, UINT64_C(0x0000000000004be6), 0.050, 0.001158125000000) \ STEP( 11, UINT64_C(0x000000000000643c), 0.055, 0.001529510331250) \ STEP( 12, UINT64_C(0x000000000000811f), 0.060, 0.001970265600000) \ STEP( 13, UINT64_C(0x000000000000a2e2), 0.065, 0.002485452368750) \ STEP( 14, UINT64_C(0x000000000000c9d8), 0.070, 0.003079934200000) \ STEP( 15, UINT64_C(0x000000000000f64f), 0.075, 0.003758378906250) \ STEP( 16, UINT64_C(0x0000000000012891), 0.080, 0.004525260800000) \ STEP( 17, UINT64_C(0x00000000000160e7), 0.085, 0.005384862943750) \ STEP( 18, UINT64_C(0x0000000000019f95), 0.090, 0.006341279400000) \ STEP( 19, UINT64_C(0x000000000001e4dc), 0.095, 0.007398417481250) \ STEP( 20, UINT64_C(0x00000000000230fc), 0.100, 0.008560000000000) \ STEP( 21, UINT64_C(0x0000000000028430), 0.105, 0.009829567518750) \ STEP( 22, UINT64_C(0x000000000002deb0), 0.110, 0.011210480600000) \ STEP( 23, UINT64_C(0x00000000000340b1), 0.115, 0.012705922056250) \ STEP( 24, UINT64_C(0x000000000003aa67), 0.120, 0.014318899200000) \ STEP( 25, UINT64_C(0x0000000000041c00), 0.125, 0.016052246093750) \ STEP( 26, UINT64_C(0x00000000000495a8), 0.130, 0.017908625800000) \ STEP( 27, UINT64_C(0x000000000005178b), 0.135, 0.019890532631250) \ STEP( 28, UINT64_C(0x000000000005a1cf), 0.140, 0.022000294400000) \ STEP( 29, UINT64_C(0x0000000000063498), 0.145, 0.024240074668750) \ STEP( 30, UINT64_C(0x000000000006d009), 0.150, 0.026611875000000) \ STEP( 31, UINT64_C(0x000000000007743f), 0.155, 0.029117537206250) \ STEP( 32, UINT64_C(0x0000000000082157), 0.160, 0.031758745600000) \ STEP( 33, UINT64_C(0x000000000008d76b), 0.165, 0.034537029243750) \ STEP( 34, UINT64_C(0x0000000000099691), 0.170, 0.037453764200000) \ STEP( 35, UINT64_C(0x00000000000a5edf), 0.175, 0.040510175781250) \ STEP( 36, UINT64_C(0x00000000000b3067), 0.180, 0.043707340800000) \ STEP( 37, UINT64_C(0x00000000000c0b38), 0.185, 0.047046189818750) \ STEP( 38, UINT64_C(0x00000000000cef5e), 0.190, 0.050527509400000) \ STEP( 39, UINT64_C(0x00000000000ddce6), 0.195, 0.054151944356250) \ STEP( 40, UINT64_C(0x00000000000ed3d8), 0.200, 0.057920000000000) \ STEP( 41, UINT64_C(0x00000000000fd439), 0.205, 0.061832044393750) \ STEP( 42, 
UINT64_C(0x000000000010de0e), 0.210, 0.065888310600000) \ STEP( 43, UINT64_C(0x000000000011f158), 0.215, 0.070088898931250) \ STEP( 44, UINT64_C(0x0000000000130e17), 0.220, 0.074433779200000) \ STEP( 45, UINT64_C(0x0000000000143448), 0.225, 0.078922792968750) \ STEP( 46, UINT64_C(0x00000000001563e7), 0.230, 0.083555655800000) \ STEP( 47, UINT64_C(0x0000000000169cec), 0.235, 0.088331959506250) \ STEP( 48, UINT64_C(0x000000000017df4f), 0.240, 0.093251174400000) \ STEP( 49, UINT64_C(0x0000000000192b04), 0.245, 0.098312651543750) \ STEP( 50, UINT64_C(0x00000000001a8000), 0.250, 0.103515625000000) \ STEP( 51, UINT64_C(0x00000000001bde32), 0.255, 0.108859214081250) \ STEP( 52, UINT64_C(0x00000000001d458b), 0.260, 0.114342425600000) \ STEP( 53, UINT64_C(0x00000000001eb5f8), 0.265, 0.119964156118750) \ STEP( 54, UINT64_C(0x0000000000202f65), 0.270, 0.125723194200000) \ STEP( 55, UINT64_C(0x000000000021b1bb), 0.275, 0.131618222656250) \ STEP( 56, UINT64_C(0x0000000000233ce3), 0.280, 0.137647820800000) \ STEP( 57, UINT64_C(0x000000000024d0c3), 0.285, 0.143810466693750) \ STEP( 58, UINT64_C(0x0000000000266d40), 0.290, 0.150104539400000) \ STEP( 59, UINT64_C(0x000000000028123d), 0.295, 0.156528321231250) \ STEP( 60, UINT64_C(0x000000000029bf9c), 0.300, 0.163080000000000) \ STEP( 61, UINT64_C(0x00000000002b753d), 0.305, 0.169757671268750) \ STEP( 62, UINT64_C(0x00000000002d32fe), 0.310, 0.176559340600000) \ STEP( 63, UINT64_C(0x00000000002ef8bc), 0.315, 0.183482925806250) \ STEP( 64, UINT64_C(0x000000000030c654), 0.320, 0.190526259200000) \ STEP( 65, UINT64_C(0x0000000000329b9f), 0.325, 0.197687089843750) \ STEP( 66, UINT64_C(0x0000000000347875), 0.330, 0.204963085800000) \ STEP( 67, UINT64_C(0x0000000000365cb0), 0.335, 0.212351836381250) \ STEP( 68, UINT64_C(0x0000000000384825), 0.340, 0.219850854400000) \ STEP( 69, UINT64_C(0x00000000003a3aa8), 0.345, 0.227457578418750) \ STEP( 70, UINT64_C(0x00000000003c340f), 0.350, 0.235169375000000) \ STEP( 71, UINT64_C(0x00000000003e342b), 0.355, 0.242983540956250) \ STEP( 72, UINT64_C(0x0000000000403ace), 0.360, 0.250897305600000) \ STEP( 73, UINT64_C(0x00000000004247c8), 0.365, 0.258907832993750) \ STEP( 74, UINT64_C(0x0000000000445ae9), 0.370, 0.267012224200000) \ STEP( 75, UINT64_C(0x0000000000467400), 0.375, 0.275207519531250) \ STEP( 76, UINT64_C(0x00000000004892d8), 0.380, 0.283490700800000) \ STEP( 77, UINT64_C(0x00000000004ab740), 0.385, 0.291858693568750) \ STEP( 78, UINT64_C(0x00000000004ce102), 0.390, 0.300308369400000) \ STEP( 79, UINT64_C(0x00000000004f0fe9), 0.395, 0.308836548106250) \ STEP( 80, UINT64_C(0x00000000005143bf), 0.400, 0.317440000000000) \ STEP( 81, UINT64_C(0x0000000000537c4d), 0.405, 0.326115448143750) \ STEP( 82, UINT64_C(0x000000000055b95b), 0.410, 0.334859570600000) \ STEP( 83, UINT64_C(0x000000000057fab1), 0.415, 0.343669002681250) \ STEP( 84, UINT64_C(0x00000000005a4015), 0.420, 0.352540339200000) \ STEP( 85, UINT64_C(0x00000000005c894e), 0.425, 0.361470136718750) \ STEP( 86, UINT64_C(0x00000000005ed622), 0.430, 0.370454915800000) \ STEP( 87, UINT64_C(0x0000000000612655), 0.435, 0.379491163256250) \ STEP( 88, UINT64_C(0x00000000006379ac), 0.440, 0.388575334400000) \ STEP( 89, UINT64_C(0x000000000065cfeb), 0.445, 0.397703855293750) \ STEP( 90, UINT64_C(0x00000000006828d6), 0.450, 0.406873125000000) \ STEP( 91, UINT64_C(0x00000000006a842f), 0.455, 0.416079517831250) \ STEP( 92, UINT64_C(0x00000000006ce1bb), 0.460, 0.425319385600000) \ STEP( 93, UINT64_C(0x00000000006f413a), 0.465, 0.434589059868750) \ STEP( 94, 
UINT64_C(0x000000000071a270), 0.470, 0.443884854200000) \ STEP( 95, UINT64_C(0x000000000074051d), 0.475, 0.453203066406250) \ STEP( 96, UINT64_C(0x0000000000766905), 0.480, 0.462539980800000) \ STEP( 97, UINT64_C(0x000000000078cde7), 0.485, 0.471891870443750) \ STEP( 98, UINT64_C(0x00000000007b3387), 0.490, 0.481254999400000) \ STEP( 99, UINT64_C(0x00000000007d99a4), 0.495, 0.490625624981250) \ STEP( 100, UINT64_C(0x0000000000800000), 0.500, 0.500000000000000) \ STEP( 101, UINT64_C(0x000000000082665b), 0.505, 0.509374375018750) \ STEP( 102, UINT64_C(0x000000000084cc78), 0.510, 0.518745000600000) \ STEP( 103, UINT64_C(0x0000000000873218), 0.515, 0.528108129556250) \ STEP( 104, UINT64_C(0x00000000008996fa), 0.520, 0.537460019200000) \ STEP( 105, UINT64_C(0x00000000008bfae2), 0.525, 0.546796933593750) \ STEP( 106, UINT64_C(0x00000000008e5d8f), 0.530, 0.556115145800000) \ STEP( 107, UINT64_C(0x000000000090bec5), 0.535, 0.565410940131250) \ STEP( 108, UINT64_C(0x0000000000931e44), 0.540, 0.574680614400000) \ STEP( 109, UINT64_C(0x0000000000957bd0), 0.545, 0.583920482168750) \ STEP( 110, UINT64_C(0x000000000097d729), 0.550, 0.593126875000000) \ STEP( 111, UINT64_C(0x00000000009a3014), 0.555, 0.602296144706250) \ STEP( 112, UINT64_C(0x00000000009c8653), 0.560, 0.611424665600000) \ STEP( 113, UINT64_C(0x00000000009ed9aa), 0.565, 0.620508836743750) \ STEP( 114, UINT64_C(0x0000000000a129dd), 0.570, 0.629545084200000) \ STEP( 115, UINT64_C(0x0000000000a376b1), 0.575, 0.638529863281250) \ STEP( 116, UINT64_C(0x0000000000a5bfea), 0.580, 0.647459660800000) \ STEP( 117, UINT64_C(0x0000000000a8054e), 0.585, 0.656330997318750) \ STEP( 118, UINT64_C(0x0000000000aa46a4), 0.590, 0.665140429400000) \ STEP( 119, UINT64_C(0x0000000000ac83b2), 0.595, 0.673884551856250) \ STEP( 120, UINT64_C(0x0000000000aebc40), 0.600, 0.682560000000000) \ STEP( 121, UINT64_C(0x0000000000b0f016), 0.605, 0.691163451893750) \ STEP( 122, UINT64_C(0x0000000000b31efd), 0.610, 0.699691630600000) \ STEP( 123, UINT64_C(0x0000000000b548bf), 0.615, 0.708141306431250) \ STEP( 124, UINT64_C(0x0000000000b76d27), 0.620, 0.716509299200000) \ STEP( 125, UINT64_C(0x0000000000b98c00), 0.625, 0.724792480468750) \ STEP( 126, UINT64_C(0x0000000000bba516), 0.630, 0.732987775800000) \ STEP( 127, UINT64_C(0x0000000000bdb837), 0.635, 0.741092167006250) \ STEP( 128, UINT64_C(0x0000000000bfc531), 0.640, 0.749102694400000) \ STEP( 129, UINT64_C(0x0000000000c1cbd4), 0.645, 0.757016459043750) \ STEP( 130, UINT64_C(0x0000000000c3cbf0), 0.650, 0.764830625000000) \ STEP( 131, UINT64_C(0x0000000000c5c557), 0.655, 0.772542421581250) \ STEP( 132, UINT64_C(0x0000000000c7b7da), 0.660, 0.780149145600000) \ STEP( 133, UINT64_C(0x0000000000c9a34f), 0.665, 0.787648163618750) \ STEP( 134, UINT64_C(0x0000000000cb878a), 0.670, 0.795036914200000) \ STEP( 135, UINT64_C(0x0000000000cd6460), 0.675, 0.802312910156250) \ STEP( 136, UINT64_C(0x0000000000cf39ab), 0.680, 0.809473740800000) \ STEP( 137, UINT64_C(0x0000000000d10743), 0.685, 0.816517074193750) \ STEP( 138, UINT64_C(0x0000000000d2cd01), 0.690, 0.823440659400000) \ STEP( 139, UINT64_C(0x0000000000d48ac2), 0.695, 0.830242328731250) \ STEP( 140, UINT64_C(0x0000000000d64063), 0.700, 0.836920000000000) \ STEP( 141, UINT64_C(0x0000000000d7edc2), 0.705, 0.843471678768750) \ STEP( 142, UINT64_C(0x0000000000d992bf), 0.710, 0.849895460600000) \ STEP( 143, UINT64_C(0x0000000000db2f3c), 0.715, 0.856189533306250) \ STEP( 144, UINT64_C(0x0000000000dcc31c), 0.720, 0.862352179200000) \ STEP( 145, UINT64_C(0x0000000000de4e44), 0.725, 
0.868381777343750) \ STEP( 146, UINT64_C(0x0000000000dfd09a), 0.730, 0.874276805800000) \ STEP( 147, UINT64_C(0x0000000000e14a07), 0.735, 0.880035843881250) \ STEP( 148, UINT64_C(0x0000000000e2ba74), 0.740, 0.885657574400000) \ STEP( 149, UINT64_C(0x0000000000e421cd), 0.745, 0.891140785918750) \ STEP( 150, UINT64_C(0x0000000000e58000), 0.750, 0.896484375000000) \ STEP( 151, UINT64_C(0x0000000000e6d4fb), 0.755, 0.901687348456250) \ STEP( 152, UINT64_C(0x0000000000e820b0), 0.760, 0.906748825600000) \ STEP( 153, UINT64_C(0x0000000000e96313), 0.765, 0.911668040493750) \ STEP( 154, UINT64_C(0x0000000000ea9c18), 0.770, 0.916444344200000) \ STEP( 155, UINT64_C(0x0000000000ebcbb7), 0.775, 0.921077207031250) \ STEP( 156, UINT64_C(0x0000000000ecf1e8), 0.780, 0.925566220800000) \ STEP( 157, UINT64_C(0x0000000000ee0ea7), 0.785, 0.929911101068750) \ STEP( 158, UINT64_C(0x0000000000ef21f1), 0.790, 0.934111689400000) \ STEP( 159, UINT64_C(0x0000000000f02bc6), 0.795, 0.938167955606250) \ STEP( 160, UINT64_C(0x0000000000f12c27), 0.800, 0.942080000000000) \ STEP( 161, UINT64_C(0x0000000000f22319), 0.805, 0.945848055643750) \ STEP( 162, UINT64_C(0x0000000000f310a1), 0.810, 0.949472490600000) \ STEP( 163, UINT64_C(0x0000000000f3f4c7), 0.815, 0.952953810181250) \ STEP( 164, UINT64_C(0x0000000000f4cf98), 0.820, 0.956292659200000) \ STEP( 165, UINT64_C(0x0000000000f5a120), 0.825, 0.959489824218750) \ STEP( 166, UINT64_C(0x0000000000f6696e), 0.830, 0.962546235800000) \ STEP( 167, UINT64_C(0x0000000000f72894), 0.835, 0.965462970756250) \ STEP( 168, UINT64_C(0x0000000000f7dea8), 0.840, 0.968241254400000) \ STEP( 169, UINT64_C(0x0000000000f88bc0), 0.845, 0.970882462793750) \ STEP( 170, UINT64_C(0x0000000000f92ff6), 0.850, 0.973388125000000) \ STEP( 171, UINT64_C(0x0000000000f9cb67), 0.855, 0.975759925331250) \ STEP( 172, UINT64_C(0x0000000000fa5e30), 0.860, 0.977999705600000) \ STEP( 173, UINT64_C(0x0000000000fae874), 0.865, 0.980109467368750) \ STEP( 174, UINT64_C(0x0000000000fb6a57), 0.870, 0.982091374200000) \ STEP( 175, UINT64_C(0x0000000000fbe400), 0.875, 0.983947753906250) \ STEP( 176, UINT64_C(0x0000000000fc5598), 0.880, 0.985681100800000) \ STEP( 177, UINT64_C(0x0000000000fcbf4e), 0.885, 0.987294077943750) \ STEP( 178, UINT64_C(0x0000000000fd214f), 0.890, 0.988789519400000) \ STEP( 179, UINT64_C(0x0000000000fd7bcf), 0.895, 0.990170432481250) \ STEP( 180, UINT64_C(0x0000000000fdcf03), 0.900, 0.991440000000000) \ STEP( 181, UINT64_C(0x0000000000fe1b23), 0.905, 0.992601582518750) \ STEP( 182, UINT64_C(0x0000000000fe606a), 0.910, 0.993658720600000) \ STEP( 183, UINT64_C(0x0000000000fe9f18), 0.915, 0.994615137056250) \ STEP( 184, UINT64_C(0x0000000000fed76e), 0.920, 0.995474739200000) \ STEP( 185, UINT64_C(0x0000000000ff09b0), 0.925, 0.996241621093750) \ STEP( 186, UINT64_C(0x0000000000ff3627), 0.930, 0.996920065800000) \ STEP( 187, UINT64_C(0x0000000000ff5d1d), 0.935, 0.997514547631250) \ STEP( 188, UINT64_C(0x0000000000ff7ee0), 0.940, 0.998029734400000) \ STEP( 189, UINT64_C(0x0000000000ff9bc3), 0.945, 0.998470489668750) \ STEP( 190, UINT64_C(0x0000000000ffb419), 0.950, 0.998841875000000) \ STEP( 191, UINT64_C(0x0000000000ffc83d), 0.955, 0.999149152206250) \ STEP( 192, UINT64_C(0x0000000000ffd888), 0.960, 0.999397785600000) \ STEP( 193, UINT64_C(0x0000000000ffe55b), 0.965, 0.999593444243750) \ STEP( 194, UINT64_C(0x0000000000ffef17), 0.970, 0.999742004200000) \ STEP( 195, UINT64_C(0x0000000000fff623), 0.975, 0.999849550781250) \ STEP( 196, UINT64_C(0x0000000000fffae9), 0.980, 0.999922380800000) \ STEP( 197, 
UINT64_C(0x0000000000fffdd6), 0.985, 0.999967004818750) \ STEP( 198, UINT64_C(0x0000000000ffff5a), 0.990, 0.999990149400000) \ STEP( 199, UINT64_C(0x0000000000ffffeb), 0.995, 0.999998759356250) \ STEP( 200, UINT64_C(0x0000000001000000), 1.000, 1.000000000000000) \ #endif /* JEMALLOC_H_TYPES */ /******************************************************************************/ #ifdef JEMALLOC_H_STRUCTS #endif /* JEMALLOC_H_STRUCTS */ /******************************************************************************/ #ifdef JEMALLOC_H_EXTERNS #endif /* JEMALLOC_H_EXTERNS */ /******************************************************************************/ #ifdef JEMALLOC_H_INLINES #endif /* JEMALLOC_H_INLINES */ /******************************************************************************/
16,061
64.02834
80
h
null
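For reference, each fixed-point h entry above is simply smootherstep(x) scaled by 2^SMOOTHSTEP_BFP and rounded. A standalone sketch (not part of jemalloc) that recomputes the midpoint entry:

/*
 * Standalone check: recompute a table entry from
 * smootherstep(x) = 6x^5 - 15x^4 + 10x^3, scaled by 2^SMOOTHSTEP_BFP (24).
 * At x = 0.5 (step 100 of 200) the table stores 0x800000, i.e. exactly 0.5.
 */
#include <stdint.h>
#include <stdio.h>

static double
smootherstep(double x)
{

	return (x * x * x * (x * (x * 6.0 - 15.0) + 10.0));
}

int
main(void)
{
	uint64_t h = (uint64_t)(smootherstep(0.5) * (1 << 24) + 0.5);

	printf("0x%06llx\n", (unsigned long long)h);	/* prints 0x800000 */
	return (0);
}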
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/jemalloc/include/jemalloc/internal/chunk_mmap.h
/******************************************************************************/ #ifdef JEMALLOC_H_TYPES #endif /* JEMALLOC_H_TYPES */ /******************************************************************************/ #ifdef JEMALLOC_H_STRUCTS #endif /* JEMALLOC_H_STRUCTS */ /******************************************************************************/ #ifdef JEMALLOC_H_EXTERNS void *chunk_alloc_mmap(void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit); bool chunk_dalloc_mmap(void *chunk, size_t size); #endif /* JEMALLOC_H_EXTERNS */ /******************************************************************************/ #ifdef JEMALLOC_H_INLINES #endif /* JEMALLOC_H_INLINES */ /******************************************************************************/
789
34.909091
80
h
null
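chunk_alloc_mmap() must hand back chunk-aligned mappings even though mmap() itself offers no alignment control. Below is a minimal POSIX sketch of the classic over-allocate-and-trim approach (a common technique, not jemalloc's exact implementation, which also handles retries and commit/zero state); aligned_mmap() is a hypothetical helper name, and the alignment is assumed to be a power of two.

/*
 * Hypothetical sketch, not jemalloc code: map size + alignment bytes,
 * then munmap() the leading and trailing slop around an aligned window.
 * Assumes alignment is a power of two.
 */
#define _DEFAULT_SOURCE	/* for MAP_ANONYMOUS on some libcs */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>

static void *
aligned_mmap(size_t size, size_t alignment)
{
	size_t alloc_size = size + alignment;
	void *p = mmap(NULL, alloc_size, PROT_READ | PROT_WRITE,
	    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	uintptr_t addr, aligned;
	size_t lead;

	if (p == MAP_FAILED)
		return (NULL);
	addr = (uintptr_t)p;
	aligned = (addr + alignment - 1) & ~(uintptr_t)(alignment - 1);
	lead = (size_t)(aligned - addr);
	if (lead != 0)
		munmap(p, lead);			/* leading slop */
	if (alignment - lead != 0)			/* trailing slop */
		munmap((void *)(aligned + size), alignment - lead);
	return ((void *)aligned);
}

int
main(void)
{
	void *chunk = aligned_mmap((size_t)2 << 20, (size_t)2 << 20);

	printf("%p\n", chunk);	/* low 21 bits are zero */
	return (0);
}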
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/jemalloc/include/jemalloc/internal/chunk.h
/******************************************************************************/ #ifdef JEMALLOC_H_TYPES /* * Size and alignment of memory chunks that are allocated by the OS's virtual * memory system. */ #define LG_CHUNK_DEFAULT 21 /* Return the chunk address for allocation address a. */ #define CHUNK_ADDR2BASE(a) \ ((void *)((uintptr_t)(a) & ~chunksize_mask)) /* Return the chunk offset of address a. */ #define CHUNK_ADDR2OFFSET(a) \ ((size_t)((uintptr_t)(a) & chunksize_mask)) /* Return the smallest chunk multiple that is >= s. */ #define CHUNK_CEILING(s) \ (((s) + chunksize_mask) & ~chunksize_mask) #define CHUNK_HOOKS_INITIALIZER { \ NULL, \ NULL, \ NULL, \ NULL, \ NULL, \ NULL, \ NULL \ } #endif /* JEMALLOC_H_TYPES */ /******************************************************************************/ #ifdef JEMALLOC_H_STRUCTS #endif /* JEMALLOC_H_STRUCTS */ /******************************************************************************/ #ifdef JEMALLOC_H_EXTERNS extern size_t opt_lg_chunk; extern const char *opt_dss; extern rtree_t chunks_rtree; extern size_t chunksize; extern size_t chunksize_mask; /* (chunksize - 1). */ extern size_t chunk_npages; extern const chunk_hooks_t chunk_hooks_default; chunk_hooks_t chunk_hooks_get(tsdn_t *tsdn, arena_t *arena); chunk_hooks_t chunk_hooks_set(tsdn_t *tsdn, arena_t *arena, const chunk_hooks_t *chunk_hooks); bool chunk_register(tsdn_t *tsdn, const void *chunk, const extent_node_t *node); void chunk_deregister(const void *chunk, const extent_node_t *node); void *chunk_alloc_base(size_t size); void *chunk_alloc_cache(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks, void *new_addr, size_t size, size_t alignment, size_t *sn, bool *zero, bool *commit, bool dalloc_node); void *chunk_alloc_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks, void *new_addr, size_t size, size_t alignment, size_t *sn, bool *zero, bool *commit); void chunk_dalloc_cache(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk, size_t size, size_t sn, bool committed); void chunk_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk, size_t size, size_t sn, bool zeroed, bool committed); bool chunk_purge_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks, void *chunk, size_t size, size_t offset, size_t length); bool chunk_boot(void); #endif /* JEMALLOC_H_EXTERNS */ /******************************************************************************/ #ifdef JEMALLOC_H_INLINES #ifndef JEMALLOC_ENABLE_INLINE extent_node_t *chunk_lookup(const void *chunk, bool dependent); #endif #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_CHUNK_C_)) JEMALLOC_INLINE extent_node_t * chunk_lookup(const void *ptr, bool dependent) { return (rtree_get(&chunks_rtree, (uintptr_t)ptr, dependent)); } #endif #endif /* JEMALLOC_H_INLINES */ /******************************************************************************/ #include "jemalloc/internal/chunk_dss.h" #include "jemalloc/internal/chunk_mmap.h"
3,196
31.622449
80
h
null
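A standalone worked example of the three address macros above, assuming the default 2 MiB chunk size (LG_CHUNK_DEFAULT == 21) and a hypothetical 64-bit address:

/*
 * Standalone illustration of CHUNK_ADDR2BASE()/CHUNK_ADDR2OFFSET()/
 * CHUNK_CEILING(), assuming chunksize = 1 << 21 (2 MiB).
 */
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	size_t chunksize_mask = ((size_t)1 << 21) - 1;
	uintptr_t a = (uintptr_t)0x7f0000212345;

	printf("base   = 0x%lx\n", (unsigned long)(a & ~chunksize_mask));
	/* 0x7f0000200000: first byte of the enclosing chunk. */
	printf("offset = 0x%lx\n", (unsigned long)(a & chunksize_mask));
	/* 0x12345: a's position within its chunk. */
	printf("ceil   = 0x%lx\n", (unsigned long)((((size_t)3 << 20) +
	    chunksize_mask) & ~chunksize_mask));
	/* 0x400000: smallest chunk multiple >= 3 MiB is 4 MiB. */
	return (0);
}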
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/jemalloc/include/jemalloc/internal/ckh.h
/******************************************************************************/ #ifdef JEMALLOC_H_TYPES typedef struct ckh_s ckh_t; typedef struct ckhc_s ckhc_t; /* Typedefs to allow easy function pointer passing. */ typedef void ckh_hash_t (const void *, size_t[2]); typedef bool ckh_keycomp_t (const void *, const void *); /* Maintain counters used to get an idea of performance. */ /* #define CKH_COUNT */ /* Print counter values in ckh_delete() (requires CKH_COUNT). */ /* #define CKH_VERBOSE */ /* * There are 2^LG_CKH_BUCKET_CELLS cells in each hash table bucket. Try to fit * one bucket per L1 cache line. */ #define LG_CKH_BUCKET_CELLS (LG_CACHELINE - LG_SIZEOF_PTR - 1) #endif /* JEMALLOC_H_TYPES */ /******************************************************************************/ #ifdef JEMALLOC_H_STRUCTS /* Hash table cell. */ struct ckhc_s { const void *key; const void *data; }; struct ckh_s { #ifdef CKH_COUNT /* Counters used to get an idea of performance. */ uint64_t ngrows; uint64_t nshrinks; uint64_t nshrinkfails; uint64_t ninserts; uint64_t nrelocs; #endif /* Used for pseudo-random number generation. */ uint64_t prng_state; /* Total number of items. */ size_t count; /* * Minimum and current number of hash table buckets. There are * 2^LG_CKH_BUCKET_CELLS cells per bucket. */ unsigned lg_minbuckets; unsigned lg_curbuckets; /* Hash and comparison functions. */ ckh_hash_t *hash; ckh_keycomp_t *keycomp; /* Hash table with 2^lg_curbuckets buckets. */ ckhc_t *tab; }; #endif /* JEMALLOC_H_STRUCTS */ /******************************************************************************/ #ifdef JEMALLOC_H_EXTERNS bool ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash, ckh_keycomp_t *keycomp); void ckh_delete(tsd_t *tsd, ckh_t *ckh); size_t ckh_count(ckh_t *ckh); bool ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data); bool ckh_insert(tsd_t *tsd, ckh_t *ckh, const void *key, const void *data); bool ckh_remove(tsd_t *tsd, ckh_t *ckh, const void *searchkey, void **key, void **data); bool ckh_search(ckh_t *ckh, const void *searchkey, void **key, void **data); void ckh_string_hash(const void *key, size_t r_hash[2]); bool ckh_string_keycomp(const void *k1, const void *k2); void ckh_pointer_hash(const void *key, size_t r_hash[2]); bool ckh_pointer_keycomp(const void *k1, const void *k2); #endif /* JEMALLOC_H_EXTERNS */ /******************************************************************************/ #ifdef JEMALLOC_H_INLINES #endif /* JEMALLOC_H_INLINES */ /******************************************************************************/
2,648
29.448276
80
h
null
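LG_CKH_BUCKET_CELLS above is chosen so that one bucket fills one L1 cache line. A standalone check under the common assumptions LG_CACHELINE == 6 (64-byte lines) and LG_SIZEOF_PTR == 3 (8-byte pointers):

/*
 * Standalone check, assuming 64-byte cache lines (LG_CACHELINE == 6)
 * and 8-byte pointers (LG_SIZEOF_PTR == 3):
 * LG_CKH_BUCKET_CELLS = 6 - 3 - 1 = 2, so a bucket holds 2^2 = 4 cells,
 * and 4 cells * 16 bytes per ckhc_t = 64 bytes, exactly one cache line.
 */
#include <assert.h>
#include <stddef.h>

struct ckhc { const void *key; const void *data; };	/* mirrors ckhc_s */

int
main(void)
{
	unsigned lg_cacheline = 6, lg_sizeof_ptr = 3;
	unsigned lg_bucket_cells = lg_cacheline - lg_sizeof_ptr - 1;

	assert(((size_t)1 << lg_bucket_cells) * sizeof(struct ckhc) ==
	    ((size_t)1 << lg_cacheline));
	return (0);
}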
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/jemalloc/include/jemalloc/internal/rtree.h
/* * This radix tree implementation is tailored to the singular purpose of * associating metadata with chunks that are currently owned by jemalloc. * ******************************************************************************* */ #ifdef JEMALLOC_H_TYPES typedef struct rtree_node_elm_s rtree_node_elm_t; typedef struct rtree_level_s rtree_level_t; typedef struct rtree_s rtree_t; /* * RTREE_BITS_PER_LEVEL must be a power of two that is no larger than the * machine address width. */ #define LG_RTREE_BITS_PER_LEVEL 4 #define RTREE_BITS_PER_LEVEL (1U << LG_RTREE_BITS_PER_LEVEL) /* Maximum rtree height. */ #define RTREE_HEIGHT_MAX \ ((1U << (LG_SIZEOF_PTR+3)) / RTREE_BITS_PER_LEVEL) /* Used for two-stage lock-free node initialization. */ #define RTREE_NODE_INITIALIZING ((rtree_node_elm_t *)0x1) /* * The node allocation callback function's argument is the number of contiguous * rtree_node_elm_t structures to allocate, and the resulting memory must be * zeroed. */ typedef rtree_node_elm_t *(rtree_node_alloc_t)(size_t); typedef void (rtree_node_dalloc_t)(rtree_node_elm_t *); #endif /* JEMALLOC_H_TYPES */ /******************************************************************************/ #ifdef JEMALLOC_H_STRUCTS struct rtree_node_elm_s { union { void *pun; rtree_node_elm_t *child; extent_node_t *val; }; }; struct rtree_level_s { /* * A non-NULL subtree points to a subtree rooted along the hypothetical * path to the leaf node corresponding to key 0. Depending on what keys * have been used to store to the tree, an arbitrary combination of * subtree pointers may remain NULL. * * Suppose keys comprise 48 bits, and LG_RTREE_BITS_PER_LEVEL is 4. * This results in a 3-level tree, and the leftmost leaf can be directly * accessed via subtrees[2], the subtree prefixed by 0x0000 (excluding * 0x00000000) can be accessed via subtrees[1], and the remainder of the * tree can be accessed via subtrees[0]. * * levels[0] : [<unused> | 0x0001******** | 0x0002******** | ...] * * levels[1] : [<unused> | 0x00000001**** | 0x00000002**** | ... ] * * levels[2] : [val(0x000000000000) | val(0x000000000001) | ...] * * This has practical implications on x64, which currently uses only the * lower 47 bits of virtual address space in userland, thus leaving * subtrees[0] unused and avoiding a level of tree traversal. */ union { void *subtree_pun; rtree_node_elm_t *subtree; }; /* Number of key bits distinguished by this level. */ unsigned bits; /* * Cumulative number of key bits distinguished by traversing to * corresponding tree level. */ unsigned cumbits; }; struct rtree_s { rtree_node_alloc_t *alloc; rtree_node_dalloc_t *dalloc; unsigned height; /* * Precomputed table used to convert from the number of leading 0 key * bits to which subtree level to start at. 
*/ unsigned start_level[RTREE_HEIGHT_MAX]; rtree_level_t levels[RTREE_HEIGHT_MAX]; }; #endif /* JEMALLOC_H_STRUCTS */ /******************************************************************************/ #ifdef JEMALLOC_H_EXTERNS bool rtree_new(rtree_t *rtree, unsigned bits, rtree_node_alloc_t *alloc, rtree_node_dalloc_t *dalloc); void rtree_delete(rtree_t *rtree); rtree_node_elm_t *rtree_subtree_read_hard(rtree_t *rtree, unsigned level); rtree_node_elm_t *rtree_child_read_hard(rtree_t *rtree, rtree_node_elm_t *elm, unsigned level); #endif /* JEMALLOC_H_EXTERNS */ /******************************************************************************/ #ifdef JEMALLOC_H_INLINES #ifndef JEMALLOC_ENABLE_INLINE unsigned rtree_start_level(rtree_t *rtree, uintptr_t key); uintptr_t rtree_subkey(rtree_t *rtree, uintptr_t key, unsigned level); bool rtree_node_valid(rtree_node_elm_t *node); rtree_node_elm_t *rtree_child_tryread(rtree_node_elm_t *elm, bool dependent); rtree_node_elm_t *rtree_child_read(rtree_t *rtree, rtree_node_elm_t *elm, unsigned level, bool dependent); extent_node_t *rtree_val_read(rtree_t *rtree, rtree_node_elm_t *elm, bool dependent); void rtree_val_write(rtree_t *rtree, rtree_node_elm_t *elm, const extent_node_t *val); rtree_node_elm_t *rtree_subtree_tryread(rtree_t *rtree, unsigned level, bool dependent); rtree_node_elm_t *rtree_subtree_read(rtree_t *rtree, unsigned level, bool dependent); extent_node_t *rtree_get(rtree_t *rtree, uintptr_t key, bool dependent); bool rtree_set(rtree_t *rtree, uintptr_t key, const extent_node_t *val); #endif #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_RTREE_C_)) JEMALLOC_ALWAYS_INLINE unsigned rtree_start_level(rtree_t *rtree, uintptr_t key) { unsigned start_level; if (unlikely(key == 0)) return (rtree->height - 1); start_level = rtree->start_level[lg_floor(key) >> LG_RTREE_BITS_PER_LEVEL]; assert(start_level < rtree->height); return (start_level); } JEMALLOC_ALWAYS_INLINE uintptr_t rtree_subkey(rtree_t *rtree, uintptr_t key, unsigned level) { return ((key >> ((ZU(1) << (LG_SIZEOF_PTR+3)) - rtree->levels[level].cumbits)) & ((ZU(1) << rtree->levels[level].bits) - 1)); } JEMALLOC_ALWAYS_INLINE bool rtree_node_valid(rtree_node_elm_t *node) { return ((uintptr_t)node > (uintptr_t)RTREE_NODE_INITIALIZING); } JEMALLOC_ALWAYS_INLINE rtree_node_elm_t * rtree_child_tryread(rtree_node_elm_t *elm, bool dependent) { rtree_node_elm_t *child; /* Double-checked read (first read may be stale). */ child = elm->child; if (!dependent && !rtree_node_valid(child)) child = atomic_read_p(&elm->pun); assert(!dependent || child != NULL); return (child); } JEMALLOC_ALWAYS_INLINE rtree_node_elm_t * rtree_child_read(rtree_t *rtree, rtree_node_elm_t *elm, unsigned level, bool dependent) { rtree_node_elm_t *child; child = rtree_child_tryread(elm, dependent); if (!dependent && unlikely(!rtree_node_valid(child))) child = rtree_child_read_hard(rtree, elm, level); assert(!dependent || child != NULL); return (child); } JEMALLOC_ALWAYS_INLINE extent_node_t * rtree_val_read(rtree_t *rtree, rtree_node_elm_t *elm, bool dependent) { if (dependent) { /* * Reading a val on behalf of a pointer to a valid allocation is * guaranteed to be a clean read even without synchronization, * because the rtree update became visible in memory before the * pointer came into existence. */ return (elm->val); } else { /* * An arbitrary read, e.g. on behalf of ivsalloc(), may not be * dependent on a previous rtree write, which means a stale read * could result if synchronization were omitted here. 
*/ return (atomic_read_p(&elm->pun)); } } JEMALLOC_INLINE void rtree_val_write(rtree_t *rtree, rtree_node_elm_t *elm, const extent_node_t *val) { atomic_write_p(&elm->pun, val); } JEMALLOC_ALWAYS_INLINE rtree_node_elm_t * rtree_subtree_tryread(rtree_t *rtree, unsigned level, bool dependent) { rtree_node_elm_t *subtree; /* Double-checked read (first read may be stale). */ subtree = rtree->levels[level].subtree; if (!dependent && unlikely(!rtree_node_valid(subtree))) subtree = atomic_read_p(&rtree->levels[level].subtree_pun); assert(!dependent || subtree != NULL); return (subtree); } JEMALLOC_ALWAYS_INLINE rtree_node_elm_t * rtree_subtree_read(rtree_t *rtree, unsigned level, bool dependent) { rtree_node_elm_t *subtree; subtree = rtree_subtree_tryread(rtree, level, dependent); if (!dependent && unlikely(!rtree_node_valid(subtree))) subtree = rtree_subtree_read_hard(rtree, level); assert(!dependent || subtree != NULL); return (subtree); } JEMALLOC_ALWAYS_INLINE extent_node_t * rtree_get(rtree_t *rtree, uintptr_t key, bool dependent) { uintptr_t subkey; unsigned start_level; rtree_node_elm_t *node; start_level = rtree_start_level(rtree, key); node = rtree_subtree_tryread(rtree, start_level, dependent); #define RTREE_GET_BIAS (RTREE_HEIGHT_MAX - rtree->height) switch (start_level + RTREE_GET_BIAS) { #define RTREE_GET_SUBTREE(level) \ case level: \ assert(level < (RTREE_HEIGHT_MAX-1)); \ if (!dependent && unlikely(!rtree_node_valid(node))) \ return (NULL); \ subkey = rtree_subkey(rtree, key, level - \ RTREE_GET_BIAS); \ node = rtree_child_tryread(&node[subkey], dependent); \ /* Fall through. */ #define RTREE_GET_LEAF(level) \ case level: \ assert(level == (RTREE_HEIGHT_MAX-1)); \ if (!dependent && unlikely(!rtree_node_valid(node))) \ return (NULL); \ subkey = rtree_subkey(rtree, key, level - \ RTREE_GET_BIAS); \ /* \ * node is a leaf, so it contains values rather than \ * child pointers. \ */ \ return (rtree_val_read(rtree, &node[subkey], \ dependent)); #if RTREE_HEIGHT_MAX > 1 RTREE_GET_SUBTREE(0) #endif #if RTREE_HEIGHT_MAX > 2 RTREE_GET_SUBTREE(1) #endif #if RTREE_HEIGHT_MAX > 3 RTREE_GET_SUBTREE(2) #endif #if RTREE_HEIGHT_MAX > 4 RTREE_GET_SUBTREE(3) #endif #if RTREE_HEIGHT_MAX > 5 RTREE_GET_SUBTREE(4) #endif #if RTREE_HEIGHT_MAX > 6 RTREE_GET_SUBTREE(5) #endif #if RTREE_HEIGHT_MAX > 7 RTREE_GET_SUBTREE(6) #endif #if RTREE_HEIGHT_MAX > 8 RTREE_GET_SUBTREE(7) #endif #if RTREE_HEIGHT_MAX > 9 RTREE_GET_SUBTREE(8) #endif #if RTREE_HEIGHT_MAX > 10 RTREE_GET_SUBTREE(9) #endif #if RTREE_HEIGHT_MAX > 11 RTREE_GET_SUBTREE(10) #endif #if RTREE_HEIGHT_MAX > 12 RTREE_GET_SUBTREE(11) #endif #if RTREE_HEIGHT_MAX > 13 RTREE_GET_SUBTREE(12) #endif #if RTREE_HEIGHT_MAX > 14 RTREE_GET_SUBTREE(13) #endif #if RTREE_HEIGHT_MAX > 15 RTREE_GET_SUBTREE(14) #endif #if RTREE_HEIGHT_MAX > 16 # error Unsupported RTREE_HEIGHT_MAX #endif RTREE_GET_LEAF(RTREE_HEIGHT_MAX-1) #undef RTREE_GET_SUBTREE #undef RTREE_GET_LEAF default: not_reached(); } #undef RTREE_GET_BIAS not_reached(); } JEMALLOC_INLINE bool rtree_set(rtree_t *rtree, uintptr_t key, const extent_node_t *val) { uintptr_t subkey; unsigned i, start_level; rtree_node_elm_t *node, *child; start_level = rtree_start_level(rtree, key); node = rtree_subtree_read(rtree, start_level, false); if (node == NULL) return (true); for (i = start_level; /**/; i++, node = child) { subkey = rtree_subkey(rtree, key, i); if (i == rtree->height - 1) { /* * node is a leaf, so it contains values rather than * child pointers. 
*/ rtree_val_write(rtree, &node[subkey], val); return (false); } assert(i + 1 < rtree->height); child = rtree_child_read(rtree, &node[subkey], i, false); if (child == NULL) return (true); } not_reached(); } #endif #endif /* JEMALLOC_H_INLINES */ /******************************************************************************/
10,608
27.907357
80
h
null
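rtree_subkey() peels key bits off most-significant first, level by level. A standalone sketch of that arithmetic for the simplest assumed configuration, a 64-bit machine with a tree covering all 64 key bits (height 4, so cumbits are 16/32/48/64):

/*
 * Standalone sketch of rtree_subkey() for an assumed height-4 tree over
 * all 64 key bits (RTREE_BITS_PER_LEVEL == 16, cumbits = 16/32/48/64 on
 * a 64-bit machine). Each level consumes the next-most-significant 16
 * bits of the key.
 */
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uintptr_t key = (uintptr_t)0x0123456789abcdefULL;
	unsigned cumbits[] = {16, 32, 48, 64};
	unsigned level;

	for (level = 0; level < 4; level++) {
		uintptr_t subkey = (key >> (64 - cumbits[level])) & 0xffff;
		printf("level %u: subkey 0x%04lx\n", level,
		    (unsigned long)subkey);
	}
	/* Prints 0x0123, 0x4567, 0x89ab, 0xcdef. */
	return (0);
}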
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/jemalloc/include/jemalloc/internal/stats.h
/******************************************************************************/ #ifdef JEMALLOC_H_TYPES typedef struct tcache_bin_stats_s tcache_bin_stats_t; typedef struct malloc_bin_stats_s malloc_bin_stats_t; typedef struct malloc_large_stats_s malloc_large_stats_t; typedef struct malloc_huge_stats_s malloc_huge_stats_t; typedef struct arena_stats_s arena_stats_t; typedef struct chunk_stats_s chunk_stats_t; #endif /* JEMALLOC_H_TYPES */ /******************************************************************************/ #ifdef JEMALLOC_H_STRUCTS struct tcache_bin_stats_s { /* * Number of allocation requests that corresponded to the size of this * bin. */ uint64_t nrequests; }; struct malloc_bin_stats_s { /* * Total number of allocation/deallocation requests served directly by * the bin. Note that tcache may allocate an object, then recycle it * many times, resulting in many increments to nrequests, but only one * each to nmalloc and ndalloc. */ uint64_t nmalloc; uint64_t ndalloc; /* * Number of allocation requests that correspond to the size of this * bin. This includes requests served by tcache, though tcache only * periodically merges into this counter. */ uint64_t nrequests; /* * Current number of regions of this size class, including regions * currently cached by tcache. */ size_t curregs; /* Number of tcache fills from this bin. */ uint64_t nfills; /* Number of tcache flushes to this bin. */ uint64_t nflushes; /* Total number of runs created for this bin's size class. */ uint64_t nruns; /* * Total number of runs reused by extracting them from the runs tree for * this bin's size class. */ uint64_t reruns; /* Current number of runs in this bin. */ size_t curruns; }; struct malloc_large_stats_s { /* * Total number of allocation/deallocation requests served directly by * the arena. Note that tcache may allocate an object, then recycle it * many times, resulting in many increments to nrequests, but only one * each to nmalloc and ndalloc. */ uint64_t nmalloc; uint64_t ndalloc; /* * Number of allocation requests that correspond to this size class. * This includes requests served by tcache, though tcache only * periodically merges into this counter. */ uint64_t nrequests; /* * Current number of runs of this size class, including runs currently * cached by tcache. */ size_t curruns; }; struct malloc_huge_stats_s { /* * Total number of allocation/deallocation requests served directly by * the arena. */ uint64_t nmalloc; uint64_t ndalloc; /* Current number of (multi-)chunk allocations of this size class. */ size_t curhchunks; }; struct arena_stats_s { /* Number of bytes currently mapped. */ size_t mapped; /* * Number of bytes currently retained as a side effect of munmap() being * disabled/bypassed. Retained bytes are technically mapped (though * always decommitted or purged), but they are excluded from the mapped * statistic (above). */ size_t retained; /* * Total number of purge sweeps, total number of madvise calls made, * and total pages purged in order to keep dirty unused memory under * control. */ uint64_t npurge; uint64_t nmadvise; uint64_t purged; /* * Number of bytes currently mapped purely for metadata purposes, and * number of bytes currently allocated for internal metadata. */ size_t metadata_mapped; size_t metadata_allocated; /* Protected via atomic_*_z(). */ /* Per-size-category statistics. 
*/ size_t allocated_large; uint64_t nmalloc_large; uint64_t ndalloc_large; uint64_t nrequests_large; size_t allocated_huge; uint64_t nmalloc_huge; uint64_t ndalloc_huge; /* One element for each large size class. */ malloc_large_stats_t *lstats; /* One element for each huge size class. */ malloc_huge_stats_t *hstats; }; #endif /* JEMALLOC_H_STRUCTS */ /******************************************************************************/ #ifdef JEMALLOC_H_EXTERNS extern bool opt_stats_print; extern size_t stats_cactive; void stats_print(void (*write)(void *, const char *), void *cbopaque, const char *opts); #endif /* JEMALLOC_H_EXTERNS */ /******************************************************************************/ #ifdef JEMALLOC_H_INLINES #ifndef JEMALLOC_ENABLE_INLINE size_t stats_cactive_get(void); void stats_cactive_add(size_t size); void stats_cactive_sub(size_t size); #endif #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_STATS_C_)) JEMALLOC_INLINE size_t stats_cactive_get(void) { return (atomic_read_z(&stats_cactive)); } JEMALLOC_INLINE void stats_cactive_add(size_t size) { assert(size > 0); assert((size & chunksize_mask) == 0); atomic_add_z(&stats_cactive, size); } JEMALLOC_INLINE void stats_cactive_sub(size_t size) { assert(size > 0); assert((size & chunksize_mask) == 0); atomic_sub_z(&stats_cactive, size); } #endif #endif /* JEMALLOC_H_INLINES */ /******************************************************************************/
5,028
24.39899
80
h
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/jemalloc/include/jemalloc/internal/util.h
/******************************************************************************/ #ifdef JEMALLOC_H_TYPES #ifdef _WIN32 # ifdef _WIN64 # define FMT64_PREFIX "ll" # define FMTPTR_PREFIX "ll" # else # define FMT64_PREFIX "ll" # define FMTPTR_PREFIX "" # endif # define FMTd32 "d" # define FMTu32 "u" # define FMTx32 "x" # define FMTd64 FMT64_PREFIX "d" # define FMTu64 FMT64_PREFIX "u" # define FMTx64 FMT64_PREFIX "x" # define FMTdPTR FMTPTR_PREFIX "d" # define FMTuPTR FMTPTR_PREFIX "u" # define FMTxPTR FMTPTR_PREFIX "x" #else # include <inttypes.h> # define FMTd32 PRId32 # define FMTu32 PRIu32 # define FMTx32 PRIx32 # define FMTd64 PRId64 # define FMTu64 PRIu64 # define FMTx64 PRIx64 # define FMTdPTR PRIdPTR # define FMTuPTR PRIuPTR # define FMTxPTR PRIxPTR #endif /* Size of stack-allocated buffer passed to buferror(). */ #define BUFERROR_BUF 64 /* * Size of stack-allocated buffer used by malloc_{,v,vc}printf(). This must be * large enough for all possible uses within jemalloc. */ #define MALLOC_PRINTF_BUFSIZE 4096 /* Junk fill patterns. */ #ifndef JEMALLOC_ALLOC_JUNK # define JEMALLOC_ALLOC_JUNK ((uint8_t)0xa5) #endif #ifndef JEMALLOC_FREE_JUNK # define JEMALLOC_FREE_JUNK ((uint8_t)0x5a) #endif /* * Wrap a cpp argument that contains commas such that it isn't broken up into * multiple arguments. */ #define JEMALLOC_ARG_CONCAT(...) __VA_ARGS__ /* * Silence compiler warnings due to uninitialized values. This is used * wherever the compiler fails to recognize that the variable is never used * uninitialized. */ #ifdef JEMALLOC_CC_SILENCE # define JEMALLOC_CC_SILENCE_INIT(v) = v #else # define JEMALLOC_CC_SILENCE_INIT(v) #endif #ifdef __GNUC__ # define likely(x) __builtin_expect(!!(x), 1) # define unlikely(x) __builtin_expect(!!(x), 0) #else # define likely(x) !!(x) # define unlikely(x) !!(x) #endif #if !defined(JEMALLOC_INTERNAL_UNREACHABLE) # error JEMALLOC_INTERNAL_UNREACHABLE should have been defined by configure #endif #define unreachable() JEMALLOC_INTERNAL_UNREACHABLE() #include "jemalloc/internal/assert.h" /* Use to assert a particular configuration, e.g., cassert(config_debug). */ #define cassert(c) do { \ if (unlikely(!(c))) \ not_reached(); \ } while (0) #endif /* JEMALLOC_H_TYPES */ /******************************************************************************/ #ifdef JEMALLOC_H_STRUCTS #endif /* JEMALLOC_H_STRUCTS */ /******************************************************************************/ #ifdef JEMALLOC_H_EXTERNS int buferror(int err, char *buf, size_t buflen); uintmax_t malloc_strtoumax(const char *restrict nptr, char **restrict endptr, int base); void malloc_write(const char *s); /* * malloc_vsnprintf() supports a subset of snprintf(3) that avoids floating * point math. */ size_t malloc_vsnprintf(char *str, size_t size, const char *format, va_list ap); size_t malloc_snprintf(char *str, size_t size, const char *format, ...) JEMALLOC_FORMAT_PRINTF(3, 4); void malloc_vcprintf(void (*write_cb)(void *, const char *), void *cbopaque, const char *format, va_list ap); void malloc_cprintf(void (*write)(void *, const char *), void *cbopaque, const char *format, ...) JEMALLOC_FORMAT_PRINTF(3, 4); void malloc_printf(const char *format, ...) 
JEMALLOC_FORMAT_PRINTF(1, 2); #endif /* JEMALLOC_H_EXTERNS */ /******************************************************************************/ #ifdef JEMALLOC_H_INLINES #ifndef JEMALLOC_ENABLE_INLINE unsigned ffs_llu(unsigned long long bitmap); unsigned ffs_lu(unsigned long bitmap); unsigned ffs_u(unsigned bitmap); unsigned ffs_zu(size_t bitmap); unsigned ffs_u64(uint64_t bitmap); unsigned ffs_u32(uint32_t bitmap); uint64_t pow2_ceil_u64(uint64_t x); uint32_t pow2_ceil_u32(uint32_t x); size_t pow2_ceil_zu(size_t x); unsigned lg_floor(size_t x); void set_errno(int errnum); int get_errno(void); #endif #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_UTIL_C_)) /* Sanity check. */ #if !defined(JEMALLOC_INTERNAL_FFSLL) || !defined(JEMALLOC_INTERNAL_FFSL) \ || !defined(JEMALLOC_INTERNAL_FFS) # error JEMALLOC_INTERNAL_FFS{,L,LL} should have been defined by configure #endif JEMALLOC_ALWAYS_INLINE unsigned ffs_llu(unsigned long long bitmap) { return (JEMALLOC_INTERNAL_FFSLL(bitmap)); } JEMALLOC_ALWAYS_INLINE unsigned ffs_lu(unsigned long bitmap) { return (JEMALLOC_INTERNAL_FFSL(bitmap)); } JEMALLOC_ALWAYS_INLINE unsigned ffs_u(unsigned bitmap) { return (JEMALLOC_INTERNAL_FFS(bitmap)); } JEMALLOC_ALWAYS_INLINE unsigned ffs_zu(size_t bitmap) { #if LG_SIZEOF_PTR == LG_SIZEOF_INT return (ffs_u(bitmap)); #elif LG_SIZEOF_PTR == LG_SIZEOF_LONG return (ffs_lu(bitmap)); #elif LG_SIZEOF_PTR == LG_SIZEOF_LONG_LONG return (ffs_llu(bitmap)); #else #error No implementation for size_t ffs() #endif } JEMALLOC_ALWAYS_INLINE unsigned ffs_u64(uint64_t bitmap) { #if LG_SIZEOF_LONG == 3 return (ffs_lu(bitmap)); #elif LG_SIZEOF_LONG_LONG == 3 return (ffs_llu(bitmap)); #else #error No implementation for 64-bit ffs() #endif } JEMALLOC_ALWAYS_INLINE unsigned ffs_u32(uint32_t bitmap) { #if LG_SIZEOF_INT == 2 return (ffs_u(bitmap)); #else #error No implementation for 32-bit ffs() #endif return (ffs_u(bitmap)); } JEMALLOC_INLINE uint64_t pow2_ceil_u64(uint64_t x) { x--; x |= x >> 1; x |= x >> 2; x |= x >> 4; x |= x >> 8; x |= x >> 16; x |= x >> 32; x++; return (x); } JEMALLOC_INLINE uint32_t pow2_ceil_u32(uint32_t x) { x--; x |= x >> 1; x |= x >> 2; x |= x >> 4; x |= x >> 8; x |= x >> 16; x++; return (x); } /* Compute the smallest power of 2 that is >= x. */ JEMALLOC_INLINE size_t pow2_ceil_zu(size_t x) { #if (LG_SIZEOF_PTR == 3) return (pow2_ceil_u64(x)); #else return (pow2_ceil_u32(x)); #endif } #if (defined(__i386__) || defined(__amd64__) || defined(__x86_64__)) JEMALLOC_INLINE unsigned lg_floor(size_t x) { size_t ret; assert(x != 0); asm ("bsr %1, %0" : "=r"(ret) // Outputs. : "r"(x) // Inputs. 
); assert(ret < UINT_MAX); return ((unsigned)ret); } #elif (defined(_MSC_VER)) JEMALLOC_INLINE unsigned lg_floor(size_t x) { unsigned long ret; assert(x != 0); #if (LG_SIZEOF_PTR == 3) _BitScanReverse64(&ret, x); #elif (LG_SIZEOF_PTR == 2) _BitScanReverse(&ret, x); #else # error "Unsupported type size for lg_floor()" #endif assert(ret < UINT_MAX); return ((unsigned)ret); } #elif (defined(JEMALLOC_HAVE_BUILTIN_CLZ)) JEMALLOC_INLINE unsigned lg_floor(size_t x) { assert(x != 0); #if (LG_SIZEOF_PTR == LG_SIZEOF_INT) return (((8 << LG_SIZEOF_PTR) - 1) - __builtin_clz(x)); #elif (LG_SIZEOF_PTR == LG_SIZEOF_LONG) return (((8 << LG_SIZEOF_PTR) - 1) - __builtin_clzl(x)); #else # error "Unsupported type size for lg_floor()" #endif } #else JEMALLOC_INLINE unsigned lg_floor(size_t x) { assert(x != 0); x |= (x >> 1); x |= (x >> 2); x |= (x >> 4); x |= (x >> 8); x |= (x >> 16); #if (LG_SIZEOF_PTR == 3) x |= (x >> 32); #endif if (x == SIZE_T_MAX) return ((8 << LG_SIZEOF_PTR) - 1); x++; return (ffs_zu(x) - 2); } #endif /* Set error code. */ JEMALLOC_INLINE void set_errno(int errnum) { #ifdef _WIN32 SetLastError(errnum); #else errno = errnum; #endif } /* Get last error code. */ JEMALLOC_INLINE int get_errno(void) { #ifdef _WIN32 return (GetLastError()); #else return (errno); #endif } #endif #endif /* JEMALLOC_H_INLINES */ /******************************************************************************/
7,458
20.746356
80
h
null
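A standalone worked example of the bit-smearing idiom behind pow2_ceil_u32() above: after the decrement, the OR/shift cascade copies the most significant set bit into every lower position, so the final increment lands on the next power of two.

/*
 * Worked example of the bit-smearing idiom in pow2_ceil_u32():
 * 37 = 0b100101; after x-- and the OR/shift cascade every bit below the
 * MSB is set (0b111111 = 63), and x++ yields 64.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t
pow2_ceil_u32(uint32_t x)
{

	x--;
	x |= x >> 1;
	x |= x >> 2;
	x |= x >> 4;
	x |= x >> 8;
	x |= x >> 16;
	x++;
	return (x);
}

int
main(void)
{

	/* 37 -> 64; 64 -> 64 (already a power of two); 65 -> 128. */
	printf("%u %u %u\n", pow2_ceil_u32(37), pow2_ceil_u32(64),
	    pow2_ceil_u32(65));
	return (0);
}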
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/jemalloc/include/jemalloc/internal/tcache.h
/******************************************************************************/ #ifdef JEMALLOC_H_TYPES typedef struct tcache_bin_info_s tcache_bin_info_t; typedef struct tcache_bin_s tcache_bin_t; typedef struct tcache_s tcache_t; typedef struct tcaches_s tcaches_t; /* * tcache pointers close to NULL are used to encode state information that is * used for two purposes: preventing thread caching on a per-thread basis and * cleaning up during thread shutdown. */ #define TCACHE_STATE_DISABLED ((tcache_t *)(uintptr_t)1) #define TCACHE_STATE_REINCARNATED ((tcache_t *)(uintptr_t)2) #define TCACHE_STATE_PURGATORY ((tcache_t *)(uintptr_t)3) #define TCACHE_STATE_MAX TCACHE_STATE_PURGATORY /* * Absolute minimum number of cache slots for each small bin. */ #define TCACHE_NSLOTS_SMALL_MIN 20 /* * Absolute maximum number of cache slots for each small bin in the thread * cache. This is an additional constraint beyond that imposed as: twice the * number of regions per run for this size class. * * This constant must be an even number. */ #define TCACHE_NSLOTS_SMALL_MAX 200 /* Number of cache slots for large size classes. */ #define TCACHE_NSLOTS_LARGE 20 /* (1U << opt_lg_tcache_max) is used to compute tcache_maxclass. */ #define LG_TCACHE_MAXCLASS_DEFAULT 15 /* * TCACHE_GC_SWEEP is the approximate number of allocation events between * full GC sweeps. Integer rounding may cause the actual number to be * slightly higher, since GC is performed incrementally. */ #define TCACHE_GC_SWEEP 8192 /* Number of tcache allocation/deallocation events between incremental GCs. */ #define TCACHE_GC_INCR \ ((TCACHE_GC_SWEEP / NBINS) + ((TCACHE_GC_SWEEP / NBINS == 0) ? 0 : 1)) #endif /* JEMALLOC_H_TYPES */ /******************************************************************************/ #ifdef JEMALLOC_H_STRUCTS typedef enum { tcache_enabled_false = 0, /* Enable cast to/from bool. */ tcache_enabled_true = 1, tcache_enabled_default = 2 } tcache_enabled_t; /* * Read-only information associated with each element of tcache_t's tbins array * is stored separately, mainly to reduce memory usage. */ struct tcache_bin_info_s { unsigned ncached_max; /* Upper limit on ncached. */ }; struct tcache_bin_s { tcache_bin_stats_t tstats; int low_water; /* Min # cached since last GC. */ unsigned lg_fill_div; /* Fill (ncached_max >> lg_fill_div). */ unsigned ncached; /* # of cached objects. */ /* * To make use of adjacent cacheline prefetch, the items in the avail * stack go to higher addresses for newer allocations. avail points * just above the available space, which means that * avail[-ncached, ... -1] are available items and the lowest item will * be allocated first. */ void **avail; /* Stack of available objects. */ }; struct tcache_s { ql_elm(tcache_t) link; /* Used for aggregating stats. */ uint64_t prof_accumbytes;/* Cleared after arena_prof_accum(). */ ticker_t gc_ticker; /* Drives incremental GC. */ szind_t next_gc_bin; /* Next bin to GC. */ tcache_bin_t tbins[1]; /* Dynamically sized. */ /* * The pointer stacks associated with tbins follow as a contiguous * array. During tcache initialization, the avail pointer in each * element of tbins is initialized to point to the proper offset within * this array. */ }; /* Linkage for list of available (previously used) explicit tcache IDs. 
*/ struct tcaches_s { union { tcache_t *tcache; tcaches_t *next; }; }; #endif /* JEMALLOC_H_STRUCTS */ /******************************************************************************/ #ifdef JEMALLOC_H_EXTERNS extern bool opt_tcache; extern ssize_t opt_lg_tcache_max; extern tcache_bin_info_t *tcache_bin_info; /* * Number of tcache bins. There are NBINS small-object bins, plus 0 or more * large-object bins. */ extern unsigned nhbins; /* Maximum cached size class. */ extern size_t tcache_maxclass; /* * Explicit tcaches, managed via the tcache.{create,flush,destroy} mallctls and * usable via the MALLOCX_TCACHE() flag. The automatic per thread tcaches are * completely disjoint from this data structure. tcaches starts off as a sparse * array, so it has no physical memory footprint until individual pages are * touched. This allows the entire array to be allocated the first time an * explicit tcache is created without a disproportionate impact on memory usage. */ extern tcaches_t *tcaches; size_t tcache_salloc(tsdn_t *tsdn, const void *ptr); void tcache_event_hard(tsd_t *tsd, tcache_t *tcache); void *tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache, tcache_bin_t *tbin, szind_t binind, bool *tcache_success); void tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin, szind_t binind, unsigned rem); void tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind, unsigned rem, tcache_t *tcache); void tcache_arena_reassociate(tsdn_t *tsdn, tcache_t *tcache, arena_t *oldarena, arena_t *newarena); tcache_t *tcache_get_hard(tsd_t *tsd); tcache_t *tcache_create(tsdn_t *tsdn, arena_t *arena); void tcache_cleanup(tsd_t *tsd); void tcache_enabled_cleanup(tsd_t *tsd); void tcache_stats_merge(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena); bool tcaches_create(tsd_t *tsd, unsigned *r_ind); void tcaches_flush(tsd_t *tsd, unsigned ind); void tcaches_destroy(tsd_t *tsd, unsigned ind); bool tcache_boot(tsdn_t *tsdn); #endif /* JEMALLOC_H_EXTERNS */ /******************************************************************************/ #ifdef JEMALLOC_H_INLINES #ifndef JEMALLOC_ENABLE_INLINE void tcache_event(tsd_t *tsd, tcache_t *tcache); void tcache_flush(void); bool tcache_enabled_get(void); tcache_t *tcache_get(tsd_t *tsd, bool create); void tcache_enabled_set(bool enabled); void *tcache_alloc_easy(tcache_bin_t *tbin, bool *tcache_success); void *tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size, szind_t ind, bool zero, bool slow_path); void *tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size, szind_t ind, bool zero, bool slow_path); void tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind, bool slow_path); void tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr, size_t size, bool slow_path); tcache_t *tcaches_get(tsd_t *tsd, unsigned ind); #endif #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_TCACHE_C_)) JEMALLOC_INLINE void tcache_flush(void) { tsd_t *tsd; cassert(config_tcache); tsd = tsd_fetch(); tcache_cleanup(tsd); } JEMALLOC_INLINE bool tcache_enabled_get(void) { tsd_t *tsd; tcache_enabled_t tcache_enabled; cassert(config_tcache); tsd = tsd_fetch(); tcache_enabled = tsd_tcache_enabled_get(tsd); if (tcache_enabled == tcache_enabled_default) { tcache_enabled = (tcache_enabled_t)opt_tcache; tsd_tcache_enabled_set(tsd, tcache_enabled); } return ((bool)tcache_enabled); } JEMALLOC_INLINE void tcache_enabled_set(bool enabled) { tsd_t *tsd; tcache_enabled_t tcache_enabled; 
cassert(config_tcache); tsd = tsd_fetch(); tcache_enabled = (tcache_enabled_t)enabled; tsd_tcache_enabled_set(tsd, tcache_enabled); if (!enabled) tcache_cleanup(tsd); } JEMALLOC_ALWAYS_INLINE tcache_t * tcache_get(tsd_t *tsd, bool create) { tcache_t *tcache; if (!config_tcache) return (NULL); tcache = tsd_tcache_get(tsd); if (!create) return (tcache); if (unlikely(tcache == NULL) && tsd_nominal(tsd)) { tcache = tcache_get_hard(tsd); tsd_tcache_set(tsd, tcache); } return (tcache); } JEMALLOC_ALWAYS_INLINE void tcache_event(tsd_t *tsd, tcache_t *tcache) { if (TCACHE_GC_INCR == 0) return; if (unlikely(ticker_tick(&tcache->gc_ticker))) tcache_event_hard(tsd, tcache); } JEMALLOC_ALWAYS_INLINE void * tcache_alloc_easy(tcache_bin_t *tbin, bool *tcache_success) { void *ret; if (unlikely(tbin->ncached == 0)) { tbin->low_water = -1; *tcache_success = false; return (NULL); } /* * tcache_success (instead of ret) should be checked upon the return of * this function. We avoid checking (ret == NULL) because there is * never a null stored on the avail stack (which is unknown to the * compiler), and eagerly checking ret would cause pipeline stall * (waiting for the cacheline). */ *tcache_success = true; ret = *(tbin->avail - tbin->ncached); tbin->ncached--; if (unlikely((int)tbin->ncached < tbin->low_water)) tbin->low_water = tbin->ncached; return (ret); } JEMALLOC_ALWAYS_INLINE void * tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size, szind_t binind, bool zero, bool slow_path) { void *ret; tcache_bin_t *tbin; bool tcache_success; size_t usize JEMALLOC_CC_SILENCE_INIT(0); assert(binind < NBINS); tbin = &tcache->tbins[binind]; ret = tcache_alloc_easy(tbin, &tcache_success); assert(tcache_success == (ret != NULL)); if (unlikely(!tcache_success)) { bool tcache_hard_success; arena = arena_choose(tsd, arena); if (unlikely(arena == NULL)) return (NULL); ret = tcache_alloc_small_hard(tsd_tsdn(tsd), arena, tcache, tbin, binind, &tcache_hard_success); if (tcache_hard_success == false) return (NULL); } assert(ret); /* * Only compute usize if required. The checks in the following if * statement are all static. */ if (config_prof || (slow_path && config_fill) || unlikely(zero)) { usize = index2size(binind); assert(tcache_salloc(tsd_tsdn(tsd), ret) == usize); } if (likely(!zero)) { if (slow_path && config_fill) { if (unlikely(opt_junk_alloc)) { arena_alloc_junk_small(ret, &arena_bin_info[binind], false); } else if (unlikely(opt_zero)) memset(ret, 0, usize); } } else { if (slow_path && config_fill && unlikely(opt_junk_alloc)) { arena_alloc_junk_small(ret, &arena_bin_info[binind], true); } memset(ret, 0, usize); } if (config_stats) tbin->tstats.nrequests++; if (config_prof) tcache->prof_accumbytes += usize; tcache_event(tsd, tcache); return (ret); } JEMALLOC_ALWAYS_INLINE void * tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size, szind_t binind, bool zero, bool slow_path) { void *ret; tcache_bin_t *tbin; bool tcache_success; assert(binind < nhbins); tbin = &tcache->tbins[binind]; ret = tcache_alloc_easy(tbin, &tcache_success); assert(tcache_success == (ret != NULL)); if (unlikely(!tcache_success)) { /* * Only allocate one large object at a time, because it's quite * expensive to create one and not use it. 
*/ arena = arena_choose(tsd, arena); if (unlikely(arena == NULL)) return (NULL); ret = arena_malloc_large(tsd_tsdn(tsd), arena, binind, zero); if (ret == NULL) return (NULL); } else { size_t usize JEMALLOC_CC_SILENCE_INIT(0); /* Only compute usize on demand */ if (config_prof || (slow_path && config_fill) || unlikely(zero)) { usize = index2size(binind); assert(usize <= tcache_maxclass); } if (config_prof && usize == LARGE_MINCLASS) { arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ret); size_t pageind = (((uintptr_t)ret - (uintptr_t)chunk) >> LG_PAGE); arena_mapbits_large_binind_set(chunk, pageind, BININD_INVALID); } if (likely(!zero)) { if (slow_path && config_fill) { if (unlikely(opt_junk_alloc)) { memset(ret, JEMALLOC_ALLOC_JUNK, usize); } else if (unlikely(opt_zero)) memset(ret, 0, usize); } } else memset(ret, 0, usize); if (config_stats) tbin->tstats.nrequests++; if (config_prof) tcache->prof_accumbytes += usize; } tcache_event(tsd, tcache); return (ret); } JEMALLOC_ALWAYS_INLINE void tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind, bool slow_path) { tcache_bin_t *tbin; tcache_bin_info_t *tbin_info; assert(tcache_salloc(tsd_tsdn(tsd), ptr) <= SMALL_MAXCLASS); if (slow_path && config_fill && unlikely(opt_junk_free)) arena_dalloc_junk_small(ptr, &arena_bin_info[binind]); tbin = &tcache->tbins[binind]; tbin_info = &tcache_bin_info[binind]; if (unlikely(tbin->ncached == tbin_info->ncached_max)) { tcache_bin_flush_small(tsd, tcache, tbin, binind, (tbin_info->ncached_max >> 1)); } assert(tbin->ncached < tbin_info->ncached_max); tbin->ncached++; *(tbin->avail - tbin->ncached) = ptr; tcache_event(tsd, tcache); } JEMALLOC_ALWAYS_INLINE void tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr, size_t size, bool slow_path) { szind_t binind; tcache_bin_t *tbin; tcache_bin_info_t *tbin_info; assert((size & PAGE_MASK) == 0); assert(tcache_salloc(tsd_tsdn(tsd), ptr) > SMALL_MAXCLASS); assert(tcache_salloc(tsd_tsdn(tsd), ptr) <= tcache_maxclass); binind = size2index(size); if (slow_path && config_fill && unlikely(opt_junk_free)) arena_dalloc_junk_large(ptr, size); tbin = &tcache->tbins[binind]; tbin_info = &tcache_bin_info[binind]; if (unlikely(tbin->ncached == tbin_info->ncached_max)) { tcache_bin_flush_large(tsd, tbin, binind, (tbin_info->ncached_max >> 1), tcache); } assert(tbin->ncached < tbin_info->ncached_max); tbin->ncached++; *(tbin->avail - tbin->ncached) = ptr; tcache_event(tsd, tcache); } JEMALLOC_ALWAYS_INLINE tcache_t * tcaches_get(tsd_t *tsd, unsigned ind) { tcaches_t *elm = &tcaches[ind]; if (unlikely(elm->tcache == NULL)) { elm->tcache = tcache_create(tsd_tsdn(tsd), arena_choose(tsd, NULL)); } return (elm->tcache); } #endif #endif /* JEMALLOC_H_INLINES */ /******************************************************************************/
13,576
27.887234
80
h
null
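A standalone toy (not jemalloc's real types) of the avail-stack indexing described in tcache_bin_s and used by tcache_alloc_easy() and tcache_dalloc_small(): avail points just past the slot array, pushes fill avail[-1], avail[-2], ... toward lower addresses, and a pop returns avail[-ncached].

/*
 * Standalone toy of the tbin avail-stack indexing; not jemalloc's
 * actual types. Pushes move toward lower addresses, and a pop returns
 * avail[-ncached], the lowest (most recently cached) item.
 */
#include <stdio.h>

int
main(void)
{
	void *slots[4];
	void **avail = &slots[4];	/* just above the available space */
	unsigned ncached = 0;
	int a = 1, b = 2;
	void *ret;

	/* Deallocation path: push. */
	ncached++;
	*(avail - ncached) = &a;	/* lands in slots[3] */
	ncached++;
	*(avail - ncached) = &b;	/* lands in slots[2] */

	/* Allocation path: pop. */
	ret = *(avail - ncached);
	ncached--;
	printf("%d\n", *(int *)ret);	/* prints 2, i.e. b */
	return (0);
}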
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/jemalloc/include/jemalloc/internal/base.h
/******************************************************************************/ #ifdef JEMALLOC_H_TYPES #endif /* JEMALLOC_H_TYPES */ /******************************************************************************/ #ifdef JEMALLOC_H_STRUCTS #endif /* JEMALLOC_H_STRUCTS */ /******************************************************************************/ #ifdef JEMALLOC_H_EXTERNS void *base_alloc(tsdn_t *tsdn, size_t size); void base_stats_get(tsdn_t *tsdn, size_t *allocated, size_t *resident, size_t *mapped); bool base_boot(void); void base_prefork(tsdn_t *tsdn); void base_postfork_parent(tsdn_t *tsdn); void base_postfork_child(tsdn_t *tsdn); #endif /* JEMALLOC_H_EXTERNS */ /******************************************************************************/ #ifdef JEMALLOC_H_INLINES #endif /* JEMALLOC_H_INLINES */ /******************************************************************************/
911
34.076923
80
h
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/jemalloc/include/jemalloc/internal/bitmap.h
/******************************************************************************/ #ifdef JEMALLOC_H_TYPES /* Maximum bitmap bit count is 2^LG_BITMAP_MAXBITS. */ #define LG_BITMAP_MAXBITS LG_RUN_MAXREGS #define BITMAP_MAXBITS (ZU(1) << LG_BITMAP_MAXBITS) typedef struct bitmap_level_s bitmap_level_t; typedef struct bitmap_info_s bitmap_info_t; typedef unsigned long bitmap_t; #define LG_SIZEOF_BITMAP LG_SIZEOF_LONG /* Number of bits per group. */ #define LG_BITMAP_GROUP_NBITS (LG_SIZEOF_BITMAP + 3) #define BITMAP_GROUP_NBITS (ZU(1) << LG_BITMAP_GROUP_NBITS) #define BITMAP_GROUP_NBITS_MASK (BITMAP_GROUP_NBITS-1) /* * Do some analysis on how big the bitmap is before we use a tree. For a brute * force linear search, if we would have to call ffs_lu() more than 2^3 times, * use a tree instead. */ #if LG_BITMAP_MAXBITS - LG_BITMAP_GROUP_NBITS > 3 # define USE_TREE #endif /* Number of groups required to store a given number of bits. */ #define BITMAP_BITS2GROUPS(nbits) \ ((nbits + BITMAP_GROUP_NBITS_MASK) >> LG_BITMAP_GROUP_NBITS) /* * Number of groups required at a particular level for a given number of bits. */ #define BITMAP_GROUPS_L0(nbits) \ BITMAP_BITS2GROUPS(nbits) #define BITMAP_GROUPS_L1(nbits) \ BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(nbits)) #define BITMAP_GROUPS_L2(nbits) \ BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS((nbits)))) #define BITMAP_GROUPS_L3(nbits) \ BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS(BITMAP_BITS2GROUPS( \ BITMAP_BITS2GROUPS((nbits))))) /* * Assuming the number of levels, number of groups required for a given number * of bits. */ #define BITMAP_GROUPS_1_LEVEL(nbits) \ BITMAP_GROUPS_L0(nbits) #define BITMAP_GROUPS_2_LEVEL(nbits) \ (BITMAP_GROUPS_1_LEVEL(nbits) + BITMAP_GROUPS_L1(nbits)) #define BITMAP_GROUPS_3_LEVEL(nbits) \ (BITMAP_GROUPS_2_LEVEL(nbits) + BITMAP_GROUPS_L2(nbits)) #define BITMAP_GROUPS_4_LEVEL(nbits) \ (BITMAP_GROUPS_3_LEVEL(nbits) + BITMAP_GROUPS_L3(nbits)) /* * Maximum number of groups required to support LG_BITMAP_MAXBITS. */ #ifdef USE_TREE #if LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS # define BITMAP_GROUPS_MAX BITMAP_GROUPS_1_LEVEL(BITMAP_MAXBITS) #elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 2 # define BITMAP_GROUPS_MAX BITMAP_GROUPS_2_LEVEL(BITMAP_MAXBITS) #elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 3 # define BITMAP_GROUPS_MAX BITMAP_GROUPS_3_LEVEL(BITMAP_MAXBITS) #elif LG_BITMAP_MAXBITS <= LG_BITMAP_GROUP_NBITS * 4 # define BITMAP_GROUPS_MAX BITMAP_GROUPS_4_LEVEL(BITMAP_MAXBITS) #else # error "Unsupported bitmap size" #endif /* Maximum number of levels possible. */ #define BITMAP_MAX_LEVELS \ (LG_BITMAP_MAXBITS / LG_SIZEOF_BITMAP) \ + !!(LG_BITMAP_MAXBITS % LG_SIZEOF_BITMAP) #else /* USE_TREE */ #define BITMAP_GROUPS_MAX BITMAP_BITS2GROUPS(BITMAP_MAXBITS) #endif /* USE_TREE */ #endif /* JEMALLOC_H_TYPES */ /******************************************************************************/ #ifdef JEMALLOC_H_STRUCTS struct bitmap_level_s { /* Offset of this level's groups within the array of groups. */ size_t group_offset; }; struct bitmap_info_s { /* Logical number of bits in bitmap (stored at bottom level). */ size_t nbits; #ifdef USE_TREE /* Number of levels necessary for nbits. */ unsigned nlevels; /* * Only the first (nlevels+1) elements are used, and levels are ordered * bottom to top (e.g. the bottom level is stored in levels[0]). */ bitmap_level_t levels[BITMAP_MAX_LEVELS+1]; #else /* USE_TREE */ /* Number of groups necessary for nbits. 
*/ size_t ngroups; #endif /* USE_TREE */ }; #endif /* JEMALLOC_H_STRUCTS */ /******************************************************************************/ #ifdef JEMALLOC_H_EXTERNS void bitmap_info_init(bitmap_info_t *binfo, size_t nbits); void bitmap_init(bitmap_t *bitmap, const bitmap_info_t *binfo); size_t bitmap_size(const bitmap_info_t *binfo); #endif /* JEMALLOC_H_EXTERNS */ /******************************************************************************/ #ifdef JEMALLOC_H_INLINES #ifndef JEMALLOC_ENABLE_INLINE bool bitmap_full(bitmap_t *bitmap, const bitmap_info_t *binfo); bool bitmap_get(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit); void bitmap_set(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit); size_t bitmap_sfu(bitmap_t *bitmap, const bitmap_info_t *binfo); void bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit); #endif #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_BITMAP_C_)) JEMALLOC_INLINE bool bitmap_full(bitmap_t *bitmap, const bitmap_info_t *binfo) { #ifdef USE_TREE size_t rgoff = binfo->levels[binfo->nlevels].group_offset - 1; bitmap_t rg = bitmap[rgoff]; /* The bitmap is full iff the root group is 0. */ return (rg == 0); #else size_t i; for (i = 0; i < binfo->ngroups; i++) { if (bitmap[i] != 0) return (false); } return (true); #endif } JEMALLOC_INLINE bool bitmap_get(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) { size_t goff; bitmap_t g; assert(bit < binfo->nbits); goff = bit >> LG_BITMAP_GROUP_NBITS; g = bitmap[goff]; return (!(g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK)))); } JEMALLOC_INLINE void bitmap_set(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) { size_t goff; bitmap_t *gp; bitmap_t g; assert(bit < binfo->nbits); assert(!bitmap_get(bitmap, binfo, bit)); goff = bit >> LG_BITMAP_GROUP_NBITS; gp = &bitmap[goff]; g = *gp; assert(g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK))); g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK); *gp = g; assert(bitmap_get(bitmap, binfo, bit)); #ifdef USE_TREE /* Propagate group state transitions up the tree. */ if (g == 0) { unsigned i; for (i = 1; i < binfo->nlevels; i++) { bit = goff; goff = bit >> LG_BITMAP_GROUP_NBITS; gp = &bitmap[binfo->levels[i].group_offset + goff]; g = *gp; assert(g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK))); g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK); *gp = g; if (g != 0) break; } } #endif } /* sfu: set first unset. */ JEMALLOC_INLINE size_t bitmap_sfu(bitmap_t *bitmap, const bitmap_info_t *binfo) { size_t bit; bitmap_t g; unsigned i; assert(!bitmap_full(bitmap, binfo)); #ifdef USE_TREE i = binfo->nlevels - 1; g = bitmap[binfo->levels[i].group_offset]; bit = ffs_lu(g) - 1; while (i > 0) { i--; g = bitmap[binfo->levels[i].group_offset + bit]; bit = (bit << LG_BITMAP_GROUP_NBITS) + (ffs_lu(g) - 1); } #else i = 0; g = bitmap[0]; while ((bit = ffs_lu(g)) == 0) { i++; g = bitmap[i]; } bit = (i << LG_BITMAP_GROUP_NBITS) + (bit - 1); #endif bitmap_set(bitmap, binfo, bit); return (bit); } JEMALLOC_INLINE void bitmap_unset(bitmap_t *bitmap, const bitmap_info_t *binfo, size_t bit) { size_t goff; bitmap_t *gp; bitmap_t g; UNUSED bool propagate; assert(bit < binfo->nbits); assert(bitmap_get(bitmap, binfo, bit)); goff = bit >> LG_BITMAP_GROUP_NBITS; gp = &bitmap[goff]; g = *gp; propagate = (g == 0); assert((g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK))) == 0); g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK); *gp = g; assert(!bitmap_get(bitmap, binfo, bit)); #ifdef USE_TREE /* Propagate group state transitions up the tree. 
*/ if (propagate) { unsigned i; for (i = 1; i < binfo->nlevels; i++) { bit = goff; goff = bit >> LG_BITMAP_GROUP_NBITS; gp = &bitmap[binfo->levels[i].group_offset + goff]; g = *gp; propagate = (g == 0); assert((g & (ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK))) == 0); g ^= ZU(1) << (bit & BITMAP_GROUP_NBITS_MASK); *gp = g; if (!propagate) break; } } #endif /* USE_TREE */ } #endif #endif /* JEMALLOC_H_INLINES */ /******************************************************************************/
7,819
27.436364
80
h
null
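The bitmap header above stores bits inverted (a 1 bit in a group means the logical bit is unset/free), so a find-first-set instruction on a group directly locates the first free slot. A minimal standalone sketch of that trick follows, single level only; the real header layers a tree of summary groups on top so the search stays logarithmic. All demo_* names are hypothetical, not jemalloc API.

#include <assert.h>
#include <stdio.h>
#include <string.h>
#include <strings.h>	/* ffs() */

#define DEMO_NGROUPS 4
#define DEMO_GROUP_NBITS (sizeof(unsigned int) * 8)

static unsigned int demo_groups[DEMO_NGROUPS];

static void
demo_init(void)
{
	/* All-ones == every bit logically unset (free). */
	memset(demo_groups, 0xff, sizeof(demo_groups));
}

/* "Set first unset": find the lowest 1 bit, clear it, return its index. */
static size_t
demo_sfu(void)
{
	size_t i;

	for (i = 0; i < DEMO_NGROUPS; i++) {
		int b = ffs((int)demo_groups[i]);
		if (b != 0) {
			demo_groups[i] &= ~(1U << (b - 1));
			return (i * DEMO_GROUP_NBITS + (size_t)(b - 1));
		}
	}
	assert(0 && "bitmap full");
	return (0);
}

int
main(void)
{
	size_t a, b;

	demo_init();
	a = demo_sfu();
	b = demo_sfu();
	printf("%zu %zu\n", a, b);	/* 0 1 */
	return (0);
}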
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/jemalloc/include/jemalloc/internal/ticker.h
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES

typedef struct ticker_s ticker_t;

#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS

struct ticker_s {
	int32_t		tick;
	int32_t		nticks;
};

#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

#ifndef JEMALLOC_ENABLE_INLINE
void	ticker_init(ticker_t *ticker, int32_t nticks);
void	ticker_copy(ticker_t *ticker, const ticker_t *other);
int32_t	ticker_read(const ticker_t *ticker);
bool	ticker_ticks(ticker_t *ticker, int32_t nticks);
bool	ticker_tick(ticker_t *ticker);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_TICKER_C_))
JEMALLOC_INLINE void
ticker_init(ticker_t *ticker, int32_t nticks)
{

	ticker->tick = nticks;
	ticker->nticks = nticks;
}

JEMALLOC_INLINE void
ticker_copy(ticker_t *ticker, const ticker_t *other)
{

	*ticker = *other;
}

JEMALLOC_INLINE int32_t
ticker_read(const ticker_t *ticker)
{

	return (ticker->tick);
}

JEMALLOC_INLINE bool
ticker_ticks(ticker_t *ticker, int32_t nticks)
{

	if (unlikely(ticker->tick < nticks)) {
		ticker->tick = ticker->nticks;
		return (true);
	}
	ticker->tick -= nticks;
	return (false);
}

JEMALLOC_INLINE bool
ticker_tick(ticker_t *ticker)
{

	return (ticker_ticks(ticker, 1));
}
#endif

#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
1,698
21.355263
80
h
null
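The ticker above implements a pay-per-event countdown: each event subtracts from tick, and when the counter would go negative the ticker reports true and rearms itself, which is how periodic work (e.g. decay) gets amortized over allocation events. A standalone sketch of those semantics, with hypothetical demo_* names:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef struct {
	int32_t tick;
	int32_t nticks;
} demo_ticker_t;

static bool
demo_ticker_ticks(demo_ticker_t *t, int32_t nticks)
{
	if (t->tick < nticks) {		/* would go negative: period elapsed */
		t->tick = t->nticks;	/* rearm for the next period */
		return (true);
	}
	t->tick -= nticks;
	return (false);
}

int
main(void)
{
	demo_ticker_t t = { 100, 100 };	/* fire roughly every 100 events */
	int i, fired = 0;

	for (i = 0; i < 1000; i++) {
		if (demo_ticker_ticks(&t, 1))
			fired++;
	}
	printf("fired %d times\n", fired);	/* 1000/101 -> 9 */
	return (0);
}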
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/jemalloc/include/jemalloc/internal/prng.h
/******************************************************************************/ #ifdef JEMALLOC_H_TYPES /* * Simple linear congruential pseudo-random number generator: * * prng(y) = (a*x + c) % m * * where the following constants ensure maximal period: * * a == Odd number (relatively prime to 2^n), and (a-1) is a multiple of 4. * c == Odd number (relatively prime to 2^n). * m == 2^32 * * See Knuth's TAOCP 3rd Ed., Vol. 2, pg. 17 for details on these constraints. * * This choice of m has the disadvantage that the quality of the bits is * proportional to bit position. For example, the lowest bit has a cycle of 2, * the next has a cycle of 4, etc. For this reason, we prefer to use the upper * bits. */ #define PRNG_A_32 UINT32_C(1103515241) #define PRNG_C_32 UINT32_C(12347) #define PRNG_A_64 UINT64_C(6364136223846793005) #define PRNG_C_64 UINT64_C(1442695040888963407) #endif /* JEMALLOC_H_TYPES */ /******************************************************************************/ #ifdef JEMALLOC_H_STRUCTS #endif /* JEMALLOC_H_STRUCTS */ /******************************************************************************/ #ifdef JEMALLOC_H_EXTERNS #endif /* JEMALLOC_H_EXTERNS */ /******************************************************************************/ #ifdef JEMALLOC_H_INLINES #ifndef JEMALLOC_ENABLE_INLINE uint32_t prng_state_next_u32(uint32_t state); uint64_t prng_state_next_u64(uint64_t state); size_t prng_state_next_zu(size_t state); uint32_t prng_lg_range_u32(uint32_t *state, unsigned lg_range, bool atomic); uint64_t prng_lg_range_u64(uint64_t *state, unsigned lg_range); size_t prng_lg_range_zu(size_t *state, unsigned lg_range, bool atomic); uint32_t prng_range_u32(uint32_t *state, uint32_t range, bool atomic); uint64_t prng_range_u64(uint64_t *state, uint64_t range); size_t prng_range_zu(size_t *state, size_t range, bool atomic); #endif #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_PRNG_C_)) JEMALLOC_ALWAYS_INLINE uint32_t prng_state_next_u32(uint32_t state) { return ((state * PRNG_A_32) + PRNG_C_32); } JEMALLOC_ALWAYS_INLINE uint64_t prng_state_next_u64(uint64_t state) { return ((state * PRNG_A_64) + PRNG_C_64); } JEMALLOC_ALWAYS_INLINE size_t prng_state_next_zu(size_t state) { #if LG_SIZEOF_PTR == 2 return ((state * PRNG_A_32) + PRNG_C_32); #elif LG_SIZEOF_PTR == 3 return ((state * PRNG_A_64) + PRNG_C_64); #else #error Unsupported pointer size #endif } JEMALLOC_ALWAYS_INLINE uint32_t prng_lg_range_u32(uint32_t *state, unsigned lg_range, bool atomic) { uint32_t ret, state1; assert(lg_range > 0); assert(lg_range <= 32); if (atomic) { uint32_t state0; do { state0 = atomic_read_uint32(state); state1 = prng_state_next_u32(state0); } while (atomic_cas_uint32(state, state0, state1)); } else { state1 = prng_state_next_u32(*state); *state = state1; } ret = state1 >> (32 - lg_range); return (ret); } /* 64-bit atomic operations cannot be supported on all relevant platforms. 
*/ JEMALLOC_ALWAYS_INLINE uint64_t prng_lg_range_u64(uint64_t *state, unsigned lg_range) { uint64_t ret, state1; assert(lg_range > 0); assert(lg_range <= 64); state1 = prng_state_next_u64(*state); *state = state1; ret = state1 >> (64 - lg_range); return (ret); } JEMALLOC_ALWAYS_INLINE size_t prng_lg_range_zu(size_t *state, unsigned lg_range, bool atomic) { size_t ret, state1; assert(lg_range > 0); assert(lg_range <= ZU(1) << (3 + LG_SIZEOF_PTR)); if (atomic) { size_t state0; do { state0 = atomic_read_z(state); state1 = prng_state_next_zu(state0); } while (atomic_cas_z(state, state0, state1)); } else { state1 = prng_state_next_zu(*state); *state = state1; } ret = state1 >> ((ZU(1) << (3 + LG_SIZEOF_PTR)) - lg_range); return (ret); } JEMALLOC_ALWAYS_INLINE uint32_t prng_range_u32(uint32_t *state, uint32_t range, bool atomic) { uint32_t ret; unsigned lg_range; assert(range > 1); /* Compute the ceiling of lg(range). */ lg_range = ffs_u32(pow2_ceil_u32(range)) - 1; /* Generate a result in [0..range) via repeated trial. */ do { ret = prng_lg_range_u32(state, lg_range, atomic); } while (ret >= range); return (ret); } JEMALLOC_ALWAYS_INLINE uint64_t prng_range_u64(uint64_t *state, uint64_t range) { uint64_t ret; unsigned lg_range; assert(range > 1); /* Compute the ceiling of lg(range). */ lg_range = ffs_u64(pow2_ceil_u64(range)) - 1; /* Generate a result in [0..range) via repeated trial. */ do { ret = prng_lg_range_u64(state, lg_range); } while (ret >= range); return (ret); } JEMALLOC_ALWAYS_INLINE size_t prng_range_zu(size_t *state, size_t range, bool atomic) { size_t ret; unsigned lg_range; assert(range > 1); /* Compute the ceiling of lg(range). */ lg_range = ffs_u64(pow2_ceil_u64(range)) - 1; /* Generate a result in [0..range) via repeated trial. */ do { ret = prng_lg_range_zu(state, lg_range, atomic); } while (ret >= range); return (ret); } #endif #endif /* JEMALLOC_H_INLINES */ /******************************************************************************/
5,087
23.461538
80
h
null
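As the prng.h comment explains, an LCG with m = 2^64 has progressively weaker low bits, so ranged values must come from the top of the state word. A standalone sketch of that technique, reusing the 64-bit constants from the header; everything else (demo_* names) is hypothetical:

#include <stdint.h>
#include <stdio.h>

#define DEMO_PRNG_A UINT64_C(6364136223846793005)
#define DEMO_PRNG_C UINT64_C(1442695040888963407)

static uint64_t
demo_prng_next(uint64_t state)
{
	return (state * DEMO_PRNG_A + DEMO_PRNG_C);	/* mod 2^64 implicit */
}

/* Uniform value in [0, 2^lg_range), taken from the high-quality upper bits. */
static uint64_t
demo_prng_lg_range(uint64_t *state, unsigned lg_range)
{
	*state = demo_prng_next(*state);
	return (*state >> (64 - lg_range));
}

int
main(void)
{
	uint64_t state = 42;	/* arbitrary nonzero seed */
	unsigned i;

	for (i = 0; i < 4; i++) {
		printf("%llu ", (unsigned long long)demo_prng_lg_range(&state,
		    8));	/* values in [0, 256) */
	}
	printf("\n");
	return (0);
}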
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/jemalloc/include/jemalloc/internal/ph.h
/* * A Pairing Heap implementation. * * "The Pairing Heap: A New Form of Self-Adjusting Heap" * https://www.cs.cmu.edu/~sleator/papers/pairing-heaps.pdf * * With auxiliary twopass list, described in a follow on paper. * * "Pairing Heaps: Experiments and Analysis" * http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.106.2988&rep=rep1&type=pdf * ******************************************************************************* */ #ifndef PH_H_ #define PH_H_ /* Node structure. */ #define phn(a_type) \ struct { \ a_type *phn_prev; \ a_type *phn_next; \ a_type *phn_lchild; \ } /* Root structure. */ #define ph(a_type) \ struct { \ a_type *ph_root; \ } /* Internal utility macros. */ #define phn_lchild_get(a_type, a_field, a_phn) \ (a_phn->a_field.phn_lchild) #define phn_lchild_set(a_type, a_field, a_phn, a_lchild) do { \ a_phn->a_field.phn_lchild = a_lchild; \ } while (0) #define phn_next_get(a_type, a_field, a_phn) \ (a_phn->a_field.phn_next) #define phn_prev_set(a_type, a_field, a_phn, a_prev) do { \ a_phn->a_field.phn_prev = a_prev; \ } while (0) #define phn_prev_get(a_type, a_field, a_phn) \ (a_phn->a_field.phn_prev) #define phn_next_set(a_type, a_field, a_phn, a_next) do { \ a_phn->a_field.phn_next = a_next; \ } while (0) #define phn_merge_ordered(a_type, a_field, a_phn0, a_phn1, a_cmp) do { \ a_type *phn0child; \ \ assert(a_phn0 != NULL); \ assert(a_phn1 != NULL); \ assert(a_cmp(a_phn0, a_phn1) <= 0); \ \ phn_prev_set(a_type, a_field, a_phn1, a_phn0); \ phn0child = phn_lchild_get(a_type, a_field, a_phn0); \ phn_next_set(a_type, a_field, a_phn1, phn0child); \ if (phn0child != NULL) \ phn_prev_set(a_type, a_field, phn0child, a_phn1); \ phn_lchild_set(a_type, a_field, a_phn0, a_phn1); \ } while (0) #define phn_merge(a_type, a_field, a_phn0, a_phn1, a_cmp, r_phn) do { \ if (a_phn0 == NULL) \ r_phn = a_phn1; \ else if (a_phn1 == NULL) \ r_phn = a_phn0; \ else if (a_cmp(a_phn0, a_phn1) < 0) { \ phn_merge_ordered(a_type, a_field, a_phn0, a_phn1, \ a_cmp); \ r_phn = a_phn0; \ } else { \ phn_merge_ordered(a_type, a_field, a_phn1, a_phn0, \ a_cmp); \ r_phn = a_phn1; \ } \ } while (0) #define ph_merge_siblings(a_type, a_field, a_phn, a_cmp, r_phn) do { \ a_type *head = NULL; \ a_type *tail = NULL; \ a_type *phn0 = a_phn; \ a_type *phn1 = phn_next_get(a_type, a_field, phn0); \ \ /* \ * Multipass merge, wherein the first two elements of a FIFO \ * are repeatedly merged, and each result is appended to the \ * singly linked FIFO, until the FIFO contains only a single \ * element. We start with a sibling list but no reference to \ * its tail, so we do a single pass over the sibling list to \ * populate the FIFO. 
\ */ \ if (phn1 != NULL) { \ a_type *phnrest = phn_next_get(a_type, a_field, phn1); \ if (phnrest != NULL) \ phn_prev_set(a_type, a_field, phnrest, NULL); \ phn_prev_set(a_type, a_field, phn0, NULL); \ phn_next_set(a_type, a_field, phn0, NULL); \ phn_prev_set(a_type, a_field, phn1, NULL); \ phn_next_set(a_type, a_field, phn1, NULL); \ phn_merge(a_type, a_field, phn0, phn1, a_cmp, phn0); \ head = tail = phn0; \ phn0 = phnrest; \ while (phn0 != NULL) { \ phn1 = phn_next_get(a_type, a_field, phn0); \ if (phn1 != NULL) { \ phnrest = phn_next_get(a_type, a_field, \ phn1); \ if (phnrest != NULL) { \ phn_prev_set(a_type, a_field, \ phnrest, NULL); \ } \ phn_prev_set(a_type, a_field, phn0, \ NULL); \ phn_next_set(a_type, a_field, phn0, \ NULL); \ phn_prev_set(a_type, a_field, phn1, \ NULL); \ phn_next_set(a_type, a_field, phn1, \ NULL); \ phn_merge(a_type, a_field, phn0, phn1, \ a_cmp, phn0); \ phn_next_set(a_type, a_field, tail, \ phn0); \ tail = phn0; \ phn0 = phnrest; \ } else { \ phn_next_set(a_type, a_field, tail, \ phn0); \ tail = phn0; \ phn0 = NULL; \ } \ } \ phn0 = head; \ phn1 = phn_next_get(a_type, a_field, phn0); \ if (phn1 != NULL) { \ while (true) { \ head = phn_next_get(a_type, a_field, \ phn1); \ assert(phn_prev_get(a_type, a_field, \ phn0) == NULL); \ phn_next_set(a_type, a_field, phn0, \ NULL); \ assert(phn_prev_get(a_type, a_field, \ phn1) == NULL); \ phn_next_set(a_type, a_field, phn1, \ NULL); \ phn_merge(a_type, a_field, phn0, phn1, \ a_cmp, phn0); \ if (head == NULL) \ break; \ phn_next_set(a_type, a_field, tail, \ phn0); \ tail = phn0; \ phn0 = head; \ phn1 = phn_next_get(a_type, a_field, \ phn0); \ } \ } \ } \ r_phn = phn0; \ } while (0) #define ph_merge_aux(a_type, a_field, a_ph, a_cmp) do { \ a_type *phn = phn_next_get(a_type, a_field, a_ph->ph_root); \ if (phn != NULL) { \ phn_prev_set(a_type, a_field, a_ph->ph_root, NULL); \ phn_next_set(a_type, a_field, a_ph->ph_root, NULL); \ phn_prev_set(a_type, a_field, phn, NULL); \ ph_merge_siblings(a_type, a_field, phn, a_cmp, phn); \ assert(phn_next_get(a_type, a_field, phn) == NULL); \ phn_merge(a_type, a_field, a_ph->ph_root, phn, a_cmp, \ a_ph->ph_root); \ } \ } while (0) #define ph_merge_children(a_type, a_field, a_phn, a_cmp, r_phn) do { \ a_type *lchild = phn_lchild_get(a_type, a_field, a_phn); \ if (lchild == NULL) \ r_phn = NULL; \ else { \ ph_merge_siblings(a_type, a_field, lchild, a_cmp, \ r_phn); \ } \ } while (0) /* * The ph_proto() macro generates function prototypes that correspond to the * functions generated by an equivalently parameterized call to ph_gen(). */ #define ph_proto(a_attr, a_prefix, a_ph_type, a_type) \ a_attr void a_prefix##new(a_ph_type *ph); \ a_attr bool a_prefix##empty(a_ph_type *ph); \ a_attr a_type *a_prefix##first(a_ph_type *ph); \ a_attr void a_prefix##insert(a_ph_type *ph, a_type *phn); \ a_attr a_type *a_prefix##remove_first(a_ph_type *ph); \ a_attr void a_prefix##remove(a_ph_type *ph, a_type *phn); /* * The ph_gen() macro generates a type-specific pairing heap implementation, * based on the above cpp macros. 
*/ #define ph_gen(a_attr, a_prefix, a_ph_type, a_type, a_field, a_cmp) \ a_attr void \ a_prefix##new(a_ph_type *ph) \ { \ \ memset(ph, 0, sizeof(ph(a_type))); \ } \ a_attr bool \ a_prefix##empty(a_ph_type *ph) \ { \ \ return (ph->ph_root == NULL); \ } \ a_attr a_type * \ a_prefix##first(a_ph_type *ph) \ { \ \ if (ph->ph_root == NULL) \ return (NULL); \ ph_merge_aux(a_type, a_field, ph, a_cmp); \ return (ph->ph_root); \ } \ a_attr void \ a_prefix##insert(a_ph_type *ph, a_type *phn) \ { \ \ memset(&phn->a_field, 0, sizeof(phn(a_type))); \ \ /* \ * Treat the root as an aux list during insertion, and lazily \ * merge during a_prefix##remove_first(). For elements that \ * are inserted, then removed via a_prefix##remove() before the \ * aux list is ever processed, this makes insert/remove \ * constant-time, whereas eager merging would make insert \ * O(log n). \ */ \ if (ph->ph_root == NULL) \ ph->ph_root = phn; \ else { \ phn_next_set(a_type, a_field, phn, phn_next_get(a_type, \ a_field, ph->ph_root)); \ if (phn_next_get(a_type, a_field, ph->ph_root) != \ NULL) { \ phn_prev_set(a_type, a_field, \ phn_next_get(a_type, a_field, ph->ph_root), \ phn); \ } \ phn_prev_set(a_type, a_field, phn, ph->ph_root); \ phn_next_set(a_type, a_field, ph->ph_root, phn); \ } \ } \ a_attr a_type * \ a_prefix##remove_first(a_ph_type *ph) \ { \ a_type *ret; \ \ if (ph->ph_root == NULL) \ return (NULL); \ ph_merge_aux(a_type, a_field, ph, a_cmp); \ \ ret = ph->ph_root; \ \ ph_merge_children(a_type, a_field, ph->ph_root, a_cmp, \ ph->ph_root); \ \ return (ret); \ } \ a_attr void \ a_prefix##remove(a_ph_type *ph, a_type *phn) \ { \ a_type *replace, *parent; \ \ /* \ * We can delete from aux list without merging it, but we need \ * to merge if we are dealing with the root node. \ */ \ if (ph->ph_root == phn) { \ ph_merge_aux(a_type, a_field, ph, a_cmp); \ if (ph->ph_root == phn) { \ ph_merge_children(a_type, a_field, ph->ph_root, \ a_cmp, ph->ph_root); \ return; \ } \ } \ \ /* Get parent (if phn is leftmost child) before mutating. */ \ if ((parent = phn_prev_get(a_type, a_field, phn)) != NULL) { \ if (phn_lchild_get(a_type, a_field, parent) != phn) \ parent = NULL; \ } \ /* Find a possible replacement node, and link to parent. */ \ ph_merge_children(a_type, a_field, phn, a_cmp, replace); \ /* Set next/prev for sibling linked list. */ \ if (replace != NULL) { \ if (parent != NULL) { \ phn_prev_set(a_type, a_field, replace, parent); \ phn_lchild_set(a_type, a_field, parent, \ replace); \ } else { \ phn_prev_set(a_type, a_field, replace, \ phn_prev_get(a_type, a_field, phn)); \ if (phn_prev_get(a_type, a_field, phn) != \ NULL) { \ phn_next_set(a_type, a_field, \ phn_prev_get(a_type, a_field, phn), \ replace); \ } \ } \ phn_next_set(a_type, a_field, replace, \ phn_next_get(a_type, a_field, phn)); \ if (phn_next_get(a_type, a_field, phn) != NULL) { \ phn_prev_set(a_type, a_field, \ phn_next_get(a_type, a_field, phn), \ replace); \ } \ } else { \ if (parent != NULL) { \ a_type *next = phn_next_get(a_type, a_field, \ phn); \ phn_lchild_set(a_type, a_field, parent, next); \ if (next != NULL) { \ phn_prev_set(a_type, a_field, next, \ parent); \ } \ } else { \ assert(phn_prev_get(a_type, a_field, phn) != \ NULL); \ phn_next_set(a_type, a_field, \ phn_prev_get(a_type, a_field, phn), \ phn_next_get(a_type, a_field, phn)); \ } \ if (phn_next_get(a_type, a_field, phn) != NULL) { \ phn_prev_set(a_type, a_field, \ phn_next_get(a_type, a_field, phn), \ phn_prev_get(a_type, a_field, phn)); \ } \ } \ } #endif /* PH_H_ */
10,965
30.693642
86
h
null
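The core of the pairing heap above is that insert is a single merge and delete-min is a two-pass pairing of the root's child list. A much-simplified standalone sketch of just that core follows; it deliberately omits the aux list and prev pointers the macros maintain for O(1) arbitrary remove, and uses a recursive second pass instead of the macros' iterative FIFO. Hypothetical demo code, not the ph_gen() machinery.

#include <stdio.h>

typedef struct demo_node_s {
	int key;
	struct demo_node_s *child;	/* leftmost child */
	struct demo_node_s *next;	/* right sibling */
} demo_node_t;

static demo_node_t *
demo_merge(demo_node_t *a, demo_node_t *b)
{
	if (a == NULL) return (b);
	if (b == NULL) return (a);
	if (b->key < a->key) { demo_node_t *t = a; a = b; b = t; }
	b->next = a->child;	/* larger root becomes leftmost child */
	a->child = b;
	return (a);
}

/* Two-pass merge of a sibling list: pair left-to-right, then fold back. */
static demo_node_t *
demo_merge_pairs(demo_node_t *n)
{
	demo_node_t *a, *b, *rest;

	if (n == NULL || n->next == NULL)
		return (n);
	a = n; b = n->next; rest = b->next;
	a->next = b->next = NULL;
	return (demo_merge(demo_merge(a, b), demo_merge_pairs(rest)));
}

int
main(void)
{
	int keys[] = { 5, 1, 4, 2, 3 };
	demo_node_t nodes[5];
	demo_node_t *heap = NULL;
	int i;

	for (i = 0; i < 5; i++) {
		nodes[i].key = keys[i];
		nodes[i].child = nodes[i].next = NULL;
		heap = demo_merge(heap, &nodes[i]);	/* insert == merge */
	}
	while (heap != NULL) {
		printf("%d ", heap->key);	/* prints 1 2 3 4 5 */
		heap = demo_merge_pairs(heap->child);	/* delete-min */
	}
	printf("\n");
	return (0);
}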
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/jemalloc/include/jemalloc/internal/huge.h
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES

#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS

#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

void	*huge_malloc(tsdn_t *tsdn, arena_t *arena, size_t usize, bool zero);
void	*huge_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize,
    size_t alignment, bool zero);
bool	huge_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize,
    size_t usize_min, size_t usize_max, bool zero);
void	*huge_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize,
    size_t usize, size_t alignment, bool zero, tcache_t *tcache);
#ifdef JEMALLOC_JET
typedef void (huge_dalloc_junk_t)(void *, size_t);
extern huge_dalloc_junk_t *huge_dalloc_junk;
#endif
void	huge_dalloc(tsdn_t *tsdn, void *ptr);
arena_t	*huge_aalloc(const void *ptr);
size_t	huge_salloc(tsdn_t *tsdn, const void *ptr);
prof_tctx_t	*huge_prof_tctx_get(tsdn_t *tsdn, const void *ptr);
void	huge_prof_tctx_set(tsdn_t *tsdn, const void *ptr, prof_tctx_t *tctx);
void	huge_prof_tctx_reset(tsdn_t *tsdn, const void *ptr);

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
1,518
41.194444
80
h
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/jemalloc/include/jemalloc/internal/assert.h
/*
 * Define a custom assert() in order to reduce the chances of deadlock during
 * assertion failure.
 */
#ifndef assert
#define	assert(e) do {							\
	if (unlikely(config_debug && !(e))) {				\
		malloc_printf(						\
		    "<jemalloc>: %s:%d: Failed assertion: \"%s\"\n",	\
		    __FILE__, __LINE__, #e);				\
		abort();						\
	}								\
} while (0)
#endif

#ifndef not_reached
#define	not_reached() do {						\
	if (config_debug) {						\
		malloc_printf(						\
		    "<jemalloc>: %s:%d: Unreachable code reached\n",	\
		    __FILE__, __LINE__);				\
		abort();						\
	}								\
	unreachable();							\
} while (0)
#endif

#ifndef not_implemented
#define	not_implemented() do {						\
	if (config_debug) {						\
		malloc_printf("<jemalloc>: %s:%d: Not implemented\n",	\
		    __FILE__, __LINE__);				\
		abort();						\
	}								\
} while (0)
#endif

#ifndef assert_not_implemented
#define	assert_not_implemented(e) do {					\
	if (unlikely(config_debug && !(e)))				\
		not_implemented();					\
} while (0)
#endif
1,029
21.391304
77
h
null
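The rationale above is that libc's assert machinery may allocate or take locks, which can deadlock inside an allocator; reporting must go through an allocation-free path before abort(). A standalone analogue of the pattern, using write(2) (async-signal-safe, allocation-free) in place of malloc_printf(); the demo_assert name is hypothetical, not the jemalloc macro:

#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#define demo_assert(e) do {						\
	if (!(e)) {							\
		const char *msg = "demo: failed assertion: " #e "\n";	\
		(void)write(STDERR_FILENO, msg, strlen(msg));		\
		abort();						\
	}								\
} while (0)

int
main(void)
{
	demo_assert(1 + 1 == 2);	/* passes silently */
	demo_assert(0 && "boom");	/* prints the message, then aborts */
	return (0);
}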
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/jemalloc/include/jemalloc/internal/atomic.h
/******************************************************************************/ #ifdef JEMALLOC_H_TYPES #endif /* JEMALLOC_H_TYPES */ /******************************************************************************/ #ifdef JEMALLOC_H_STRUCTS #endif /* JEMALLOC_H_STRUCTS */ /******************************************************************************/ #ifdef JEMALLOC_H_EXTERNS #define atomic_read_uint64(p) atomic_add_uint64(p, 0) #define atomic_read_uint32(p) atomic_add_uint32(p, 0) #define atomic_read_p(p) atomic_add_p(p, NULL) #define atomic_read_z(p) atomic_add_z(p, 0) #define atomic_read_u(p) atomic_add_u(p, 0) #endif /* JEMALLOC_H_EXTERNS */ /******************************************************************************/ #ifdef JEMALLOC_H_INLINES /* * All arithmetic functions return the arithmetic result of the atomic * operation. Some atomic operation APIs return the value prior to mutation, in * which case the following functions must redundantly compute the result so * that it can be returned. These functions are normally inlined, so the extra * operations can be optimized away if the return values aren't used by the * callers. * * <t> atomic_read_<t>(<t> *p) { return (*p); } * <t> atomic_add_<t>(<t> *p, <t> x) { return (*p += x); } * <t> atomic_sub_<t>(<t> *p, <t> x) { return (*p -= x); } * bool atomic_cas_<t>(<t> *p, <t> c, <t> s) * { * if (*p != c) * return (true); * *p = s; * return (false); * } * void atomic_write_<t>(<t> *p, <t> x) { *p = x; } */ #ifndef JEMALLOC_ENABLE_INLINE uint64_t atomic_add_uint64(uint64_t *p, uint64_t x); uint64_t atomic_sub_uint64(uint64_t *p, uint64_t x); bool atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s); void atomic_write_uint64(uint64_t *p, uint64_t x); uint32_t atomic_add_uint32(uint32_t *p, uint32_t x); uint32_t atomic_sub_uint32(uint32_t *p, uint32_t x); bool atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s); void atomic_write_uint32(uint32_t *p, uint32_t x); void *atomic_add_p(void **p, void *x); void *atomic_sub_p(void **p, void *x); bool atomic_cas_p(void **p, void *c, void *s); void atomic_write_p(void **p, const void *x); size_t atomic_add_z(size_t *p, size_t x); size_t atomic_sub_z(size_t *p, size_t x); bool atomic_cas_z(size_t *p, size_t c, size_t s); void atomic_write_z(size_t *p, size_t x); unsigned atomic_add_u(unsigned *p, unsigned x); unsigned atomic_sub_u(unsigned *p, unsigned x); bool atomic_cas_u(unsigned *p, unsigned c, unsigned s); void atomic_write_u(unsigned *p, unsigned x); #endif #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_ATOMIC_C_)) /******************************************************************************/ /* 64-bit operations. */ #if (LG_SIZEOF_PTR == 3 || LG_SIZEOF_INT == 3) # if (defined(__amd64__) || defined(__x86_64__)) JEMALLOC_INLINE uint64_t atomic_add_uint64(uint64_t *p, uint64_t x) { uint64_t t = x; asm volatile ( "lock; xaddq %0, %1;" : "+r" (t), "=m" (*p) /* Outputs. */ : "m" (*p) /* Inputs. */ ); return (t + x); } JEMALLOC_INLINE uint64_t atomic_sub_uint64(uint64_t *p, uint64_t x) { uint64_t t; x = (uint64_t)(-(int64_t)x); t = x; asm volatile ( "lock; xaddq %0, %1;" : "+r" (t), "=m" (*p) /* Outputs. */ : "m" (*p) /* Inputs. */ ); return (t + x); } JEMALLOC_INLINE bool atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s) { uint8_t success; asm volatile ( "lock; cmpxchgq %4, %0;" "sete %1;" : "=m" (*p), "=a" (success) /* Outputs. */ : "m" (*p), "a" (c), "r" (s) /* Inputs. */ : "memory" /* Clobbers. 
*/ ); return (!(bool)success); } JEMALLOC_INLINE void atomic_write_uint64(uint64_t *p, uint64_t x) { asm volatile ( "xchgq %1, %0;" /* Lock is implied by xchgq. */ : "=m" (*p), "+r" (x) /* Outputs. */ : "m" (*p) /* Inputs. */ : "memory" /* Clobbers. */ ); } # elif (defined(JEMALLOC_C11ATOMICS)) JEMALLOC_INLINE uint64_t atomic_add_uint64(uint64_t *p, uint64_t x) { volatile atomic_uint_least64_t *a = (volatile atomic_uint_least64_t *)p; return (atomic_fetch_add(a, x) + x); } JEMALLOC_INLINE uint64_t atomic_sub_uint64(uint64_t *p, uint64_t x) { volatile atomic_uint_least64_t *a = (volatile atomic_uint_least64_t *)p; return (atomic_fetch_sub(a, x) - x); } JEMALLOC_INLINE bool atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s) { volatile atomic_uint_least64_t *a = (volatile atomic_uint_least64_t *)p; return (!atomic_compare_exchange_strong(a, &c, s)); } JEMALLOC_INLINE void atomic_write_uint64(uint64_t *p, uint64_t x) { volatile atomic_uint_least64_t *a = (volatile atomic_uint_least64_t *)p; atomic_store(a, x); } # elif (defined(JEMALLOC_ATOMIC9)) JEMALLOC_INLINE uint64_t atomic_add_uint64(uint64_t *p, uint64_t x) { /* * atomic_fetchadd_64() doesn't exist, but we only ever use this * function on LP64 systems, so atomic_fetchadd_long() will do. */ assert(sizeof(uint64_t) == sizeof(unsigned long)); return (atomic_fetchadd_long(p, (unsigned long)x) + x); } JEMALLOC_INLINE uint64_t atomic_sub_uint64(uint64_t *p, uint64_t x) { assert(sizeof(uint64_t) == sizeof(unsigned long)); return (atomic_fetchadd_long(p, (unsigned long)(-(long)x)) - x); } JEMALLOC_INLINE bool atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s) { assert(sizeof(uint64_t) == sizeof(unsigned long)); return (!atomic_cmpset_long(p, (unsigned long)c, (unsigned long)s)); } JEMALLOC_INLINE void atomic_write_uint64(uint64_t *p, uint64_t x) { assert(sizeof(uint64_t) == sizeof(unsigned long)); atomic_store_rel_long(p, x); } # elif (defined(JEMALLOC_OSATOMIC)) JEMALLOC_INLINE uint64_t atomic_add_uint64(uint64_t *p, uint64_t x) { return (OSAtomicAdd64((int64_t)x, (int64_t *)p)); } JEMALLOC_INLINE uint64_t atomic_sub_uint64(uint64_t *p, uint64_t x) { return (OSAtomicAdd64(-((int64_t)x), (int64_t *)p)); } JEMALLOC_INLINE bool atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s) { return (!OSAtomicCompareAndSwap64(c, s, (int64_t *)p)); } JEMALLOC_INLINE void atomic_write_uint64(uint64_t *p, uint64_t x) { uint64_t o; /*The documented OSAtomic*() API does not expose an atomic exchange. 
*/ do { o = atomic_read_uint64(p); } while (atomic_cas_uint64(p, o, x)); } # elif (defined(_MSC_VER)) JEMALLOC_INLINE uint64_t atomic_add_uint64(uint64_t *p, uint64_t x) { return (InterlockedExchangeAdd64(p, x) + x); } JEMALLOC_INLINE uint64_t atomic_sub_uint64(uint64_t *p, uint64_t x) { return (InterlockedExchangeAdd64(p, -((int64_t)x)) - x); } JEMALLOC_INLINE bool atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s) { uint64_t o; o = InterlockedCompareExchange64(p, s, c); return (o != c); } JEMALLOC_INLINE void atomic_write_uint64(uint64_t *p, uint64_t x) { InterlockedExchange64(p, x); } # elif (defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8) || \ defined(JE_FORCE_SYNC_COMPARE_AND_SWAP_8)) JEMALLOC_INLINE uint64_t atomic_add_uint64(uint64_t *p, uint64_t x) { return (__sync_add_and_fetch(p, x)); } JEMALLOC_INLINE uint64_t atomic_sub_uint64(uint64_t *p, uint64_t x) { return (__sync_sub_and_fetch(p, x)); } JEMALLOC_INLINE bool atomic_cas_uint64(uint64_t *p, uint64_t c, uint64_t s) { return (!__sync_bool_compare_and_swap(p, c, s)); } JEMALLOC_INLINE void atomic_write_uint64(uint64_t *p, uint64_t x) { __sync_lock_test_and_set(p, x); } # else # error "Missing implementation for 64-bit atomic operations" # endif #endif /******************************************************************************/ /* 32-bit operations. */ #if (defined(__i386__) || defined(__amd64__) || defined(__x86_64__)) JEMALLOC_INLINE uint32_t atomic_add_uint32(uint32_t *p, uint32_t x) { uint32_t t = x; asm volatile ( "lock; xaddl %0, %1;" : "+r" (t), "=m" (*p) /* Outputs. */ : "m" (*p) /* Inputs. */ ); return (t + x); } JEMALLOC_INLINE uint32_t atomic_sub_uint32(uint32_t *p, uint32_t x) { uint32_t t; x = (uint32_t)(-(int32_t)x); t = x; asm volatile ( "lock; xaddl %0, %1;" : "+r" (t), "=m" (*p) /* Outputs. */ : "m" (*p) /* Inputs. */ ); return (t + x); } JEMALLOC_INLINE bool atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s) { uint8_t success; asm volatile ( "lock; cmpxchgl %4, %0;" "sete %1;" : "=m" (*p), "=a" (success) /* Outputs. */ : "m" (*p), "a" (c), "r" (s) /* Inputs. */ : "memory" ); return (!(bool)success); } JEMALLOC_INLINE void atomic_write_uint32(uint32_t *p, uint32_t x) { asm volatile ( "xchgl %1, %0;" /* Lock is implied by xchgl. */ : "=m" (*p), "+r" (x) /* Outputs. */ : "m" (*p) /* Inputs. */ : "memory" /* Clobbers. 
*/ ); } # elif (defined(JEMALLOC_C11ATOMICS)) JEMALLOC_INLINE uint32_t atomic_add_uint32(uint32_t *p, uint32_t x) { volatile atomic_uint_least32_t *a = (volatile atomic_uint_least32_t *)p; return (atomic_fetch_add(a, x) + x); } JEMALLOC_INLINE uint32_t atomic_sub_uint32(uint32_t *p, uint32_t x) { volatile atomic_uint_least32_t *a = (volatile atomic_uint_least32_t *)p; return (atomic_fetch_sub(a, x) - x); } JEMALLOC_INLINE bool atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s) { volatile atomic_uint_least32_t *a = (volatile atomic_uint_least32_t *)p; return (!atomic_compare_exchange_strong(a, &c, s)); } JEMALLOC_INLINE void atomic_write_uint32(uint32_t *p, uint32_t x) { volatile atomic_uint_least32_t *a = (volatile atomic_uint_least32_t *)p; atomic_store(a, x); } #elif (defined(JEMALLOC_ATOMIC9)) JEMALLOC_INLINE uint32_t atomic_add_uint32(uint32_t *p, uint32_t x) { return (atomic_fetchadd_32(p, x) + x); } JEMALLOC_INLINE uint32_t atomic_sub_uint32(uint32_t *p, uint32_t x) { return (atomic_fetchadd_32(p, (uint32_t)(-(int32_t)x)) - x); } JEMALLOC_INLINE bool atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s) { return (!atomic_cmpset_32(p, c, s)); } JEMALLOC_INLINE void atomic_write_uint32(uint32_t *p, uint32_t x) { atomic_store_rel_32(p, x); } #elif (defined(JEMALLOC_OSATOMIC)) JEMALLOC_INLINE uint32_t atomic_add_uint32(uint32_t *p, uint32_t x) { return (OSAtomicAdd32((int32_t)x, (int32_t *)p)); } JEMALLOC_INLINE uint32_t atomic_sub_uint32(uint32_t *p, uint32_t x) { return (OSAtomicAdd32(-((int32_t)x), (int32_t *)p)); } JEMALLOC_INLINE bool atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s) { return (!OSAtomicCompareAndSwap32(c, s, (int32_t *)p)); } JEMALLOC_INLINE void atomic_write_uint32(uint32_t *p, uint32_t x) { uint32_t o; /*The documented OSAtomic*() API does not expose an atomic exchange. */ do { o = atomic_read_uint32(p); } while (atomic_cas_uint32(p, o, x)); } #elif (defined(_MSC_VER)) JEMALLOC_INLINE uint32_t atomic_add_uint32(uint32_t *p, uint32_t x) { return (InterlockedExchangeAdd(p, x) + x); } JEMALLOC_INLINE uint32_t atomic_sub_uint32(uint32_t *p, uint32_t x) { return (InterlockedExchangeAdd(p, -((int32_t)x)) - x); } JEMALLOC_INLINE bool atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s) { uint32_t o; o = InterlockedCompareExchange(p, s, c); return (o != c); } JEMALLOC_INLINE void atomic_write_uint32(uint32_t *p, uint32_t x) { InterlockedExchange(p, x); } #elif (defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4) || \ defined(JE_FORCE_SYNC_COMPARE_AND_SWAP_4)) JEMALLOC_INLINE uint32_t atomic_add_uint32(uint32_t *p, uint32_t x) { return (__sync_add_and_fetch(p, x)); } JEMALLOC_INLINE uint32_t atomic_sub_uint32(uint32_t *p, uint32_t x) { return (__sync_sub_and_fetch(p, x)); } JEMALLOC_INLINE bool atomic_cas_uint32(uint32_t *p, uint32_t c, uint32_t s) { return (!__sync_bool_compare_and_swap(p, c, s)); } JEMALLOC_INLINE void atomic_write_uint32(uint32_t *p, uint32_t x) { __sync_lock_test_and_set(p, x); } #else # error "Missing implementation for 32-bit atomic operations" #endif /******************************************************************************/ /* Pointer operations. 
*/ JEMALLOC_INLINE void * atomic_add_p(void **p, void *x) { #if (LG_SIZEOF_PTR == 3) return ((void *)atomic_add_uint64((uint64_t *)p, (uint64_t)x)); #elif (LG_SIZEOF_PTR == 2) return ((void *)atomic_add_uint32((uint32_t *)p, (uint32_t)x)); #endif } JEMALLOC_INLINE void * atomic_sub_p(void **p, void *x) { #if (LG_SIZEOF_PTR == 3) return ((void *)atomic_add_uint64((uint64_t *)p, (uint64_t)-((int64_t)x))); #elif (LG_SIZEOF_PTR == 2) return ((void *)atomic_add_uint32((uint32_t *)p, (uint32_t)-((int32_t)x))); #endif } JEMALLOC_INLINE bool atomic_cas_p(void **p, void *c, void *s) { #if (LG_SIZEOF_PTR == 3) return (atomic_cas_uint64((uint64_t *)p, (uint64_t)c, (uint64_t)s)); #elif (LG_SIZEOF_PTR == 2) return (atomic_cas_uint32((uint32_t *)p, (uint32_t)c, (uint32_t)s)); #endif } JEMALLOC_INLINE void atomic_write_p(void **p, const void *x) { #if (LG_SIZEOF_PTR == 3) atomic_write_uint64((uint64_t *)p, (uint64_t)x); #elif (LG_SIZEOF_PTR == 2) atomic_write_uint32((uint32_t *)p, (uint32_t)x); #endif } /******************************************************************************/ /* size_t operations. */ JEMALLOC_INLINE size_t atomic_add_z(size_t *p, size_t x) { #if (LG_SIZEOF_PTR == 3) return ((size_t)atomic_add_uint64((uint64_t *)p, (uint64_t)x)); #elif (LG_SIZEOF_PTR == 2) return ((size_t)atomic_add_uint32((uint32_t *)p, (uint32_t)x)); #endif } JEMALLOC_INLINE size_t atomic_sub_z(size_t *p, size_t x) { #if (LG_SIZEOF_PTR == 3) return ((size_t)atomic_add_uint64((uint64_t *)p, (uint64_t)-((int64_t)x))); #elif (LG_SIZEOF_PTR == 2) return ((size_t)atomic_add_uint32((uint32_t *)p, (uint32_t)-((int32_t)x))); #endif } JEMALLOC_INLINE bool atomic_cas_z(size_t *p, size_t c, size_t s) { #if (LG_SIZEOF_PTR == 3) return (atomic_cas_uint64((uint64_t *)p, (uint64_t)c, (uint64_t)s)); #elif (LG_SIZEOF_PTR == 2) return (atomic_cas_uint32((uint32_t *)p, (uint32_t)c, (uint32_t)s)); #endif } JEMALLOC_INLINE void atomic_write_z(size_t *p, size_t x) { #if (LG_SIZEOF_PTR == 3) atomic_write_uint64((uint64_t *)p, (uint64_t)x); #elif (LG_SIZEOF_PTR == 2) atomic_write_uint32((uint32_t *)p, (uint32_t)x); #endif } /******************************************************************************/ /* unsigned operations. */ JEMALLOC_INLINE unsigned atomic_add_u(unsigned *p, unsigned x) { #if (LG_SIZEOF_INT == 3) return ((unsigned)atomic_add_uint64((uint64_t *)p, (uint64_t)x)); #elif (LG_SIZEOF_INT == 2) return ((unsigned)atomic_add_uint32((uint32_t *)p, (uint32_t)x)); #endif } JEMALLOC_INLINE unsigned atomic_sub_u(unsigned *p, unsigned x) { #if (LG_SIZEOF_INT == 3) return ((unsigned)atomic_add_uint64((uint64_t *)p, (uint64_t)-((int64_t)x))); #elif (LG_SIZEOF_INT == 2) return ((unsigned)atomic_add_uint32((uint32_t *)p, (uint32_t)-((int32_t)x))); #endif } JEMALLOC_INLINE bool atomic_cas_u(unsigned *p, unsigned c, unsigned s) { #if (LG_SIZEOF_INT == 3) return (atomic_cas_uint64((uint64_t *)p, (uint64_t)c, (uint64_t)s)); #elif (LG_SIZEOF_INT == 2) return (atomic_cas_uint32((uint32_t *)p, (uint32_t)c, (uint32_t)s)); #endif } JEMALLOC_INLINE void atomic_write_u(unsigned *p, unsigned x) { #if (LG_SIZEOF_INT == 3) atomic_write_uint64((uint64_t *)p, (uint64_t)x); #elif (LG_SIZEOF_INT == 2) atomic_write_uint32((uint32_t *)p, (uint32_t)x); #endif } /******************************************************************************/ #endif #endif /* JEMALLOC_H_INLINES */ /******************************************************************************/
15,441
22.684049
80
h
null
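Note the CAS convention documented in the header above: atomic_cas_*() returns false on success and true on failure, the opposite of many APIs. A standalone sketch of a retry loop written against that convention, assuming the GCC/Clang __sync_bool_compare_and_swap() builtin (one of the fallbacks the header itself uses); demo_* names are hypothetical:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool
demo_cas_u32(uint32_t *p, uint32_t c, uint32_t s)
{
	/* The __sync builtin returns true on success; invert to match. */
	return (!__sync_bool_compare_and_swap(p, c, s));
}

static uint32_t
demo_incr(uint32_t *p)
{
	uint32_t old;

	do {
		old = *p;
	} while (demo_cas_u32(p, old, old + 1));	/* retry while CAS fails */
	return (old + 1);
}

int
main(void)
{
	uint32_t x = 0;

	demo_incr(&x);
	demo_incr(&x);
	printf("%u\n", demo_incr(&x));	/* 3 */
	return (0);
}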
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_decls.h
#ifndef JEMALLOC_INTERNAL_DECLS_H
#define	JEMALLOC_INTERNAL_DECLS_H

#include <math.h>
#ifdef _WIN32
#  include <windows.h>
#  include "msvc_compat/windows_extra.h"
#else
#  include <sys/param.h>
#  include <sys/mman.h>
#  if !defined(__pnacl__) && !defined(__native_client__)
#    include <sys/syscall.h>
#    if !defined(SYS_write) && defined(__NR_write)
#      define SYS_write __NR_write
#    endif
#    include <sys/uio.h>
#  endif
#  include <pthread.h>
#  ifdef JEMALLOC_OS_UNFAIR_LOCK
#    include <os/lock.h>
#  endif
#  ifdef JEMALLOC_GLIBC_MALLOC_HOOK
#    include <sched.h>
#  endif
#  include <errno.h>
#  include <sys/time.h>
#  include <time.h>
#  ifdef JEMALLOC_HAVE_MACH_ABSOLUTE_TIME
#    include <mach/mach_time.h>
#  endif
#endif
#include <sys/types.h>

#include <limits.h>
#ifndef SIZE_T_MAX
#  define SIZE_T_MAX	SIZE_MAX
#endif
#include <stdarg.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <stddef.h>
#ifndef offsetof
#  define offsetof(type, member) ((size_t)&(((type *)NULL)->member))
#endif
#include <string.h>
#include <strings.h>
#include <ctype.h>
#ifdef _MSC_VER
#  include <io.h>
typedef intptr_t ssize_t;
#  define PATH_MAX 1024
#  define STDERR_FILENO 2
#  define __func__ __FUNCTION__
#  ifdef JEMALLOC_HAS_RESTRICT
#    define restrict __restrict
#  endif
/* Disable warnings about deprecated system functions. */
#  pragma warning(disable: 4996)
#if _MSC_VER < 1800
static int
isblank(int c)
{

	return (c == '\t' || c == ' ');
}
#endif
#else
#  include <unistd.h>
#endif

#include <fcntl.h>

#endif /* JEMALLOC_INTERNAL_DECLS_H */
1,608
20.171053
68
h
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/jemalloc/include/jemalloc/internal/mb.h
/******************************************************************************/ #ifdef JEMALLOC_H_TYPES #endif /* JEMALLOC_H_TYPES */ /******************************************************************************/ #ifdef JEMALLOC_H_STRUCTS #endif /* JEMALLOC_H_STRUCTS */ /******************************************************************************/ #ifdef JEMALLOC_H_EXTERNS #endif /* JEMALLOC_H_EXTERNS */ /******************************************************************************/ #ifdef JEMALLOC_H_INLINES #ifndef JEMALLOC_ENABLE_INLINE void mb_write(void); #endif #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_MB_C_)) #ifdef __i386__ /* * According to the Intel Architecture Software Developer's Manual, current * processors execute instructions in order from the perspective of other * processors in a multiprocessor system, but 1) Intel reserves the right to * change that, and 2) the compiler's optimizer could re-order instructions if * there weren't some form of barrier. Therefore, even if running on an * architecture that does not need memory barriers (everything through at least * i686), an "optimizer barrier" is necessary. */ JEMALLOC_INLINE void mb_write(void) { # if 0 /* This is a true memory barrier. */ asm volatile ("pusha;" "xor %%eax,%%eax;" "cpuid;" "popa;" : /* Outputs. */ : /* Inputs. */ : "memory" /* Clobbers. */ ); # else /* * This is hopefully enough to keep the compiler from reordering * instructions around this one. */ asm volatile ("nop;" : /* Outputs. */ : /* Inputs. */ : "memory" /* Clobbers. */ ); # endif } #elif (defined(__amd64__) || defined(__x86_64__)) JEMALLOC_INLINE void mb_write(void) { asm volatile ("sfence" : /* Outputs. */ : /* Inputs. */ : "memory" /* Clobbers. */ ); } #elif defined(__powerpc__) JEMALLOC_INLINE void mb_write(void) { asm volatile ("eieio" : /* Outputs. */ : /* Inputs. */ : "memory" /* Clobbers. */ ); } #elif defined(__sparc64__) JEMALLOC_INLINE void mb_write(void) { asm volatile ("membar #StoreStore" : /* Outputs. */ : /* Inputs. */ : "memory" /* Clobbers. */ ); } #elif defined(__tile__) JEMALLOC_INLINE void mb_write(void) { __sync_synchronize(); } #else /* * This is much slower than a simple memory barrier, but the semantics of mutex * unlock make this work. */ JEMALLOC_INLINE void mb_write(void) { malloc_mutex_t mtx; malloc_mutex_init(&mtx, "mb", WITNESS_RANK_OMIT); malloc_mutex_lock(TSDN_NULL, &mtx); malloc_mutex_unlock(TSDN_NULL, &mtx); } #endif #endif #endif /* JEMALLOC_H_INLINES */ /******************************************************************************/
2,738
22.612069
80
h
null
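The classic use for a store barrier like mb_write() above is publication: write a payload, fence, then set a ready flag, so any thread that observes the flag also observes the payload. A standalone single-threaded sketch using the GCC/Clang __sync_synchronize() full barrier as a portable stand-in; note it shows only the producer side, and a real consumer on weakly ordered hardware needs a matching read barrier. Hypothetical demo code:

#include <stdio.h>

static int demo_payload;
static volatile int demo_ready;

static void
demo_producer(void)
{
	demo_payload = 42;	/* 1: write the data */
	__sync_synchronize();	/* 2: store barrier (cf. mb_write()) */
	demo_ready = 1;		/* 3: publish */
}

static void
demo_consumer(void)
{
	if (demo_ready)		/* if the flag is visible... */
		printf("%d\n", demo_payload);	/* ...so is the payload */
}

int
main(void)
{
	demo_producer();
	demo_consumer();
	return (0);
}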
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/jemalloc/include/jemalloc/internal/quarantine.h
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES

typedef struct quarantine_obj_s quarantine_obj_t;
typedef struct quarantine_s quarantine_t;

/* Default per thread quarantine size if valgrind is enabled. */
#define	JEMALLOC_VALGRIND_QUARANTINE_DEFAULT	(ZU(1) << 24)

#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS

struct quarantine_obj_s {
	void	*ptr;
	size_t	usize;
};

struct quarantine_s {
	size_t			curbytes;
	size_t			curobjs;
	size_t			first;
#define	LG_MAXOBJS_INIT 10
	size_t			lg_maxobjs;
	quarantine_obj_t	objs[1]; /* Dynamically sized ring buffer. */
};

#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

void	quarantine_alloc_hook_work(tsd_t *tsd);
void	quarantine(tsd_t *tsd, void *ptr);
void	quarantine_cleanup(tsd_t *tsd);

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

#ifndef JEMALLOC_ENABLE_INLINE
void	quarantine_alloc_hook(void);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_QUARANTINE_C_))
JEMALLOC_ALWAYS_INLINE void
quarantine_alloc_hook(void)
{
	tsd_t *tsd;

	assert(config_fill && opt_quarantine);

	tsd = tsd_fetch();
	if (tsd_quarantine_get(tsd) == NULL)
		quarantine_alloc_hook_work(tsd);
}
#endif

#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
1,593
25.131148
80
h
null
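The "dynamically sized ring buffer" in the struct above implements delayed free: freed objects are parked FIFO and only really released when they age out, so use-after-free bugs hit memory that is provably not yet reused (and can be junk-checked). A fixed-size standalone sketch of that idea (the real one also tracks curbytes and can grow); demo_* names are hypothetical:

#include <stdlib.h>

#define DEMO_QOBJS 4

static void *demo_ring[DEMO_QOBJS];
static size_t demo_first, demo_curobjs;	/* oldest slot, occupancy */

static void
demo_quarantine(void *ptr)
{
	if (demo_curobjs == DEMO_QOBJS) {	/* full: free the oldest */
		free(demo_ring[demo_first]);
		demo_first = (demo_first + 1) % DEMO_QOBJS;
		demo_curobjs--;
	}
	demo_ring[(demo_first + demo_curobjs) % DEMO_QOBJS] = ptr;
	demo_curobjs++;
}

int
main(void)
{
	int i;

	for (i = 0; i < 10; i++)
		demo_quarantine(malloc(16));
	while (demo_curobjs > 0) {	/* drain on teardown */
		free(demo_ring[demo_first]);
		demo_first = (demo_first + 1) % DEMO_QOBJS;
		demo_curobjs--;
	}
	return (0);
}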
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/jemalloc/include/jemalloc/internal/valgrind.h
/******************************************************************************/ #ifdef JEMALLOC_H_TYPES #ifdef JEMALLOC_VALGRIND #include <valgrind/valgrind.h> /* * The size that is reported to Valgrind must be consistent through a chain of * malloc..realloc..realloc calls. Request size isn't recorded anywhere in * jemalloc, so it is critical that all callers of these macros provide usize * rather than request size. As a result, buffer overflow detection is * technically weakened for the standard API, though it is generally accepted * practice to consider any extra bytes reported by malloc_usable_size() as * usable space. */ #define JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(ptr, usize) do { \ if (unlikely(in_valgrind)) \ valgrind_make_mem_noaccess(ptr, usize); \ } while (0) #define JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ptr, usize) do { \ if (unlikely(in_valgrind)) \ valgrind_make_mem_undefined(ptr, usize); \ } while (0) #define JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ptr, usize) do { \ if (unlikely(in_valgrind)) \ valgrind_make_mem_defined(ptr, usize); \ } while (0) /* * The VALGRIND_MALLOCLIKE_BLOCK() and VALGRIND_RESIZEINPLACE_BLOCK() macro * calls must be embedded in macros rather than in functions so that when * Valgrind reports errors, there are no extra stack frames in the backtraces. */ #define JEMALLOC_VALGRIND_MALLOC(cond, tsdn, ptr, usize, zero) do { \ if (unlikely(in_valgrind && cond)) { \ VALGRIND_MALLOCLIKE_BLOCK(ptr, usize, p2rz(tsdn, ptr), \ zero); \ } \ } while (0) #define JEMALLOC_VALGRIND_REALLOC_MOVED_no(ptr, old_ptr) \ (false) #define JEMALLOC_VALGRIND_REALLOC_MOVED_maybe(ptr, old_ptr) \ ((ptr) != (old_ptr)) #define JEMALLOC_VALGRIND_REALLOC_PTR_NULL_no(ptr) \ (false) #define JEMALLOC_VALGRIND_REALLOC_PTR_NULL_maybe(ptr) \ (ptr == NULL) #define JEMALLOC_VALGRIND_REALLOC_OLD_PTR_NULL_no(old_ptr) \ (false) #define JEMALLOC_VALGRIND_REALLOC_OLD_PTR_NULL_maybe(old_ptr) \ (old_ptr == NULL) #define JEMALLOC_VALGRIND_REALLOC(moved, tsdn, ptr, usize, ptr_null, \ old_ptr, old_usize, old_rzsize, old_ptr_null, zero) do { \ if (unlikely(in_valgrind)) { \ size_t rzsize = p2rz(tsdn, ptr); \ \ if (!JEMALLOC_VALGRIND_REALLOC_MOVED_##moved(ptr, \ old_ptr)) { \ VALGRIND_RESIZEINPLACE_BLOCK(ptr, old_usize, \ usize, rzsize); \ if (zero && old_usize < usize) { \ valgrind_make_mem_defined( \ (void *)((uintptr_t)ptr + \ old_usize), usize - old_usize); \ } \ } else { \ if (!JEMALLOC_VALGRIND_REALLOC_OLD_PTR_NULL_## \ old_ptr_null(old_ptr)) { \ valgrind_freelike_block(old_ptr, \ old_rzsize); \ } \ if (!JEMALLOC_VALGRIND_REALLOC_PTR_NULL_## \ ptr_null(ptr)) { \ size_t copy_size = (old_usize < usize) \ ? 
old_usize : usize; \ size_t tail_size = usize - copy_size; \ VALGRIND_MALLOCLIKE_BLOCK(ptr, usize, \ rzsize, false); \ if (copy_size > 0) { \ valgrind_make_mem_defined(ptr, \ copy_size); \ } \ if (zero && tail_size > 0) { \ valgrind_make_mem_defined( \ (void *)((uintptr_t)ptr + \ copy_size), tail_size); \ } \ } \ } \ } \ } while (0) #define JEMALLOC_VALGRIND_FREE(ptr, rzsize) do { \ if (unlikely(in_valgrind)) \ valgrind_freelike_block(ptr, rzsize); \ } while (0) #else #define RUNNING_ON_VALGRIND ((unsigned)0) #define JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(ptr, usize) do {} while (0) #define JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ptr, usize) do {} while (0) #define JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ptr, usize) do {} while (0) #define JEMALLOC_VALGRIND_MALLOC(cond, tsdn, ptr, usize, zero) do {} while (0) #define JEMALLOC_VALGRIND_REALLOC(maybe_moved, tsdn, ptr, usize, \ ptr_maybe_null, old_ptr, old_usize, old_rzsize, old_ptr_maybe_null, \ zero) do {} while (0) #define JEMALLOC_VALGRIND_FREE(ptr, rzsize) do {} while (0) #endif #endif /* JEMALLOC_H_TYPES */ /******************************************************************************/ #ifdef JEMALLOC_H_STRUCTS #endif /* JEMALLOC_H_STRUCTS */ /******************************************************************************/ #ifdef JEMALLOC_H_EXTERNS #ifdef JEMALLOC_VALGRIND void valgrind_make_mem_noaccess(void *ptr, size_t usize); void valgrind_make_mem_undefined(void *ptr, size_t usize); void valgrind_make_mem_defined(void *ptr, size_t usize); void valgrind_freelike_block(void *ptr, size_t usize); #endif #endif /* JEMALLOC_H_EXTERNS */ /******************************************************************************/ #ifdef JEMALLOC_H_INLINES #endif /* JEMALLOC_H_INLINES */ /******************************************************************************/
4,841
36.534884
80
h
null
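The macros above wrap Valgrind's client requests; the key constraint, per the header comment, is that the size reported for a block must stay consistent (usize, never request size) across its lifetime. A standalone sketch of the underlying pattern using the real VALGRIND_MALLOCLIKE_BLOCK()/VALGRIND_FREELIKE_BLOCK() requests; DEMO_HAVE_VALGRIND is a hypothetical build flag, and a real allocator would apply this only to memory Valgrind does not already track (e.g. raw mmap'd chunks), since Valgrind intercepts malloc itself:

#include <stdlib.h>

#ifdef DEMO_HAVE_VALGRIND
#include <valgrind/valgrind.h>
#else
/* No-op stubs so the sketch builds without the valgrind headers. */
#define VALGRIND_MALLOCLIKE_BLOCK(addr, sizeB, rzB, is_zeroed) do {} while (0)
#define VALGRIND_FREELIKE_BLOCK(addr, rzB) do {} while (0)
#endif

static void *
demo_alloc(size_t usize)
{
	void *p = malloc(usize);

	if (p != NULL)
		VALGRIND_MALLOCLIKE_BLOCK(p, usize, 0, 0); /* report usize */
	return (p);
}

static void
demo_free(void *p)
{
	VALGRIND_FREELIKE_BLOCK(p, 0);	/* must precede the real free */
	free(p);
}

int
main(void)
{
	demo_free(demo_alloc(64));
	return (0);
}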
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/jemalloc/include/jemalloc/internal/extent.h
/******************************************************************************/ #ifdef JEMALLOC_H_TYPES typedef struct extent_node_s extent_node_t; #endif /* JEMALLOC_H_TYPES */ /******************************************************************************/ #ifdef JEMALLOC_H_STRUCTS /* Tree of extents. Use accessor functions for en_* fields. */ struct extent_node_s { /* Arena from which this extent came, if any. */ arena_t *en_arena; /* Pointer to the extent that this tree node is responsible for. */ void *en_addr; /* Total region size. */ size_t en_size; /* * Serial number (potentially non-unique). * * In principle serial numbers can wrap around on 32-bit systems if * JEMALLOC_MUNMAP is defined, but as long as comparison functions fall * back on address comparison for equal serial numbers, stable (if * imperfect) ordering is maintained. * * Serial numbers may not be unique even in the absence of wrap-around, * e.g. when splitting an extent and assigning the same serial number to * both resulting adjacent extents. */ size_t en_sn; /* * The zeroed flag is used by chunk recycling code to track whether * memory is zero-filled. */ bool en_zeroed; /* * True if physical memory is committed to the extent, whether * explicitly or implicitly as on a system that overcommits and * satisfies physical memory needs on demand via soft page faults. */ bool en_committed; /* * The achunk flag is used to validate that huge allocation lookups * don't return arena chunks. */ bool en_achunk; /* Profile counters, used for huge objects. */ prof_tctx_t *en_prof_tctx; /* Linkage for arena's runs_dirty and chunks_cache rings. */ arena_runs_dirty_link_t rd; qr(extent_node_t) cc_link; union { /* Linkage for the size/sn/address-ordered tree. */ rb_node(extent_node_t) szsnad_link; /* Linkage for arena's achunks, huge, and node_cache lists. */ ql_elm(extent_node_t) ql_link; }; /* Linkage for the address-ordered tree. 
*/ rb_node(extent_node_t) ad_link; }; typedef rb_tree(extent_node_t) extent_tree_t; #endif /* JEMALLOC_H_STRUCTS */ /******************************************************************************/ #ifdef JEMALLOC_H_EXTERNS rb_proto(, extent_tree_szsnad_, extent_tree_t, extent_node_t) rb_proto(, extent_tree_ad_, extent_tree_t, extent_node_t) #endif /* JEMALLOC_H_EXTERNS */ /******************************************************************************/ #ifdef JEMALLOC_H_INLINES #ifndef JEMALLOC_ENABLE_INLINE arena_t *extent_node_arena_get(const extent_node_t *node); void *extent_node_addr_get(const extent_node_t *node); size_t extent_node_size_get(const extent_node_t *node); size_t extent_node_sn_get(const extent_node_t *node); bool extent_node_zeroed_get(const extent_node_t *node); bool extent_node_committed_get(const extent_node_t *node); bool extent_node_achunk_get(const extent_node_t *node); prof_tctx_t *extent_node_prof_tctx_get(const extent_node_t *node); void extent_node_arena_set(extent_node_t *node, arena_t *arena); void extent_node_addr_set(extent_node_t *node, void *addr); void extent_node_size_set(extent_node_t *node, size_t size); void extent_node_sn_set(extent_node_t *node, size_t sn); void extent_node_zeroed_set(extent_node_t *node, bool zeroed); void extent_node_committed_set(extent_node_t *node, bool committed); void extent_node_achunk_set(extent_node_t *node, bool achunk); void extent_node_prof_tctx_set(extent_node_t *node, prof_tctx_t *tctx); void extent_node_init(extent_node_t *node, arena_t *arena, void *addr, size_t size, size_t sn, bool zeroed, bool committed); void extent_node_dirty_linkage_init(extent_node_t *node); void extent_node_dirty_insert(extent_node_t *node, arena_runs_dirty_link_t *runs_dirty, extent_node_t *chunks_dirty); void extent_node_dirty_remove(extent_node_t *node); #endif #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_EXTENT_C_)) JEMALLOC_INLINE arena_t * extent_node_arena_get(const extent_node_t *node) { return (node->en_arena); } JEMALLOC_INLINE void * extent_node_addr_get(const extent_node_t *node) { return (node->en_addr); } JEMALLOC_INLINE size_t extent_node_size_get(const extent_node_t *node) { return (node->en_size); } JEMALLOC_INLINE size_t extent_node_sn_get(const extent_node_t *node) { return (node->en_sn); } JEMALLOC_INLINE bool extent_node_zeroed_get(const extent_node_t *node) { return (node->en_zeroed); } JEMALLOC_INLINE bool extent_node_committed_get(const extent_node_t *node) { assert(!node->en_achunk); return (node->en_committed); } JEMALLOC_INLINE bool extent_node_achunk_get(const extent_node_t *node) { return (node->en_achunk); } JEMALLOC_INLINE prof_tctx_t * extent_node_prof_tctx_get(const extent_node_t *node) { return (node->en_prof_tctx); } JEMALLOC_INLINE void extent_node_arena_set(extent_node_t *node, arena_t *arena) { node->en_arena = arena; } JEMALLOC_INLINE void extent_node_addr_set(extent_node_t *node, void *addr) { node->en_addr = addr; } JEMALLOC_INLINE void extent_node_size_set(extent_node_t *node, size_t size) { node->en_size = size; } JEMALLOC_INLINE void extent_node_sn_set(extent_node_t *node, size_t sn) { node->en_sn = sn; } JEMALLOC_INLINE void extent_node_zeroed_set(extent_node_t *node, bool zeroed) { node->en_zeroed = zeroed; } JEMALLOC_INLINE void extent_node_committed_set(extent_node_t *node, bool committed) { node->en_committed = committed; } JEMALLOC_INLINE void extent_node_achunk_set(extent_node_t *node, bool achunk) { node->en_achunk = achunk; } JEMALLOC_INLINE void 
extent_node_prof_tctx_set(extent_node_t *node, prof_tctx_t *tctx) { node->en_prof_tctx = tctx; } JEMALLOC_INLINE void extent_node_init(extent_node_t *node, arena_t *arena, void *addr, size_t size, size_t sn, bool zeroed, bool committed) { extent_node_arena_set(node, arena); extent_node_addr_set(node, addr); extent_node_size_set(node, size); extent_node_sn_set(node, sn); extent_node_zeroed_set(node, zeroed); extent_node_committed_set(node, committed); extent_node_achunk_set(node, false); if (config_prof) extent_node_prof_tctx_set(node, NULL); } JEMALLOC_INLINE void extent_node_dirty_linkage_init(extent_node_t *node) { qr_new(&node->rd, rd_link); qr_new(node, cc_link); } JEMALLOC_INLINE void extent_node_dirty_insert(extent_node_t *node, arena_runs_dirty_link_t *runs_dirty, extent_node_t *chunks_dirty) { qr_meld(runs_dirty, &node->rd, rd_link); qr_meld(chunks_dirty, node, cc_link); } JEMALLOC_INLINE void extent_node_dirty_remove(extent_node_t *node) { qr_remove(&node->rd, rd_link); qr_remove(node, cc_link); } #endif #endif /* JEMALLOC_H_INLINES */ /******************************************************************************/
6,787
24.04797
80
h
null
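The szsnad tree above orders extents by size, then serial number, then address; as the en_sn comment notes, the address fallback keeps the order total and stable even when serial numbers collide (wrap-around, or splits that share one sn). A hedged standalone sketch of that comparator over a simplified struct, not the jemalloc rb-tree glue:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

typedef struct {
	void	*addr;
	size_t	size;
	size_t	sn;
} demo_extent_t;

static int
demo_extent_szsnad_cmp(const demo_extent_t *a, const demo_extent_t *b)
{
	if (a->size != b->size)		/* primary: size */
		return (a->size < b->size ? -1 : 1);
	if (a->sn != b->sn)		/* secondary: serial number */
		return (a->sn < b->sn ? -1 : 1);
	if ((uintptr_t)a->addr != (uintptr_t)b->addr)	/* tiebreak: address */
		return ((uintptr_t)a->addr < (uintptr_t)b->addr ? -1 : 1);
	return (0);
}

int
main(void)
{
	demo_extent_t a = { (void *)0x1000, 4096, 7 };
	demo_extent_t b = { (void *)0x2000, 4096, 7 };

	assert(demo_extent_szsnad_cmp(&a, &b) < 0); /* same size+sn: address */
	return (0);
}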
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/jemalloc/include/jemalloc/internal/chunk_dss.h
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES

typedef enum {
	dss_prec_disabled  = 0,
	dss_prec_primary   = 1,
	dss_prec_secondary = 2,

	dss_prec_limit     = 3
} dss_prec_t;
#define	DSS_PREC_DEFAULT	dss_prec_secondary
#define	DSS_DEFAULT		"secondary"

#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS

extern const char *dss_prec_names[];

#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

dss_prec_t	chunk_dss_prec_get(void);
bool	chunk_dss_prec_set(dss_prec_t dss_prec);
void	*chunk_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr,
    size_t size, size_t alignment, bool *zero, bool *commit);
bool	chunk_in_dss(void *chunk);
bool	chunk_dss_mergeable(void *chunk_a, void *chunk_b);
void	chunk_dss_boot(void);

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/
1,211
30.894737
80
h
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/jemalloc/include/jemalloc/internal/jemalloc_internal_macros.h
/*
 * JEMALLOC_ALWAYS_INLINE and JEMALLOC_INLINE are used within header files for
 * functions that are static inline functions if inlining is enabled, and
 * single-definition library-private functions if inlining is disabled.
 *
 * JEMALLOC_ALWAYS_INLINE_C and JEMALLOC_INLINE_C are for use in .c files, in
 * which case the denoted functions are always static, regardless of whether
 * inlining is enabled.
 */
#if defined(JEMALLOC_DEBUG) || defined(JEMALLOC_CODE_COVERAGE)
   /* Disable inlining to make debugging/profiling easier. */
#  define JEMALLOC_ALWAYS_INLINE
#  define JEMALLOC_ALWAYS_INLINE_C static
#  define JEMALLOC_INLINE
#  define JEMALLOC_INLINE_C static
#  define inline
#else
#  define JEMALLOC_ENABLE_INLINE
#  ifdef JEMALLOC_HAVE_ATTR
#    define JEMALLOC_ALWAYS_INLINE \
	 static inline JEMALLOC_ATTR(unused) JEMALLOC_ATTR(always_inline)
#    define JEMALLOC_ALWAYS_INLINE_C \
	 static inline JEMALLOC_ATTR(always_inline)
#  else
#    define JEMALLOC_ALWAYS_INLINE static inline
#    define JEMALLOC_ALWAYS_INLINE_C static inline
#  endif
#  define JEMALLOC_INLINE static inline
#  define JEMALLOC_INLINE_C static inline
#  ifdef _MSC_VER
#    define inline _inline
#  endif
#endif

#ifdef JEMALLOC_CC_SILENCE
#  define UNUSED JEMALLOC_ATTR(unused)
#else
#  define UNUSED
#endif

#define	ZU(z)	((size_t)z)
#define	ZI(z)	((ssize_t)z)
#define	QU(q)	((uint64_t)q)
#define	QI(q)	((int64_t)q)

#define	KZU(z)	ZU(z##ULL)
#define	KZI(z)	ZI(z##LL)
#define	KQU(q)	QU(q##ULL)
#define	KQI(q)	QI(q##LL)

#ifndef __DECONST
#  define	__DECONST(type, var)	((type)(uintptr_t)(const void *)(var))
#endif

#ifndef JEMALLOC_HAS_RESTRICT
#  define restrict
#endif
1,669
27.793103
78
h
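The KQU()/KZU() macros above paste a ULL/LL suffix onto the literal before casting, so that large 64-bit constants are well-formed regardless of the compiler's default integer-literal width. A minimal standalone demo of the same pattern (macro bodies copied from the header) follows.

/* Demo of the suffix-pasting constant macros; QU/KQU copied from the
 * header. Without the ULL suffix, 0xff51afd7ed558ccd could overflow
 * the default literal type on some compilers. */
#include <stdint.h>
#include <stdio.h>

#define QU(q)	((uint64_t)q)
#define KQU(q)	QU(q##ULL)

int
main(void)
{
	uint64_t k = KQU(0xff51afd7ed558ccd);	/* constant from hash_fmix_64() */
	printf("%llx (%zu bytes)\n", (unsigned long long)k, sizeof(k));
	return (0);
}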
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/jemalloc/include/jemalloc/internal/pages.h
/******************************************************************************/ #ifdef JEMALLOC_H_TYPES #endif /* JEMALLOC_H_TYPES */ /******************************************************************************/ #ifdef JEMALLOC_H_STRUCTS #endif /* JEMALLOC_H_STRUCTS */ /******************************************************************************/ #ifdef JEMALLOC_H_EXTERNS void *pages_map(void *addr, size_t size, bool *commit); void pages_unmap(void *addr, size_t size); void *pages_trim(void *addr, size_t alloc_size, size_t leadsize, size_t size, bool *commit); bool pages_commit(void *addr, size_t size); bool pages_decommit(void *addr, size_t size); bool pages_purge(void *addr, size_t size); bool pages_huge(void *addr, size_t size); bool pages_nohuge(void *addr, size_t size); void pages_boot(void); #endif /* JEMALLOC_H_EXTERNS */ /******************************************************************************/ #ifdef JEMALLOC_H_INLINES #endif /* JEMALLOC_H_INLINES */ /******************************************************************************/
1,077
34.933333
80
h
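pages.h splits mapping from committing: pages_map() takes a bool *commit, and pages_commit()/pages_decommit() toggle access later. The sketch below shows one plausible POSIX realization of that reserve-then-commit pattern using mmap/mprotect; it is a simplification for illustration, not the library's actual implementation, which also handles Windows, overcommit semantics, and trimming.

/* Reserve-then-commit sketch (POSIX/Linux): reserve address space with
 * PROT_NONE, enable access on "commit", drop it on "decommit". */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int
main(void)
{
	size_t size = 4 * 4096;
	/* Reserve only: no read/write access yet (pages_map analogue). */
	void *addr = mmap(NULL, size, PROT_NONE,
	    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (addr == MAP_FAILED)
		return (1);
	/* Commit: make the pages usable (pages_commit analogue). */
	if (mprotect(addr, size, PROT_READ | PROT_WRITE) != 0)
		return (1);
	memset(addr, 0xa5, size);
	/* Decommit: revoke access again (pages_decommit analogue). */
	mprotect(addr, size, PROT_NONE);
	munmap(addr, size);
	puts("ok");
	return (0);
}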
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/jemalloc/include/jemalloc/internal/prof.h
/******************************************************************************/ #ifdef JEMALLOC_H_TYPES typedef struct prof_bt_s prof_bt_t; typedef struct prof_cnt_s prof_cnt_t; typedef struct prof_tctx_s prof_tctx_t; typedef struct prof_gctx_s prof_gctx_t; typedef struct prof_tdata_s prof_tdata_t; /* Option defaults. */ #ifdef JEMALLOC_PROF # define PROF_PREFIX_DEFAULT "jeprof" #else # define PROF_PREFIX_DEFAULT "" #endif #define LG_PROF_SAMPLE_DEFAULT 19 #define LG_PROF_INTERVAL_DEFAULT -1 /* * Hard limit on stack backtrace depth. The version of prof_backtrace() that * is based on __builtin_return_address() necessarily has a hard-coded number * of backtrace frame handlers, and should be kept in sync with this setting. */ #define PROF_BT_MAX 128 /* Initial hash table size. */ #define PROF_CKH_MINITEMS 64 /* Size of memory buffer to use when writing dump files. */ #define PROF_DUMP_BUFSIZE 65536 /* Size of stack-allocated buffer used by prof_printf(). */ #define PROF_PRINTF_BUFSIZE 128 /* * Number of mutexes shared among all gctx's. No space is allocated for these * unless profiling is enabled, so it's okay to over-provision. */ #define PROF_NCTX_LOCKS 1024 /* * Number of mutexes shared among all tdata's. No space is allocated for these * unless profiling is enabled, so it's okay to over-provision. */ #define PROF_NTDATA_LOCKS 256 /* * prof_tdata pointers close to NULL are used to encode state information that * is used for cleaning up during thread shutdown. */ #define PROF_TDATA_STATE_REINCARNATED ((prof_tdata_t *)(uintptr_t)1) #define PROF_TDATA_STATE_PURGATORY ((prof_tdata_t *)(uintptr_t)2) #define PROF_TDATA_STATE_MAX PROF_TDATA_STATE_PURGATORY #endif /* JEMALLOC_H_TYPES */ /******************************************************************************/ #ifdef JEMALLOC_H_STRUCTS struct prof_bt_s { /* Backtrace, stored as len program counters. */ void **vec; unsigned len; }; #ifdef JEMALLOC_PROF_LIBGCC /* Data structure passed to libgcc _Unwind_Backtrace() callback functions. */ typedef struct { prof_bt_t *bt; unsigned max; } prof_unwind_data_t; #endif struct prof_cnt_s { /* Profiling counters. */ uint64_t curobjs; uint64_t curbytes; uint64_t accumobjs; uint64_t accumbytes; }; typedef enum { prof_tctx_state_initializing, prof_tctx_state_nominal, prof_tctx_state_dumping, prof_tctx_state_purgatory /* Dumper must finish destroying. */ } prof_tctx_state_t; struct prof_tctx_s { /* Thread data for thread that performed the allocation. */ prof_tdata_t *tdata; /* * Copy of tdata->thr_{uid,discrim}, necessary because tdata may be * defunct during teardown. */ uint64_t thr_uid; uint64_t thr_discrim; /* Profiling counters, protected by tdata->lock. */ prof_cnt_t cnts; /* Associated global context. */ prof_gctx_t *gctx; /* * UID that distinguishes multiple tctx's created by the same thread, * but coexisting in gctx->tctxs. There are two ways that such * coexistence can occur: * - A dumper thread can cause a tctx to be retained in the purgatory * state. * - Although a single "producer" thread must create all tctx's which * share the same thr_uid, multiple "consumers" can each concurrently * execute portions of prof_tctx_destroy(). prof_tctx_destroy() only * gets called once each time cnts.cur{objs,bytes} drop to 0, but this * threshold can be hit again before the first consumer finishes * executing prof_tctx_destroy(). */ uint64_t tctx_uid; /* Linkage into gctx's tctxs. 
*/ rb_node(prof_tctx_t) tctx_link; /* * True during prof_alloc_prep()..prof_malloc_sample_object(), prevents * sample vs destroy race. */ bool prepared; /* Current dump-related state, protected by gctx->lock. */ prof_tctx_state_t state; /* * Copy of cnts snapshotted during early dump phase, protected by * dump_mtx. */ prof_cnt_t dump_cnts; }; typedef rb_tree(prof_tctx_t) prof_tctx_tree_t; struct prof_gctx_s { /* Protects nlimbo, cnt_summed, and tctxs. */ malloc_mutex_t *lock; /* * Number of threads that currently cause this gctx to be in a state of * limbo due to one of: * - Initializing this gctx. * - Initializing per thread counters associated with this gctx. * - Preparing to destroy this gctx. * - Dumping a heap profile that includes this gctx. * nlimbo must be 1 (single destroyer) in order to safely destroy the * gctx. */ unsigned nlimbo; /* * Tree of profile counters, one for each thread that has allocated in * this context. */ prof_tctx_tree_t tctxs; /* Linkage for tree of contexts to be dumped. */ rb_node(prof_gctx_t) dump_link; /* Temporary storage for summation during dump. */ prof_cnt_t cnt_summed; /* Associated backtrace. */ prof_bt_t bt; /* Backtrace vector, variable size, referred to by bt. */ void *vec[1]; }; typedef rb_tree(prof_gctx_t) prof_gctx_tree_t; struct prof_tdata_s { malloc_mutex_t *lock; /* Monotonically increasing unique thread identifier. */ uint64_t thr_uid; /* * Monotonically increasing discriminator among tdata structures * associated with the same thr_uid. */ uint64_t thr_discrim; /* Included in heap profile dumps if non-NULL. */ char *thread_name; bool attached; bool expired; rb_node(prof_tdata_t) tdata_link; /* * Counter used to initialize prof_tctx_t's tctx_uid. No locking is * necessary when incrementing this field, because only one thread ever * does so. */ uint64_t tctx_uid_next; /* * Hash of (prof_bt_t *)-->(prof_tctx_t *). Each thread tracks * backtraces for which it has non-zero allocation/deallocation counters * associated with thread-specific prof_tctx_t objects. Other threads * may write to prof_tctx_t contents when freeing associated objects. */ ckh_t bt2tctx; /* Sampling state. */ uint64_t prng_state; uint64_t bytes_until_sample; /* State used to avoid dumping while operating on prof internals. */ bool enq; bool enq_idump; bool enq_gdump; /* * Set to true during an early dump phase for tdata's which are * currently being dumped. New threads' tdata's have this initialized * to false so that they aren't accidentally included in later dump * phases. */ bool dumping; /* * True if profiling is active for this tdata's thread * (thread.prof.active mallctl). */ bool active; /* Temporary storage for summation during dump. */ prof_cnt_t cnt_summed; /* Backtrace vector, used for calls to prof_backtrace(). */ void *vec[PROF_BT_MAX]; }; typedef rb_tree(prof_tdata_t) prof_tdata_tree_t; #endif /* JEMALLOC_H_STRUCTS */ /******************************************************************************/ #ifdef JEMALLOC_H_EXTERNS extern bool opt_prof; extern bool opt_prof_active; extern bool opt_prof_thread_active_init; extern size_t opt_lg_prof_sample; /* Mean bytes between samples. */ extern ssize_t opt_lg_prof_interval; /* lg(prof_interval). */ extern bool opt_prof_gdump; /* High-water memory dumping. */ extern bool opt_prof_final; /* Final profile dumping. */ extern bool opt_prof_leak; /* Dump leak summary at exit. */ extern bool opt_prof_accum; /* Report cumulative bytes. */ extern char opt_prof_prefix[ /* Minimize memory bloat for non-prof builds. 
*/ #ifdef JEMALLOC_PROF PATH_MAX + #endif 1]; /* Accessed via prof_active_[gs]et{_unlocked,}(). */ extern bool prof_active; /* Accessed via prof_gdump_[gs]et{_unlocked,}(). */ extern bool prof_gdump_val; /* * Profile dump interval, measured in bytes allocated. Each arena triggers a * profile dump when it reaches this threshold. The effect is that the * interval between profile dumps averages prof_interval, though the actual * interval between dumps will tend to be sporadic, and the interval will be a * maximum of approximately (prof_interval * narenas). */ extern uint64_t prof_interval; /* * Initialized as opt_lg_prof_sample, and potentially modified during profiling * resets. */ extern size_t lg_prof_sample; void prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx, bool updated); void prof_malloc_sample_object(tsdn_t *tsdn, const void *ptr, size_t usize, prof_tctx_t *tctx); void prof_free_sampled_object(tsd_t *tsd, size_t usize, prof_tctx_t *tctx); void bt_init(prof_bt_t *bt, void **vec); void prof_backtrace(prof_bt_t *bt); prof_tctx_t *prof_lookup(tsd_t *tsd, prof_bt_t *bt); #ifdef JEMALLOC_JET size_t prof_tdata_count(void); size_t prof_bt_count(void); const prof_cnt_t *prof_cnt_all(void); typedef int (prof_dump_open_t)(bool, const char *); extern prof_dump_open_t *prof_dump_open; typedef bool (prof_dump_header_t)(tsdn_t *, bool, const prof_cnt_t *); extern prof_dump_header_t *prof_dump_header; #endif void prof_idump(tsdn_t *tsdn); bool prof_mdump(tsd_t *tsd, const char *filename); void prof_gdump(tsdn_t *tsdn); prof_tdata_t *prof_tdata_init(tsd_t *tsd); prof_tdata_t *prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata); void prof_reset(tsd_t *tsd, size_t lg_sample); void prof_tdata_cleanup(tsd_t *tsd); bool prof_active_get(tsdn_t *tsdn); bool prof_active_set(tsdn_t *tsdn, bool active); const char *prof_thread_name_get(tsd_t *tsd); int prof_thread_name_set(tsd_t *tsd, const char *thread_name); bool prof_thread_active_get(tsd_t *tsd); bool prof_thread_active_set(tsd_t *tsd, bool active); bool prof_thread_active_init_get(tsdn_t *tsdn); bool prof_thread_active_init_set(tsdn_t *tsdn, bool active_init); bool prof_gdump_get(tsdn_t *tsdn); bool prof_gdump_set(tsdn_t *tsdn, bool active); void prof_boot0(void); void prof_boot1(void); bool prof_boot2(tsd_t *tsd); void prof_prefork0(tsdn_t *tsdn); void prof_prefork1(tsdn_t *tsdn); void prof_postfork_parent(tsdn_t *tsdn); void prof_postfork_child(tsdn_t *tsdn); void prof_sample_threshold_update(prof_tdata_t *tdata); #endif /* JEMALLOC_H_EXTERNS */ /******************************************************************************/ #ifdef JEMALLOC_H_INLINES #ifndef JEMALLOC_ENABLE_INLINE bool prof_active_get_unlocked(void); bool prof_gdump_get_unlocked(void); prof_tdata_t *prof_tdata_get(tsd_t *tsd, bool create); prof_tctx_t *prof_tctx_get(tsdn_t *tsdn, const void *ptr); void prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize, prof_tctx_t *tctx); void prof_tctx_reset(tsdn_t *tsdn, const void *ptr, size_t usize, const void *old_ptr, prof_tctx_t *tctx); bool prof_sample_accum_update(tsd_t *tsd, size_t usize, bool commit, prof_tdata_t **tdata_out); prof_tctx_t *prof_alloc_prep(tsd_t *tsd, size_t usize, bool prof_active, bool update); void prof_malloc(tsdn_t *tsdn, const void *ptr, size_t usize, prof_tctx_t *tctx); void prof_realloc(tsd_t *tsd, const void *ptr, size_t usize, prof_tctx_t *tctx, bool prof_active, bool updated, const void *old_ptr, size_t old_usize, prof_tctx_t *old_tctx); void prof_free(tsd_t *tsd, const void *ptr, size_t usize); #endif #if 
(defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_PROF_C_)) JEMALLOC_ALWAYS_INLINE bool prof_active_get_unlocked(void) { /* * Even if opt_prof is true, sampling can be temporarily disabled by * setting prof_active to false. No locking is used when reading * prof_active in the fast path, so there are no guarantees regarding * how long it will take for all threads to notice state changes. */ return (prof_active); } JEMALLOC_ALWAYS_INLINE bool prof_gdump_get_unlocked(void) { /* * No locking is used when reading prof_gdump_val in the fast path, so * there are no guarantees regarding how long it will take for all * threads to notice state changes. */ return (prof_gdump_val); } JEMALLOC_ALWAYS_INLINE prof_tdata_t * prof_tdata_get(tsd_t *tsd, bool create) { prof_tdata_t *tdata; cassert(config_prof); tdata = tsd_prof_tdata_get(tsd); if (create) { if (unlikely(tdata == NULL)) { if (tsd_nominal(tsd)) { tdata = prof_tdata_init(tsd); tsd_prof_tdata_set(tsd, tdata); } } else if (unlikely(tdata->expired)) { tdata = prof_tdata_reinit(tsd, tdata); tsd_prof_tdata_set(tsd, tdata); } assert(tdata == NULL || tdata->attached); } return (tdata); } JEMALLOC_ALWAYS_INLINE prof_tctx_t * prof_tctx_get(tsdn_t *tsdn, const void *ptr) { cassert(config_prof); assert(ptr != NULL); return (arena_prof_tctx_get(tsdn, ptr)); } JEMALLOC_ALWAYS_INLINE void prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize, prof_tctx_t *tctx) { cassert(config_prof); assert(ptr != NULL); arena_prof_tctx_set(tsdn, ptr, usize, tctx); } JEMALLOC_ALWAYS_INLINE void prof_tctx_reset(tsdn_t *tsdn, const void *ptr, size_t usize, const void *old_ptr, prof_tctx_t *old_tctx) { cassert(config_prof); assert(ptr != NULL); arena_prof_tctx_reset(tsdn, ptr, usize, old_ptr, old_tctx); } JEMALLOC_ALWAYS_INLINE bool prof_sample_accum_update(tsd_t *tsd, size_t usize, bool update, prof_tdata_t **tdata_out) { prof_tdata_t *tdata; cassert(config_prof); tdata = prof_tdata_get(tsd, true); if (unlikely((uintptr_t)tdata <= (uintptr_t)PROF_TDATA_STATE_MAX)) tdata = NULL; if (tdata_out != NULL) *tdata_out = tdata; if (unlikely(tdata == NULL)) return (true); if (likely(tdata->bytes_until_sample >= usize)) { if (update) tdata->bytes_until_sample -= usize; return (true); } else { /* Compute new sample threshold. 
*/ if (update) prof_sample_threshold_update(tdata); return (!tdata->active); } } JEMALLOC_ALWAYS_INLINE prof_tctx_t * prof_alloc_prep(tsd_t *tsd, size_t usize, bool prof_active, bool update) { prof_tctx_t *ret; prof_tdata_t *tdata; prof_bt_t bt; assert(usize == s2u(usize)); if (!prof_active || likely(prof_sample_accum_update(tsd, usize, update, &tdata))) ret = (prof_tctx_t *)(uintptr_t)1U; else { bt_init(&bt, tdata->vec); prof_backtrace(&bt); ret = prof_lookup(tsd, &bt); } return (ret); } JEMALLOC_ALWAYS_INLINE void prof_malloc(tsdn_t *tsdn, const void *ptr, size_t usize, prof_tctx_t *tctx) { cassert(config_prof); assert(ptr != NULL); assert(usize == isalloc(tsdn, ptr, true)); if (unlikely((uintptr_t)tctx > (uintptr_t)1U)) prof_malloc_sample_object(tsdn, ptr, usize, tctx); else prof_tctx_set(tsdn, ptr, usize, (prof_tctx_t *)(uintptr_t)1U); } JEMALLOC_ALWAYS_INLINE void prof_realloc(tsd_t *tsd, const void *ptr, size_t usize, prof_tctx_t *tctx, bool prof_active, bool updated, const void *old_ptr, size_t old_usize, prof_tctx_t *old_tctx) { bool sampled, old_sampled; cassert(config_prof); assert(ptr != NULL || (uintptr_t)tctx <= (uintptr_t)1U); if (prof_active && !updated && ptr != NULL) { assert(usize == isalloc(tsd_tsdn(tsd), ptr, true)); if (prof_sample_accum_update(tsd, usize, true, NULL)) { /* * Don't sample. The usize passed to prof_alloc_prep() * was larger than what actually got allocated, so a * backtrace was captured for this allocation, even * though its actual usize was insufficient to cross the * sample threshold. */ prof_alloc_rollback(tsd, tctx, true); tctx = (prof_tctx_t *)(uintptr_t)1U; } } sampled = ((uintptr_t)tctx > (uintptr_t)1U); old_sampled = ((uintptr_t)old_tctx > (uintptr_t)1U); if (unlikely(sampled)) prof_malloc_sample_object(tsd_tsdn(tsd), ptr, usize, tctx); else prof_tctx_reset(tsd_tsdn(tsd), ptr, usize, old_ptr, old_tctx); if (unlikely(old_sampled)) prof_free_sampled_object(tsd, old_usize, old_tctx); } JEMALLOC_ALWAYS_INLINE void prof_free(tsd_t *tsd, const void *ptr, size_t usize) { prof_tctx_t *tctx = prof_tctx_get(tsd_tsdn(tsd), ptr); cassert(config_prof); assert(usize == isalloc(tsd_tsdn(tsd), ptr, true)); if (unlikely((uintptr_t)tctx > (uintptr_t)1U)) prof_free_sampled_object(tsd, usize, tctx); } #endif #endif /* JEMALLOC_H_INLINES */ /******************************************************************************/
15,844
27.914234
81
h
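prof_sample_accum_update() above implements sampling by drawing each allocation against a byte budget (bytes_until_sample) and only capturing a backtrace when an allocation crosses the threshold. A standalone sketch of that counter logic follows; the fixed threshold redraw is a deliberate simplification of prof_sample_threshold_update()'s geometric resampling.

/* Byte-budget sampling sketch: the common path just decrements a
 * counter; only the allocation that exhausts it gets "sampled". */
#include <stdint.h>
#include <stdio.h>

static uint64_t bytes_until_sample = (uint64_t)1 << 19;	/* ~2^lg_prof_sample */

static int
should_sample(uint64_t usize)
{
	if (bytes_until_sample >= usize) {
		bytes_until_sample -= usize;	/* cheap common path */
		return (0);
	}
	/* Simplified: the real code redraws a geometric threshold here. */
	bytes_until_sample = (uint64_t)1 << 19;
	return (1);	/* capture a backtrace for this allocation */
}

int
main(void)
{
	uint64_t total = 0;
	int i;
	for (i = 0; i < 100000; i++) {
		if (should_sample(64))
			printf("sampled alloc #%d (%llu bytes so far)\n",
			    i, (unsigned long long)total);
		total += 64;
	}
	return (0);
}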
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/jemalloc/include/jemalloc/internal/hash.h
/* * The following hash function is based on MurmurHash3, placed into the public * domain by Austin Appleby. See https://github.com/aappleby/smhasher for * details. */ /******************************************************************************/ #ifdef JEMALLOC_H_TYPES #endif /* JEMALLOC_H_TYPES */ /******************************************************************************/ #ifdef JEMALLOC_H_STRUCTS #endif /* JEMALLOC_H_STRUCTS */ /******************************************************************************/ #ifdef JEMALLOC_H_EXTERNS #endif /* JEMALLOC_H_EXTERNS */ /******************************************************************************/ #ifdef JEMALLOC_H_INLINES #ifndef JEMALLOC_ENABLE_INLINE uint32_t hash_x86_32(const void *key, int len, uint32_t seed); void hash_x86_128(const void *key, const int len, uint32_t seed, uint64_t r_out[2]); void hash_x64_128(const void *key, const int len, const uint32_t seed, uint64_t r_out[2]); void hash(const void *key, size_t len, const uint32_t seed, size_t r_hash[2]); #endif #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_HASH_C_)) /******************************************************************************/ /* Internal implementation. */ JEMALLOC_INLINE uint32_t hash_rotl_32(uint32_t x, int8_t r) { return ((x << r) | (x >> (32 - r))); } JEMALLOC_INLINE uint64_t hash_rotl_64(uint64_t x, int8_t r) { return ((x << r) | (x >> (64 - r))); } JEMALLOC_INLINE uint32_t hash_get_block_32(const uint32_t *p, int i) { /* Handle unaligned read. */ if (unlikely((uintptr_t)p & (sizeof(uint32_t)-1)) != 0) { uint32_t ret; memcpy(&ret, (uint8_t *)(p + i), sizeof(uint32_t)); return (ret); } return (p[i]); } JEMALLOC_INLINE uint64_t hash_get_block_64(const uint64_t *p, int i) { /* Handle unaligned read. 
*/ if (unlikely((uintptr_t)p & (sizeof(uint64_t)-1)) != 0) { uint64_t ret; memcpy(&ret, (uint8_t *)(p + i), sizeof(uint64_t)); return (ret); } return (p[i]); } JEMALLOC_INLINE uint32_t hash_fmix_32(uint32_t h) { h ^= h >> 16; h *= 0x85ebca6b; h ^= h >> 13; h *= 0xc2b2ae35; h ^= h >> 16; return (h); } JEMALLOC_INLINE uint64_t hash_fmix_64(uint64_t k) { k ^= k >> 33; k *= KQU(0xff51afd7ed558ccd); k ^= k >> 33; k *= KQU(0xc4ceb9fe1a85ec53); k ^= k >> 33; return (k); } JEMALLOC_INLINE uint32_t hash_x86_32(const void *key, int len, uint32_t seed) { const uint8_t *data = (const uint8_t *) key; const int nblocks = len / 4; uint32_t h1 = seed; const uint32_t c1 = 0xcc9e2d51; const uint32_t c2 = 0x1b873593; /* body */ { const uint32_t *blocks = (const uint32_t *) (data + nblocks*4); int i; for (i = -nblocks; i; i++) { uint32_t k1 = hash_get_block_32(blocks, i); k1 *= c1; k1 = hash_rotl_32(k1, 15); k1 *= c2; h1 ^= k1; h1 = hash_rotl_32(h1, 13); h1 = h1*5 + 0xe6546b64; } } /* tail */ { const uint8_t *tail = (const uint8_t *) (data + nblocks*4); uint32_t k1 = 0; switch (len & 3) { case 3: k1 ^= tail[2] << 16; case 2: k1 ^= tail[1] << 8; case 1: k1 ^= tail[0]; k1 *= c1; k1 = hash_rotl_32(k1, 15); k1 *= c2; h1 ^= k1; } } /* finalization */ h1 ^= len; h1 = hash_fmix_32(h1); return (h1); } UNUSED JEMALLOC_INLINE void hash_x86_128(const void *key, const int len, uint32_t seed, uint64_t r_out[2]) { const uint8_t * data = (const uint8_t *) key; const int nblocks = len / 16; uint32_t h1 = seed; uint32_t h2 = seed; uint32_t h3 = seed; uint32_t h4 = seed; const uint32_t c1 = 0x239b961b; const uint32_t c2 = 0xab0e9789; const uint32_t c3 = 0x38b34ae5; const uint32_t c4 = 0xa1e38b93; /* body */ { const uint32_t *blocks = (const uint32_t *) (data + nblocks*16); int i; for (i = -nblocks; i; i++) { uint32_t k1 = hash_get_block_32(blocks, i*4 + 0); uint32_t k2 = hash_get_block_32(blocks, i*4 + 1); uint32_t k3 = hash_get_block_32(blocks, i*4 + 2); uint32_t k4 = hash_get_block_32(blocks, i*4 + 3); k1 *= c1; k1 = hash_rotl_32(k1, 15); k1 *= c2; h1 ^= k1; h1 = hash_rotl_32(h1, 19); h1 += h2; h1 = h1*5 + 0x561ccd1b; k2 *= c2; k2 = hash_rotl_32(k2, 16); k2 *= c3; h2 ^= k2; h2 = hash_rotl_32(h2, 17); h2 += h3; h2 = h2*5 + 0x0bcaa747; k3 *= c3; k3 = hash_rotl_32(k3, 17); k3 *= c4; h3 ^= k3; h3 = hash_rotl_32(h3, 15); h3 += h4; h3 = h3*5 + 0x96cd1c35; k4 *= c4; k4 = hash_rotl_32(k4, 18); k4 *= c1; h4 ^= k4; h4 = hash_rotl_32(h4, 13); h4 += h1; h4 = h4*5 + 0x32ac3b17; } } /* tail */ { const uint8_t *tail = (const uint8_t *) (data + nblocks*16); uint32_t k1 = 0; uint32_t k2 = 0; uint32_t k3 = 0; uint32_t k4 = 0; switch (len & 15) { case 15: k4 ^= tail[14] << 16; case 14: k4 ^= tail[13] << 8; case 13: k4 ^= tail[12] << 0; k4 *= c4; k4 = hash_rotl_32(k4, 18); k4 *= c1; h4 ^= k4; case 12: k3 ^= tail[11] << 24; case 11: k3 ^= tail[10] << 16; case 10: k3 ^= tail[ 9] << 8; case 9: k3 ^= tail[ 8] << 0; k3 *= c3; k3 = hash_rotl_32(k3, 17); k3 *= c4; h3 ^= k3; case 8: k2 ^= tail[ 7] << 24; case 7: k2 ^= tail[ 6] << 16; case 6: k2 ^= tail[ 5] << 8; case 5: k2 ^= tail[ 4] << 0; k2 *= c2; k2 = hash_rotl_32(k2, 16); k2 *= c3; h2 ^= k2; case 4: k1 ^= tail[ 3] << 24; case 3: k1 ^= tail[ 2] << 16; case 2: k1 ^= tail[ 1] << 8; case 1: k1 ^= tail[ 0] << 0; k1 *= c1; k1 = hash_rotl_32(k1, 15); k1 *= c2; h1 ^= k1; } } /* finalization */ h1 ^= len; h2 ^= len; h3 ^= len; h4 ^= len; h1 += h2; h1 += h3; h1 += h4; h2 += h1; h3 += h1; h4 += h1; h1 = hash_fmix_32(h1); h2 = hash_fmix_32(h2); h3 = hash_fmix_32(h3); h4 = hash_fmix_32(h4); h1 += h2; h1 += h3; 
h1 += h4; h2 += h1; h3 += h1; h4 += h1; r_out[0] = (((uint64_t) h2) << 32) | h1; r_out[1] = (((uint64_t) h4) << 32) | h3; } UNUSED JEMALLOC_INLINE void hash_x64_128(const void *key, const int len, const uint32_t seed, uint64_t r_out[2]) { const uint8_t *data = (const uint8_t *) key; const int nblocks = len / 16; uint64_t h1 = seed; uint64_t h2 = seed; const uint64_t c1 = KQU(0x87c37b91114253d5); const uint64_t c2 = KQU(0x4cf5ad432745937f); /* body */ { const uint64_t *blocks = (const uint64_t *) (data); int i; for (i = 0; i < nblocks; i++) { uint64_t k1 = hash_get_block_64(blocks, i*2 + 0); uint64_t k2 = hash_get_block_64(blocks, i*2 + 1); k1 *= c1; k1 = hash_rotl_64(k1, 31); k1 *= c2; h1 ^= k1; h1 = hash_rotl_64(h1, 27); h1 += h2; h1 = h1*5 + 0x52dce729; k2 *= c2; k2 = hash_rotl_64(k2, 33); k2 *= c1; h2 ^= k2; h2 = hash_rotl_64(h2, 31); h2 += h1; h2 = h2*5 + 0x38495ab5; } } /* tail */ { const uint8_t *tail = (const uint8_t*)(data + nblocks*16); uint64_t k1 = 0; uint64_t k2 = 0; switch (len & 15) { case 15: k2 ^= ((uint64_t)(tail[14])) << 48; case 14: k2 ^= ((uint64_t)(tail[13])) << 40; case 13: k2 ^= ((uint64_t)(tail[12])) << 32; case 12: k2 ^= ((uint64_t)(tail[11])) << 24; case 11: k2 ^= ((uint64_t)(tail[10])) << 16; case 10: k2 ^= ((uint64_t)(tail[ 9])) << 8; case 9: k2 ^= ((uint64_t)(tail[ 8])) << 0; k2 *= c2; k2 = hash_rotl_64(k2, 33); k2 *= c1; h2 ^= k2; case 8: k1 ^= ((uint64_t)(tail[ 7])) << 56; case 7: k1 ^= ((uint64_t)(tail[ 6])) << 48; case 6: k1 ^= ((uint64_t)(tail[ 5])) << 40; case 5: k1 ^= ((uint64_t)(tail[ 4])) << 32; case 4: k1 ^= ((uint64_t)(tail[ 3])) << 24; case 3: k1 ^= ((uint64_t)(tail[ 2])) << 16; case 2: k1 ^= ((uint64_t)(tail[ 1])) << 8; case 1: k1 ^= ((uint64_t)(tail[ 0])) << 0; k1 *= c1; k1 = hash_rotl_64(k1, 31); k1 *= c2; h1 ^= k1; } } /* finalization */ h1 ^= len; h2 ^= len; h1 += h2; h2 += h1; h1 = hash_fmix_64(h1); h2 = hash_fmix_64(h2); h1 += h2; h2 += h1; r_out[0] = h1; r_out[1] = h2; } /******************************************************************************/ /* API. */ JEMALLOC_INLINE void hash(const void *key, size_t len, const uint32_t seed, size_t r_hash[2]) { assert(len <= INT_MAX); /* Unfortunate implementation limitation. */ #if (LG_SIZEOF_PTR == 3 && !defined(JEMALLOC_BIG_ENDIAN)) hash_x64_128(key, (int)len, seed, (uint64_t *)r_hash); #else { uint64_t hashes[2]; hash_x86_128(key, (int)len, seed, hashes); r_hash[0] = (size_t)hashes[0]; r_hash[1] = (size_t)hashes[1]; } #endif } #endif #endif /* JEMALLOC_H_INLINES */ /******************************************************************************/
8,394
22.449721
80
h
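hash_fmix_32() above is MurmurHash3's 32-bit finalizer; its multiply/xor-shift rounds are what give the hash its avalanche behavior. Below, the same function (constants copied verbatim from the header) is lifted into a standalone program that flips one input bit and counts differing output bits; __builtin_popcount assumes GCC/Clang.

/* Avalanche demo for the 32-bit MurmurHash3 finalizer. */
#include <stdint.h>
#include <stdio.h>

static uint32_t
fmix32(uint32_t h)	/* same body as hash_fmix_32() */
{
	h ^= h >> 16;
	h *= 0x85ebca6b;
	h ^= h >> 13;
	h *= 0xc2b2ae35;
	h ^= h >> 16;
	return (h);
}

int
main(void)
{
	uint32_t a = fmix32(0x12345678);
	uint32_t b = fmix32(0x12345679);	/* one input bit flipped */
	printf("%08x vs %08x: %d output bits differ\n", (unsigned)a,
	    (unsigned)b, __builtin_popcount(a ^ b));	/* GCC/Clang builtin */
	return (0);
}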
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/jemalloc/include/jemalloc/internal/tsd.h
/******************************************************************************/ #ifdef JEMALLOC_H_TYPES /* Maximum number of malloc_tsd users with cleanup functions. */ #define MALLOC_TSD_CLEANUPS_MAX 2 typedef bool (*malloc_tsd_cleanup_t)(void); #if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \ !defined(_WIN32)) typedef struct tsd_init_block_s tsd_init_block_t; typedef struct tsd_init_head_s tsd_init_head_t; #endif typedef struct tsd_s tsd_t; typedef struct tsdn_s tsdn_t; #define TSDN_NULL ((tsdn_t *)0) typedef enum { tsd_state_uninitialized, tsd_state_nominal, tsd_state_purgatory, tsd_state_reincarnated } tsd_state_t; /* * TLS/TSD-agnostic macro-based implementation of thread-specific data. There * are five macros that support (at least) three use cases: file-private, * library-private, and library-private inlined. Following is an example * library-private tsd variable: * * In example.h: * typedef struct { * int x; * int y; * } example_t; * #define EX_INITIALIZER JEMALLOC_CONCAT({0, 0}) * malloc_tsd_types(example_, example_t) * malloc_tsd_protos(, example_, example_t) * malloc_tsd_externs(example_, example_t) * In example.c: * malloc_tsd_data(, example_, example_t, EX_INITIALIZER) * malloc_tsd_funcs(, example_, example_t, EX_INITIALIZER, * example_tsd_cleanup) * * The result is a set of generated functions, e.g.: * * bool example_tsd_boot(void) {...} * bool example_tsd_booted_get(void) {...} * example_t *example_tsd_get(bool init) {...} * void example_tsd_set(example_t *val) {...} * * Note that all of the functions deal in terms of (a_type *) rather than * (a_type) so that it is possible to support non-pointer types (unlike * pthreads TSD). example_tsd_cleanup() is passed an (a_type *) pointer that is * cast to (void *). This means that the cleanup function needs to cast the * function argument to (a_type *), then dereference the resulting pointer to * access fields, e.g. * * void * example_tsd_cleanup(void *arg) * { * example_t *example = (example_t *)arg; * * example->x = 42; * [...] * if ([want the cleanup function to be called again]) * example_tsd_set(example); * } * * If example_tsd_set() is called within example_tsd_cleanup(), it will be * called again. This is similar to how pthreads TSD destruction works, except * that pthreads only calls the cleanup function again if the value was set to * non-NULL. */ /* malloc_tsd_types(). */ #ifdef JEMALLOC_MALLOC_THREAD_CLEANUP #define malloc_tsd_types(a_name, a_type) #elif (defined(JEMALLOC_TLS)) #define malloc_tsd_types(a_name, a_type) #elif (defined(_WIN32)) #define malloc_tsd_types(a_name, a_type) \ typedef struct { \ bool initialized; \ a_type val; \ } a_name##tsd_wrapper_t; #else #define malloc_tsd_types(a_name, a_type) \ typedef struct { \ bool initialized; \ a_type val; \ } a_name##tsd_wrapper_t; #endif /* malloc_tsd_protos(). */ #define malloc_tsd_protos(a_attr, a_name, a_type) \ a_attr bool \ a_name##tsd_boot0(void); \ a_attr void \ a_name##tsd_boot1(void); \ a_attr bool \ a_name##tsd_boot(void); \ a_attr bool \ a_name##tsd_booted_get(void); \ a_attr a_type * \ a_name##tsd_get(bool init); \ a_attr void \ a_name##tsd_set(a_type *val); /* malloc_tsd_externs(). 
*/ #ifdef JEMALLOC_MALLOC_THREAD_CLEANUP #define malloc_tsd_externs(a_name, a_type) \ extern __thread a_type a_name##tsd_tls; \ extern __thread bool a_name##tsd_initialized; \ extern bool a_name##tsd_booted; #elif (defined(JEMALLOC_TLS)) #define malloc_tsd_externs(a_name, a_type) \ extern __thread a_type a_name##tsd_tls; \ extern pthread_key_t a_name##tsd_tsd; \ extern bool a_name##tsd_booted; #elif (defined(_WIN32)) #define malloc_tsd_externs(a_name, a_type) \ extern DWORD a_name##tsd_tsd; \ extern a_name##tsd_wrapper_t a_name##tsd_boot_wrapper; \ extern bool a_name##tsd_booted; #else #define malloc_tsd_externs(a_name, a_type) \ extern pthread_key_t a_name##tsd_tsd; \ extern tsd_init_head_t a_name##tsd_init_head; \ extern a_name##tsd_wrapper_t a_name##tsd_boot_wrapper; \ extern bool a_name##tsd_booted; #endif /* malloc_tsd_data(). */ #ifdef JEMALLOC_MALLOC_THREAD_CLEANUP #define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \ a_attr __thread a_type JEMALLOC_TLS_MODEL \ a_name##tsd_tls = a_initializer; \ a_attr __thread bool JEMALLOC_TLS_MODEL \ a_name##tsd_initialized = false; \ a_attr bool a_name##tsd_booted = false; #elif (defined(JEMALLOC_TLS)) #define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \ a_attr __thread a_type JEMALLOC_TLS_MODEL \ a_name##tsd_tls = a_initializer; \ a_attr pthread_key_t a_name##tsd_tsd; \ a_attr bool a_name##tsd_booted = false; #elif (defined(_WIN32)) #define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \ a_attr DWORD a_name##tsd_tsd; \ a_attr a_name##tsd_wrapper_t a_name##tsd_boot_wrapper = { \ false, \ a_initializer \ }; \ a_attr bool a_name##tsd_booted = false; #else #define malloc_tsd_data(a_attr, a_name, a_type, a_initializer) \ a_attr pthread_key_t a_name##tsd_tsd; \ a_attr tsd_init_head_t a_name##tsd_init_head = { \ ql_head_initializer(blocks), \ MALLOC_MUTEX_INITIALIZER \ }; \ a_attr a_name##tsd_wrapper_t a_name##tsd_boot_wrapper = { \ false, \ a_initializer \ }; \ a_attr bool a_name##tsd_booted = false; #endif /* malloc_tsd_funcs(). */ #ifdef JEMALLOC_MALLOC_THREAD_CLEANUP #define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \ a_cleanup) \ /* Initialization/cleanup. */ \ a_attr bool \ a_name##tsd_cleanup_wrapper(void) \ { \ \ if (a_name##tsd_initialized) { \ a_name##tsd_initialized = false; \ a_cleanup(&a_name##tsd_tls); \ } \ return (a_name##tsd_initialized); \ } \ a_attr bool \ a_name##tsd_boot0(void) \ { \ \ if (a_cleanup != malloc_tsd_no_cleanup) { \ malloc_tsd_cleanup_register( \ &a_name##tsd_cleanup_wrapper); \ } \ a_name##tsd_booted = true; \ return (false); \ } \ a_attr void \ a_name##tsd_boot1(void) \ { \ \ /* Do nothing. */ \ } \ a_attr bool \ a_name##tsd_boot(void) \ { \ \ return (a_name##tsd_boot0()); \ } \ a_attr bool \ a_name##tsd_booted_get(void) \ { \ \ return (a_name##tsd_booted); \ } \ a_attr bool \ a_name##tsd_get_allocates(void) \ { \ \ return (false); \ } \ /* Get/set. */ \ a_attr a_type * \ a_name##tsd_get(bool init) \ { \ \ assert(a_name##tsd_booted); \ return (&a_name##tsd_tls); \ } \ a_attr void \ a_name##tsd_set(a_type *val) \ { \ \ assert(a_name##tsd_booted); \ a_name##tsd_tls = (*val); \ if (a_cleanup != malloc_tsd_no_cleanup) \ a_name##tsd_initialized = true; \ } #elif (defined(JEMALLOC_TLS)) #define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \ a_cleanup) \ /* Initialization/cleanup. 
*/ \ a_attr bool \ a_name##tsd_boot0(void) \ { \ \ if (a_cleanup != malloc_tsd_no_cleanup) { \ if (pthread_key_create(&a_name##tsd_tsd, a_cleanup) != \ 0) \ return (true); \ } \ a_name##tsd_booted = true; \ return (false); \ } \ a_attr void \ a_name##tsd_boot1(void) \ { \ \ /* Do nothing. */ \ } \ a_attr bool \ a_name##tsd_boot(void) \ { \ \ return (a_name##tsd_boot0()); \ } \ a_attr bool \ a_name##tsd_booted_get(void) \ { \ \ return (a_name##tsd_booted); \ } \ a_attr bool \ a_name##tsd_get_allocates(void) \ { \ \ return (false); \ } \ /* Get/set. */ \ a_attr a_type * \ a_name##tsd_get(bool init) \ { \ \ assert(a_name##tsd_booted); \ return (&a_name##tsd_tls); \ } \ a_attr void \ a_name##tsd_set(a_type *val) \ { \ \ assert(a_name##tsd_booted); \ a_name##tsd_tls = (*val); \ if (a_cleanup != malloc_tsd_no_cleanup) { \ if (pthread_setspecific(a_name##tsd_tsd, \ (void *)(&a_name##tsd_tls))) { \ malloc_write("<jemalloc>: Error" \ " setting TSD for "#a_name"\n"); \ if (opt_abort) \ abort(); \ } \ } \ } #elif (defined(_WIN32)) #define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \ a_cleanup) \ /* Initialization/cleanup. */ \ a_attr bool \ a_name##tsd_cleanup_wrapper(void) \ { \ DWORD error = GetLastError(); \ a_name##tsd_wrapper_t *wrapper = (a_name##tsd_wrapper_t *) \ TlsGetValue(a_name##tsd_tsd); \ SetLastError(error); \ \ if (wrapper == NULL) \ return (false); \ if (a_cleanup != malloc_tsd_no_cleanup && \ wrapper->initialized) { \ wrapper->initialized = false; \ a_cleanup(&wrapper->val); \ if (wrapper->initialized) { \ /* Trigger another cleanup round. */ \ return (true); \ } \ } \ malloc_tsd_dalloc(wrapper); \ return (false); \ } \ a_attr void \ a_name##tsd_wrapper_set(a_name##tsd_wrapper_t *wrapper) \ { \ \ if (!TlsSetValue(a_name##tsd_tsd, (void *)wrapper)) { \ malloc_write("<jemalloc>: Error setting" \ " TSD for "#a_name"\n"); \ abort(); \ } \ } \ a_attr a_name##tsd_wrapper_t * \ a_name##tsd_wrapper_get(bool init) \ { \ DWORD error = GetLastError(); \ a_name##tsd_wrapper_t *wrapper = (a_name##tsd_wrapper_t *) \ TlsGetValue(a_name##tsd_tsd); \ SetLastError(error); \ \ if (init && unlikely(wrapper == NULL)) { \ wrapper = (a_name##tsd_wrapper_t *) \ malloc_tsd_malloc(sizeof(a_name##tsd_wrapper_t)); \ if (wrapper == NULL) { \ malloc_write("<jemalloc>: Error allocating" \ " TSD for "#a_name"\n"); \ abort(); \ } else { \ wrapper->initialized = false; \ wrapper->val = a_initializer; \ } \ a_name##tsd_wrapper_set(wrapper); \ } \ return (wrapper); \ } \ a_attr bool \ a_name##tsd_boot0(void) \ { \ \ a_name##tsd_tsd = TlsAlloc(); \ if (a_name##tsd_tsd == TLS_OUT_OF_INDEXES) \ return (true); \ if (a_cleanup != malloc_tsd_no_cleanup) { \ malloc_tsd_cleanup_register( \ &a_name##tsd_cleanup_wrapper); \ } \ a_name##tsd_wrapper_set(&a_name##tsd_boot_wrapper); \ a_name##tsd_booted = true; \ return (false); \ } \ a_attr void \ a_name##tsd_boot1(void) \ { \ a_name##tsd_wrapper_t *wrapper; \ wrapper = (a_name##tsd_wrapper_t *) \ malloc_tsd_malloc(sizeof(a_name##tsd_wrapper_t)); \ if (wrapper == NULL) { \ malloc_write("<jemalloc>: Error allocating" \ " TSD for "#a_name"\n"); \ abort(); \ } \ memcpy(wrapper, &a_name##tsd_boot_wrapper, \ sizeof(a_name##tsd_wrapper_t)); \ a_name##tsd_wrapper_set(wrapper); \ } \ a_attr bool \ a_name##tsd_boot(void) \ { \ \ if (a_name##tsd_boot0()) \ return (true); \ a_name##tsd_boot1(); \ return (false); \ } \ a_attr bool \ a_name##tsd_booted_get(void) \ { \ \ return (a_name##tsd_booted); \ } \ a_attr bool \ a_name##tsd_get_allocates(void) \ { \ \ return (true); \ } \ /* 
Get/set. */ \ a_attr a_type * \ a_name##tsd_get(bool init) \ { \ a_name##tsd_wrapper_t *wrapper; \ \ assert(a_name##tsd_booted); \ wrapper = a_name##tsd_wrapper_get(init); \ if (a_name##tsd_get_allocates() && !init && wrapper == NULL) \ return (NULL); \ return (&wrapper->val); \ } \ a_attr void \ a_name##tsd_set(a_type *val) \ { \ a_name##tsd_wrapper_t *wrapper; \ \ assert(a_name##tsd_booted); \ wrapper = a_name##tsd_wrapper_get(true); \ wrapper->val = *(val); \ if (a_cleanup != malloc_tsd_no_cleanup) \ wrapper->initialized = true; \ } #else #define malloc_tsd_funcs(a_attr, a_name, a_type, a_initializer, \ a_cleanup) \ /* Initialization/cleanup. */ \ a_attr void \ a_name##tsd_cleanup_wrapper(void *arg) \ { \ a_name##tsd_wrapper_t *wrapper = (a_name##tsd_wrapper_t *)arg; \ \ if (a_cleanup != malloc_tsd_no_cleanup && \ wrapper->initialized) { \ wrapper->initialized = false; \ a_cleanup(&wrapper->val); \ if (wrapper->initialized) { \ /* Trigger another cleanup round. */ \ if (pthread_setspecific(a_name##tsd_tsd, \ (void *)wrapper)) { \ malloc_write("<jemalloc>: Error" \ " setting TSD for "#a_name"\n"); \ if (opt_abort) \ abort(); \ } \ return; \ } \ } \ malloc_tsd_dalloc(wrapper); \ } \ a_attr void \ a_name##tsd_wrapper_set(a_name##tsd_wrapper_t *wrapper) \ { \ \ if (pthread_setspecific(a_name##tsd_tsd, \ (void *)wrapper)) { \ malloc_write("<jemalloc>: Error setting" \ " TSD for "#a_name"\n"); \ abort(); \ } \ } \ a_attr a_name##tsd_wrapper_t * \ a_name##tsd_wrapper_get(bool init) \ { \ a_name##tsd_wrapper_t *wrapper = (a_name##tsd_wrapper_t *) \ pthread_getspecific(a_name##tsd_tsd); \ \ if (init && unlikely(wrapper == NULL)) { \ tsd_init_block_t block; \ wrapper = tsd_init_check_recursion( \ &a_name##tsd_init_head, &block); \ if (wrapper) \ return (wrapper); \ wrapper = (a_name##tsd_wrapper_t *) \ malloc_tsd_malloc(sizeof(a_name##tsd_wrapper_t)); \ block.data = wrapper; \ if (wrapper == NULL) { \ malloc_write("<jemalloc>: Error allocating" \ " TSD for "#a_name"\n"); \ abort(); \ } else { \ wrapper->initialized = false; \ wrapper->val = a_initializer; \ } \ a_name##tsd_wrapper_set(wrapper); \ tsd_init_finish(&a_name##tsd_init_head, &block); \ } \ return (wrapper); \ } \ a_attr bool \ a_name##tsd_boot0(void) \ { \ \ if (pthread_key_create(&a_name##tsd_tsd, \ a_name##tsd_cleanup_wrapper) != 0) \ return (true); \ a_name##tsd_wrapper_set(&a_name##tsd_boot_wrapper); \ a_name##tsd_booted = true; \ return (false); \ } \ a_attr void \ a_name##tsd_boot1(void) \ { \ a_name##tsd_wrapper_t *wrapper; \ wrapper = (a_name##tsd_wrapper_t *) \ malloc_tsd_malloc(sizeof(a_name##tsd_wrapper_t)); \ if (wrapper == NULL) { \ malloc_write("<jemalloc>: Error allocating" \ " TSD for "#a_name"\n"); \ abort(); \ } \ memcpy(wrapper, &a_name##tsd_boot_wrapper, \ sizeof(a_name##tsd_wrapper_t)); \ a_name##tsd_wrapper_set(wrapper); \ } \ a_attr bool \ a_name##tsd_boot(void) \ { \ \ if (a_name##tsd_boot0()) \ return (true); \ a_name##tsd_boot1(); \ return (false); \ } \ a_attr bool \ a_name##tsd_booted_get(void) \ { \ \ return (a_name##tsd_booted); \ } \ a_attr bool \ a_name##tsd_get_allocates(void) \ { \ \ return (true); \ } \ /* Get/set. 
*/ \ a_attr a_type * \ a_name##tsd_get(bool init) \ { \ a_name##tsd_wrapper_t *wrapper; \ \ assert(a_name##tsd_booted); \ wrapper = a_name##tsd_wrapper_get(init); \ if (a_name##tsd_get_allocates() && !init && wrapper == NULL) \ return (NULL); \ return (&wrapper->val); \ } \ a_attr void \ a_name##tsd_set(a_type *val) \ { \ a_name##tsd_wrapper_t *wrapper; \ \ assert(a_name##tsd_booted); \ wrapper = a_name##tsd_wrapper_get(true); \ wrapper->val = *(val); \ if (a_cleanup != malloc_tsd_no_cleanup) \ wrapper->initialized = true; \ } #endif #endif /* JEMALLOC_H_TYPES */ /******************************************************************************/ #ifdef JEMALLOC_H_STRUCTS #if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \ !defined(_WIN32)) struct tsd_init_block_s { ql_elm(tsd_init_block_t) link; pthread_t thread; void *data; }; struct tsd_init_head_s { ql_head(tsd_init_block_t) blocks; malloc_mutex_t lock; }; #endif #define MALLOC_TSD \ /* O(name, type) */ \ O(tcache, tcache_t *) \ O(thread_allocated, uint64_t) \ O(thread_deallocated, uint64_t) \ O(prof_tdata, prof_tdata_t *) \ O(iarena, arena_t *) \ O(arena, arena_t *) \ O(arenas_tdata, arena_tdata_t *) \ O(narenas_tdata, unsigned) \ O(arenas_tdata_bypass, bool) \ O(tcache_enabled, tcache_enabled_t) \ O(quarantine, quarantine_t *) \ O(witnesses, witness_list_t) \ O(witness_fork, bool) \ #define TSD_INITIALIZER { \ tsd_state_uninitialized, \ NULL, \ 0, \ 0, \ NULL, \ NULL, \ NULL, \ NULL, \ 0, \ false, \ tcache_enabled_default, \ NULL, \ ql_head_initializer(witnesses), \ false \ } struct tsd_s { tsd_state_t state; #define O(n, t) \ t n; MALLOC_TSD #undef O }; /* * Wrapper around tsd_t that makes it possible to avoid implicit conversion * between tsd_t and tsdn_t, where tsdn_t is "nullable" and has to be * explicitly converted to tsd_t, which is non-nullable. 
*/ struct tsdn_s { tsd_t tsd; }; static const tsd_t tsd_initializer = TSD_INITIALIZER; malloc_tsd_types(, tsd_t) #endif /* JEMALLOC_H_STRUCTS */ /******************************************************************************/ #ifdef JEMALLOC_H_EXTERNS void *malloc_tsd_malloc(size_t size); void malloc_tsd_dalloc(void *wrapper); void malloc_tsd_no_cleanup(void *arg); void malloc_tsd_cleanup_register(bool (*f)(void)); tsd_t *malloc_tsd_boot0(void); void malloc_tsd_boot1(void); #if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \ !defined(_WIN32)) void *tsd_init_check_recursion(tsd_init_head_t *head, tsd_init_block_t *block); void tsd_init_finish(tsd_init_head_t *head, tsd_init_block_t *block); #endif void tsd_cleanup(void *arg); #endif /* JEMALLOC_H_EXTERNS */ /******************************************************************************/ #ifdef JEMALLOC_H_INLINES #ifndef JEMALLOC_ENABLE_INLINE malloc_tsd_protos(JEMALLOC_ATTR(unused), , tsd_t) tsd_t *tsd_fetch_impl(bool init); tsd_t *tsd_fetch(void); tsdn_t *tsd_tsdn(tsd_t *tsd); bool tsd_nominal(tsd_t *tsd); #define O(n, t) \ t *tsd_##n##p_get(tsd_t *tsd); \ t tsd_##n##_get(tsd_t *tsd); \ void tsd_##n##_set(tsd_t *tsd, t n); MALLOC_TSD #undef O tsdn_t *tsdn_fetch(void); bool tsdn_null(const tsdn_t *tsdn); tsd_t *tsdn_tsd(tsdn_t *tsdn); #endif #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_TSD_C_)) malloc_tsd_externs(, tsd_t) malloc_tsd_funcs(JEMALLOC_ALWAYS_INLINE, , tsd_t, tsd_initializer, tsd_cleanup) JEMALLOC_ALWAYS_INLINE tsd_t * tsd_fetch_impl(bool init) { tsd_t *tsd = tsd_get(init); if (!init && tsd_get_allocates() && tsd == NULL) return (NULL); assert(tsd != NULL); if (unlikely(tsd->state != tsd_state_nominal)) { if (tsd->state == tsd_state_uninitialized) { tsd->state = tsd_state_nominal; /* Trigger cleanup handler registration. */ tsd_set(tsd); } else if (tsd->state == tsd_state_purgatory) { tsd->state = tsd_state_reincarnated; tsd_set(tsd); } else assert(tsd->state == tsd_state_reincarnated); } return (tsd); } JEMALLOC_ALWAYS_INLINE tsd_t * tsd_fetch(void) { return (tsd_fetch_impl(true)); } JEMALLOC_ALWAYS_INLINE tsdn_t * tsd_tsdn(tsd_t *tsd) { return ((tsdn_t *)tsd); } JEMALLOC_INLINE bool tsd_nominal(tsd_t *tsd) { return (tsd->state == tsd_state_nominal); } #define O(n, t) \ JEMALLOC_ALWAYS_INLINE t * \ tsd_##n##p_get(tsd_t *tsd) \ { \ \ return (&tsd->n); \ } \ \ JEMALLOC_ALWAYS_INLINE t \ tsd_##n##_get(tsd_t *tsd) \ { \ \ return (*tsd_##n##p_get(tsd)); \ } \ \ JEMALLOC_ALWAYS_INLINE void \ tsd_##n##_set(tsd_t *tsd, t n) \ { \ \ assert(tsd->state == tsd_state_nominal); \ tsd->n = n; \ } MALLOC_TSD #undef O JEMALLOC_ALWAYS_INLINE tsdn_t * tsdn_fetch(void) { if (!tsd_booted_get()) return (NULL); return (tsd_tsdn(tsd_fetch_impl(false))); } JEMALLOC_ALWAYS_INLINE bool tsdn_null(const tsdn_t *tsdn) { return (tsdn == NULL); } JEMALLOC_ALWAYS_INLINE tsd_t * tsdn_tsd(tsdn_t *tsdn) { assert(!tsdn_null(tsdn)); return (&tsdn->tsd); } #endif #endif /* JEMALLOC_H_INLINES */ /******************************************************************************/
21,743
26.593909
80
h
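The long comment in tsd.h notes that a cleanup function which re-sets its TSD value is called again, matching pthreads destructor semantics. A minimal pthreads demo of that re-invocation behavior follows (link with -lpthread); the three-pass cutoff is an arbitrary illustrative choice.

/* Demo: a pthreads TSD destructor that re-sets the value is re-run
 * (up to PTHREAD_DESTRUCTOR_ITERATIONS), the behavior tsd.h mirrors. */
#include <pthread.h>
#include <stdio.h>

static pthread_key_t key;

static void
cleanup(void *arg)
{
	int *count = (int *)arg;
	printf("cleanup pass %d\n", *count);
	if (++(*count) < 3)	/* re-set => destructor runs again */
		pthread_setspecific(key, count);
}

static void *
worker(void *arg)
{
	static int count = 0;
	(void)arg;
	pthread_setspecific(key, &count);
	return (NULL);	/* thread exit triggers the destructor */
}

int
main(void)
{
	pthread_t t;
	pthread_key_create(&key, cleanup);
	pthread_create(&t, NULL, worker, NULL);
	pthread_join(t, NULL);
	return (0);
}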
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/geohash-int/geohash_helper.c
/* * Copyright (c) 2013-2014, yinqiwen <yinqiwen@gmail.com> * Copyright (c) 2014, Matt Stancliff <matt@genges.com>. * Copyright (c) 2015-2016, Salvatore Sanfilippo <antirez@gmail.com>. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Redis nor the names of its contributors may be used * to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. */ /* This is a C++ to C conversion from the ardb project. * This file started out as: * https://github.com/yinqiwen/ardb/blob/d42503/src/geo/geohash_helper.cpp */ #include "geohash_helper.h" #include <math.h> #define D_R (M_PI / 180.0) #define R_MAJOR 6378137.0 #define R_MINOR 6356752.3142 #define RATIO (R_MINOR / R_MAJOR) #define ECCENT (sqrt(1.0 - (RATIO *RATIO))) #define COM (0.5 * ECCENT) /// @brief The usual PI/180 constant const double DEG_TO_RAD = 0.017453292519943295769236907684886; /// @brief Earth's quadratic mean radius for WGS-84 const double EARTH_RADIUS_IN_METERS = 6372797.560856; const double MERCATOR_MAX = 20037726.37; const double MERCATOR_MIN = -20037726.37; static inline double deg_rad(double ang) { return ang * D_R; } static inline double rad_deg(double ang) { return ang / D_R; } /* This function is used in order to estimate the step (bits precision) * of the 9 search area boxes during radius queries. */ uint8_t geohashEstimateStepsByRadius(double range_meters, double lat) { if (range_meters == 0) return 26; int step = 1; while (range_meters < MERCATOR_MAX) { range_meters *= 2; step++; } step -= 2; /* Make sure range is included in most of the base cases. */ /* Wider range towards the poles... Note: it is possible to do better * than this approximation by computing the distance between meridians * at this latitude, but this does the trick for now. */ if (lat > 66 || lat < -66) { step--; if (lat > 80 || lat < -80) step--; } /* Frame to valid range. */ if (step < 1) step = 1; if (step > 26) step = 26; return step; } /* Return the bounding box of the search area centered at latitude,longitude * having a radius of radius_meters. bounds[0] - bounds[2] is the minimum * and maximum longitude, while bounds[1] - bounds[3] is the minimum and * maximum latitude. 
*/ int geohashBoundingBox(double longitude, double latitude, double radius_meters, double *bounds) { if (!bounds) return 0; bounds[0] = longitude - rad_deg(radius_meters/EARTH_RADIUS_IN_METERS/cos(deg_rad(latitude))); bounds[2] = longitude + rad_deg(radius_meters/EARTH_RADIUS_IN_METERS/cos(deg_rad(latitude))); bounds[1] = latitude - rad_deg(radius_meters/EARTH_RADIUS_IN_METERS); bounds[3] = latitude + rad_deg(radius_meters/EARTH_RADIUS_IN_METERS); return 1; } /* Return a set of areas (center + 8) that are able to cover a range query * for the specified position and radius. */ GeoHashRadius geohashGetAreasByRadius(double longitude, double latitude, double radius_meters) { GeoHashRange long_range, lat_range; GeoHashRadius radius; GeoHashBits hash; GeoHashNeighbors neighbors; GeoHashArea area; double min_lon, max_lon, min_lat, max_lat; double bounds[4]; int steps; geohashBoundingBox(longitude, latitude, radius_meters, bounds); min_lon = bounds[0]; min_lat = bounds[1]; max_lon = bounds[2]; max_lat = bounds[3]; steps = geohashEstimateStepsByRadius(radius_meters,latitude); geohashGetCoordRange(&long_range,&lat_range); geohashEncode(&long_range,&lat_range,longitude,latitude,steps,&hash); geohashNeighbors(&hash,&neighbors); geohashDecode(long_range,lat_range,hash,&area); /* Check if the step is enough at the limits of the covered area. * Sometimes when the search area is near an edge of the * area, the estimated step is not small enough, since one of the * north / south / west / east square is too near to the search area * to cover everything. */ int decrease_step = 0; { GeoHashArea north, south, east, west; geohashDecode(long_range, lat_range, neighbors.north, &north); geohashDecode(long_range, lat_range, neighbors.south, &south); geohashDecode(long_range, lat_range, neighbors.east, &east); geohashDecode(long_range, lat_range, neighbors.west, &west); if (geohashGetDistance(longitude,latitude,longitude,north.latitude.max) < radius_meters) decrease_step = 1; if (geohashGetDistance(longitude,latitude,longitude,south.latitude.min) < radius_meters) decrease_step = 1; if (geohashGetDistance(longitude,latitude,east.longitude.max,latitude) < radius_meters) decrease_step = 1; if (geohashGetDistance(longitude,latitude,west.longitude.min,latitude) < radius_meters) decrease_step = 1; } if (steps > 1 && decrease_step) { steps--; geohashEncode(&long_range,&lat_range,longitude,latitude,steps,&hash); geohashNeighbors(&hash,&neighbors); geohashDecode(long_range,lat_range,hash,&area); } /* Exclude the search areas that are useless. */ if (area.latitude.min < min_lat) { GZERO(neighbors.south); GZERO(neighbors.south_west); GZERO(neighbors.south_east); } if (area.latitude.max > max_lat) { GZERO(neighbors.north); GZERO(neighbors.north_east); GZERO(neighbors.north_west); } if (area.longitude.min < min_lon) { GZERO(neighbors.west); GZERO(neighbors.south_west); GZERO(neighbors.north_west); } if (area.longitude.max > max_lon) { GZERO(neighbors.east); GZERO(neighbors.south_east); GZERO(neighbors.north_east); } radius.hash = hash; radius.neighbors = neighbors; radius.area = area; return radius; } GeoHashRadius geohashGetAreasByRadiusWGS84(double longitude, double latitude, double radius_meters) { return geohashGetAreasByRadius(longitude, latitude, radius_meters); } GeoHashFix52Bits geohashAlign52Bits(const GeoHashBits hash) { uint64_t bits = hash.bits; bits <<= (52 - hash.step * 2); return bits; } /* Calculate distance using haversin great circle distance formula. 
*/ double geohashGetDistance(double lon1d, double lat1d, double lon2d, double lat2d) { double lat1r, lon1r, lat2r, lon2r, u, v; lat1r = deg_rad(lat1d); lon1r = deg_rad(lon1d); lat2r = deg_rad(lat2d); lon2r = deg_rad(lon2d); u = sin((lat2r - lat1r) / 2); v = sin((lon2r - lon1r) / 2); return 2.0 * EARTH_RADIUS_IN_METERS * asin(sqrt(u * u + cos(lat1r) * cos(lat2r) * v * v)); } int geohashGetDistanceIfInRadius(double x1, double y1, double x2, double y2, double radius, double *distance) { *distance = geohashGetDistance(x1, y1, x2, y2); if (*distance > radius) return 0; return 1; } int geohashGetDistanceIfInRadiusWGS84(double x1, double y1, double x2, double y2, double radius, double *distance) { return geohashGetDistanceIfInRadius(x1, y1, x2, y2, radius, distance); }
8,623
38.559633
97
c
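geohashGetDistance() uses the haversine great-circle formula with a fixed mean earth radius. The standalone sketch below reproduces the same formula and radius constant from the source so it can be checked in isolation; the Rome-to-Paris coordinates are arbitrary example inputs.

/* Standalone check of the haversine distance used by geohashGetDistance();
 * formula and radius constant copied from the source. */
#include <math.h>
#include <stdio.h>

#define EARTH_RADIUS_IN_METERS 6372797.560856
#define D_R (M_PI / 180.0)

static double
haversine_m(double lon1d, double lat1d, double lon2d, double lat2d)
{
	double lat1r = lat1d * D_R, lon1r = lon1d * D_R;
	double lat2r = lat2d * D_R, lon2r = lon2d * D_R;
	double u = sin((lat2r - lat1r) / 2);
	double v = sin((lon2r - lon1r) / 2);
	return (2.0 * EARTH_RADIUS_IN_METERS *
	    asin(sqrt(u * u + cos(lat1r) * cos(lat2r) * v * v)));
}

int
main(void)
{
	/* Rome (12.496366, 41.902782) to Paris (2.352222, 48.856614). */
	printf("%.0f meters\n",
	    haversine_m(12.496366, 41.902782, 2.352222, 48.856614));
	return (0);
}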