repo
stringlengths
1
152
file
stringlengths
14
221
code
stringlengths
501
25k
file_length
int64
501
25k
avg_line_length
float64
20
99.5
max_line_length
int64
21
134
extension_type
stringclasses
2 values
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmem2/x86_64/memcpy_memset.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2014-2020, Intel Corporation */ #ifndef MEMCPY_MEMSET_H #define MEMCPY_MEMSET_H #include <stddef.h> #include <xmmintrin.h> #include "pmem2_arch.h" typedef void barrier_fn(void); typedef void flush64b_fn(const void *); static inline void barrier_after_ntstores(void) { /* * In this configuration pmem_drain does not contain sfence, so we have * to serialize non-temporal store instructions. */ _mm_sfence(); } static inline void no_barrier_after_ntstores(void) { /* * In this configuration pmem_drain contains sfence, so we don't have * to serialize non-temporal store instructions */ } static inline void noflush(const void *addr, size_t len) { /* NOP, not even pmemcheck annotation */ } static inline void noflush64b(const void *addr) { /* NOP, not even pmemcheck annotation */ } typedef void perf_barrier_fn(void); static force_inline void wc_barrier(void) { /* * Currently, for SSE2 and AVX code paths, use of non-temporal stores * on all generations of CPUs must be limited to the number of * write-combining buffers (12) because otherwise, suboptimal eviction * policy might impact performance when writing more data than WC * buffers can simultaneously hold. * * The AVX512 code path is not affected, probably because we are * overwriting whole cache lines. */ _mm_sfence(); } static force_inline void no_barrier(void) { } #ifndef AVX512F_AVAILABLE /* * XXX not supported in MSVC version we currently use. * Enable Windows tests pmem2_mem_ext when MSVC we * use will support AVX512F. 
*/ #ifdef _MSC_VER #define AVX512F_AVAILABLE 0 #else #define AVX512F_AVAILABLE 1 #endif #endif #ifndef AVX_AVAILABLE #define AVX_AVAILABLE 1 #endif #ifndef SSE2_AVAILABLE #define SSE2_AVAILABLE 1 #endif #if SSE2_AVAILABLE void memmove_mov_sse2_clflush(char *dest, const char *src, size_t len); void memmove_mov_sse2_clflushopt(char *dest, const char *src, size_t len); void memmove_mov_sse2_clwb(char *dest, const char *src, size_t len); void memmove_mov_sse2_empty(char *dest, const char *src, size_t len); void memmove_mov_sse2_noflush(char *dest, const char *src, size_t len); void memmove_movnt_sse2_clflush_nobarrier(char *dest, const char *src, size_t len); void memmove_movnt_sse2_clflushopt_nobarrier(char *dest, const char *src, size_t len); void memmove_movnt_sse2_clwb_nobarrier(char *dest, const char *src, size_t len); void memmove_movnt_sse2_empty_nobarrier(char *dest, const char *src, size_t len); void memmove_movnt_sse2_noflush_nobarrier(char *dest, const char *src, size_t len); void memmove_movnt_sse2_clflush_wcbarrier(char *dest, const char *src, size_t len); void memmove_movnt_sse2_clflushopt_wcbarrier(char *dest, const char *src, size_t len); void memmove_movnt_sse2_clwb_wcbarrier(char *dest, const char *src, size_t len); void memmove_movnt_sse2_empty_wcbarrier(char *dest, const char *src, size_t len); void memmove_movnt_sse2_noflush_wcbarrier(char *dest, const char *src, size_t len); void memset_mov_sse2_clflush(char *dest, int c, size_t len); void memset_mov_sse2_clflushopt(char *dest, int c, size_t len); void memset_mov_sse2_clwb(char *dest, int c, size_t len); void memset_mov_sse2_empty(char *dest, int c, size_t len); void memset_mov_sse2_noflush(char *dest, int c, size_t len); void memset_movnt_sse2_clflush_nobarrier(char *dest, int c, size_t len); void memset_movnt_sse2_clflushopt_nobarrier(char *dest, int c, size_t len); void memset_movnt_sse2_clwb_nobarrier(char *dest, int c, size_t len); void memset_movnt_sse2_empty_nobarrier(char *dest, int c, 
size_t len); void memset_movnt_sse2_noflush_nobarrier(char *dest, int c, size_t len); void memset_movnt_sse2_clflush_wcbarrier(char *dest, int c, size_t len); void memset_movnt_sse2_clflushopt_wcbarrier(char *dest, int c, size_t len); void memset_movnt_sse2_clwb_wcbarrier(char *dest, int c, size_t len); void memset_movnt_sse2_empty_wcbarrier(char *dest, int c, size_t len); void memset_movnt_sse2_noflush_wcbarrier(char *dest, int c, size_t len); #endif #if AVX_AVAILABLE void memmove_mov_avx_clflush(char *dest, const char *src, size_t len); void memmove_mov_avx_clflushopt(char *dest, const char *src, size_t len); void memmove_mov_avx_clwb(char *dest, const char *src, size_t len); void memmove_mov_avx_empty(char *dest, const char *src, size_t len); void memmove_mov_avx_noflush(char *dest, const char *src, size_t len); void memmove_movnt_avx_clflush_nobarrier(char *dest, const char *src, size_t len); void memmove_movnt_avx_clflushopt_nobarrier(char *dest, const char *src, size_t len); void memmove_movnt_avx_clwb_nobarrier(char *dest, const char *src, size_t len); void memmove_movnt_avx_empty_nobarrier(char *dest, const char *src, size_t len); void memmove_movnt_avx_noflush_nobarrier(char *dest, const char *src, size_t len); void memmove_movnt_avx_clflush_wcbarrier(char *dest, const char *src, size_t len); void memmove_movnt_avx_clflushopt_wcbarrier(char *dest, const char *src, size_t len); void memmove_movnt_avx_clwb_wcbarrier(char *dest, const char *src, size_t len); void memmove_movnt_avx_empty_wcbarrier(char *dest, const char *src, size_t len); void memmove_movnt_avx_noflush_wcbarrier(char *dest, const char *src, size_t len); void memset_mov_avx_clflush(char *dest, int c, size_t len); void memset_mov_avx_clflushopt(char *dest, int c, size_t len); void memset_mov_avx_clwb(char *dest, int c, size_t len); void memset_mov_avx_empty(char *dest, int c, size_t len); void memset_mov_avx_noflush(char *dest, int c, size_t len); void memset_movnt_avx_clflush_nobarrier(char 
*dest, int c, size_t len); void memset_movnt_avx_clflushopt_nobarrier(char *dest, int c, size_t len); void memset_movnt_avx_clwb_nobarrier(char *dest, int c, size_t len); void memset_movnt_avx_empty_nobarrier(char *dest, int c, size_t len); void memset_movnt_avx_noflush_nobarrier(char *dest, int c, size_t len); void memset_movnt_avx_clflush_wcbarrier(char *dest, int c, size_t len); void memset_movnt_avx_clflushopt_wcbarrier(char *dest, int c, size_t len); void memset_movnt_avx_clwb_wcbarrier(char *dest, int c, size_t len); void memset_movnt_avx_empty_wcbarrier(char *dest, int c, size_t len); void memset_movnt_avx_noflush_wcbarrier(char *dest, int c, size_t len); #endif #if AVX512F_AVAILABLE void memmove_mov_avx512f_clflush(char *dest, const char *src, size_t len); void memmove_mov_avx512f_clflushopt(char *dest, const char *src, size_t len); void memmove_mov_avx512f_clwb(char *dest, const char *src, size_t len); void memmove_mov_avx512f_empty(char *dest, const char *src, size_t len); void memmove_mov_avx512f_noflush(char *dest, const char *src, size_t len); void memmove_movnt_avx512f_clflush(char *dest, const char *src, size_t len); void memmove_movnt_avx512f_clflushopt(char *dest, const char *src, size_t len); void memmove_movnt_avx512f_clwb(char *dest, const char *src, size_t len); void memmove_movnt_avx512f_empty(char *dest, const char *src, size_t len); void memmove_movnt_avx512f_noflush(char *dest, const char *src, size_t len); void memset_mov_avx512f_clflush(char *dest, int c, size_t len); void memset_mov_avx512f_clflushopt(char *dest, int c, size_t len); void memset_mov_avx512f_clwb(char *dest, int c, size_t len); void memset_mov_avx512f_empty(char *dest, int c, size_t len); void memset_mov_avx512f_noflush(char *dest, int c, size_t len); void memset_movnt_avx512f_clflush(char *dest, int c, size_t len); void memset_movnt_avx512f_clflushopt(char *dest, int c, size_t len); void memset_movnt_avx512f_clwb(char *dest, int c, size_t len); void 
memset_movnt_avx512f_empty(char *dest, int c, size_t len); void memset_movnt_avx512f_noflush(char *dest, int c, size_t len); #endif extern size_t Movnt_threshold; /* * SSE2/AVX1 only: * * How much data WC buffers can hold at the same time, after which sfence * is needed to flush them. * * For some reason sfence affects performance of reading from DRAM, so we have * to prefetch the source data earlier. */ #define PERF_BARRIER_SIZE (12 * CACHELINE_SIZE /* 768 */) /* * How much to prefetch initially. * Cannot be bigger than the size of L1 (32kB) - PERF_BARRIER_SIZE. */ #define INI_PREFETCH_SIZE (64 * CACHELINE_SIZE /* 4096 */) static force_inline void prefetch(const char *addr) { _mm_prefetch(addr, _MM_HINT_T0); } static force_inline void prefetch_ini_fw(const char *src, size_t len) { size_t pref = MIN(len, INI_PREFETCH_SIZE); for (size_t i = 0; i < pref; i += CACHELINE_SIZE) prefetch(src + i); } static force_inline void prefetch_ini_bw(const char *src, size_t len) { size_t pref = MIN(len, INI_PREFETCH_SIZE); for (size_t i = 0; i < pref; i += CACHELINE_SIZE) prefetch(src - i); } static force_inline void prefetch_next_fw(const char *src, const char *srcend) { const char *begin = src + INI_PREFETCH_SIZE; const char *end = begin + PERF_BARRIER_SIZE; if (end > srcend) end = srcend; for (const char *addr = begin; addr < end; addr += CACHELINE_SIZE) prefetch(addr); } static force_inline void prefetch_next_bw(const char *src, const char *srcbegin) { const char *begin = src - INI_PREFETCH_SIZE; const char *end = begin - PERF_BARRIER_SIZE; if (end < srcbegin) end = srcbegin; for (const char *addr = begin; addr >= end; addr -= CACHELINE_SIZE) prefetch(addr); } #endif
9,351
33.131387
79
h
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmem2/x86_64/memset/memset_nt_sse2.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2017-2020, Intel Corporation */ #include <immintrin.h> #include <stddef.h> #include <stdint.h> #include "pmem2_arch.h" #include "flush.h" #include "memcpy_memset.h" #include "memset_sse2.h" #include "out.h" #include "valgrind_internal.h" static force_inline void mm_stream_si128(char *dest, unsigned idx, __m128i src) { _mm_stream_si128((__m128i *)dest + idx, src); barrier(); } static force_inline void memset_movnt4x64b(char *dest, __m128i xmm) { mm_stream_si128(dest, 0, xmm); mm_stream_si128(dest, 1, xmm); mm_stream_si128(dest, 2, xmm); mm_stream_si128(dest, 3, xmm); mm_stream_si128(dest, 4, xmm); mm_stream_si128(dest, 5, xmm); mm_stream_si128(dest, 6, xmm); mm_stream_si128(dest, 7, xmm); mm_stream_si128(dest, 8, xmm); mm_stream_si128(dest, 9, xmm); mm_stream_si128(dest, 10, xmm); mm_stream_si128(dest, 11, xmm); mm_stream_si128(dest, 12, xmm); mm_stream_si128(dest, 13, xmm); mm_stream_si128(dest, 14, xmm); mm_stream_si128(dest, 15, xmm); } static force_inline void memset_movnt2x64b(char *dest, __m128i xmm) { mm_stream_si128(dest, 0, xmm); mm_stream_si128(dest, 1, xmm); mm_stream_si128(dest, 2, xmm); mm_stream_si128(dest, 3, xmm); mm_stream_si128(dest, 4, xmm); mm_stream_si128(dest, 5, xmm); mm_stream_si128(dest, 6, xmm); mm_stream_si128(dest, 7, xmm); } static force_inline void memset_movnt1x64b(char *dest, __m128i xmm) { mm_stream_si128(dest, 0, xmm); mm_stream_si128(dest, 1, xmm); mm_stream_si128(dest, 2, xmm); mm_stream_si128(dest, 3, xmm); } static force_inline void memset_movnt1x32b(char *dest, __m128i xmm) { mm_stream_si128(dest, 0, xmm); mm_stream_si128(dest, 1, xmm); } static force_inline void memset_movnt1x16b(char *dest, __m128i xmm) { _mm_stream_si128((__m128i *)dest, xmm); } static force_inline void memset_movnt1x8b(char *dest, __m128i xmm) { uint64_t x = (uint64_t)_mm_cvtsi128_si64(xmm); _mm_stream_si64((long long *)dest, (long long)x); } static force_inline void memset_movnt1x4b(char *dest, __m128i 
xmm) { uint32_t x = (uint32_t)_mm_cvtsi128_si32(xmm); _mm_stream_si32((int *)dest, (int)x); } static force_inline void memset_movnt_sse2(char *dest, int c, size_t len, flush_fn flush, barrier_fn barrier, perf_barrier_fn perf_barrier) { char *orig_dest = dest; size_t orig_len = len; __m128i xmm = _mm_set1_epi8((char)c); size_t cnt = (uint64_t)dest & 63; if (cnt > 0) { cnt = 64 - cnt; if (cnt > len) cnt = len; memset_small_sse2(dest, xmm, cnt, flush); dest += cnt; len -= cnt; } while (len >= PERF_BARRIER_SIZE) { memset_movnt4x64b(dest, xmm); dest += 4 * 64; len -= 4 * 64; memset_movnt4x64b(dest, xmm); dest += 4 * 64; len -= 4 * 64; memset_movnt4x64b(dest, xmm); dest += 4 * 64; len -= 4 * 64; COMPILE_ERROR_ON(PERF_BARRIER_SIZE != (4 + 4 + 4) * 64); if (len) perf_barrier(); } while (len >= 4 * 64) { memset_movnt4x64b(dest, xmm); dest += 4 * 64; len -= 4 * 64; } if (len >= 2 * 64) { memset_movnt2x64b(dest, xmm); dest += 2 * 64; len -= 2 * 64; } if (len >= 1 * 64) { memset_movnt1x64b(dest, xmm); dest += 1 * 64; len -= 1 * 64; } if (len == 0) goto end; /* There's no point in using more than 1 nt store for 1 cache line. 
*/ if (util_is_pow2(len)) { if (len == 32) memset_movnt1x32b(dest, xmm); else if (len == 16) memset_movnt1x16b(dest, xmm); else if (len == 8) memset_movnt1x8b(dest, xmm); else if (len == 4) memset_movnt1x4b(dest, xmm); else goto nonnt; goto end; } nonnt: memset_small_sse2(dest, xmm, len, flush); end: barrier(); VALGRIND_DO_FLUSH(orig_dest, orig_len); } /* variants without perf_barrier */ void memset_movnt_sse2_noflush_nobarrier(char *dest, int c, size_t len) { LOG(15, "dest %p c %d len %zu", dest, c, len); memset_movnt_sse2(dest, c, len, noflush, barrier_after_ntstores, no_barrier); } void memset_movnt_sse2_empty_nobarrier(char *dest, int c, size_t len) { LOG(15, "dest %p c %d len %zu", dest, c, len); memset_movnt_sse2(dest, c, len, flush_empty_nolog, barrier_after_ntstores, no_barrier); } void memset_movnt_sse2_clflush_nobarrier(char *dest, int c, size_t len) { LOG(15, "dest %p c %d len %zu", dest, c, len); memset_movnt_sse2(dest, c, len, flush_clflush_nolog, barrier_after_ntstores, no_barrier); } void memset_movnt_sse2_clflushopt_nobarrier(char *dest, int c, size_t len) { LOG(15, "dest %p c %d len %zu", dest, c, len); memset_movnt_sse2(dest, c, len, flush_clflushopt_nolog, no_barrier_after_ntstores, no_barrier); } void memset_movnt_sse2_clwb_nobarrier(char *dest, int c, size_t len) { LOG(15, "dest %p c %d len %zu", dest, c, len); memset_movnt_sse2(dest, c, len, flush_clwb_nolog, no_barrier_after_ntstores, no_barrier); } /* variants with perf_barrier */ void memset_movnt_sse2_noflush_wcbarrier(char *dest, int c, size_t len) { LOG(15, "dest %p c %d len %zu", dest, c, len); memset_movnt_sse2(dest, c, len, noflush, barrier_after_ntstores, wc_barrier); } void memset_movnt_sse2_empty_wcbarrier(char *dest, int c, size_t len) { LOG(15, "dest %p c %d len %zu", dest, c, len); memset_movnt_sse2(dest, c, len, flush_empty_nolog, barrier_after_ntstores, wc_barrier); } void memset_movnt_sse2_clflush_wcbarrier(char *dest, int c, size_t len) { LOG(15, "dest %p c %d len %zu", 
dest, c, len); memset_movnt_sse2(dest, c, len, flush_clflush_nolog, barrier_after_ntstores, wc_barrier); } void memset_movnt_sse2_clflushopt_wcbarrier(char *dest, int c, size_t len) { LOG(15, "dest %p c %d len %zu", dest, c, len); memset_movnt_sse2(dest, c, len, flush_clflushopt_nolog, no_barrier_after_ntstores, wc_barrier); } void memset_movnt_sse2_clwb_wcbarrier(char *dest, int c, size_t len) { LOG(15, "dest %p c %d len %zu", dest, c, len); memset_movnt_sse2(dest, c, len, flush_clwb_nolog, no_barrier_after_ntstores, wc_barrier); }
5,912
20.580292
71
c
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmem2/x86_64/memset/memset_nt_avx.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2017-2020, Intel Corporation */ #include <immintrin.h> #include <stddef.h> #include <stdint.h> #include "pmem2_arch.h" #include "avx.h" #include "flush.h" #include "memcpy_memset.h" #include "memset_avx.h" #include "out.h" #include "valgrind_internal.h" static force_inline void mm256_stream_si256(char *dest, unsigned idx, __m256i src) { _mm256_stream_si256((__m256i *)dest + idx, src); barrier(); } static force_inline void memset_movnt8x64b(char *dest, __m256i ymm) { mm256_stream_si256(dest, 0, ymm); mm256_stream_si256(dest, 1, ymm); mm256_stream_si256(dest, 2, ymm); mm256_stream_si256(dest, 3, ymm); mm256_stream_si256(dest, 4, ymm); mm256_stream_si256(dest, 5, ymm); mm256_stream_si256(dest, 6, ymm); mm256_stream_si256(dest, 7, ymm); mm256_stream_si256(dest, 8, ymm); mm256_stream_si256(dest, 9, ymm); mm256_stream_si256(dest, 10, ymm); mm256_stream_si256(dest, 11, ymm); mm256_stream_si256(dest, 12, ymm); mm256_stream_si256(dest, 13, ymm); mm256_stream_si256(dest, 14, ymm); mm256_stream_si256(dest, 15, ymm); } static force_inline void memset_movnt4x64b(char *dest, __m256i ymm) { mm256_stream_si256(dest, 0, ymm); mm256_stream_si256(dest, 1, ymm); mm256_stream_si256(dest, 2, ymm); mm256_stream_si256(dest, 3, ymm); mm256_stream_si256(dest, 4, ymm); mm256_stream_si256(dest, 5, ymm); mm256_stream_si256(dest, 6, ymm); mm256_stream_si256(dest, 7, ymm); } static force_inline void memset_movnt2x64b(char *dest, __m256i ymm) { mm256_stream_si256(dest, 0, ymm); mm256_stream_si256(dest, 1, ymm); mm256_stream_si256(dest, 2, ymm); mm256_stream_si256(dest, 3, ymm); } static force_inline void memset_movnt1x64b(char *dest, __m256i ymm) { mm256_stream_si256(dest, 0, ymm); mm256_stream_si256(dest, 1, ymm); } static force_inline void memset_movnt1x32b(char *dest, __m256i ymm) { mm256_stream_si256(dest, 0, ymm); } static force_inline void memset_movnt1x16b(char *dest, __m256i ymm) { __m128i xmm0 = m256_get16b(ymm); 
_mm_stream_si128((__m128i *)dest, xmm0); } static force_inline void memset_movnt1x8b(char *dest, __m256i ymm) { uint64_t x = m256_get8b(ymm); _mm_stream_si64((long long *)dest, (long long)x); } static force_inline void memset_movnt1x4b(char *dest, __m256i ymm) { uint32_t x = m256_get4b(ymm); _mm_stream_si32((int *)dest, (int)x); } static force_inline void memset_movnt_avx(char *dest, int c, size_t len, flush_fn flush, barrier_fn barrier, perf_barrier_fn perf_barrier) { char *orig_dest = dest; size_t orig_len = len; __m256i ymm = _mm256_set1_epi8((char)c); size_t cnt = (uint64_t)dest & 63; if (cnt > 0) { cnt = 64 - cnt; if (cnt > len) cnt = len; memset_small_avx(dest, ymm, cnt, flush); dest += cnt; len -= cnt; } while (len >= PERF_BARRIER_SIZE) { memset_movnt8x64b(dest, ymm); dest += 8 * 64; len -= 8 * 64; memset_movnt4x64b(dest, ymm); dest += 4 * 64; len -= 4 * 64; COMPILE_ERROR_ON(PERF_BARRIER_SIZE != (8 + 4) * 64); if (len) perf_barrier(); } if (len >= 8 * 64) { memset_movnt8x64b(dest, ymm); dest += 8 * 64; len -= 8 * 64; } if (len >= 4 * 64) { memset_movnt4x64b(dest, ymm); dest += 4 * 64; len -= 4 * 64; } if (len >= 2 * 64) { memset_movnt2x64b(dest, ymm); dest += 2 * 64; len -= 2 * 64; } if (len >= 1 * 64) { memset_movnt1x64b(dest, ymm); dest += 1 * 64; len -= 1 * 64; } if (len == 0) goto end; /* There's no point in using more than 1 nt store for 1 cache line. 
*/ if (util_is_pow2(len)) { if (len == 32) memset_movnt1x32b(dest, ymm); else if (len == 16) memset_movnt1x16b(dest, ymm); else if (len == 8) memset_movnt1x8b(dest, ymm); else if (len == 4) memset_movnt1x4b(dest, ymm); else goto nonnt; goto end; } nonnt: memset_small_avx(dest, ymm, len, flush); end: avx_zeroupper(); barrier(); VALGRIND_DO_FLUSH(orig_dest, orig_len); } /* variants without perf_barrier */ void memset_movnt_avx_noflush_nobarrier(char *dest, int c, size_t len) { LOG(15, "dest %p c %d len %zu", dest, c, len); memset_movnt_avx(dest, c, len, noflush, barrier_after_ntstores, no_barrier); } void memset_movnt_avx_empty_nobarrier(char *dest, int c, size_t len) { LOG(15, "dest %p c %d len %zu", dest, c, len); memset_movnt_avx(dest, c, len, flush_empty_nolog, barrier_after_ntstores, no_barrier); } void memset_movnt_avx_clflush_nobarrier(char *dest, int c, size_t len) { LOG(15, "dest %p c %d len %zu", dest, c, len); memset_movnt_avx(dest, c, len, flush_clflush_nolog, barrier_after_ntstores, no_barrier); } void memset_movnt_avx_clflushopt_nobarrier(char *dest, int c, size_t len) { LOG(15, "dest %p c %d len %zu", dest, c, len); memset_movnt_avx(dest, c, len, flush_clflushopt_nolog, no_barrier_after_ntstores, no_barrier); } void memset_movnt_avx_clwb_nobarrier(char *dest, int c, size_t len) { LOG(15, "dest %p c %d len %zu", dest, c, len); memset_movnt_avx(dest, c, len, flush_clwb_nolog, no_barrier_after_ntstores, no_barrier); } /* variants with perf_barrier */ void memset_movnt_avx_noflush_wcbarrier(char *dest, int c, size_t len) { LOG(15, "dest %p c %d len %zu", dest, c, len); memset_movnt_avx(dest, c, len, noflush, barrier_after_ntstores, wc_barrier); } void memset_movnt_avx_empty_wcbarrier(char *dest, int c, size_t len) { LOG(15, "dest %p c %d len %zu", dest, c, len); memset_movnt_avx(dest, c, len, flush_empty_nolog, barrier_after_ntstores, wc_barrier); } void memset_movnt_avx_clflush_wcbarrier(char *dest, int c, size_t len) { LOG(15, "dest %p c %d len %zu", 
dest, c, len); memset_movnt_avx(dest, c, len, flush_clflush_nolog, barrier_after_ntstores, wc_barrier); } void memset_movnt_avx_clflushopt_wcbarrier(char *dest, int c, size_t len) { LOG(15, "dest %p c %d len %zu", dest, c, len); memset_movnt_avx(dest, c, len, flush_clflushopt_nolog, no_barrier_after_ntstores, wc_barrier); } void memset_movnt_avx_clwb_wcbarrier(char *dest, int c, size_t len) { LOG(15, "dest %p c %d len %zu", dest, c, len); memset_movnt_avx(dest, c, len, flush_clwb_nolog, no_barrier_after_ntstores, wc_barrier); }
6,151
20.43554
71
c
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmem2/x86_64/memset/memset_t_avx512f.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2017-2020, Intel Corporation */ #include <immintrin.h> #include <stddef.h> #include <stdint.h> #include "pmem2_arch.h" #include "avx.h" #include "flush.h" #include "memcpy_memset.h" #include "memset_avx512f.h" static force_inline void mm512_store_si512(char *dest, unsigned idx, __m512i src) { _mm512_store_si512((__m512i *)dest + idx, src); } static force_inline void memset_mov32x64b(char *dest, __m512i zmm, flush64b_fn flush64b) { mm512_store_si512(dest, 0, zmm); mm512_store_si512(dest, 1, zmm); mm512_store_si512(dest, 2, zmm); mm512_store_si512(dest, 3, zmm); mm512_store_si512(dest, 4, zmm); mm512_store_si512(dest, 5, zmm); mm512_store_si512(dest, 6, zmm); mm512_store_si512(dest, 7, zmm); mm512_store_si512(dest, 8, zmm); mm512_store_si512(dest, 9, zmm); mm512_store_si512(dest, 10, zmm); mm512_store_si512(dest, 11, zmm); mm512_store_si512(dest, 12, zmm); mm512_store_si512(dest, 13, zmm); mm512_store_si512(dest, 14, zmm); mm512_store_si512(dest, 15, zmm); mm512_store_si512(dest, 16, zmm); mm512_store_si512(dest, 17, zmm); mm512_store_si512(dest, 18, zmm); mm512_store_si512(dest, 19, zmm); mm512_store_si512(dest, 20, zmm); mm512_store_si512(dest, 21, zmm); mm512_store_si512(dest, 22, zmm); mm512_store_si512(dest, 23, zmm); mm512_store_si512(dest, 24, zmm); mm512_store_si512(dest, 25, zmm); mm512_store_si512(dest, 26, zmm); mm512_store_si512(dest, 27, zmm); mm512_store_si512(dest, 28, zmm); mm512_store_si512(dest, 29, zmm); mm512_store_si512(dest, 30, zmm); mm512_store_si512(dest, 31, zmm); flush64b(dest + 0 * 64); flush64b(dest + 1 * 64); flush64b(dest + 2 * 64); flush64b(dest + 3 * 64); flush64b(dest + 4 * 64); flush64b(dest + 5 * 64); flush64b(dest + 6 * 64); flush64b(dest + 7 * 64); flush64b(dest + 8 * 64); flush64b(dest + 9 * 64); flush64b(dest + 10 * 64); flush64b(dest + 11 * 64); flush64b(dest + 12 * 64); flush64b(dest + 13 * 64); flush64b(dest + 14 * 64); flush64b(dest + 15 * 64); flush64b(dest + 16 * 64); 
flush64b(dest + 17 * 64); flush64b(dest + 18 * 64); flush64b(dest + 19 * 64); flush64b(dest + 20 * 64); flush64b(dest + 21 * 64); flush64b(dest + 22 * 64); flush64b(dest + 23 * 64); flush64b(dest + 24 * 64); flush64b(dest + 25 * 64); flush64b(dest + 26 * 64); flush64b(dest + 27 * 64); flush64b(dest + 28 * 64); flush64b(dest + 29 * 64); flush64b(dest + 30 * 64); flush64b(dest + 31 * 64); } static force_inline void memset_mov16x64b(char *dest, __m512i zmm, flush64b_fn flush64b) { mm512_store_si512(dest, 0, zmm); mm512_store_si512(dest, 1, zmm); mm512_store_si512(dest, 2, zmm); mm512_store_si512(dest, 3, zmm); mm512_store_si512(dest, 4, zmm); mm512_store_si512(dest, 5, zmm); mm512_store_si512(dest, 6, zmm); mm512_store_si512(dest, 7, zmm); mm512_store_si512(dest, 8, zmm); mm512_store_si512(dest, 9, zmm); mm512_store_si512(dest, 10, zmm); mm512_store_si512(dest, 11, zmm); mm512_store_si512(dest, 12, zmm); mm512_store_si512(dest, 13, zmm); mm512_store_si512(dest, 14, zmm); mm512_store_si512(dest, 15, zmm); flush64b(dest + 0 * 64); flush64b(dest + 1 * 64); flush64b(dest + 2 * 64); flush64b(dest + 3 * 64); flush64b(dest + 4 * 64); flush64b(dest + 5 * 64); flush64b(dest + 6 * 64); flush64b(dest + 7 * 64); flush64b(dest + 8 * 64); flush64b(dest + 9 * 64); flush64b(dest + 10 * 64); flush64b(dest + 11 * 64); flush64b(dest + 12 * 64); flush64b(dest + 13 * 64); flush64b(dest + 14 * 64); flush64b(dest + 15 * 64); } static force_inline void memset_mov8x64b(char *dest, __m512i zmm, flush64b_fn flush64b) { mm512_store_si512(dest, 0, zmm); mm512_store_si512(dest, 1, zmm); mm512_store_si512(dest, 2, zmm); mm512_store_si512(dest, 3, zmm); mm512_store_si512(dest, 4, zmm); mm512_store_si512(dest, 5, zmm); mm512_store_si512(dest, 6, zmm); mm512_store_si512(dest, 7, zmm); flush64b(dest + 0 * 64); flush64b(dest + 1 * 64); flush64b(dest + 2 * 64); flush64b(dest + 3 * 64); flush64b(dest + 4 * 64); flush64b(dest + 5 * 64); flush64b(dest + 6 * 64); flush64b(dest + 7 * 64); } static 
force_inline void memset_mov4x64b(char *dest, __m512i zmm, flush64b_fn flush64b) { mm512_store_si512(dest, 0, zmm); mm512_store_si512(dest, 1, zmm); mm512_store_si512(dest, 2, zmm); mm512_store_si512(dest, 3, zmm); flush64b(dest + 0 * 64); flush64b(dest + 1 * 64); flush64b(dest + 2 * 64); flush64b(dest + 3 * 64); } static force_inline void memset_mov2x64b(char *dest, __m512i zmm, flush64b_fn flush64b) { mm512_store_si512(dest, 0, zmm); mm512_store_si512(dest, 1, zmm); flush64b(dest + 0 * 64); flush64b(dest + 1 * 64); } static force_inline void memset_mov1x64b(char *dest, __m512i zmm, flush64b_fn flush64b) { mm512_store_si512(dest, 0, zmm); flush64b(dest + 0 * 64); } static force_inline void memset_mov_avx512f(char *dest, int c, size_t len, flush_fn flush, flush64b_fn flush64b) { __m512i zmm = _mm512_set1_epi8((char)c); /* See comment in memset_movnt_avx512f */ __m256i ymm = _mm256_set1_epi8((char)c); size_t cnt = (uint64_t)dest & 63; if (cnt > 0) { cnt = 64 - cnt; if (cnt > len) cnt = len; memset_small_avx512f(dest, ymm, cnt, flush); dest += cnt; len -= cnt; } while (len >= 32 * 64) { memset_mov32x64b(dest, zmm, flush64b); dest += 32 * 64; len -= 32 * 64; } if (len >= 16 * 64) { memset_mov16x64b(dest, zmm, flush64b); dest += 16 * 64; len -= 16 * 64; } if (len >= 8 * 64) { memset_mov8x64b(dest, zmm, flush64b); dest += 8 * 64; len -= 8 * 64; } if (len >= 4 * 64) { memset_mov4x64b(dest, zmm, flush64b); dest += 4 * 64; len -= 4 * 64; } if (len >= 2 * 64) { memset_mov2x64b(dest, zmm, flush64b); dest += 2 * 64; len -= 2 * 64; } if (len >= 1 * 64) { memset_mov1x64b(dest, zmm, flush64b); dest += 1 * 64; len -= 1 * 64; } if (len) memset_small_avx512f(dest, ymm, len, flush); avx_zeroupper(); } void memset_mov_avx512f_noflush(char *dest, int c, size_t len) { LOG(15, "dest %p c %d len %zu", dest, c, len); memset_mov_avx512f(dest, c, len, noflush, noflush64b); } void memset_mov_avx512f_empty(char *dest, int c, size_t len) { LOG(15, "dest %p c %d len %zu", dest, c, len); 
memset_mov_avx512f(dest, c, len, flush_empty_nolog, flush64b_empty); } void memset_mov_avx512f_clflush(char *dest, int c, size_t len) { LOG(15, "dest %p c %d len %zu", dest, c, len); memset_mov_avx512f(dest, c, len, flush_clflush_nolog, pmem_clflush); } void memset_mov_avx512f_clflushopt(char *dest, int c, size_t len) { LOG(15, "dest %p c %d len %zu", dest, c, len); memset_mov_avx512f(dest, c, len, flush_clflushopt_nolog, pmem_clflushopt); } void memset_mov_avx512f_clwb(char *dest, int c, size_t len) { LOG(15, "dest %p c %d len %zu", dest, c, len); memset_mov_avx512f(dest, c, len, flush_clwb_nolog, pmem_clwb); }
6,851
22.958042
69
c
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmem2/x86_64/memset/memset_nt_avx512f.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2017-2020, Intel Corporation */ #include <immintrin.h> #include <stddef.h> #include <stdint.h> #include "pmem2_arch.h" #include "avx.h" #include "flush.h" #include "memcpy_memset.h" #include "memset_avx512f.h" #include "out.h" #include "util.h" #include "valgrind_internal.h" static force_inline void mm512_stream_si512(char *dest, unsigned idx, __m512i src) { _mm512_stream_si512((__m512i *)dest + idx, src); barrier(); } static force_inline void memset_movnt32x64b(char *dest, __m512i zmm) { mm512_stream_si512(dest, 0, zmm); mm512_stream_si512(dest, 1, zmm); mm512_stream_si512(dest, 2, zmm); mm512_stream_si512(dest, 3, zmm); mm512_stream_si512(dest, 4, zmm); mm512_stream_si512(dest, 5, zmm); mm512_stream_si512(dest, 6, zmm); mm512_stream_si512(dest, 7, zmm); mm512_stream_si512(dest, 8, zmm); mm512_stream_si512(dest, 9, zmm); mm512_stream_si512(dest, 10, zmm); mm512_stream_si512(dest, 11, zmm); mm512_stream_si512(dest, 12, zmm); mm512_stream_si512(dest, 13, zmm); mm512_stream_si512(dest, 14, zmm); mm512_stream_si512(dest, 15, zmm); mm512_stream_si512(dest, 16, zmm); mm512_stream_si512(dest, 17, zmm); mm512_stream_si512(dest, 18, zmm); mm512_stream_si512(dest, 19, zmm); mm512_stream_si512(dest, 20, zmm); mm512_stream_si512(dest, 21, zmm); mm512_stream_si512(dest, 22, zmm); mm512_stream_si512(dest, 23, zmm); mm512_stream_si512(dest, 24, zmm); mm512_stream_si512(dest, 25, zmm); mm512_stream_si512(dest, 26, zmm); mm512_stream_si512(dest, 27, zmm); mm512_stream_si512(dest, 28, zmm); mm512_stream_si512(dest, 29, zmm); mm512_stream_si512(dest, 30, zmm); mm512_stream_si512(dest, 31, zmm); } static force_inline void memset_movnt16x64b(char *dest, __m512i zmm) { mm512_stream_si512(dest, 0, zmm); mm512_stream_si512(dest, 1, zmm); mm512_stream_si512(dest, 2, zmm); mm512_stream_si512(dest, 3, zmm); mm512_stream_si512(dest, 4, zmm); mm512_stream_si512(dest, 5, zmm); mm512_stream_si512(dest, 6, zmm); mm512_stream_si512(dest, 7, 
zmm); mm512_stream_si512(dest, 8, zmm); mm512_stream_si512(dest, 9, zmm); mm512_stream_si512(dest, 10, zmm); mm512_stream_si512(dest, 11, zmm); mm512_stream_si512(dest, 12, zmm); mm512_stream_si512(dest, 13, zmm); mm512_stream_si512(dest, 14, zmm); mm512_stream_si512(dest, 15, zmm); } static force_inline void memset_movnt8x64b(char *dest, __m512i zmm) { mm512_stream_si512(dest, 0, zmm); mm512_stream_si512(dest, 1, zmm); mm512_stream_si512(dest, 2, zmm); mm512_stream_si512(dest, 3, zmm); mm512_stream_si512(dest, 4, zmm); mm512_stream_si512(dest, 5, zmm); mm512_stream_si512(dest, 6, zmm); mm512_stream_si512(dest, 7, zmm); } static force_inline void memset_movnt4x64b(char *dest, __m512i zmm) { mm512_stream_si512(dest, 0, zmm); mm512_stream_si512(dest, 1, zmm); mm512_stream_si512(dest, 2, zmm); mm512_stream_si512(dest, 3, zmm); } static force_inline void memset_movnt2x64b(char *dest, __m512i zmm) { mm512_stream_si512(dest, 0, zmm); mm512_stream_si512(dest, 1, zmm); } static force_inline void memset_movnt1x64b(char *dest, __m512i zmm) { mm512_stream_si512(dest, 0, zmm); } static force_inline void memset_movnt1x32b(char *dest, __m256i ymm) { _mm256_stream_si256((__m256i *)dest, ymm); } static force_inline void memset_movnt1x16b(char *dest, __m256i ymm) { __m128i xmm = _mm256_extracti128_si256(ymm, 0); _mm_stream_si128((__m128i *)dest, xmm); } static force_inline void memset_movnt1x8b(char *dest, __m256i ymm) { uint64_t x = m256_get8b(ymm); _mm_stream_si64((long long *)dest, (long long)x); } static force_inline void memset_movnt1x4b(char *dest, __m256i ymm) { uint32_t x = m256_get4b(ymm); _mm_stream_si32((int *)dest, (int)x); } static force_inline void memset_movnt_avx512f(char *dest, int c, size_t len, flush_fn flush, barrier_fn barrier) { char *orig_dest = dest; size_t orig_len = len; __m512i zmm = _mm512_set1_epi8((char)c); /* * Can't use _mm512_extracti64x4_epi64, because some versions of gcc * crash. 
https://gcc.gnu.org/bugzilla/show_bug.cgi?id=82887 */ __m256i ymm = _mm256_set1_epi8((char)c); size_t cnt = (uint64_t)dest & 63; if (cnt > 0) { cnt = 64 - cnt; if (cnt > len) cnt = len; memset_small_avx512f(dest, ymm, cnt, flush); dest += cnt; len -= cnt; } while (len >= 32 * 64) { memset_movnt32x64b(dest, zmm); dest += 32 * 64; len -= 32 * 64; } if (len >= 16 * 64) { memset_movnt16x64b(dest, zmm); dest += 16 * 64; len -= 16 * 64; } if (len >= 8 * 64) { memset_movnt8x64b(dest, zmm); dest += 8 * 64; len -= 8 * 64; } if (len >= 4 * 64) { memset_movnt4x64b(dest, zmm); dest += 4 * 64; len -= 4 * 64; } if (len >= 2 * 64) { memset_movnt2x64b(dest, zmm); dest += 2 * 64; len -= 2 * 64; } if (len >= 1 * 64) { memset_movnt1x64b(dest, zmm); dest += 1 * 64; len -= 1 * 64; } if (len == 0) goto end; /* There's no point in using more than 1 nt store for 1 cache line. */ if (util_is_pow2(len)) { if (len == 32) memset_movnt1x32b(dest, ymm); else if (len == 16) memset_movnt1x16b(dest, ymm); else if (len == 8) memset_movnt1x8b(dest, ymm); else if (len == 4) memset_movnt1x4b(dest, ymm); else goto nonnt; goto end; } nonnt: memset_small_avx512f(dest, ymm, len, flush); end: avx_zeroupper(); barrier(); VALGRIND_DO_FLUSH(orig_dest, orig_len); } void memset_movnt_avx512f_noflush(char *dest, int c, size_t len) { LOG(15, "dest %p c %d len %zu", dest, c, len); memset_movnt_avx512f(dest, c, len, noflush, barrier_after_ntstores); } void memset_movnt_avx512f_empty(char *dest, int c, size_t len) { LOG(15, "dest %p c %d len %zu", dest, c, len); memset_movnt_avx512f(dest, c, len, flush_empty_nolog, barrier_after_ntstores); } void memset_movnt_avx512f_clflush(char *dest, int c, size_t len) { LOG(15, "dest %p c %d len %zu", dest, c, len); memset_movnt_avx512f(dest, c, len, flush_clflush_nolog, barrier_after_ntstores); } void memset_movnt_avx512f_clflushopt(char *dest, int c, size_t len) { LOG(15, "dest %p c %d len %zu", dest, c, len); memset_movnt_avx512f(dest, c, len, flush_clflushopt_nolog, 
no_barrier_after_ntstores); } void memset_movnt_avx512f_clwb(char *dest, int c, size_t len) { LOG(15, "dest %p c %d len %zu", dest, c, len); memset_movnt_avx512f(dest, c, len, flush_clwb_nolog, no_barrier_after_ntstores); }
6,397
21.607774
71
c
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmem2/x86_64/memset/memset_t_sse2.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2017-2020, Intel Corporation */ #include <immintrin.h> #include <stddef.h> #include <stdint.h> #include "pmem2_arch.h" #include "flush.h" #include "memcpy_memset.h" #include "memset_sse2.h" static force_inline void mm_store_si128(char *dest, unsigned idx, __m128i src) { _mm_store_si128((__m128i *)dest + idx, src); } static force_inline void memset_mov4x64b(char *dest, __m128i xmm, flush64b_fn flush64b) { mm_store_si128(dest, 0, xmm); mm_store_si128(dest, 1, xmm); mm_store_si128(dest, 2, xmm); mm_store_si128(dest, 3, xmm); mm_store_si128(dest, 4, xmm); mm_store_si128(dest, 5, xmm); mm_store_si128(dest, 6, xmm); mm_store_si128(dest, 7, xmm); mm_store_si128(dest, 8, xmm); mm_store_si128(dest, 9, xmm); mm_store_si128(dest, 10, xmm); mm_store_si128(dest, 11, xmm); mm_store_si128(dest, 12, xmm); mm_store_si128(dest, 13, xmm); mm_store_si128(dest, 14, xmm); mm_store_si128(dest, 15, xmm); flush64b(dest + 0 * 64); flush64b(dest + 1 * 64); flush64b(dest + 2 * 64); flush64b(dest + 3 * 64); } static force_inline void memset_mov2x64b(char *dest, __m128i xmm, flush64b_fn flush64b) { mm_store_si128(dest, 0, xmm); mm_store_si128(dest, 1, xmm); mm_store_si128(dest, 2, xmm); mm_store_si128(dest, 3, xmm); mm_store_si128(dest, 4, xmm); mm_store_si128(dest, 5, xmm); mm_store_si128(dest, 6, xmm); mm_store_si128(dest, 7, xmm); flush64b(dest + 0 * 64); flush64b(dest + 1 * 64); } static force_inline void memset_mov1x64b(char *dest, __m128i xmm, flush64b_fn flush64b) { mm_store_si128(dest, 0, xmm); mm_store_si128(dest, 1, xmm); mm_store_si128(dest, 2, xmm); mm_store_si128(dest, 3, xmm); flush64b(dest + 0 * 64); } static force_inline void memset_mov_sse2(char *dest, int c, size_t len, flush_fn flush, flush64b_fn flush64b) { __m128i xmm = _mm_set1_epi8((char)c); size_t cnt = (uint64_t)dest & 63; if (cnt > 0) { cnt = 64 - cnt; if (cnt > len) cnt = len; memset_small_sse2(dest, xmm, cnt, flush); dest += cnt; len -= cnt; } while (len >= 4 * 
64) { memset_mov4x64b(dest, xmm, flush64b); dest += 4 * 64; len -= 4 * 64; } if (len >= 2 * 64) { memset_mov2x64b(dest, xmm, flush64b); dest += 2 * 64; len -= 2 * 64; } if (len >= 1 * 64) { memset_mov1x64b(dest, xmm, flush64b); dest += 1 * 64; len -= 1 * 64; } if (len) memset_small_sse2(dest, xmm, len, flush); } void memset_mov_sse2_noflush(char *dest, int c, size_t len) { LOG(15, "dest %p c %d len %zu", dest, c, len); memset_mov_sse2(dest, c, len, noflush, noflush64b); } void memset_mov_sse2_empty(char *dest, int c, size_t len) { LOG(15, "dest %p c %d len %zu", dest, c, len); memset_mov_sse2(dest, c, len, flush_empty_nolog, flush64b_empty); } void memset_mov_sse2_clflush(char *dest, int c, size_t len) { LOG(15, "dest %p c %d len %zu", dest, c, len); memset_mov_sse2(dest, c, len, flush_clflush_nolog, pmem_clflush); } void memset_mov_sse2_clflushopt(char *dest, int c, size_t len) { LOG(15, "dest %p c %d len %zu", dest, c, len); memset_mov_sse2(dest, c, len, flush_clflushopt_nolog, pmem_clflushopt); } void memset_mov_sse2_clwb(char *dest, int c, size_t len) { LOG(15, "dest %p c %d len %zu", dest, c, len); memset_mov_sse2(dest, c, len, flush_clwb_nolog, pmem_clwb); }
3,304
20.461039
66
c
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmem2/x86_64/memset/memset_sse2.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2017-2020, Intel Corporation */ #ifndef PMEM2_MEMSET_SSE2_H #define PMEM2_MEMSET_SSE2_H #include <xmmintrin.h> #include <stddef.h> #include <stdint.h> #include <string.h> #include "out.h" static force_inline void memset_small_sse2_noflush(char *dest, __m128i xmm, size_t len) { ASSERT(len <= 64); if (len <= 8) goto le8; if (len <= 32) goto le32; if (len > 48) { /* 49..64 */ _mm_storeu_si128((__m128i *)(dest + 0), xmm); _mm_storeu_si128((__m128i *)(dest + 16), xmm); _mm_storeu_si128((__m128i *)(dest + 32), xmm); _mm_storeu_si128((__m128i *)(dest + len - 16), xmm); return; } /* 33..48 */ _mm_storeu_si128((__m128i *)(dest + 0), xmm); _mm_storeu_si128((__m128i *)(dest + 16), xmm); _mm_storeu_si128((__m128i *)(dest + len - 16), xmm); return; le32: if (len > 16) { /* 17..32 */ _mm_storeu_si128((__m128i *)(dest + 0), xmm); _mm_storeu_si128((__m128i *)(dest + len - 16), xmm); return; } /* 9..16 */ uint64_t d8 = (uint64_t)_mm_cvtsi128_si64(xmm); *(ua_uint64_t *)dest = d8; *(ua_uint64_t *)(dest + len - 8) = d8; return; le8: if (len <= 2) goto le2; if (len > 4) { /* 5..8 */ uint32_t d4 = (uint32_t)_mm_cvtsi128_si32(xmm); *(ua_uint32_t *)dest = d4; *(ua_uint32_t *)(dest + len - 4) = d4; return; } /* 3..4 */ uint16_t d2 = (uint16_t)(uint32_t)_mm_cvtsi128_si32(xmm); *(ua_uint16_t *)dest = d2; *(ua_uint16_t *)(dest + len - 2) = d2; return; le2: if (len == 2) { uint16_t d2 = (uint16_t)(uint32_t)_mm_cvtsi128_si32(xmm); *(ua_uint16_t *)dest = d2; return; } *(uint8_t *)dest = (uint8_t)_mm_cvtsi128_si32(xmm); } static force_inline void memset_small_sse2(char *dest, __m128i xmm, size_t len, flush_fn flush) { /* * pmemcheck complains about "overwritten stores before they were made * persistent" for overlapping stores (last instruction in each code * path) in the optimized version. * libc's memset also does that, so we can't use it here. 
*/ if (On_pmemcheck) { memset_nodrain_generic(dest, (uint8_t)_mm_cvtsi128_si32(xmm), len, PMEM2_F_MEM_NOFLUSH, NULL); } else { memset_small_sse2_noflush(dest, xmm, len); } flush(dest, len); } #endif
2,213
20.085714
71
h
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmem2/x86_64/memset/memset_t_avx.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2017-2020, Intel Corporation */ #include <immintrin.h> #include <stddef.h> #include <stdint.h> #include "pmem2_arch.h" #include "avx.h" #include "flush.h" #include "memcpy_memset.h" #include "memset_avx.h" static force_inline void mm256_store_si256(char *dest, unsigned idx, __m256i src) { _mm256_store_si256((__m256i *)dest + idx, src); } static force_inline void memset_mov8x64b(char *dest, __m256i ymm, flush64b_fn flush64b) { mm256_store_si256(dest, 0, ymm); mm256_store_si256(dest, 1, ymm); mm256_store_si256(dest, 2, ymm); mm256_store_si256(dest, 3, ymm); mm256_store_si256(dest, 4, ymm); mm256_store_si256(dest, 5, ymm); mm256_store_si256(dest, 6, ymm); mm256_store_si256(dest, 7, ymm); mm256_store_si256(dest, 8, ymm); mm256_store_si256(dest, 9, ymm); mm256_store_si256(dest, 10, ymm); mm256_store_si256(dest, 11, ymm); mm256_store_si256(dest, 12, ymm); mm256_store_si256(dest, 13, ymm); mm256_store_si256(dest, 14, ymm); mm256_store_si256(dest, 15, ymm); flush64b(dest + 0 * 64); flush64b(dest + 1 * 64); flush64b(dest + 2 * 64); flush64b(dest + 3 * 64); flush64b(dest + 4 * 64); flush64b(dest + 5 * 64); flush64b(dest + 6 * 64); flush64b(dest + 7 * 64); } static force_inline void memset_mov4x64b(char *dest, __m256i ymm, flush64b_fn flush64b) { mm256_store_si256(dest, 0, ymm); mm256_store_si256(dest, 1, ymm); mm256_store_si256(dest, 2, ymm); mm256_store_si256(dest, 3, ymm); mm256_store_si256(dest, 4, ymm); mm256_store_si256(dest, 5, ymm); mm256_store_si256(dest, 6, ymm); mm256_store_si256(dest, 7, ymm); flush64b(dest + 0 * 64); flush64b(dest + 1 * 64); flush64b(dest + 2 * 64); flush64b(dest + 3 * 64); } static force_inline void memset_mov2x64b(char *dest, __m256i ymm, flush64b_fn flush64b) { mm256_store_si256(dest, 0, ymm); mm256_store_si256(dest, 1, ymm); mm256_store_si256(dest, 2, ymm); mm256_store_si256(dest, 3, ymm); flush64b(dest + 0 * 64); flush64b(dest + 1 * 64); } static force_inline void memset_mov1x64b(char 
*dest, __m256i ymm, flush64b_fn flush64b) { mm256_store_si256(dest, 0, ymm); mm256_store_si256(dest, 1, ymm); flush64b(dest + 0 * 64); } static force_inline void memset_mov_avx(char *dest, int c, size_t len, flush_fn flush, flush64b_fn flush64b) { __m256i ymm = _mm256_set1_epi8((char)c); size_t cnt = (uint64_t)dest & 63; if (cnt > 0) { cnt = 64 - cnt; if (cnt > len) cnt = len; memset_small_avx(dest, ymm, cnt, flush); dest += cnt; len -= cnt; } while (len >= 8 * 64) { memset_mov8x64b(dest, ymm, flush64b); dest += 8 * 64; len -= 8 * 64; } if (len >= 4 * 64) { memset_mov4x64b(dest, ymm, flush64b); dest += 4 * 64; len -= 4 * 64; } if (len >= 2 * 64) { memset_mov2x64b(dest, ymm, flush64b); dest += 2 * 64; len -= 2 * 64; } if (len >= 1 * 64) { memset_mov1x64b(dest, ymm, flush64b); dest += 1 * 64; len -= 1 * 64; } if (len) memset_small_avx(dest, ymm, len, flush); avx_zeroupper(); } void memset_mov_avx_noflush(char *dest, int c, size_t len) { LOG(15, "dest %p c %d len %zu", dest, c, len); memset_mov_avx(dest, c, len, noflush, noflush64b); } void memset_mov_avx_empty(char *dest, int c, size_t len) { LOG(15, "dest %p c %d len %zu", dest, c, len); memset_mov_avx(dest, c, len, flush_empty_nolog, flush64b_empty); } void memset_mov_avx_clflush(char *dest, int c, size_t len) { LOG(15, "dest %p c %d len %zu", dest, c, len); memset_mov_avx(dest, c, len, flush_clflush_nolog, pmem_clflush); } void memset_mov_avx_clflushopt(char *dest, int c, size_t len) { LOG(15, "dest %p c %d len %zu", dest, c, len); memset_mov_avx(dest, c, len, flush_clflushopt_nolog, pmem_clflushopt); } void memset_mov_avx_clwb(char *dest, int c, size_t len) { LOG(15, "dest %p c %d len %zu", dest, c, len); memset_mov_avx(dest, c, len, flush_clwb_nolog, pmem_clwb); }
3,890
20.73743
65
c
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmem2/x86_64/memcpy/memcpy_t_sse2.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2017-2020, Intel Corporation */ #include <immintrin.h> #include <stddef.h> #include <stdint.h> #include "pmem2_arch.h" #include "flush.h" #include "memcpy_memset.h" #include "memcpy_sse2.h" #include "out.h" static force_inline __m128i mm_loadu_si128(const char *src, unsigned idx) { return _mm_loadu_si128((const __m128i *)src + idx); } static force_inline void mm_store_si128(char *dest, unsigned idx, __m128i src) { _mm_store_si128((__m128i *)dest + idx, src); } static force_inline void memmove_mov4x64b(char *dest, const char *src, flush64b_fn flush64b) { __m128i xmm0 = mm_loadu_si128(src, 0); __m128i xmm1 = mm_loadu_si128(src, 1); __m128i xmm2 = mm_loadu_si128(src, 2); __m128i xmm3 = mm_loadu_si128(src, 3); __m128i xmm4 = mm_loadu_si128(src, 4); __m128i xmm5 = mm_loadu_si128(src, 5); __m128i xmm6 = mm_loadu_si128(src, 6); __m128i xmm7 = mm_loadu_si128(src, 7); __m128i xmm8 = mm_loadu_si128(src, 8); __m128i xmm9 = mm_loadu_si128(src, 9); __m128i xmm10 = mm_loadu_si128(src, 10); __m128i xmm11 = mm_loadu_si128(src, 11); __m128i xmm12 = mm_loadu_si128(src, 12); __m128i xmm13 = mm_loadu_si128(src, 13); __m128i xmm14 = mm_loadu_si128(src, 14); __m128i xmm15 = mm_loadu_si128(src, 15); mm_store_si128(dest, 0, xmm0); mm_store_si128(dest, 1, xmm1); mm_store_si128(dest, 2, xmm2); mm_store_si128(dest, 3, xmm3); mm_store_si128(dest, 4, xmm4); mm_store_si128(dest, 5, xmm5); mm_store_si128(dest, 6, xmm6); mm_store_si128(dest, 7, xmm7); mm_store_si128(dest, 8, xmm8); mm_store_si128(dest, 9, xmm9); mm_store_si128(dest, 10, xmm10); mm_store_si128(dest, 11, xmm11); mm_store_si128(dest, 12, xmm12); mm_store_si128(dest, 13, xmm13); mm_store_si128(dest, 14, xmm14); mm_store_si128(dest, 15, xmm15); flush64b(dest + 0 * 64); flush64b(dest + 1 * 64); flush64b(dest + 2 * 64); flush64b(dest + 3 * 64); } static force_inline void memmove_mov2x64b(char *dest, const char *src, flush64b_fn flush64b) { __m128i xmm0 = mm_loadu_si128(src, 0); 
__m128i xmm1 = mm_loadu_si128(src, 1); __m128i xmm2 = mm_loadu_si128(src, 2); __m128i xmm3 = mm_loadu_si128(src, 3); __m128i xmm4 = mm_loadu_si128(src, 4); __m128i xmm5 = mm_loadu_si128(src, 5); __m128i xmm6 = mm_loadu_si128(src, 6); __m128i xmm7 = mm_loadu_si128(src, 7); mm_store_si128(dest, 0, xmm0); mm_store_si128(dest, 1, xmm1); mm_store_si128(dest, 2, xmm2); mm_store_si128(dest, 3, xmm3); mm_store_si128(dest, 4, xmm4); mm_store_si128(dest, 5, xmm5); mm_store_si128(dest, 6, xmm6); mm_store_si128(dest, 7, xmm7); flush64b(dest + 0 * 64); flush64b(dest + 1 * 64); } static force_inline void memmove_mov1x64b(char *dest, const char *src, flush64b_fn flush64b) { __m128i xmm0 = mm_loadu_si128(src, 0); __m128i xmm1 = mm_loadu_si128(src, 1); __m128i xmm2 = mm_loadu_si128(src, 2); __m128i xmm3 = mm_loadu_si128(src, 3); mm_store_si128(dest, 0, xmm0); mm_store_si128(dest, 1, xmm1); mm_store_si128(dest, 2, xmm2); mm_store_si128(dest, 3, xmm3); flush64b(dest + 0 * 64); } static force_inline void memmove_mov_sse_fw(char *dest, const char *src, size_t len, flush_fn flush, flush64b_fn flush64b) { size_t cnt = (uint64_t)dest & 63; if (cnt > 0) { cnt = 64 - cnt; if (cnt > len) cnt = len; memmove_small_sse2(dest, src, cnt, flush); dest += cnt; src += cnt; len -= cnt; } while (len >= 4 * 64) { memmove_mov4x64b(dest, src, flush64b); dest += 4 * 64; src += 4 * 64; len -= 4 * 64; } if (len >= 2 * 64) { memmove_mov2x64b(dest, src, flush64b); dest += 2 * 64; src += 2 * 64; len -= 2 * 64; } if (len >= 1 * 64) { memmove_mov1x64b(dest, src, flush64b); dest += 1 * 64; src += 1 * 64; len -= 1 * 64; } if (len) memmove_small_sse2(dest, src, len, flush); } static force_inline void memmove_mov_sse_bw(char *dest, const char *src, size_t len, flush_fn flush, flush64b_fn flush64b) { dest += len; src += len; size_t cnt = (uint64_t)dest & 63; if (cnt > 0) { if (cnt > len) cnt = len; dest -= cnt; src -= cnt; len -= cnt; memmove_small_sse2(dest, src, cnt, flush); } while (len >= 4 * 64) { dest -= 4 * 
64; src -= 4 * 64; len -= 4 * 64; memmove_mov4x64b(dest, src, flush64b); } if (len >= 2 * 64) { dest -= 2 * 64; src -= 2 * 64; len -= 2 * 64; memmove_mov2x64b(dest, src, flush64b); } if (len >= 1 * 64) { dest -= 1 * 64; src -= 1 * 64; len -= 1 * 64; memmove_mov1x64b(dest, src, flush64b); } if (len) memmove_small_sse2(dest - len, src - len, len, flush); } static force_inline void memmove_mov_sse2(char *dest, const char *src, size_t len, flush_fn flush, flush64b_fn flush64b) { if ((uintptr_t)dest - (uintptr_t)src >= len) memmove_mov_sse_fw(dest, src, len, flush, flush64b); else memmove_mov_sse_bw(dest, src, len, flush, flush64b); } void memmove_mov_sse2_noflush(char *dest, const char *src, size_t len) { LOG(15, "dest %p src %p len %zu", dest, src, len); memmove_mov_sse2(dest, src, len, noflush, noflush64b); } void memmove_mov_sse2_empty(char *dest, const char *src, size_t len) { LOG(15, "dest %p src %p len %zu", dest, src, len); memmove_mov_sse2(dest, src, len, flush_empty_nolog, flush64b_empty); } void memmove_mov_sse2_clflush(char *dest, const char *src, size_t len) { LOG(15, "dest %p src %p len %zu", dest, src, len); memmove_mov_sse2(dest, src, len, flush_clflush_nolog, pmem_clflush); } void memmove_mov_sse2_clflushopt(char *dest, const char *src, size_t len) { LOG(15, "dest %p src %p len %zu", dest, src, len); memmove_mov_sse2(dest, src, len, flush_clflushopt_nolog, pmem_clflushopt); } void memmove_mov_sse2_clwb(char *dest, const char *src, size_t len) { LOG(15, "dest %p src %p len %zu", dest, src, len); memmove_mov_sse2(dest, src, len, flush_clwb_nolog, pmem_clwb); }
5,820
22.566802
69
c
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmem2/x86_64/memcpy/memcpy_avx.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2017-2020, Intel Corporation */ #ifndef PMEM2_MEMCPY_AVX_H #define PMEM2_MEMCPY_AVX_H #include <immintrin.h> #include <stddef.h> #include <stdint.h> #include "out.h" static force_inline void memmove_small_avx_noflush(char *dest, const char *src, size_t len) { ASSERT(len <= 64); if (len <= 8) goto le8; if (len <= 32) goto le32; /* 33..64 */ __m256i ymm0 = _mm256_loadu_si256((__m256i *)src); __m256i ymm1 = _mm256_loadu_si256((__m256i *)(src + len - 32)); _mm256_storeu_si256((__m256i *)dest, ymm0); _mm256_storeu_si256((__m256i *)(dest + len - 32), ymm1); return; le32: if (len > 16) { /* 17..32 */ __m128i xmm0 = _mm_loadu_si128((__m128i *)src); __m128i xmm1 = _mm_loadu_si128((__m128i *)(src + len - 16)); _mm_storeu_si128((__m128i *)dest, xmm0); _mm_storeu_si128((__m128i *)(dest + len - 16), xmm1); return; } /* 9..16 */ ua_uint64_t d80 = *(ua_uint64_t *)src; ua_uint64_t d81 = *(ua_uint64_t *)(src + len - 8); *(ua_uint64_t *)dest = d80; *(ua_uint64_t *)(dest + len - 8) = d81; return; le8: if (len <= 2) goto le2; if (len > 4) { /* 5..8 */ ua_uint32_t d40 = *(ua_uint32_t *)src; ua_uint32_t d41 = *(ua_uint32_t *)(src + len - 4); *(ua_uint32_t *)dest = d40; *(ua_uint32_t *)(dest + len - 4) = d41; return; } /* 3..4 */ ua_uint16_t d20 = *(ua_uint16_t *)src; ua_uint16_t d21 = *(ua_uint16_t *)(src + len - 2); *(ua_uint16_t *)dest = d20; *(ua_uint16_t *)(dest + len - 2) = d21; return; le2: if (len == 2) { *(ua_uint16_t *)dest = *(ua_uint16_t *)src; return; } *(uint8_t *)dest = *(uint8_t *)src; } static force_inline void memmove_small_avx(char *dest, const char *src, size_t len, flush_fn flush) { /* * pmemcheck complains about "overwritten stores before they were made * persistent" for overlapping stores (last instruction in each code * path) in the optimized version. * libc's memcpy also does that, so we can't use it here. 
*/ if (On_pmemcheck) { memmove_nodrain_generic(dest, src, len, PMEM2_F_MEM_NOFLUSH, NULL); } else { memmove_small_avx_noflush(dest, src, len); } flush(dest, len); } #endif
2,173
20.524752
74
h
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmem2/x86_64/memcpy/memcpy_t_avx.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2017-2020, Intel Corporation */ #include <immintrin.h> #include <stddef.h> #include <stdint.h> #include "pmem2_arch.h" #include "avx.h" #include "flush.h" #include "memcpy_memset.h" #include "memcpy_avx.h" static force_inline __m256i mm256_loadu_si256(const char *src, unsigned idx) { return _mm256_loadu_si256((const __m256i *)src + idx); } static force_inline void mm256_store_si256(char *dest, unsigned idx, __m256i src) { _mm256_store_si256((__m256i *)dest + idx, src); } static force_inline void memmove_mov8x64b(char *dest, const char *src, flush64b_fn flush64b) { __m256i ymm0 = mm256_loadu_si256(src, 0); __m256i ymm1 = mm256_loadu_si256(src, 1); __m256i ymm2 = mm256_loadu_si256(src, 2); __m256i ymm3 = mm256_loadu_si256(src, 3); __m256i ymm4 = mm256_loadu_si256(src, 4); __m256i ymm5 = mm256_loadu_si256(src, 5); __m256i ymm6 = mm256_loadu_si256(src, 6); __m256i ymm7 = mm256_loadu_si256(src, 7); __m256i ymm8 = mm256_loadu_si256(src, 8); __m256i ymm9 = mm256_loadu_si256(src, 9); __m256i ymm10 = mm256_loadu_si256(src, 10); __m256i ymm11 = mm256_loadu_si256(src, 11); __m256i ymm12 = mm256_loadu_si256(src, 12); __m256i ymm13 = mm256_loadu_si256(src, 13); __m256i ymm14 = mm256_loadu_si256(src, 14); __m256i ymm15 = mm256_loadu_si256(src, 15); mm256_store_si256(dest, 0, ymm0); mm256_store_si256(dest, 1, ymm1); mm256_store_si256(dest, 2, ymm2); mm256_store_si256(dest, 3, ymm3); mm256_store_si256(dest, 4, ymm4); mm256_store_si256(dest, 5, ymm5); mm256_store_si256(dest, 6, ymm6); mm256_store_si256(dest, 7, ymm7); mm256_store_si256(dest, 8, ymm8); mm256_store_si256(dest, 9, ymm9); mm256_store_si256(dest, 10, ymm10); mm256_store_si256(dest, 11, ymm11); mm256_store_si256(dest, 12, ymm12); mm256_store_si256(dest, 13, ymm13); mm256_store_si256(dest, 14, ymm14); mm256_store_si256(dest, 15, ymm15); flush64b(dest + 0 * 64); flush64b(dest + 1 * 64); flush64b(dest + 2 * 64); flush64b(dest + 3 * 64); flush64b(dest + 4 * 64); 
flush64b(dest + 5 * 64); flush64b(dest + 6 * 64); flush64b(dest + 7 * 64); } static force_inline void memmove_mov4x64b(char *dest, const char *src, flush64b_fn flush64b) { __m256i ymm0 = mm256_loadu_si256(src, 0); __m256i ymm1 = mm256_loadu_si256(src, 1); __m256i ymm2 = mm256_loadu_si256(src, 2); __m256i ymm3 = mm256_loadu_si256(src, 3); __m256i ymm4 = mm256_loadu_si256(src, 4); __m256i ymm5 = mm256_loadu_si256(src, 5); __m256i ymm6 = mm256_loadu_si256(src, 6); __m256i ymm7 = mm256_loadu_si256(src, 7); mm256_store_si256(dest, 0, ymm0); mm256_store_si256(dest, 1, ymm1); mm256_store_si256(dest, 2, ymm2); mm256_store_si256(dest, 3, ymm3); mm256_store_si256(dest, 4, ymm4); mm256_store_si256(dest, 5, ymm5); mm256_store_si256(dest, 6, ymm6); mm256_store_si256(dest, 7, ymm7); flush64b(dest + 0 * 64); flush64b(dest + 1 * 64); flush64b(dest + 2 * 64); flush64b(dest + 3 * 64); } static force_inline void memmove_mov2x64b(char *dest, const char *src, flush64b_fn flush64b) { __m256i ymm0 = mm256_loadu_si256(src, 0); __m256i ymm1 = mm256_loadu_si256(src, 1); __m256i ymm2 = mm256_loadu_si256(src, 2); __m256i ymm3 = mm256_loadu_si256(src, 3); mm256_store_si256(dest, 0, ymm0); mm256_store_si256(dest, 1, ymm1); mm256_store_si256(dest, 2, ymm2); mm256_store_si256(dest, 3, ymm3); flush64b(dest + 0 * 64); flush64b(dest + 1 * 64); } static force_inline void memmove_mov1x64b(char *dest, const char *src, flush64b_fn flush64b) { __m256i ymm0 = mm256_loadu_si256(src, 0); __m256i ymm1 = mm256_loadu_si256(src, 1); mm256_store_si256(dest, 0, ymm0); mm256_store_si256(dest, 1, ymm1); flush64b(dest + 0 * 64); } static force_inline void memmove_mov_avx_fw(char *dest, const char *src, size_t len, flush_fn flush, flush64b_fn flush64b) { size_t cnt = (uint64_t)dest & 63; if (cnt > 0) { cnt = 64 - cnt; if (cnt > len) cnt = len; memmove_small_avx(dest, src, cnt, flush); dest += cnt; src += cnt; len -= cnt; } while (len >= 8 * 64) { memmove_mov8x64b(dest, src, flush64b); dest += 8 * 64; src += 8 * 64; 
len -= 8 * 64; } if (len >= 4 * 64) { memmove_mov4x64b(dest, src, flush64b); dest += 4 * 64; src += 4 * 64; len -= 4 * 64; } if (len >= 2 * 64) { memmove_mov2x64b(dest, src, flush64b); dest += 2 * 64; src += 2 * 64; len -= 2 * 64; } if (len >= 1 * 64) { memmove_mov1x64b(dest, src, flush64b); dest += 1 * 64; src += 1 * 64; len -= 1 * 64; } if (len) memmove_small_avx(dest, src, len, flush); } static force_inline void memmove_mov_avx_bw(char *dest, const char *src, size_t len, flush_fn flush, flush64b_fn flush64b) { dest += len; src += len; size_t cnt = (uint64_t)dest & 63; if (cnt > 0) { if (cnt > len) cnt = len; dest -= cnt; src -= cnt; len -= cnt; memmove_small_avx(dest, src, cnt, flush); } while (len >= 8 * 64) { dest -= 8 * 64; src -= 8 * 64; len -= 8 * 64; memmove_mov8x64b(dest, src, flush64b); } if (len >= 4 * 64) { dest -= 4 * 64; src -= 4 * 64; len -= 4 * 64; memmove_mov4x64b(dest, src, flush64b); } if (len >= 2 * 64) { dest -= 2 * 64; src -= 2 * 64; len -= 2 * 64; memmove_mov2x64b(dest, src, flush64b); } if (len >= 1 * 64) { dest -= 1 * 64; src -= 1 * 64; len -= 1 * 64; memmove_mov1x64b(dest, src, flush64b); } if (len) memmove_small_avx(dest - len, src - len, len, flush); } static force_inline void memmove_mov_avx(char *dest, const char *src, size_t len, flush_fn flush, flush64b_fn flush64b) { if ((uintptr_t)dest - (uintptr_t)src >= len) memmove_mov_avx_fw(dest, src, len, flush, flush64b); else memmove_mov_avx_bw(dest, src, len, flush, flush64b); avx_zeroupper(); } void memmove_mov_avx_noflush(char *dest, const char *src, size_t len) { LOG(15, "dest %p src %p len %zu", dest, src, len); memmove_mov_avx(dest, src, len, noflush, noflush64b); } void memmove_mov_avx_empty(char *dest, const char *src, size_t len) { LOG(15, "dest %p src %p len %zu", dest, src, len); memmove_mov_avx(dest, src, len, flush_empty_nolog, flush64b_empty); } void memmove_mov_avx_clflush(char *dest, const char *src, size_t len) { LOG(15, "dest %p src %p len %zu", dest, src, len); 
memmove_mov_avx(dest, src, len, flush_clflush_nolog, pmem_clflush); } void memmove_mov_avx_clflushopt(char *dest, const char *src, size_t len) { LOG(15, "dest %p src %p len %zu", dest, src, len); memmove_mov_avx(dest, src, len, flush_clflushopt_nolog, pmem_clflushopt); } void memmove_mov_avx_clwb(char *dest, const char *src, size_t len) { LOG(15, "dest %p src %p len %zu", dest, src, len); memmove_mov_avx(dest, src, len, flush_clwb_nolog, pmem_clwb); }
6,705
22.780142
68
c
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmem2/x86_64/memcpy/memcpy_t_avx512f.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2017-2020, Intel Corporation */ #include <immintrin.h> #include <stddef.h> #include <stdint.h> #include "pmem2_arch.h" #include "avx.h" #include "flush.h" #include "memcpy_memset.h" #include "memcpy_avx512f.h" static force_inline __m512i mm512_loadu_si512(const char *src, unsigned idx) { return _mm512_loadu_si512((const __m512i *)src + idx); } static force_inline void mm512_store_si512(char *dest, unsigned idx, __m512i src) { _mm512_store_si512((__m512i *)dest + idx, src); } static force_inline void memmove_mov32x64b(char *dest, const char *src, flush64b_fn flush64b) { __m512i zmm0 = mm512_loadu_si512(src, 0); __m512i zmm1 = mm512_loadu_si512(src, 1); __m512i zmm2 = mm512_loadu_si512(src, 2); __m512i zmm3 = mm512_loadu_si512(src, 3); __m512i zmm4 = mm512_loadu_si512(src, 4); __m512i zmm5 = mm512_loadu_si512(src, 5); __m512i zmm6 = mm512_loadu_si512(src, 6); __m512i zmm7 = mm512_loadu_si512(src, 7); __m512i zmm8 = mm512_loadu_si512(src, 8); __m512i zmm9 = mm512_loadu_si512(src, 9); __m512i zmm10 = mm512_loadu_si512(src, 10); __m512i zmm11 = mm512_loadu_si512(src, 11); __m512i zmm12 = mm512_loadu_si512(src, 12); __m512i zmm13 = mm512_loadu_si512(src, 13); __m512i zmm14 = mm512_loadu_si512(src, 14); __m512i zmm15 = mm512_loadu_si512(src, 15); __m512i zmm16 = mm512_loadu_si512(src, 16); __m512i zmm17 = mm512_loadu_si512(src, 17); __m512i zmm18 = mm512_loadu_si512(src, 18); __m512i zmm19 = mm512_loadu_si512(src, 19); __m512i zmm20 = mm512_loadu_si512(src, 20); __m512i zmm21 = mm512_loadu_si512(src, 21); __m512i zmm22 = mm512_loadu_si512(src, 22); __m512i zmm23 = mm512_loadu_si512(src, 23); __m512i zmm24 = mm512_loadu_si512(src, 24); __m512i zmm25 = mm512_loadu_si512(src, 25); __m512i zmm26 = mm512_loadu_si512(src, 26); __m512i zmm27 = mm512_loadu_si512(src, 27); __m512i zmm28 = mm512_loadu_si512(src, 28); __m512i zmm29 = mm512_loadu_si512(src, 29); __m512i zmm30 = mm512_loadu_si512(src, 30); __m512i zmm31 = 
mm512_loadu_si512(src, 31); mm512_store_si512(dest, 0, zmm0); mm512_store_si512(dest, 1, zmm1); mm512_store_si512(dest, 2, zmm2); mm512_store_si512(dest, 3, zmm3); mm512_store_si512(dest, 4, zmm4); mm512_store_si512(dest, 5, zmm5); mm512_store_si512(dest, 6, zmm6); mm512_store_si512(dest, 7, zmm7); mm512_store_si512(dest, 8, zmm8); mm512_store_si512(dest, 9, zmm9); mm512_store_si512(dest, 10, zmm10); mm512_store_si512(dest, 11, zmm11); mm512_store_si512(dest, 12, zmm12); mm512_store_si512(dest, 13, zmm13); mm512_store_si512(dest, 14, zmm14); mm512_store_si512(dest, 15, zmm15); mm512_store_si512(dest, 16, zmm16); mm512_store_si512(dest, 17, zmm17); mm512_store_si512(dest, 18, zmm18); mm512_store_si512(dest, 19, zmm19); mm512_store_si512(dest, 20, zmm20); mm512_store_si512(dest, 21, zmm21); mm512_store_si512(dest, 22, zmm22); mm512_store_si512(dest, 23, zmm23); mm512_store_si512(dest, 24, zmm24); mm512_store_si512(dest, 25, zmm25); mm512_store_si512(dest, 26, zmm26); mm512_store_si512(dest, 27, zmm27); mm512_store_si512(dest, 28, zmm28); mm512_store_si512(dest, 29, zmm29); mm512_store_si512(dest, 30, zmm30); mm512_store_si512(dest, 31, zmm31); flush64b(dest + 0 * 64); flush64b(dest + 1 * 64); flush64b(dest + 2 * 64); flush64b(dest + 3 * 64); flush64b(dest + 4 * 64); flush64b(dest + 5 * 64); flush64b(dest + 6 * 64); flush64b(dest + 7 * 64); flush64b(dest + 8 * 64); flush64b(dest + 9 * 64); flush64b(dest + 10 * 64); flush64b(dest + 11 * 64); flush64b(dest + 12 * 64); flush64b(dest + 13 * 64); flush64b(dest + 14 * 64); flush64b(dest + 15 * 64); flush64b(dest + 16 * 64); flush64b(dest + 17 * 64); flush64b(dest + 18 * 64); flush64b(dest + 19 * 64); flush64b(dest + 20 * 64); flush64b(dest + 21 * 64); flush64b(dest + 22 * 64); flush64b(dest + 23 * 64); flush64b(dest + 24 * 64); flush64b(dest + 25 * 64); flush64b(dest + 26 * 64); flush64b(dest + 27 * 64); flush64b(dest + 28 * 64); flush64b(dest + 29 * 64); flush64b(dest + 30 * 64); flush64b(dest + 31 * 64); } static 
force_inline void memmove_mov16x64b(char *dest, const char *src, flush64b_fn flush64b) { __m512i zmm0 = mm512_loadu_si512(src, 0); __m512i zmm1 = mm512_loadu_si512(src, 1); __m512i zmm2 = mm512_loadu_si512(src, 2); __m512i zmm3 = mm512_loadu_si512(src, 3); __m512i zmm4 = mm512_loadu_si512(src, 4); __m512i zmm5 = mm512_loadu_si512(src, 5); __m512i zmm6 = mm512_loadu_si512(src, 6); __m512i zmm7 = mm512_loadu_si512(src, 7); __m512i zmm8 = mm512_loadu_si512(src, 8); __m512i zmm9 = mm512_loadu_si512(src, 9); __m512i zmm10 = mm512_loadu_si512(src, 10); __m512i zmm11 = mm512_loadu_si512(src, 11); __m512i zmm12 = mm512_loadu_si512(src, 12); __m512i zmm13 = mm512_loadu_si512(src, 13); __m512i zmm14 = mm512_loadu_si512(src, 14); __m512i zmm15 = mm512_loadu_si512(src, 15); mm512_store_si512(dest, 0, zmm0); mm512_store_si512(dest, 1, zmm1); mm512_store_si512(dest, 2, zmm2); mm512_store_si512(dest, 3, zmm3); mm512_store_si512(dest, 4, zmm4); mm512_store_si512(dest, 5, zmm5); mm512_store_si512(dest, 6, zmm6); mm512_store_si512(dest, 7, zmm7); mm512_store_si512(dest, 8, zmm8); mm512_store_si512(dest, 9, zmm9); mm512_store_si512(dest, 10, zmm10); mm512_store_si512(dest, 11, zmm11); mm512_store_si512(dest, 12, zmm12); mm512_store_si512(dest, 13, zmm13); mm512_store_si512(dest, 14, zmm14); mm512_store_si512(dest, 15, zmm15); flush64b(dest + 0 * 64); flush64b(dest + 1 * 64); flush64b(dest + 2 * 64); flush64b(dest + 3 * 64); flush64b(dest + 4 * 64); flush64b(dest + 5 * 64); flush64b(dest + 6 * 64); flush64b(dest + 7 * 64); flush64b(dest + 8 * 64); flush64b(dest + 9 * 64); flush64b(dest + 10 * 64); flush64b(dest + 11 * 64); flush64b(dest + 12 * 64); flush64b(dest + 13 * 64); flush64b(dest + 14 * 64); flush64b(dest + 15 * 64); } static force_inline void memmove_mov8x64b(char *dest, const char *src, flush64b_fn flush64b) { __m512i zmm0 = mm512_loadu_si512(src, 0); __m512i zmm1 = mm512_loadu_si512(src, 1); __m512i zmm2 = mm512_loadu_si512(src, 2); __m512i zmm3 = mm512_loadu_si512(src, 3); 
__m512i zmm4 = mm512_loadu_si512(src, 4); __m512i zmm5 = mm512_loadu_si512(src, 5); __m512i zmm6 = mm512_loadu_si512(src, 6); __m512i zmm7 = mm512_loadu_si512(src, 7); mm512_store_si512(dest, 0, zmm0); mm512_store_si512(dest, 1, zmm1); mm512_store_si512(dest, 2, zmm2); mm512_store_si512(dest, 3, zmm3); mm512_store_si512(dest, 4, zmm4); mm512_store_si512(dest, 5, zmm5); mm512_store_si512(dest, 6, zmm6); mm512_store_si512(dest, 7, zmm7); flush64b(dest + 0 * 64); flush64b(dest + 1 * 64); flush64b(dest + 2 * 64); flush64b(dest + 3 * 64); flush64b(dest + 4 * 64); flush64b(dest + 5 * 64); flush64b(dest + 6 * 64); flush64b(dest + 7 * 64); } static force_inline void memmove_mov4x64b(char *dest, const char *src, flush64b_fn flush64b) { __m512i zmm0 = mm512_loadu_si512(src, 0); __m512i zmm1 = mm512_loadu_si512(src, 1); __m512i zmm2 = mm512_loadu_si512(src, 2); __m512i zmm3 = mm512_loadu_si512(src, 3); mm512_store_si512(dest, 0, zmm0); mm512_store_si512(dest, 1, zmm1); mm512_store_si512(dest, 2, zmm2); mm512_store_si512(dest, 3, zmm3); flush64b(dest + 0 * 64); flush64b(dest + 1 * 64); flush64b(dest + 2 * 64); flush64b(dest + 3 * 64); } static force_inline void memmove_mov2x64b(char *dest, const char *src, flush64b_fn flush64b) { __m512i zmm0 = mm512_loadu_si512(src, 0); __m512i zmm1 = mm512_loadu_si512(src, 1); mm512_store_si512(dest, 0, zmm0); mm512_store_si512(dest, 1, zmm1); flush64b(dest + 0 * 64); flush64b(dest + 1 * 64); } static force_inline void memmove_mov1x64b(char *dest, const char *src, flush64b_fn flush64b) { __m512i zmm0 = mm512_loadu_si512(src, 0); mm512_store_si512(dest, 0, zmm0); flush64b(dest + 0 * 64); } static force_inline void memmove_mov_avx512f_fw(char *dest, const char *src, size_t len, flush_fn flush, flush64b_fn flush64b) { size_t cnt = (uint64_t)dest & 63; if (cnt > 0) { cnt = 64 - cnt; if (cnt > len) cnt = len; memmove_small_avx512f(dest, src, cnt, flush); dest += cnt; src += cnt; len -= cnt; } while (len >= 32 * 64) { memmove_mov32x64b(dest, src, 
flush64b); dest += 32 * 64; src += 32 * 64; len -= 32 * 64; } if (len >= 16 * 64) { memmove_mov16x64b(dest, src, flush64b); dest += 16 * 64; src += 16 * 64; len -= 16 * 64; } if (len >= 8 * 64) { memmove_mov8x64b(dest, src, flush64b); dest += 8 * 64; src += 8 * 64; len -= 8 * 64; } if (len >= 4 * 64) { memmove_mov4x64b(dest, src, flush64b); dest += 4 * 64; src += 4 * 64; len -= 4 * 64; } if (len >= 2 * 64) { memmove_mov2x64b(dest, src, flush64b); dest += 2 * 64; src += 2 * 64; len -= 2 * 64; } if (len >= 1 * 64) { memmove_mov1x64b(dest, src, flush64b); dest += 1 * 64; src += 1 * 64; len -= 1 * 64; } if (len) memmove_small_avx512f(dest, src, len, flush); } static force_inline void memmove_mov_avx512f_bw(char *dest, const char *src, size_t len, flush_fn flush, flush64b_fn flush64b) { dest += len; src += len; size_t cnt = (uint64_t)dest & 63; if (cnt > 0) { if (cnt > len) cnt = len; dest -= cnt; src -= cnt; len -= cnt; memmove_small_avx512f(dest, src, cnt, flush); } while (len >= 32 * 64) { dest -= 32 * 64; src -= 32 * 64; len -= 32 * 64; memmove_mov32x64b(dest, src, flush64b); } if (len >= 16 * 64) { dest -= 16 * 64; src -= 16 * 64; len -= 16 * 64; memmove_mov16x64b(dest, src, flush64b); } if (len >= 8 * 64) { dest -= 8 * 64; src -= 8 * 64; len -= 8 * 64; memmove_mov8x64b(dest, src, flush64b); } if (len >= 4 * 64) { dest -= 4 * 64; src -= 4 * 64; len -= 4 * 64; memmove_mov4x64b(dest, src, flush64b); } if (len >= 2 * 64) { dest -= 2 * 64; src -= 2 * 64; len -= 2 * 64; memmove_mov2x64b(dest, src, flush64b); } if (len >= 1 * 64) { dest -= 1 * 64; src -= 1 * 64; len -= 1 * 64; memmove_mov1x64b(dest, src, flush64b); } if (len) memmove_small_avx512f(dest - len, src - len, len, flush); } static force_inline void memmove_mov_avx512f(char *dest, const char *src, size_t len, flush_fn flush, flush64b_fn flush64b) { if ((uintptr_t)dest - (uintptr_t)src >= len) memmove_mov_avx512f_fw(dest, src, len, flush, flush64b); else memmove_mov_avx512f_bw(dest, src, len, flush, flush64b); 
avx_zeroupper(); } void memmove_mov_avx512f_noflush(char *dest, const char *src, size_t len) { LOG(15, "dest %p src %p len %zu", dest, src, len); memmove_mov_avx512f(dest, src, len, noflush, noflush64b); } void memmove_mov_avx512f_empty(char *dest, const char *src, size_t len) { LOG(15, "dest %p src %p len %zu", dest, src, len); memmove_mov_avx512f(dest, src, len, flush_empty_nolog, flush64b_empty); } void memmove_mov_avx512f_clflush(char *dest, const char *src, size_t len) { LOG(15, "dest %p src %p len %zu", dest, src, len); memmove_mov_avx512f(dest, src, len, flush_clflush_nolog, pmem_clflush); } void memmove_mov_avx512f_clflushopt(char *dest, const char *src, size_t len) { LOG(15, "dest %p src %p len %zu", dest, src, len); memmove_mov_avx512f(dest, src, len, flush_clflushopt_nolog, pmem_clflushopt); } void memmove_mov_avx512f_clwb(char *dest, const char *src, size_t len) { LOG(15, "dest %p src %p len %zu", dest, src, len); memmove_mov_avx512f(dest, src, len, flush_clwb_nolog, pmem_clwb); }
11,422
25.020501
72
c
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmem2/x86_64/memcpy/memcpy_sse2.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2017-2020, Intel Corporation */ #ifndef PMEM2_MEMCPY_SSE2_H #define PMEM2_MEMCPY_SSE2_H #include <xmmintrin.h> #include <stddef.h> #include <stdint.h> #include "out.h" static force_inline void memmove_small_sse2_noflush(char *dest, const char *src, size_t len) { ASSERT(len <= 64); if (len <= 8) goto le8; if (len <= 32) goto le32; if (len > 48) { /* 49..64 */ __m128i xmm0 = _mm_loadu_si128((__m128i *)src); __m128i xmm1 = _mm_loadu_si128((__m128i *)(src + 16)); __m128i xmm2 = _mm_loadu_si128((__m128i *)(src + 32)); __m128i xmm3 = _mm_loadu_si128((__m128i *)(src + len - 16)); _mm_storeu_si128((__m128i *)dest, xmm0); _mm_storeu_si128((__m128i *)(dest + 16), xmm1); _mm_storeu_si128((__m128i *)(dest + 32), xmm2); _mm_storeu_si128((__m128i *)(dest + len - 16), xmm3); return; } /* 33..48 */ __m128i xmm0 = _mm_loadu_si128((__m128i *)src); __m128i xmm1 = _mm_loadu_si128((__m128i *)(src + 16)); __m128i xmm2 = _mm_loadu_si128((__m128i *)(src + len - 16)); _mm_storeu_si128((__m128i *)dest, xmm0); _mm_storeu_si128((__m128i *)(dest + 16), xmm1); _mm_storeu_si128((__m128i *)(dest + len - 16), xmm2); return; le32: if (len > 16) { /* 17..32 */ __m128i xmm0 = _mm_loadu_si128((__m128i *)src); __m128i xmm1 = _mm_loadu_si128((__m128i *)(src + len - 16)); _mm_storeu_si128((__m128i *)dest, xmm0); _mm_storeu_si128((__m128i *)(dest + len - 16), xmm1); return; } /* 9..16 */ uint64_t d80 = *(ua_uint64_t *)src; uint64_t d81 = *(ua_uint64_t *)(src + len - 8); *(ua_uint64_t *)dest = d80; *(ua_uint64_t *)(dest + len - 8) = d81; return; le8: if (len <= 2) goto le2; if (len > 4) { /* 5..8 */ uint32_t d40 = *(ua_uint32_t *)src; uint32_t d41 = *(ua_uint32_t *)(src + len - 4); *(ua_uint32_t *)dest = d40; *(ua_uint32_t *)(dest + len - 4) = d41; return; } /* 3..4 */ uint16_t d20 = *(ua_uint16_t *)src; uint16_t d21 = *(ua_uint16_t *)(src + len - 2); *(ua_uint16_t *)dest = d20; *(ua_uint16_t *)(dest + len - 2) = d21; return; le2: if (len == 2) { 
*(ua_uint16_t *)dest = *(ua_uint16_t *)src; return; } *(uint8_t *)dest = *(uint8_t *)src; } static force_inline void memmove_small_sse2(char *dest, const char *src, size_t len, flush_fn flush) { /* * pmemcheck complains about "overwritten stores before they were made * persistent" for overlapping stores (last instruction in each code * path) in the optimized version. * libc's memcpy also does that, so we can't use it here. */ if (On_pmemcheck) { memmove_nodrain_generic(dest, src, len, PMEM2_F_MEM_NOFLUSH, NULL); } else { memmove_small_sse2_noflush(dest, src, len); } flush(dest, len); } #endif
2,726
22.307692
75
h
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmem2/x86_64/memcpy/memcpy_nt_avx.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2017-2020, Intel Corporation */ #include <immintrin.h> #include <stddef.h> #include <stdint.h> #include "pmem2_arch.h" #include "avx.h" #include "flush.h" #include "memcpy_memset.h" #include "memcpy_avx.h" #include "valgrind_internal.h" static force_inline __m256i mm256_loadu_si256(const char *src, unsigned idx) { return _mm256_loadu_si256((const __m256i *)src + idx); } static force_inline void mm256_stream_si256(char *dest, unsigned idx, __m256i src) { _mm256_stream_si256((__m256i *)dest + idx, src); barrier(); } static force_inline void memmove_movnt8x64b(char *dest, const char *src) { __m256i ymm0 = mm256_loadu_si256(src, 0); __m256i ymm1 = mm256_loadu_si256(src, 1); __m256i ymm2 = mm256_loadu_si256(src, 2); __m256i ymm3 = mm256_loadu_si256(src, 3); __m256i ymm4 = mm256_loadu_si256(src, 4); __m256i ymm5 = mm256_loadu_si256(src, 5); __m256i ymm6 = mm256_loadu_si256(src, 6); __m256i ymm7 = mm256_loadu_si256(src, 7); __m256i ymm8 = mm256_loadu_si256(src, 8); __m256i ymm9 = mm256_loadu_si256(src, 9); __m256i ymm10 = mm256_loadu_si256(src, 10); __m256i ymm11 = mm256_loadu_si256(src, 11); __m256i ymm12 = mm256_loadu_si256(src, 12); __m256i ymm13 = mm256_loadu_si256(src, 13); __m256i ymm14 = mm256_loadu_si256(src, 14); __m256i ymm15 = mm256_loadu_si256(src, 15); mm256_stream_si256(dest, 0, ymm0); mm256_stream_si256(dest, 1, ymm1); mm256_stream_si256(dest, 2, ymm2); mm256_stream_si256(dest, 3, ymm3); mm256_stream_si256(dest, 4, ymm4); mm256_stream_si256(dest, 5, ymm5); mm256_stream_si256(dest, 6, ymm6); mm256_stream_si256(dest, 7, ymm7); mm256_stream_si256(dest, 8, ymm8); mm256_stream_si256(dest, 9, ymm9); mm256_stream_si256(dest, 10, ymm10); mm256_stream_si256(dest, 11, ymm11); mm256_stream_si256(dest, 12, ymm12); mm256_stream_si256(dest, 13, ymm13); mm256_stream_si256(dest, 14, ymm14); mm256_stream_si256(dest, 15, ymm15); } static force_inline void memmove_movnt4x64b(char *dest, const char *src) { __m256i ymm0 = 
mm256_loadu_si256(src, 0); __m256i ymm1 = mm256_loadu_si256(src, 1); __m256i ymm2 = mm256_loadu_si256(src, 2); __m256i ymm3 = mm256_loadu_si256(src, 3); __m256i ymm4 = mm256_loadu_si256(src, 4); __m256i ymm5 = mm256_loadu_si256(src, 5); __m256i ymm6 = mm256_loadu_si256(src, 6); __m256i ymm7 = mm256_loadu_si256(src, 7); mm256_stream_si256(dest, 0, ymm0); mm256_stream_si256(dest, 1, ymm1); mm256_stream_si256(dest, 2, ymm2); mm256_stream_si256(dest, 3, ymm3); mm256_stream_si256(dest, 4, ymm4); mm256_stream_si256(dest, 5, ymm5); mm256_stream_si256(dest, 6, ymm6); mm256_stream_si256(dest, 7, ymm7); } static force_inline void memmove_movnt2x64b(char *dest, const char *src) { __m256i ymm0 = mm256_loadu_si256(src, 0); __m256i ymm1 = mm256_loadu_si256(src, 1); __m256i ymm2 = mm256_loadu_si256(src, 2); __m256i ymm3 = mm256_loadu_si256(src, 3); mm256_stream_si256(dest, 0, ymm0); mm256_stream_si256(dest, 1, ymm1); mm256_stream_si256(dest, 2, ymm2); mm256_stream_si256(dest, 3, ymm3); } static force_inline void memmove_movnt1x64b(char *dest, const char *src) { __m256i ymm0 = mm256_loadu_si256(src, 0); __m256i ymm1 = mm256_loadu_si256(src, 1); mm256_stream_si256(dest, 0, ymm0); mm256_stream_si256(dest, 1, ymm1); } static force_inline void memmove_movnt1x32b(char *dest, const char *src) { __m256i ymm0 = _mm256_loadu_si256((__m256i *)src); mm256_stream_si256(dest, 0, ymm0); } static force_inline void memmove_movnt1x16b(char *dest, const char *src) { __m128i xmm0 = _mm_loadu_si128((__m128i *)src); _mm_stream_si128((__m128i *)dest, xmm0); } static force_inline void memmove_movnt1x8b(char *dest, const char *src) { _mm_stream_si64((long long *)dest, *(long long *)src); } static force_inline void memmove_movnt1x4b(char *dest, const char *src) { _mm_stream_si32((int *)dest, *(int *)src); } static force_inline void memmove_movnt_avx_fw(char *dest, const char *src, size_t len, flush_fn flush, perf_barrier_fn perf_barrier) { size_t cnt = (uint64_t)dest & 63; if (cnt > 0) { cnt = 64 - cnt; 
if (cnt > len) cnt = len; memmove_small_avx(dest, src, cnt, flush); dest += cnt; src += cnt; len -= cnt; } const char *srcend = src + len; prefetch_ini_fw(src, len); while (len >= PERF_BARRIER_SIZE) { prefetch_next_fw(src, srcend); memmove_movnt8x64b(dest, src); dest += 8 * 64; src += 8 * 64; len -= 8 * 64; memmove_movnt4x64b(dest, src); dest += 4 * 64; src += 4 * 64; len -= 4 * 64; COMPILE_ERROR_ON(PERF_BARRIER_SIZE != (8 + 4) * 64); if (len) perf_barrier(); } if (len >= 8 * 64) { memmove_movnt8x64b(dest, src); dest += 8 * 64; src += 8 * 64; len -= 8 * 64; } if (len >= 4 * 64) { memmove_movnt4x64b(dest, src); dest += 4 * 64; src += 4 * 64; len -= 4 * 64; } if (len >= 2 * 64) { memmove_movnt2x64b(dest, src); dest += 2 * 64; src += 2 * 64; len -= 2 * 64; } if (len >= 1 * 64) { memmove_movnt1x64b(dest, src); dest += 1 * 64; src += 1 * 64; len -= 1 * 64; } if (len == 0) goto end; /* There's no point in using more than 1 nt store for 1 cache line. */ if (util_is_pow2(len)) { if (len == 32) memmove_movnt1x32b(dest, src); else if (len == 16) memmove_movnt1x16b(dest, src); else if (len == 8) memmove_movnt1x8b(dest, src); else if (len == 4) memmove_movnt1x4b(dest, src); else goto nonnt; goto end; } nonnt: memmove_small_avx(dest, src, len, flush); end: avx_zeroupper(); } static force_inline void memmove_movnt_avx_bw(char *dest, const char *src, size_t len, flush_fn flush, perf_barrier_fn perf_barrier) { dest += len; src += len; size_t cnt = (uint64_t)dest & 63; if (cnt > 0) { if (cnt > len) cnt = len; dest -= cnt; src -= cnt; len -= cnt; memmove_small_avx(dest, src, cnt, flush); } const char *srcbegin = src - len; prefetch_ini_bw(src, len); while (len >= PERF_BARRIER_SIZE) { prefetch_next_bw(src, srcbegin); dest -= 8 * 64; src -= 8 * 64; len -= 8 * 64; memmove_movnt8x64b(dest, src); dest -= 4 * 64; src -= 4 * 64; len -= 4 * 64; memmove_movnt4x64b(dest, src); COMPILE_ERROR_ON(PERF_BARRIER_SIZE != (8 + 4) * 64); if (len) perf_barrier(); } if (len >= 8 * 64) { dest -= 8 * 64; 
src -= 8 * 64; len -= 8 * 64; memmove_movnt8x64b(dest, src); } if (len >= 4 * 64) { dest -= 4 * 64; src -= 4 * 64; len -= 4 * 64; memmove_movnt4x64b(dest, src); } if (len >= 2 * 64) { dest -= 2 * 64; src -= 2 * 64; len -= 2 * 64; memmove_movnt2x64b(dest, src); } if (len >= 1 * 64) { dest -= 1 * 64; src -= 1 * 64; len -= 1 * 64; memmove_movnt1x64b(dest, src); } if (len == 0) goto end; /* There's no point in using more than 1 nt store for 1 cache line. */ if (util_is_pow2(len)) { if (len == 32) { dest -= 32; src -= 32; memmove_movnt1x32b(dest, src); } else if (len == 16) { dest -= 16; src -= 16; memmove_movnt1x16b(dest, src); } else if (len == 8) { dest -= 8; src -= 8; memmove_movnt1x8b(dest, src); } else if (len == 4) { dest -= 4; src -= 4; memmove_movnt1x4b(dest, src); } else { goto nonnt; } goto end; } nonnt: dest -= len; src -= len; memmove_small_avx(dest, src, len, flush); end: avx_zeroupper(); } static force_inline void memmove_movnt_avx(char *dest, const char *src, size_t len, flush_fn flush, barrier_fn barrier, perf_barrier_fn perf_barrier) { if ((uintptr_t)dest - (uintptr_t)src >= len) memmove_movnt_avx_fw(dest, src, len, flush, perf_barrier); else memmove_movnt_avx_bw(dest, src, len, flush, perf_barrier); barrier(); VALGRIND_DO_FLUSH(dest, len); } /* variants without perf_barrier */ void memmove_movnt_avx_noflush_nobarrier(char *dest, const char *src, size_t len) { LOG(15, "dest %p src %p len %zu", dest, src, len); memmove_movnt_avx(dest, src, len, noflush, barrier_after_ntstores, no_barrier); } void memmove_movnt_avx_empty_nobarrier(char *dest, const char *src, size_t len) { LOG(15, "dest %p src %p len %zu", dest, src, len); memmove_movnt_avx(dest, src, len, flush_empty_nolog, barrier_after_ntstores, no_barrier); } void memmove_movnt_avx_clflush_nobarrier(char *dest, const char *src, size_t len) { LOG(15, "dest %p src %p len %zu", dest, src, len); memmove_movnt_avx(dest, src, len, flush_clflush_nolog, barrier_after_ntstores, no_barrier); } void 
memmove_movnt_avx_clflushopt_nobarrier(char *dest, const char *src, size_t len) { LOG(15, "dest %p src %p len %zu", dest, src, len); memmove_movnt_avx(dest, src, len, flush_clflushopt_nolog, no_barrier_after_ntstores, no_barrier); } void memmove_movnt_avx_clwb_nobarrier(char *dest, const char *src, size_t len) { LOG(15, "dest %p src %p len %zu", dest, src, len); memmove_movnt_avx(dest, src, len, flush_clwb_nolog, no_barrier_after_ntstores, no_barrier); } /* variants with perf_barrier */ void memmove_movnt_avx_noflush_wcbarrier(char *dest, const char *src, size_t len) { LOG(15, "dest %p src %p len %zu", dest, src, len); memmove_movnt_avx(dest, src, len, noflush, barrier_after_ntstores, wc_barrier); } void memmove_movnt_avx_empty_wcbarrier(char *dest, const char *src, size_t len) { LOG(15, "dest %p src %p len %zu", dest, src, len); memmove_movnt_avx(dest, src, len, flush_empty_nolog, barrier_after_ntstores, wc_barrier); } void memmove_movnt_avx_clflush_wcbarrier(char *dest, const char *src, size_t len) { LOG(15, "dest %p src %p len %zu", dest, src, len); memmove_movnt_avx(dest, src, len, flush_clflush_nolog, barrier_after_ntstores, wc_barrier); } void memmove_movnt_avx_clflushopt_wcbarrier(char *dest, const char *src, size_t len) { LOG(15, "dest %p src %p len %zu", dest, src, len); memmove_movnt_avx(dest, src, len, flush_clflushopt_nolog, no_barrier_after_ntstores, wc_barrier); } void memmove_movnt_avx_clwb_wcbarrier(char *dest, const char *src, size_t len) { LOG(15, "dest %p src %p len %zu", dest, src, len); memmove_movnt_avx(dest, src, len, flush_clwb_nolog, no_barrier_after_ntstores, wc_barrier); }
10,092
21.731982
79
c
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmem2/x86_64/memcpy/memcpy_nt_sse2.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2017-2020, Intel Corporation */ #include <immintrin.h> #include <stddef.h> #include <stdint.h> #include "pmem2_arch.h" #include "flush.h" #include "memcpy_memset.h" #include "memcpy_sse2.h" #include "valgrind_internal.h" static force_inline __m128i mm_loadu_si128(const char *src, unsigned idx) { return _mm_loadu_si128((const __m128i *)src + idx); } static force_inline void mm_stream_si128(char *dest, unsigned idx, __m128i src) { _mm_stream_si128((__m128i *)dest + idx, src); barrier(); } static force_inline void memmove_movnt4x64b(char *dest, const char *src) { __m128i xmm0 = mm_loadu_si128(src, 0); __m128i xmm1 = mm_loadu_si128(src, 1); __m128i xmm2 = mm_loadu_si128(src, 2); __m128i xmm3 = mm_loadu_si128(src, 3); __m128i xmm4 = mm_loadu_si128(src, 4); __m128i xmm5 = mm_loadu_si128(src, 5); __m128i xmm6 = mm_loadu_si128(src, 6); __m128i xmm7 = mm_loadu_si128(src, 7); __m128i xmm8 = mm_loadu_si128(src, 8); __m128i xmm9 = mm_loadu_si128(src, 9); __m128i xmm10 = mm_loadu_si128(src, 10); __m128i xmm11 = mm_loadu_si128(src, 11); __m128i xmm12 = mm_loadu_si128(src, 12); __m128i xmm13 = mm_loadu_si128(src, 13); __m128i xmm14 = mm_loadu_si128(src, 14); __m128i xmm15 = mm_loadu_si128(src, 15); mm_stream_si128(dest, 0, xmm0); mm_stream_si128(dest, 1, xmm1); mm_stream_si128(dest, 2, xmm2); mm_stream_si128(dest, 3, xmm3); mm_stream_si128(dest, 4, xmm4); mm_stream_si128(dest, 5, xmm5); mm_stream_si128(dest, 6, xmm6); mm_stream_si128(dest, 7, xmm7); mm_stream_si128(dest, 8, xmm8); mm_stream_si128(dest, 9, xmm9); mm_stream_si128(dest, 10, xmm10); mm_stream_si128(dest, 11, xmm11); mm_stream_si128(dest, 12, xmm12); mm_stream_si128(dest, 13, xmm13); mm_stream_si128(dest, 14, xmm14); mm_stream_si128(dest, 15, xmm15); } static force_inline void memmove_movnt2x64b(char *dest, const char *src) { __m128i xmm0 = mm_loadu_si128(src, 0); __m128i xmm1 = mm_loadu_si128(src, 1); __m128i xmm2 = mm_loadu_si128(src, 2); __m128i xmm3 = 
mm_loadu_si128(src, 3); __m128i xmm4 = mm_loadu_si128(src, 4); __m128i xmm5 = mm_loadu_si128(src, 5); __m128i xmm6 = mm_loadu_si128(src, 6); __m128i xmm7 = mm_loadu_si128(src, 7); mm_stream_si128(dest, 0, xmm0); mm_stream_si128(dest, 1, xmm1); mm_stream_si128(dest, 2, xmm2); mm_stream_si128(dest, 3, xmm3); mm_stream_si128(dest, 4, xmm4); mm_stream_si128(dest, 5, xmm5); mm_stream_si128(dest, 6, xmm6); mm_stream_si128(dest, 7, xmm7); } static force_inline void memmove_movnt1x64b(char *dest, const char *src) { __m128i xmm0 = mm_loadu_si128(src, 0); __m128i xmm1 = mm_loadu_si128(src, 1); __m128i xmm2 = mm_loadu_si128(src, 2); __m128i xmm3 = mm_loadu_si128(src, 3); mm_stream_si128(dest, 0, xmm0); mm_stream_si128(dest, 1, xmm1); mm_stream_si128(dest, 2, xmm2); mm_stream_si128(dest, 3, xmm3); } static force_inline void memmove_movnt1x32b(char *dest, const char *src) { __m128i xmm0 = mm_loadu_si128(src, 0); __m128i xmm1 = mm_loadu_si128(src, 1); mm_stream_si128(dest, 0, xmm0); mm_stream_si128(dest, 1, xmm1); } static force_inline void memmove_movnt1x16b(char *dest, const char *src) { __m128i xmm0 = mm_loadu_si128(src, 0); mm_stream_si128(dest, 0, xmm0); } static force_inline void memmove_movnt1x8b(char *dest, const char *src) { _mm_stream_si64((long long *)dest, *(long long *)src); } static force_inline void memmove_movnt1x4b(char *dest, const char *src) { _mm_stream_si32((int *)dest, *(int *)src); } static force_inline void memmove_movnt_sse_fw(char *dest, const char *src, size_t len, flush_fn flush, perf_barrier_fn perf_barrier) { size_t cnt = (uint64_t)dest & 63; if (cnt > 0) { cnt = 64 - cnt; if (cnt > len) cnt = len; memmove_small_sse2(dest, src, cnt, flush); dest += cnt; src += cnt; len -= cnt; } const char *srcend = src + len; prefetch_ini_fw(src, len); while (len >= PERF_BARRIER_SIZE) { prefetch_next_fw(src, srcend); memmove_movnt4x64b(dest, src); dest += 4 * 64; src += 4 * 64; len -= 4 * 64; memmove_movnt4x64b(dest, src); dest += 4 * 64; src += 4 * 64; len -= 4 * 
64; memmove_movnt4x64b(dest, src); dest += 4 * 64; src += 4 * 64; len -= 4 * 64; COMPILE_ERROR_ON(PERF_BARRIER_SIZE != (4 + 4 + 4) * 64); if (len) perf_barrier(); } while (len >= 4 * 64) { memmove_movnt4x64b(dest, src); dest += 4 * 64; src += 4 * 64; len -= 4 * 64; } if (len >= 2 * 64) { memmove_movnt2x64b(dest, src); dest += 2 * 64; src += 2 * 64; len -= 2 * 64; } if (len >= 1 * 64) { memmove_movnt1x64b(dest, src); dest += 1 * 64; src += 1 * 64; len -= 1 * 64; } if (len == 0) return; /* There's no point in using more than 1 nt store for 1 cache line. */ if (util_is_pow2(len)) { if (len == 32) memmove_movnt1x32b(dest, src); else if (len == 16) memmove_movnt1x16b(dest, src); else if (len == 8) memmove_movnt1x8b(dest, src); else if (len == 4) memmove_movnt1x4b(dest, src); else goto nonnt; return; } nonnt: memmove_small_sse2(dest, src, len, flush); } static force_inline void memmove_movnt_sse_bw(char *dest, const char *src, size_t len, flush_fn flush, perf_barrier_fn perf_barrier) { dest += len; src += len; size_t cnt = (uint64_t)dest & 63; if (cnt > 0) { if (cnt > len) cnt = len; dest -= cnt; src -= cnt; len -= cnt; memmove_small_sse2(dest, src, cnt, flush); } const char *srcbegin = src - len; prefetch_ini_bw(src, len); while (len >= PERF_BARRIER_SIZE) { prefetch_next_bw(src, srcbegin); dest -= 4 * 64; src -= 4 * 64; len -= 4 * 64; memmove_movnt4x64b(dest, src); dest -= 4 * 64; src -= 4 * 64; len -= 4 * 64; memmove_movnt4x64b(dest, src); dest -= 4 * 64; src -= 4 * 64; len -= 4 * 64; memmove_movnt4x64b(dest, src); COMPILE_ERROR_ON(PERF_BARRIER_SIZE != (4 + 4 + 4) * 64); if (len) perf_barrier(); } while (len >= 4 * 64) { dest -= 4 * 64; src -= 4 * 64; len -= 4 * 64; memmove_movnt4x64b(dest, src); } if (len >= 2 * 64) { dest -= 2 * 64; src -= 2 * 64; len -= 2 * 64; memmove_movnt2x64b(dest, src); } if (len >= 1 * 64) { dest -= 1 * 64; src -= 1 * 64; len -= 1 * 64; memmove_movnt1x64b(dest, src); } if (len == 0) return; /* There's no point in using more than 1 nt store for 
1 cache line. */ if (util_is_pow2(len)) { if (len == 32) { dest -= 32; src -= 32; memmove_movnt1x32b(dest, src); } else if (len == 16) { dest -= 16; src -= 16; memmove_movnt1x16b(dest, src); } else if (len == 8) { dest -= 8; src -= 8; memmove_movnt1x8b(dest, src); } else if (len == 4) { dest -= 4; src -= 4; memmove_movnt1x4b(dest, src); } else { goto nonnt; } return; } nonnt: dest -= len; src -= len; memmove_small_sse2(dest, src, len, flush); } static force_inline void memmove_movnt_sse2(char *dest, const char *src, size_t len, flush_fn flush, barrier_fn barrier, perf_barrier_fn perf_barrier) { if ((uintptr_t)dest - (uintptr_t)src >= len) memmove_movnt_sse_fw(dest, src, len, flush, perf_barrier); else memmove_movnt_sse_bw(dest, src, len, flush, perf_barrier); barrier(); VALGRIND_DO_FLUSH(dest, len); } /* variants without perf_barrier */ void memmove_movnt_sse2_noflush_nobarrier(char *dest, const char *src, size_t len) { LOG(15, "dest %p src %p len %zu", dest, src, len); memmove_movnt_sse2(dest, src, len, noflush, barrier_after_ntstores, no_barrier); } void memmove_movnt_sse2_empty_nobarrier(char *dest, const char *src, size_t len) { LOG(15, "dest %p src %p len %zu", dest, src, len); memmove_movnt_sse2(dest, src, len, flush_empty_nolog, barrier_after_ntstores, no_barrier); } void memmove_movnt_sse2_clflush_nobarrier(char *dest, const char *src, size_t len) { LOG(15, "dest %p src %p len %zu", dest, src, len); memmove_movnt_sse2(dest, src, len, flush_clflush_nolog, barrier_after_ntstores, no_barrier); } void memmove_movnt_sse2_clflushopt_nobarrier(char *dest, const char *src, size_t len) { LOG(15, "dest %p src %p len %zu", dest, src, len); memmove_movnt_sse2(dest, src, len, flush_clflushopt_nolog, no_barrier_after_ntstores, no_barrier); } void memmove_movnt_sse2_clwb_nobarrier(char *dest, const char *src, size_t len) { LOG(15, "dest %p src %p len %zu", dest, src, len); memmove_movnt_sse2(dest, src, len, flush_clwb_nolog, no_barrier_after_ntstores, no_barrier); } /* 
variants with perf_barrier */ void memmove_movnt_sse2_noflush_wcbarrier(char *dest, const char *src, size_t len) { LOG(15, "dest %p src %p len %zu", dest, src, len); memmove_movnt_sse2(dest, src, len, noflush, barrier_after_ntstores, wc_barrier); } void memmove_movnt_sse2_empty_wcbarrier(char *dest, const char *src, size_t len) { LOG(15, "dest %p src %p len %zu", dest, src, len); memmove_movnt_sse2(dest, src, len, flush_empty_nolog, barrier_after_ntstores, wc_barrier); } void memmove_movnt_sse2_clflush_wcbarrier(char *dest, const char *src, size_t len) { LOG(15, "dest %p src %p len %zu", dest, src, len); memmove_movnt_sse2(dest, src, len, flush_clflush_nolog, barrier_after_ntstores, wc_barrier); } void memmove_movnt_sse2_clflushopt_wcbarrier(char *dest, const char *src, size_t len) { LOG(15, "dest %p src %p len %zu", dest, src, len); memmove_movnt_sse2(dest, src, len, flush_clflushopt_nolog, no_barrier_after_ntstores, wc_barrier); } void memmove_movnt_sse2_clwb_wcbarrier(char *dest, const char *src, size_t len) { LOG(15, "dest %p src %p len %zu", dest, src, len); memmove_movnt_sse2(dest, src, len, flush_clwb_nolog, no_barrier_after_ntstores, wc_barrier); }
9,636
21.463869
80
c
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmem2/x86_64/memcpy/memcpy_nt_avx512f.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2017-2020, Intel Corporation */ #include <immintrin.h> #include <stddef.h> #include <stdint.h> #include "pmem2_arch.h" #include "avx.h" #include "flush.h" #include "memcpy_memset.h" #include "memcpy_avx512f.h" #include "valgrind_internal.h" static force_inline __m512i mm512_loadu_si512(const char *src, unsigned idx) { return _mm512_loadu_si512((const __m512i *)src + idx); } static force_inline void mm512_stream_si512(char *dest, unsigned idx, __m512i src) { _mm512_stream_si512((__m512i *)dest + idx, src); barrier(); } static force_inline void memmove_movnt32x64b(char *dest, const char *src) { __m512i zmm0 = mm512_loadu_si512(src, 0); __m512i zmm1 = mm512_loadu_si512(src, 1); __m512i zmm2 = mm512_loadu_si512(src, 2); __m512i zmm3 = mm512_loadu_si512(src, 3); __m512i zmm4 = mm512_loadu_si512(src, 4); __m512i zmm5 = mm512_loadu_si512(src, 5); __m512i zmm6 = mm512_loadu_si512(src, 6); __m512i zmm7 = mm512_loadu_si512(src, 7); __m512i zmm8 = mm512_loadu_si512(src, 8); __m512i zmm9 = mm512_loadu_si512(src, 9); __m512i zmm10 = mm512_loadu_si512(src, 10); __m512i zmm11 = mm512_loadu_si512(src, 11); __m512i zmm12 = mm512_loadu_si512(src, 12); __m512i zmm13 = mm512_loadu_si512(src, 13); __m512i zmm14 = mm512_loadu_si512(src, 14); __m512i zmm15 = mm512_loadu_si512(src, 15); __m512i zmm16 = mm512_loadu_si512(src, 16); __m512i zmm17 = mm512_loadu_si512(src, 17); __m512i zmm18 = mm512_loadu_si512(src, 18); __m512i zmm19 = mm512_loadu_si512(src, 19); __m512i zmm20 = mm512_loadu_si512(src, 20); __m512i zmm21 = mm512_loadu_si512(src, 21); __m512i zmm22 = mm512_loadu_si512(src, 22); __m512i zmm23 = mm512_loadu_si512(src, 23); __m512i zmm24 = mm512_loadu_si512(src, 24); __m512i zmm25 = mm512_loadu_si512(src, 25); __m512i zmm26 = mm512_loadu_si512(src, 26); __m512i zmm27 = mm512_loadu_si512(src, 27); __m512i zmm28 = mm512_loadu_si512(src, 28); __m512i zmm29 = mm512_loadu_si512(src, 29); __m512i zmm30 = mm512_loadu_si512(src, 30); 
__m512i zmm31 = mm512_loadu_si512(src, 31); mm512_stream_si512(dest, 0, zmm0); mm512_stream_si512(dest, 1, zmm1); mm512_stream_si512(dest, 2, zmm2); mm512_stream_si512(dest, 3, zmm3); mm512_stream_si512(dest, 4, zmm4); mm512_stream_si512(dest, 5, zmm5); mm512_stream_si512(dest, 6, zmm6); mm512_stream_si512(dest, 7, zmm7); mm512_stream_si512(dest, 8, zmm8); mm512_stream_si512(dest, 9, zmm9); mm512_stream_si512(dest, 10, zmm10); mm512_stream_si512(dest, 11, zmm11); mm512_stream_si512(dest, 12, zmm12); mm512_stream_si512(dest, 13, zmm13); mm512_stream_si512(dest, 14, zmm14); mm512_stream_si512(dest, 15, zmm15); mm512_stream_si512(dest, 16, zmm16); mm512_stream_si512(dest, 17, zmm17); mm512_stream_si512(dest, 18, zmm18); mm512_stream_si512(dest, 19, zmm19); mm512_stream_si512(dest, 20, zmm20); mm512_stream_si512(dest, 21, zmm21); mm512_stream_si512(dest, 22, zmm22); mm512_stream_si512(dest, 23, zmm23); mm512_stream_si512(dest, 24, zmm24); mm512_stream_si512(dest, 25, zmm25); mm512_stream_si512(dest, 26, zmm26); mm512_stream_si512(dest, 27, zmm27); mm512_stream_si512(dest, 28, zmm28); mm512_stream_si512(dest, 29, zmm29); mm512_stream_si512(dest, 30, zmm30); mm512_stream_si512(dest, 31, zmm31); } static force_inline void memmove_movnt16x64b(char *dest, const char *src) { __m512i zmm0 = mm512_loadu_si512(src, 0); __m512i zmm1 = mm512_loadu_si512(src, 1); __m512i zmm2 = mm512_loadu_si512(src, 2); __m512i zmm3 = mm512_loadu_si512(src, 3); __m512i zmm4 = mm512_loadu_si512(src, 4); __m512i zmm5 = mm512_loadu_si512(src, 5); __m512i zmm6 = mm512_loadu_si512(src, 6); __m512i zmm7 = mm512_loadu_si512(src, 7); __m512i zmm8 = mm512_loadu_si512(src, 8); __m512i zmm9 = mm512_loadu_si512(src, 9); __m512i zmm10 = mm512_loadu_si512(src, 10); __m512i zmm11 = mm512_loadu_si512(src, 11); __m512i zmm12 = mm512_loadu_si512(src, 12); __m512i zmm13 = mm512_loadu_si512(src, 13); __m512i zmm14 = mm512_loadu_si512(src, 14); __m512i zmm15 = mm512_loadu_si512(src, 15); mm512_stream_si512(dest, 0, 
zmm0); mm512_stream_si512(dest, 1, zmm1); mm512_stream_si512(dest, 2, zmm2); mm512_stream_si512(dest, 3, zmm3); mm512_stream_si512(dest, 4, zmm4); mm512_stream_si512(dest, 5, zmm5); mm512_stream_si512(dest, 6, zmm6); mm512_stream_si512(dest, 7, zmm7); mm512_stream_si512(dest, 8, zmm8); mm512_stream_si512(dest, 9, zmm9); mm512_stream_si512(dest, 10, zmm10); mm512_stream_si512(dest, 11, zmm11); mm512_stream_si512(dest, 12, zmm12); mm512_stream_si512(dest, 13, zmm13); mm512_stream_si512(dest, 14, zmm14); mm512_stream_si512(dest, 15, zmm15); } static force_inline void memmove_movnt8x64b(char *dest, const char *src) { __m512i zmm0 = mm512_loadu_si512(src, 0); __m512i zmm1 = mm512_loadu_si512(src, 1); __m512i zmm2 = mm512_loadu_si512(src, 2); __m512i zmm3 = mm512_loadu_si512(src, 3); __m512i zmm4 = mm512_loadu_si512(src, 4); __m512i zmm5 = mm512_loadu_si512(src, 5); __m512i zmm6 = mm512_loadu_si512(src, 6); __m512i zmm7 = mm512_loadu_si512(src, 7); mm512_stream_si512(dest, 0, zmm0); mm512_stream_si512(dest, 1, zmm1); mm512_stream_si512(dest, 2, zmm2); mm512_stream_si512(dest, 3, zmm3); mm512_stream_si512(dest, 4, zmm4); mm512_stream_si512(dest, 5, zmm5); mm512_stream_si512(dest, 6, zmm6); mm512_stream_si512(dest, 7, zmm7); } static force_inline void memmove_movnt4x64b(char *dest, const char *src) { __m512i zmm0 = mm512_loadu_si512(src, 0); __m512i zmm1 = mm512_loadu_si512(src, 1); __m512i zmm2 = mm512_loadu_si512(src, 2); __m512i zmm3 = mm512_loadu_si512(src, 3); mm512_stream_si512(dest, 0, zmm0); mm512_stream_si512(dest, 1, zmm1); mm512_stream_si512(dest, 2, zmm2); mm512_stream_si512(dest, 3, zmm3); } static force_inline void memmove_movnt2x64b(char *dest, const char *src) { __m512i zmm0 = mm512_loadu_si512(src, 0); __m512i zmm1 = mm512_loadu_si512(src, 1); mm512_stream_si512(dest, 0, zmm0); mm512_stream_si512(dest, 1, zmm1); } static force_inline void memmove_movnt1x64b(char *dest, const char *src) { __m512i zmm0 = mm512_loadu_si512(src, 0); mm512_stream_si512(dest, 0, 
zmm0); } static force_inline void memmove_movnt1x32b(char *dest, const char *src) { __m256i zmm0 = _mm256_loadu_si256((__m256i *)src); _mm256_stream_si256((__m256i *)dest, zmm0); } static force_inline void memmove_movnt1x16b(char *dest, const char *src) { __m128i ymm0 = _mm_loadu_si128((__m128i *)src); _mm_stream_si128((__m128i *)dest, ymm0); } static force_inline void memmove_movnt1x8b(char *dest, const char *src) { _mm_stream_si64((long long *)dest, *(long long *)src); } static force_inline void memmove_movnt1x4b(char *dest, const char *src) { _mm_stream_si32((int *)dest, *(int *)src); } static force_inline void memmove_movnt_avx512f_fw(char *dest, const char *src, size_t len, flush_fn flush) { size_t cnt = (uint64_t)dest & 63; if (cnt > 0) { cnt = 64 - cnt; if (cnt > len) cnt = len; memmove_small_avx512f(dest, src, cnt, flush); dest += cnt; src += cnt; len -= cnt; } while (len >= 32 * 64) { memmove_movnt32x64b(dest, src); dest += 32 * 64; src += 32 * 64; len -= 32 * 64; } if (len >= 16 * 64) { memmove_movnt16x64b(dest, src); dest += 16 * 64; src += 16 * 64; len -= 16 * 64; } if (len >= 8 * 64) { memmove_movnt8x64b(dest, src); dest += 8 * 64; src += 8 * 64; len -= 8 * 64; } if (len >= 4 * 64) { memmove_movnt4x64b(dest, src); dest += 4 * 64; src += 4 * 64; len -= 4 * 64; } if (len >= 2 * 64) { memmove_movnt2x64b(dest, src); dest += 2 * 64; src += 2 * 64; len -= 2 * 64; } if (len >= 1 * 64) { memmove_movnt1x64b(dest, src); dest += 1 * 64; src += 1 * 64; len -= 1 * 64; } if (len == 0) goto end; /* There's no point in using more than 1 nt store for 1 cache line. 
*/ if (util_is_pow2(len)) { if (len == 32) memmove_movnt1x32b(dest, src); else if (len == 16) memmove_movnt1x16b(dest, src); else if (len == 8) memmove_movnt1x8b(dest, src); else if (len == 4) memmove_movnt1x4b(dest, src); else goto nonnt; goto end; } nonnt: memmove_small_avx512f(dest, src, len, flush); end: avx_zeroupper(); } static force_inline void memmove_movnt_avx512f_bw(char *dest, const char *src, size_t len, flush_fn flush) { dest += len; src += len; size_t cnt = (uint64_t)dest & 63; if (cnt > 0) { if (cnt > len) cnt = len; dest -= cnt; src -= cnt; len -= cnt; memmove_small_avx512f(dest, src, cnt, flush); } while (len >= 32 * 64) { dest -= 32 * 64; src -= 32 * 64; len -= 32 * 64; memmove_movnt32x64b(dest, src); } if (len >= 16 * 64) { dest -= 16 * 64; src -= 16 * 64; len -= 16 * 64; memmove_movnt16x64b(dest, src); } if (len >= 8 * 64) { dest -= 8 * 64; src -= 8 * 64; len -= 8 * 64; memmove_movnt8x64b(dest, src); } if (len >= 4 * 64) { dest -= 4 * 64; src -= 4 * 64; len -= 4 * 64; memmove_movnt4x64b(dest, src); } if (len >= 2 * 64) { dest -= 2 * 64; src -= 2 * 64; len -= 2 * 64; memmove_movnt2x64b(dest, src); } if (len >= 1 * 64) { dest -= 1 * 64; src -= 1 * 64; len -= 1 * 64; memmove_movnt1x64b(dest, src); } if (len == 0) goto end; /* There's no point in using more than 1 nt store for 1 cache line. 
*/ if (util_is_pow2(len)) { if (len == 32) { dest -= 32; src -= 32; memmove_movnt1x32b(dest, src); } else if (len == 16) { dest -= 16; src -= 16; memmove_movnt1x16b(dest, src); } else if (len == 8) { dest -= 8; src -= 8; memmove_movnt1x8b(dest, src); } else if (len == 4) { dest -= 4; src -= 4; memmove_movnt1x4b(dest, src); } else { goto nonnt; } goto end; } nonnt: dest -= len; src -= len; memmove_small_avx512f(dest, src, len, flush); end: avx_zeroupper(); } static force_inline void memmove_movnt_avx512f(char *dest, const char *src, size_t len, flush_fn flush, barrier_fn barrier) { if ((uintptr_t)dest - (uintptr_t)src >= len) memmove_movnt_avx512f_fw(dest, src, len, flush); else memmove_movnt_avx512f_bw(dest, src, len, flush); barrier(); VALGRIND_DO_FLUSH(dest, len); } void memmove_movnt_avx512f_noflush(char *dest, const char *src, size_t len) { LOG(15, "dest %p src %p len %zu", dest, src, len); memmove_movnt_avx512f(dest, src, len, noflush, barrier_after_ntstores); } void memmove_movnt_avx512f_empty(char *dest, const char *src, size_t len) { LOG(15, "dest %p src %p len %zu", dest, src, len); memmove_movnt_avx512f(dest, src, len, flush_empty_nolog, barrier_after_ntstores); } void memmove_movnt_avx512f_clflush(char *dest, const char *src, size_t len) { LOG(15, "dest %p src %p len %zu", dest, src, len); memmove_movnt_avx512f(dest, src, len, flush_clflush_nolog, barrier_after_ntstores); } void memmove_movnt_avx512f_clflushopt(char *dest, const char *src, size_t len) { LOG(15, "dest %p src %p len %zu", dest, src, len); memmove_movnt_avx512f(dest, src, len, flush_clflushopt_nolog, no_barrier_after_ntstores); } void memmove_movnt_avx512f_clwb(char *dest, const char *src, size_t len) { LOG(15, "dest %p src %p len %zu", dest, src, len); memmove_movnt_avx512f(dest, src, len, flush_clwb_nolog, no_barrier_after_ntstores); }
11,246
23.45
78
c
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmem2/aarch64/arm_cacheops.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2014-2020, Intel Corporation */ /* * ARM inline assembly to flush and invalidate caches * clwb => dc cvac * clflushopt => dc civac * fence => dmb ish * sfence => dmb ishst */ /* * Cache instructions on ARM: * ARMv8.0-a DC CVAC - cache clean to Point of Coherency * Meant for thread synchronization, usually implies * real memory flush but may mean less. * ARMv8.2-a DC CVAP - cache clean to Point of Persistency * Meant exactly for our use. * ARMv8.5-a DC CVADP - cache clean to Point of Deep Persistency * As of mid-2019 not on any commercially available CPU. * Any of the above may be disabled for EL0, but it's probably safe to consider * that a system configuration error. * Other flags include I (like "DC CIVAC") that invalidates the cache line, but * we don't want that. * * Memory fences: * * DMB [ISH] MFENCE * * DMB [ISH]ST SFENCE * * DMB [ISH]LD LFENCE * * Memory domains (cache coherency): * * non-shareable - local to a single core * * inner shareable (ISH) - a group of CPU clusters/sockets/other hardware * Linux requires that anything within one operating system/hypervisor * is within the same Inner Shareable domain. * * outer shareable (OSH) - one or more separate ISH domains * * full system (SY) - anything that can possibly access memory * Docs: ARM DDI 0487E.a page B2-144. * * Exception (privilege) levels: * * EL0 - userspace (ring 3) * * EL1 - kernel (ring 0) * * EL2 - hypervisor (ring -1) * * EL3 - "secure world" (ring -3) */ #ifndef AARCH64_CACHEOPS_H #define AARCH64_CACHEOPS_H #include <stdlib.h> static inline void arm_clean_va_to_poc(void const *p __attribute__((unused))) { asm volatile("dc cvac, %0" : : "r" (p) : "memory"); } static inline void arm_store_memory_barrier(void) { asm volatile("dmb ishst" : : : "memory"); } #endif
1,988
30.571429
80
h
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmem2/ppc64/init.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2019, IBM Corporation */ /* Copyright 2019-2020, Intel Corporation */ #include <errno.h> #include <sys/mman.h> #include "out.h" #include "pmem2_arch.h" #include "util.h" /* * Older assemblers versions do not support the latest versions of L, e.g. * Binutils 2.34. * Workaround this by using longs. */ #define __SYNC(l) ".long (0x7c0004AC | ((" #l ") << 21))" #define __DCBF(ra, rb, l) ".long (0x7c0000AC | ((" #l ") << 21)" \ " | ((" #ra ") << 16) | ((" #rb ") << 11))" static void ppc_fence(void) { LOG(15, NULL); /* * Force a memory barrier to flush out all cache lines. * Uses a heavyweight sync in order to guarantee the memory ordering * even with a data cache flush. * According to the POWER ISA 3.1, phwsync (aka. sync (L=4)) is treated * as a hwsync by processors compatible with previous versions of the * POWER ISA. */ asm volatile(__SYNC(4) : : : "memory"); } static void ppc_flush(const void *addr, size_t size) { LOG(15, "addr %p size %zu", addr, size); uintptr_t uptr = (uintptr_t)addr; uintptr_t end = uptr + size; /* round down the address */ uptr &= ~(CACHELINE_SIZE - 1); while (uptr < end) { /* * Flush the data cache block. * According to the POWER ISA 3.1, dcbstps (aka. dcbf (L=6)) * behaves as dcbf (L=0) on previous processors. */ asm volatile(__DCBF(0, %0, 6) : :"r"(uptr) : "memory"); uptr += CACHELINE_SIZE; } } void pmem2_arch_init(struct pmem2_arch_info *info) { LOG(3, "libpmem*: PPC64 support"); info->fence = ppc_fence; info->flush = ppc_flush; }
1,594
22.80597
74
c
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/windows/getopt/getopt.c
/* * *Copyright (c) 2012, Kim Gräsman * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Kim Gräsman nor the * names of contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL KIM GRÄSMAN BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include "getopt.h" #include <stddef.h> #include <string.h> #include <stdio.h> char* optarg; int optopt; /* The variable optind [...] shall be initialized to 1 by the system. 
*/ int optind = 1; int opterr; static char* optcursor = NULL; static char *first = NULL; /* rotates argv array */ static void rotate(char **argv, int argc) { if (argc <= 1) return; char *tmp = argv[0]; memmove(argv, argv + 1, (argc - 1) * sizeof(char *)); argv[argc - 1] = tmp; } /* Implemented based on [1] and [2] for optional arguments. optopt is handled FreeBSD-style, per [3]. Other GNU and FreeBSD extensions are purely accidental. [1] https://pubs.opengroup.org/onlinepubs/000095399/functions/getopt.html [2] https://www.kernel.org/doc/man-pages/online/pages/man3/getopt.3.html [3] https://www.freebsd.org/cgi/man.cgi?query=getopt&sektion=3&manpath=FreeBSD+9.0-RELEASE */ int getopt(int argc, char* const argv[], const char* optstring) { int optchar = -1; const char* optdecl = NULL; optarg = NULL; opterr = 0; optopt = 0; /* Unspecified, but we need it to avoid overrunning the argv bounds. */ if (optind >= argc) goto no_more_optchars; /* If, when getopt() is called argv[optind] is a null pointer, getopt() shall return -1 without changing optind. */ if (argv[optind] == NULL) goto no_more_optchars; /* If, when getopt() is called *argv[optind] is not the character '-', permute argv to move non options to the end */ if (*argv[optind] != '-') { if (argc - optind <= 1) goto no_more_optchars; if (!first) first = argv[optind]; do { rotate((char **)(argv + optind), argc - optind); } while (*argv[optind] != '-' && argv[optind] != first); if (argv[optind] == first) goto no_more_optchars; } /* If, when getopt() is called argv[optind] points to the string "-", getopt() shall return -1 without changing optind. */ if (strcmp(argv[optind], "-") == 0) goto no_more_optchars; /* If, when getopt() is called argv[optind] points to the string "--", getopt() shall return -1 after incrementing optind. 
*/ if (strcmp(argv[optind], "--") == 0) { ++optind; if (first) { do { rotate((char **)(argv + optind), argc - optind); } while (argv[optind] != first); } goto no_more_optchars; } if (optcursor == NULL || *optcursor == '\0') optcursor = argv[optind] + 1; optchar = *optcursor; /* FreeBSD: The variable optopt saves the last known option character returned by getopt(). */ optopt = optchar; /* The getopt() function shall return the next option character (if one is found) from argv that matches a character in optstring, if there is one that matches. */ optdecl = strchr(optstring, optchar); if (optdecl) { /* [I]f a character is followed by a colon, the option takes an argument. */ if (optdecl[1] == ':') { optarg = ++optcursor; if (*optarg == '\0') { /* GNU extension: Two colons mean an option takes an optional arg; if there is text in the current argv-element (i.e., in the same word as the option name itself, for example, "-oarg"), then it is returned in optarg, otherwise optarg is set to zero. */ if (optdecl[2] != ':') { /* If the option was the last character in the string pointed to by an element of argv, then optarg shall contain the next element of argv, and optind shall be incremented by 2. If the resulting value of optind is greater than argc, this indicates a missing option-argument, and getopt() shall return an error indication. Otherwise, optarg shall point to the string following the option character in that element of argv, and optind shall be incremented by 1. */ if (++optind < argc) { optarg = argv[optind]; } else { /* If it detects a missing option-argument, it shall return the colon character ( ':' ) if the first character of optstring was a colon, or a question-mark character ( '?' ) otherwise. */ optarg = NULL; fprintf(stderr, "%s: option requires an argument -- '%c'\n", argv[0], optchar); optchar = (optstring[0] == ':') ? 
':' : '?'; } } else { optarg = NULL; } } optcursor = NULL; } } else { fprintf(stderr,"%s: invalid option -- '%c'\n", argv[0], optchar); /* If getopt() encounters an option character that is not contained in optstring, it shall return the question-mark ( '?' ) character. */ optchar = '?'; } if (optcursor == NULL || *++optcursor == '\0') ++optind; return optchar; no_more_optchars: optcursor = NULL; first = NULL; return -1; } /* Implementation based on [1]. [1] https://www.kernel.org/doc/man-pages/online/pages/man3/getopt.3.html */ int getopt_long(int argc, char* const argv[], const char* optstring, const struct option* longopts, int* longindex) { const struct option* o = longopts; const struct option* match = NULL; int num_matches = 0; size_t argument_name_length = 0; const char* current_argument = NULL; int retval = -1; optarg = NULL; optopt = 0; if (optind >= argc) return -1; /* If, when getopt() is called argv[optind] is a null pointer, getopt_long() shall return -1 without changing optind. */ if (argv[optind] == NULL) goto no_more_optchars; /* If, when getopt_long() is called *argv[optind] is not the character '-', permute argv to move non options to the end */ if (*argv[optind] != '-') { if (argc - optind <= 1) goto no_more_optchars; if (!first) first = argv[optind]; do { rotate((char **)(argv + optind), argc - optind); } while (*argv[optind] != '-' && argv[optind] != first); if (argv[optind] == first) goto no_more_optchars; } if (strlen(argv[optind]) < 3 || strncmp(argv[optind], "--", 2) != 0) return getopt(argc, argv, optstring); /* It's an option; starts with -- and is longer than two chars. 
*/ current_argument = argv[optind] + 2; argument_name_length = strcspn(current_argument, "="); for (; o->name; ++o) { if (strncmp(o->name, current_argument, argument_name_length) == 0) { match = o; ++num_matches; if (strlen(o->name) == argument_name_length) { /* found match is exactly the one which we are looking for */ num_matches = 1; break; } } } if (num_matches == 1) { /* If longindex is not NULL, it points to a variable which is set to the index of the long option relative to longopts. */ if (longindex) *longindex = (int)(match - longopts); /* If flag is NULL, then getopt_long() shall return val. Otherwise, getopt_long() returns 0, and flag shall point to a variable which shall be set to val if the option is found, but left unchanged if the option is not found. */ if (match->flag) *(match->flag) = match->val; retval = match->flag ? 0 : match->val; if (match->has_arg != no_argument) { optarg = strchr(argv[optind], '='); if (optarg != NULL) ++optarg; if (match->has_arg == required_argument) { /* Only scan the next argv for required arguments. Behavior is not specified, but has been observed with Ubuntu and Mac OSX. */ if (optarg == NULL && ++optind < argc) { optarg = argv[optind]; } if (optarg == NULL) retval = ':'; } } else if (strchr(argv[optind], '=')) { /* An argument was provided to a non-argument option. I haven't seen this specified explicitly, but both GNU and BSD-based implementations show this behavior. */ retval = '?'; } } else { /* Unknown option or ambiguous match. */ retval = '?'; if (num_matches == 0) { fprintf(stderr, "%s: unrecognized option -- '%s'\n", argv[0], argv[optind]); } else { fprintf(stderr, "%s: option '%s' is ambiguous\n", argv[0], argv[optind]); } } ++optind; return retval; no_more_optchars: first = NULL; return -1; }
9,866
32.561224
91
c
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/windows/getopt/getopt.h
/* * *Copyright (c) 2012, Kim Gräsman * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Kim Gräsman nor the * names of contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL KIM GRÄSMAN BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #ifndef INCLUDED_GETOPT_PORT_H #define INCLUDED_GETOPT_PORT_H #if defined(__cplusplus) extern "C" { #endif #define no_argument 0 #define required_argument 1 #define optional_argument 2 extern char* optarg; extern int optind, opterr, optopt; struct option { const char* name; int has_arg; int* flag; int val; }; int getopt(int argc, char* const argv[], const char* optstring); int getopt_long(int argc, char* const argv[], const char* optstring, const struct option* longopts, int* longindex); #if defined(__cplusplus) } #endif #endif // INCLUDED_GETOPT_PORT_H
2,137
35.237288
79
h
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/windows/include/win_mmap.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2019, Intel Corporation */ /* * Copyright (c) 2016, Microsoft Corporation. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ /* * win_mmap.h -- (internal) tracks the regions mapped by mmap */ #ifndef WIN_MMAP_H #define WIN_MMAP_H 1 #include "queue.h" #define roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y)) #define rounddown(x, y) (((x) / (y)) * (y)) void win_mmap_init(void); void win_mmap_fini(void); /* allocation/mmap granularity */ extern unsigned long long Mmap_align; typedef enum FILE_MAPPING_TRACKER_FLAGS { FILE_MAPPING_TRACKER_FLAG_DIRECT_MAPPED = 0x0001, /* * This should hold the value of all flags ORed for debug purpose. */ FILE_MAPPING_TRACKER_FLAGS_MASK = FILE_MAPPING_TRACKER_FLAG_DIRECT_MAPPED } FILE_MAPPING_TRACKER_FLAGS; /* * this structure tracks the file mappings outstanding per file handle */ typedef struct FILE_MAPPING_TRACKER { PMDK_SORTEDQ_ENTRY(FILE_MAPPING_TRACKER) ListEntry; HANDLE FileHandle; HANDLE FileMappingHandle; void *BaseAddress; void *EndAddress; DWORD Access; os_off_t Offset; size_t FileLen; FILE_MAPPING_TRACKER_FLAGS Flags; } FILE_MAPPING_TRACKER, *PFILE_MAPPING_TRACKER; extern SRWLOCK FileMappingQLock; extern PMDK_SORTEDQ_HEAD(FMLHead, FILE_MAPPING_TRACKER) FileMappingQHead; #endif /* WIN_MMAP_H */
2,871
34.02439
74
h
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/windows/include/platform.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2020, Intel Corporation */ /* * Copyright (c) 2016, Microsoft Corporation. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * platform.h -- dirty hacks to compile Linux code on Windows using VC++ * * This is included to each source file using "/FI" (forced include) option. 
* * XXX - it is a subject for refactoring */ #ifndef PLATFORM_H #define PLATFORM_H 1 #pragma warning(disable : 4996) #pragma warning(disable : 4200) /* allow flexible array member */ #pragma warning(disable : 4819) /* non unicode characters */ #ifdef __cplusplus extern "C" { #endif /* Prevent PMDK compilation for 32-bit platforms */ #if defined(_WIN32) && !defined(_WIN64) #error "32-bit builds of PMDK are not supported!" #endif #define _CRT_RAND_S /* rand_s() */ #include <windows.h> #include <stdint.h> #include <time.h> #include <io.h> #include <process.h> #include <fcntl.h> #include <sys/types.h> #include <malloc.h> #include <signal.h> #include <intrin.h> #include <direct.h> /* use uuid_t definition from util.h */ #ifdef uuid_t #undef uuid_t #endif /* a few trivial substitutions */ #define PATH_MAX MAX_PATH #define __thread __declspec(thread) #define __func__ __FUNCTION__ #ifdef _DEBUG #define DEBUG #endif /* * The inline keyword is available only in VC++. * https://msdn.microsoft.com/en-us/library/bw1hbe6y.aspx */ #ifndef __cplusplus #define inline __inline #endif /* XXX - no equivalents in VC++ */ #define __attribute__(a) #define __builtin_constant_p(cnd) 0 /* * missing definitions */ /* errno.h */ #define ELIBACC 79 /* cannot access a needed shared library */ /* sys/stat.h */ #define S_IRUSR S_IREAD #define S_IWUSR S_IWRITE #define S_IRGRP S_IRUSR #define S_IWGRP S_IWUSR #define O_SYNC 0 typedef int mode_t; #define fchmod(fd, mode) 0 /* XXX - dummy */ #define setlinebuf(fp) setvbuf(fp, NULL, _IOLBF, BUFSIZ); /* unistd.h */ typedef long long os_off_t; typedef long long ssize_t; int setenv(const char *name, const char *value, int overwrite); int unsetenv(const char *name); /* fcntl.h */ int posix_fallocate(int fd, os_off_t offset, os_off_t len); /* string.h */ #define strtok_r strtok_s /* time.h */ #define CLOCK_MONOTONIC 1 #define CLOCK_REALTIME 2 int clock_gettime(int id, struct timespec *ts); /* signal.h */ typedef unsigned long long sigset_t; /* one bit for 
each signal */ C_ASSERT(NSIG <= sizeof(sigset_t) * 8); struct sigaction { void (*sa_handler) (int signum); /* void (*sa_sigaction)(int, siginfo_t *, void *); */ sigset_t sa_mask; int sa_flags; void (*sa_restorer) (void); }; __inline int sigemptyset(sigset_t *set) { *set = 0; return 0; } __inline int sigfillset(sigset_t *set) { *set = ~0; return 0; } __inline int sigaddset(sigset_t *set, int signum) { if (signum <= 0 || signum >= NSIG) { errno = EINVAL; return -1; } *set |= (1ULL << (signum - 1)); return 0; } __inline int sigdelset(sigset_t *set, int signum) { if (signum <= 0 || signum >= NSIG) { errno = EINVAL; return -1; } *set &= ~(1ULL << (signum - 1)); return 0; } __inline int sigismember(const sigset_t *set, int signum) { if (signum <= 0 || signum >= NSIG) { errno = EINVAL; return -1; } return ((*set & (1ULL << (signum - 1))) ? 1 : 0); } /* sched.h */ /* * sched_yield -- yield the processor */ __inline int sched_yield(void) { SwitchToThread(); return 0; /* always succeeds */ } /* * helper macros for library ctor/dtor function declarations */ #define MSVC_CONSTR(func) \ void func(void); \ __pragma(comment(linker, "/include:_" #func)) \ __pragma(section(".CRT$XCU", read)) \ __declspec(allocate(".CRT$XCU")) \ const void (WINAPI *_##func)(void) = (const void (WINAPI *)(void))func; #define MSVC_DESTR(func) \ void func(void); \ static void _##func##_reg(void) { atexit(func); }; \ MSVC_CONSTR(_##func##_reg) #ifdef __cplusplus } #endif #endif /* PLATFORM_H */
5,431
22.929515
76
h
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/windows/include/endian.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2017, Intel Corporation */ /* * endian.h -- convert values between host and big-/little-endian byte order */ #ifndef ENDIAN_H #define ENDIAN_H 1 /* * XXX: On Windows we can assume little-endian architecture */ #include <intrin.h> #define htole16(a) (a) #define htole32(a) (a) #define htole64(a) (a) #define le16toh(a) (a) #define le32toh(a) (a) #define le64toh(a) (a) #define htobe16(x) _byteswap_ushort(x) #define htobe32(x) _byteswap_ulong(x) #define htobe64(x) _byteswap_uint64(x) #define be16toh(x) _byteswap_ushort(x) #define be32toh(x) _byteswap_ulong(x) #define be64toh(x) _byteswap_uint64(x) #endif /* ENDIAN_H */
696
20.121212
76
h
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/windows/include/sys/file.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2018, Intel Corporation */ /* * Copyright (c) 2016, Microsoft Corporation. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * sys/file.h -- file locking */
1,750
45.078947
74
h
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/windows/include/sys/param.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2018, Intel Corporation */ /* * sys/param.h -- a few useful macros */ #ifndef SYS_PARAM_H #define SYS_PARAM_H 1 #define roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y)) #define howmany(x, y) (((x) + ((y) - 1)) / (y)) #define BPB 8 /* bits per byte */ #define setbit(b, i) ((b)[(i) / BPB] |= 1 << ((i) % BPB)) #define isset(b, i) ((b)[(i) / BPB] & (1 << ((i) % BPB))) #define isclr(b, i) (((b)[(i) / BPB] & (1 << ((i) % BPB))) == 0) #define MIN(a, b) (((a) < (b)) ? (a) : (b)) #define MAX(a, b) (((a) > (b)) ? (a) : (b)) #endif /* SYS_PARAM_H */
612
24.541667
64
h
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/include/libpmemblk.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2014-2019, Intel Corporation */ /* * libpmemblk.h -- definitions of libpmemblk entry points * * This library provides support for programming with persistent memory (pmem). * * libpmemblk provides support for arrays of atomically-writable blocks. * * See libpmemblk(7) for details. */ #ifndef LIBPMEMBLK_H #define LIBPMEMBLK_H 1 #include <sys/types.h> #ifdef _WIN32 #include <pmemcompat.h> #ifndef PMDK_UTF8_API #define pmemblk_open pmemblk_openW #define pmemblk_create pmemblk_createW #define pmemblk_check pmemblk_checkW #define pmemblk_check_version pmemblk_check_versionW #define pmemblk_errormsg pmemblk_errormsgW #define pmemblk_ctl_get pmemblk_ctl_getW #define pmemblk_ctl_set pmemblk_ctl_setW #define pmemblk_ctl_exec pmemblk_ctl_execW #else #define pmemblk_open pmemblk_openU #define pmemblk_create pmemblk_createU #define pmemblk_check pmemblk_checkU #define pmemblk_check_version pmemblk_check_versionU #define pmemblk_errormsg pmemblk_errormsgU #define pmemblk_ctl_get pmemblk_ctl_getU #define pmemblk_ctl_set pmemblk_ctl_setU #define pmemblk_ctl_exec pmemblk_ctl_execU #endif #endif #ifdef __cplusplus extern "C" { #endif /* * opaque type, internal to libpmemblk */ typedef struct pmemblk PMEMblkpool; /* * PMEMBLK_MAJOR_VERSION and PMEMBLK_MINOR_VERSION provide the current version * of the libpmemblk API as provided by this header file. Applications can * verify that the version available at run-time is compatible with the version * used at compile-time by passing these defines to pmemblk_check_version(). 
*/ #define PMEMBLK_MAJOR_VERSION 1 #define PMEMBLK_MINOR_VERSION 1 #ifndef _WIN32 const char *pmemblk_check_version(unsigned major_required, unsigned minor_required); #else const char *pmemblk_check_versionU(unsigned major_required, unsigned minor_required); const wchar_t *pmemblk_check_versionW(unsigned major_required, unsigned minor_required); #endif /* XXX - unify minimum pool size for both OS-es */ #ifndef _WIN32 #if defined(__x86_64__) || defined(__M_X64__) || defined(__aarch64__) /* minimum pool size: 16MiB + 4KiB (minimum BTT size + mmap alignment) */ #define PMEMBLK_MIN_POOL ((size_t)((1u << 20) * 16 + (1u << 10) * 8)) #elif defined(__PPC64__) /* minimum pool size: 16MiB + 128KiB (minimum BTT size + mmap alignment) */ #define PMEMBLK_MIN_POOL ((size_t)((1u << 20) * 16 + (1u << 10) * 128)) #else #error unable to recognize ISA at compile time #endif #else /* minimum pool size: 16MiB + 64KiB (minimum BTT size + mmap alignment) */ #define PMEMBLK_MIN_POOL ((size_t)((1u << 20) * 16 + (1u << 10) * 64)) #endif /* * This limit is set arbitrary to incorporate a pool header and required * alignment plus supply. 
*/ #define PMEMBLK_MIN_PART ((size_t)(1024 * 1024 * 2)) /* 2 MiB */ #define PMEMBLK_MIN_BLK ((size_t)512) #ifndef _WIN32 PMEMblkpool *pmemblk_open(const char *path, size_t bsize); #else PMEMblkpool *pmemblk_openU(const char *path, size_t bsize); PMEMblkpool *pmemblk_openW(const wchar_t *path, size_t bsize); #endif #ifndef _WIN32 PMEMblkpool *pmemblk_create(const char *path, size_t bsize, size_t poolsize, mode_t mode); #else PMEMblkpool *pmemblk_createU(const char *path, size_t bsize, size_t poolsize, mode_t mode); PMEMblkpool *pmemblk_createW(const wchar_t *path, size_t bsize, size_t poolsize, mode_t mode); #endif #ifndef _WIN32 int pmemblk_check(const char *path, size_t bsize); #else int pmemblk_checkU(const char *path, size_t bsize); int pmemblk_checkW(const wchar_t *path, size_t bsize); #endif void pmemblk_close(PMEMblkpool *pbp); size_t pmemblk_bsize(PMEMblkpool *pbp); size_t pmemblk_nblock(PMEMblkpool *pbp); int pmemblk_read(PMEMblkpool *pbp, void *buf, long long blockno); int pmemblk_write(PMEMblkpool *pbp, const void *buf, long long blockno); int pmemblk_set_zero(PMEMblkpool *pbp, long long blockno); int pmemblk_set_error(PMEMblkpool *pbp, long long blockno); /* * Passing NULL to pmemblk_set_funcs() tells libpmemblk to continue to use the * default for that function. The replacement functions must not make calls * back into libpmemblk. 
*/ void pmemblk_set_funcs( void *(*malloc_func)(size_t size), void (*free_func)(void *ptr), void *(*realloc_func)(void *ptr, size_t size), char *(*strdup_func)(const char *s)); #ifndef _WIN32 const char *pmemblk_errormsg(void); #else const char *pmemblk_errormsgU(void); const wchar_t *pmemblk_errormsgW(void); #endif #ifndef _WIN32 /* EXPERIMENTAL */ int pmemblk_ctl_get(PMEMblkpool *pbp, const char *name, void *arg); int pmemblk_ctl_set(PMEMblkpool *pbp, const char *name, void *arg); int pmemblk_ctl_exec(PMEMblkpool *pbp, const char *name, void *arg); #else int pmemblk_ctl_getU(PMEMblkpool *pbp, const char *name, void *arg); int pmemblk_ctl_getW(PMEMblkpool *pbp, const wchar_t *name, void *arg); int pmemblk_ctl_setU(PMEMblkpool *pbp, const char *name, void *arg); int pmemblk_ctl_setW(PMEMblkpool *pbp, const wchar_t *name, void *arg); int pmemblk_ctl_execU(PMEMblkpool *pbp, const char *name, void *arg); int pmemblk_ctl_execW(PMEMblkpool *pbp, const wchar_t *name, void *arg); #endif #ifdef __cplusplus } #endif #endif /* libpmemblk.h */
5,183
30.418182
79
h
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/include/libpmempool.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2016-2019, Intel Corporation */ /* * libpmempool.h -- definitions of libpmempool entry points * * See libpmempool(7) for details. */ #ifndef LIBPMEMPOOL_H #define LIBPMEMPOOL_H 1 #include <stdint.h> #include <stddef.h> #include <limits.h> #ifdef _WIN32 #include <pmemcompat.h> #ifndef PMDK_UTF8_API #define pmempool_check_status pmempool_check_statusW #define pmempool_check_args pmempool_check_argsW #define pmempool_check_init pmempool_check_initW #define pmempool_check pmempool_checkW #define pmempool_sync pmempool_syncW #define pmempool_transform pmempool_transformW #define pmempool_rm pmempool_rmW #define pmempool_check_version pmempool_check_versionW #define pmempool_errormsg pmempool_errormsgW #define pmempool_feature_enable pmempool_feature_enableW #define pmempool_feature_disable pmempool_feature_disableW #define pmempool_feature_query pmempool_feature_queryW #else #define pmempool_check_status pmempool_check_statusU #define pmempool_check_args pmempool_check_argsU #define pmempool_check_init pmempool_check_initU #define pmempool_check pmempool_checkU #define pmempool_sync pmempool_syncU #define pmempool_transform pmempool_transformU #define pmempool_rm pmempool_rmU #define pmempool_check_version pmempool_check_versionU #define pmempool_errormsg pmempool_errormsgU #define pmempool_feature_enable pmempool_feature_enableU #define pmempool_feature_disable pmempool_feature_disableU #define pmempool_feature_query pmempool_feature_queryU #endif #endif #ifdef __cplusplus extern "C" { #endif /* PMEMPOOL CHECK */ /* * pool types */ enum pmempool_pool_type { PMEMPOOL_POOL_TYPE_DETECT, PMEMPOOL_POOL_TYPE_LOG, PMEMPOOL_POOL_TYPE_BLK, PMEMPOOL_POOL_TYPE_OBJ, PMEMPOOL_POOL_TYPE_BTT, PMEMPOOL_POOL_TYPE_RESERVED1, /* used to be cto */ }; /* * perform repairs */ #define PMEMPOOL_CHECK_REPAIR (1U << 0) /* * emulate repairs */ #define PMEMPOOL_CHECK_DRY_RUN (1U << 1) /* * perform hazardous repairs */ #define 
PMEMPOOL_CHECK_ADVANCED (1U << 2) /* * do not ask before repairs */ #define PMEMPOOL_CHECK_ALWAYS_YES (1U << 3) /* * generate info statuses */ #define PMEMPOOL_CHECK_VERBOSE (1U << 4) /* * generate string format statuses */ #define PMEMPOOL_CHECK_FORMAT_STR (1U << 5) /* * types of check statuses */ enum pmempool_check_msg_type { PMEMPOOL_CHECK_MSG_TYPE_INFO, PMEMPOOL_CHECK_MSG_TYPE_ERROR, PMEMPOOL_CHECK_MSG_TYPE_QUESTION, }; /* * check result types */ enum pmempool_check_result { PMEMPOOL_CHECK_RESULT_CONSISTENT, PMEMPOOL_CHECK_RESULT_NOT_CONSISTENT, PMEMPOOL_CHECK_RESULT_REPAIRED, PMEMPOOL_CHECK_RESULT_CANNOT_REPAIR, PMEMPOOL_CHECK_RESULT_ERROR, PMEMPOOL_CHECK_RESULT_SYNC_REQ, }; /* * check context */ typedef struct pmempool_check_ctx PMEMpoolcheck; /* * finalize the check and get the result */ enum pmempool_check_result pmempool_check_end(PMEMpoolcheck *ppc); /* PMEMPOOL RM */ #define PMEMPOOL_RM_FORCE (1U << 0) /* ignore any errors */ #define PMEMPOOL_RM_POOLSET_LOCAL (1U << 1) /* remove local poolsets */ #define PMEMPOOL_RM_POOLSET_REMOTE (1U << 2) /* remove remote poolsets */ /* * LIBPMEMPOOL SYNC */ /* * fix bad blocks - it requires creating or reading special recovery files */ #define PMEMPOOL_SYNC_FIX_BAD_BLOCKS (1U << 0) /* * do not apply changes, only check if operation is viable */ #define PMEMPOOL_SYNC_DRY_RUN (1U << 1) /* * LIBPMEMPOOL TRANSFORM */ /* * do not apply changes, only check if operation is viable */ #define PMEMPOOL_TRANSFORM_DRY_RUN (1U << 1) /* * PMEMPOOL_MAJOR_VERSION and PMEMPOOL_MINOR_VERSION provide the current version * of the libpmempool API as provided by this header file. Applications can * verify that the version available at run-time is compatible with the version * used at compile-time by passing these defines to pmempool_check_version(). 
*/ #define PMEMPOOL_MAJOR_VERSION 1 #define PMEMPOOL_MINOR_VERSION 3 /* * check status */ struct pmempool_check_statusU { enum pmempool_check_msg_type type; struct { const char *msg; const char *answer; } str; }; #ifndef _WIN32 #define pmempool_check_status pmempool_check_statusU #else struct pmempool_check_statusW { enum pmempool_check_msg_type type; struct { const wchar_t *msg; const wchar_t *answer; } str; }; #endif /* * check context arguments */ struct pmempool_check_argsU { const char *path; const char *backup_path; enum pmempool_pool_type pool_type; unsigned flags; }; #ifndef _WIN32 #define pmempool_check_args pmempool_check_argsU #else struct pmempool_check_argsW { const wchar_t *path; const wchar_t *backup_path; enum pmempool_pool_type pool_type; unsigned flags; }; #endif /* * initialize a check context */ #ifndef _WIN32 PMEMpoolcheck * pmempool_check_init(struct pmempool_check_args *args, size_t args_size); #else PMEMpoolcheck * pmempool_check_initU(struct pmempool_check_argsU *args, size_t args_size); PMEMpoolcheck * pmempool_check_initW(struct pmempool_check_argsW *args, size_t args_size); #endif /* * start / resume the check */ #ifndef _WIN32 struct pmempool_check_status *pmempool_check(PMEMpoolcheck *ppc); #else struct pmempool_check_statusU *pmempool_checkU(PMEMpoolcheck *ppc); struct pmempool_check_statusW *pmempool_checkW(PMEMpoolcheck *ppc); #endif /* * LIBPMEMPOOL SYNC & TRANSFORM */ /* * Synchronize data between replicas within a poolset. * * EXPERIMENTAL */ #ifndef _WIN32 int pmempool_sync(const char *poolset_file, unsigned flags); #else int pmempool_syncU(const char *poolset_file, unsigned flags); int pmempool_syncW(const wchar_t *poolset_file, unsigned flags); #endif /* * Modify internal structure of a poolset. 
* * EXPERIMENTAL */ #ifndef _WIN32 int pmempool_transform(const char *poolset_file_src, const char *poolset_file_dst, unsigned flags); #else int pmempool_transformU(const char *poolset_file_src, const char *poolset_file_dst, unsigned flags); int pmempool_transformW(const wchar_t *poolset_file_src, const wchar_t *poolset_file_dst, unsigned flags); #endif /* PMEMPOOL feature enable, disable, query */ /* * feature types */ enum pmempool_feature { PMEMPOOL_FEAT_SINGLEHDR, PMEMPOOL_FEAT_CKSUM_2K, PMEMPOOL_FEAT_SHUTDOWN_STATE, PMEMPOOL_FEAT_CHECK_BAD_BLOCKS, }; /* PMEMPOOL FEATURE ENABLE */ #ifndef _WIN32 int pmempool_feature_enable(const char *path, enum pmempool_feature feature, unsigned flags); #else int pmempool_feature_enableU(const char *path, enum pmempool_feature feature, unsigned flags); int pmempool_feature_enableW(const wchar_t *path, enum pmempool_feature feature, unsigned flags); #endif /* PMEMPOOL FEATURE DISABLE */ #ifndef _WIN32 int pmempool_feature_disable(const char *path, enum pmempool_feature feature, unsigned flags); #else int pmempool_feature_disableU(const char *path, enum pmempool_feature feature, unsigned flags); int pmempool_feature_disableW(const wchar_t *path, enum pmempool_feature feature, unsigned flags); #endif /* PMEMPOOL FEATURE QUERY */ #ifndef _WIN32 int pmempool_feature_query(const char *path, enum pmempool_feature feature, unsigned flags); #else int pmempool_feature_queryU(const char *path, enum pmempool_feature feature, unsigned flags); int pmempool_feature_queryW(const wchar_t *path, enum pmempool_feature feature, unsigned flags); #endif /* PMEMPOOL RM */ #ifndef _WIN32 int pmempool_rm(const char *path, unsigned flags); #else int pmempool_rmU(const char *path, unsigned flags); int pmempool_rmW(const wchar_t *path, unsigned flags); #endif #ifndef _WIN32 const char *pmempool_check_version(unsigned major_required, unsigned minor_required); #else const char *pmempool_check_versionU(unsigned major_required, unsigned minor_required); 
const wchar_t *pmempool_check_versionW(unsigned major_required, unsigned minor_required); #endif #ifndef _WIN32 const char *pmempool_errormsg(void); #else const char *pmempool_errormsgU(void); const wchar_t *pmempool_errormsgW(void); #endif #ifdef __cplusplus } #endif #endif /* libpmempool.h */
8,009
22.910448
80
h
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/include/librpmem.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2016-2019, Intel Corporation */ /* * librpmem.h -- definitions of librpmem entry points (EXPERIMENTAL) * * This library provides low-level support for remote access to persistent * memory utilizing RDMA-capable RNICs. * * See librpmem(7) for details. */ #ifndef LIBRPMEM_H #define LIBRPMEM_H 1 #include <sys/types.h> #include <stdint.h> #ifdef __cplusplus extern "C" { #endif typedef struct rpmem_pool RPMEMpool; #define RPMEM_POOL_HDR_SIG_LEN 8 #define RPMEM_POOL_HDR_UUID_LEN 16 /* uuid byte length */ #define RPMEM_POOL_USER_FLAGS_LEN 16 struct rpmem_pool_attr { char signature[RPMEM_POOL_HDR_SIG_LEN]; /* pool signature */ uint32_t major; /* format major version number */ uint32_t compat_features; /* mask: compatible "may" features */ uint32_t incompat_features; /* mask: "must support" features */ uint32_t ro_compat_features; /* mask: force RO if unsupported */ unsigned char poolset_uuid[RPMEM_POOL_HDR_UUID_LEN]; /* pool uuid */ unsigned char uuid[RPMEM_POOL_HDR_UUID_LEN]; /* first part uuid */ unsigned char next_uuid[RPMEM_POOL_HDR_UUID_LEN]; /* next pool uuid */ unsigned char prev_uuid[RPMEM_POOL_HDR_UUID_LEN]; /* prev pool uuid */ unsigned char user_flags[RPMEM_POOL_USER_FLAGS_LEN]; /* user flags */ }; RPMEMpool *rpmem_create(const char *target, const char *pool_set_name, void *pool_addr, size_t pool_size, unsigned *nlanes, const struct rpmem_pool_attr *create_attr); RPMEMpool *rpmem_open(const char *target, const char *pool_set_name, void *pool_addr, size_t pool_size, unsigned *nlanes, struct rpmem_pool_attr *open_attr); int rpmem_set_attr(RPMEMpool *rpp, const struct rpmem_pool_attr *attr); int rpmem_close(RPMEMpool *rpp); #define RPMEM_PERSIST_RELAXED (1U << 0) #define RPMEM_FLUSH_RELAXED (1U << 0) int rpmem_flush(RPMEMpool *rpp, size_t offset, size_t length, unsigned lane, unsigned flags); int rpmem_drain(RPMEMpool *rpp, unsigned lane, unsigned flags); int rpmem_persist(RPMEMpool *rpp, size_t offset, size_t 
length, unsigned lane, unsigned flags); int rpmem_read(RPMEMpool *rpp, void *buff, size_t offset, size_t length, unsigned lane); int rpmem_deep_persist(RPMEMpool *rpp, size_t offset, size_t length, unsigned lane); #define RPMEM_REMOVE_FORCE 0x1 #define RPMEM_REMOVE_POOL_SET 0x2 int rpmem_remove(const char *target, const char *pool_set, int flags); /* * RPMEM_MAJOR_VERSION and RPMEM_MINOR_VERSION provide the current version of * the librpmem API as provided by this header file. Applications can verify * that the version available at run-time is compatible with the version used * at compile-time by passing these defines to rpmem_check_version(). */ #define RPMEM_MAJOR_VERSION 1 #define RPMEM_MINOR_VERSION 3 const char *rpmem_check_version(unsigned major_required, unsigned minor_required); const char *rpmem_errormsg(void); /* minimum size of a pool */ #define RPMEM_MIN_POOL ((size_t)(1024 * 8)) /* 8 KB */ /* * This limit is set arbitrary to incorporate a pool header and required * alignment plus supply. */ #define RPMEM_MIN_PART ((size_t)(1024 * 1024 * 2)) /* 2 MiB */ #ifdef __cplusplus } #endif #endif /* librpmem.h */
3,197
31.30303
77
h
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/include/libpmemobj.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2014-2019, Intel Corporation */ /* * libpmemobj.h -- definitions of libpmemobj entry points * * This library provides support for programming with persistent memory (pmem). * * libpmemobj provides a pmem-resident transactional object store. * * See libpmemobj(7) for details. */ #ifndef LIBPMEMOBJ_H #define LIBPMEMOBJ_H 1 #include <libpmemobj/action.h> #include <libpmemobj/atomic.h> #include <libpmemobj/ctl.h> #include <libpmemobj/iterator.h> #include <libpmemobj/lists_atomic.h> #include <libpmemobj/pool.h> #include <libpmemobj/thread.h> #include <libpmemobj/tx.h> #endif /* libpmemobj.h */
662
23.555556
79
h
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/include/libpmemlog.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2014-2019, Intel Corporation */ /* * libpmemlog.h -- definitions of libpmemlog entry points * * This library provides support for programming with persistent memory (pmem). * * libpmemlog provides support for pmem-resident log files. * * See libpmemlog(7) for details. */ #ifndef LIBPMEMLOG_H #define LIBPMEMLOG_H 1 #include <sys/types.h> #ifdef _WIN32 #include <pmemcompat.h> #ifndef PMDK_UTF8_API #define pmemlog_open pmemlog_openW #define pmemlog_create pmemlog_createW #define pmemlog_check pmemlog_checkW #define pmemlog_check_version pmemlog_check_versionW #define pmemlog_errormsg pmemlog_errormsgW #define pmemlog_ctl_get pmemlog_ctl_getW #define pmemlog_ctl_set pmemlog_ctl_setW #define pmemlog_ctl_exec pmemlog_ctl_execW #else #define pmemlog_open pmemlog_openU #define pmemlog_create pmemlog_createU #define pmemlog_check pmemlog_checkU #define pmemlog_check_version pmemlog_check_versionU #define pmemlog_errormsg pmemlog_errormsgU #define pmemlog_ctl_get pmemlog_ctl_getU #define pmemlog_ctl_set pmemlog_ctl_setU #define pmemlog_ctl_exec pmemlog_ctl_execU #endif #else #include <sys/uio.h> #endif #ifdef __cplusplus extern "C" { #endif /* * opaque type, internal to libpmemlog */ typedef struct pmemlog PMEMlogpool; /* * PMEMLOG_MAJOR_VERSION and PMEMLOG_MINOR_VERSION provide the current * version of the libpmemlog API as provided by this header file. * Applications can verify that the version available at run-time * is compatible with the version used at compile-time by passing * these defines to pmemlog_check_version(). */ #define PMEMLOG_MAJOR_VERSION 1 #define PMEMLOG_MINOR_VERSION 1 #ifndef _WIN32 const char *pmemlog_check_version(unsigned major_required, unsigned minor_required); #else const char *pmemlog_check_versionU(unsigned major_required, unsigned minor_required); const wchar_t *pmemlog_check_versionW(unsigned major_required, unsigned minor_required); #endif /* * support for PMEM-resident log files... 
*/ #define PMEMLOG_MIN_POOL ((size_t)(1024 * 1024 * 2)) /* min pool size: 2MiB */ /* * This limit is set arbitrary to incorporate a pool header and required * alignment plus supply. */ #define PMEMLOG_MIN_PART ((size_t)(1024 * 1024 * 2)) /* 2 MiB */ #ifndef _WIN32 PMEMlogpool *pmemlog_open(const char *path); #else PMEMlogpool *pmemlog_openU(const char *path); PMEMlogpool *pmemlog_openW(const wchar_t *path); #endif #ifndef _WIN32 PMEMlogpool *pmemlog_create(const char *path, size_t poolsize, mode_t mode); #else PMEMlogpool *pmemlog_createU(const char *path, size_t poolsize, mode_t mode); PMEMlogpool *pmemlog_createW(const wchar_t *path, size_t poolsize, mode_t mode); #endif #ifndef _WIN32 int pmemlog_check(const char *path); #else int pmemlog_checkU(const char *path); int pmemlog_checkW(const wchar_t *path); #endif void pmemlog_close(PMEMlogpool *plp); size_t pmemlog_nbyte(PMEMlogpool *plp); int pmemlog_append(PMEMlogpool *plp, const void *buf, size_t count); int pmemlog_appendv(PMEMlogpool *plp, const struct iovec *iov, int iovcnt); long long pmemlog_tell(PMEMlogpool *plp); void pmemlog_rewind(PMEMlogpool *plp); void pmemlog_walk(PMEMlogpool *plp, size_t chunksize, int (*process_chunk)(const void *buf, size_t len, void *arg), void *arg); /* * Passing NULL to pmemlog_set_funcs() tells libpmemlog to continue to use the * default for that function. The replacement functions must not make calls * back into libpmemlog. 
*/ void pmemlog_set_funcs( void *(*malloc_func)(size_t size), void (*free_func)(void *ptr), void *(*realloc_func)(void *ptr, size_t size), char *(*strdup_func)(const char *s)); #ifndef _WIN32 const char *pmemlog_errormsg(void); #else const char *pmemlog_errormsgU(void); const wchar_t *pmemlog_errormsgW(void); #endif #ifndef _WIN32 /* EXPERIMENTAL */ int pmemlog_ctl_get(PMEMlogpool *plp, const char *name, void *arg); int pmemlog_ctl_set(PMEMlogpool *plp, const char *name, void *arg); int pmemlog_ctl_exec(PMEMlogpool *plp, const char *name, void *arg); #else int pmemlog_ctl_getU(PMEMlogpool *plp, const char *name, void *arg); int pmemlog_ctl_getW(PMEMlogpool *plp, const wchar_t *name, void *arg); int pmemlog_ctl_setU(PMEMlogpool *plp, const char *name, void *arg); int pmemlog_ctl_setW(PMEMlogpool *plp, const wchar_t *name, void *arg); int pmemlog_ctl_execU(PMEMlogpool *plp, const char *name, void *arg); int pmemlog_ctl_execW(PMEMlogpool *plp, const wchar_t *name, void *arg); #endif #ifdef __cplusplus } #endif #endif /* libpmemlog.h */
4,540
28.679739
80
h
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/include/libpmem.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2014-2019, Intel Corporation */ /* * libpmem.h -- definitions of libpmem entry points * * This library provides support for programming with persistent memory (pmem). * * libpmem provides support for using raw pmem directly. * * See libpmem(7) for details. */ #ifndef LIBPMEM_H #define LIBPMEM_H 1 #include <sys/types.h> #ifdef _WIN32 #include <pmemcompat.h> #ifndef PMDK_UTF8_API #define pmem_map_file pmem_map_fileW #define pmem_check_version pmem_check_versionW #define pmem_errormsg pmem_errormsgW #else #define pmem_map_file pmem_map_fileU #define pmem_check_version pmem_check_versionU #define pmem_errormsg pmem_errormsgU #endif #endif #ifdef __cplusplus extern "C" { #endif /* * This limit is set arbitrary to incorporate a pool header and required * alignment plus supply. */ #define PMEM_MIN_PART ((size_t)(1024 * 1024 * 2)) /* 2 MiB */ /* * flags supported by pmem_map_file() */ #define PMEM_FILE_CREATE (1 << 0) #define PMEM_FILE_EXCL (1 << 1) #define PMEM_FILE_SPARSE (1 << 2) #define PMEM_FILE_TMPFILE (1 << 3) #ifndef _WIN32 void *pmem_map_file(const char *path, size_t len, int flags, mode_t mode, size_t *mapped_lenp, int *is_pmemp); #else void *pmem_map_fileU(const char *path, size_t len, int flags, mode_t mode, size_t *mapped_lenp, int *is_pmemp); void *pmem_map_fileW(const wchar_t *path, size_t len, int flags, mode_t mode, size_t *mapped_lenp, int *is_pmemp); #endif int pmem_unmap(void *addr, size_t len); int pmem_is_pmem(const void *addr, size_t len); void pmem_persist(const void *addr, size_t len); int pmem_msync(const void *addr, size_t len); int pmem_has_auto_flush(void); void pmem_flush(const void *addr, size_t len); void pmem_deep_flush(const void *addr, size_t len); int pmem_deep_drain(const void *addr, size_t len); int pmem_deep_persist(const void *addr, size_t len); void pmem_drain(void); int pmem_has_hw_drain(void); void *pmem_memmove_persist(void *pmemdest, const void *src, size_t len); void 
*pmem_memcpy_persist(void *pmemdest, const void *src, size_t len); void *pmem_memset_persist(void *pmemdest, int c, size_t len); void *pmem_memmove_nodrain(void *pmemdest, const void *src, size_t len); void *pmem_memcpy_nodrain(void *pmemdest, const void *src, size_t len); void *pmem_memset_nodrain(void *pmemdest, int c, size_t len); #define PMEM_F_MEM_NODRAIN (1U << 0) #define PMEM_F_MEM_NONTEMPORAL (1U << 1) #define PMEM_F_MEM_TEMPORAL (1U << 2) #define PMEM_F_MEM_WC (1U << 3) #define PMEM_F_MEM_WB (1U << 4) #define PMEM_F_MEM_NOFLUSH (1U << 5) #define PMEM_F_MEM_VALID_FLAGS (PMEM_F_MEM_NODRAIN | \ PMEM_F_MEM_NONTEMPORAL | \ PMEM_F_MEM_TEMPORAL | \ PMEM_F_MEM_WC | \ PMEM_F_MEM_WB | \ PMEM_F_MEM_NOFLUSH) void *pmem_memmove(void *pmemdest, const void *src, size_t len, unsigned flags); void *pmem_memcpy(void *pmemdest, const void *src, size_t len, unsigned flags); void *pmem_memset(void *pmemdest, int c, size_t len, unsigned flags); /* * PMEM_MAJOR_VERSION and PMEM_MINOR_VERSION provide the current version of the * libpmem API as provided by this header file. Applications can verify that * the version available at run-time is compatible with the version used at * compile-time by passing these defines to pmem_check_version(). */ #define PMEM_MAJOR_VERSION 1 #define PMEM_MINOR_VERSION 1 #ifndef _WIN32 const char *pmem_check_version(unsigned major_required, unsigned minor_required); #else const char *pmem_check_versionU(unsigned major_required, unsigned minor_required); const wchar_t *pmem_check_versionW(unsigned major_required, unsigned minor_required); #endif #ifndef _WIN32 const char *pmem_errormsg(void); #else const char *pmem_errormsgU(void); const wchar_t *pmem_errormsgW(void); #endif #ifdef __cplusplus } #endif #endif /* libpmem.h */
3,829
28.015152
80
h
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/include/libpmem2.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2019-2020, Intel Corporation */ /* * libpmem2.h -- definitions of libpmem2 entry points (EXPERIMENTAL) * * This library provides support for programming with persistent memory (pmem). * * libpmem2 provides support for using raw pmem directly. * * See libpmem2(7) for details. */ #ifndef LIBPMEM2_H #define LIBPMEM2_H 1 #include <stddef.h> #include <stdint.h> #ifdef _WIN32 #include <pmemcompat.h> #ifndef PMDK_UTF8_API #define pmem2_source_device_id pmem2_source_device_idW #define pmem2_errormsg pmem2_errormsgW #define pmem2_perror pmem2_perrorW #else #define pmem2_source_device_id pmem2_source_device_idU #define pmem2_errormsg pmem2_errormsgU #define pmem2_perror pmem2_perrorU #endif #endif #ifdef __cplusplus extern "C" { #endif #define PMEM2_E_UNKNOWN (-100000) #define PMEM2_E_NOSUPP (-100001) #define PMEM2_E_FILE_HANDLE_NOT_SET (-100003) #define PMEM2_E_INVALID_FILE_HANDLE (-100004) #define PMEM2_E_INVALID_FILE_TYPE (-100005) #define PMEM2_E_MAP_RANGE (-100006) #define PMEM2_E_MAPPING_EXISTS (-100007) #define PMEM2_E_GRANULARITY_NOT_SET (-100008) #define PMEM2_E_GRANULARITY_NOT_SUPPORTED (-100009) #define PMEM2_E_OFFSET_OUT_OF_RANGE (-100010) #define PMEM2_E_OFFSET_UNALIGNED (-100011) #define PMEM2_E_INVALID_ALIGNMENT_FORMAT (-100012) #define PMEM2_E_INVALID_ALIGNMENT_VALUE (-100013) #define PMEM2_E_INVALID_SIZE_FORMAT (-100014) #define PMEM2_E_LENGTH_UNALIGNED (-100015) #define PMEM2_E_MAPPING_NOT_FOUND (-100016) #define PMEM2_E_BUFFER_TOO_SMALL (-100017) #define PMEM2_E_SOURCE_EMPTY (-100018) #define PMEM2_E_INVALID_SHARING_VALUE (-100019) #define PMEM2_E_SRC_DEVDAX_PRIVATE (-100020) #define PMEM2_E_INVALID_ADDRESS_REQUEST_TYPE (-100021) #define PMEM2_E_ADDRESS_UNALIGNED (-100022) #define PMEM2_E_ADDRESS_NULL (-100023) #define PMEM2_E_DEEP_FLUSH_RANGE (-100024) #define PMEM2_E_INVALID_REGION_FORMAT (-100025) #define PMEM2_E_DAX_REGION_NOT_FOUND (-100026) #define PMEM2_E_INVALID_DEV_FORMAT (-100027) #define 
PMEM2_E_CANNOT_READ_BOUNDS (-100028) #define PMEM2_E_NO_BAD_BLOCK_FOUND (-100029) #define PMEM2_E_LENGTH_OUT_OF_RANGE (-100030) #define PMEM2_E_INVALID_PROT_FLAG (-100031) #define PMEM2_E_NO_ACCESS (-100032) /* source setup */ struct pmem2_source; int pmem2_source_from_fd(struct pmem2_source **src, int fd); int pmem2_source_from_anon(struct pmem2_source **src, size_t size); #ifdef _WIN32 int pmem2_source_from_handle(struct pmem2_source **src, HANDLE handle); #endif int pmem2_source_size(const struct pmem2_source *src, size_t *size); int pmem2_source_alignment(const struct pmem2_source *src, size_t *alignment); int pmem2_source_delete(struct pmem2_source **src); /* vm reservation setup */ struct pmem2_vm_reservation; int pmem2_vm_reservation_new(struct pmem2_vm_reservation **rsv, size_t size, void *address); int pmem2_vm_reservation_delete(struct pmem2_vm_reservation **rsv); /* config setup */ struct pmem2_config; int pmem2_config_new(struct pmem2_config **cfg); int pmem2_config_delete(struct pmem2_config **cfg); enum pmem2_granularity { PMEM2_GRANULARITY_BYTE, PMEM2_GRANULARITY_CACHE_LINE, PMEM2_GRANULARITY_PAGE, }; int pmem2_config_set_required_store_granularity(struct pmem2_config *cfg, enum pmem2_granularity g); int pmem2_config_set_offset(struct pmem2_config *cfg, size_t offset); int pmem2_config_set_length(struct pmem2_config *cfg, size_t length); enum pmem2_sharing_type { PMEM2_SHARED, PMEM2_PRIVATE, }; int pmem2_config_set_sharing(struct pmem2_config *cfg, enum pmem2_sharing_type type); #define PMEM2_PROT_EXEC (1U << 29) #define PMEM2_PROT_READ (1U << 30) #define PMEM2_PROT_WRITE (1U << 31) #define PMEM2_PROT_NONE 0 int pmem2_config_set_protection(struct pmem2_config *cfg, unsigned prot); enum pmem2_address_request_type { PMEM2_ADDRESS_FIXED_REPLACE = 1, PMEM2_ADDRESS_FIXED_NOREPLACE = 2, }; int pmem2_config_set_address(struct pmem2_config *cfg, void *addr, enum pmem2_address_request_type request_type); int pmem2_config_set_vm_reservation(struct pmem2_config 
*cfg, struct pmem2_vm_reservation *rsv, size_t offset); void pmem2_config_clear_address(struct pmem2_config *cfg); /* mapping */ struct pmem2_map; int pmem2_map(const struct pmem2_config *cfg, const struct pmem2_source *src, struct pmem2_map **map_ptr); int pmem2_unmap(struct pmem2_map **map_ptr); void *pmem2_map_get_address(struct pmem2_map *map); size_t pmem2_map_get_size(struct pmem2_map *map); enum pmem2_granularity pmem2_map_get_store_granularity(struct pmem2_map *map); /* flushing */ typedef void (*pmem2_persist_fn)(const void *ptr, size_t size); typedef void (*pmem2_flush_fn)(const void *ptr, size_t size); typedef void (*pmem2_drain_fn)(void); pmem2_persist_fn pmem2_get_persist_fn(struct pmem2_map *map); pmem2_flush_fn pmem2_get_flush_fn(struct pmem2_map *map); pmem2_drain_fn pmem2_get_drain_fn(struct pmem2_map *map); #define PMEM2_F_MEM_NODRAIN (1U << 0) #define PMEM2_F_MEM_NONTEMPORAL (1U << 1) #define PMEM2_F_MEM_TEMPORAL (1U << 2) #define PMEM2_F_MEM_WC (1U << 3) #define PMEM2_F_MEM_WB (1U << 4) #define PMEM2_F_MEM_NOFLUSH (1U << 5) #define PMEM2_F_MEM_VALID_FLAGS (PMEM2_F_MEM_NODRAIN | \ PMEM2_F_MEM_NONTEMPORAL | \ PMEM2_F_MEM_TEMPORAL | \ PMEM2_F_MEM_WC | \ PMEM2_F_MEM_WB | \ PMEM2_F_MEM_NOFLUSH) typedef void *(*pmem2_memmove_fn)(void *pmemdest, const void *src, size_t len, unsigned flags); typedef void *(*pmem2_memcpy_fn)(void *pmemdest, const void *src, size_t len, unsigned flags); typedef void *(*pmem2_memset_fn)(void *pmemdest, int c, size_t len, unsigned flags); pmem2_memmove_fn pmem2_get_memmove_fn(struct pmem2_map *map); pmem2_memcpy_fn pmem2_get_memcpy_fn(struct pmem2_map *map); pmem2_memset_fn pmem2_get_memset_fn(struct pmem2_map *map); /* RAS */ int pmem2_deep_flush(struct pmem2_map *map, void *ptr, size_t size); #ifndef _WIN32 int pmem2_source_device_id(const struct pmem2_source *src, char *id, size_t *len); #else int pmem2_source_device_idW(const struct pmem2_source *src, wchar_t *id, size_t *len); int pmem2_source_device_idU(const struct 
pmem2_source *src, char *id, size_t *len); #endif int pmem2_source_device_usc(const struct pmem2_source *src, uint64_t *usc); struct pmem2_badblock_context; struct pmem2_badblock { size_t offset; size_t length; }; int pmem2_badblock_context_new(const struct pmem2_source *src, struct pmem2_badblock_context **bbctx); int pmem2_badblock_next(struct pmem2_badblock_context *bbctx, struct pmem2_badblock *bb); void pmem2_badblock_context_delete( struct pmem2_badblock_context **bbctx); int pmem2_badblock_clear(struct pmem2_badblock_context *bbctx, const struct pmem2_badblock *bb); /* error handling */ #ifndef _WIN32 const char *pmem2_errormsg(void); #else const char *pmem2_errormsgU(void); const wchar_t *pmem2_errormsgW(void); #endif int pmem2_err_to_errno(int); #ifndef _WIN32 void pmem2_perror(const char *format, ...) __attribute__((__format__(__printf__, 1, 2))); #else void pmem2_perrorU(const char *format, ...); void pmem2_perrorW(const wchar_t *format, ...); #endif #ifdef __cplusplus } #endif #endif /* libpmem2.h */
7,202
25.677778
79
h
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/include/libpmemobj/ctl.h
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2017-2019, Intel Corporation */

/*
 * libpmemobj/ctl.h -- definitions of pmemobj_ctl related entry points
 */

#ifndef LIBPMEMOBJ_CTL_H
#define LIBPMEMOBJ_CTL_H 1

#include <stddef.h>
#include <sys/types.h>

#include <libpmemobj/base.h>

#ifdef __cplusplus
extern "C" {
#endif

/*
 * Allocation class interface
 *
 * When requesting an object from the allocator, the first step is to determine
 * which allocation class best approximates the size of the object.
 * Once found, the appropriate free list, called bucket, for that
 * class is selected in a fashion that minimizes contention between threads.
 * Depending on the requested size and the allocation class, it might happen
 * that the object size (including required metadata) would be bigger than the
 * allocation class size - called unit size. In those situations, the object is
 * constructed from two or more units (up to 64).
 *
 * If the requested number of units cannot be retrieved from the selected
 * bucket, the thread reaches out to the global, shared, heap which manages
 * memory in 256 kilobyte chunks and gives it out in a best-fit fashion. This
 * operation must be performed under an exclusive lock.
 * Once the thread is in the possession of a chunk, the lock is dropped, and the
 * memory is split into units that repopulate the bucket.
 *
 * These are the CTL entry points that control allocation classes:
 * - heap.alloc_class.[class_id].desc
 *	Creates/retrieves allocation class information
 *
 * It's VERY important to remember that the allocation classes are a RUNTIME
 * property of the allocator - they are NOT stored persistently in the pool.
 * It's recommended to always create custom allocation classes immediately after
 * creating or opening the pool, before any use.
 * If there are existing objects created using a class that is no longer stored
 * in the runtime state of the allocator, they can be normally freed, but
 * allocating equivalent objects will be done using the allocation class that
 * is currently defined for that size.
 *
 * Please see the libpmemobj man page for more information about entry points.
 */

/*
 * Persistent allocation header
 */
enum pobj_header_type {
	/*
	 * 64-byte header used up until the version 1.3 of the library,
	 * functionally equivalent to the compact header.
	 * It's not recommended to create any new classes with this header.
	 */
	POBJ_HEADER_LEGACY,
	/*
	 * 16-byte header used by the default allocation classes. All library
	 * metadata is by default allocated using this header.
	 * Supports type numbers and variably sized allocations.
	 */
	POBJ_HEADER_COMPACT,
	/*
	 * 0-byte header with metadata stored exclusively in a bitmap. This
	 * ensures that objects are allocated in memory contiguously and
	 * without attached headers.
	 * This can be used to create very small allocation classes, but it
	 * does not support type numbers.
	 * Additionally, allocations with this header can only span a single
	 * unit.
	 * Objects allocated with this header do show up when iterating through
	 * the heap using pmemobj_first/pmemobj_next functions, but have a
	 * type_num equal 0.
	 */
	POBJ_HEADER_NONE,

	MAX_POBJ_HEADER_TYPES
};

/*
 * Description of allocation classes
 */
struct pobj_alloc_class_desc {
	/*
	 * The number of bytes in a single unit of allocation. A single
	 * allocation can span up to 64 units (or 1 in the case of no header).
	 * If one creates an allocation class with a certain unit size and
	 * forces it to handle bigger sizes, more than one unit
	 * will be used.
	 * For example, an allocation class with a compact header and 128 bytes
	 * unit size, for a request of 200 bytes will create a memory block
	 * containing 256 bytes that spans two units. The usable size of that
	 * allocation will be 240 bytes: 2 * 128 - 16 (header).
	 */
	size_t unit_size;

	/*
	 * Desired alignment of objects from the allocation class.
	 * If non zero, must be a power of two and an even divisor of unit size.
	 *
	 * All allocation classes have default alignment
	 * of 64. User data alignment is affected by the size of a header. For
	 * compact one this means that the alignment is 48 bytes.
	 *
	 */
	size_t alignment;

	/*
	 * The minimum number of units that must be present in a
	 * single, contiguous, memory block.
	 * Those blocks (internally called runs), are fetched on demand from the
	 * heap. Accessing that global state is a serialization point for the
	 * allocator and thus it is imperative for performance and scalability
	 * that a reasonable amount of memory is fetched in a single call.
	 * Threads generally do not share memory blocks from which they
	 * allocate, but blocks do go back to the global heap if they are no
	 * longer actively used for allocation.
	 */
	unsigned units_per_block;

	/*
	 * The header of allocations that originate from this allocation class.
	 */
	enum pobj_header_type header_type;

	/*
	 * The identifier of this allocation class.
	 */
	unsigned class_id;
};

/* modes for runtime statistics collection (used with the stats CTL points) */
enum pobj_stats_enabled {
	POBJ_STATS_ENABLED_TRANSIENT,
	POBJ_STATS_ENABLED_BOTH,
	POBJ_STATS_ENABLED_PERSISTENT,
	POBJ_STATS_DISABLED,
};

#ifndef _WIN32
/* EXPERIMENTAL */
int pmemobj_ctl_get(PMEMobjpool *pop, const char *name, void *arg);
int pmemobj_ctl_set(PMEMobjpool *pop, const char *name, void *arg);
int pmemobj_ctl_exec(PMEMobjpool *pop, const char *name, void *arg);
#else
int pmemobj_ctl_getU(PMEMobjpool *pop, const char *name, void *arg);
int pmemobj_ctl_getW(PMEMobjpool *pop, const wchar_t *name, void *arg);
int pmemobj_ctl_setU(PMEMobjpool *pop, const char *name, void *arg);
int pmemobj_ctl_setW(PMEMobjpool *pop, const wchar_t *name, void *arg);
int pmemobj_ctl_execU(PMEMobjpool *pop, const char *name, void *arg);
int pmemobj_ctl_execW(PMEMobjpool *pop, const wchar_t *name, void *arg);

/* select UTF-8 or wide-char variants depending on the configured API */
#ifndef PMDK_UTF8_API
#define pmemobj_ctl_get pmemobj_ctl_getW
#define pmemobj_ctl_set pmemobj_ctl_setW
#define pmemobj_ctl_exec pmemobj_ctl_execW
#else
#define pmemobj_ctl_get pmemobj_ctl_getU
#define pmemobj_ctl_set pmemobj_ctl_setU
#define pmemobj_ctl_exec pmemobj_ctl_execU
#endif

#endif

#ifdef __cplusplus
}
#endif

#endif	/* libpmemobj/ctl.h */
6,198
34.221591
80
h
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/include/libpmemobj/lists_atomic.h
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2017, Intel Corporation */

/*
 * libpmemobj/lists_atomic.h -- definitions of libpmemobj atomic lists macros
 */

#ifndef LIBPMEMOBJ_LISTS_ATOMIC_H
#define LIBPMEMOBJ_LISTS_ATOMIC_H 1

#include <libpmemobj/lists_atomic_base.h>
#include <libpmemobj/thread.h>
#include <libpmemobj/types.h>

#ifdef __cplusplus
extern "C" {
#endif

/*
 * Non-transactional persistent atomic circular doubly-linked list
 */

/* per-element link pair embedded in the user's struct */
#define POBJ_LIST_ENTRY(type)\
struct {\
	TOID(type) pe_next;\
	TOID(type) pe_prev;\
}

/* list head: first element plus a lock guarding list modifications */
#define POBJ_LIST_HEAD(name, type)\
struct name {\
	TOID(type) pe_first;\
	PMEMmutex lock;\
}

#define POBJ_LIST_FIRST(head)	((head)->pe_first)
/* last element is pe_prev of the first (the list is circular) */
#define POBJ_LIST_LAST(head, field) (\
TOID_IS_NULL((head)->pe_first) ?\
(head)->pe_first :\
D_RO((head)->pe_first)->field.pe_prev)

#define POBJ_LIST_EMPTY(head)	(TOID_IS_NULL((head)->pe_first))
#define POBJ_LIST_NEXT(elm, field)	(D_RO(elm)->field.pe_next)
#define POBJ_LIST_PREV(elm, field)	(D_RO(elm)->field.pe_prev)

/* destination selectors passed to pmemobj_list_* as the 'before' argument */
#define POBJ_LIST_DEST_HEAD	1
#define POBJ_LIST_DEST_TAIL	0
#define POBJ_LIST_DEST_BEFORE	1
#define POBJ_LIST_DEST_AFTER	0

/* iteration stops when the next link wraps around to the first element */
#define POBJ_LIST_FOREACH(var, head, field)\
for (_pobj_debug_notice("POBJ_LIST_FOREACH", __FILE__, __LINE__),\
	(var) = POBJ_LIST_FIRST((head));\
	TOID_IS_NULL((var)) == 0;\
	TOID_EQUALS(POBJ_LIST_NEXT((var), field),\
	POBJ_LIST_FIRST((head))) ?\
	TOID_ASSIGN((var), OID_NULL) :\
	((var) = POBJ_LIST_NEXT((var), field)))

#define POBJ_LIST_FOREACH_REVERSE(var, head, field)\
for (_pobj_debug_notice("POBJ_LIST_FOREACH_REVERSE", __FILE__, __LINE__),\
	(var) = POBJ_LIST_LAST((head), field);\
	TOID_IS_NULL((var)) == 0;\
	TOID_EQUALS(POBJ_LIST_PREV((var), field),\
	POBJ_LIST_LAST((head), field)) ?\
	TOID_ASSIGN((var), OID_NULL) :\
	((var) = POBJ_LIST_PREV((var), field)))

#define POBJ_LIST_INSERT_HEAD(pop, head, elm, field)\
pmemobj_list_insert((pop),\
	TOID_OFFSETOF(POBJ_LIST_FIRST(head), field),\
	(head), OID_NULL,\
	POBJ_LIST_DEST_HEAD, (elm).oid)

#define POBJ_LIST_INSERT_TAIL(pop, head, elm, field)\
pmemobj_list_insert((pop),\
	TOID_OFFSETOF(POBJ_LIST_FIRST(head), field),\
	(head), OID_NULL,\
	POBJ_LIST_DEST_TAIL, (elm).oid)

#define POBJ_LIST_INSERT_AFTER(pop, head, listelm, elm, field)\
pmemobj_list_insert((pop),\
	TOID_OFFSETOF(POBJ_LIST_FIRST(head), field),\
	(head), (listelm).oid,\
	0 /* after */, (elm).oid)

#define POBJ_LIST_INSERT_BEFORE(pop, head, listelm, elm, field)\
pmemobj_list_insert((pop), \
	TOID_OFFSETOF(POBJ_LIST_FIRST(head), field),\
	(head), (listelm).oid,\
	1 /* before */, (elm).oid)

/* allocate a new object (optionally via 'constr') and link it in one step */
#define POBJ_LIST_INSERT_NEW_HEAD(pop, head, field, size, constr, arg)\
pmemobj_list_insert_new((pop),\
	TOID_OFFSETOF((head)->pe_first, field),\
	(head), OID_NULL, POBJ_LIST_DEST_HEAD, (size),\
	TOID_TYPE_NUM_OF((head)->pe_first), (constr), (arg))

#define POBJ_LIST_INSERT_NEW_TAIL(pop, head, field, size, constr, arg)\
pmemobj_list_insert_new((pop),\
	TOID_OFFSETOF((head)->pe_first, field),\
	(head), OID_NULL, POBJ_LIST_DEST_TAIL, (size),\
	TOID_TYPE_NUM_OF((head)->pe_first), (constr), (arg))

#define POBJ_LIST_INSERT_NEW_AFTER(pop, head, listelm, field, size,\
	constr, arg)\
pmemobj_list_insert_new((pop),\
	TOID_OFFSETOF((head)->pe_first, field),\
	(head), (listelm).oid, 0 /* after */, (size),\
	TOID_TYPE_NUM_OF((head)->pe_first), (constr), (arg))

#define POBJ_LIST_INSERT_NEW_BEFORE(pop, head, listelm, field, size,\
	constr, arg)\
pmemobj_list_insert_new((pop),\
	TOID_OFFSETOF(POBJ_LIST_FIRST(head), field),\
	(head), (listelm).oid, 1 /* before */, (size),\
	TOID_TYPE_NUM_OF((head)->pe_first), (constr), (arg))

#define POBJ_LIST_REMOVE(pop, head, elm, field)\
pmemobj_list_remove((pop),\
	TOID_OFFSETOF(POBJ_LIST_FIRST(head), field),\
	(head), (elm).oid, 0 /* no free */)

#define POBJ_LIST_REMOVE_FREE(pop, head, elm, field)\
pmemobj_list_remove((pop),\
	TOID_OFFSETOF(POBJ_LIST_FIRST(head), field),\
	(head), (elm).oid, 1 /* free */)

/* atomically unlink from one list and link onto another */
#define POBJ_LIST_MOVE_ELEMENT_HEAD(pop, head, head_new, elm, field, field_new)\
pmemobj_list_move((pop),\
	TOID_OFFSETOF(POBJ_LIST_FIRST(head), field),\
	(head),\
	TOID_OFFSETOF(POBJ_LIST_FIRST(head_new), field_new),\
	(head_new), OID_NULL, POBJ_LIST_DEST_HEAD, (elm).oid)

#define POBJ_LIST_MOVE_ELEMENT_TAIL(pop, head, head_new, elm, field, field_new)\
pmemobj_list_move((pop),\
	TOID_OFFSETOF(POBJ_LIST_FIRST(head), field),\
	(head),\
	TOID_OFFSETOF(POBJ_LIST_FIRST(head_new), field_new),\
	(head_new), OID_NULL, POBJ_LIST_DEST_TAIL, (elm).oid)

#define POBJ_LIST_MOVE_ELEMENT_AFTER(pop,\
	head, head_new, listelm, elm, field, field_new)\
pmemobj_list_move((pop),\
	TOID_OFFSETOF(POBJ_LIST_FIRST(head), field),\
	(head),\
	TOID_OFFSETOF(POBJ_LIST_FIRST(head_new), field_new),\
	(head_new),\
	(listelm).oid,\
	0 /* after */, (elm).oid)

#define POBJ_LIST_MOVE_ELEMENT_BEFORE(pop,\
	head, head_new, listelm, elm, field, field_new)\
pmemobj_list_move((pop),\
	TOID_OFFSETOF(POBJ_LIST_FIRST(head), field),\
	(head),\
	TOID_OFFSETOF(POBJ_LIST_FIRST(head_new), field_new),\
	(head_new),\
	(listelm).oid,\
	1 /* before */, (elm).oid)

#ifdef __cplusplus
}
#endif

#endif	/* libpmemobj/lists_atomic.h */
5,121
30.042424
80
h
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/include/libpmemobj/iterator.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2014-2019, Intel Corporation */ /* * libpmemobj/iterator.h -- definitions of libpmemobj iterator macros */ #ifndef LIBPMEMOBJ_ITERATOR_H #define LIBPMEMOBJ_ITERATOR_H 1 #include <libpmemobj/iterator_base.h> #include <libpmemobj/types.h> #ifdef __cplusplus extern "C" { #endif static inline PMEMoid POBJ_FIRST_TYPE_NUM(PMEMobjpool *pop, uint64_t type_num) { PMEMoid _pobj_ret = pmemobj_first(pop); while (!OID_IS_NULL(_pobj_ret) && pmemobj_type_num(_pobj_ret) != type_num) { _pobj_ret = pmemobj_next(_pobj_ret); } return _pobj_ret; } static inline PMEMoid POBJ_NEXT_TYPE_NUM(PMEMoid o) { PMEMoid _pobj_ret = o; do { _pobj_ret = pmemobj_next(_pobj_ret);\ } while (!OID_IS_NULL(_pobj_ret) && pmemobj_type_num(_pobj_ret) != pmemobj_type_num(o)); return _pobj_ret; } #define POBJ_FIRST(pop, t) ((TOID(t))POBJ_FIRST_TYPE_NUM(pop, TOID_TYPE_NUM(t))) #define POBJ_NEXT(o) ((__typeof__(o))POBJ_NEXT_TYPE_NUM((o).oid)) /* * Iterates through every existing allocated object. */ #define POBJ_FOREACH(pop, varoid)\ for (_pobj_debug_notice("POBJ_FOREACH", __FILE__, __LINE__),\ varoid = pmemobj_first(pop);\ (varoid).off != 0; varoid = pmemobj_next(varoid)) /* * Safe variant of POBJ_FOREACH in which pmemobj_free on varoid is allowed */ #define POBJ_FOREACH_SAFE(pop, varoid, nvaroid)\ for (_pobj_debug_notice("POBJ_FOREACH_SAFE", __FILE__, __LINE__),\ varoid = pmemobj_first(pop);\ (varoid).off != 0 && (nvaroid = pmemobj_next(varoid), 1);\ varoid = nvaroid) /* * Iterates through every object of the specified type. */ #define POBJ_FOREACH_TYPE(pop, var)\ POBJ_FOREACH(pop, (var).oid)\ if (pmemobj_type_num((var).oid) == TOID_TYPE_NUM_OF(var)) /* * Safe variant of POBJ_FOREACH_TYPE in which pmemobj_free on var * is allowed. */ #define POBJ_FOREACH_SAFE_TYPE(pop, var, nvar)\ POBJ_FOREACH_SAFE(pop, (var).oid, (nvar).oid)\ if (pmemobj_type_num((var).oid) == TOID_TYPE_NUM_OF(var)) #ifdef __cplusplus } #endif #endif /* libpmemobj/iterator.h */
2,041
23.60241
80
h
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/include/libpmemobj/lists_atomic_base.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2014-2017, Intel Corporation */ /* * libpmemobj/lists_atomic_base.h -- definitions of libpmemobj atomic lists */ #ifndef LIBPMEMOBJ_LISTS_ATOMIC_BASE_H #define LIBPMEMOBJ_LISTS_ATOMIC_BASE_H 1 #include <libpmemobj/base.h> #ifdef __cplusplus extern "C" { #endif /* * Non-transactional persistent atomic circular doubly-linked list */ int pmemobj_list_insert(PMEMobjpool *pop, size_t pe_offset, void *head, PMEMoid dest, int before, PMEMoid oid); PMEMoid pmemobj_list_insert_new(PMEMobjpool *pop, size_t pe_offset, void *head, PMEMoid dest, int before, size_t size, uint64_t type_num, pmemobj_constr constructor, void *arg); int pmemobj_list_remove(PMEMobjpool *pop, size_t pe_offset, void *head, PMEMoid oid, int free); int pmemobj_list_move(PMEMobjpool *pop, size_t pe_old_offset, void *head_old, size_t pe_new_offset, void *head_new, PMEMoid dest, int before, PMEMoid oid); #ifdef __cplusplus } #endif #endif /* libpmemobj/lists_atomic_base.h */
1,022
24.575
79
h
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/include/libpmemobj/tx_base.h
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2020, Intel Corporation */

/*
 * libpmemobj/tx_base.h -- definitions of libpmemobj transactional entry points
 */

#ifndef LIBPMEMOBJ_TX_BASE_H
#define LIBPMEMOBJ_TX_BASE_H 1

#include <setjmp.h>

#include <libpmemobj/base.h>

#ifdef __cplusplus
extern "C" {
#endif

/*
 * Transactions
 *
 * Stages are changed only by the pmemobj_tx_* functions, each transition
 * to the TX_STAGE_ONABORT is followed by a longjmp to the jmp_buf provided in
 * the pmemobj_tx_begin function.
 */
enum pobj_tx_stage {
	TX_STAGE_NONE,		/* no transaction in this thread */
	TX_STAGE_WORK,		/* transaction in progress */
	TX_STAGE_ONCOMMIT,	/* successfully committed */
	TX_STAGE_ONABORT,	/* tx_begin failed or transaction aborted */
	TX_STAGE_FINALLY,	/* always called */

	MAX_TX_STAGE
};

/*
 * Always returns the current transaction stage for a thread.
 */
enum pobj_tx_stage pmemobj_tx_stage(void);

/* lock/callback kinds accepted by pmemobj_tx_begin's varargs */
enum pobj_tx_param {
	TX_PARAM_NONE,
	TX_PARAM_MUTEX,		/* PMEMmutex */
	TX_PARAM_RWLOCK,	/* PMEMrwlock */
	TX_PARAM_CB,		/* pmemobj_tx_callback cb, void *arg */
};

enum pobj_log_type {
	TX_LOG_TYPE_SNAPSHOT,
	TX_LOG_TYPE_INTENT,
};

enum pobj_tx_failure_behavior {
	POBJ_TX_FAILURE_ABORT,
	POBJ_TX_FAILURE_RETURN,
};

/*
 * Feature-detect __attribute__((deprecated("msg"))) across compilers;
 * pmdk_use_attr_deprec_with_msg ends up 0 or 1.
 */
#if !defined(pmdk_use_attr_deprec_with_msg) && defined(__COVERITY__)
#define pmdk_use_attr_deprec_with_msg 0
#endif

#if !defined(pmdk_use_attr_deprec_with_msg) && defined(__clang__)
#if __has_extension(attribute_deprecated_with_message)
#define pmdk_use_attr_deprec_with_msg 1
#else
#define pmdk_use_attr_deprec_with_msg 0
#endif
#endif

#if !defined(pmdk_use_attr_deprec_with_msg) && \
		defined(__GNUC__) && !defined(__INTEL_COMPILER)
#if __GNUC__ * 100 + __GNUC_MINOR__ >= 601 /* 6.1 */
#define pmdk_use_attr_deprec_with_msg 1
#else
#define pmdk_use_attr_deprec_with_msg 0
#endif
#endif

#if !defined(pmdk_use_attr_deprec_with_msg)
#define pmdk_use_attr_deprec_with_msg 0
#endif

#if pmdk_use_attr_deprec_with_msg
#define tx_lock_deprecated __attribute__((deprecated(\
		"enum pobj_tx_lock is deprecated, use enum pobj_tx_param")))
#else
#define tx_lock_deprecated
#endif

/* deprecated, do not use */
enum tx_lock_deprecated pobj_tx_lock {
	TX_LOCK_NONE	tx_lock_deprecated = TX_PARAM_NONE,
	TX_LOCK_MUTEX	tx_lock_deprecated = TX_PARAM_MUTEX,
	TX_LOCK_RWLOCK	tx_lock_deprecated = TX_PARAM_RWLOCK,
};

typedef void (*pmemobj_tx_callback)(PMEMobjpool *pop, enum pobj_tx_stage stage,
		void *);

/* masks of flags accepted by the corresponding pmemobj_tx_x* functions */
#define POBJ_TX_XALLOC_VALID_FLAGS	(POBJ_XALLOC_ZERO |\
	POBJ_XALLOC_NO_FLUSH |\
	POBJ_XALLOC_ARENA_MASK |\
	POBJ_XALLOC_CLASS_MASK |\
	POBJ_XALLOC_NO_ABORT)

#define POBJ_XADD_NO_FLUSH		POBJ_FLAG_NO_FLUSH
#define POBJ_XADD_NO_SNAPSHOT		POBJ_FLAG_NO_SNAPSHOT
#define POBJ_XADD_ASSUME_INITIALIZED	POBJ_FLAG_ASSUME_INITIALIZED
#define POBJ_XADD_NO_ABORT		POBJ_FLAG_TX_NO_ABORT
#define POBJ_XADD_VALID_FLAGS	(POBJ_XADD_NO_FLUSH |\
	POBJ_XADD_NO_SNAPSHOT |\
	POBJ_XADD_ASSUME_INITIALIZED |\
	POBJ_XADD_NO_ABORT)

#define POBJ_XLOCK_NO_ABORT		POBJ_FLAG_TX_NO_ABORT
#define POBJ_XLOCK_VALID_FLAGS	(POBJ_XLOCK_NO_ABORT)

#define POBJ_XFREE_NO_ABORT		POBJ_FLAG_TX_NO_ABORT
#define POBJ_XFREE_VALID_FLAGS	(POBJ_XFREE_NO_ABORT)

#define POBJ_XPUBLISH_NO_ABORT		POBJ_FLAG_TX_NO_ABORT
#define POBJ_XPUBLISH_VALID_FLAGS	(POBJ_XPUBLISH_NO_ABORT)

#define POBJ_XLOG_APPEND_BUFFER_NO_ABORT	POBJ_FLAG_TX_NO_ABORT
#define POBJ_XLOG_APPEND_BUFFER_VALID_FLAGS\
	(POBJ_XLOG_APPEND_BUFFER_NO_ABORT)

/*
 * Starts a new transaction in the current thread.
 * If called within an open transaction, starts a nested transaction.
 *
 * If successful, transaction stage changes to TX_STAGE_WORK and function
 * returns zero. Otherwise, stage changes to TX_STAGE_ONABORT and an error
 * number is returned.
 */
int pmemobj_tx_begin(PMEMobjpool *pop, jmp_buf env, ...);

/*
 * Adds lock of given type to current transaction.
 * 'Flags' is a bitmask of the following values:
 *  - POBJ_XLOCK_NO_ABORT - if the function does not end successfully,
 *  do not abort the transaction and return the error number.
 */
int pmemobj_tx_xlock(enum pobj_tx_param type, void *lockp, uint64_t flags);

/*
 * Adds lock of given type to current transaction.
 */
int pmemobj_tx_lock(enum pobj_tx_param type, void *lockp);

/*
 * Aborts current transaction
 *
 * Causes transition to TX_STAGE_ONABORT.
 *
 * This function must be called during TX_STAGE_WORK.
 */
void pmemobj_tx_abort(int errnum);

/*
 * Commits current transaction
 *
 * This function must be called during TX_STAGE_WORK.
 */
void pmemobj_tx_commit(void);

/*
 * Cleanups current transaction. Must always be called after pmemobj_tx_begin,
 * even if starting the transaction failed.
 *
 * If called during TX_STAGE_NONE, has no effect.
 *
 * Always causes transition to TX_STAGE_NONE.
 *
 * If transaction was successful, returns 0. Otherwise returns error code set
 * by pmemobj_tx_abort.
 *
 * This function must *not* be called during TX_STAGE_WORK.
 */
int pmemobj_tx_end(void);

/*
 * Performs the actions associated with current stage of the transaction,
 * and makes the transition to the next stage. Current stage must always
 * be obtained by calling pmemobj_tx_stage.
 *
 * This function must be called in transaction.
 */
void pmemobj_tx_process(void);

/*
 * Returns last transaction error code.
 */
int pmemobj_tx_errno(void);

/*
 * Takes a "snapshot" of the memory block of given size and located at given
 * offset 'off' in the object 'oid' and saves it in the undo log.
 * The application is then free to directly modify the object in that memory
 * range. In case of failure or abort, all the changes within this range will
 * be rolled-back automatically.
 *
 * If successful, returns zero.
 * Otherwise, stage changes to TX_STAGE_ONABORT and an error number is returned.
 *
 * This function must be called during TX_STAGE_WORK.
 */
int pmemobj_tx_add_range(PMEMoid oid, uint64_t off, size_t size);

/*
 * Takes a "snapshot" of the given memory region and saves it in the undo log.
 * The application is then free to directly modify the object in that memory
 * range. In case of failure or abort, all the changes within this range will
 * be rolled-back automatically. The supplied block of memory has to be within
 * the given pool.
 *
 * If successful, returns zero.
 * Otherwise, stage changes to TX_STAGE_ONABORT and an error number is returned.
 *
 * This function must be called during TX_STAGE_WORK.
 */
int pmemobj_tx_add_range_direct(const void *ptr, size_t size);

/*
 * Behaves exactly the same as pmemobj_tx_add_range when 'flags' equals 0.
 * 'Flags' is a bitmask of the following values:
 *  - POBJ_XADD_NO_FLUSH - skips flush on commit
 *  - POBJ_XADD_NO_SNAPSHOT - added range will not be snapshotted
 *  - POBJ_XADD_ASSUME_INITIALIZED - added range is assumed to be initialized
 *  - POBJ_XADD_NO_ABORT - if the function does not end successfully,
 *  do not abort the transaction and return the error number.
 */
int pmemobj_tx_xadd_range(PMEMoid oid, uint64_t off, size_t size,
		uint64_t flags);

/*
 * Behaves exactly the same as pmemobj_tx_add_range_direct when 'flags' equals
 * 0. 'Flags' is a bitmask of the following values:
 *  - POBJ_XADD_NO_FLUSH - skips flush on commit
 *  - POBJ_XADD_NO_SNAPSHOT - added range will not be snapshotted
 *  - POBJ_XADD_ASSUME_INITIALIZED - added range is assumed to be initialized
 *  - POBJ_XADD_NO_ABORT - if the function does not end successfully,
 *  do not abort the transaction and return the error number.
 */
int pmemobj_tx_xadd_range_direct(const void *ptr, size_t size, uint64_t flags);

/*
 * Transactionally allocates a new object.
 *
 * If successful, returns PMEMoid.
 * Otherwise, stage changes to TX_STAGE_ONABORT and an OID_NULL is returned.
 *
 * This function must be called during TX_STAGE_WORK.
 */
PMEMoid pmemobj_tx_alloc(size_t size, uint64_t type_num);

/*
 * Transactionally allocates a new object.
 *
 * If successful, returns PMEMoid.
 * Otherwise, stage changes to TX_STAGE_ONABORT and an OID_NULL is returned.
 * 'Flags' is a bitmask of the following values:
 *  - POBJ_XALLOC_ZERO - zero the allocated object
 *  - POBJ_XALLOC_NO_FLUSH - skip flush on commit
 *  - POBJ_XALLOC_NO_ABORT - if the function does not end successfully,
 *  do not abort the transaction and return the error number.
 *
 * This function must be called during TX_STAGE_WORK.
 */
PMEMoid pmemobj_tx_xalloc(size_t size, uint64_t type_num, uint64_t flags);

/*
 * Transactionally allocates new zeroed object.
 *
 * If successful, returns PMEMoid.
 * Otherwise, stage changes to TX_STAGE_ONABORT and an OID_NULL is returned.
 *
 * This function must be called during TX_STAGE_WORK.
 */
PMEMoid pmemobj_tx_zalloc(size_t size, uint64_t type_num);

/*
 * Transactionally resizes an existing object.
 *
 * If successful, returns PMEMoid.
 * Otherwise, stage changes to TX_STAGE_ONABORT and an OID_NULL is returned.
 *
 * This function must be called during TX_STAGE_WORK.
 */
PMEMoid pmemobj_tx_realloc(PMEMoid oid, size_t size, uint64_t type_num);

/*
 * Transactionally resizes an existing object, if extended new space is zeroed.
 *
 * If successful, returns PMEMoid.
 * Otherwise, stage changes to TX_STAGE_ONABORT and an OID_NULL is returned.
 *
 * This function must be called during TX_STAGE_WORK.
 */
PMEMoid pmemobj_tx_zrealloc(PMEMoid oid, size_t size, uint64_t type_num);

/*
 * Transactionally allocates a new object with duplicate of the string s.
 *
 * If successful, returns PMEMoid.
 * Otherwise, stage changes to TX_STAGE_ONABORT and an OID_NULL is returned.
 *
 * This function must be called during TX_STAGE_WORK.
 */
PMEMoid pmemobj_tx_strdup(const char *s, uint64_t type_num);

/*
 * Transactionally allocates a new object with duplicate of the string s.
 *
 * If successful, returns PMEMoid.
 * Otherwise, stage changes to TX_STAGE_ONABORT and an OID_NULL is returned.
 * 'Flags' is a bitmask of the following values:
 *  - POBJ_XALLOC_ZERO - zero the allocated object
 *  - POBJ_XALLOC_NO_FLUSH - skip flush on commit
 *  - POBJ_XALLOC_NO_ABORT - if the function does not end successfully,
 *  do not abort the transaction and return the error number.
 *
 * This function must be called during TX_STAGE_WORK.
 */
PMEMoid pmemobj_tx_xstrdup(const char *s, uint64_t type_num, uint64_t flags);

/*
 * Transactionally allocates a new object with duplicate of the wide character
 * string s.
 *
 * If successful, returns PMEMoid.
 * Otherwise, stage changes to TX_STAGE_ONABORT and an OID_NULL is returned.
 *
 * This function must be called during TX_STAGE_WORK.
 */
PMEMoid pmemobj_tx_wcsdup(const wchar_t *s, uint64_t type_num);

/*
 * Transactionally allocates a new object with duplicate of the wide character
 * string s.
 *
 * If successful, returns PMEMoid.
 * Otherwise, stage changes to TX_STAGE_ONABORT and an OID_NULL is returned.
 * 'Flags' is a bitmask of the following values:
 *  - POBJ_XALLOC_ZERO - zero the allocated object
 *  - POBJ_XALLOC_NO_FLUSH - skip flush on commit
 *  - POBJ_XALLOC_NO_ABORT - if the function does not end successfully,
 *  do not abort the transaction and return the error number.
 *
 * This function must be called during TX_STAGE_WORK.
 */
PMEMoid pmemobj_tx_xwcsdup(const wchar_t *s, uint64_t type_num, uint64_t flags);

/*
 * Transactionally frees an existing object.
 *
 * If successful, returns zero.
 * Otherwise, stage changes to TX_STAGE_ONABORT and an error number is returned.
 *
 * This function must be called during TX_STAGE_WORK.
 */
int pmemobj_tx_free(PMEMoid oid);

/*
 * Transactionally frees an existing object.
 *
 * If successful, returns zero.
 * Otherwise, the stage changes to TX_STAGE_ONABORT and the error number is
 * returned.
 * 'Flags' is a bitmask of the following values:
 *  - POBJ_XFREE_NO_ABORT - if the function does not end successfully,
 *  do not abort the transaction and return the error number.
 *
 * This function must be called during TX_STAGE_WORK.
 */
int pmemobj_tx_xfree(PMEMoid oid, uint64_t flags);

/*
 * Append user allocated buffer to the ulog.
 *
 * If successful, returns zero.
 * Otherwise, stage changes to TX_STAGE_ONABORT and an error number is returned.
 *
 * This function must be called during TX_STAGE_WORK.
 */
int pmemobj_tx_log_append_buffer(enum pobj_log_type type, void *addr,
		size_t size);

/*
 * Append user allocated buffer to the ulog.
 *
 * If successful, returns zero.
 * Otherwise, stage changes to TX_STAGE_ONABORT and an error number is returned.
 * 'Flags' is a bitmask of the following values:
 *  - POBJ_XLOG_APPEND_BUFFER_NO_ABORT - if the function does not end
 *  successfully, do not abort the transaction and return the error number.
 *
 * This function must be called during TX_STAGE_WORK.
 */
int pmemobj_tx_xlog_append_buffer(enum pobj_log_type type, void *addr,
		size_t size, uint64_t flags);

/*
 * Enables or disables automatic ulog allocations.
 *
 * If successful, returns zero.
 * Otherwise, stage changes to TX_STAGE_ONABORT and an error number is returned.
 *
 * This function must be called during TX_STAGE_WORK.
 */
int pmemobj_tx_log_auto_alloc(enum pobj_log_type type, int on_off);

/*
 * Calculates and returns size for user buffers for snapshots.
 */
size_t pmemobj_tx_log_snapshots_max_size(size_t *sizes, size_t nsizes);

/*
 * Calculates and returns size for user buffers for intents.
 */
size_t pmemobj_tx_log_intents_max_size(size_t nintents);

/*
 * Sets volatile pointer to the user data for the current transaction.
 */
void pmemobj_tx_set_user_data(void *data);

/*
 * Gets volatile pointer to the user data associated with the current
 * transaction.
 */
void *pmemobj_tx_get_user_data(void);

/*
 * Sets the failure behavior of transactional functions.
 *
 * This function must be called during TX_STAGE_WORK.
 */
void pmemobj_tx_set_failure_behavior(enum pobj_tx_failure_behavior behavior);

/*
 * Returns failure behavior for the current transaction.
 *
 * This function must be called during TX_STAGE_WORK.
 */
enum pobj_tx_failure_behavior pmemobj_tx_get_failure_behavior(void);

#ifdef __cplusplus
}
#endif

#endif	/* libpmemobj/tx_base.h */
14,087
30.237251
80
h
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/include/libpmemobj/pool_base.h
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2020, Intel Corporation */

/*
 * libpmemobj/pool_base.h -- definitions of libpmemobj pool entry points
 */

#ifndef LIBPMEMOBJ_POOL_BASE_H
#define LIBPMEMOBJ_POOL_BASE_H 1

#include <stddef.h>
#include <sys/types.h>

#include <libpmemobj/base.h>

#ifdef __cplusplus
extern "C" {
#endif
//NEW
/*
 * NOTE(review): the block between the NEW/END NEW markers is a non-upstream
 * (NearPM) addition for direct device access -- confirm against the fork's
 * build before relying on it.
 */
//#define _GNU_SOURCE
//#include <sys/types.h>
//#include <sys/stat.h>
#include <fcntl.h>
#include <sys/mman.h>

//int __real_open(const char *__path, int __oflag);
//int __wrap_open(const char *__path, int __oflag);
void* open_device(const char* pathname);
//END NEW

/* minimum pool size; 1024 * 1024 * 256 bytes = 256 MiB (comment was stale) */
#define PMEMOBJ_MIN_POOL ((size_t)(1024 * 1024 * 256)) /* 256 MiB */

/*
 * This limit is set arbitrary to incorporate a pool header and required
 * alignment plus supply.
 */
#define PMEMOBJ_MIN_PART ((size_t)(1024 * 1024 * 2)) /* 2 MiB */

/*
 * Pool management.
 */
#ifdef _WIN32
#ifndef PMDK_UTF8_API
#define pmemobj_open pmemobj_openW
#define pmemobj_create pmemobj_createW
#define pmemobj_check pmemobj_checkW
#else
#define pmemobj_open pmemobj_openU
#define pmemobj_create pmemobj_createU
#define pmemobj_check pmemobj_checkU
#endif
#endif

#ifndef _WIN32
PMEMobjpool *pmemobj_open(const char *path, const char *layout);
#else
PMEMobjpool *pmemobj_openU(const char *path, const char *layout);
PMEMobjpool *pmemobj_openW(const wchar_t *path, const wchar_t *layout);
#endif

#ifndef _WIN32
PMEMobjpool *pmemobj_create(const char *path, const char *layout,
	size_t poolsize, mode_t mode);
#else
PMEMobjpool *pmemobj_createU(const char *path, const char *layout,
	size_t poolsize, mode_t mode);
PMEMobjpool *pmemobj_createW(const wchar_t *path, const wchar_t *layout,
	size_t poolsize, mode_t mode);
#endif

#ifndef _WIN32
int pmemobj_check(const char *path, const char *layout);
#else
int pmemobj_checkU(const char *path, const char *layout);
int pmemobj_checkW(const wchar_t *path, const wchar_t *layout);
#endif

void pmemobj_close(PMEMobjpool *pop);

/*
 * If called for the first time on a newly created pool, the root object
 * of given size is allocated. Otherwise, it returns the existing root object.
 * In such case, the size must be not less than the actual root object size
 * stored in the pool. If it's larger, the root object is automatically
 * resized.
 *
 * This function is thread-safe.
 */
PMEMoid pmemobj_root(PMEMobjpool *pop, size_t size);

/*
 * Same as above, but calls the constructor function when the object is first
 * created and on all subsequent reallocations.
 */
PMEMoid pmemobj_root_construct(PMEMobjpool *pop, size_t size,
	pmemobj_constr constructor, void *arg);

/*
 * Returns the size in bytes of the root object. Always equal to the requested
 * size.
 */
size_t pmemobj_root_size(PMEMobjpool *pop);

/*
 * Sets volatile pointer to the user data for specified pool.
 */
void pmemobj_set_user_data(PMEMobjpool *pop, void *data);

/*
 * Gets volatile pointer to the user data associated with the specified pool.
 */
void *pmemobj_get_user_data(PMEMobjpool *pop);

#ifdef __cplusplus
}
#endif

#endif	/* libpmemobj/pool_base.h */
3,095
24.377049
79
h
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/include/libpmemobj/action_base.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2017-2020, Intel Corporation */ /* * libpmemobj/action_base.h -- definitions of libpmemobj action interface */ #ifndef LIBPMEMOBJ_ACTION_BASE_H #define LIBPMEMOBJ_ACTION_BASE_H 1 #include <libpmemobj/base.h> #ifdef __cplusplus extern "C" { #endif enum pobj_action_type { /* a heap action (e.g., alloc) */ POBJ_ACTION_TYPE_HEAP, /* a single memory operation (e.g., value set) */ POBJ_ACTION_TYPE_MEM, POBJ_MAX_ACTION_TYPE }; struct pobj_action_heap { /* offset to the element being freed/allocated */ uint64_t offset; /* usable size of the element being allocated */ uint64_t usable_size; }; struct pobj_action { /* * These fields are internal for the implementation and are not * guaranteed to be stable across different versions of the API. * Use with caution. * * This structure should NEVER be stored on persistent memory! */ enum pobj_action_type type; uint32_t data[3]; union { struct pobj_action_heap heap; uint64_t data2[14]; }; }; #define POBJ_ACTION_XRESERVE_VALID_FLAGS\ (POBJ_XALLOC_CLASS_MASK |\ POBJ_XALLOC_ARENA_MASK |\ POBJ_XALLOC_ZERO) PMEMoid pmemobj_reserve(PMEMobjpool *pop, struct pobj_action *act, size_t size, uint64_t type_num); PMEMoid pmemobj_xreserve(PMEMobjpool *pop, struct pobj_action *act, size_t size, uint64_t type_num, uint64_t flags); void pmemobj_set_value(PMEMobjpool *pop, struct pobj_action *act, uint64_t *ptr, uint64_t value); void pmemobj_defer_free(PMEMobjpool *pop, PMEMoid oid, struct pobj_action *act); int pmemobj_publish(PMEMobjpool *pop, struct pobj_action *actv, size_t actvcnt); int pmemobj_tx_publish(struct pobj_action *actv, size_t actvcnt); int pmemobj_tx_xpublish(struct pobj_action *actv, size_t actvcnt, uint64_t flags); void pmemobj_cancel(PMEMobjpool *pop, struct pobj_action *actv, size_t actvcnt); #ifdef __cplusplus } #endif #endif /* libpmemobj/action_base.h */
1,935
24.813333
80
h
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/include/libpmemobj/types.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2014-2020, Intel Corporation */ /* * libpmemobj/types.h -- definitions of libpmemobj type-safe macros */ #ifndef LIBPMEMOBJ_TYPES_H #define LIBPMEMOBJ_TYPES_H 1 #include <libpmemobj/base.h> #ifdef __cplusplus extern "C" { #endif #define TOID_NULL(t) ((TOID(t))OID_NULL) #define PMEMOBJ_MAX_LAYOUT ((size_t)1024) /* * Type safety macros */ #if !(defined _MSC_VER || defined __clang__) #define TOID_ASSIGN(o, value)(\ {\ (o).oid = value;\ (o); /* to avoid "error: statement with no effect" */\ }) #else /* _MSC_VER or __clang__ */ #define TOID_ASSIGN(o, value) ((o).oid = value, (o)) #endif #if (defined _MSC_VER && _MSC_VER < 1912) /* * XXX - workaround for offsetof issue in VS 15.3, * it has been fixed since Visual Studio 2017 Version 15.5 * (_MSC_VER == 1912) */ #ifdef PMEMOBJ_OFFSETOF_WA #ifdef _CRT_USE_BUILTIN_OFFSETOF #undef offsetof #define offsetof(s, m) ((size_t)&reinterpret_cast < char const volatile& > \ ((((s *)0)->m))) #endif #else #ifdef _CRT_USE_BUILTIN_OFFSETOF #error "Invalid definition of offsetof() macro - see: \ https://developercommunity.visualstudio.com/content/problem/96174/\ offsetof-macro-is-broken-for-nested-objects.html \ Please upgrade your VS, fix offsetof as described under the link or define \ PMEMOBJ_OFFSETOF_WA to enable workaround in libpmemobj.h" #endif #endif #endif /* _MSC_VER */ #define TOID_EQUALS(lhs, rhs)\ ((lhs).oid.off == (rhs).oid.off &&\ (lhs).oid.pool_uuid_lo == (rhs).oid.pool_uuid_lo) /* type number of root object */ #define POBJ_ROOT_TYPE_NUM 0 #define _toid_struct #define _toid_union #define _toid_enum #define _POBJ_LAYOUT_REF(name) (sizeof(_pobj_layout_##name##_ref)) /* * Typed OID */ #define TOID(t)\ union _toid_##t##_toid #ifdef __cplusplus #define _TOID_CONSTR(t)\ _toid_##t##_toid()\ { }\ _toid_##t##_toid(PMEMoid _oid) : oid(_oid)\ { } #else #define _TOID_CONSTR(t) #endif /* * Declaration of typed OID */ #define _TOID_DECLARE(t, i)\ typedef uint8_t 
_toid_##t##_toid_type_num[(i) + 1];\ TOID(t)\ {\ _TOID_CONSTR(t)\ PMEMoid oid;\ t *_type;\ _toid_##t##_toid_type_num *_type_num;\ } /* * Declaration of typed OID of an object */ #define TOID_DECLARE(t, i) _TOID_DECLARE(t, i) /* * Declaration of typed OID of a root object */ #define TOID_DECLARE_ROOT(t) _TOID_DECLARE(t, POBJ_ROOT_TYPE_NUM) /* * Type number of specified type */ #define TOID_TYPE_NUM(t) (sizeof(_toid_##t##_toid_type_num) - 1) /* * Type number of object read from typed OID */ #define TOID_TYPE_NUM_OF(o) (sizeof(*(o)._type_num) - 1) /* * NULL check */ #define TOID_IS_NULL(o) ((o).oid.off == 0) /* * Validates whether type number stored in typed OID is the same * as type number stored in object's metadata */ #define TOID_VALID(o) (TOID_TYPE_NUM_OF(o) == pmemobj_type_num((o).oid)) /* * Checks whether the object is of a given type */ #define OID_INSTANCEOF(o, t) (TOID_TYPE_NUM(t) == pmemobj_type_num(o)) /* * Begin of layout declaration */ #define POBJ_LAYOUT_BEGIN(name)\ typedef uint8_t _pobj_layout_##name##_ref[__COUNTER__ + 1] /* * End of layout declaration */ #define POBJ_LAYOUT_END(name)\ typedef char _pobj_layout_##name##_cnt[__COUNTER__ + 1 -\ _POBJ_LAYOUT_REF(name)]; /* * Number of types declared inside layout without the root object */ #define POBJ_LAYOUT_TYPES_NUM(name) (sizeof(_pobj_layout_##name##_cnt) - 1) /* * Declaration of typed OID inside layout declaration */ #define POBJ_LAYOUT_TOID(name, t)\ TOID_DECLARE(t, (__COUNTER__ + 1 - _POBJ_LAYOUT_REF(name))); /* * Declaration of typed OID of root inside layout declaration */ #define POBJ_LAYOUT_ROOT(name, t)\ TOID_DECLARE_ROOT(t); /* * Name of declared layout */ #define POBJ_LAYOUT_NAME(name) #name #define TOID_TYPEOF(o) __typeof__(*(o)._type) #define TOID_OFFSETOF(o, field) offsetof(TOID_TYPEOF(o), field) /* * XXX - DIRECT_RW and DIRECT_RO are not available when compiled using VC++ * as C code (/TC). Use /TP option. 
*/ #ifndef _MSC_VER #define DIRECT_RW(o) (\ {__typeof__(o) _o; _o._type = NULL; (void)_o;\ (__typeof__(*(o)._type) *)pmemobj_direct((o).oid); }) #define DIRECT_RO(o) ((const __typeof__(*(o)._type) *)pmemobj_direct((o).oid)) #elif defined(__cplusplus) /* * XXX - On Windows, these macros do not behave exactly the same as on Linux. */ #define DIRECT_RW(o) \ (reinterpret_cast < __typeof__((o)._type) > (pmemobj_direct((o).oid))) #define DIRECT_RO(o) \ (reinterpret_cast < const __typeof__((o)._type) > \ (pmemobj_direct((o).oid))) #endif /* (defined(_MSC_VER) || defined(__cplusplus)) */ #define D_RW DIRECT_RW #define D_RO DIRECT_RO #ifdef __cplusplus } #endif #endif /* libpmemobj/types.h */
4,701
21.825243
78
h
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/include/libpmemobj/base.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2014-2019, Intel Corporation */ /* * libpmemobj/base.h -- definitions of base libpmemobj entry points */ #ifndef LIBPMEMOBJ_BASE_H #define LIBPMEMOBJ_BASE_H 1 #ifndef __STDC_LIMIT_MACROS #define __STDC_LIMIT_MACROS #endif #include <stddef.h> #include <stdint.h> #ifdef _WIN32 #include <pmemcompat.h> #ifndef PMDK_UTF8_API #define pmemobj_check_version pmemobj_check_versionW #define pmemobj_errormsg pmemobj_errormsgW #else #define pmemobj_check_version pmemobj_check_versionU #define pmemobj_errormsg pmemobj_errormsgU #endif #endif #ifdef __cplusplus extern "C" { #endif /* * opaque type internal to libpmemobj */ typedef struct pmemobjpool PMEMobjpool; #define PMEMOBJ_MAX_ALLOC_SIZE ((size_t)0x3FFDFFFC0) /* * allocation functions flags */ #define POBJ_FLAG_ZERO (((uint64_t)1) << 0) #define POBJ_FLAG_NO_FLUSH (((uint64_t)1) << 1) #define POBJ_FLAG_NO_SNAPSHOT (((uint64_t)1) << 2) #define POBJ_FLAG_ASSUME_INITIALIZED (((uint64_t)1) << 3) #define POBJ_FLAG_TX_NO_ABORT (((uint64_t)1) << 4) #define POBJ_CLASS_ID(id) (((uint64_t)(id)) << 48) #define POBJ_ARENA_ID(id) (((uint64_t)(id)) << 32) #define POBJ_XALLOC_CLASS_MASK ((((uint64_t)1 << 16) - 1) << 48) #define POBJ_XALLOC_ARENA_MASK ((((uint64_t)1 << 16) - 1) << 32) #define POBJ_XALLOC_ZERO POBJ_FLAG_ZERO #define POBJ_XALLOC_NO_FLUSH POBJ_FLAG_NO_FLUSH #define POBJ_XALLOC_NO_ABORT POBJ_FLAG_TX_NO_ABORT /* * pmemobj_mem* flags */ #define PMEMOBJ_F_MEM_NODRAIN (1U << 0) #define PMEMOBJ_F_MEM_NONTEMPORAL (1U << 1) #define PMEMOBJ_F_MEM_TEMPORAL (1U << 2) #define PMEMOBJ_F_MEM_WC (1U << 3) #define PMEMOBJ_F_MEM_WB (1U << 4) #define PMEMOBJ_F_MEM_NOFLUSH (1U << 5) /* * pmemobj_mem*, pmemobj_xflush & pmemobj_xpersist flags */ #define PMEMOBJ_F_RELAXED (1U << 31) /* * Persistent memory object */ /* * Object handle */ typedef struct pmemoid { uint64_t pool_uuid_lo; uint64_t off; } PMEMoid; static const PMEMoid OID_NULL = { 0, 0 }; #define OID_IS_NULL(o) ((o).off == 0) #define 
OID_EQUALS(lhs, rhs)\ ((lhs).off == (rhs).off &&\ (lhs).pool_uuid_lo == (rhs).pool_uuid_lo) PMEMobjpool *pmemobj_pool_by_ptr(const void *addr); PMEMobjpool *pmemobj_pool_by_oid(PMEMoid oid); #ifndef _WIN32 extern int _pobj_cache_invalidate; extern __thread struct _pobj_pcache { PMEMobjpool *pop; uint64_t uuid_lo; int invalidate; } _pobj_cached_pool; /* * Returns the direct pointer of an object. */ static inline void * pmemobj_direct_inline(PMEMoid oid) { if (oid.off == 0 || oid.pool_uuid_lo == 0) return NULL; struct _pobj_pcache *cache = &_pobj_cached_pool; if (_pobj_cache_invalidate != cache->invalidate || cache->uuid_lo != oid.pool_uuid_lo) { cache->invalidate = _pobj_cache_invalidate; if (!(cache->pop = pmemobj_pool_by_oid(oid))) { cache->uuid_lo = 0; return NULL; } cache->uuid_lo = oid.pool_uuid_lo; } return (void *)((uintptr_t)cache->pop + oid.off); } #endif /* _WIN32 */ /* * Returns the direct pointer of an object. */ #if defined(_WIN32) || defined(_PMEMOBJ_INTRNL) ||\ defined(PMEMOBJ_DIRECT_NON_INLINE) void *pmemobj_direct(PMEMoid oid); #else #define pmemobj_direct pmemobj_direct_inline #endif struct pmemvlt { uint64_t runid; }; #define PMEMvlt(T)\ struct {\ struct pmemvlt vlt;\ T value;\ } /* * Returns lazily initialized volatile variable. (EXPERIMENTAL) */ void *pmemobj_volatile(PMEMobjpool *pop, struct pmemvlt *vlt, void *ptr, size_t size, int (*constr)(void *ptr, void *arg), void *arg); /* * Returns the OID of the object pointed to by addr. */ PMEMoid pmemobj_oid(const void *addr); /* * Returns the number of usable bytes in the object. May be greater than * the requested size of the object because of internal alignment. * * Can be used with objects allocated by any of the available methods. */ size_t pmemobj_alloc_usable_size(PMEMoid oid); /* * Returns the type number of the object. */ uint64_t pmemobj_type_num(PMEMoid oid); /* * Pmemobj specific low-level memory manipulation functions. 
* * These functions are meant to be used with pmemobj pools, because they provide * additional functionality specific to this type of pool. These may include * for example replication support. They also take advantage of the knowledge * of the type of memory in the pool (pmem/non-pmem) to assure persistence. */ /* * Pmemobj version of memcpy. Data copied is made persistent. */ void *pmemobj_memcpy_persist(PMEMobjpool *pop, void *dest, const void *src, size_t len); /* * Pmemobj version of memset. Data range set is made persistent. */ void *pmemobj_memset_persist(PMEMobjpool *pop, void *dest, int c, size_t len); /* * Pmemobj version of memcpy. Data copied is made persistent (unless opted-out * using flags). */ void *pmemobj_memcpy(PMEMobjpool *pop, void *dest, const void *src, size_t len, unsigned flags); /* * Pmemobj version of memmove. Data copied is made persistent (unless opted-out * using flags). */ void *pmemobj_memmove(PMEMobjpool *pop, void *dest, const void *src, size_t len, unsigned flags); /* * Pmemobj version of memset. Data range set is made persistent (unless * opted-out using flags). */ void *pmemobj_memset(PMEMobjpool *pop, void *dest, int c, size_t len, unsigned flags); /* * Pmemobj version of pmem_persist. */ void pmemobj_persist(PMEMobjpool *pop, const void *addr, size_t len); /* * Pmemobj version of pmem_persist with additional flags argument. */ int pmemobj_xpersist(PMEMobjpool *pop, const void *addr, size_t len, unsigned flags); /* * Pmemobj version of pmem_flush. */ void pmemobj_flush(PMEMobjpool *pop, const void *addr, size_t len); /* * Pmemobj version of pmem_flush with additional flags argument. */ int pmemobj_xflush(PMEMobjpool *pop, const void *addr, size_t len, unsigned flags); /* * Pmemobj version of pmem_drain. */ void pmemobj_drain(PMEMobjpool *pop); /* * Version checking. */ /* * PMEMOBJ_MAJOR_VERSION and PMEMOBJ_MINOR_VERSION provide the current version * of the libpmemobj API as provided by this header file. 
Applications can * verify that the version available at run-time is compatible with the version * used at compile-time by passing these defines to pmemobj_check_version(). */ #define PMEMOBJ_MAJOR_VERSION 2 #define PMEMOBJ_MINOR_VERSION 4 #ifndef _WIN32 const char *pmemobj_check_version(unsigned major_required, unsigned minor_required); #else const char *pmemobj_check_versionU(unsigned major_required, unsigned minor_required); const wchar_t *pmemobj_check_versionW(unsigned major_required, unsigned minor_required); #endif /* * Passing NULL to pmemobj_set_funcs() tells libpmemobj to continue to use the * default for that function. The replacement functions must not make calls * back into libpmemobj. */ void pmemobj_set_funcs( void *(*malloc_func)(size_t size), void (*free_func)(void *ptr), void *(*realloc_func)(void *ptr, size_t size), char *(*strdup_func)(const char *s)); typedef int (*pmemobj_constr)(PMEMobjpool *pop, void *ptr, void *arg); /* * (debug helper function) logs notice message if used inside a transaction */ void _pobj_debug_notice(const char *func_name, const char *file, int line); #ifndef _WIN32 const char *pmemobj_errormsg(void); #else const char *pmemobj_errormsgU(void); const wchar_t *pmemobj_errormsgW(void); #endif #ifdef __cplusplus } #endif #endif /* libpmemobj/base.h */
7,415
23.72
80
h
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/include/libpmemobj/tx.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2014-2019, Intel Corporation */ /* * libpmemobj/tx.h -- definitions of libpmemobj transactional macros */ #ifndef LIBPMEMOBJ_TX_H #define LIBPMEMOBJ_TX_H 1 #include <errno.h> #include <string.h> #include <libpmemobj/tx_base.h> #include <libpmemobj/types.h> extern uint64_t waitCycles; extern uint64_t resetCycles; //extern int current_tx1 = 1 ; #ifdef __cplusplus extern "C" { #endif #ifdef POBJ_TX_CRASH_ON_NO_ONABORT #define TX_ONABORT_CHECK do {\ if (_stage == TX_STAGE_ONABORT)\ abort();\ } while (0) #else #define TX_ONABORT_CHECK do {} while (0) #endif #define _POBJ_TX_BEGIN(pop, ...)\ {\ jmp_buf _tx_env;\ enum pobj_tx_stage _stage;\ int _pobj_errno;\ if (setjmp(_tx_env)) {\ errno = pmemobj_tx_errno();\ } else {\ _pobj_errno = pmemobj_tx_begin(pop, _tx_env, __VA_ARGS__,\ TX_PARAM_NONE);\ if (_pobj_errno)\ errno = _pobj_errno;\ }\ while ((_stage = pmemobj_tx_stage()) != TX_STAGE_NONE) {\ switch (_stage) {\ case TX_STAGE_WORK: #define TX_BEGIN_PARAM(pop, ...)\ _POBJ_TX_BEGIN(pop, ##__VA_ARGS__) #define TX_BEGIN_LOCK TX_BEGIN_PARAM /* Just to let compiler warn when incompatible function pointer is used */ static inline pmemobj_tx_callback _pobj_validate_cb_sig(pmemobj_tx_callback cb) { return cb; } #define TX_BEGIN_CB(pop, cb, arg, ...) 
_POBJ_TX_BEGIN(pop, TX_PARAM_CB,\ _pobj_validate_cb_sig(cb), arg, ##__VA_ARGS__) #define TX_BEGIN(pop) _POBJ_TX_BEGIN(pop, TX_PARAM_NONE) #define TX_ONABORT\ pmemobj_tx_process();\ break;\ case TX_STAGE_ONABORT: #define TX_ONCOMMIT\ pmemobj_tx_process();\ break;\ case TX_STAGE_ONCOMMIT: #define TX_FINALLY\ pmemobj_tx_process();\ break;\ case TX_STAGE_FINALLY: #define TX_END\ pmemobj_tx_process();\ break;\ default:\ TX_ONABORT_CHECK;\ pmemobj_tx_process();\ break;\ }\ }\ _pobj_errno = pmemobj_tx_end();\ if (_pobj_errno)\ errno = _pobj_errno;\ } #define TX_ADD(o)\ pmemobj_tx_add_range((o).oid, 0, sizeof(*(o)._type)) #define TX_ADD_FIELD(o, field)\ TX_ADD_DIRECT(&(D_RO(o)->field)) #define TX_ADD_DIRECT(p)\ pmemobj_tx_add_range_direct(p, sizeof(*(p))) #define TX_ADD_FIELD_DIRECT(p, field)\ pmemobj_tx_add_range_direct(&(p)->field, sizeof((p)->field)) #define TX_XADD(o, flags)\ pmemobj_tx_xadd_range((o).oid, 0, sizeof(*(o)._type), flags) #define TX_XADD_FIELD(o, field, flags)\ TX_XADD_DIRECT(&(D_RO(o)->field), flags) #define TX_XADD_DIRECT(p, flags)\ pmemobj_tx_xadd_range_direct(p, sizeof(*(p)), flags) #define TX_XADD_FIELD_DIRECT(p, field, flags)\ pmemobj_tx_xadd_range_direct(&(p)->field, sizeof((p)->field), flags) #define TX_NEW(t)\ ((TOID(t))pmemobj_tx_alloc(sizeof(t), TOID_TYPE_NUM(t))) #define TX_ALLOC(t, size)\ ((TOID(t))pmemobj_tx_alloc(size, TOID_TYPE_NUM(t))) #define TX_ZNEW(t)\ ((TOID(t))pmemobj_tx_zalloc(sizeof(t), TOID_TYPE_NUM(t))) #define TX_ZALLOC(t, size)\ ((TOID(t))pmemobj_tx_zalloc(size, TOID_TYPE_NUM(t))) #define TX_XALLOC(t, size, flags)\ ((TOID(t))pmemobj_tx_xalloc(size, TOID_TYPE_NUM(t), flags)) /* XXX - not available when compiled with VC++ as C code (/TC) */ #if !defined(_MSC_VER) || defined(__cplusplus) #define TX_REALLOC(o, size)\ ((__typeof__(o))pmemobj_tx_realloc((o).oid, size, TOID_TYPE_NUM_OF(o))) #define TX_ZREALLOC(o, size)\ ((__typeof__(o))pmemobj_tx_zrealloc((o).oid, size, TOID_TYPE_NUM_OF(o))) #endif /* !defined(_MSC_VER) || 
defined(__cplusplus) */ #define TX_STRDUP(s, type_num)\ pmemobj_tx_strdup(s, type_num) #define TX_XSTRDUP(s, type_num, flags)\ pmemobj_tx_xstrdup(s, type_num, flags) #define TX_WCSDUP(s, type_num)\ pmemobj_tx_wcsdup(s, type_num) #define TX_XWCSDUP(s, type_num, flags)\ pmemobj_tx_xwcsdup(s, type_num, flags) #define TX_FREE(o)\ pmemobj_tx_free((o).oid) #define TX_XFREE(o, flags)\ pmemobj_tx_xfree((o).oid, flags) #define TX_SET(o, field, value) (\ TX_ADD_FIELD(o, field),\ D_RW(o)->field = (value)) #define TX_SET_DIRECT(p, field, value) (\ TX_ADD_FIELD_DIRECT(p, field),\ (p)->field = (value)) static inline void * TX_MEMCPY(void *dest, const void *src, size_t num) { pmemobj_tx_add_range_direct(dest, num); return memcpy(dest, src, num); } static inline void * TX_MEMSET(void *dest, int c, size_t num) { pmemobj_tx_add_range_direct(dest, num); return memset(dest, c, num); } #ifdef __cplusplus } #endif #endif /* libpmemobj/tx.h */
4,386
21.848958
74
h
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/include/libpmemobj/atomic_base.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2014-2019, Intel Corporation */ /* * libpmemobj/atomic_base.h -- definitions of libpmemobj atomic entry points */ #ifndef LIBPMEMOBJ_ATOMIC_BASE_H #define LIBPMEMOBJ_ATOMIC_BASE_H 1 #include <libpmemobj/base.h> #ifdef __cplusplus extern "C" { #endif /* * Non-transactional atomic allocations * * Those functions can be used outside transactions. The allocations are always * aligned to the cache-line boundary. */ #define POBJ_XALLOC_VALID_FLAGS (POBJ_XALLOC_ZERO |\ POBJ_XALLOC_CLASS_MASK) /* * Allocates a new object from the pool and calls a constructor function before * returning. It is guaranteed that allocated object is either properly * initialized, or if it's interrupted before the constructor completes, the * memory reserved for the object is automatically reclaimed. */ int pmemobj_alloc(PMEMobjpool *pop, PMEMoid *oidp, size_t size, uint64_t type_num, pmemobj_constr constructor, void *arg); /* * Allocates with flags a new object from the pool. */ int pmemobj_xalloc(PMEMobjpool *pop, PMEMoid *oidp, size_t size, uint64_t type_num, uint64_t flags, pmemobj_constr constructor, void *arg); /* * Allocates a new zeroed object from the pool. */ int pmemobj_zalloc(PMEMobjpool *pop, PMEMoid *oidp, size_t size, uint64_t type_num); /* * Resizes an existing object. */ int pmemobj_realloc(PMEMobjpool *pop, PMEMoid *oidp, size_t size, uint64_t type_num); /* * Resizes an existing object, if extended new space is zeroed. */ int pmemobj_zrealloc(PMEMobjpool *pop, PMEMoid *oidp, size_t size, uint64_t type_num); /* * Allocates a new object with duplicate of the string s. */ int pmemobj_strdup(PMEMobjpool *pop, PMEMoid *oidp, const char *s, uint64_t type_num); /* * Allocates a new object with duplicate of the wide character string s. */ int pmemobj_wcsdup(PMEMobjpool *pop, PMEMoid *oidp, const wchar_t *s, uint64_t type_num); /* * Frees an existing object. 
*/ void pmemobj_free(PMEMoid *oidp); struct pobj_defrag_result { size_t total; /* number of processed objects */ size_t relocated; /* number of relocated objects */ }; /* * Performs defragmentation on the provided array of objects. */ int pmemobj_defrag(PMEMobjpool *pop, PMEMoid **oidv, size_t oidcnt, struct pobj_defrag_result *result); #ifdef __cplusplus } #endif #endif /* libpmemobj/atomic_base.h */
2,386
24.393617
79
h
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/include/libpmemobj/thread.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2014-2017, Intel Corporation */ /* * libpmemobj/thread.h -- definitions of libpmemobj thread/locking entry points */ #ifndef LIBPMEMOBJ_THREAD_H #define LIBPMEMOBJ_THREAD_H 1 #include <time.h> #include <libpmemobj/base.h> #ifdef __cplusplus extern "C" { #endif /* * Locking. */ #define _POBJ_CL_SIZE 64 /* cache line size */ typedef union { long long align; char padding[_POBJ_CL_SIZE]; } PMEMmutex; typedef union { long long align; char padding[_POBJ_CL_SIZE]; } PMEMrwlock; typedef union { long long align; char padding[_POBJ_CL_SIZE]; } PMEMcond; void pmemobj_mutex_zero(PMEMobjpool *pop, PMEMmutex *mutexp); int pmemobj_mutex_lock(PMEMobjpool *pop, PMEMmutex *mutexp); int pmemobj_mutex_timedlock(PMEMobjpool *pop, PMEMmutex *__restrict mutexp, const struct timespec *__restrict abs_timeout); int pmemobj_mutex_trylock(PMEMobjpool *pop, PMEMmutex *mutexp); int pmemobj_mutex_unlock(PMEMobjpool *pop, PMEMmutex *mutexp); void pmemobj_rwlock_zero(PMEMobjpool *pop, PMEMrwlock *rwlockp); int pmemobj_rwlock_rdlock(PMEMobjpool *pop, PMEMrwlock *rwlockp); int pmemobj_rwlock_wrlock(PMEMobjpool *pop, PMEMrwlock *rwlockp); int pmemobj_rwlock_timedrdlock(PMEMobjpool *pop, PMEMrwlock *__restrict rwlockp, const struct timespec *__restrict abs_timeout); int pmemobj_rwlock_timedwrlock(PMEMobjpool *pop, PMEMrwlock *__restrict rwlockp, const struct timespec *__restrict abs_timeout); int pmemobj_rwlock_tryrdlock(PMEMobjpool *pop, PMEMrwlock *rwlockp); int pmemobj_rwlock_trywrlock(PMEMobjpool *pop, PMEMrwlock *rwlockp); int pmemobj_rwlock_unlock(PMEMobjpool *pop, PMEMrwlock *rwlockp); void pmemobj_cond_zero(PMEMobjpool *pop, PMEMcond *condp); int pmemobj_cond_broadcast(PMEMobjpool *pop, PMEMcond *condp); int pmemobj_cond_signal(PMEMobjpool *pop, PMEMcond *condp); int pmemobj_cond_timedwait(PMEMobjpool *pop, PMEMcond *__restrict condp, PMEMmutex *__restrict mutexp, const struct timespec *__restrict abs_timeout); int 
pmemobj_cond_wait(PMEMobjpool *pop, PMEMcond *condp, PMEMmutex *__restrict mutexp); #ifdef __cplusplus } #endif #endif /* libpmemobj/thread.h */
2,150
28.875
79
h
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/include/libpmemobj/action.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2017-2018, Intel Corporation */ /* * libpmemobj/action.h -- definitions of libpmemobj action interface */ #ifndef LIBPMEMOBJ_ACTION_H #define LIBPMEMOBJ_ACTION_H 1 #include <libpmemobj/action_base.h> #ifdef __cplusplus extern "C" { #endif #define POBJ_RESERVE_NEW(pop, t, act)\ ((TOID(t))pmemobj_reserve(pop, act, sizeof(t), TOID_TYPE_NUM(t))) #define POBJ_RESERVE_ALLOC(pop, t, size, act)\ ((TOID(t))pmemobj_reserve(pop, act, size, TOID_TYPE_NUM(t))) #define POBJ_XRESERVE_NEW(pop, t, act, flags)\ ((TOID(t))pmemobj_xreserve(pop, act, sizeof(t), TOID_TYPE_NUM(t), flags)) #define POBJ_XRESERVE_ALLOC(pop, t, size, act, flags)\ ((TOID(t))pmemobj_xreserve(pop, act, size, TOID_TYPE_NUM(t), flags)) #ifdef __cplusplus } #endif #endif /* libpmemobj/action_base.h */
829
23.411765
73
h
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/include/libpmemobj/atomic.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2014-2017, Intel Corporation */ /* * libpmemobj/atomic.h -- definitions of libpmemobj atomic macros */ #ifndef LIBPMEMOBJ_ATOMIC_H #define LIBPMEMOBJ_ATOMIC_H 1 #include <libpmemobj/atomic_base.h> #include <libpmemobj/types.h> #ifdef __cplusplus extern "C" { #endif #define POBJ_NEW(pop, o, t, constr, arg)\ pmemobj_alloc((pop), (PMEMoid *)(o), sizeof(t), TOID_TYPE_NUM(t),\ (constr), (arg)) #define POBJ_ALLOC(pop, o, t, size, constr, arg)\ pmemobj_alloc((pop), (PMEMoid *)(o), (size), TOID_TYPE_NUM(t),\ (constr), (arg)) #define POBJ_ZNEW(pop, o, t)\ pmemobj_zalloc((pop), (PMEMoid *)(o), sizeof(t), TOID_TYPE_NUM(t)) #define POBJ_ZALLOC(pop, o, t, size)\ pmemobj_zalloc((pop), (PMEMoid *)(o), (size), TOID_TYPE_NUM(t)) #define POBJ_REALLOC(pop, o, t, size)\ pmemobj_realloc((pop), (PMEMoid *)(o), (size), TOID_TYPE_NUM(t)) #define POBJ_ZREALLOC(pop, o, t, size)\ pmemobj_zrealloc((pop), (PMEMoid *)(o), (size), TOID_TYPE_NUM(t)) #define POBJ_FREE(o)\ pmemobj_free((PMEMoid *)(o)) #ifdef __cplusplus } #endif #endif /* libpmemobj/atomic.h */
1,115
23.26087
66
h
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/include/libpmemobj/iterator_base.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2014-2019, Intel Corporation */ /* * libpmemobj/iterator_base.h -- definitions of libpmemobj iterator entry points */ #ifndef LIBPMEMOBJ_ITERATOR_BASE_H #define LIBPMEMOBJ_ITERATOR_BASE_H 1 #include <libpmemobj/base.h> #ifdef __cplusplus extern "C" { #endif /* * The following functions allow access to the entire collection of objects. * * Use with conjunction with non-transactional allocations. Pmemobj pool acts * as a generic container (list) of objects that are not assigned to any * user-defined data structures. */ /* * Returns the first object of the specified type number. */ PMEMoid pmemobj_first(PMEMobjpool *pop); /* * Returns the next object of the same type. */ PMEMoid pmemobj_next(PMEMoid oid); #ifdef __cplusplus } #endif #endif /* libpmemobj/iterator_base.h */
855
20.4
80
h
null
NearPMSW-main/nearpm/shadow/include/txopt.h
// The starting address of the selected counter_atomic writes #ifndef TXOPT_H #define TXOPT_H #define COUNTER_ATOMIC_VADDR (4096UL*1024*1024) #define NUM_COUNTER_ATOMIC_PAGE 262144 // The starting address of the flush cache instruction #define CACHE_FLUSH_VADDR (4096UL*1024*1024+4*NUM_COUNTER_ATOMIC_PAGE*1024) // The starting address of the flush metadata cache instruction #define METADATA_CACHE_FLUSH_VADDR (4096UL*1024*1024+(4*NUM_COUNTER_ATOMIC_PAGE+4)*1024) #define STATUS_OUTPUT_VADDR (METADATA_CACHE_FLUSH_VADDR + 1024UL) #define INIT_METADATA_CACHE_VADDR (STATUS_OUTPUT_VADDR + 1024UL) #define TXOPT_VADDR (INIT_METADATA_CACHE_VADDR+1024UL) #define CACHE_LINE_SIZE 64UL #include <vector> #include <deque> #include <cstdlib> #include <cstdint> #include <atomic> #include <stdio.h> #include <cassert> enum opt_flag { FLAG_OPT, FLAG_OPT_VAL, FLAG_OPT_ADDR, FLAG_OPT_DATA, FLAG_OPT_DATA_VAL, /* register no execute */ FLAG_OPT_REG, FLAG_OPT_VAL_REG, FLAG_OPT_ADDR_REG, FLAG_OPT_DATA_REG, FLAG_OPT_DATA_VAL_REG, /* execute registered OPT */ FLAG_OPT_START }; struct opt_t { //int pid; int obj_id; }; // Fields in the OPT packet // Used by both SW and HW struct opt_packet_t { void* opt_obj; void* pmemaddr; //void* data_ptr; //int seg_id; //int data_val; unsigned size; opt_flag type; }; // OPT with both data and addr ready volatile void OPT(void* opt_obj, bool reg, void* pmemaddr, void* data, unsigned size); //#define OPT(opt_obj, pmemaddr, data, size) \ // *((opt_packet_t*)TXOPT_VADDR) = (opt_packet_t){opt_obj, pmemaddr, size, FLAG_OPT_DATA}; // OPT with both data (int) and addr ready volatile void OPT_VAL(void* opt_obj, bool reg, void* pmemaddr, int data_val); // OPT with only data ready volatile void OPT_DATA(void* opt_obj, bool reg, void* data, unsigned size); // OPT with only addr ready volatile void OPT_ADDR(void* opt_obj, bool reg, void* pmemaddr, unsigned size); // OPT with only data (int) ready volatile void OPT_DATA_VAL(void* opt_obj, bool reg, int data_val); // Begin 
OPT operation volatile void OPT_START(void* opt_obj); // store barrier volatile void s_fence(); // flush both metadata cache and data cache volatile void flush_caches(void* addr, unsigned size); // flush data cache only volatile void cache_flush(void* addr, unsigned size); // flush metadata cache only volatile void metadata_cache_flush(void* addr, unsigned size); // malloc that is cache-line aligned void *aligned_malloc(int size); class CounterAtomic { public: static void* counter_atomic_malloc(unsigned _size); // size is num of bytes static volatile void statOutput(); static volatile void initCounterCache(); uint64_t getValue(); uint64_t getPtr(); CounterAtomic(); CounterAtomic(uint64_t _val); CounterAtomic(bool _val); CounterAtomic& operator=(uint64_t _val); CounterAtomic& operator+(uint64_t _val); CounterAtomic& operator++(); CounterAtomic& operator--(); CounterAtomic& operator-(uint64_t _val); bool operator==(uint64_t _val); bool operator!=(uint64_t _val); private: void init(); static uint64_t getNextAtomicAddr(unsigned _size); static uint64_t getNextCacheFlushAddr(unsigned _size); //static uint64_t getNextPersistBarrierAddr(unsigned _size); static uint64_t getNextCounterCacheFlushAddr(unsigned _size); static uint64_t currAtomicAddr; static uint64_t currCacheFlushAddr; //static uint64_t currPersistentBarrierAddr; static uint64_t currCounterCacheFlushAddr; /* static bool hasAllocateCacheFlush; static bool hasAllocateCounterCacheFlush; static bool hasAllocatePersistBarrier; */ //uint64_t val; uint64_t val_addr = 0; }; #endif
3,665
26.155556
90
h
null
NearPMSW-main/nearpm/shadow/memcached-pmem-sd/slab_automove.c
/* Copyright 2017 Facebook. * * Use and distribution licensed under the BSD license. See * the LICENSE file for full text. */ /* -*- Mode: C; tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- */ #include "memcached.h" #include "slab_automove.h" #include <stdlib.h> #include <string.h> #define MIN_PAGES_FOR_SOURCE 2 #define MIN_PAGES_FOR_RECLAIM 2.5 struct window_data { uint64_t age; uint64_t dirty; uint64_t evicted; }; typedef struct { struct window_data *window_data; uint32_t window_size; uint32_t window_cur; double max_age_ratio; item_stats_automove iam_before[MAX_NUMBER_OF_SLAB_CLASSES]; item_stats_automove iam_after[MAX_NUMBER_OF_SLAB_CLASSES]; slab_stats_automove sam_before[MAX_NUMBER_OF_SLAB_CLASSES]; slab_stats_automove sam_after[MAX_NUMBER_OF_SLAB_CLASSES]; } slab_automove; void *slab_automove_init(struct settings *settings) { uint32_t window_size = settings->slab_automove_window; double max_age_ratio = settings->slab_automove_ratio; slab_automove *a = calloc(1, sizeof(slab_automove)); if (a == NULL) return NULL; a->window_data = calloc(window_size * MAX_NUMBER_OF_SLAB_CLASSES, sizeof(struct window_data)); a->window_size = window_size; a->max_age_ratio = max_age_ratio; if (a->window_data == NULL) { free(a); return NULL; } // do a dry run to fill the before structs fill_item_stats_automove(a->iam_before); fill_slab_stats_automove(a->sam_before); return (void *)a; } void slab_automove_free(void *arg) { slab_automove *a = (slab_automove *)arg; free(a->window_data); free(a); } static void window_sum(struct window_data *wd, struct window_data *w, uint32_t size) { int x; for (x = 0; x < size; x++) { struct window_data *d = &wd[x]; w->age += d->age; w->dirty += d->dirty; w->evicted += d->evicted; } } // TODO: if oldest is dirty, find next oldest. 
// still need to base ratio off of absolute age void slab_automove_run(void *arg, int *src, int *dst) { slab_automove *a = (slab_automove *)arg; int n; struct window_data w_sum; int oldest = -1; uint64_t oldest_age = 0; int youngest = -1; uint64_t youngest_age = ~0; bool youngest_evicting = false; *src = -1; *dst = -1; // fill after structs fill_item_stats_automove(a->iam_after); fill_slab_stats_automove(a->sam_after); a->window_cur++; // iterate slabs for (n = POWER_SMALLEST; n < MAX_NUMBER_OF_SLAB_CLASSES; n++) { int w_offset = n * a->window_size; struct window_data *wd = &a->window_data[w_offset + (a->window_cur % a->window_size)]; memset(wd, 0, sizeof(struct window_data)); // summarize the window-up-to-now. memset(&w_sum, 0, sizeof(struct window_data)); window_sum(&a->window_data[w_offset], &w_sum, a->window_size); // if page delta, or evicted delta, mark window dirty // (or outofmemory) if (a->iam_after[n].evicted - a->iam_before[n].evicted > 0 || a->iam_after[n].outofmemory - a->iam_before[n].outofmemory > 0) { wd->evicted = 1; wd->dirty = 1; } if (a->sam_after[n].total_pages - a->sam_before[n].total_pages > 0) { wd->dirty = 1; } // set age into window wd->age = a->iam_after[n].age; // grab age as average of window total uint64_t age = w_sum.age / a->window_size; // if > N free chunks and not dirty, make decision. if (a->sam_after[n].free_chunks > a->sam_after[n].chunks_per_page * MIN_PAGES_FOR_RECLAIM) { if (w_sum.dirty == 0) { *src = n; *dst = 0; break; } } // if oldest and have enough pages, is oldest if (age > oldest_age && a->sam_after[n].total_pages > MIN_PAGES_FOR_SOURCE) { oldest = n; oldest_age = age; } // grab evicted count from window // if > half the window and youngest, mark as youngest if (age < youngest_age && w_sum.evicted > a->window_size / 2) { youngest = n; youngest_age = age; youngest_evicting = wd->evicted ? 
true : false; } } memcpy(a->iam_before, a->iam_after, sizeof(item_stats_automove) * MAX_NUMBER_OF_SLAB_CLASSES); memcpy(a->sam_before, a->sam_after, sizeof(slab_stats_automove) * MAX_NUMBER_OF_SLAB_CLASSES); // if we have a youngest and oldest, and oldest is outside the ratio, // also, only make decisions if window has filled once. if (youngest != -1 && oldest != -1 && a->window_cur > a->window_size) { if (youngest_age < ((double)oldest_age * a->max_age_ratio) && youngest_evicting) { *src = oldest; *dst = youngest; } } return; }
4,939
31.287582
100
c
null
NearPMSW-main/nearpm/shadow/memcached-pmem-sd/slabs.h
/* * Copyright 2018 Lenovo * * Licensed under the BSD-3 license. see LICENSE.Lenovo.txt for full text */ /* * Note: * Codes enclosed in `#ifdef PSLAB' and `#endif' are added by Lenovo for * persistent memory support */ /* slabs memory allocation */ #ifndef SLABS_H #define SLABS_H /** Init the subsystem. 1st argument is the limit on no. of bytes to allocate, 0 if no limit. 2nd argument is the growth factor; each slab will use a chunk size equal to the previous slab's chunk size times this factor. 3rd argument specifies if the slab allocator should allocate all memory up front (if true), or allocate memory in chunks as it is needed (if false) */ void slabs_init(const size_t limit, const double factor, const bool prealloc, const uint32_t *slab_sizes); /** Call only during init. Pre-allocates all available memory */ void slabs_prefill_global(void); #ifdef PSLAB int slabs_dump_sizes(uint32_t *slab_sizes, int max); void slabs_prefill_global_from_pmem(void); void slabs_update_policy(void); int do_slabs_renewslab(const unsigned int id, char *ptr); void do_slab_realloc(item *it, unsigned int id); void do_slabs_free(void *ptr, const size_t size, unsigned int id); #endif /** * Given object size, return id to use when allocating/freeing memory for object * 0 means error: can't store such a large object */ unsigned int slabs_clsid(const size_t size); /** Allocate object of given length. 
0 on error */ /*@null@*/ #define SLABS_ALLOC_NO_NEWPAGE 1 void *slabs_alloc(const size_t size, unsigned int id, uint64_t *total_bytes, unsigned int flags); /** Free previously allocated object */ void slabs_free(void *ptr, size_t size, unsigned int id); /** Adjust the stats for memory requested */ void slabs_adjust_mem_requested(unsigned int id, size_t old, size_t ntotal); /** Adjust global memory limit up or down */ bool slabs_adjust_mem_limit(size_t new_mem_limit); /** Return a datum for stats in binary protocol */ bool get_stats(const char *stat_type, int nkey, ADD_STAT add_stats, void *c); typedef struct { unsigned int chunks_per_page; unsigned int chunk_size; long int free_chunks; long int total_pages; } slab_stats_automove; void fill_slab_stats_automove(slab_stats_automove *am); unsigned int global_page_pool_size(bool *mem_flag); /** Fill buffer with stats */ /*@null@*/ void slabs_stats(ADD_STAT add_stats, void *c); /* Hints as to freespace in slab class */ unsigned int slabs_available_chunks(unsigned int id, bool *mem_flag, uint64_t *total_bytes, unsigned int *chunks_perslab); void slabs_mlock(void); void slabs_munlock(void); int start_slab_maintenance_thread(void); void stop_slab_maintenance_thread(void); enum reassign_result_type { REASSIGN_OK=0, REASSIGN_RUNNING, REASSIGN_BADCLASS, REASSIGN_NOSPARE, REASSIGN_SRC_DST_SAME }; enum reassign_result_type slabs_reassign(int src, int dst); void slabs_rebalancer_pause(void); void slabs_rebalancer_resume(void); #ifdef EXTSTORE void slabs_set_storage(void *arg); #endif #endif
3,024
31.180851
122
h
null
NearPMSW-main/nearpm/shadow/memcached-pmem-sd/storage.c
/* -*- Mode: C; tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- */ #include "memcached.h" #ifdef EXTSTORE #include "storage.h" #include <stdlib.h> #include <string.h> #include <limits.h> #define PAGE_BUCKET_DEFAULT 0 #define PAGE_BUCKET_COMPACT 1 #define PAGE_BUCKET_CHUNKED 2 #define PAGE_BUCKET_LOWTTL 3 int lru_maintainer_store(void *storage, const int clsid) { //int i; int did_moves = 0; int item_age = settings.ext_item_age; bool mem_limit_reached = false; unsigned int chunks_free; struct lru_pull_tail_return it_info; // FIXME: need to directly ask the slabber how big a class is if (slabs_clsid(settings.ext_item_size) > clsid) return 0; chunks_free = slabs_available_chunks(clsid, &mem_limit_reached, NULL, NULL); // if we are low on chunks and no spare, push out early. if (chunks_free < settings.ext_free_memchunks[clsid] && mem_limit_reached) item_age = 0; it_info.it = NULL; lru_pull_tail(clsid, COLD_LRU, 0, LRU_PULL_RETURN_ITEM, 0, &it_info); /* Item is locked, and we have a reference to it. */ if (it_info.it == NULL) { return did_moves; } obj_io io; item *it = it_info.it; /* First, storage for the header object */ size_t orig_ntotal = ITEM_ntotal(it); uint32_t flags; if ((it->it_flags & ITEM_HDR) == 0 && (item_age == 0 || current_time - it->time > item_age)) { // FIXME: flag conversion again if (settings.inline_ascii_response) { flags = (uint32_t) strtoul(ITEM_suffix(it), (char **) NULL, 10); } else if (it->nsuffix > 0) { flags = *((uint32_t *)ITEM_suffix(it)); } else { flags = 0; } item *hdr_it = do_item_alloc(ITEM_key(it), it->nkey, flags, it->exptime, sizeof(item_hdr)); /* Run the storage write understanding the start of the item is dirty. * We will fill it (time/exptime/etc) from the header item on read. */ if (hdr_it != NULL) { int bucket = (it->it_flags & ITEM_CHUNKED) ? PAGE_BUCKET_CHUNKED : PAGE_BUCKET_DEFAULT; // Compres soon to expire items into similar pages. 
if (it->exptime - current_time < settings.ext_low_ttl) { bucket = PAGE_BUCKET_LOWTTL; } hdr_it->it_flags |= ITEM_HDR; io.len = orig_ntotal; io.mode = OBJ_IO_WRITE; // NOTE: when the item is read back in, the slab mover // may see it. Important to have refcount>=2 or ~ITEM_LINKED assert(it->refcount >= 2); if (extstore_write_request(storage, bucket, &io) == 0) { // cuddle the hash value into the time field so we don't have // to recalculate it. item *buf_it = (item *) io.buf; buf_it->time = it_info.hv; // copy from past the headers + time headers. // TODO: should be in items.c if (it->it_flags & ITEM_CHUNKED) { // Need to loop through the item and copy item_chunk *sch = (item_chunk *) ITEM_data(it); int remain = orig_ntotal; int copied = 0; // copy original header int hdrtotal = ITEM_ntotal(it) - it->nbytes; memcpy((char *)io.buf+32, (char *)it+32, hdrtotal - 32); copied = hdrtotal; // copy data in like it were one large object. while (sch && remain) { assert(remain >= sch->used); memcpy((char *)io.buf+copied, sch->data, sch->used); // FIXME: use one variable? remain -= sch->used; copied += sch->used; sch = sch->next; } } else { memcpy((char *)io.buf+32, (char *)it+32, io.len-32); } // crc what we copied so we can do it sequentially. buf_it->it_flags &= ~ITEM_LINKED; buf_it->exptime = crc32c(0, (char*)io.buf+32, orig_ntotal-32); extstore_write(storage, &io); item_hdr *hdr = (item_hdr *) ITEM_data(hdr_it); hdr->page_version = io.page_version; hdr->page_id = io.page_id; hdr->offset = io.offset; // overload nbytes for the header it hdr_it->nbytes = it->nbytes; /* success! Now we need to fill relevant data into the new * header and replace. Most of this requires the item lock */ /* CAS gets set while linking. 
Copy post-replace */ item_replace(it, hdr_it, it_info.hv); ITEM_set_cas(hdr_it, ITEM_get_cas(it)); do_item_remove(hdr_it); did_moves = 1; LOGGER_LOG(NULL, LOG_EVICTIONS, LOGGER_EXTSTORE_WRITE, it, bucket); } else { /* Failed to write for some reason, can't continue. */ slabs_free(hdr_it, ITEM_ntotal(hdr_it), ITEM_clsid(hdr_it)); } } } do_item_remove(it); item_unlock(it_info.hv); return did_moves; } /* Fetch stats from the external storage system and decide to compact. * If we're more than half full, start skewing how aggressively to run * compaction, up to a desired target when all pages are full. */ static int storage_compact_check(void *storage, logger *l, uint32_t *page_id, uint64_t *page_version, uint64_t *page_size, bool *drop_unread) { struct extstore_stats st; int x; double rate; uint64_t frag_limit; uint64_t low_version = ULLONG_MAX; uint64_t lowest_version = ULLONG_MAX; unsigned int low_page = 0; unsigned int lowest_page = 0; extstore_get_stats(storage, &st); if (st.pages_used == 0) return 0; // lets pick a target "wasted" value and slew. if (st.pages_free > settings.ext_compact_under) return 0; *drop_unread = false; // the number of free pages reduces the configured frag limit // this allows us to defrag early if pages are very empty. 
rate = 1.0 - ((double)st.pages_free / st.page_count); rate *= settings.ext_max_frag; frag_limit = st.page_size * rate; LOGGER_LOG(l, LOG_SYSEVENTS, LOGGER_COMPACT_FRAGINFO, NULL, rate, frag_limit); st.page_data = calloc(st.page_count, sizeof(struct extstore_page_data)); extstore_get_page_data(storage, &st); // find oldest page by version that violates the constraint for (x = 0; x < st.page_count; x++) { if (st.page_data[x].version == 0 || st.page_data[x].bucket == PAGE_BUCKET_LOWTTL) continue; if (st.page_data[x].version < lowest_version) { lowest_page = x; lowest_version = st.page_data[x].version; } if (st.page_data[x].bytes_used < frag_limit) { if (st.page_data[x].version < low_version) { low_page = x; low_version = st.page_data[x].version; } } } *page_size = st.page_size; free(st.page_data); // we have a page + version to attempt to reclaim. if (low_version != ULLONG_MAX) { *page_id = low_page; *page_version = low_version; return 1; } else if (lowest_version != ULLONG_MAX && settings.ext_drop_unread && st.pages_free <= settings.ext_drop_under) { // nothing matched the frag rate barrier, so pick the absolute oldest // version if we're configured to drop items. *page_id = lowest_page; *page_version = lowest_version; *drop_unread = true; return 1; } return 0; } static pthread_t storage_compact_tid; static pthread_mutex_t storage_compact_plock; #define MIN_STORAGE_COMPACT_SLEEP 10000 #define MAX_STORAGE_COMPACT_SLEEP 2000000 struct storage_compact_wrap { obj_io io; pthread_mutex_t lock; // gates the bools. 
bool done; bool submitted; bool miss; // version flipped out from under us }; static void storage_compact_readback(void *storage, logger *l, bool drop_unread, char *readback_buf, uint32_t page_id, uint64_t page_version, uint64_t read_size) { uint64_t offset = 0; unsigned int rescues = 0; unsigned int lost = 0; unsigned int skipped = 0; while (offset < read_size) { item *hdr_it = NULL; item_hdr *hdr = NULL; item *it = (item *)(readback_buf+offset); unsigned int ntotal; // probably zeroed out junk at the end of the wbuf if (it->nkey == 0) { break; } ntotal = ITEM_ntotal(it); uint32_t hv = (uint32_t)it->time; item_lock(hv); // We don't have a conn and don't need to do most of do_item_get hdr_it = assoc_find(ITEM_key(it), it->nkey, hv); if (hdr_it != NULL) { bool do_write = false; refcount_incr(hdr_it); // Check validity but don't bother removing it. if ((hdr_it->it_flags & ITEM_HDR) && !item_is_flushed(hdr_it) && (hdr_it->exptime == 0 || hdr_it->exptime > current_time)) { hdr = (item_hdr *)ITEM_data(hdr_it); if (hdr->page_id == page_id && hdr->page_version == page_version) { // Item header is still completely valid. extstore_delete(storage, page_id, page_version, 1, ntotal); // drop inactive items. if (drop_unread && GET_LRU(hdr_it->slabs_clsid) == COLD_LRU) { do_write = false; skipped++; } else { do_write = true; } } } if (do_write) { bool do_update = false; int tries; obj_io io; io.len = ntotal; io.mode = OBJ_IO_WRITE; for (tries = 10; tries > 0; tries--) { if (extstore_write_request(storage, PAGE_BUCKET_COMPACT, &io) == 0) { memcpy(io.buf, it, io.len); extstore_write(storage, &io); do_update = true; break; } else { usleep(1000); } } if (do_update) { if (it->refcount == 2) { hdr->page_version = io.page_version; hdr->page_id = io.page_id; hdr->offset = io.offset; rescues++; } else { lost++; // TODO: re-alloc and replace header. 
} } else { lost++; } } do_item_remove(hdr_it); } item_unlock(hv); offset += ntotal; if (read_size - offset < sizeof(struct _stritem)) break; } STATS_LOCK(); stats.extstore_compact_lost += lost; stats.extstore_compact_rescues += rescues; stats.extstore_compact_skipped += skipped; STATS_UNLOCK(); LOGGER_LOG(l, LOG_SYSEVENTS, LOGGER_COMPACT_READ_END, NULL, page_id, offset, rescues, lost, skipped); } static void _storage_compact_cb(void *e, obj_io *io, int ret) { struct storage_compact_wrap *wrap = (struct storage_compact_wrap *)io->data; assert(wrap->submitted == true); pthread_mutex_lock(&wrap->lock); if (ret < 1) { wrap->miss = true; } wrap->done = true; pthread_mutex_unlock(&wrap->lock); } // TODO: hoist the storage bits from lru_maintainer_thread in here. // would be nice if they could avoid hammering the same locks though? // I guess it's only COLD. that's probably fine. static void *storage_compact_thread(void *arg) { void *storage = arg; useconds_t to_sleep = MAX_STORAGE_COMPACT_SLEEP; bool compacting = false; uint64_t page_version = 0; uint64_t page_size = 0; uint64_t page_offset = 0; uint32_t page_id = 0; bool drop_unread = false; char *readback_buf = NULL; struct storage_compact_wrap wrap; logger *l = logger_create(); if (l == NULL) { fprintf(stderr, "Failed to allocate logger for storage compaction thread\n"); abort(); } readback_buf = malloc(settings.ext_wbuf_size); if (readback_buf == NULL) { fprintf(stderr, "Failed to allocate readback buffer for storage compaction thread\n"); abort(); } pthread_mutex_init(&wrap.lock, NULL); wrap.done = false; wrap.submitted = false; wrap.io.data = &wrap; wrap.io.buf = (void *)readback_buf; wrap.io.len = settings.ext_wbuf_size; wrap.io.mode = OBJ_IO_READ; wrap.io.cb = _storage_compact_cb; pthread_mutex_lock(&storage_compact_plock); while (1) { pthread_mutex_unlock(&storage_compact_plock); if (to_sleep) { extstore_run_maint(storage); usleep(to_sleep); } pthread_mutex_lock(&storage_compact_plock); if (!compacting && 
storage_compact_check(storage, l, &page_id, &page_version, &page_size, &drop_unread)) { page_offset = 0; compacting = true; LOGGER_LOG(l, LOG_SYSEVENTS, LOGGER_COMPACT_START, NULL, page_id, page_version); } if (compacting) { pthread_mutex_lock(&wrap.lock); if (page_offset < page_size && !wrap.done && !wrap.submitted) { wrap.io.page_version = page_version; wrap.io.page_id = page_id; wrap.io.offset = page_offset; // FIXME: should be smarter about io->next (unlink at use?) wrap.io.next = NULL; wrap.submitted = true; wrap.miss = false; extstore_submit(storage, &wrap.io); } else if (wrap.miss) { LOGGER_LOG(l, LOG_SYSEVENTS, LOGGER_COMPACT_ABORT, NULL, page_id); wrap.done = false; wrap.submitted = false; compacting = false; } else if (wrap.submitted && wrap.done) { LOGGER_LOG(l, LOG_SYSEVENTS, LOGGER_COMPACT_READ_START, NULL, page_id, page_offset); storage_compact_readback(storage, l, drop_unread, readback_buf, page_id, page_version, settings.ext_wbuf_size); page_offset += settings.ext_wbuf_size; wrap.done = false; wrap.submitted = false; } else if (page_offset >= page_size) { compacting = false; wrap.done = false; wrap.submitted = false; extstore_close_page(storage, page_id, page_version); LOGGER_LOG(l, LOG_SYSEVENTS, LOGGER_COMPACT_END, NULL, page_id); } pthread_mutex_unlock(&wrap.lock); if (to_sleep > MIN_STORAGE_COMPACT_SLEEP) to_sleep /= 2; } else { if (to_sleep < MAX_STORAGE_COMPACT_SLEEP) to_sleep += MIN_STORAGE_COMPACT_SLEEP; } } free(readback_buf); return NULL; } // TODO // logger needs logger_destroy() to exist/work before this is safe. 
/*int stop_storage_compact_thread(void) { int ret; pthread_mutex_lock(&lru_maintainer_lock); do_run_lru_maintainer_thread = 0; pthread_mutex_unlock(&lru_maintainer_lock); if ((ret = pthread_join(lru_maintainer_tid, NULL)) != 0) { fprintf(stderr, "Failed to stop LRU maintainer thread: %s\n", strerror(ret)); return -1; } settings.lru_maintainer_thread = false; return 0; }*/ void storage_compact_pause(void) { pthread_mutex_lock(&storage_compact_plock); } void storage_compact_resume(void) { pthread_mutex_unlock(&storage_compact_plock); } int start_storage_compact_thread(void *arg) { int ret; pthread_mutex_init(&storage_compact_plock, NULL); if ((ret = pthread_create(&storage_compact_tid, NULL, storage_compact_thread, arg)) != 0) { fprintf(stderr, "Can't create storage_compact thread: %s\n", strerror(ret)); return -1; } return 0; } #endif
16,719
35.347826
99
c
null
NearPMSW-main/nearpm/shadow/memcached-pmem-sd/pslab.h
/* * Copyright 2018 Lenovo * * Licensed under the BSD-3 license. see LICENSE.Lenovo.txt for full text */ #ifndef PSLAB_H #define PSLAB_H #include <libpmem.h> #define PSLAB_POLICY_DRAM 0 #define PSLAB_POLICY_PMEM 1 #define PSLAB_POLICY_BALANCED 2 #define pmem_member_persist(p, m) \ pmem_persist(&(p)->m, sizeof ((p)->m)) #define pmem_member_flush(p, m) \ pmem_flush(&(p)->m, sizeof ((p)->m)) #define pmem_flush_from(p, t, m) \ pmem_flush(&(p)->m, sizeof (t) - offsetof(t, m)); #define pslab_item_data_persist(it) pmem_persist((it)->data, ITEM_dtotal(it) #define pslab_item_data_flush(it) pmem_flush((it)->data, ITEM_dtotal(it)) int pslab_create(char *pool_name, uint32_t pool_size, uint32_t slab_size, uint32_t *slabclass_sizes, int slabclass_num); int pslab_pre_recover(char *name, uint32_t *slab_sizes, int slab_max, int slab_page_size); int pslab_do_recover(void); time_t pslab_process_started(time_t process_started); void pslab_update_flushtime(uint32_t time); void pslab_use_slab(void *p, int id, unsigned int size); void *pslab_get_free_slab(void *slab); int pslab_contains(char *p); uint64_t pslab_addr2off(void *addr); extern bool pslab_force; #endif
1,186
30.236842
90
h
null
NearPMSW-main/nearpm/shadow/memcached-pmem-sd/config.h
/* config.h. Generated from config.h.in by configure. */ /* config.h.in. Generated from configure.ac by autoheader. */ /* Set to nonzero if you want to include DTRACE */ /* #undef ENABLE_DTRACE */ /* Set to nonzero if you want to include SASL */ /* #undef ENABLE_SASL */ /* Set to nonzero if you want to enable a SASL pwdb */ /* #undef ENABLE_SASL_PWDB */ /* machine is bigendian */ /* #undef ENDIAN_BIG */ /* machine is littleendian */ #define ENDIAN_LITTLE 1 /* Set to nonzero if you want to enable extstorextstore */ /* #undef EXTSTORE */ /* Define to 1 if support accept4 */ #define HAVE_ACCEPT4 1 /* Define to 1 if you have the `clock_gettime' function. */ #define HAVE_CLOCK_GETTIME 1 /* Define this if you have an implementation of drop_privileges() */ /* #undef HAVE_DROP_PRIVILEGES */ /* Define this if you have an implementation of drop_worker_privileges() */ /* #undef HAVE_DROP_WORKER_PRIVILEGES */ /* GCC 64bit Atomics available */ /* #undef HAVE_GCC_64ATOMICS */ /* GCC Atomics available */ #define HAVE_GCC_ATOMICS 1 /* Define to 1 if support getopt_long */ #define HAVE_GETOPT_LONG 1 /* Define to 1 if you have the `getpagesizes' function. */ /* #undef HAVE_GETPAGESIZES */ /* Have ntohll */ /* #undef HAVE_HTONLL */ /* Define to 1 if you have the <inttypes.h> header file. */ #define HAVE_INTTYPES_H 1 /* Define to 1 if you have the `memcntl' function. */ /* #undef HAVE_MEMCNTL */ /* Define to 1 if you have the <memory.h> header file. */ #define HAVE_MEMORY_H 1 /* Define to 1 if you have the `mlockall' function. */ #define HAVE_MLOCKALL 1 /* Define to 1 if you have the `pledge' function. */ /* #undef HAVE_PLEDGE */ /* we have sasl_callback_ft */ /* #undef HAVE_SASL_CALLBACK_FT */ /* Set to nonzero if your SASL implementation supports SASL_CB_GETCONF */ /* #undef HAVE_SASL_CB_GETCONF */ /* Define to 1 if you have the <sasl/sasl.h> header file. */ /* #undef HAVE_SASL_SASL_H */ /* Define to 1 if you have the `setppriv' function. 
*/ /* #undef HAVE_SETPPRIV */ /* Define to 1 if you have the `sigignore' function. */ #define HAVE_SIGIGNORE 1 /* Define to 1 if stdbool.h conforms to C99. */ #define HAVE_STDBOOL_H 1 /* Define to 1 if you have the <stdint.h> header file. */ #define HAVE_STDINT_H 1 /* Define to 1 if you have the <stdlib.h> header file. */ #define HAVE_STDLIB_H 1 /* Define to 1 if you have the <strings.h> header file. */ #define HAVE_STRINGS_H 1 /* Define to 1 if you have the <string.h> header file. */ #define HAVE_STRING_H 1 /* Define to 1 if you have the <sys/stat.h> header file. */ #define HAVE_SYS_STAT_H 1 /* Define to 1 if you have the <sys/types.h> header file. */ #define HAVE_SYS_TYPES_H 1 /* Define this if you have umem.h */ /* #undef HAVE_UMEM_H */ /* Define to 1 if you have the <unistd.h> header file. */ #define HAVE_UNISTD_H 1 /* Define to 1 if the system has the type `_Bool'. */ #define HAVE__BOOL 1 /* Machine need alignment */ /* #undef NEED_ALIGN */ /* Name of package */ #define PACKAGE "memcached" /* Define to the address where bug reports for this package should be sent. */ #define PACKAGE_BUGREPORT "memcached@googlegroups.com" /* Define to the full name of this package. */ #define PACKAGE_NAME "memcached" /* Define to the full name and version of this package. */ #define PACKAGE_STRING "memcached 1.5.4" /* Define to the one symbol short name of this package. */ #define PACKAGE_TARNAME "memcached" /* Define to the home page for this package. */ #define PACKAGE_URL "" /* Define to the version of this package. */ #define PACKAGE_VERSION "1.5.4" /* Set to nonzero if you want to enable pslab */ #define PSLAB 1 /* Define to 1 if you have the ANSI C header files. */ #define STDC_HEADERS 1 /* Version number of package */ #define VERSION "1.5.4" /* find sigignore on Linux */ #define _GNU_SOURCE 1 /* Define to empty if `const' does not conform to ANSI C. 
*/ /* #undef const */ /* define to int if socklen_t not available */ /* #undef socklen_t */ #if HAVE_STDBOOL_H #include <stdbool.h> #else #define bool char #define false 0 #define true 1 #endif #ifdef HAVE_INTTYPES_H #include <inttypes.h> #endif
4,134
24.368098
78
h
null
NearPMSW-main/nearpm/shadow/memcached-pmem-sd/sasl_defs.h
#ifndef SASL_DEFS_H #define SASL_DEFS_H 1 // Longest one I could find was ``9798-U-RSA-SHA1-ENC'' #define MAX_SASL_MECH_LEN 32 #if defined(HAVE_SASL_SASL_H) && defined(ENABLE_SASL) #include <sasl/sasl.h> void init_sasl(void); extern char my_sasl_hostname[1025]; #else /* End of SASL support */ typedef void* sasl_conn_t; #define init_sasl() {} #define sasl_dispose(x) {} #define sasl_server_new(a, b, c, d, e, f, g, h) 1 #define sasl_listmech(a, b, c, d, e, f, g, h) 1 #define sasl_server_start(a, b, c, d, e, f) 1 #define sasl_server_step(a, b, c, d, e) 1 #define sasl_getprop(a, b, c) {} #define SASL_OK 0 #define SASL_CONTINUE -1 #endif /* sasl compat */ #endif /* SASL_DEFS_H */
693
20.6875
55
h
null
NearPMSW-main/nearpm/shadow/memcached-pmem-sd/openbsd_priv.c
#include <errno.h> #include <stdlib.h> #include <stdio.h> #include <string.h> #include <unistd.h> #include "memcached.h" /* * this section of code will drop all (OpenBSD) privileges including * those normally granted to all userland process (basic privileges). The * effect of this is that after running this code, the process will not able * to fork(), exec(), etc. See pledge(2) for more information. */ void drop_privileges() { extern char *__progname; if (settings.socketpath != NULL) { if (pledge("stdio unix", NULL) == -1) { fprintf(stderr, "%s: pledge: %s\n", __progname, strerror(errno)); exit(EXIT_FAILURE); } } else { if (pledge("stdio inet", NULL) == -1) { fprintf(stderr, "%s: pledge: %s\n", __progname, strerror(errno)); exit(EXIT_FAILURE); } } }
853
28.448276
76
c
null
NearPMSW-main/nearpm/shadow/memcached-pmem-sd/logger.h
/* logging functions */ #ifndef LOGGER_H #define LOGGER_H #include "bipbuffer.h" /* TODO: starttime tunable */ #define LOGGER_BUF_SIZE 1024 * 64 #define LOGGER_WATCHER_BUF_SIZE 1024 * 256 #define LOGGER_ENTRY_MAX_SIZE 2048 #define GET_LOGGER() ((logger *) pthread_getspecific(logger_key)); /* Inlined from memcached.h - should go into sub header */ typedef unsigned int rel_time_t; enum log_entry_type { LOGGER_ASCII_CMD = 0, LOGGER_EVICTION, LOGGER_ITEM_GET, LOGGER_ITEM_STORE, LOGGER_CRAWLER_STATUS, LOGGER_SLAB_MOVE, #ifdef EXTSTORE LOGGER_EXTSTORE_WRITE, LOGGER_COMPACT_START, LOGGER_COMPACT_ABORT, LOGGER_COMPACT_READ_START, LOGGER_COMPACT_READ_END, LOGGER_COMPACT_END, LOGGER_COMPACT_FRAGINFO, #endif }; enum log_entry_subtype { LOGGER_TEXT_ENTRY = 0, LOGGER_EVICTION_ENTRY, LOGGER_ITEM_GET_ENTRY, LOGGER_ITEM_STORE_ENTRY, #ifdef EXTSTORE LOGGER_EXT_WRITE_ENTRY, #endif }; enum logger_ret_type { LOGGER_RET_OK = 0, LOGGER_RET_NOSPACE, LOGGER_RET_ERR }; enum logger_parse_entry_ret { LOGGER_PARSE_ENTRY_OK = 0, LOGGER_PARSE_ENTRY_FULLBUF, LOGGER_PARSE_ENTRY_FAILED }; typedef const struct { enum log_entry_subtype subtype; int reqlen; uint16_t eflags; char *format; } entry_details; /* log entry intermediary structures */ struct logentry_eviction { long long int exptime; uint32_t latime; uint16_t it_flags; uint8_t nkey; uint8_t clsid; char key[]; }; #ifdef EXTSTORE struct logentry_ext_write { long long int exptime; uint32_t latime; uint16_t it_flags; uint8_t nkey; uint8_t clsid; uint8_t bucket; char key[]; }; #endif struct logentry_item_get { uint8_t was_found; uint8_t nkey; uint8_t clsid; char key[]; }; struct logentry_item_store { int status; int cmd; rel_time_t ttl; uint8_t nkey; uint8_t clsid; char key[]; }; /* end intermediary structures */ typedef struct _logentry { enum log_entry_subtype event; uint16_t eflags; uint64_t gid; struct timeval tv; /* not monotonic! 
*/ int size; union { void *entry; /* probably an item */ char end; } data[]; } logentry; #define LOG_SYSEVENTS (1<<1) /* threads start/stop/working */ #define LOG_FETCHERS (1<<2) /* get/gets/etc */ #define LOG_MUTATIONS (1<<3) /* set/append/incr/etc */ #define LOG_SYSERRORS (1<<4) /* malloc/etc errors */ #define LOG_CONNEVENTS (1<<5) /* new client, closed, etc */ #define LOG_EVICTIONS (1<<6) /* details of evicted items */ #define LOG_STRICT (1<<7) /* block worker instead of drop */ #define LOG_RAWCMDS (1<<9) /* raw ascii commands */ typedef struct _logger { struct _logger *prev; struct _logger *next; pthread_mutex_t mutex; /* guard for this + *buf */ uint64_t written; /* entries written to the buffer */ uint64_t dropped; /* entries dropped */ uint64_t blocked; /* times blocked instead of dropped */ uint16_t fetcher_ratio; /* log one out of every N fetches */ uint16_t mutation_ratio; /* log one out of every N mutations */ uint16_t eflags; /* flags this logger should log */ bipbuf_t *buf; const entry_details *entry_map; } logger; enum logger_watcher_type { LOGGER_WATCHER_STDERR = 0, LOGGER_WATCHER_CLIENT = 1 }; typedef struct { void *c; /* original connection structure. still with source thread attached */ int sfd; /* client fd */ int id; /* id number for watcher list */ uint64_t skipped; /* lines skipped since last successful print */ bool failed_flush; /* recently failed to write out (EAGAIN), wait before retry */ enum logger_watcher_type t; /* stderr, client, syslog, etc */ uint16_t eflags; /* flags we are interested in */ bipbuf_t *buf; /* per-watcher output buffer */ } logger_watcher; struct logger_stats { uint64_t worker_dropped; uint64_t worker_written; uint64_t watcher_skipped; uint64_t watcher_sent; }; extern pthread_key_t logger_key; /* public functions */ void logger_init(void); logger *logger_create(void); #define LOGGER_LOG(l, flag, type, ...) 
\ do { \ logger *myl = l; \ if (l == NULL) \ myl = GET_LOGGER(); \ if (myl->eflags & flag) \ logger_log(myl, type, __VA_ARGS__); \ } while (0) enum logger_ret_type logger_log(logger *l, const enum log_entry_type event, const void *entry, ...); enum logger_add_watcher_ret { LOGGER_ADD_WATCHER_TOO_MANY = 0, LOGGER_ADD_WATCHER_OK, LOGGER_ADD_WATCHER_FAILED }; enum logger_add_watcher_ret logger_add_watcher(void *c, const int sfd, uint16_t f); #endif
4,680
24.032086
100
h
null
NearPMSW-main/nearpm/shadow/memcached-pmem-sd/thread.c
/* -*- Mode: C; tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- */ /* * Thread management for memcached. */ #include "memcached.h" #ifdef EXTSTORE #include "storage.h" #endif #include <assert.h> #include <stdio.h> #include <errno.h> #include <stdlib.h> #include <string.h> #include <pthread.h> #ifdef __sun #include <atomic.h> #endif #define ITEMS_PER_ALLOC 64 /* An item in the connection queue. */ enum conn_queue_item_modes { queue_new_conn, /* brand new connection. */ queue_redispatch, /* redispatching from side thread */ }; typedef struct conn_queue_item CQ_ITEM; struct conn_queue_item { int sfd; enum conn_states init_state; int event_flags; int read_buffer_size; enum network_transport transport; enum conn_queue_item_modes mode; conn *c; CQ_ITEM *next; }; /* A connection queue. */ typedef struct conn_queue CQ; struct conn_queue { CQ_ITEM *head; CQ_ITEM *tail; pthread_mutex_t lock; }; /* Locks for cache LRU operations */ pthread_mutex_t lru_locks[POWER_LARGEST]; /* Connection lock around accepting new connections */ pthread_mutex_t conn_lock = PTHREAD_MUTEX_INITIALIZER; #if !defined(HAVE_GCC_ATOMICS) && !defined(__sun) pthread_mutex_t atomics_mutex = PTHREAD_MUTEX_INITIALIZER; #endif /* Lock for global stats */ static pthread_mutex_t stats_lock = PTHREAD_MUTEX_INITIALIZER; /* Lock to cause worker threads to hang up after being woken */ static pthread_mutex_t worker_hang_lock; /* Free list of CQ_ITEM structs */ static CQ_ITEM *cqi_freelist; static pthread_mutex_t cqi_freelist_lock; static pthread_mutex_t *item_locks; /* size of the item lock hash table */ static uint32_t item_lock_count; unsigned int item_lock_hashpower; #define hashsize(n) ((unsigned long int)1<<(n)) #define hashmask(n) (hashsize(n)-1) /* * Each libevent instance has a wakeup pipe, which other threads * can use to signal that they've put a new connection on its queue. */ static LIBEVENT_THREAD *threads; /* * Number of worker threads that have finished setting themselves up. 
*/ static int init_count = 0; static pthread_mutex_t init_lock; static pthread_cond_t init_cond; static void thread_libevent_process(int fd, short which, void *arg); /* item_lock() must be held for an item before any modifications to either its * associated hash bucket, or the structure itself. * LRU modifications must hold the item lock, and the LRU lock. * LRU's accessing items must item_trylock() before modifying an item. * Items accessible from an LRU must not be freed or modified * without first locking and removing from the LRU. */ void item_lock(uint32_t hv) { mutex_lock(&item_locks[hv & hashmask(item_lock_hashpower)]); } void *item_trylock(uint32_t hv) { pthread_mutex_t *lock = &item_locks[hv & hashmask(item_lock_hashpower)]; if (pthread_mutex_trylock(lock) == 0) { return lock; } return NULL; } void item_trylock_unlock(void *lock) { mutex_unlock((pthread_mutex_t *) lock); } void item_unlock(uint32_t hv) { mutex_unlock(&item_locks[hv & hashmask(item_lock_hashpower)]); } static void wait_for_thread_registration(int nthreads) { while (init_count < nthreads) { pthread_cond_wait(&init_cond, &init_lock); } } static void register_thread_initialized(void) { pthread_mutex_lock(&init_lock); init_count++; pthread_cond_signal(&init_cond); pthread_mutex_unlock(&init_lock); /* Force worker threads to pile up if someone wants us to */ pthread_mutex_lock(&worker_hang_lock); pthread_mutex_unlock(&worker_hang_lock); } /* Must not be called with any deeper locks held */ void pause_threads(enum pause_thread_types type) { char buf[1]; int i; buf[0] = 0; switch (type) { case PAUSE_ALL_THREADS: lru_maintainer_pause(); slabs_rebalancer_pause(); lru_crawler_pause(); #ifdef EXTSTORE storage_compact_pause(); #endif case PAUSE_WORKER_THREADS: buf[0] = 'p'; pthread_mutex_lock(&worker_hang_lock); break; case RESUME_ALL_THREADS: lru_maintainer_resume(); slabs_rebalancer_resume(); lru_crawler_resume(); #ifdef EXTSTORE storage_compact_resume(); #endif case RESUME_WORKER_THREADS: 
pthread_mutex_unlock(&worker_hang_lock); break; default: fprintf(stderr, "Unknown lock type: %d\n", type); assert(1 == 0); break; } /* Only send a message if we have one. */ if (buf[0] == 0) { return; } pthread_mutex_lock(&init_lock); init_count = 0; for (i = 0; i < settings.num_threads; i++) { if (write(threads[i].notify_send_fd, buf, 1) != 1) { perror("Failed writing to notify pipe"); /* TODO: This is a fatal problem. Can it ever happen temporarily? */ } } wait_for_thread_registration(settings.num_threads); pthread_mutex_unlock(&init_lock); } /* * Initializes a connection queue. */ static void cq_init(CQ *cq) { pthread_mutex_init(&cq->lock, NULL); cq->head = NULL; cq->tail = NULL; } /* * Looks for an item on a connection queue, but doesn't block if there isn't * one. * Returns the item, or NULL if no item is available */ static CQ_ITEM *cq_pop(CQ *cq) { CQ_ITEM *item; pthread_mutex_lock(&cq->lock); item = cq->head; if (NULL != item) { cq->head = item->next; if (NULL == cq->head) cq->tail = NULL; } pthread_mutex_unlock(&cq->lock); return item; } /* * Adds an item to a connection queue. */ static void cq_push(CQ *cq, CQ_ITEM *item) { item->next = NULL; pthread_mutex_lock(&cq->lock); if (NULL == cq->tail) cq->head = item; else cq->tail->next = item; cq->tail = item; pthread_mutex_unlock(&cq->lock); } /* * Returns a fresh connection queue item. */ static CQ_ITEM *cqi_new(void) { CQ_ITEM *item = NULL; pthread_mutex_lock(&cqi_freelist_lock); if (cqi_freelist) { item = cqi_freelist; cqi_freelist = item->next; } pthread_mutex_unlock(&cqi_freelist_lock); if (NULL == item) { int i; /* Allocate a bunch of items at once to reduce fragmentation */ item = malloc(sizeof(CQ_ITEM) * ITEMS_PER_ALLOC); if (NULL == item) { STATS_LOCK(); stats.malloc_fails++; STATS_UNLOCK(); return NULL; } /* * Link together all the new items except the first one * (which we'll return to the caller) for placement on * the freelist. 
*/ for (i = 2; i < ITEMS_PER_ALLOC; i++) item[i - 1].next = &item[i]; pthread_mutex_lock(&cqi_freelist_lock); item[ITEMS_PER_ALLOC - 1].next = cqi_freelist; cqi_freelist = &item[1]; pthread_mutex_unlock(&cqi_freelist_lock); } return item; } /* * Frees a connection queue item (adds it to the freelist.) */ static void cqi_free(CQ_ITEM *item) { pthread_mutex_lock(&cqi_freelist_lock); item->next = cqi_freelist; cqi_freelist = item; pthread_mutex_unlock(&cqi_freelist_lock); } /* * Creates a worker thread. */ static void create_worker(void *(*func)(void *), void *arg) { pthread_attr_t attr; int ret; pthread_attr_init(&attr); if ((ret = pthread_create(&((LIBEVENT_THREAD*)arg)->thread_id, &attr, func, arg)) != 0) { fprintf(stderr, "Can't create thread: %s\n", strerror(ret)); exit(1); } } /* * Sets whether or not we accept new connections. */ void accept_new_conns(const bool do_accept) { pthread_mutex_lock(&conn_lock); do_accept_new_conns(do_accept); pthread_mutex_unlock(&conn_lock); } /****************************** LIBEVENT THREADS *****************************/ /* * Set up a thread's information. */ static void setup_thread(LIBEVENT_THREAD *me) { me->base = event_init(); if (! 
me->base) { fprintf(stderr, "Can't allocate event base\n"); exit(1); } /* Listen for notifications from other threads */ event_set(&me->notify_event, me->notify_receive_fd, EV_READ | EV_PERSIST, thread_libevent_process, me); event_base_set(me->base, &me->notify_event); if (event_add(&me->notify_event, 0) == -1) { fprintf(stderr, "Can't monitor libevent notify pipe\n"); exit(1); } me->new_conn_queue = malloc(sizeof(struct conn_queue)); if (me->new_conn_queue == NULL) { perror("Failed to allocate memory for connection queue"); exit(EXIT_FAILURE); } cq_init(me->new_conn_queue); if (pthread_mutex_init(&me->stats.mutex, NULL) != 0) { perror("Failed to initialize mutex"); exit(EXIT_FAILURE); } me->suffix_cache = cache_create("suffix", SUFFIX_SIZE, sizeof(char*), NULL, NULL); if (me->suffix_cache == NULL) { fprintf(stderr, "Failed to create suffix cache\n"); exit(EXIT_FAILURE); } #ifdef EXTSTORE me->io_cache = cache_create("io", sizeof(io_wrap), sizeof(char*), NULL, NULL); if (me->io_cache == NULL) { fprintf(stderr, "Failed to create IO object cache\n"); exit(EXIT_FAILURE); } #endif } /* * Worker thread: main event loop */ static void *worker_libevent(void *arg) { LIBEVENT_THREAD *me = arg; /* Any per-thread setup can happen here; memcached_thread_init() will block until * all threads have finished initializing. */ me->l = logger_create(); me->lru_bump_buf = item_lru_bump_buf_create(); if (me->l == NULL || me->lru_bump_buf == NULL) { abort(); } if (settings.drop_privileges) { drop_worker_privileges(); } register_thread_initialized(); event_base_loop(me->base, 0); return NULL; } /* * Processes an incoming "handle a new connection" item. This is called when * input arrives on the libevent wakeup pipe. 
*/ static void thread_libevent_process(int fd, short which, void *arg) { LIBEVENT_THREAD *me = arg; CQ_ITEM *item; char buf[1]; conn *c; unsigned int timeout_fd; if (read(fd, buf, 1) != 1) { if (settings.verbose > 0) fprintf(stderr, "Can't read from libevent pipe\n"); return; } switch (buf[0]) { case 'c': item = cq_pop(me->new_conn_queue); if (NULL == item) { break; } switch (item->mode) { case queue_new_conn: c = conn_new(item->sfd, item->init_state, item->event_flags, item->read_buffer_size, item->transport, me->base); if (c == NULL) { if (IS_UDP(item->transport)) { fprintf(stderr, "Can't listen for events on UDP socket\n"); exit(1); } else { if (settings.verbose > 0) { fprintf(stderr, "Can't listen for events on fd %d\n", item->sfd); } close(item->sfd); } } else { c->thread = me; } break; case queue_redispatch: conn_worker_readd(item->c); break; } cqi_free(item); break; /* we were told to pause and report in */ case 'p': register_thread_initialized(); break; /* a client socket timed out */ case 't': if (read(fd, &timeout_fd, sizeof(timeout_fd)) != sizeof(timeout_fd)) { if (settings.verbose > 0) fprintf(stderr, "Can't read timeout fd from libevent pipe\n"); return; } conn_close_idle(conns[timeout_fd]); break; } } /* Which thread we assigned a connection to most recently. */ static int last_thread = -1; /* * Dispatches a new connection to another thread. This is only ever called * from the main thread, either during initialization (for UDP) or because * of an incoming connection. 
*/ void dispatch_conn_new(int sfd, enum conn_states init_state, int event_flags, int read_buffer_size, enum network_transport transport) { CQ_ITEM *item = cqi_new(); char buf[1]; if (item == NULL) { close(sfd); /* given that malloc failed this may also fail, but let's try */ fprintf(stderr, "Failed to allocate memory for connection object\n"); return ; } int tid = (last_thread + 1) % settings.num_threads; LIBEVENT_THREAD *thread = threads + tid; last_thread = tid; item->sfd = sfd; item->init_state = init_state; item->event_flags = event_flags; item->read_buffer_size = read_buffer_size; item->transport = transport; item->mode = queue_new_conn; cq_push(thread->new_conn_queue, item); MEMCACHED_CONN_DISPATCH(sfd, thread->thread_id); buf[0] = 'c'; if (write(thread->notify_send_fd, buf, 1) != 1) { perror("Writing to thread notify pipe"); } } /* * Re-dispatches a connection back to the original thread. Can be called from * any side thread borrowing a connection. */ void redispatch_conn(conn *c) { CQ_ITEM *item = cqi_new(); char buf[1]; if (item == NULL) { /* Can't cleanly redispatch connection. close it forcefully. */ c->state = conn_closed; close(c->sfd); return; } LIBEVENT_THREAD *thread = c->thread; item->sfd = c->sfd; item->init_state = conn_new_cmd; item->c = c; item->mode = queue_redispatch; cq_push(thread->new_conn_queue, item); buf[0] = 'c'; if (write(thread->notify_send_fd, buf, 1) != 1) { perror("Writing to thread notify pipe"); } } /* This misses the allow_new_conns flag :( */ void sidethread_conn_close(conn *c) { c->state = conn_closed; if (settings.verbose > 1) fprintf(stderr, "<%d connection closed from side thread.\n", c->sfd); close(c->sfd); STATS_LOCK(); stats_state.curr_conns--; STATS_UNLOCK(); return; } /********************************* ITEM ACCESS *******************************/ /* * Allocates a new item. 
*/ item *item_alloc(char *key, size_t nkey, int flags, rel_time_t exptime, int nbytes) { item *it; /* do_item_alloc handles its own locks */ it = do_item_alloc(key, nkey, flags, exptime, nbytes); return it; } /* * Returns an item if it hasn't been marked as expired, * lazy-expiring as needed. */ item *item_get(const char *key, const size_t nkey, conn *c, const bool do_update) { item *it; uint32_t hv; hv = hash(key, nkey); item_lock(hv); it = do_item_get(key, nkey, hv, c, do_update); item_unlock(hv); return it; } item *item_touch(const char *key, size_t nkey, uint32_t exptime, conn *c) { item *it; uint32_t hv; hv = hash(key, nkey); item_lock(hv); it = do_item_touch(key, nkey, exptime, hv, c); item_unlock(hv); return it; } /* * Links an item into the LRU and hashtable. */ int item_link(item *item) { int ret; uint32_t hv; hv = hash(ITEM_key(item), item->nkey); item_lock(hv); ret = do_item_link(item, hv); item_unlock(hv); return ret; } /* * Decrements the reference count on an item and adds it to the freelist if * needed. */ void item_remove(item *item) { uint32_t hv; hv = hash(ITEM_key(item), item->nkey); item_lock(hv); do_item_remove(item); item_unlock(hv); } /* * Replaces one item with another in the hashtable. * Unprotected by a mutex lock since the core server does not require * it to be thread-safe. */ int item_replace(item *old_it, item *new_it, const uint32_t hv) { return do_item_replace(old_it, new_it, hv); } /* * Unlinks an item from the LRU and hashtable. */ void item_unlink(item *item) { uint32_t hv; hv = hash(ITEM_key(item), item->nkey); item_lock(hv); do_item_unlink(item, hv); item_unlock(hv); } /* * Does arithmetic on a numeric item value. 
*/ enum delta_result_type add_delta(conn *c, const char *key, const size_t nkey, int incr, const int64_t delta, char *buf, uint64_t *cas) { enum delta_result_type ret; uint32_t hv; hv = hash(key, nkey); item_lock(hv); ret = do_add_delta(c, key, nkey, incr, delta, buf, cas, hv); item_unlock(hv); return ret; } /* * Stores an item in the cache (high level, obeys set/add/replace semantics) */ enum store_item_type store_item(item *item, int comm, conn* c) { enum store_item_type ret; uint32_t hv; hv = hash(ITEM_key(item), item->nkey); item_lock(hv); ret = do_store_item(item, comm, c, hv); item_unlock(hv); return ret; } /******************************* GLOBAL STATS ******************************/ void STATS_LOCK() { pthread_mutex_lock(&stats_lock); } void STATS_UNLOCK() { pthread_mutex_unlock(&stats_lock); } void threadlocal_stats_reset(void) { int ii; for (ii = 0; ii < settings.num_threads; ++ii) { pthread_mutex_lock(&threads[ii].stats.mutex); #define X(name) threads[ii].stats.name = 0; THREAD_STATS_FIELDS #ifdef EXTSTORE EXTSTORE_THREAD_STATS_FIELDS #endif #undef X memset(&threads[ii].stats.slab_stats, 0, sizeof(threads[ii].stats.slab_stats)); memset(&threads[ii].stats.lru_hits, 0, sizeof(uint64_t) * POWER_LARGEST); pthread_mutex_unlock(&threads[ii].stats.mutex); } } void threadlocal_stats_aggregate(struct thread_stats *stats) { int ii, sid; /* The struct has a mutex, but we can safely set the whole thing * to zero since it is unused when aggregating. 
*/ memset(stats, 0, sizeof(*stats)); for (ii = 0; ii < settings.num_threads; ++ii) { pthread_mutex_lock(&threads[ii].stats.mutex); #define X(name) stats->name += threads[ii].stats.name; THREAD_STATS_FIELDS #ifdef EXTSTORE EXTSTORE_THREAD_STATS_FIELDS #endif #undef X for (sid = 0; sid < MAX_NUMBER_OF_SLAB_CLASSES; sid++) { #define X(name) stats->slab_stats[sid].name += \ threads[ii].stats.slab_stats[sid].name; SLAB_STATS_FIELDS #undef X } for (sid = 0; sid < POWER_LARGEST; sid++) { stats->lru_hits[sid] += threads[ii].stats.lru_hits[sid]; stats->slab_stats[CLEAR_LRU(sid)].get_hits += threads[ii].stats.lru_hits[sid]; } pthread_mutex_unlock(&threads[ii].stats.mutex); } } void slab_stats_aggregate(struct thread_stats *stats, struct slab_stats *out) { int sid; memset(out, 0, sizeof(*out)); for (sid = 0; sid < MAX_NUMBER_OF_SLAB_CLASSES; sid++) { #define X(name) out->name += stats->slab_stats[sid].name; SLAB_STATS_FIELDS #undef X } } /* * Initializes the thread subsystem, creating various worker threads. * * nthreads Number of worker event handler threads to spawn */ void memcached_thread_init(int nthreads, void *arg) { int i; int power; for (i = 0; i < POWER_LARGEST; i++) { pthread_mutex_init(&lru_locks[i], NULL); } pthread_mutex_init(&worker_hang_lock, NULL); pthread_mutex_init(&init_lock, NULL); pthread_cond_init(&init_cond, NULL); pthread_mutex_init(&cqi_freelist_lock, NULL); cqi_freelist = NULL; /* Want a wide lock table, but don't waste memory */ if (nthreads < 3) { power = 10; } else if (nthreads < 4) { power = 11; } else if (nthreads < 5) { power = 12; } else if (nthreads <= 10) { power = 13; } else if (nthreads <= 20) { power = 14; } else { /* 32k buckets. just under the hashpower default. 
*/ power = 15; } if (power >= hashpower) { fprintf(stderr, "Hash table power size (%d) cannot be equal to or less than item lock table (%d)\n", hashpower, power); fprintf(stderr, "Item lock table grows with `-t N` (worker threadcount)\n"); fprintf(stderr, "Hash table grows with `-o hashpower=N` \n"); exit(1); } item_lock_count = hashsize(power); item_lock_hashpower = power; item_locks = calloc(item_lock_count, sizeof(pthread_mutex_t)); if (! item_locks) { perror("Can't allocate item locks"); exit(1); } for (i = 0; i < item_lock_count; i++) { pthread_mutex_init(&item_locks[i], NULL); } threads = calloc(nthreads, sizeof(LIBEVENT_THREAD)); if (! threads) { perror("Can't allocate thread descriptors"); exit(1); } for (i = 0; i < nthreads; i++) { int fds[2]; if (pipe(fds)) { perror("Can't create notify pipe"); exit(1); } threads[i].notify_receive_fd = fds[0]; threads[i].notify_send_fd = fds[1]; #ifdef EXTSTORE threads[i].storage = arg; #endif setup_thread(&threads[i]); /* Reserve three fds for the libevent base, and two for the pipe */ stats_state.reserved_fds += 5; } /* Create threads after we've done all the libevent setup. */ for (i = 0; i < nthreads; i++) { create_worker(worker_libevent, &threads[i]); } /* Wait for all the threads to set themselves up before returning. */ pthread_mutex_lock(&init_lock); wait_for_thread_registration(nthreads); pthread_mutex_unlock(&init_lock); }
21,856
26.017305
127
c
null
NearPMSW-main/nearpm/shadow/memcached-pmem-sd/cache.c
/* -*- Mode: C; tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- */ #include <stdlib.h> #include <string.h> #include <inttypes.h> #ifndef NDEBUG #include <signal.h> #endif #include "cache.h" #ifndef NDEBUG const uint64_t redzone_pattern = 0xdeadbeefcafebabe; int cache_error = 0; #endif const int initial_pool_size = 64; cache_t* cache_create(const char *name, size_t bufsize, size_t align, cache_constructor_t* constructor, cache_destructor_t* destructor) { cache_t* ret = calloc(1, sizeof(cache_t)); char* nm = strdup(name); void** ptr = calloc(initial_pool_size, sizeof(void*)); if (ret == NULL || nm == NULL || ptr == NULL || pthread_mutex_init(&ret->mutex, NULL) == -1) { free(ret); free(nm); free(ptr); return NULL; } ret->name = nm; ret->ptr = ptr; ret->freetotal = initial_pool_size; ret->constructor = constructor; ret->destructor = destructor; #ifndef NDEBUG ret->bufsize = bufsize + 2 * sizeof(redzone_pattern); #else ret->bufsize = bufsize; #endif return ret; } static inline void* get_object(void *ptr) { #ifndef NDEBUG uint64_t *pre = ptr; return pre + 1; #else return ptr; #endif } void cache_destroy(cache_t *cache) { while (cache->freecurr > 0) { void *ptr = cache->ptr[--cache->freecurr]; if (cache->destructor) { cache->destructor(get_object(ptr), NULL); } free(ptr); } free(cache->name); free(cache->ptr); pthread_mutex_destroy(&cache->mutex); free(cache); } void* cache_alloc(cache_t *cache) { void *ret; pthread_mutex_lock(&cache->mutex); ret = do_cache_alloc(cache); pthread_mutex_unlock(&cache->mutex); return ret; } void* do_cache_alloc(cache_t *cache) { void *ret; void *object; if (cache->freecurr > 0) { ret = cache->ptr[--cache->freecurr]; object = get_object(ret); } else { object = ret = malloc(cache->bufsize); if (ret != NULL) { object = get_object(ret); if (cache->constructor != NULL && cache->constructor(object, NULL, 0) != 0) { free(ret); object = NULL; } } } #ifndef NDEBUG if (object != NULL) { /* add a simple form of buffer-check */ uint64_t *pre 
= ret; *pre = redzone_pattern; ret = pre+1; memcpy(((char*)ret) + cache->bufsize - (2 * sizeof(redzone_pattern)), &redzone_pattern, sizeof(redzone_pattern)); } #endif return object; } void cache_free(cache_t *cache, void *ptr) { pthread_mutex_lock(&cache->mutex); do_cache_free(cache, ptr); pthread_mutex_unlock(&cache->mutex); } void do_cache_free(cache_t *cache, void *ptr) { #ifndef NDEBUG /* validate redzone... */ if (memcmp(((char*)ptr) + cache->bufsize - (2 * sizeof(redzone_pattern)), &redzone_pattern, sizeof(redzone_pattern)) != 0) { raise(SIGABRT); cache_error = 1; return; } uint64_t *pre = ptr; --pre; if (*pre != redzone_pattern) { raise(SIGABRT); cache_error = -1; return; } ptr = pre; #endif if (cache->freecurr < cache->freetotal) { cache->ptr[cache->freecurr++] = ptr; } else { /* try to enlarge free connections array */ size_t newtotal = cache->freetotal * 2; void **new_free = realloc(cache->ptr, sizeof(char *) * newtotal); if (new_free) { cache->freetotal = newtotal; cache->ptr = new_free; cache->ptr[cache->freecurr++] = ptr; } else { if (cache->destructor) { cache->destructor(ptr, NULL); } free(ptr); } } }
3,862
23.762821
77
c
null
NearPMSW-main/nearpm/shadow/memcached-pmem-sd/util.c
#include <stdio.h> #include <assert.h> #include <ctype.h> #include <errno.h> #include <string.h> #include <stdlib.h> #include <stdarg.h> #include "memcached.h" static char *uriencode_map[256]; static char uriencode_str[768]; void uriencode_init(void) { int x; char *str = uriencode_str; for (x = 0; x < 256; x++) { if (isalnum(x) || x == '-' || x == '.' || x == '_' || x == '~') { uriencode_map[x] = NULL; } else { snprintf(str, 4, "%%%02hhX", (unsigned char)x); uriencode_map[x] = str; str += 3; /* lobbing off the \0 is fine */ } } } bool uriencode(const char *src, char *dst, const size_t srclen, const size_t dstlen) { int x; size_t d = 0; for (x = 0; x < srclen; x++) { if (d + 4 >= dstlen) return false; if (uriencode_map[(unsigned char) src[x]] != NULL) { memcpy(&dst[d], uriencode_map[(unsigned char) src[x]], 3); d += 3; } else { dst[d] = src[x]; d++; } } dst[d] = '\0'; return true; } /* Avoid warnings on solaris, where isspace() is an index into an array, and gcc uses signed chars */ #define xisspace(c) isspace((unsigned char)c) bool safe_strtoull(const char *str, uint64_t *out) { assert(out != NULL); errno = 0; *out = 0; char *endptr; unsigned long long ull = strtoull(str, &endptr, 10); if ((errno == ERANGE) || (str == endptr)) { return false; } if (xisspace(*endptr) || (*endptr == '\0' && endptr != str)) { if ((long long) ull < 0) { /* only check for negative signs in the uncommon case when * the unsigned number is so big that it's negative as a * signed number. 
*/ if (strchr(str, '-') != NULL) { return false; } } *out = ull; return true; } return false; } bool safe_strtoll(const char *str, int64_t *out) { assert(out != NULL); errno = 0; *out = 0; char *endptr; long long ll = strtoll(str, &endptr, 10); if ((errno == ERANGE) || (str == endptr)) { return false; } if (xisspace(*endptr) || (*endptr == '\0' && endptr != str)) { *out = ll; return true; } return false; } bool safe_strtoul(const char *str, uint32_t *out) { char *endptr = NULL; unsigned long l = 0; assert(out); assert(str); *out = 0; errno = 0; l = strtoul(str, &endptr, 10); if ((errno == ERANGE) || (str == endptr)) { return false; } if (xisspace(*endptr) || (*endptr == '\0' && endptr != str)) { if ((long) l < 0) { /* only check for negative signs in the uncommon case when * the unsigned number is so big that it's negative as a * signed number. */ if (strchr(str, '-') != NULL) { return false; } } *out = l; return true; } return false; } bool safe_strtol(const char *str, int32_t *out) { assert(out != NULL); errno = 0; *out = 0; char *endptr; long l = strtol(str, &endptr, 10); if ((errno == ERANGE) || (str == endptr)) { return false; } if (xisspace(*endptr) || (*endptr == '\0' && endptr != str)) { *out = l; return true; } return false; } bool safe_strtod(const char *str, double *out) { assert(out != NULL); errno = 0; *out = 0; char *endptr; double d = strtod(str, &endptr); if ((errno == ERANGE) || (str == endptr)) { return false; } if (xisspace(*endptr) || (*endptr == '\0' && endptr != str)) { *out = d; return true; } return false; } void vperror(const char *fmt, ...) { int old_errno = errno; char buf[1024]; va_list ap; va_start(ap, fmt); if (vsnprintf(buf, sizeof(buf), fmt, ap) == -1) { buf[sizeof(buf) - 1] = '\0'; } va_end(ap); errno = old_errno; perror(buf); } #ifndef HAVE_HTONLL static uint64_t mc_swap64(uint64_t in) { #ifdef ENDIAN_LITTLE /* Little endian, flip the bytes around until someone makes a faster/better * way to do this. 
*/ int64_t rv = 0; int i = 0; for(i = 0; i<8; i++) { rv = (rv << 8) | (in & 0xff); in >>= 8; } return rv; #else /* big-endian machines don't need byte swapping */ return in; #endif } uint64_t ntohll(uint64_t val) { return mc_swap64(val); } uint64_t htonll(uint64_t val) { return mc_swap64(val); } #endif
4,608
22.395939
101
c
null
NearPMSW-main/nearpm/shadow/memcached-pmem-sd/extstore.h
#ifndef EXTSTORE_H #define EXTSTORE_H /* A safe-to-read dataset for determining compaction. * id is the array index. */ struct extstore_page_data { uint64_t version; uint64_t bytes_used; unsigned int bucket; }; /* Pages can have objects deleted from them at any time. This creates holes * that can't be reused until the page is either evicted or all objects are * deleted. * bytes_fragmented is the total bytes for all of these holes. * It is the size of all used pages minus each page's bytes_used value. */ struct extstore_stats { uint64_t page_allocs; uint64_t page_count; /* total page count */ uint64_t page_evictions; uint64_t page_reclaims; uint64_t page_size; /* size in bytes per page (supplied by caller) */ uint64_t pages_free; /* currently unallocated/unused pages */ uint64_t pages_used; uint64_t objects_evicted; uint64_t objects_read; uint64_t objects_written; uint64_t objects_used; /* total number of objects stored */ uint64_t bytes_evicted; uint64_t bytes_written; uint64_t bytes_read; /* wbuf - read -> bytes read from storage */ uint64_t bytes_used; /* total number of bytes stored */ uint64_t bytes_fragmented; /* see above comment */ struct extstore_page_data *page_data; }; // TODO: Temporary configuration structure. A "real" library should have an // extstore_set(enum, void *ptr) which hides the implementation. // this is plenty for quick development. struct extstore_conf { unsigned int page_size; // ideally 64-256M in size unsigned int page_count; unsigned int page_buckets; // number of different writeable pages unsigned int wbuf_size; // must divide cleanly into page_size unsigned int wbuf_count; // this might get locked to "2 per active page" unsigned int io_threadcount; unsigned int io_depth; // with normal I/O, hits locks less. req'd for AIO }; enum obj_io_mode { OBJ_IO_READ = 0, OBJ_IO_WRITE, }; typedef struct _obj_io obj_io; typedef void (*obj_io_cb)(void *e, obj_io *io, int ret); /* An object for both reads and writes to the storage engine. 
* Once an IO is submitted, ->next may be changed by the IO thread. It is not * safe to further modify the IO stack until the entire request is completed. */ struct _obj_io { void *data; /* user supplied data pointer */ struct _obj_io *next; char *buf; /* buffer of data to read or write to */ struct iovec *iov; /* alternatively, use this iovec */ unsigned int iovcnt; /* number of IOV's */ unsigned int page_version; /* page version for read mode */ unsigned int len; /* for both modes */ unsigned int offset; /* for read mode */ unsigned short page_id; /* for read mode */ enum obj_io_mode mode; /* callback pointers? */ obj_io_cb cb; }; enum extstore_res { EXTSTORE_INIT_BAD_WBUF_SIZE = 1, EXTSTORE_INIT_NEED_MORE_WBUF, EXTSTORE_INIT_NEED_MORE_BUCKETS, EXTSTORE_INIT_PAGE_WBUF_ALIGNMENT, EXTSTORE_INIT_OOM, EXTSTORE_INIT_OPEN_FAIL, EXTSTORE_INIT_THREAD_FAIL }; const char *extstore_err(enum extstore_res res); void *extstore_init(char *fn, struct extstore_conf *cf, enum extstore_res *res); int extstore_write_request(void *ptr, unsigned int bucket, obj_io *io); void extstore_write(void *ptr, obj_io *io); int extstore_submit(void *ptr, obj_io *io); /* count are the number of objects being removed, bytes are the original * length of those objects. Bytes is optional but you can't track * fragmentation without it. */ int extstore_check(void *ptr, unsigned int page_id, uint64_t page_version); int extstore_delete(void *ptr, unsigned int page_id, uint64_t page_version, unsigned int count, unsigned int bytes); void extstore_get_stats(void *ptr, struct extstore_stats *st); /* add page data array to a stats structure. * caller must allocate its stats.page_data memory first. */ void extstore_get_page_data(void *ptr, struct extstore_stats *st); void extstore_run_maint(void *ptr); void extstore_close_page(void *ptr, unsigned int page_id, uint64_t page_version); #endif
4,091
36.541284
116
h
null
NearPMSW-main/nearpm/shadow/memcached-pmem-sd/stats.c
/* -*- Mode: C; tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- */ /* * Detailed statistics management. For simple stats like total number of * "get" requests, we use inline code in memcached.c and friends, but when * stats detail mode is activated, the code here records more information. * * Author: * Steven Grimm <sgrimm@facebook.com> */ #include "memcached.h" #include <stdio.h> #include <stdlib.h> #include <string.h> #include <assert.h> /* * Stats are tracked on the basis of key prefixes. This is a simple * fixed-size hash of prefixes; we run the prefixes through the same * CRC function used by the cache hashtable. */ typedef struct _prefix_stats PREFIX_STATS; struct _prefix_stats { char *prefix; size_t prefix_len; uint64_t num_gets; uint64_t num_sets; uint64_t num_deletes; uint64_t num_hits; PREFIX_STATS *next; }; #define PREFIX_HASH_SIZE 256 static PREFIX_STATS *prefix_stats[PREFIX_HASH_SIZE]; static int num_prefixes = 0; static int total_prefix_size = 0; void stats_prefix_init() { memset(prefix_stats, 0, sizeof(prefix_stats)); } /* * Cleans up all our previously collected stats. NOTE: the stats lock is * assumed to be held when this is called. */ void stats_prefix_clear() { int i; for (i = 0; i < PREFIX_HASH_SIZE; i++) { PREFIX_STATS *cur, *next; for (cur = prefix_stats[i]; cur != NULL; cur = next) { next = cur->next; free(cur->prefix); free(cur); } prefix_stats[i] = NULL; } num_prefixes = 0; total_prefix_size = 0; } /* * Returns the stats structure for a prefix, creating it if it's not already * in the list. 
*/ /*@null@*/ static PREFIX_STATS *stats_prefix_find(const char *key, const size_t nkey) { PREFIX_STATS *pfs; uint32_t hashval; size_t length; bool bailout = true; assert(key != NULL); for (length = 0; length < nkey && key[length] != '\0'; length++) { if (key[length] == settings.prefix_delimiter) { bailout = false; break; } } if (bailout) { return NULL; } hashval = hash(key, length) % PREFIX_HASH_SIZE; for (pfs = prefix_stats[hashval]; NULL != pfs; pfs = pfs->next) { if (strncmp(pfs->prefix, key, length) == 0) return pfs; } pfs = calloc(sizeof(PREFIX_STATS), 1); if (NULL == pfs) { perror("Can't allocate space for stats structure: calloc"); return NULL; } pfs->prefix = malloc(length + 1); if (NULL == pfs->prefix) { perror("Can't allocate space for copy of prefix: malloc"); free(pfs); return NULL; } strncpy(pfs->prefix, key, length); pfs->prefix[length] = '\0'; /* because strncpy() sucks */ pfs->prefix_len = length; pfs->next = prefix_stats[hashval]; prefix_stats[hashval] = pfs; num_prefixes++; total_prefix_size += length; return pfs; } /* * Records a "get" of a key. */ void stats_prefix_record_get(const char *key, const size_t nkey, const bool is_hit) { PREFIX_STATS *pfs; STATS_LOCK(); pfs = stats_prefix_find(key, nkey); if (NULL != pfs) { pfs->num_gets++; if (is_hit) { pfs->num_hits++; } } STATS_UNLOCK(); } /* * Records a "delete" of a key. */ void stats_prefix_record_delete(const char *key, const size_t nkey) { PREFIX_STATS *pfs; STATS_LOCK(); pfs = stats_prefix_find(key, nkey); if (NULL != pfs) { pfs->num_deletes++; } STATS_UNLOCK(); } /* * Records a "set" of a key. */ void stats_prefix_record_set(const char *key, const size_t nkey) { PREFIX_STATS *pfs; STATS_LOCK(); pfs = stats_prefix_find(key, nkey); if (NULL != pfs) { pfs->num_sets++; } STATS_UNLOCK(); } /* * Returns stats in textual form suitable for writing to a client. 
*/ /*@null@*/ char *stats_prefix_dump(int *length) { const char *format = "PREFIX %s get %llu hit %llu set %llu del %llu\r\n"; PREFIX_STATS *pfs; char *buf; int i, pos; size_t size = 0, written = 0, total_written = 0; /* * Figure out how big the buffer needs to be. This is the sum of the * lengths of the prefixes themselves, plus the size of one copy of * the per-prefix output with 20-digit values for all the counts, * plus space for the "END" at the end. */ STATS_LOCK(); size = strlen(format) + total_prefix_size + num_prefixes * (strlen(format) - 2 /* %s */ + 4 * (20 - 4)) /* %llu replaced by 20-digit num */ + sizeof("END\r\n"); buf = malloc(size); if (NULL == buf) { perror("Can't allocate stats response: malloc"); STATS_UNLOCK(); return NULL; } pos = 0; for (i = 0; i < PREFIX_HASH_SIZE; i++) { for (pfs = prefix_stats[i]; NULL != pfs; pfs = pfs->next) { written = snprintf(buf + pos, size-pos, format, pfs->prefix, pfs->num_gets, pfs->num_hits, pfs->num_sets, pfs->num_deletes); pos += written; total_written += written; assert(total_written < size); } } STATS_UNLOCK(); memcpy(buf + pos, "END\r\n", 6); *length = pos + 5; return buf; } #ifdef UNIT_TEST /**************************************************************************** To run unit tests, compile with $(CC) -DUNIT_TEST stats.c assoc.o (need assoc.o to get the hash() function). 
****************************************************************************/ struct settings settings; static char *current_test = ""; static int test_count = 0; static int fail_count = 0; static void fail(char *what) { printf("\tFAIL: %s\n", what); fflush(stdout); fail_count++; } static void test_equals_int(char *what, int a, int b) { test_count++; if (a != b) fail(what); } static void test_equals_ptr(char *what, void *a, void *b) { test_count++; if (a != b) fail(what); } static void test_equals_str(char *what, const char *a, const char *b) { test_count++; if (strcmp(a, b)) fail(what); } static void test_equals_ull(char *what, uint64_t a, uint64_t b) { test_count++; if (a != b) fail(what); } static void test_notequals_ptr(char *what, void *a, void *b) { test_count++; if (a == b) fail(what); } static void test_notnull_ptr(char *what, void *a) { test_count++; if (NULL == a) fail(what); } static void test_prefix_find() { PREFIX_STATS *pfs1, *pfs2; pfs1 = stats_prefix_find("abc"); test_notnull_ptr("initial prefix find", pfs1); test_equals_ull("request counts", 0ULL, pfs1->num_gets + pfs1->num_sets + pfs1->num_deletes + pfs1->num_hits); pfs2 = stats_prefix_find("abc"); test_equals_ptr("find of same prefix", pfs1, pfs2); pfs2 = stats_prefix_find("abc:"); test_equals_ptr("find of same prefix, ignoring delimiter", pfs1, pfs2); pfs2 = stats_prefix_find("abc:d"); test_equals_ptr("find of same prefix, ignoring extra chars", pfs1, pfs2); pfs2 = stats_prefix_find("xyz123"); test_notequals_ptr("find of different prefix", pfs1, pfs2); pfs2 = stats_prefix_find("ab:"); test_notequals_ptr("find of shorter prefix", pfs1, pfs2); } static void test_prefix_record_get() { PREFIX_STATS *pfs; stats_prefix_record_get("abc:123", 0); pfs = stats_prefix_find("abc:123"); test_equals_ull("get count after get #1", 1, pfs->num_gets); test_equals_ull("hit count after get #1", 0, pfs->num_hits); stats_prefix_record_get("abc:456", 0); test_equals_ull("get count after get #2", 2, pfs->num_gets); 
test_equals_ull("hit count after get #2", 0, pfs->num_hits); stats_prefix_record_get("abc:456", 1); test_equals_ull("get count after get #3", 3, pfs->num_gets); test_equals_ull("hit count after get #3", 1, pfs->num_hits); stats_prefix_record_get("def:", 1); test_equals_ull("get count after get #4", 3, pfs->num_gets); test_equals_ull("hit count after get #4", 1, pfs->num_hits); } static void test_prefix_record_delete() { PREFIX_STATS *pfs; stats_prefix_record_delete("abc:123"); pfs = stats_prefix_find("abc:123"); test_equals_ull("get count after delete #1", 0, pfs->num_gets); test_equals_ull("hit count after delete #1", 0, pfs->num_hits); test_equals_ull("delete count after delete #1", 1, pfs->num_deletes); test_equals_ull("set count after delete #1", 0, pfs->num_sets); stats_prefix_record_delete("def:"); test_equals_ull("delete count after delete #2", 1, pfs->num_deletes); } static void test_prefix_record_set() { PREFIX_STATS *pfs; stats_prefix_record_set("abc:123"); pfs = stats_prefix_find("abc:123"); test_equals_ull("get count after set #1", 0, pfs->num_gets); test_equals_ull("hit count after set #1", 0, pfs->num_hits); test_equals_ull("delete count after set #1", 0, pfs->num_deletes); test_equals_ull("set count after set #1", 1, pfs->num_sets); stats_prefix_record_delete("def:"); test_equals_ull("set count after set #2", 1, pfs->num_sets); } static void test_prefix_dump() { int hashval = hash("abc", 3) % PREFIX_HASH_SIZE; char tmp[500]; char *expected; int keynum; int length; test_equals_str("empty stats", "END\r\n", stats_prefix_dump(&length)); test_equals_int("empty stats length", 5, length); stats_prefix_record_set("abc:123"); expected = "PREFIX abc get 0 hit 0 set 1 del 0\r\nEND\r\n"; test_equals_str("stats after set", expected, stats_prefix_dump(&length)); test_equals_int("stats length after set", strlen(expected), length); stats_prefix_record_get("abc:123", 0); expected = "PREFIX abc get 1 hit 0 set 1 del 0\r\nEND\r\n"; test_equals_str("stats after get 
#1", expected, stats_prefix_dump(&length)); test_equals_int("stats length after get #1", strlen(expected), length); stats_prefix_record_get("abc:123", 1); expected = "PREFIX abc get 2 hit 1 set 1 del 0\r\nEND\r\n"; test_equals_str("stats after get #2", expected, stats_prefix_dump(&length)); test_equals_int("stats length after get #2", strlen(expected), length); stats_prefix_record_delete("abc:123"); expected = "PREFIX abc get 2 hit 1 set 1 del 1\r\nEND\r\n"; test_equals_str("stats after del #1", expected, stats_prefix_dump(&length)); test_equals_int("stats length after del #1", strlen(expected), length); /* The order of results might change if we switch hash functions. */ stats_prefix_record_delete("def:123"); expected = "PREFIX abc get 2 hit 1 set 1 del 1\r\n" "PREFIX def get 0 hit 0 set 0 del 1\r\n" "END\r\n"; test_equals_str("stats after del #2", expected, stats_prefix_dump(&length)); test_equals_int("stats length after del #2", strlen(expected), length); /* Find a key that hashes to the same bucket as "abc" */ for (keynum = 0; keynum < PREFIX_HASH_SIZE * 100; keynum++) { snprintf(tmp, sizeof(tmp), "%d", keynum); if (hashval == hash(tmp, strlen(tmp)) % PREFIX_HASH_SIZE) { break; } } stats_prefix_record_set(tmp); snprintf(tmp, sizeof(tmp), "PREFIX %d get 0 hit 0 set 1 del 0\r\n" "PREFIX abc get 2 hit 1 set 1 del 1\r\n" "PREFIX def get 0 hit 0 set 0 del 1\r\n" "END\r\n", keynum); test_equals_str("stats with two stats in one bucket", tmp, stats_prefix_dump(&length)); test_equals_int("stats length with two stats in one bucket", strlen(tmp), length); } static void run_test(char *what, void (*func)(void)) { current_test = what; test_count = fail_count = 0; puts(what); fflush(stdout); stats_prefix_clear(); (func)(); printf("\t%d / %d pass\n", (test_count - fail_count), test_count); } /* In case we're compiled in thread mode */ void mt_stats_lock() { } void mt_stats_unlock() { } main(int argc, char **argv) { stats_prefix_init(); settings.prefix_delimiter = ':'; 
run_test("stats_prefix_find", test_prefix_find); run_test("stats_prefix_record_get", test_prefix_record_get); run_test("stats_prefix_record_delete", test_prefix_record_delete); run_test("stats_prefix_record_set", test_prefix_record_set); run_test("stats_prefix_dump", test_prefix_dump); } #endif
12,481
32.196809
117
c
null
NearPMSW-main/nearpm/shadow/memcached-pmem-sd/slab_automove_extstore.c
/* Copyright 2017 Facebook.
 *
 * Use and distribution licensed under the BSD license.  See
 * the LICENSE file for full text.
 */

/* -*- Mode: C; tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- */
#include "memcached.h"
#include "slab_automove_extstore.h"
#include <stdlib.h>
#include <string.h>

#define MIN_PAGES_FOR_SOURCE 2
#define MIN_PAGES_FOR_RECLAIM 2.5
#define MIN_PAGES_FREE 1.5
#define MEMCHECK_PERIOD 60

/* One sample of per-class activity within the sliding decision window. */
struct window_data {
    uint64_t age;
    uint64_t dirty;
    uint64_t evicted;
    unsigned int excess_free;
};

/* One sample of global page-pool state within the sliding window. */
struct window_global {
    uint32_t pool_low;
    uint32_t pool_high;
};

typedef struct {
    struct window_data *window_data;     /* window_size samples per slab class */
    struct window_global *window_global; /* window_size global-pool samples */
    struct settings *settings;
    uint32_t window_size;
    uint32_t window_cur;                 /* monotonically increasing sample index */
    uint32_t item_size;                  /* ext_item_size threshold for "small" classes */
    rel_time_t last_memcheck_run;
    double max_age_ratio;
    double free_ratio;
    bool pool_filled_once;
    unsigned int free_mem[MAX_NUMBER_OF_SLAB_CLASSES];
    item_stats_automove iam_before[MAX_NUMBER_OF_SLAB_CLASSES];
    item_stats_automove iam_after[MAX_NUMBER_OF_SLAB_CLASSES];
    slab_stats_automove sam_before[MAX_NUMBER_OF_SLAB_CLASSES];
    slab_stats_automove sam_after[MAX_NUMBER_OF_SLAB_CLASSES];
} slab_automove;

/* Allocate and seed the automover state; returns NULL on allocation
 * failure. The "before" stat snapshots are primed so the first run has a
 * baseline to diff against. */
void *slab_automove_extstore_init(struct settings *settings) {
    uint32_t window_size = settings->slab_automove_window;
    double max_age_ratio = settings->slab_automove_ratio;
    slab_automove *a = calloc(1, sizeof(slab_automove));
    if (a == NULL)
        return NULL;
    a->window_data = calloc(window_size * MAX_NUMBER_OF_SLAB_CLASSES,
            sizeof(struct window_data));
    a->window_global = calloc(window_size, sizeof(struct window_global));
    a->window_size = window_size;
    a->max_age_ratio = max_age_ratio;
    a->free_ratio = settings->slab_automove_freeratio;
    a->item_size = settings->ext_item_size;
    a->last_memcheck_run = 0;
    a->settings = settings;
    a->pool_filled_once = false;
    if (a->window_data == NULL || a->window_global == NULL) {
        if (a->window_data)
            free(a->window_data);
        if (a->window_global)
            free(a->window_global);
        free(a);
        return NULL;
    }

    // do a dry run to fill the before structs
    fill_item_stats_automove(a->iam_before);
    fill_slab_stats_automove(a->sam_before);

    return (void *)a;
}

/* Release all automover state.
 * FIX: window_global (allocated in init) was previously leaked here;
 * only window_data and the struct itself were freed. */
void slab_automove_extstore_free(void *arg) {
    slab_automove *a = (slab_automove *)arg;
    free(a->window_data);
    free(a->window_global);
    free(a);
}

/* Sum the per-class window samples in wd[0..size) into *w. */
static void window_sum(struct window_data *wd, struct window_data *w,
        uint32_t size) {
    for (uint32_t x = 0; x < size; x++) {
        struct window_data *d = &wd[x];
        w->age += d->age;
        w->dirty += d->dirty;
        w->evicted += d->evicted;
        w->excess_free += d->excess_free;
    }
}

/* This could potentially merge with above */
static void window_global_sum(struct window_global *wg,
        struct window_global *w, uint32_t size) {
    for (uint32_t x = 0; x < size; x++) {
        struct window_global *d = &wg[x];
        w->pool_high += d->pool_high;
        w->pool_low += d->pool_low;
    }
}

/* Record whether the global page pool is currently below half of, or
 * above, its target (free_mem[0]) into the current window slot.
 * Note: local renamed from 'free' to avoid shadowing libc free(). */
static void global_pool_check(slab_automove *a) {
    bool mem_limit_reached;
    uint32_t pool_target = a->free_mem[0];
    struct window_global *wg = &a->window_global[a->window_cur % a->window_size];
    unsigned int count = global_page_pool_size(&mem_limit_reached);
    memset(wg, 0, sizeof(struct window_global));
    if (!mem_limit_reached)
        return;
    if (count < pool_target / 2) {
        wg->pool_low = 1;
        a->pool_filled_once = true;
    } else if (count > pool_target) {
        wg->pool_high = 1;
    } else {
        a->pool_filled_once = true;
    }
}

/* At most once per MEMCHECK_PERIOD seconds, recompute the per-class free
 * chunk targets (free_mem[]) from the latest stats and push them into
 * settings->ext_free_memchunks once the pool has filled at least once. */
static void memcheck(slab_automove *a) {
    unsigned int total_pages = 0;
    if (current_time < a->last_memcheck_run + MEMCHECK_PERIOD)
        return;
    a->last_memcheck_run = current_time;
    for (int n = 1; n < MAX_NUMBER_OF_SLAB_CLASSES; n++) {
        slab_stats_automove *sam = &a->sam_after[n];
        total_pages += sam->total_pages;
        unsigned int hold_free = (sam->total_pages * sam->chunks_per_page)
            * a->free_ratio;
        if (sam->chunks_per_page * MIN_PAGES_FREE > hold_free)
            hold_free = sam->chunks_per_page * MIN_PAGES_FREE;
        a->free_mem[n] = hold_free;
        if (a->settings->ext_free_memchunks[n] != hold_free &&
                a->pool_filled_once) {
            a->settings->ext_free_memchunks[n] = hold_free;
        }
    }
    // remember to add what remains in global pool.
    total_pages += a->sam_after[0].total_pages;
    a->free_mem[0] = total_pages * a->free_ratio;
}

/* Decide a page move for this tick: on return *src/*dst hold slab class
 * ids (0 == global pool), or -1/-1 when no move should happen. */
void slab_automove_extstore_run(void *arg, int *src, int *dst) {
    slab_automove *a = (slab_automove *)arg;
    int n;
    struct window_data w_sum;
    int oldest = -1;
    uint64_t oldest_age = 0;
    int youngest = -1;
    uint64_t youngest_age = ~0;
    bool too_free = false;
    *src = -1;
    *dst = -1;

    global_pool_check(a);
    struct window_global wg_sum;
    memset(&wg_sum, 0, sizeof(struct window_global));
    window_global_sum(a->window_global, &wg_sum, a->window_size);
    // fill after structs
    fill_item_stats_automove(a->iam_after);
    fill_slab_stats_automove(a->sam_after);
    a->window_cur++;

    memcheck(a);

    // iterate slabs
    for (n = POWER_SMALLEST; n < MAX_NUMBER_OF_SLAB_CLASSES; n++) {
        bool small_slab = a->sam_before[n].chunk_size < a->item_size
            ? true : false;
        int w_offset = n * a->window_size;
        struct window_data *wd = &a->window_data[w_offset +
            (a->window_cur % a->window_size)];
        // summarize the window-up-to-now.
        memset(&w_sum, 0, sizeof(struct window_data));
        window_sum(&a->window_data[w_offset], &w_sum, a->window_size);
        memset(wd, 0, sizeof(struct window_data));

        // if page delta, or evicted delta, mark window dirty
        // (or outofmemory)
        if (a->iam_after[n].evicted - a->iam_before[n].evicted > 0 ||
            a->iam_after[n].outofmemory - a->iam_before[n].outofmemory > 0) {
            wd->evicted = 1;
            wd->dirty = 1;
        }
        if (a->sam_after[n].total_pages - a->sam_before[n].total_pages > 0) {
            wd->dirty = 1;
        }
        // Mark excess free if we're over the free mem limit for too long.
        if (a->sam_after[n].free_chunks > a->free_mem[n] &&
                a->free_mem[n] > 0) {
            wd->excess_free = 1;
        }

        // set age into window
        wd->age = a->iam_after[n].age;

        // grab age as average of window total
        uint64_t age = w_sum.age / a->window_size;

        // if > N free chunks and not dirty, make decision.
        if (a->sam_after[n].free_chunks >
                a->sam_after[n].chunks_per_page * MIN_PAGES_FOR_RECLAIM
                && w_sum.dirty == 0) {
            if (small_slab) {
                *src = n;
                *dst = 0;
                too_free = true;
            } else if (!small_slab && w_sum.excess_free >= a->window_size) {
                // If large slab and free chunks haven't decreased for a full
                // window, reclaim pages.
                *src = n;
                *dst = 0;
                too_free = true;
            }
        }

        // if oldest and have enough pages, is oldest
        if (!small_slab
                && age > oldest_age
                && a->sam_after[n].total_pages > MIN_PAGES_FOR_SOURCE) {
            oldest = n;
            oldest_age = age;
        }

        // don't count as youngest if it hasn't been using new chunks.
        if (!small_slab && age < youngest_age
                && a->sam_after[n].total_pages != 0
                && w_sum.excess_free < a->window_size) {
            youngest = n;
            youngest_age = age;
        }
    }

    memcpy(a->iam_before, a->iam_after,
            sizeof(item_stats_automove) * MAX_NUMBER_OF_SLAB_CLASSES);
    memcpy(a->sam_before, a->sam_after,
            sizeof(slab_stats_automove) * MAX_NUMBER_OF_SLAB_CLASSES);
    // only make decisions if window has filled once.
    if (a->window_cur < a->window_size)
        return;

    if (wg_sum.pool_high >= a->window_size && !wg_sum.pool_low &&
            youngest != -1) {
        /**src = 0;
         *dst = youngest;*/
        /* TODO: No current way to directly assign page from 0 to elsewhere.
         * Do a hack by setting the youngest's free mem limiter to
         * zero and re-running memcheck in the next second.
         * If set rates are very high and the pool is too low, this can bottom
         * out... */
        // schedule a memcheck run for "soon" to keep the limit zeroed out
        // while the pool stays too high. This will also allow multiple
        // classes to zero out over time.
        a->last_memcheck_run = current_time - (MEMCHECK_PERIOD - 2);
        a->settings->ext_free_memchunks[youngest] = 0;
    } else if (!too_free && wg_sum.pool_low && oldest != -1) {
        *src = oldest;
        *dst = 0;
    } else if (!too_free && youngest != -1 && oldest != -1 &&
            youngest != oldest) {
        // if we have a youngest and oldest, and oldest is outside the ratio.
        if (a->sam_after[youngest].free_chunks <= a->free_mem[youngest]
                && youngest_age < ((double)oldest_age * a->max_age_ratio)) {
            *src = oldest;
            *dst = youngest;
        }
    }
    return;
}
9,351
33.895522
98
c
null
NearPMSW-main/nearpm/shadow/memcached-pmem-sd/crawler.c
/* Copyright 2016 Netflix. * * Use and distribution licensed under the BSD license. See * the LICENSE file for full text. */ /* -*- Mode: C; tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- */ #include "memcached.h" #include <sys/stat.h> #include <sys/socket.h> #include <sys/resource.h> #include <fcntl.h> #include <netinet/in.h> #include <errno.h> #include <stdlib.h> #include <stdio.h> #include <signal.h> #include <string.h> #include <time.h> #include <assert.h> #include <unistd.h> #include <poll.h> #define LARGEST_ID POWER_LARGEST typedef struct { void *c; /* original connection structure. still with source thread attached. */ int sfd; /* client fd. */ bipbuf_t *buf; /* output buffer */ char *cbuf; /* current buffer */ } crawler_client_t; typedef struct _crawler_module_t crawler_module_t; typedef void (*crawler_eval_func)(crawler_module_t *cm, item *it, uint32_t hv, int slab_cls); typedef int (*crawler_init_func)(crawler_module_t *cm, void *data); // TODO: init args? typedef void (*crawler_deinit_func)(crawler_module_t *cm); // TODO: extra args? typedef void (*crawler_doneclass_func)(crawler_module_t *cm, int slab_cls); typedef void (*crawler_finalize_func)(crawler_module_t *cm); typedef struct { crawler_init_func init; /* run before crawl starts */ crawler_eval_func eval; /* runs on an item. */ crawler_doneclass_func doneclass; /* runs once per sub-crawler completion. */ crawler_finalize_func finalize; /* runs once when all sub-crawlers are done. 
*/ bool needs_lock; /* whether or not we need the LRU lock held when eval is called */ bool needs_client; /* whether or not to grab onto the remote client */ } crawler_module_reg_t; struct _crawler_module_t { void *data; /* opaque data pointer */ crawler_client_t c; crawler_module_reg_t *mod; }; static int crawler_expired_init(crawler_module_t *cm, void *data); static void crawler_expired_doneclass(crawler_module_t *cm, int slab_cls); static void crawler_expired_finalize(crawler_module_t *cm); static void crawler_expired_eval(crawler_module_t *cm, item *search, uint32_t hv, int i); crawler_module_reg_t crawler_expired_mod = { .init = crawler_expired_init, .eval = crawler_expired_eval, .doneclass = crawler_expired_doneclass, .finalize = crawler_expired_finalize, .needs_lock = true, .needs_client = false }; static void crawler_metadump_eval(crawler_module_t *cm, item *search, uint32_t hv, int i); static void crawler_metadump_finalize(crawler_module_t *cm); crawler_module_reg_t crawler_metadump_mod = { .init = NULL, .eval = crawler_metadump_eval, .doneclass = NULL, .finalize = crawler_metadump_finalize, .needs_lock = false, .needs_client = true }; crawler_module_reg_t *crawler_mod_regs[3] = { &crawler_expired_mod, &crawler_expired_mod, &crawler_metadump_mod }; static int lru_crawler_client_getbuf(crawler_client_t *c); crawler_module_t active_crawler_mod; enum crawler_run_type active_crawler_type; static crawler crawlers[LARGEST_ID]; static int crawler_count = 0; static volatile int do_run_lru_crawler_thread = 0; static int lru_crawler_initialized = 0; static pthread_mutex_t lru_crawler_lock = PTHREAD_MUTEX_INITIALIZER; static pthread_cond_t lru_crawler_cond = PTHREAD_COND_INITIALIZER; #ifdef EXTSTORE /* TODO: pass this around */ static void *storage; #endif /* Will crawl all slab classes a minimum of once per hour */ #define MAX_MAINTCRAWL_WAIT 60 * 60 /*** LRU CRAWLER THREAD ***/ #define LRU_CRAWLER_WRITEBUF 8192 static void lru_crawler_close_client(crawler_client_t 
*c) { //fprintf(stderr, "CRAWLER: Closing client\n"); sidethread_conn_close(c->c); c->c = NULL; c->cbuf = NULL; bipbuf_free(c->buf); c->buf = NULL; } static void lru_crawler_release_client(crawler_client_t *c) { //fprintf(stderr, "CRAWLER: Closing client\n"); redispatch_conn(c->c); c->c = NULL; c->cbuf = NULL; bipbuf_free(c->buf); c->buf = NULL; } static int crawler_expired_init(crawler_module_t *cm, void *data) { struct crawler_expired_data *d; if (data != NULL) { d = data; d->is_external = true; cm->data = data; } else { // allocate data. d = calloc(1, sizeof(struct crawler_expired_data)); if (d == NULL) { return -1; } // init lock. pthread_mutex_init(&d->lock, NULL); d->is_external = false; d->start_time = current_time; cm->data = d; } pthread_mutex_lock(&d->lock); memset(&d->crawlerstats, 0, sizeof(crawlerstats_t) * POWER_LARGEST); for (int x = 0; x < POWER_LARGEST; x++) { d->crawlerstats[x].start_time = current_time; d->crawlerstats[x].run_complete = false; } pthread_mutex_unlock(&d->lock); return 0; } static void crawler_expired_doneclass(crawler_module_t *cm, int slab_cls) { struct crawler_expired_data *d = (struct crawler_expired_data *) cm->data; pthread_mutex_lock(&d->lock); d->crawlerstats[slab_cls].end_time = current_time; d->crawlerstats[slab_cls].run_complete = true; pthread_mutex_unlock(&d->lock); } static void crawler_expired_finalize(crawler_module_t *cm) { struct crawler_expired_data *d = (struct crawler_expired_data *) cm->data; pthread_mutex_lock(&d->lock); d->end_time = current_time; d->crawl_complete = true; pthread_mutex_unlock(&d->lock); if (!d->is_external) { free(d); } } /* I pulled this out to make the main thread clearer, but it reaches into the * main thread's values too much. Should rethink again. 
*/ static void crawler_expired_eval(crawler_module_t *cm, item *search, uint32_t hv, int i) { struct crawler_expired_data *d = (struct crawler_expired_data *) cm->data; pthread_mutex_lock(&d->lock); crawlerstats_t *s = &d->crawlerstats[i]; int is_flushed = item_is_flushed(search); #ifdef EXTSTORE bool is_valid = true; if (search->it_flags & ITEM_HDR) { item_hdr *hdr = (item_hdr *)ITEM_data(search); if (extstore_check(storage, hdr->page_id, hdr->page_version) != 0) is_valid = false; } #endif if ((search->exptime != 0 && search->exptime < current_time) || is_flushed #ifdef EXTSTORE || !is_valid #endif ) { crawlers[i].reclaimed++; s->reclaimed++; if (settings.verbose > 1) { int ii; char *key = ITEM_key(search); fprintf(stderr, "LRU crawler found an expired item (flags: %d, slab: %d): ", search->it_flags, search->slabs_clsid); for (ii = 0; ii < search->nkey; ++ii) { fprintf(stderr, "%c", key[ii]); } fprintf(stderr, "\n"); } if ((search->it_flags & ITEM_FETCHED) == 0 && !is_flushed) { crawlers[i].unfetched++; } #ifdef EXTSTORE STORAGE_delete(storage, search); #endif do_item_unlink_nolock(search, hv); do_item_remove(search); assert(search->slabs_clsid == 0); } else { s->seen++; refcount_decr(search); if (search->exptime == 0) { s->noexp++; } else if (search->exptime - current_time > 3599) { s->ttl_hourplus++; } else { rel_time_t ttl_remain = search->exptime - current_time; int bucket = ttl_remain / 60; if (bucket <= 60) { s->histo[bucket]++; } } } pthread_mutex_unlock(&d->lock); } static void crawler_metadump_eval(crawler_module_t *cm, item *it, uint32_t hv, int i) { //int slab_id = CLEAR_LRU(i); char keybuf[KEY_MAX_LENGTH * 3 + 1]; int is_flushed = item_is_flushed(it); /* Ignore expired content. */ if ((it->exptime != 0 && it->exptime < current_time) || is_flushed) { refcount_decr(it); return; } // TODO: uriencode directly into the buffer. 
uriencode(ITEM_key(it), keybuf, it->nkey, KEY_MAX_LENGTH * 3 + 1); int total = snprintf(cm->c.cbuf, 4096, "key=%s exp=%ld la=%llu cas=%llu fetch=%s cls=%u size=%lu\n", keybuf, (it->exptime == 0) ? -1 : (long)it->exptime + process_started, (unsigned long long)it->time + process_started, (unsigned long long)ITEM_get_cas(it), (it->it_flags & ITEM_FETCHED) ? "yes" : "no", ITEM_clsid(it), (unsigned long) ITEM_ntotal(it)); refcount_decr(it); // TODO: some way of tracking the errors. these are very unlikely though. if (total >= LRU_CRAWLER_WRITEBUF - 1 || total <= 0) { /* Failed to write, don't push it. */ return; } bipbuf_push(cm->c.buf, total); } static void crawler_metadump_finalize(crawler_module_t *cm) { if (cm->c.c != NULL) { // Ensure space for final message. lru_crawler_client_getbuf(&cm->c); memcpy(cm->c.cbuf, "END\r\n", 5); bipbuf_push(cm->c.buf, 5); } } static int lru_crawler_poll(crawler_client_t *c) { unsigned char *data; unsigned int data_size = 0; struct pollfd to_poll[1]; to_poll[0].fd = c->sfd; to_poll[0].events = POLLOUT; int ret = poll(to_poll, 1, 1000); if (ret < 0) { // fatal. return -1; } if (ret == 0) return 0; if (to_poll[0].revents & POLLIN) { char buf[1]; int res = read(c->sfd, buf, 1); if (res == 0 || (res == -1 && (errno != EAGAIN && errno != EWOULDBLOCK))) { lru_crawler_close_client(c); return -1; } } if ((data = bipbuf_peek_all(c->buf, &data_size)) != NULL) { if (to_poll[0].revents & (POLLHUP|POLLERR)) { lru_crawler_close_client(c); return -1; } else if (to_poll[0].revents & POLLOUT) { int total = write(c->sfd, data, data_size); if (total == -1) { if (errno != EAGAIN && errno != EWOULDBLOCK) { lru_crawler_close_client(c); return -1; } } else if (total == 0) { lru_crawler_close_client(c); return -1; } else { bipbuf_poll(c->buf, total); } } } return 0; } /* Grab some space to work with, if none exists, run the poll() loop and wait * for it to clear up or close. * Return NULL if closed. 
*/ static int lru_crawler_client_getbuf(crawler_client_t *c) { void *buf = NULL; if (c->c == NULL) return -1; /* not enough space. */ while ((buf = bipbuf_request(c->buf, LRU_CRAWLER_WRITEBUF)) == NULL) { // TODO: max loops before closing. int ret = lru_crawler_poll(c); if (ret < 0) return ret; } c->cbuf = buf; return 0; } static void lru_crawler_class_done(int i) { crawlers[i].it_flags = 0; crawler_count--; do_item_unlinktail_q((item *)&crawlers[i]); do_item_stats_add_crawl(i, crawlers[i].reclaimed, crawlers[i].unfetched, crawlers[i].checked); pthread_mutex_unlock(&lru_locks[i]); if (active_crawler_mod.mod->doneclass != NULL) active_crawler_mod.mod->doneclass(&active_crawler_mod, i); } static void *item_crawler_thread(void *arg) { int i; int crawls_persleep = settings.crawls_persleep; pthread_mutex_lock(&lru_crawler_lock); pthread_cond_signal(&lru_crawler_cond); settings.lru_crawler = true; if (settings.verbose > 2) fprintf(stderr, "Starting LRU crawler background thread\n"); while (do_run_lru_crawler_thread) { pthread_cond_wait(&lru_crawler_cond, &lru_crawler_lock); while (crawler_count) { item *search = NULL; void *hold_lock = NULL; for (i = POWER_SMALLEST; i < LARGEST_ID; i++) { if (crawlers[i].it_flags != 1) { continue; } /* Get memory from bipbuf, if client has no space, flush. */ if (active_crawler_mod.c.c != NULL) { int ret = lru_crawler_client_getbuf(&active_crawler_mod.c); if (ret != 0) { lru_crawler_class_done(i); continue; } } else if (active_crawler_mod.mod->needs_client) { lru_crawler_class_done(i); continue; } pthread_mutex_lock(&lru_locks[i]); search = do_item_crawl_q((item *)&crawlers[i]); if (search == NULL || (crawlers[i].remaining && --crawlers[i].remaining < 1)) { if (settings.verbose > 2) fprintf(stderr, "Nothing left to crawl for %d\n", i); lru_crawler_class_done(i); continue; } uint32_t hv = hash(ITEM_key(search), search->nkey); /* Attempt to hash item lock the "search" item. 
If locked, no * other callers can incr the refcount */ if ((hold_lock = item_trylock(hv)) == NULL) { pthread_mutex_unlock(&lru_locks[i]); continue; } /* Now see if the item is refcount locked */ if (refcount_incr(search) != 2) { refcount_decr(search); if (hold_lock) item_trylock_unlock(hold_lock); pthread_mutex_unlock(&lru_locks[i]); continue; } crawlers[i].checked++; /* Frees the item or decrements the refcount. */ /* Interface for this could improve: do the free/decr here * instead? */ if (!active_crawler_mod.mod->needs_lock) { pthread_mutex_unlock(&lru_locks[i]); } active_crawler_mod.mod->eval(&active_crawler_mod, search, hv, i); if (hold_lock) item_trylock_unlock(hold_lock); if (active_crawler_mod.mod->needs_lock) { pthread_mutex_unlock(&lru_locks[i]); } if (crawls_persleep-- <= 0 && settings.lru_crawler_sleep) { pthread_mutex_unlock(&lru_crawler_lock); usleep(settings.lru_crawler_sleep); pthread_mutex_lock(&lru_crawler_lock); crawls_persleep = settings.crawls_persleep; } else if (!settings.lru_crawler_sleep) { // TODO: only cycle lock every N? 
pthread_mutex_unlock(&lru_crawler_lock); pthread_mutex_lock(&lru_crawler_lock); } } } if (active_crawler_mod.mod != NULL) { if (active_crawler_mod.mod->finalize != NULL) active_crawler_mod.mod->finalize(&active_crawler_mod); while (active_crawler_mod.c.c != NULL && bipbuf_used(active_crawler_mod.c.buf)) { lru_crawler_poll(&active_crawler_mod.c); } // Double checking in case the client closed during the poll if (active_crawler_mod.c.c != NULL) { lru_crawler_release_client(&active_crawler_mod.c); } active_crawler_mod.mod = NULL; } if (settings.verbose > 2) fprintf(stderr, "LRU crawler thread sleeping\n"); STATS_LOCK(); stats_state.lru_crawler_running = false; STATS_UNLOCK(); } pthread_mutex_unlock(&lru_crawler_lock); if (settings.verbose > 2) fprintf(stderr, "LRU crawler thread stopping\n"); return NULL; } static pthread_t item_crawler_tid; int stop_item_crawler_thread(void) { int ret; pthread_mutex_lock(&lru_crawler_lock); do_run_lru_crawler_thread = 0; pthread_cond_signal(&lru_crawler_cond); pthread_mutex_unlock(&lru_crawler_lock); if ((ret = pthread_join(item_crawler_tid, NULL)) != 0) { fprintf(stderr, "Failed to stop LRU crawler thread: %s\n", strerror(ret)); return -1; } settings.lru_crawler = false; return 0; } /* Lock dance to "block" until thread is waiting on its condition: * caller locks mtx. caller spawns thread. * thread blocks on mutex. * caller waits on condition, releases lock. * thread gets lock, sends signal. * caller can't wait, as thread has lock. * thread waits on condition, releases lock * caller wakes on condition, gets lock. * caller immediately releases lock. * thread is now safely waiting on condition before the caller returns. 
*/ int start_item_crawler_thread(void) { int ret; if (settings.lru_crawler) return -1; pthread_mutex_lock(&lru_crawler_lock); do_run_lru_crawler_thread = 1; if ((ret = pthread_create(&item_crawler_tid, NULL, item_crawler_thread, NULL)) != 0) { fprintf(stderr, "Can't create LRU crawler thread: %s\n", strerror(ret)); pthread_mutex_unlock(&lru_crawler_lock); return -1; } /* Avoid returning until the crawler has actually started */ pthread_cond_wait(&lru_crawler_cond, &lru_crawler_lock); pthread_mutex_unlock(&lru_crawler_lock); return 0; } /* 'remaining' is passed in so the LRU maintainer thread can scrub the whole * LRU every time. */ static int do_lru_crawler_start(uint32_t id, uint32_t remaining) { uint32_t sid = id; int starts = 0; pthread_mutex_lock(&lru_locks[sid]); if (crawlers[sid].it_flags == 0) { if (settings.verbose > 2) fprintf(stderr, "Kicking LRU crawler off for LRU %u\n", sid); crawlers[sid].nbytes = 0; crawlers[sid].nkey = 0; crawlers[sid].it_flags = 1; /* For a crawler, this means enabled. 
*/ crawlers[sid].next = 0; crawlers[sid].prev = 0; crawlers[sid].time = 0; crawlers[sid].remaining = remaining; crawlers[sid].slabs_clsid = sid; crawlers[sid].reclaimed = 0; crawlers[sid].unfetched = 0; crawlers[sid].checked = 0; do_item_linktail_q((item *)&crawlers[sid]); crawler_count++; starts++; } pthread_mutex_unlock(&lru_locks[sid]); if (starts) { STATS_LOCK(); stats_state.lru_crawler_running = true; stats.lru_crawler_starts++; STATS_UNLOCK(); } return starts; } static int lru_crawler_set_client(crawler_module_t *cm, void *c, const int sfd) { crawler_client_t *crawlc = &cm->c; if (crawlc->c != NULL) { return -1; } crawlc->c = c; crawlc->sfd = sfd; crawlc->buf = bipbuf_new(1024 * 128); if (crawlc->buf == NULL) { return -2; } return 0; } int lru_crawler_start(uint8_t *ids, uint32_t remaining, const enum crawler_run_type type, void *data, void *c, const int sfd) { int starts = 0; bool is_running; static rel_time_t block_ae_until = 0; pthread_mutex_lock(&lru_crawler_lock); STATS_LOCK(); is_running = stats_state.lru_crawler_running; STATS_UNLOCK(); if (is_running && !(type == CRAWLER_AUTOEXPIRE && active_crawler_type == CRAWLER_AUTOEXPIRE)) { pthread_mutex_unlock(&lru_crawler_lock); block_ae_until = current_time + 60; return -1; } if (type == CRAWLER_AUTOEXPIRE && block_ae_until > current_time) { pthread_mutex_unlock(&lru_crawler_lock); return -1; } /* Configure the module */ if (!is_running) { assert(crawler_mod_regs[type] != NULL); active_crawler_mod.mod = crawler_mod_regs[type]; active_crawler_type = type; if (active_crawler_mod.mod->init != NULL) { active_crawler_mod.mod->init(&active_crawler_mod, data); } if (active_crawler_mod.mod->needs_client) { if (c == NULL || sfd == 0) { pthread_mutex_unlock(&lru_crawler_lock); return -2; } if (lru_crawler_set_client(&active_crawler_mod, c, sfd) != 0) { pthread_mutex_unlock(&lru_crawler_lock); return -2; } } } /* we allow the autocrawler to restart sub-LRU's before completion */ for (int sid = POWER_SMALLEST; sid < 
POWER_LARGEST; sid++) { if (ids[sid]) starts += do_lru_crawler_start(sid, remaining); } if (starts) { pthread_cond_signal(&lru_crawler_cond); } pthread_mutex_unlock(&lru_crawler_lock); return starts; } /* * Also only clear the crawlerstats once per sid. */ enum crawler_result_type lru_crawler_crawl(char *slabs, const enum crawler_run_type type, void *c, const int sfd) { char *b = NULL; uint32_t sid = 0; int starts = 0; uint8_t tocrawl[POWER_LARGEST]; /* FIXME: I added this while debugging. Don't think it's needed? */ memset(tocrawl, 0, sizeof(uint8_t) * POWER_LARGEST); if (strcmp(slabs, "all") == 0) { for (sid = 0; sid < POWER_LARGEST; sid++) { tocrawl[sid] = 1; } } else { for (char *p = strtok_r(slabs, ",", &b); p != NULL; p = strtok_r(NULL, ",", &b)) { if (!safe_strtoul(p, &sid) || sid < POWER_SMALLEST || sid >= MAX_NUMBER_OF_SLAB_CLASSES) { pthread_mutex_unlock(&lru_crawler_lock); return CRAWLER_BADCLASS; } tocrawl[sid | TEMP_LRU] = 1; tocrawl[sid | HOT_LRU] = 1; tocrawl[sid | WARM_LRU] = 1; tocrawl[sid | COLD_LRU] = 1; } } starts = lru_crawler_start(tocrawl, settings.lru_crawler_tocrawl, type, NULL, c, sfd); if (starts == -1) { return CRAWLER_RUNNING; } else if (starts == -2) { return CRAWLER_ERROR; /* FIXME: not very helpful. */ } else if (starts) { return CRAWLER_OK; } else { return CRAWLER_NOTSTARTED; } } /* If we hold this lock, crawler can't wake up or move */ void lru_crawler_pause(void) { pthread_mutex_lock(&lru_crawler_lock); } void lru_crawler_resume(void) { pthread_mutex_unlock(&lru_crawler_lock); } int init_lru_crawler(void *arg) { if (lru_crawler_initialized == 0) { #ifdef EXTSTORE storage = arg; #endif if (pthread_cond_init(&lru_crawler_cond, NULL) != 0) { fprintf(stderr, "Can't initialize lru crawler condition\n"); return -1; } pthread_mutex_init(&lru_crawler_lock, NULL); active_crawler_mod.c.c = NULL; active_crawler_mod.mod = NULL; active_crawler_mod.data = NULL; lru_crawler_initialized = 1; } return 0; }
22,607
31.623377
93
c
null
NearPMSW-main/nearpm/shadow/memcached-pmem-sd/crawler.h
#ifndef CRAWLER_H #define CRAWLER_H typedef struct { uint64_t histo[61]; uint64_t ttl_hourplus; uint64_t noexp; uint64_t reclaimed; uint64_t seen; rel_time_t start_time; rel_time_t end_time; bool run_complete; } crawlerstats_t; struct crawler_expired_data { pthread_mutex_t lock; crawlerstats_t crawlerstats[POWER_LARGEST]; /* redundant with crawlerstats_t so we can get overall start/stop/done */ rel_time_t start_time; rel_time_t end_time; bool crawl_complete; bool is_external; /* whether this was an alloc local or remote to the module. */ }; enum crawler_result_type { CRAWLER_OK=0, CRAWLER_RUNNING, CRAWLER_BADCLASS, CRAWLER_NOTSTARTED, CRAWLER_ERROR }; int start_item_crawler_thread(void); int stop_item_crawler_thread(void); int init_lru_crawler(void *arg); enum crawler_result_type lru_crawler_crawl(char *slabs, enum crawler_run_type, void *c, const int sfd); int lru_crawler_start(uint8_t *ids, uint32_t remaining, const enum crawler_run_type type, void *data, void *c, const int sfd); void lru_crawler_pause(void); void lru_crawler_resume(void); #endif
1,191
29.564103
103
h
null
NearPMSW-main/nearpm/shadow/memcached-pmem-sd/sizes.c
#include <stdio.h> #include "memcached.h" static void display(const char *name, size_t size) { printf("%s\t%d\n", name, (int)size); } int main(int argc, char **argv) { display("Slab Stats", sizeof(struct slab_stats)); display("Thread stats", sizeof(struct thread_stats) - (200 * sizeof(struct slab_stats))); display("Global stats", sizeof(struct stats)); display("Settings", sizeof(struct settings)); display("Item (no cas)", sizeof(item)); display("Item (cas)", sizeof(item) + sizeof(uint64_t)); #ifdef EXTSTORE display("extstore header", sizeof(item_hdr)); #endif display("Libevent thread", sizeof(LIBEVENT_THREAD) - sizeof(struct thread_stats)); display("Connection", sizeof(conn)); printf("----------------------------------------\n"); display("libevent thread cumulative", sizeof(LIBEVENT_THREAD)); display("Thread stats cumulative\t", sizeof(struct thread_stats)); return 0; }
982
28.787879
70
c
null
NearPMSW-main/nearpm/shadow/memcached-pmem-sd/slab_automove.h
#ifndef SLAB_AUTOMOVE_H
#define SLAB_AUTOMOVE_H

/* default automove functions */

/* Allocate the automover's private state from current settings;
 * returns an opaque handle passed to the run/free calls. */
void *slab_automove_init(struct settings *settings);
/* Release state returned by slab_automove_init(). */
void slab_automove_free(void *arg);
/* Run one decision pass; *src and *dst receive the slab classes to
 * move a page from/to (sentinel semantics defined by the caller —
 * TODO confirm against slabs.c). */
void slab_automove_run(void *arg, int *src, int *dst);

/* Function-pointer bundle so alternate automover implementations can
 * be registered in place of the defaults above. */
typedef void *(*slab_automove_init_func)(struct settings *settings);
typedef void (*slab_automove_free_func)(void *arg);
typedef void (*slab_automove_run_func)(void *arg, int *src, int *dst);

typedef struct {
    slab_automove_init_func init;
    slab_automove_free_func free;
    slab_automove_run_func run;
} slab_automove_reg_t;

#endif
568
27.45
70
h
null
NearPMSW-main/nearpm/shadow/memcached-pmem-sd/assoc.c
/* -*- Mode: C; tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- */
/*
 * Hash table
 *
 * The hash function used here is by Bob Jenkins, 1996:
 *    <http://burtleburtle.net/bob/hash/doobs.html>
 *    "By Bob Jenkins, 1996.  bob_jenkins@burtleburtle.net.
 *    You may use this code any way you wish, private, educational,
 *    or commercial.  It's free."
 *
 * The rest of the file is licensed under the BSD license.  See LICENSE.
 */

#include "memcached.h"
#include <sys/stat.h>
#include <sys/socket.h>
#include <sys/resource.h>
#include <signal.h>
#include <fcntl.h>
#include <netinet/in.h>
#include <errno.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <assert.h>
#include <pthread.h>

/* Signalled to wake the maintenance thread when expansion is requested. */
static pthread_cond_t maintenance_cond = PTHREAD_COND_INITIALIZER;
static pthread_mutex_t maintenance_lock = PTHREAD_MUTEX_INITIALIZER;
/* Guards hash_items, the global item counter. */
static pthread_mutex_t hash_items_counter_lock = PTHREAD_MUTEX_INITIALIZER;

typedef unsigned long int ub4;   /* unsigned 4-byte quantities */
typedef unsigned char ub1;       /* unsigned 1-byte quantities */

/* how many powers of 2's worth of buckets we use */
unsigned int hashpower = HASHPOWER_DEFAULT;

#define hashsize(n) ((ub4)1<<(n))
#define hashmask(n) (hashsize(n)-1)

/* Main hash table. This is where we look except during expansion. */
static item** primary_hashtable = 0;

/*
 * Previous hash table. During expansion, we look here for keys that haven't
 * been moved over to the primary yet.
 */
static item** old_hashtable = 0;

/* Number of items in the hash table. */
static unsigned int hash_items = 0;

/* Flag: Are we in the middle of expanding now? */
static bool expanding = false;
static bool started_expanding = false;

/*
 * During expansion we migrate values with bucket granularity; this is how
 * far we've gotten so far. Ranges from 0 .. hashsize(hashpower - 1) - 1.
 */
static unsigned int expand_bucket = 0;

/* Allocate the primary table.  A non-zero hashtable_init overrides the
 * default hash power.  Exits the process if allocation fails. */
void assoc_init(const int hashtable_init) {
    if (hashtable_init) {
        hashpower = hashtable_init;
    }
    primary_hashtable = calloc(hashsize(hashpower), sizeof(void *));
    if (! primary_hashtable) {
        fprintf(stderr, "Failed to init hashtable.\n");
        exit(EXIT_FAILURE);
    }
    STATS_LOCK();
    stats_state.hash_power_level = hashpower;
    stats_state.hash_bytes = hashsize(hashpower) * sizeof(void *);
    STATS_UNLOCK();
}

/* Look up an item by key (hv is the precomputed hash); returns NULL if
 * not found.  During expansion, buckets not yet migrated are read from
 * the old table. */
item *assoc_find(const char *key, const size_t nkey, const uint32_t hv) {
    item *it;
    unsigned int oldbucket;

    if (expanding &&
        (oldbucket = (hv & hashmask(hashpower - 1))) >= expand_bucket)
    {
        it = old_hashtable[oldbucket];
    } else {
        it = primary_hashtable[hv & hashmask(hashpower)];
    }

    item *ret = NULL;
    int depth = 0;  /* chain length walked, reported to the DTrace probe */
    while (it) {
        if ((nkey == it->nkey) && (memcmp(key, ITEM_key(it), nkey) == 0)) {
            ret = it;
            break;
        }
        it = it->h_next;
        ++depth;
    }
    MEMCACHED_ASSOC_FIND(key, nkey, depth);
    return ret;
}

/* returns the address of the item pointer before the key.  if *item == 0,
   the item wasn't found */
static item** _hashitem_before (const char *key, const size_t nkey, const uint32_t hv) {
    item **pos;
    unsigned int oldbucket;

    if (expanding &&
        (oldbucket = (hv & hashmask(hashpower - 1))) >= expand_bucket)
    {
        pos = &old_hashtable[oldbucket];
    } else {
        pos = &primary_hashtable[hv & hashmask(hashpower)];
    }

    while (*pos && ((nkey != (*pos)->nkey) || memcmp(key, ITEM_key(*pos), nkey))) {
        pos = &(*pos)->h_next;
    }
    return pos;
}

/* grows the hashtable to the next power of 2. */
static void assoc_expand(void) {
    old_hashtable = primary_hashtable;

    primary_hashtable = calloc(hashsize(hashpower + 1), sizeof(void *));
    if (primary_hashtable) {
        if (settings.verbose > 1)
            fprintf(stderr, "Hash table expansion starting\n");
        hashpower++;
        expanding = true;
        expand_bucket = 0;
        STATS_LOCK();
        stats_state.hash_power_level = hashpower;
        stats_state.hash_bytes += hashsize(hashpower) * sizeof(void *);
        stats_state.hash_is_expanding = true;
        STATS_UNLOCK();
    } else {
        primary_hashtable = old_hashtable;
        /* Bad news, but we can keep running. */
    }
}

/* Wake the maintenance thread to begin an expansion (idempotent). */
static void assoc_start_expand(void) {
    if (started_expanding)
        return;

    started_expanding = true;
    pthread_cond_signal(&maintenance_cond);
}

/* Note: this isn't an assoc_update.  The key must not already exist to call this */
int assoc_insert(item *it, const uint32_t hv) {
    unsigned int oldbucket;

//    assert(assoc_find(ITEM_key(it), it->nkey) == 0);  /* shouldn't have duplicately named things defined */

    if (expanding &&
        (oldbucket = (hv & hashmask(hashpower - 1))) >= expand_bucket)
    {
        it->h_next = old_hashtable[oldbucket];
        old_hashtable[oldbucket] = it;
    } else {
        it->h_next = primary_hashtable[hv & hashmask(hashpower)];
        primary_hashtable[hv & hashmask(hashpower)] = it;
    }

    pthread_mutex_lock(&hash_items_counter_lock);
    hash_items++;
    /* Trigger expansion once the load factor exceeds 1.5. */
    if (! expanding &&
          hash_items > (hashsize(hashpower) * 3) / 2 &&
          hashpower < HASHPOWER_MAX) {
        assoc_start_expand();
    }
    pthread_mutex_unlock(&hash_items_counter_lock);

    MEMCACHED_ASSOC_INSERT(ITEM_key(it), it->nkey, hash_items);
    return 1;
}

/* Unlink the item with this key from its chain.  The caller must know
 * the key exists (see the trailing assert). */
void assoc_delete(const char *key, const size_t nkey, const uint32_t hv) {
    item **before = _hashitem_before(key, nkey, hv);

    if (*before) {
        item *nxt;
        pthread_mutex_lock(&hash_items_counter_lock);
        hash_items--;
        pthread_mutex_unlock(&hash_items_counter_lock);
        /* The DTrace probe cannot be triggered as the last instruction
         * due to possible tail-optimization by the compiler
         */
        MEMCACHED_ASSOC_DELETE(key, nkey, hash_items);
        nxt = (*before)->h_next;
        (*before)->h_next = 0;   /* probably pointless, but whatever. */
        *before = nxt;
        return;
    }
    /* Note:  we never actually get here.  the callers don't delete things
       they can't find. */
    assert(*before != 0);
}

static volatile int do_run_maintenance_thread = 1;

#define DEFAULT_HASH_BULK_MOVE 1
int hash_bulk_move = DEFAULT_HASH_BULK_MOVE;

/* Background thread: waits for assoc_start_expand(), then migrates
 * hash_bulk_move buckets at a time from old_hashtable to
 * primary_hashtable until the expansion completes. */
static void *assoc_maintenance_thread(void *arg) {

    mutex_lock(&maintenance_lock);
    while (do_run_maintenance_thread) {
        int ii = 0;

        /* There is only one expansion thread, so no need to global lock. */
        for (ii = 0; ii < hash_bulk_move && expanding; ++ii) {
            item *it, *next;
            unsigned int bucket;
            void *item_lock = NULL;

            /* bucket = hv & hashmask(hashpower) =>the bucket of hash table
             * is the lowest N bits of the hv, and the bucket of item_locks is
             * also the lowest M bits of hv, and N is greater than M.
             * So we can process expanding with only one item_lock. cool!
             */
            if ((item_lock = item_trylock(expand_bucket))) {
                    /* Rehash every item in this old bucket into the new table. */
                    for (it = old_hashtable[expand_bucket]; NULL != it; it = next) {
                        next = it->h_next;
                        bucket = hash(ITEM_key(it), it->nkey) & hashmask(hashpower);
                        it->h_next = primary_hashtable[bucket];
                        primary_hashtable[bucket] = it;
                    }

                    old_hashtable[expand_bucket] = NULL;

                    expand_bucket++;
                    if (expand_bucket == hashsize(hashpower - 1)) {
                        /* All old buckets migrated: finish the expansion. */
                        expanding = false;
                        free(old_hashtable);
                        STATS_LOCK();
                        stats_state.hash_bytes -= hashsize(hashpower - 1) * sizeof(void *);
                        stats_state.hash_is_expanding = false;
                        STATS_UNLOCK();
                        if (settings.verbose > 1)
                            fprintf(stderr, "Hash table expansion done\n");
                    }

            } else {
                /* Bucket is busy; back off briefly and retry. */
                usleep(10*1000);
            }

            if (item_lock) {
                item_trylock_unlock(item_lock);
                item_lock = NULL;
            }
        }

        if (!expanding) {
            /* We are done expanding.. just wait for next invocation */
            started_expanding = false;
            pthread_cond_wait(&maintenance_cond, &maintenance_lock);
            /* assoc_expand() swaps out the hash table entirely, so we need
             * all threads to not hold any references related to the hash
             * table while this happens.
             * This is instead of a more complex, possibly slower algorithm to
             * allow dynamic hash table expansion without causing significant
             * wait times.
             */
            pause_threads(PAUSE_ALL_THREADS);
            assoc_expand();
            pause_threads(RESUME_ALL_THREADS);
        }
    }
    return NULL;
}

static pthread_t maintenance_tid;

/* Spawn the expansion thread; MEMCACHED_HASH_BULK_MOVE can override the
 * per-iteration bucket count.  Returns 0 on success, -1 on failure. */
int start_assoc_maintenance_thread() {
    int ret;
    char *env = getenv("MEMCACHED_HASH_BULK_MOVE");
    if (env != NULL) {
        hash_bulk_move = atoi(env);
        if (hash_bulk_move == 0) {
            hash_bulk_move = DEFAULT_HASH_BULK_MOVE;
        }
    }
    pthread_mutex_init(&maintenance_lock, NULL);
    if ((ret = pthread_create(&maintenance_tid, NULL,
                              assoc_maintenance_thread, NULL)) != 0) {
        fprintf(stderr, "Can't create thread: %s\n", strerror(ret));
        return -1;
    }
    return 0;
}

/* Ask the maintenance thread to exit and join it. */
void stop_assoc_maintenance_thread() {
    mutex_lock(&maintenance_lock);
    do_run_maintenance_thread = 0;
    pthread_cond_signal(&maintenance_cond);
    mutex_unlock(&maintenance_lock);

    /* Wait for the maintenance thread to stop */
    pthread_join(maintenance_tid, NULL);
}
9,916
31.198052
109
c
null
NearPMSW-main/nearpm/shadow/memcached-pmem-sd/linux_priv.c
#include "config.h"
#include <seccomp.h>
#include <errno.h>
#include <stdlib.h>
#include "memcached.h"

// In the future when the system is more tested this could be switched
// to SCMP_ACT_KILL instead.
#define DENY_ACTION SCMP_ACT_ERRNO(EACCES)

/* Install a seccomp syscall allow-list for the daemon's main context.
 * Any syscall not explicitly allowed fails with EACCES (DENY_ACTION).
 * Failures while building or loading the filter are ignored: the
 * process simply keeps running unfiltered. */
void drop_privileges(void) {
    scmp_filter_ctx ctx = seccomp_init(DENY_ACTION);
    if (ctx == NULL) {
        return;
    }

    int rc = 0;
    rc |= seccomp_rule_add(ctx, SCMP_ACT_ALLOW, SCMP_SYS(sigreturn), 0);
    rc |= seccomp_rule_add(ctx, SCMP_ACT_ALLOW, SCMP_SYS(futex), 0);
    rc |= seccomp_rule_add(ctx, SCMP_ACT_ALLOW, SCMP_SYS(epoll_wait), 0);
    rc |= seccomp_rule_add(ctx, SCMP_ACT_ALLOW, SCMP_SYS(accept4), 0);
    rc |= seccomp_rule_add(ctx, SCMP_ACT_ALLOW, SCMP_SYS(accept), 0);
    rc |= seccomp_rule_add(ctx, SCMP_ACT_ALLOW, SCMP_SYS(write), 0);
    rc |= seccomp_rule_add(ctx, SCMP_ACT_ALLOW, SCMP_SYS(fstat), 0);
    rc |= seccomp_rule_add(ctx, SCMP_ACT_ALLOW, SCMP_SYS(mmap), 0);
    rc |= seccomp_rule_add(ctx, SCMP_ACT_ALLOW, SCMP_SYS(munmap), 0);
    rc |= seccomp_rule_add(ctx, SCMP_ACT_ALLOW, SCMP_SYS(shmctl), 0);
    rc |= seccomp_rule_add(ctx, SCMP_ACT_ALLOW, SCMP_SYS(exit_group), 0);
#ifdef MEMCACHED_DEBUG
    /* Debug builds need basic file I/O for test harness support. */
    rc |= seccomp_rule_add(ctx, SCMP_ACT_ALLOW, SCMP_SYS(open), 0);
    rc |= seccomp_rule_add(ctx, SCMP_ACT_ALLOW, SCMP_SYS(fcntl), 0);
    rc |= seccomp_rule_add(ctx, SCMP_ACT_ALLOW, SCMP_SYS(read), 0);
    rc |= seccomp_rule_add(ctx, SCMP_ACT_ALLOW, SCMP_SYS(lseek), 0);
    rc |= seccomp_rule_add(ctx, SCMP_ACT_ALLOW, SCMP_SYS(close), 0);
    rc |= seccomp_rule_add(ctx, SCMP_ACT_ALLOW, SCMP_SYS(getpid), 0);
#endif

    if (rc != 0) {
        goto fail;
    }

    rc = seccomp_load(ctx);
    if (rc < 0) {
        goto fail;
    }

fail:
    seccomp_release(ctx);
}

/* Install the (tighter) seccomp allow-list for worker threads.
 * Same error policy as drop_privileges(): best effort, silent on
 * failure. */
void drop_worker_privileges(void) {
    scmp_filter_ctx ctx = seccomp_init(DENY_ACTION);
    if (ctx == NULL) {
        return;
    }

    int rc = 0;
    rc |= seccomp_rule_add(ctx, SCMP_ACT_ALLOW, SCMP_SYS(sigreturn), 0);
    rc |= seccomp_rule_add(ctx, SCMP_ACT_ALLOW, SCMP_SYS(futex), 0);
    rc |= seccomp_rule_add(ctx, SCMP_ACT_ALLOW, SCMP_SYS(epoll_wait), 0);
    rc |= seccomp_rule_add(ctx, SCMP_ACT_ALLOW, SCMP_SYS(epoll_ctl), 0);
    rc |= seccomp_rule_add(ctx, SCMP_ACT_ALLOW, SCMP_SYS(read), 0);
    rc |= seccomp_rule_add(ctx, SCMP_ACT_ALLOW, SCMP_SYS(mprotect), 0);
    rc |= seccomp_rule_add(ctx, SCMP_ACT_ALLOW, SCMP_SYS(getpeername), 0);
    rc |= seccomp_rule_add(ctx, SCMP_ACT_ALLOW, SCMP_SYS(close), 0);
    rc |= seccomp_rule_add(ctx, SCMP_ACT_ALLOW, SCMP_SYS(sendmsg), 0);
    rc |= seccomp_rule_add(ctx, SCMP_ACT_ALLOW, SCMP_SYS(getrusage), 0);
    rc |= seccomp_rule_add(ctx, SCMP_ACT_ALLOW, SCMP_SYS(mmap), 0);
    rc |= seccomp_rule_add(ctx, SCMP_ACT_ALLOW, SCMP_SYS(mremap), 0);
    rc |= seccomp_rule_add(ctx, SCMP_ACT_ALLOW, SCMP_SYS(munmap), 0);
    rc |= seccomp_rule_add(ctx, SCMP_ACT_ALLOW, SCMP_SYS(recvfrom), 0);
    // for spawning the LRU crawler
    rc |= seccomp_rule_add(ctx, SCMP_ACT_ALLOW, SCMP_SYS(clone), 0);
    rc |= seccomp_rule_add(ctx, SCMP_ACT_ALLOW, SCMP_SYS(set_robust_list), 0);
    rc |= seccomp_rule_add(ctx, SCMP_ACT_ALLOW, SCMP_SYS(madvise), 0);
    rc |= seccomp_rule_add(ctx, SCMP_ACT_ALLOW, SCMP_SYS(exit), 0);
    // stat
    rc |= seccomp_rule_add(ctx, SCMP_ACT_ALLOW, SCMP_SYS(getsockname), 0);
    rc |= seccomp_rule_add(ctx, SCMP_ACT_ALLOW, SCMP_SYS(getpid), 0);

    if (settings.shutdown_command) {
        /* Extra syscalls used by the clean-shutdown path. */
        rc |= seccomp_rule_add(ctx, SCMP_ACT_ALLOW, SCMP_SYS(tgkill), 0);
        rc |= seccomp_rule_add(ctx, SCMP_ACT_ALLOW, SCMP_SYS(exit_group), 0);
        rc |= seccomp_rule_add(ctx, SCMP_ACT_ALLOW, SCMP_SYS(fstat), 0);
        rc |= seccomp_rule_add(ctx, SCMP_ACT_ALLOW, SCMP_SYS(rt_sigprocmask), 0);
        rc |= seccomp_rule_add(ctx, SCMP_ACT_ALLOW, SCMP_SYS(getpid), 0);
        rc |= seccomp_rule_add(ctx, SCMP_ACT_ALLOW, SCMP_SYS(gettid), 0);
    }

    if (settings.relaxed_privileges) {
        rc |= seccomp_rule_add(ctx, SCMP_ACT_ALLOW, SCMP_SYS(open), 0);
        rc |= seccomp_rule_add(ctx, SCMP_ACT_ALLOW, SCMP_SYS(fcntl), 0);
        rc |= seccomp_rule_add(ctx, SCMP_ACT_ALLOW, SCMP_SYS(lseek), 0);
        rc |= seccomp_rule_add(ctx, SCMP_ACT_ALLOW, SCMP_SYS(write), 0);
    } else {
        /* write() is restricted to fd 1 (first argument == 1). */
        rc |= seccomp_rule_add(ctx, SCMP_ACT_ALLOW, SCMP_SYS(write), 1,
                SCMP_A0(SCMP_CMP_EQ, 1));
    }

    if (rc != 0) {
        goto fail;
    }

    rc = seccomp_load(ctx);
    if (rc < 0) {
        goto fail;
    }

fail:
    seccomp_release(ctx);
}
4,495
38.438596
97
c
null
NearPMSW-main/nearpm/shadow/memcached-pmem-sd/itoa_ljust.h
#ifndef ITOA_LJUST_H
#define ITOA_LJUST_H

//=== itoa_ljust.h - Fast integer to ascii conversion
//
// Fast and simple integer to ASCII conversion:
//
//   - 32 and 64-bit integers
//   - signed and unsigned
//   - user supplied buffer must be large enough for all decimal digits
//     in value plus minus sign if negative
//   - left-justified
//   - NUL terminated
//   - return value is pointer to NUL terminator
//
// Copyright (c) 2016 Arturo Martin-de-Nicolas
// arturomdn@gmail.com
// https://github.com/amdn/itoa_ljust/
//===----------------------------------------------------------------------===//

#include <stdint.h>

/* Worst-case buffer sizes (digits + optional sign + NUL):
 * 11 for uint32, 12 for int32, 21 for uint64 and int64. */
char* itoa_u32(uint32_t u, char* buffer);
char* itoa_32( int32_t i, char* buffer);
char* itoa_u64(uint64_t u, char* buffer);
char* itoa_64( int64_t i, char* buffer);

#endif // ITOA_LJUST_H
822
27.37931
80
h
null
NearPMSW-main/nearpm/shadow/memcached-pmem-sd/murmur3_hash.h
//----------------------------------------------------------------------------- // MurmurHash3 was written by Austin Appleby, and is placed in the public // domain. The author hereby disclaims copyright to this source code. #ifndef MURMURHASH3_H #define MURMURHASH3_H //----------------------------------------------------------------------------- // Platform-specific functions and macros #include <stdint.h> #include <stddef.h> //----------------------------------------------------------------------------- uint32_t MurmurHash3_x86_32(const void *key, size_t length); //----------------------------------------------------------------------------- #endif // MURMURHASH3_H
681
33.1
79
h
null
NearPMSW-main/nearpm/shadow/memcached-pmem-sd/itoa_ljust.c
//=== itoa_ljust.cpp - Fast integer to ascii conversion --*- C++ -*-// // // Substantially simplified (and slightly faster) version // based on the following functions in Google's protocol buffers: // // FastInt32ToBufferLeft() // FastUInt32ToBufferLeft() // FastInt64ToBufferLeft() // FastUInt64ToBufferLeft() // // Differences: // 1) Greatly simplified // 2) Avoids GOTO statements - uses "switch" instead and relies on // compiler constant folding and propagation for high performance // 3) Avoids unary minus of signed types - undefined behavior if value // is INT_MIN in platforms using two's complement representation // 4) Uses memcpy to store 2 digits at a time - lets the compiler // generate a 2-byte load/store in platforms that support // unaligned access, this is faster (and less code) than explicitly // loading and storing each byte // // Copyright (c) 2016 Arturo Martin-de-Nicolas // arturomdn@gmail.com // https://github.com/amdn/itoa_ljust/ // // Released under the BSD 3-Clause License, see Google's original copyright // and license below. //===----------------------------------------------------------------------===// // Protocol Buffers - Google's data interchange format // Copyright 2008 Google Inc. All rights reserved. // https://developers.google.com/protocol-buffers/ // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. 
// // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. //===----------------------------------------------------------------------===// #include "itoa_ljust.h" #include <string.h> static const char lut[201] = "0001020304050607080910111213141516171819" "2021222324252627282930313233343536373839" "4041424344454647484950515253545556575859" "6061626364656667686970717273747576777879" "8081828384858687888990919293949596979899"; #define dd(u) ((const uint16_t)(lut[u])) static inline char* out2(const int d, char* p) { memcpy(p, &((uint16_t *)lut)[d], 2); return p + 2; } static inline char* out1(const char in, char* p) { memcpy(p, &in, 1); return p + 1; } static inline int digits( uint32_t u, unsigned k, int* d, char** p, int n ) { if (u < k*10) { *d = u / k; *p = out1('0'+*d, *p); --n; } return n; } static inline char* itoa(uint32_t u, char* p, int d, int n) { switch(n) { case 10: d = u / 100000000; p = out2( d, p ); case 9: u -= d * 100000000; case 8: d = u / 1000000; p = out2( d, p ); case 7: u -= d * 1000000; case 6: d = u / 10000; p = out2( d, p ); case 5: u -= d * 10000; case 4: d = u / 100; p = out2( d, p ); case 3: u -= d * 100; case 2: d = u / 1; p = out2( d, p ); case 1: ; } *p = '\0'; return p; } char* itoa_u32(uint32_t u, char* p) { int d = 
0,n; if (u >=100000000) n = digits(u, 100000000, &d, &p, 10); else if (u < 100) n = digits(u, 1, &d, &p, 2); else if (u < 10000) n = digits(u, 100, &d, &p, 4); else if (u < 1000000) n = digits(u, 10000, &d, &p, 6); else n = digits(u, 1000000, &d, &p, 8); return itoa( u, p, d, n ); } char* itoa_32(int32_t i, char* p) { uint32_t u = i; if (i < 0) { *p++ = '-'; u = -u; } return itoa_u32(u, p); } char* itoa_u64(uint64_t u, char* p) { int d; uint32_t lower = (uint32_t)u; if (lower == u) return itoa_u32(lower, p); uint64_t upper = u / 1000000000; p = itoa_u64(upper, p); lower = u - (upper * 1000000000); d = lower / 100000000; p = out1('0'+d,p); return itoa( lower, p, d, 9 ); } char* itoa_64(int64_t i, char* p) { uint64_t u = i; if (i < 0) { *p++ = '-'; u = -u; } return itoa_u64(u, p); }
5,242
33.953333
80
c
null
NearPMSW-main/nearpm/shadow/memcached-pmem-sd/util.h
/* fast-enough functions for uriencoding strings. */
void uriencode_init(void);
/* Percent-encode src (srclen bytes) into dst; returns true on success
 * (false presumably means dst was too small — verify against util.c). */
bool uriencode(const char *src, char *dst, const size_t srclen, const size_t dstlen);

/*
 * Wrappers around strtoull/strtoll that are safer and easier to
 * use.  For tests and assumptions, see internal_tests.c.
 *
 * str   a NULL-terminated base decimal 10 unsigned integer
 * out   out parameter, if conversion succeeded
 *
 * returns true if conversion succeeded.
 */
bool safe_strtoull(const char *str, uint64_t *out);
bool safe_strtoll(const char *str, int64_t *out);
bool safe_strtoul(const char *str, uint32_t *out);
bool safe_strtol(const char *str, int32_t *out);
bool safe_strtod(const char *str, double *out);

#ifndef HAVE_HTONLL
extern uint64_t htonll(uint64_t);
extern uint64_t ntohll(uint64_t);
#endif

/* BUG FIX: this guard previously tested `__GCC`, a macro no compiler
 * predefines, so the printf-format checking attribute below was never
 * enabled.  `__GNUC__` is the macro GCC (and Clang) actually define.
 * This only affects compile-time diagnostics, not runtime behavior. */
#ifdef __GNUC__
# define __gcc_attribute__ __attribute__
#else
# define __gcc_attribute__(x)
#endif

/**
 * Vararg variant of perror that makes for more useful error messages
 * when reporting with parameters.
 *
 * @param fmt a printf format
 */
void vperror(const char *fmt, ...)
    __gcc_attribute__ ((format (printf, 1, 2)));
1,127
27.923077
85
h
null
NearPMSW-main/nearpm/shadow/memcached-pmem-sd/protocol_binary.h
/*
 * Copyright (c) <2008>, Sun Microsystems, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *  * Neither the name of the  nor the
 *    names of its contributors may be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY SUN MICROSYSTEMS, INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL SUN MICROSYSTEMS, INC. BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Summary: Constants used by to implement the binary protocol.
 *
 * Copy: See Copyright for the status of this software.
 *
 * Author: Trond Norbye <trond.norbye@sun.com>
 */

#ifndef PROTOCOL_BINARY_H
#define PROTOCOL_BINARY_H

/**
 * This file contains definitions of the constants and packet formats
 * defined in the binary specification. Please note that you _MUST_ remember
 * to convert each multibyte field to / from network byte order to / from
 * host order.
 */
#ifdef __cplusplus
extern "C"
{
#endif

    /**
     * Definition of the legal "magic" values used in a packet.
     * See section 3.1 Magic byte
     */
    typedef enum {
        PROTOCOL_BINARY_REQ = 0x80,
        PROTOCOL_BINARY_RES = 0x81
    } protocol_binary_magic;

    /**
     * Definition of the valid response status numbers.
     * See section 3.2 Response Status
     */
    typedef enum {
        PROTOCOL_BINARY_RESPONSE_SUCCESS = 0x00,
        PROTOCOL_BINARY_RESPONSE_KEY_ENOENT = 0x01,
        PROTOCOL_BINARY_RESPONSE_KEY_EEXISTS = 0x02,
        PROTOCOL_BINARY_RESPONSE_E2BIG = 0x03,
        PROTOCOL_BINARY_RESPONSE_EINVAL = 0x04,
        PROTOCOL_BINARY_RESPONSE_NOT_STORED = 0x05,
        PROTOCOL_BINARY_RESPONSE_DELTA_BADVAL = 0x06,
        PROTOCOL_BINARY_RESPONSE_AUTH_ERROR = 0x20,
        PROTOCOL_BINARY_RESPONSE_AUTH_CONTINUE = 0x21,
        PROTOCOL_BINARY_RESPONSE_UNKNOWN_COMMAND = 0x81,
        PROTOCOL_BINARY_RESPONSE_ENOMEM = 0x82
    } protocol_binary_response_status;

    /**
     * Definition of the different command opcodes.
     * See section 3.3 Command Opcodes
     */
    typedef enum {
        PROTOCOL_BINARY_CMD_GET = 0x00,
        PROTOCOL_BINARY_CMD_SET = 0x01,
        PROTOCOL_BINARY_CMD_ADD = 0x02,
        PROTOCOL_BINARY_CMD_REPLACE = 0x03,
        PROTOCOL_BINARY_CMD_DELETE = 0x04,
        PROTOCOL_BINARY_CMD_INCREMENT = 0x05,
        PROTOCOL_BINARY_CMD_DECREMENT = 0x06,
        PROTOCOL_BINARY_CMD_QUIT = 0x07,
        PROTOCOL_BINARY_CMD_FLUSH = 0x08,
        PROTOCOL_BINARY_CMD_GETQ = 0x09,
        PROTOCOL_BINARY_CMD_NOOP = 0x0a,
        PROTOCOL_BINARY_CMD_VERSION = 0x0b,
        PROTOCOL_BINARY_CMD_GETK = 0x0c,
        PROTOCOL_BINARY_CMD_GETKQ = 0x0d,
        PROTOCOL_BINARY_CMD_APPEND = 0x0e,
        PROTOCOL_BINARY_CMD_PREPEND = 0x0f,
        PROTOCOL_BINARY_CMD_STAT = 0x10,
        PROTOCOL_BINARY_CMD_SETQ = 0x11,
        PROTOCOL_BINARY_CMD_ADDQ = 0x12,
        PROTOCOL_BINARY_CMD_REPLACEQ = 0x13,
        PROTOCOL_BINARY_CMD_DELETEQ = 0x14,
        PROTOCOL_BINARY_CMD_INCREMENTQ = 0x15,
        PROTOCOL_BINARY_CMD_DECREMENTQ = 0x16,
        PROTOCOL_BINARY_CMD_QUITQ = 0x17,
        PROTOCOL_BINARY_CMD_FLUSHQ = 0x18,
        PROTOCOL_BINARY_CMD_APPENDQ = 0x19,
        PROTOCOL_BINARY_CMD_PREPENDQ = 0x1a,
        PROTOCOL_BINARY_CMD_TOUCH = 0x1c,
        PROTOCOL_BINARY_CMD_GAT = 0x1d,
        PROTOCOL_BINARY_CMD_GATQ = 0x1e,
        PROTOCOL_BINARY_CMD_GATK = 0x23,
        PROTOCOL_BINARY_CMD_GATKQ = 0x24,

        PROTOCOL_BINARY_CMD_SASL_LIST_MECHS = 0x20,
        PROTOCOL_BINARY_CMD_SASL_AUTH = 0x21,
        PROTOCOL_BINARY_CMD_SASL_STEP = 0x22,

        /* These commands are used for range operations and exist within
         * this header for use in other projects.  Range operations are
         * not expected to be implemented in the memcached server itself.
         */
        PROTOCOL_BINARY_CMD_RGET      = 0x30,
        PROTOCOL_BINARY_CMD_RSET      = 0x31,
        PROTOCOL_BINARY_CMD_RSETQ     = 0x32,
        PROTOCOL_BINARY_CMD_RAPPEND   = 0x33,
        PROTOCOL_BINARY_CMD_RAPPENDQ  = 0x34,
        PROTOCOL_BINARY_CMD_RPREPEND  = 0x35,
        PROTOCOL_BINARY_CMD_RPREPENDQ = 0x36,
        PROTOCOL_BINARY_CMD_RDELETE   = 0x37,
        PROTOCOL_BINARY_CMD_RDELETEQ  = 0x38,
        PROTOCOL_BINARY_CMD_RINCR     = 0x39,
        PROTOCOL_BINARY_CMD_RINCRQ    = 0x3a,
        PROTOCOL_BINARY_CMD_RDECR     = 0x3b,
        PROTOCOL_BINARY_CMD_RDECRQ    = 0x3c
        /* End Range operations */
    } protocol_binary_command;

    /**
     * Definition of the data types in the packet
     * See section 3.4 Data Types
     */
    typedef enum {
        PROTOCOL_BINARY_RAW_BYTES = 0x00
    } protocol_binary_datatypes;

    /**
     * Definition of the header structure for a request packet.
     * See section 2
     */
    typedef union {
        struct {
            uint8_t magic;
            uint8_t opcode;
            uint16_t keylen;
            uint8_t extlen;
            uint8_t datatype;
            uint16_t reserved;
            uint32_t bodylen;
            uint32_t opaque;
            uint64_t cas;
        } request;
        uint8_t bytes[24];
    } protocol_binary_request_header;

    /**
     * Definition of the header structure for a response packet.
     * See section 2
     */
    typedef union {
        struct {
            uint8_t magic;
            uint8_t opcode;
            uint16_t keylen;
            uint8_t extlen;
            uint8_t datatype;
            uint16_t status;
            uint32_t bodylen;
            uint32_t opaque;
            uint64_t cas;
        } response;
        uint8_t bytes[24];
    } protocol_binary_response_header;

    /**
     * Definition of a request-packet containing no extras
     */
    typedef union {
        struct {
            protocol_binary_request_header header;
        } message;
        uint8_t bytes[sizeof(protocol_binary_request_header)];
    } protocol_binary_request_no_extras;

    /**
     * Definition of a response-packet containing no extras
     */
    typedef union {
        struct {
            protocol_binary_response_header header;
        } message;
        uint8_t bytes[sizeof(protocol_binary_response_header)];
    } protocol_binary_response_no_extras;

    /**
     * Definition of the packet used by the get, getq, getk and getkq command.
     * See section 4
     */
    typedef protocol_binary_request_no_extras protocol_binary_request_get;
    typedef protocol_binary_request_no_extras protocol_binary_request_getq;
    typedef protocol_binary_request_no_extras protocol_binary_request_getk;
    typedef protocol_binary_request_no_extras protocol_binary_request_getkq;

    /**
     * Definition of the packet returned from a successful get, getq, getk and
     * getkq.
     * See section 4
     */
    typedef union {
        struct {
            protocol_binary_response_header header;
            struct {
                uint32_t flags;
            } body;
        } message;
        uint8_t bytes[sizeof(protocol_binary_response_header) + 4];
    } protocol_binary_response_get;

    typedef protocol_binary_response_get protocol_binary_response_getq;
    typedef protocol_binary_response_get protocol_binary_response_getk;
    typedef protocol_binary_response_get protocol_binary_response_getkq;

    /**
     * Definition of the packet used by the delete command
     * See section 4
     */
    typedef protocol_binary_request_no_extras protocol_binary_request_delete;

    /**
     * Definition of the packet returned by the delete command
     * See section 4
     */
    typedef protocol_binary_response_no_extras protocol_binary_response_delete;

    /**
     * Definition of the packet used by the flush command
     * See section 4
     * Please note that the expiration field is optional, so remember to see
     * check the header.bodysize to see if it is present.
     */
    typedef union {
        struct {
            protocol_binary_request_header header;
            struct {
                uint32_t expiration;
            } body;
        } message;
        uint8_t bytes[sizeof(protocol_binary_request_header) + 4];
    } protocol_binary_request_flush;

    /**
     * Definition of the packet returned by the flush command
     * See section 4
     */
    typedef protocol_binary_response_no_extras protocol_binary_response_flush;

    /**
     * Definition of the packet used by set, add and replace
     * See section 4
     */
    typedef union {
        struct {
            protocol_binary_request_header header;
            struct {
                uint32_t flags;
                uint32_t expiration;
            } body;
        } message;
        uint8_t bytes[sizeof(protocol_binary_request_header) + 8];
    } protocol_binary_request_set;
    typedef protocol_binary_request_set protocol_binary_request_add;
    typedef protocol_binary_request_set protocol_binary_request_replace;

    /**
     * Definition of the packet returned by set, add and replace
     * See section 4
     */
    typedef protocol_binary_response_no_extras protocol_binary_response_set;
    typedef protocol_binary_response_no_extras protocol_binary_response_add;
    typedef protocol_binary_response_no_extras protocol_binary_response_replace;

    /**
     * Definition of the noop packet
     * See section 4
     */
    typedef protocol_binary_request_no_extras protocol_binary_request_noop;

    /**
     * Definition of the packet returned by the noop command
     * See section 4
     */
    typedef protocol_binary_response_no_extras protocol_binary_response_noop;

    /**
     * Definition of the structure used by the increment and decrement
     * command.
     * See section 4
     */
    typedef union {
        struct {
            protocol_binary_request_header header;
            struct {
                uint64_t delta;
                uint64_t initial;
                uint32_t expiration;
            } body;
        } message;
        uint8_t bytes[sizeof(protocol_binary_request_header) + 20];
    } protocol_binary_request_incr;
    typedef protocol_binary_request_incr protocol_binary_request_decr;

    /**
     * Definition of the response from an incr or decr command
     * command.
     * See section 4
     */
    typedef union {
        struct {
            protocol_binary_response_header header;
            struct {
                uint64_t value;
            } body;
        } message;
        uint8_t bytes[sizeof(protocol_binary_response_header) + 8];
    } protocol_binary_response_incr;
    typedef protocol_binary_response_incr protocol_binary_response_decr;

    /**
     * Definition of the quit
     * See section 4
     */
    typedef protocol_binary_request_no_extras protocol_binary_request_quit;

    /**
     * Definition of the packet returned by the quit command
     * See section 4
     */
    typedef protocol_binary_response_no_extras protocol_binary_response_quit;

    /**
     * Definition of the packet used by append and prepend command
     * See section 4
     */
    typedef protocol_binary_request_no_extras protocol_binary_request_append;
    typedef protocol_binary_request_no_extras protocol_binary_request_prepend;

    /**
     * Definition of the packet returned from a successful append or prepend
     * See section 4
     */
    typedef protocol_binary_response_no_extras protocol_binary_response_append;
    typedef protocol_binary_response_no_extras protocol_binary_response_prepend;

    /**
     * Definition of the packet used by the version command
     * See section 4
     */
    typedef protocol_binary_request_no_extras protocol_binary_request_version;

    /**
     * Definition of the packet returned from a successful version command
     * See section 4
     */
    typedef protocol_binary_response_no_extras protocol_binary_response_version;

    /**
     * Definition of the packet used by the stats command.
     * See section 4
     */
    typedef protocol_binary_request_no_extras protocol_binary_request_stats;

    /**
     * Definition of the packet returned from a successful stats command
     * See section 4
     */
    typedef protocol_binary_response_no_extras protocol_binary_response_stats;

    /**
     * Definition of the packet used by the touch command.
     */
    typedef union {
        struct {
            protocol_binary_request_header header;
            struct {
                uint32_t expiration;
            } body;
        } message;
        uint8_t bytes[sizeof(protocol_binary_request_header) + 4];
    } protocol_binary_request_touch;

    /**
     * Definition of the packet returned from the touch command
     */
    typedef protocol_binary_response_no_extras protocol_binary_response_touch;

    /**
     * Definition of the packet used by the GAT(Q) command.
     */
    typedef union {
        struct {
            protocol_binary_request_header header;
            struct {
                uint32_t expiration;
            } body;
        } message;
        uint8_t bytes[sizeof(protocol_binary_request_header) + 4];
    } protocol_binary_request_gat;

    typedef protocol_binary_request_gat protocol_binary_request_gatq;
    typedef protocol_binary_request_gat protocol_binary_request_gatk;
    typedef protocol_binary_request_gat protocol_binary_request_gatkq;

    /**
     * Definition of the packet returned from the GAT(Q)
     */
    typedef protocol_binary_response_get protocol_binary_response_gat;
    typedef protocol_binary_response_get protocol_binary_response_gatq;
    typedef protocol_binary_response_get protocol_binary_response_gatk;
    typedef protocol_binary_response_get protocol_binary_response_gatkq;

    /**
     * Definition of a request for a range operation.
     * See http://code.google.com/p/memcached/wiki/RangeOps
     *
     * These types are used for range operations and exist within
     * this header for use in other projects.  Range operations are
     * not expected to be implemented in the memcached server itself.
     */
    typedef union {
        /* NOTE(review): the struct embeds a *response* header while the
         * bytes[] size uses the *request* header — both are 24 bytes, so
         * the union size is unaffected, but this looks like a historical
         * copy-paste; confirm against the upstream RangeOps spec. */
        struct {
            protocol_binary_response_header header;
            struct {
                uint16_t size;
                uint8_t  reserved;
                uint8_t  flags;
                uint32_t max_results;
            } body;
        } message;
        uint8_t bytes[sizeof(protocol_binary_request_header) + 4];
    } protocol_binary_request_rangeop;

    typedef protocol_binary_request_rangeop protocol_binary_request_rget;
    typedef protocol_binary_request_rangeop protocol_binary_request_rset;
    typedef protocol_binary_request_rangeop protocol_binary_request_rsetq;
    typedef protocol_binary_request_rangeop protocol_binary_request_rappend;
    typedef protocol_binary_request_rangeop protocol_binary_request_rappendq;
    typedef protocol_binary_request_rangeop protocol_binary_request_rprepend;
    typedef protocol_binary_request_rangeop protocol_binary_request_rprependq;
    typedef protocol_binary_request_rangeop protocol_binary_request_rdelete;
    typedef protocol_binary_request_rangeop protocol_binary_request_rdeleteq;
    typedef protocol_binary_request_rangeop protocol_binary_request_rincr;
    typedef protocol_binary_request_rangeop protocol_binary_request_rincrq;
    typedef protocol_binary_request_rangeop protocol_binary_request_rdecr;
    typedef protocol_binary_request_rangeop protocol_binary_request_rdecrq;

#ifdef __cplusplus
}
#endif
#endif /* PROTOCOL_BINARY_H */
16,525
34.087049
80
h
null
NearPMSW-main/nearpm/shadow/memcached-pmem-sd/timedrun.c
#include <stdio.h> #include <stdlib.h> #include <unistd.h> #include <signal.h> #include <sys/wait.h> #include <sysexits.h> #include <assert.h> static int caught = 0; static void caught_signal(int which) { caught = which; } static int wait_for_process(pid_t pid) { int rv = EX_SOFTWARE; int stats = 0; int i = 0; struct sigaction sig_handler; sig_handler.sa_handler = caught_signal; sig_handler.sa_flags = 0; sigaction(SIGALRM, &sig_handler, NULL); sigaction(SIGHUP, &sig_handler, NULL); sigaction(SIGINT, &sig_handler, NULL); sigaction(SIGTERM, &sig_handler, NULL); sigaction(SIGPIPE, &sig_handler, NULL); /* Loop forever waiting for the process to quit */ for (i = 0; ;i++) { pid_t p = waitpid(pid, &stats, 0); if (p == pid) { /* child exited. Let's get out of here */ rv = WIFEXITED(stats) ? WEXITSTATUS(stats) : (0x80 | WTERMSIG(stats)); break; } else { int sig = 0; switch (i) { case 0: /* On the first iteration, pass the signal through */ sig = caught > 0 ? caught : SIGTERM; if (caught == SIGALRM) { fprintf(stderr, "Timeout.. killing the process\n"); } break; case 1: sig = SIGTERM; break; default: sig = SIGKILL; break; } if (kill(pid, sig) < 0) { /* Kill failed. Must have lost the process. :/ */ perror("lost child when trying to kill"); } /* Wait up to 5 seconds for the pid */ alarm(5); } } return rv; } static int spawn_and_wait(char **argv) { int rv = EX_SOFTWARE; pid_t pid = fork(); switch (pid) { case -1: perror("fork"); rv = EX_OSERR; break; /* NOTREACHED */ case 0: execvp(argv[0], argv); perror("exec"); rv = EX_SOFTWARE; break; /* NOTREACHED */ default: rv = wait_for_process(pid); } return rv; } int main(int argc, char **argv) { int naptime = 0; assert(argc > 2); naptime = atoi(argv[1]); assert(naptime > 0 && naptime < 1800); alarm(naptime); return spawn_and_wait(argv+2); }
2,416
22.466019
70
c
null
NearPMSW-main/nearpm/shadow/memcached-pmem-sd/hash.c
/* -*- Mode: C; tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- */
#include "memcached.h"
#include "jenkins_hash.h"
#include "murmur3_hash.h"

/*
 * Select the global hash function at startup.
 * Sets the `hash' function pointer and records the algorithm name in
 * settings.hash_algorithm.  Returns 0 on success, -1 for an unknown type.
 */
int hash_init(enum hashfunc_type type) {
    if (type == JENKINS_HASH) {
        hash = jenkins_hash;
        settings.hash_algorithm = "jenkins";
    } else if (type == MURMUR3_HASH) {
        hash = MurmurHash3_x86_32;
        settings.hash_algorithm = "murmur3";
    } else {
        return -1;
    }
    return 0;
}
539
23.545455
77
c
null
NearPMSW-main/nearpm/shadow/memcached-pmem-sd/items.h
/* * Copyright 2018 Lenovo * * Licensed under the BSD-3 license. see LICENSE.Lenovo.txt for full text */ /* * Note: * Codes enclosed in `#ifdef PSLAB' and `#endif' are added by Lenovo for * persistent memory support */ #define HOT_LRU 0 #define WARM_LRU 64 #define COLD_LRU 128 #define TEMP_LRU 192 #define CLEAR_LRU(id) (id & ~(3<<6)) #define GET_LRU(id) (id & (3<<6)) /* See items.c */ uint64_t get_cas_id(void); /*@null@*/ item *do_item_alloc(char *key, const size_t nkey, const unsigned int flags, const rel_time_t exptime, const int nbytes); item_chunk *do_item_alloc_chunk(item_chunk *ch, const size_t bytes_remain); item *do_item_alloc_pull(const size_t ntotal, const unsigned int id); void item_free(item *it); bool item_size_ok(const size_t nkey, const int flags, const int nbytes); int do_item_link(item *it, const uint32_t hv); /** may fail if transgresses limits */ #ifdef PSLAB void do_item_relink(item *it, const uint32_t hv); #endif void do_item_unlink(item *it, const uint32_t hv); void do_item_unlink_nolock(item *it, const uint32_t hv); void do_item_remove(item *it); void do_item_update(item *it); /** update LRU time to current and reposition */ void do_item_update_nolock(item *it); int do_item_replace(item *it, item *new_it, const uint32_t hv); int item_is_flushed(item *it); void do_item_linktail_q(item *it); void do_item_unlinktail_q(item *it); item *do_item_crawl_q(item *it); void *item_lru_bump_buf_create(void); #define LRU_PULL_EVICT 1 #define LRU_PULL_CRAWL_BLOCKS 2 #define LRU_PULL_RETURN_ITEM 4 /* fill info struct if available */ struct lru_pull_tail_return { item *it; uint32_t hv; }; int lru_pull_tail(const int orig_id, const int cur_lru, const uint64_t total_bytes, const uint8_t flags, const rel_time_t max_age, struct lru_pull_tail_return *ret_it); /*@null@*/ char *item_cachedump(const unsigned int slabs_clsid, const unsigned int limit, unsigned int *bytes); void item_stats(ADD_STAT add_stats, void *c); void do_item_stats_add_crawl(const int i, const 
uint64_t reclaimed, const uint64_t unfetched, const uint64_t checked); void item_stats_totals(ADD_STAT add_stats, void *c); /*@null@*/ void item_stats_sizes(ADD_STAT add_stats, void *c); void item_stats_sizes_init(void); void item_stats_sizes_enable(ADD_STAT add_stats, void *c); void item_stats_sizes_disable(ADD_STAT add_stats, void *c); void item_stats_sizes_add(item *it); void item_stats_sizes_remove(item *it); bool item_stats_sizes_status(void); /* stats getter for slab automover */ typedef struct { int64_t evicted; int64_t outofmemory; uint32_t age; } item_stats_automove; void fill_item_stats_automove(item_stats_automove *am); item *do_item_get(const char *key, const size_t nkey, const uint32_t hv, conn *c, const bool do_update); item *do_item_touch(const char *key, const size_t nkey, uint32_t exptime, const uint32_t hv, conn *c); void item_stats_reset(void); extern pthread_mutex_t lru_locks[POWER_LARGEST]; int start_lru_maintainer_thread(void *arg); int stop_lru_maintainer_thread(void); int init_lru_maintainer(void); void lru_maintainer_pause(void); void lru_maintainer_resume(void); void *lru_bump_buf_create(void); #ifdef EXTSTORE #define STORAGE_delete(e, it) \ do { \ if (it->it_flags & ITEM_HDR) { \ item_hdr *hdr = (item_hdr *)ITEM_data(it); \ extstore_delete(e, hdr->page_id, hdr->page_version, \ 1, ITEM_ntotal(it)); \ } \ } while (0) #else #define STORAGE_delete(...) #endif
3,550
31.577982
120
h
null
NearPMSW-main/nearpm/shadow/memcached-pmem-sd/trace.h
#ifndef TRACE_H
#define TRACE_H

#ifdef ENABLE_DTRACE
#include "memcached_dtrace.h"
#else
/*
 * DTrace support disabled: every probe macro expands to nothing and every
 * *_ENABLED() predicate is the constant 0, so probe call sites compile away
 * entirely.
 */
#define MEMCACHED_ASSOC_DELETE(arg0, arg1, arg2)
#define MEMCACHED_ASSOC_DELETE_ENABLED() (0)
#define MEMCACHED_ASSOC_FIND(arg0, arg1, arg2)
#define MEMCACHED_ASSOC_FIND_ENABLED() (0)
#define MEMCACHED_ASSOC_INSERT(arg0, arg1, arg2)
#define MEMCACHED_ASSOC_INSERT_ENABLED() (0)
#define MEMCACHED_COMMAND_ADD(arg0, arg1, arg2, arg3, arg4)
#define MEMCACHED_COMMAND_ADD_ENABLED() (0)
#define MEMCACHED_COMMAND_APPEND(arg0, arg1, arg2, arg3, arg4)
#define MEMCACHED_COMMAND_APPEND_ENABLED() (0)
#define MEMCACHED_COMMAND_CAS(arg0, arg1, arg2, arg3, arg4)
#define MEMCACHED_COMMAND_CAS_ENABLED() (0)
#define MEMCACHED_COMMAND_DECR(arg0, arg1, arg2, arg3)
#define MEMCACHED_COMMAND_DECR_ENABLED() (0)
#define MEMCACHED_COMMAND_DELETE(arg0, arg1, arg2)
#define MEMCACHED_COMMAND_DELETE_ENABLED() (0)
#define MEMCACHED_COMMAND_GET(arg0, arg1, arg2, arg3, arg4)
#define MEMCACHED_COMMAND_GET_ENABLED() (0)
#define MEMCACHED_COMMAND_TOUCH(arg0, arg1, arg2, arg3, arg4)
#define MEMCACHED_COMMAND_TOUCH_ENABLED() (0)
#define MEMCACHED_COMMAND_INCR(arg0, arg1, arg2, arg3)
#define MEMCACHED_COMMAND_INCR_ENABLED() (0)
#define MEMCACHED_COMMAND_PREPEND(arg0, arg1, arg2, arg3, arg4)
#define MEMCACHED_COMMAND_PREPEND_ENABLED() (0)
#define MEMCACHED_COMMAND_REPLACE(arg0, arg1, arg2, arg3, arg4)
#define MEMCACHED_COMMAND_REPLACE_ENABLED() (0)
#define MEMCACHED_COMMAND_SET(arg0, arg1, arg2, arg3, arg4)
#define MEMCACHED_COMMAND_SET_ENABLED() (0)
#define MEMCACHED_CONN_ALLOCATE(arg0)
#define MEMCACHED_CONN_ALLOCATE_ENABLED() (0)
#define MEMCACHED_CONN_CREATE(arg0)
#define MEMCACHED_CONN_CREATE_ENABLED() (0)
#define MEMCACHED_CONN_DESTROY(arg0)
#define MEMCACHED_CONN_DESTROY_ENABLED() (0)
#define MEMCACHED_CONN_DISPATCH(arg0, arg1)
#define MEMCACHED_CONN_DISPATCH_ENABLED() (0)
#define MEMCACHED_CONN_RELEASE(arg0)
#define MEMCACHED_CONN_RELEASE_ENABLED() (0)
#define MEMCACHED_ITEM_LINK(arg0, arg1, arg2)
#define MEMCACHED_ITEM_LINK_ENABLED() (0)
#define MEMCACHED_ITEM_REMOVE(arg0, arg1, arg2)
#define MEMCACHED_ITEM_REMOVE_ENABLED() (0)
#define MEMCACHED_ITEM_REPLACE(arg0, arg1, arg2, arg3, arg4, arg5)
#define MEMCACHED_ITEM_REPLACE_ENABLED() (0)
#define MEMCACHED_ITEM_UNLINK(arg0, arg1, arg2)
#define MEMCACHED_ITEM_UNLINK_ENABLED() (0)
#define MEMCACHED_ITEM_UPDATE(arg0, arg1, arg2)
#define MEMCACHED_ITEM_UPDATE_ENABLED() (0)
#define MEMCACHED_PROCESS_COMMAND_END(arg0, arg1, arg2)
#define MEMCACHED_PROCESS_COMMAND_END_ENABLED() (0)
#define MEMCACHED_PROCESS_COMMAND_START(arg0, arg1, arg2)
#define MEMCACHED_PROCESS_COMMAND_START_ENABLED() (0)
#define MEMCACHED_SLABS_ALLOCATE(arg0, arg1, arg2, arg3)
#define MEMCACHED_SLABS_ALLOCATE_ENABLED() (0)
#define MEMCACHED_SLABS_ALLOCATE_FAILED(arg0, arg1)
#define MEMCACHED_SLABS_ALLOCATE_FAILED_ENABLED() (0)
#define MEMCACHED_SLABS_FREE(arg0, arg1, arg2)
#define MEMCACHED_SLABS_FREE_ENABLED() (0)
#define MEMCACHED_SLABS_SLABCLASS_ALLOCATE(arg0)
#define MEMCACHED_SLABS_SLABCLASS_ALLOCATE_ENABLED() (0)
#define MEMCACHED_SLABS_SLABCLASS_ALLOCATE_FAILED(arg0)
#define MEMCACHED_SLABS_SLABCLASS_ALLOCATE_FAILED_ENABLED() (0)
#endif

#endif
3,179
43.166667
66
h
null
NearPMSW-main/nearpm/shadow/memcached-pmem-sd/cache.h
/* -*- Mode: C; tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- */
#ifndef CACHE_H
#define CACHE_H

#include <pthread.h>

#ifdef HAVE_UMEM_H
/* libumem available: map the entire cache API straight onto umem. */
#include <umem.h>

#define cache_t umem_cache_t
#define cache_alloc(a) umem_cache_alloc(a, UMEM_DEFAULT)
#define do_cache_alloc(a) umem_cache_alloc(a, UMEM_DEFAULT)
#define cache_free(a, b) umem_cache_free(a, b)
#define do_cache_free(a, b) umem_cache_free(a, b)
#define cache_create(a,b,c,d,e) umem_cache_create((char*)a, b, c, d, e, NULL, NULL, NULL, 0)
#define cache_destroy(a) umem_cache_destroy(a);

#else

#ifndef NDEBUG
/* may be used for debug purposes */
extern int cache_error;
#endif

/**
 * Constructor hook: called to initialize each newly allocated object.
 *
 * @param obj pointer to the object to initialize
 * @param notused1 currently unused
 * @param notused2 currently unused
 * @return should return 0 (the return value is currently not checked)
 */
typedef int cache_constructor_t(void* obj, void* notused1, int notused2);

/**
 * Destructor hook: called to clean up an object before its memory is
 * released back to the operating system.
 *
 * @param obj pointer to the object to clean up
 * @param notused currently unused
 */
typedef void cache_destructor_t(void* obj, void* notused);

/**
 * Internal bookkeeping for one object cache.  Treat as opaque: touching
 * these fields directly results in undefined behavior.
 */
typedef struct {
    /** Mutex to protect access to the structure */
    pthread_mutex_t mutex;
    /** Name of the cache objects in this cache (provided by the caller) */
    char *name;
    /** List of pointers to available buffers in this cache */
    void **ptr;
    /** The size of each element in this cache */
    size_t bufsize;
    /** The capacity of the list of elements */
    int freetotal;
    /** The current number of free elements */
    int freecurr;
    /** The constructor to be called each time we allocate more memory */
    cache_constructor_t* constructor;
    /** The destructor to be called each time before we release memory */
    cache_destructor_t* destructor;
} cache_t;

/**
 * Create an object cache for fixed-size objects.  Fully MT-safe: objects
 * may be allocated from multiple threads with no external locking.
 *
 * @param name cache name, used for debugging/diagnostics
 * @param bufsize size of each object in the cache
 * @param align alignment requirement of the objects
 * @param constructor called to initialize newly allocated memory (may be NULL)
 * @param destructor called before memory is released to the OS (may be NULL)
 * @return handle to the cache, or NULL on failure
 */
cache_t* cache_create(const char* name, size_t bufsize, size_t align,
                      cache_constructor_t* constructor,
                      cache_destructor_t* destructor);

/**
 * Destroy an object cache.  All buffers obtained via cache_alloc() must be
 * returned with cache_free() first; otherwise behavior is undefined.
 *
 * @param handle cache to destroy
 */
void cache_destroy(cache_t* handle);

/**
 * Allocate one initialized object from the cache.
 *
 * @param handle cache to allocate from
 * @return pointer to an object, or NULL if the allocation cannot be satisfied
 */
void* cache_alloc(cache_t* handle);
void* do_cache_alloc(cache_t* handle);

/**
 * Return an object to the cache.  The caller should leave the object in an
 * initialized state so it can be handed out again by cache_alloc().
 *
 * @param handle cache the object belongs to
 * @param ptr object to return
 */
void cache_free(cache_t* handle, void* ptr);
void do_cache_free(cache_t* handle, void* ptr);

#endif
#endif
36.181818
92
h
null
NearPMSW-main/nearpm/shadow/memcached-pmem-sd/sasl_defs.c
/*
 * NOTE(review): SASL integration for memcached.
 *  - sasl_server_userdb_checkpass(): plain-text password-file check compiled
 *    under ENABLE_SASL_PWDB; file lines look like "user:password[:...]".
 *    The memcmp/offset parsing is position-sensitive -- kept byte-identical.
 *  - sasl_getconf(): resolves the SASL config path from SASL_CONF_PATH or
 *    the hard-coded `locations' list (HAVE_SASL_CB_GETCONF only).
 *  - sasl_log(): maps SASL log severities onto settings.verbose levels.
 *  - init_sasl(): installs the callback table and aborts on
 *    sasl_server_init() failure.  The pwdb entry MUST remain slot 0 of
 *    sasl_callbacks[]: init_sasl() overwrites index 0 with SASL_CB_LIST_END
 *    when MEMCACHED_SASL_PWDB is unset.
 */
/* -*- Mode: C; tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- */ #include "memcached.h" #include <stdio.h> #include <stdlib.h> #include <string.h> #include <sasl/saslplug.h> char my_sasl_hostname[1025]; #ifdef HAVE_SASL_CB_GETCONF /* The locations we may search for a SASL config file if the user didn't * specify one in the environment variable SASL_CONF_PATH */ const char * const locations[] = { "/etc/sasl/memcached.conf", "/etc/sasl2/memcached.conf", NULL }; #endif #ifndef HAVE_SASL_CALLBACK_FT typedef int (*sasl_callback_ft)(void); #endif #ifdef ENABLE_SASL_PWDB #define MAX_ENTRY_LEN 256 static const char *memcached_sasl_pwdb; static int sasl_server_userdb_checkpass(sasl_conn_t *conn, void *context, const char *user, const char *pass, unsigned passlen, struct propctx *propctx) { size_t unmlen = strlen(user); if ((passlen + unmlen) > (MAX_ENTRY_LEN - 4)) { fprintf(stderr, "WARNING: Failed to authenticate <%s> due to too long password (%d)\n", user, passlen); return SASL_NOAUTHZ; } FILE *pwfile = fopen(memcached_sasl_pwdb, "r"); if (pwfile == NULL) { if (settings.verbose) { vperror("WARNING: Failed to open sasl database <%s>", memcached_sasl_pwdb); } return SASL_NOAUTHZ; } char buffer[MAX_ENTRY_LEN]; bool ok = false; while ((fgets(buffer, sizeof(buffer), pwfile)) != NULL) { if (memcmp(user, buffer, unmlen) == 0 && buffer[unmlen] == ':') { /* This is the correct user */ ++unmlen; if (memcmp(pass, buffer + unmlen, passlen) == 0 && (buffer[unmlen + passlen] == ':' || /* Additional tokens */ buffer[unmlen + passlen] == '\n' || /* end of line */ buffer[unmlen + passlen] == '\r'|| /* dos format? 
*/ buffer[unmlen + passlen] == '\0')) { /* line truncated */ ok = true; } break; } } (void)fclose(pwfile); if (ok) { return SASL_OK; } if (settings.verbose) { fprintf(stderr, "INFO: User <%s> failed to authenticate\n", user); } return SASL_NOAUTHZ; } #endif #ifdef HAVE_SASL_CB_GETCONF static int sasl_getconf(void *context, const char **path) { *path = getenv("SASL_CONF_PATH"); if (*path == NULL) { for (int i = 0; locations[i] != NULL; ++i) { if (access(locations[i], F_OK) == 0) { *path = locations[i]; break; } } } if (settings.verbose) { if (*path != NULL) { fprintf(stderr, "Reading configuration from: <%s>\n", *path); } else { fprintf(stderr, "Failed to locate a config path\n"); } } return (*path != NULL) ? SASL_OK : SASL_FAIL; } #endif static int sasl_log(void *context, int level, const char *message) { bool log = true; switch (level) { case SASL_LOG_NONE: log = false; break; case SASL_LOG_PASS: case SASL_LOG_TRACE: case SASL_LOG_DEBUG: case SASL_LOG_NOTE: if (settings.verbose < 2) { log = false; } break; case SASL_LOG_WARN: case SASL_LOG_FAIL: if (settings.verbose < 1) { log = false; } break; default: /* This is an error */ ; } if (log) { fprintf(stderr, "SASL (severity %d): %s\n", level, message); } return SASL_OK; } static sasl_callback_t sasl_callbacks[] = { #ifdef ENABLE_SASL_PWDB { SASL_CB_SERVER_USERDB_CHECKPASS, (sasl_callback_ft)sasl_server_userdb_checkpass, NULL }, #endif { SASL_CB_LOG, (sasl_callback_ft)sasl_log, NULL }, #ifdef HAVE_SASL_CB_GETCONF { SASL_CB_GETCONF, sasl_getconf, NULL }, #endif { SASL_CB_LIST_END, NULL, NULL } }; void init_sasl(void) { #ifdef ENABLE_SASL_PWDB memcached_sasl_pwdb = getenv("MEMCACHED_SASL_PWDB"); if (memcached_sasl_pwdb == NULL) { if (settings.verbose) { fprintf(stderr, "INFO: MEMCACHED_SASL_PWDB not specified. 
" "Internal passwd database disabled\n"); } sasl_callbacks[0].id = SASL_CB_LIST_END; sasl_callbacks[0].proc = NULL; } #endif memset(my_sasl_hostname, 0, sizeof(my_sasl_hostname)); if (gethostname(my_sasl_hostname, sizeof(my_sasl_hostname)-1) == -1) { if (settings.verbose) { fprintf(stderr, "Error discovering hostname for SASL\n"); } my_sasl_hostname[0] = '\0'; } if (sasl_server_init(sasl_callbacks, "memcached") != SASL_OK) { fprintf(stderr, "Error initializing sasl.\n"); exit(EXIT_FAILURE); } else { if (settings.verbose) { fprintf(stderr, "Initialized SASL.\n"); } } }
5,091
25.659686
93
c
null
NearPMSW-main/nearpm/shadow/memcached-pmem-sd/bipbuffer.h
#ifndef BIPBUFFER_H
#define BIPBUFFER_H

/*
 * Bi-partite circular buffer ("bip buffer"): maintains two contiguous
 * regions (A and B) inside one backing array so readers always receive a
 * contiguous span of bytes.
 */
typedef struct {
    unsigned long int size;

    /* region A */
    unsigned int a_start, a_end;

    /* region B */
    unsigned int b_end;

    /* is B inuse? */
    int b_inuse;

    unsigned char data[];
} bipbuf_t;

/**
 * Allocate (via malloc) and initialize a new bip buffer.
 *
 * @param[in] size capacity of the buffer in bytes
 */
bipbuf_t *bipbuf_new(const unsigned int size);

/**
 * Initialize a bip buffer in caller-provided memory; performs no
 * allocation.
 *
 * @param[in] size capacity of the backing array
 */
void bipbuf_init(bipbuf_t* me, const unsigned int size);

/** Release a buffer created with bipbuf_new(). */
void bipbuf_free(bipbuf_t *me);

/* TODO: DOCUMENTATION */
unsigned char *bipbuf_request(bipbuf_t* me, const int size);
int bipbuf_push(bipbuf_t* me, const int size);

/**
 * Copy data into the buffer.
 *
 * @param[in] data bytes to offer
 * @param[in] size number of bytes offered
 * @return number of bytes actually accepted
 */
int bipbuf_offer(bipbuf_t *me, const unsigned char *data, const int size);

/**
 * Peek at data without consuming it.
 *
 * @param[in] len number of bytes to look at
 * @return pointer to data, or NULL if len bytes are not available
 */
unsigned char *bipbuf_peek(const bipbuf_t* me, const unsigned int len);

/**
 * Peek at everything currently readable without consuming it.
 *
 * @param[out] len receives the number of readable bytes
 * @return pointer to data, or NULL if the buffer is empty
 */
unsigned char *bipbuf_peek_all(const bipbuf_t* me, unsigned int *len);

/**
 * Consume data: returns a pointer to the bytes and advances the read
 * cursor.
 *
 * @param[in] size number of bytes to consume
 * @return pointer to data, or NULL if size bytes are not available
 */
unsigned char *bipbuf_poll(bipbuf_t* me, const unsigned int size);

/** @return total capacity of the buffer */
int bipbuf_size(const bipbuf_t* me);

/** @return 1 if the buffer is empty, 0 otherwise */
int bipbuf_is_empty(const bipbuf_t* me);

/** @return number of bytes currently stored */
int bipbuf_used(const bipbuf_t* cb);

/** @return number of bytes of free space */
int bipbuf_unused(const bipbuf_t* me);

#endif /* BIPBUFFER_H */
23.079545
74
h
null
NearPMSW-main/nearpm/shadow/memcached-pmem-sd/solaris_priv.c
#include <stdlib.h> #include <priv.h> #include <stdio.h> #include "memcached.h" /* * this section of code will drop all (Solaris) privileges including * those normally granted to all userland process (basic privileges). The * effect of this is that after running this code, the process will not able * to fork(), exec(), etc. See privileges(5) for more information. */ void drop_privileges(void) { priv_set_t *privs = priv_str_to_set("basic", ",", NULL); if (privs == NULL) { perror("priv_str_to_set"); exit(EXIT_FAILURE); } (void)priv_delset(privs, PRIV_FILE_LINK_ANY); (void)priv_delset(privs, PRIV_PROC_EXEC); (void)priv_delset(privs, PRIV_PROC_FORK); (void)priv_delset(privs, PRIV_PROC_INFO); (void)priv_delset(privs, PRIV_PROC_SESSION); if (setppriv(PRIV_SET, PRIV_PERMITTED, privs) != 0) { perror("setppriv(PRIV_SET, PRIV_PERMITTED)"); exit(EXIT_FAILURE); } priv_emptyset(privs); if (setppriv(PRIV_SET, PRIV_INHERITABLE, privs) != 0) { perror("setppriv(PRIV_SET, PRIV_INHERITABLE)"); exit(EXIT_FAILURE); } if (setppriv(PRIV_SET, PRIV_LIMIT, privs) != 0) { perror("setppriv(PRIV_SET, PRIV_LIMIT)"); exit(EXIT_FAILURE); } priv_freeset(privs); }
1,259
27
76
c
null
NearPMSW-main/nearpm/shadow/memcached-pmem-sd/jenkins_hash.c
/*
 * NOTE(review): this is Bob Jenkins' public-domain lookup3-style hash
 * (hashlittle/hashbig), selected at compile time by ENDIAN_LITTLE /
 * ENDIAN_BIG from the configure script.  The switch fall-throughs, the
 * masked reads past the end of word-aligned keys, and the VALGRIND-safe
 * byte-at-a-time paths are all intentional and bit-exact.  Left
 * byte-identical on purpose: do NOT reformat or "fix" the fallthroughs
 * without hash-value regression tests.
 */
/* -*- Mode: C; tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- */ /* * Hash table * * The hash function used here is by Bob Jenkins, 1996: * <http://burtleburtle.net/bob/hash/doobs.html> * "By Bob Jenkins, 1996. bob_jenkins@burtleburtle.net. * You may use this code any way you wish, private, educational, * or commercial. It's free." * */ #include "memcached.h" #include "jenkins_hash.h" /* * Since the hash function does bit manipulation, it needs to know * whether it's big or little-endian. ENDIAN_LITTLE and ENDIAN_BIG * are set in the configure script. */ #if ENDIAN_BIG == 1 # define HASH_LITTLE_ENDIAN 0 # define HASH_BIG_ENDIAN 1 #else # if ENDIAN_LITTLE == 1 # define HASH_LITTLE_ENDIAN 1 # define HASH_BIG_ENDIAN 0 # else # define HASH_LITTLE_ENDIAN 0 # define HASH_BIG_ENDIAN 0 # endif #endif #define rot(x,k) (((x)<<(k)) ^ ((x)>>(32-(k)))) /* ------------------------------------------------------------------------------- mix -- mix 3 32-bit values reversibly. This is reversible, so any information in (a,b,c) before mix() is still in (a,b,c) after mix(). If four pairs of (a,b,c) inputs are run through mix(), or through mix() in reverse, there are at least 32 bits of the output that are sometimes the same for one pair and different for another pair. This was tested for: * pairs that differed by one bit, by two bits, in any combination of top bits of (a,b,c), or in any combination of bottom bits of (a,b,c). * "differ" is defined as +, -, ^, or ~^. For + and -, I transformed the output delta to a Gray code (a^(a>>1)) so a string of 1's (as is commonly produced by subtraction) look like a single 1-bit difference. * the base values were pseudorandom, all zero but one bit set, or all zero plus a counter that starts at zero. 
Some k values for my "a-=c; a^=rot(c,k); c+=b;" arrangement that satisfy this are 4 6 8 16 19 4 9 15 3 18 27 15 14 9 3 7 17 3 Well, "9 15 3 18 27 15" didn't quite get 32 bits diffing for "differ" defined as + with a one-bit base and a two-bit delta. I used http://burtleburtle.net/bob/hash/avalanche.html to choose the operations, constants, and arrangements of the variables. This does not achieve avalanche. There are input bits of (a,b,c) that fail to affect some output bits of (a,b,c), especially of a. The most thoroughly mixed value is c, but it doesn't really even achieve avalanche in c. This allows some parallelism. Read-after-writes are good at doubling the number of bits affected, so the goal of mixing pulls in the opposite direction as the goal of parallelism. I did what I could. Rotates seem to cost as much as shifts on every machine I could lay my hands on, and rotates are much kinder to the top and bottom bits, so I used rotates. ------------------------------------------------------------------------------- */ #define mix(a,b,c) \ { \ a -= c; a ^= rot(c, 4); c += b; \ b -= a; b ^= rot(a, 6); a += c; \ c -= b; c ^= rot(b, 8); b += a; \ a -= c; a ^= rot(c,16); c += b; \ b -= a; b ^= rot(a,19); a += c; \ c -= b; c ^= rot(b, 4); b += a; \ } /* ------------------------------------------------------------------------------- final -- final mixing of 3 32-bit values (a,b,c) into c Pairs of (a,b,c) values differing in only a few bits will usually produce values of c that look totally different. This was tested for * pairs that differed by one bit, by two bits, in any combination of top bits of (a,b,c), or in any combination of bottom bits of (a,b,c). * "differ" is defined as +, -, ^, or ~^. For + and -, I transformed the output delta to a Gray code (a^(a>>1)) so a string of 1's (as is commonly produced by subtraction) look like a single 1-bit difference. * the base values were pseudorandom, all zero but one bit set, or all zero plus a counter that starts at zero. 
These constants passed: 14 11 25 16 4 14 24 12 14 25 16 4 14 24 and these came close: 4 8 15 26 3 22 24 10 8 15 26 3 22 24 11 8 15 26 3 22 24 ------------------------------------------------------------------------------- */ #define final(a,b,c) \ { \ c ^= b; c -= rot(b,14); \ a ^= c; a -= rot(c,11); \ b ^= a; b -= rot(a,25); \ c ^= b; c -= rot(b,16); \ a ^= c; a -= rot(c,4); \ b ^= a; b -= rot(a,14); \ c ^= b; c -= rot(b,24); \ } #if HASH_LITTLE_ENDIAN == 1 uint32_t jenkins_hash( const void *key, /* the key to hash */ size_t length) /* length of the key */ { uint32_t a,b,c; /* internal state */ union { const void *ptr; size_t i; } u; /* needed for Mac Powerbook G4 */ /* Set up the internal state */ a = b = c = 0xdeadbeef + ((uint32_t)length) + 0; u.ptr = key; if (HASH_LITTLE_ENDIAN && ((u.i & 0x3) == 0)) { const uint32_t *k = key; /* read 32-bit chunks */ #ifdef VALGRIND const uint8_t *k8; #endif /* ifdef VALGRIND */ /*------ all but last block: aligned reads and affect 32 bits of (a,b,c) */ while (length > 12) { a += k[0]; b += k[1]; c += k[2]; mix(a,b,c); length -= 12; k += 3; } /*----------------------------- handle the last (probably partial) block */ /* * "k[2]&0xffffff" actually reads beyond the end of the string, but * then masks off the part it's not allowed to read. Because the * string is aligned, the masked-off tail is in the same word as the * rest of the string. Every machine with memory protection I've seen * does it on word boundaries, so is OK with this. But VALGRIND will * still catch it and complain. The masking trick does make the hash * noticeably faster for short strings (like English words). 
*/ #ifndef VALGRIND switch(length) { case 12: c+=k[2]; b+=k[1]; a+=k[0]; break; case 11: c+=k[2]&0xffffff; b+=k[1]; a+=k[0]; break; case 10: c+=k[2]&0xffff; b+=k[1]; a+=k[0]; break; case 9 : c+=k[2]&0xff; b+=k[1]; a+=k[0]; break; case 8 : b+=k[1]; a+=k[0]; break; case 7 : b+=k[1]&0xffffff; a+=k[0]; break; case 6 : b+=k[1]&0xffff; a+=k[0]; break; case 5 : b+=k[1]&0xff; a+=k[0]; break; case 4 : a+=k[0]; break; case 3 : a+=k[0]&0xffffff; break; case 2 : a+=k[0]&0xffff; break; case 1 : a+=k[0]&0xff; break; case 0 : return c; /* zero length strings require no mixing */ } #else /* make valgrind happy */ k8 = (const uint8_t *)k; switch(length) { case 12: c+=k[2]; b+=k[1]; a+=k[0]; break; case 11: c+=((uint32_t)k8[10])<<16; /* fall through */ case 10: c+=((uint32_t)k8[9])<<8; /* fall through */ case 9 : c+=k8[8]; /* fall through */ case 8 : b+=k[1]; a+=k[0]; break; case 7 : b+=((uint32_t)k8[6])<<16; /* fall through */ case 6 : b+=((uint32_t)k8[5])<<8; /* fall through */ case 5 : b+=k8[4]; /* fall through */ case 4 : a+=k[0]; break; case 3 : a+=((uint32_t)k8[2])<<16; /* fall through */ case 2 : a+=((uint32_t)k8[1])<<8; /* fall through */ case 1 : a+=k8[0]; break; case 0 : return c; /* zero length strings require no mixing */ } #endif /* !valgrind */ } else if (HASH_LITTLE_ENDIAN && ((u.i & 0x1) == 0)) { const uint16_t *k = key; /* read 16-bit chunks */ const uint8_t *k8; /*--------------- all but last block: aligned reads and different mixing */ while (length > 12) { a += k[0] + (((uint32_t)k[1])<<16); b += k[2] + (((uint32_t)k[3])<<16); c += k[4] + (((uint32_t)k[5])<<16); mix(a,b,c); length -= 12; k += 6; } /*----------------------------- handle the last (probably partial) block */ k8 = (const uint8_t *)k; switch(length) { case 12: c+=k[4]+(((uint32_t)k[5])<<16); b+=k[2]+(((uint32_t)k[3])<<16); a+=k[0]+(((uint32_t)k[1])<<16); break; case 11: c+=((uint32_t)k8[10])<<16; /* @fallthrough */ case 10: c+=k[4]; /* @fallthrough@ */ b+=k[2]+(((uint32_t)k[3])<<16); 
a+=k[0]+(((uint32_t)k[1])<<16); break; case 9 : c+=k8[8]; /* @fallthrough */ case 8 : b+=k[2]+(((uint32_t)k[3])<<16); a+=k[0]+(((uint32_t)k[1])<<16); break; case 7 : b+=((uint32_t)k8[6])<<16; /* @fallthrough */ case 6 : b+=k[2]; a+=k[0]+(((uint32_t)k[1])<<16); break; case 5 : b+=k8[4]; /* @fallthrough */ case 4 : a+=k[0]+(((uint32_t)k[1])<<16); break; case 3 : a+=((uint32_t)k8[2])<<16; /* @fallthrough */ case 2 : a+=k[0]; break; case 1 : a+=k8[0]; break; case 0 : return c; /* zero length strings require no mixing */ } } else { /* need to read the key one byte at a time */ const uint8_t *k = key; /*--------------- all but the last block: affect some 32 bits of (a,b,c) */ while (length > 12) { a += k[0]; a += ((uint32_t)k[1])<<8; a += ((uint32_t)k[2])<<16; a += ((uint32_t)k[3])<<24; b += k[4]; b += ((uint32_t)k[5])<<8; b += ((uint32_t)k[6])<<16; b += ((uint32_t)k[7])<<24; c += k[8]; c += ((uint32_t)k[9])<<8; c += ((uint32_t)k[10])<<16; c += ((uint32_t)k[11])<<24; mix(a,b,c); length -= 12; k += 12; } /*-------------------------------- last block: affect all 32 bits of (c) */ switch(length) /* all the case statements fall through */ { case 12: c+=((uint32_t)k[11])<<24; case 11: c+=((uint32_t)k[10])<<16; case 10: c+=((uint32_t)k[9])<<8; case 9 : c+=k[8]; case 8 : b+=((uint32_t)k[7])<<24; case 7 : b+=((uint32_t)k[6])<<16; case 6 : b+=((uint32_t)k[5])<<8; case 5 : b+=k[4]; case 4 : a+=((uint32_t)k[3])<<24; case 3 : a+=((uint32_t)k[2])<<16; case 2 : a+=((uint32_t)k[1])<<8; case 1 : a+=k[0]; break; case 0 : return c; /* zero length strings require no mixing */ } } final(a,b,c); return c; /* zero length strings require no mixing */ } #elif HASH_BIG_ENDIAN == 1 /* * hashbig(): * This is the same as hashword() on big-endian machines. It is different * from hashlittle() on all machines. hashbig() takes advantage of * big-endian byte ordering. 
*/ uint32_t jenkins_hash( const void *key, size_t length) { uint32_t a,b,c; union { const void *ptr; size_t i; } u; /* to cast key to (size_t) happily */ /* Set up the internal state */ a = b = c = 0xdeadbeef + ((uint32_t)length) + 0; u.ptr = key; if (HASH_BIG_ENDIAN && ((u.i & 0x3) == 0)) { const uint32_t *k = key; /* read 32-bit chunks */ #ifdef VALGRIND const uint8_t *k8; #endif /* ifdef VALGRIND */ /*------ all but last block: aligned reads and affect 32 bits of (a,b,c) */ while (length > 12) { a += k[0]; b += k[1]; c += k[2]; mix(a,b,c); length -= 12; k += 3; } /*----------------------------- handle the last (probably partial) block */ /* * "k[2]<<8" actually reads beyond the end of the string, but * then shifts out the part it's not allowed to read. Because the * string is aligned, the illegal read is in the same word as the * rest of the string. Every machine with memory protection I've seen * does it on word boundaries, so is OK with this. But VALGRIND will * still catch it and complain. The masking trick does make the hash * noticeably faster for short strings (like English words). 
*/ #ifndef VALGRIND switch(length) { case 12: c+=k[2]; b+=k[1]; a+=k[0]; break; case 11: c+=k[2]&0xffffff00; b+=k[1]; a+=k[0]; break; case 10: c+=k[2]&0xffff0000; b+=k[1]; a+=k[0]; break; case 9 : c+=k[2]&0xff000000; b+=k[1]; a+=k[0]; break; case 8 : b+=k[1]; a+=k[0]; break; case 7 : b+=k[1]&0xffffff00; a+=k[0]; break; case 6 : b+=k[1]&0xffff0000; a+=k[0]; break; case 5 : b+=k[1]&0xff000000; a+=k[0]; break; case 4 : a+=k[0]; break; case 3 : a+=k[0]&0xffffff00; break; case 2 : a+=k[0]&0xffff0000; break; case 1 : a+=k[0]&0xff000000; break; case 0 : return c; /* zero length strings require no mixing */ } #else /* make valgrind happy */ k8 = (const uint8_t *)k; switch(length) /* all the case statements fall through */ { case 12: c+=k[2]; b+=k[1]; a+=k[0]; break; case 11: c+=((uint32_t)k8[10])<<8; /* fall through */ case 10: c+=((uint32_t)k8[9])<<16; /* fall through */ case 9 : c+=((uint32_t)k8[8])<<24; /* fall through */ case 8 : b+=k[1]; a+=k[0]; break; case 7 : b+=((uint32_t)k8[6])<<8; /* fall through */ case 6 : b+=((uint32_t)k8[5])<<16; /* fall through */ case 5 : b+=((uint32_t)k8[4])<<24; /* fall through */ case 4 : a+=k[0]; break; case 3 : a+=((uint32_t)k8[2])<<8; /* fall through */ case 2 : a+=((uint32_t)k8[1])<<16; /* fall through */ case 1 : a+=((uint32_t)k8[0])<<24; break; case 0 : return c; } #endif /* !VALGRIND */ } else { /* need to read the key one byte at a time */ const uint8_t *k = key; /*--------------- all but the last block: affect some 32 bits of (a,b,c) */ while (length > 12) { a += ((uint32_t)k[0])<<24; a += ((uint32_t)k[1])<<16; a += ((uint32_t)k[2])<<8; a += ((uint32_t)k[3]); b += ((uint32_t)k[4])<<24; b += ((uint32_t)k[5])<<16; b += ((uint32_t)k[6])<<8; b += ((uint32_t)k[7]); c += ((uint32_t)k[8])<<24; c += ((uint32_t)k[9])<<16; c += ((uint32_t)k[10])<<8; c += ((uint32_t)k[11]); mix(a,b,c); length -= 12; k += 12; } /*-------------------------------- last block: affect all 32 bits of (c) */ switch(length) /* all the case statements fall through 
*/ { case 12: c+=k[11]; case 11: c+=((uint32_t)k[10])<<8; case 10: c+=((uint32_t)k[9])<<16; case 9 : c+=((uint32_t)k[8])<<24; case 8 : b+=k[7]; case 7 : b+=((uint32_t)k[6])<<8; case 6 : b+=((uint32_t)k[5])<<16; case 5 : b+=((uint32_t)k[4])<<24; case 4 : a+=k[3]; case 3 : a+=((uint32_t)k[2])<<8; case 2 : a+=((uint32_t)k[1])<<16; case 1 : a+=((uint32_t)k[0])<<24; break; case 0 : return c; } } final(a,b,c); return c; } #else /* HASH_XXX_ENDIAN == 1 */ #error Must define HASH_BIG_ENDIAN or HASH_LITTLE_ENDIAN #endif /* HASH_XXX_ENDIAN == 1 */
14,709
33.050926
79
c
null
NearPMSW-main/nearpm/shadow/memcached-pmem-sd/pslab.c
/*
 * Copyright 2018 Lenovo
 *
 * Licensed under the BSD-3 license. see LICENSE.Lenovo.txt for full text
 */

/*
 * pslab.c -- persistent slab pool for memcached on persistent memory.
 *
 * The pool file begins with a pslab_pool_t header, followed by slab_num
 * fixed-size "frames" (pslab_t header + one slab page each).  Recovery
 * walks the frames and relinks live items back into the hash table.
 * Persistence is done through the pmem_* / pmem_member_* primitives
 * (presumably libpmem flush/persist wrappers declared via memcached.h —
 * TODO confirm exact semantics against the pmem glue header).
 */
#include "memcached.h"
#include <stddef.h>
#include <string.h>

#define PSLAB_POOL_SIG "PMCH"
#define PSLAB_POOL_SIG_SIZE 4
#define PSLAB_POOL_VER_SIZE 12
/* Round-down mask used with (+7) to align the header length to 8 bytes. */
#define PSLAB_ALIGN_MASK 0xfffffff8

#pragma pack(1)
/* persistent slab pool (on-media header; layout is ABI, do not reorder) */
typedef struct {
    char signature[PSLAB_POOL_SIG_SIZE];
    uint32_t length; /* 8 bytes aligned */
    char version[PSLAB_POOL_VER_SIZE];
    uint8_t reserved;
    uint8_t checksum[2];
    uint8_t valid; /* not checksumed */
    uint64_t process_started;
    uint32_t flush_time[2];
    uint32_t slab_num;
    uint32_t slab_page_size;
    uint32_t slabclass_num;
    uint32_t slabclass_sizes[];
} pslab_pool_t;

/* fp->flags bits (flags themselves are volatile/non-persistent state) */
#define PSLAB_LINKED 1
#define PSLAB_CHUNKED 2
#define PSLAB_CHUNK 4

/* per-frame header immediately preceding each slab page */
typedef struct {
    uint8_t id;
    uint8_t flags; /* non-persistent */
    uint8_t reserved[6]; /* make slab[] 8 bytes aligned */
    uint32_t size;
    uint8_t slab[];
} pslab_t;
#pragma pack()

/* Size of one frame (header + slab page) for pool header pm. */
#define PSLAB_FRAME_SIZE(pm) (sizeof (pslab_t) + (pm)->slab_page_size)
/* First frame starts right after the (8-byte-aligned) pool header. */
#define PSLAB_FIRST_FRAME(pm) ((pslab_t *)((char *)(pm) + (pm)->length))
#define PSLAB_NEXT_FRAME(pm, fp) \
    ((fp) ? (pslab_t *)((char *)(fp) + PSLAB_FRAME_SIZE(pm)) : \
    PSLAB_FIRST_FRAME(pm))
/* Map a slab-page pointer back to its enclosing frame header. */
#define PSLAB_SLAB2FRAME(slab) \
    ((slab) ? (pslab_t *)((char *)(slab) - sizeof (pslab_t)) : NULL)
/* Iterate frames starting at s (or the first frame when s is NULL).
 * NOTE(review): the assert's first disjunct is pslab_start != NULL, which
 * is true whenever the pool is initialized, so the frame-alignment check
 * on s is effectively never evaluated — it looks like (s) == NULL was
 * intended.  Confirm before relying on the alignment guarantee. */
#define PSLAB_WALK_FROM(fp, s) \
    assert(pslab_start != NULL || ((char *) (s) - (char *) pslab_start) \
        % PSLAB_FRAME_SIZE(pslab_pool) == 0); \
    (fp) = (s) ? (s) : pslab_start; \
    for (int _i = (s) ? ((char *)(s) - (char *) pslab_start) \
        / PSLAB_FRAME_SIZE(pslab_pool) : 0; \
        (fp) >= pslab_start && (fp) < pslab_end; \
        _i++, (fp) = PSLAB_NEXT_FRAME(pslab_pool, (fp)))
#define PSLAB_WALK_ID() (_i)
#define PSLAB_WALK(fp) PSLAB_WALK_FROM((fp), NULL)

static pslab_pool_t *pslab_pool;        /* mapped pool header */
static pslab_t *pslab_start, *pslab_end; /* [first frame, one-past-last) */

/* Convert a pool-relative address to a byte offset; 0 for out-of-pool. */
uint64_t pslab_addr2off(void *addr) {
    return ((char *) addr >= (char *) pslab_start) ?
        (char *) addr - (char *) pslab_start : 0;
}

/* Inverse of pslab_addr2off; offset 0 maps back to NULL. */
#define pslab_off2addr(off) ((off) ? \
    (void *) ((char *)pslab_start + (off)) : NULL)
/* Round an in-pool address down to the start of its frame. */
#define pslab_addr2slab(addr) ((char *) (addr) >= (char *) pslab_start ? \
    (pslab_t *) ((char *)(addr) - ((char *)(addr) - (char *) pslab_start) % \
    PSLAB_FRAME_SIZE(pslab_pool)) : NULL)

/* Return 1 iff p points inside the persistent frame area. */
int pslab_contains(char *p) {
    if (p >= (char *) pslab_start && p < (char *) pslab_end)
        return 1;
    return 0;
}

/* Mark the frame holding slab page p as in use by slab class id.
 * size is persisted before id so a torn update never yields a frame
 * with a valid id but a stale size. */
void pslab_use_slab(void *p, int id, unsigned int size) {
    pslab_t *fp = PSLAB_SLAB2FRAME(p);
    fp->size = size;
    pmem_member_persist(fp, size);
    fp->id = id;
    pmem_member_persist(fp, id);
}

/* Cursor-style allocator: given the previously returned slab (or NULL to
 * restart), return the next reusable slab page, or NULL when exhausted.
 * A frame is reusable when unassigned (id == 0) or when recovery left it
 * neither linked nor holding live chunks. */
void *pslab_get_free_slab(void *slab) {
    static pslab_t *cur = NULL; /* scan cursor; survives across calls */
    pslab_t *fp = PSLAB_SLAB2FRAME(slab);
    if (fp == NULL)
        cur = fp;
    else if (fp != cur)
        return NULL; /* caller is out of sync with the cursor */
    PSLAB_WALK_FROM(fp, PSLAB_NEXT_FRAME(pslab_pool, cur)) {
        if (fp->id == 0 || (fp->flags & (PSLAB_LINKED | PSLAB_CHUNK)) == 0) {
            cur = fp;
            return fp->slab;
        }
    }
    cur = NULL;
    return NULL;
}

/* Precomputed checksum of the parts of the header that never change
 * between flushes; see pslab_checksum_init(). */
static uint8_t pslab_chksum0;

/* Simple additive 8-bit checksum over buf[0..len). */
static uint8_t pslab_do_checksum(void *buf, uint32_t len) {
    uint8_t sum = 0;
    uint8_t *end = (uint8_t *)buf + len;
    uint8_t *cur = buf;
    while (cur < end)
        sum = (uint8_t) (sum + *(cur++));
    return sum;
}

#define pslab_do_checksum_member(p, m) \
    pslab_do_checksum(&(p)->m, sizeof ((p)->m))

/* Checksum every header field except checksum[], valid and flush_time[]
 * (the fields that change after creation), caching the result. */
static void pslab_checksum_init() {
    assert(pslab_pool != NULL);
    pslab_chksum0 = 0;
    pslab_chksum0 += pslab_do_checksum(pslab_pool,
        offsetof(pslab_pool_t, checksum));
    pslab_chksum0 += pslab_do_checksum_member(pslab_pool, process_started);
    pslab_chksum0 += pslab_do_checksum(&pslab_pool->slab_num,
        pslab_pool->length - offsetof(pslab_pool_t, slab_num));
}

/* Verify checksum slot i; returns 0 when the header+slot checksum to
 * zero (checksum[i] is stored as the two's complement, see below). */
static uint8_t pslab_checksum_check(int i) {
    uint8_t sum = pslab_chksum0;
    sum += pslab_do_checksum_member(pslab_pool, checksum[i]);
    sum += pslab_do_checksum_member(pslab_pool, flush_time[i]);
    return sum;
}

/* Store into slot i the value that makes the full sum come out to 0. */
static void pslab_checksum_update(int sum, int i) {
    pslab_pool->checksum[i] = (uint8_t) (~(pslab_chksum0 + sum) + 1);
}

/* Record a new flush time crash-consistently: write the inactive
 * slot (flush_time/checksum), persist it, then flip 'valid' to point at
 * it.  'valid' is 1-based; 0 would mean "no valid slot". */
void pslab_update_flushtime(uint32_t time) {
    int i = (pslab_pool->valid - 1) ^ 1; /* index of the inactive slot */
    pslab_pool->flush_time[i] = time;
    pslab_checksum_update(pslab_do_checksum(&time, sizeof (time)), i);
    pmem_member_flush(pslab_pool, flush_time);
    pmem_member_persist(pslab_pool, checksum);
    pslab_pool->valid = i + 1; /* commit point */
    pmem_member_persist(pslab_pool, valid);
}

/* Dual-purpose accessor: with a nonzero argument, stash the new start
 * time and return the one persisted in the pool; with 0, return the
 * previously stashed value. */
time_t pslab_process_started(time_t process_started) {
    static time_t process_started_new;
    if (process_started) {
        process_started_new = process_started;
        return pslab_pool->process_started;
    } else {
        return process_started_new;
    }
}

/* Rebuild volatile memcached state from the persistent pool after a
 * restart.  Four passes: clear flags; classify frames (linked/chunked)
 * and expire flushed/expired items; relink chunk chains; hand live
 * slabs back to the slab allocator and relink live items.
 * Returns 0 on success, -1 if the slab allocator refuses a slab. */
int pslab_do_recover() {
    pslab_t *fp;
    uint8_t *ptr;
    int i, size, perslab;
    settings.oldest_live = pslab_pool->flush_time[pslab_pool->valid - 1];
    /* current_time will be resetted by clock_handler afterwards. Set
     * it temporarily, so that functions depending on it can be reused
     * during recovery */
    current_time = process_started - pslab_pool->process_started;
    PSLAB_WALK(fp) {
        fp->flags = 0;
    }
    /* check for linked and chunked slabs and mark all chunks */
    PSLAB_WALK(fp) {
        if (fp->id == 0)
            continue;
        size = fp->size;
        perslab = pslab_pool->slab_page_size / size;
        for (i = 0, ptr = fp->slab; i < perslab; i++, ptr += size) {
            item *it = (item *) ptr;
            if (it->it_flags & ITEM_LINKED) {
                if (item_is_flushed(it) ||
                    (it->exptime != 0 && it->exptime <= current_time)) {
                    /* item is dead (flushed or expired): drop it */
                    it->it_flags = ITEM_PSLAB;
                    pmem_member_persist(it, it_flags);
                } else {
                    fp->flags |= PSLAB_LINKED;
                    if (it->it_flags & ITEM_CHUNKED)
                        fp->flags |= PSLAB_CHUNKED;
                }
            } else if (it->it_flags & ITEM_CHUNK) {
                ((item_chunk *)it)->head = NULL; /* non-persistent */
            }
        }
    }
    /* relink alive chunks */
    PSLAB_WALK(fp) {
        if (fp->id == 0 || (fp->flags & PSLAB_CHUNKED) == 0)
            continue;
        size = fp->size;
        perslab = pslab_pool->slab_page_size / size;
        for (i = 0, ptr = fp->slab; i < perslab; i++, ptr += size) {
            item *it = (item *) ptr;
            if ((it->it_flags & ITEM_LINKED) &&
                (it->it_flags & ITEM_CHUNKED)) {
                item_chunk *nch;
                item_chunk *ch = (item_chunk *) ITEM_data(it);
                ch->head = it;
                /* walk the persisted offset chain, rebuilding the
                 * volatile next/prev/head pointers */
                while ((nch = pslab_off2addr(ch->next_poff)) != NULL) {
                    pslab_t *nfp = pslab_addr2slab(nch);
                    nfp->flags |= PSLAB_CHUNK;
                    nch->head = it;
                    ch->next = nch;
                    nch->prev = ch;
                    ch = nch;
                }
            }
        }
    }
    /* relink linked slabs and free free ones */
    PSLAB_WALK(fp) {
        int id;
        if (fp->id == 0 ||
            (fp->flags & (PSLAB_LINKED | PSLAB_CHUNK)) == 0)
            continue;
        if (do_slabs_renewslab(fp->id, (char *)fp->slab) == 0)
            return -1;
        id = fp->id;
        size = fp->size;
        perslab = pslab_pool->slab_page_size / size;
        for (i = 0, ptr = fp->slab; i < perslab; i++, ptr += size) {
            item *it = (item *) ptr;
            if (it->it_flags & ITEM_LINKED) {
                do_slab_realloc(it, id);
                do_item_relink(it, hash(ITEM_key(it), it->nkey));
            } else if ((it->it_flags & ITEM_CHUNK) == 0 ||
                ((item_chunk *)it)->head == NULL) {
                /* not a live chunk of a linked item: return to free list */
                assert((it->it_flags & ITEM_CHUNKED) == 0);
                do_slabs_free(ptr, 0, id);
            }
        }
    }
    return 0;
}

/* Map and validate an existing pool file before recovery.  Fills
 * slab_sizes[] (NUL-terminated with a 0 entry) from the persisted
 * slab-class table and sets up pslab_start/pslab_end.
 * Returns 0 on success, -1 on any validation failure. */
int pslab_pre_recover(char *name, uint32_t *slab_sizes, int slab_max,
        int slab_page_size) {
    size_t mapped_len;
    int is_pmem;
    int i;
    if ((pslab_pool = pmem_map_file(name, 0, PMEM_FILE_EXCL,
            0, &mapped_len, &is_pmem)) == NULL) {
        fprintf(stderr, "pmem_map_file failed\n");
        return -1;
    }
    if (!is_pmem && (pslab_force == false)) {
        fprintf(stderr, "%s is not persistent memory\n", name);
        return -1;
    }
    if (strncmp(pslab_pool->signature, PSLAB_POOL_SIG,
            PSLAB_POOL_SIG_SIZE) != 0) {
        fprintf(stderr, "pslab pool unknown signature\n");
        return -1;
    }
    pslab_checksum_init();
    if (pslab_checksum_check(pslab_pool->valid - 1)) {
        fprintf(stderr, "pslab pool bad checksum\n");
        return -1;
    }
    if (strncmp(pslab_pool->version, VERSION, PSLAB_POOL_VER_SIZE) != 0) {
        fprintf(stderr, "pslab pool version mismatch\n");
        return -1;
    }
    if (pslab_pool->slab_page_size != slab_page_size) {
        fprintf(stderr, "pslab pool slab size mismatch\n");
        return -1;
    }
    assert(slab_max > pslab_pool->slabclass_num);
    for (i = 0; i < pslab_pool->slabclass_num; i++)
        slab_sizes[i] = pslab_pool->slabclass_sizes[i];
    slab_sizes[i] = 0; /* terminator */
    pslab_start = PSLAB_FIRST_FRAME(pslab_pool);
    pslab_end = (pslab_t *) ((char *) pslab_start + pslab_pool->slab_num *
        PSLAB_FRAME_SIZE(pslab_pool));
    return 0;
}

/* When true, accept a mapping that is not real persistent memory. */
bool pslab_force;

/* Create and initialize a new pool file of pool_size bytes: write the
 * header, zero all frame headers, record the slab-class table, then
 * set valid=1 as the final commit.  Returns 0 on success, -1 on error. */
int
pslab_create(char *pool_name, uint32_t pool_size, uint32_t slab_page_size,
        uint32_t *slabclass_sizes, int slabclass_num) {
    size_t mapped_len;
    int is_pmem;
    uint32_t length;
    pslab_t *fp;
    int i;
    if ((pslab_pool = pmem_map_file(pool_name, pool_size, PMEM_FILE_CREATE,
            0666, &mapped_len, &is_pmem)) == NULL) {
        fprintf(stderr, "pmem_map_file failed\n");
        return -1;
    }
    if (!is_pmem && (pslab_force == false)) {
        fprintf(stderr, "%s is not persistent memory\n", pool_name);
        return -1;
    }
    /* NOTE(review): the block below looks like leftover debug/probe code
     * (scribbles a marker at the pool base and prints addresses). */
    //PMEMoid root = pmemobj_root(pop, sizeof(uint64_t));
    uint64_t* tmp = (uint64_t*)pslab_pool;//pmemobj_direct(root);
    //printf( "%ld\n",PMEM_OBJ_POOL_UNUSED2_SIZE);
    //printf( "%ld %ld %ld\n",sizeof(PMEMobjpool),sizeof(uint16_t),sizeof(void*));
    printf("vaddr %p pmemobjid %x\n",pslab_pool,0);
    *tmp = 0xdeadbeefdeadbeef;
    pmem_persist(tmp,64);
    *tmp = (uint64_t)tmp;
    pmem_persist(tmp,64);
    *tmp = 0;
    pmem_persist(tmp,64);
    printf("created file start 1 %p size %d %s\n",(void *)pslab_pool,pool_size,pool_name);
    /* header length = fixed header + slab-class table, rounded up to 8 */
    length = (sizeof (pslab_pool_t) +
        sizeof (pslab_pool->slabclass_sizes[0]) * slabclass_num + 7) &
        PSLAB_ALIGN_MASK;
    pmem_memset_nodrain(pslab_pool, 0, length);
    (void) memcpy(pslab_pool->signature, PSLAB_POOL_SIG,
        PSLAB_POOL_SIG_SIZE);
    pslab_pool->length = length;
    snprintf(pslab_pool->version, PSLAB_POOL_VER_SIZE, VERSION);
    pslab_pool->slab_page_size = slab_page_size;
    pslab_pool->slab_num = (pool_size - pslab_pool->length) /
        PSLAB_FRAME_SIZE(pslab_pool);
    pslab_start = PSLAB_FIRST_FRAME(pslab_pool);
    pslab_end = (pslab_t *) ((char *) pslab_start + pslab_pool->slab_num *
        PSLAB_FRAME_SIZE(pslab_pool));
    PSLAB_WALK(fp) {
        pmem_memset_nodrain(fp, 0, sizeof (pslab_t));
    }
    pslab_pool->slabclass_num = slabclass_num;
    for (i = 0; i < slabclass_num; i++)
        pslab_pool->slabclass_sizes[i] = slabclass_sizes[i];
    assert(process_started != 0);
    pslab_pool->process_started = (uint64_t) process_started;
    pslab_checksum_init();
    pslab_checksum_update(0, 0);
    pmem_persist(pslab_pool, pslab_pool->length);
    pslab_pool->valid = 1; /* commit point: pool is now recoverable */
    pmem_member_persist(pslab_pool, valid);
    return 0;
}
11,974
29.705128
90
c
null
NearPMSW-main/nearpm/shadow/memcached-pmem-sd/murmur3_hash.c
//----------------------------------------------------------------------------- // MurmurHash3 was written by Austin Appleby, and is placed in the public // domain. The author hereby disclaims copyright to this source code. // Note - The x86 and x64 versions do _not_ produce the same results, as the // algorithms are optimized for their respective platforms. You can still // compile and run any of them on any platform, but your performance with the // non-native version will be less than optimal. #include "murmur3_hash.h" //----------------------------------------------------------------------------- // Platform-specific functions and macros // Microsoft Visual Studio #if defined(_MSC_VER) #define FORCE_INLINE __forceinline #include <stdlib.h> #define ROTL32(x,y) _rotl(x,y) #define BIG_CONSTANT(x) (x) // Other compilers #else // defined(_MSC_VER) #define FORCE_INLINE inline __attribute__((always_inline)) static inline uint32_t rotl32 ( uint32_t x, int8_t r ) { return (x << r) | (x >> (32 - r)); } #define ROTL32(x,y) rotl32(x,y) #define BIG_CONSTANT(x) (x##LLU) #endif // !defined(_MSC_VER) //----------------------------------------------------------------------------- // Block read - if your platform needs to do endian-swapping or can only // handle aligned reads, do the conversion here static FORCE_INLINE uint32_t getblock32 ( const uint32_t * p, int i ) { return p[i]; } //----------------------------------------------------------------------------- // Finalization mix - force all bits of a hash block to avalanche static FORCE_INLINE uint32_t fmix32 ( uint32_t h ) { h ^= h >> 16; h *= 0x85ebca6b; h ^= h >> 13; h *= 0xc2b2ae35; h ^= h >> 16; return h; } //----------------------------------------------------------------------------- /* Definition modified slightly from the public domain interface (no seed + * return value */ uint32_t MurmurHash3_x86_32 ( const void * key, size_t length) { const uint8_t * data = (const uint8_t*)key; const int nblocks = length / 4; 
uint32_t h1 = 0; uint32_t c1 = 0xcc9e2d51; uint32_t c2 = 0x1b873593; //---------- // body const uint32_t * blocks = (const uint32_t *)(data + nblocks*4); for(int i = -nblocks; i; i++) { uint32_t k1 = getblock32(blocks,i); k1 *= c1; k1 = ROTL32(k1,15); k1 *= c2; h1 ^= k1; h1 = ROTL32(h1,13); h1 = h1*5+0xe6546b64; } //---------- // tail const uint8_t * tail = (const uint8_t*)(data + nblocks*4); uint32_t k1 = 0; switch(length & 3) { case 3: k1 ^= tail[2] << 16; case 2: k1 ^= tail[1] << 8; case 1: k1 ^= tail[0]; k1 *= c1; k1 = ROTL32(k1,15); k1 *= c2; h1 ^= k1; }; //---------- // finalization h1 ^= length; h1 = fmix32(h1); //*(uint32_t*)out = h1; return h1; }
2,826
21.616
79
c
null
NearPMSW-main/nearpm/shadow/memcached-pmem-sd/daemon.c
/* $Header: /cvsroot/wikipedia/willow/src/bin/willow/daemon.c,v 1.1 2005/05/02 19:15:21 kateturner Exp $ */
/* $NetBSD: daemon.c,v 1.9 2003/08/07 16:42:46 agc Exp $ */
/*-
 * Copyright (c) 1990, 1993
 * The Regents of the University of California. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 * may be used to endorse or promote products derived from this software
 * without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#if defined __SUNPRO_C || defined __DECC || defined __HP_cc
# pragma ident "@(#)$Header: /cvsroot/wikipedia/willow/src/bin/willow/daemon.c,v 1.1 2005/05/02 19:15:21 kateturner Exp $"
# pragma ident "$NetBSD: daemon.c,v 1.9 2003/08/07 16:42:46 agc Exp $"
#endif

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

#include "memcached.h"

/*
 * Detach the calling process from its controlling terminal and run it in
 * the background (classic BSD daemon(3) replacement).
 *
 * nochdir  - when 0, chdir to "/" so the daemon does not pin a mount point
 * noclose  - when 0, redirect stdin/stdout/stderr to /dev/null
 *
 * Returns 0 in the surviving child on success, -1 on failure.  The parent
 * process exits via _exit(); it does not return to the caller.
 */
int daemonize(int nochdir, int noclose)
{
    int fd;

    switch (fork()) {
    case -1:
        return (-1);
    case 0:
        break;              /* child continues as the daemon */
    default:
        _exit(EXIT_SUCCESS); /* parent exits without flushing stdio */
    }

    /* become session leader; drops the controlling tty */
    if (setsid() == -1)
        return (-1);

    if (nochdir == 0) {
        if(chdir("/") != 0) {
            perror("chdir");
            return (-1);
        }
    }

    if (noclose == 0 && (fd = open("/dev/null", O_RDWR, 0)) != -1) {
        if(dup2(fd, STDIN_FILENO) < 0) {
            perror("dup2 stdin");
            return (-1);
        }
        if(dup2(fd, STDOUT_FILENO) < 0) {
            perror("dup2 stdout");
            return (-1);
        }
        if(dup2(fd, STDERR_FILENO) < 0) {
            perror("dup2 stderr");
            return (-1);
        }

        /* only close fd if it was not one of the standard descriptors */
        if (fd > STDERR_FILENO) {
            if(close(fd) < 0) {
                perror("close");
                return (-1);
            }
        }
    }
    return (0);
}
3,082
33.255556
122
c
null
NearPMSW-main/nearpm/shadow/memcached-pmem-sd/crc32c.c
/* crc32c.c -- compute CRC-32C using the Intel crc32 instruction
 * Copyright (C) 2013 Mark Adler
 * Version 1.1  1 Aug 2013  Mark Adler
 */

/*
  This software is provided 'as-is', without any express or implied
  warranty.  In no event will the author be held liable for any damages
  arising from the use of this software.

  Permission is granted to anyone to use this software for any purpose,
  including commercial applications, and to alter it and redistribute it
  freely, subject to the following restrictions:

  1. The origin of this software must not be misrepresented; you must not
     claim that you wrote the original software. If you use this software
     in a product, an acknowledgment in the product documentation would be
     appreciated but is not required.
  2. Altered source versions must be plainly marked as such, and must not be
     misrepresented as being the original software.
  3. This notice may not be removed or altered from any source distribution.

  Mark Adler
  madler@alumni.caltech.edu
 */

/* Use hardware CRC instruction on Intel SSE 4.2 processors.  This computes a
   CRC-32C, *not* the CRC-32 used by Ethernet and zip, gzip, etc.  A software
   version is provided as a fall-back, as well as for speed comparisons. */

/* Version history:
   1.0  10 Feb 2013  First version
   1.1   1 Aug 2013  Correct comments on why three crc instructions in parallel
 */

/* This version has been modified by dormando for inclusion in memcached */

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <unistd.h>
#include <pthread.h>
#include "crc32c.h"

/* CRC-32C (iSCSI) polynomial in reversed bit order. */
#define POLY 0x82f63b78

/* Table for a quadword-at-a-time software crc. */
static pthread_once_t crc32c_once_sw = PTHREAD_ONCE_INIT;
static uint32_t crc32c_table[8][256];

/* Construct table for software CRC-32C calculation.
   Table [0] is the classic byte-at-a-time table; tables [1..7] let the
   software path consume eight bytes per iteration. */
static void crc32c_init_sw(void)
{
    uint32_t n, crc, k;

    for (n = 0; n < 256; n++) {
        crc = n;
        /* eight steps of the bit-reversed polynomial division */
        crc = crc & 1 ? (crc >> 1) ^ POLY : crc >> 1;
        crc = crc & 1 ? (crc >> 1) ^ POLY : crc >> 1;
        crc = crc & 1 ? (crc >> 1) ^ POLY : crc >> 1;
        crc = crc & 1 ? (crc >> 1) ^ POLY : crc >> 1;
        crc = crc & 1 ? (crc >> 1) ^ POLY : crc >> 1;
        crc = crc & 1 ? (crc >> 1) ^ POLY : crc >> 1;
        crc = crc & 1 ? (crc >> 1) ^ POLY : crc >> 1;
        crc = crc & 1 ? (crc >> 1) ^ POLY : crc >> 1;
        crc32c_table[0][n] = crc;
    }
    for (n = 0; n < 256; n++) {
        crc = crc32c_table[0][n];
        for (k = 1; k < 8; k++) {
            crc = crc32c_table[0][crc & 0xff] ^ (crc >> 8);
            crc32c_table[k][n] = crc;
        }
    }
}

/* Table-driven software version as a fall-back.  This is about 15 times slower
   than using the hardware instructions.  This assumes little-endian integers,
   as is the case on Intel processors that the assembler code here is for. */
static uint32_t crc32c_sw(uint32_t crci, const void *buf, size_t len)
{
    const unsigned char *next = buf;
    uint64_t crc;

    pthread_once(&crc32c_once_sw, crc32c_init_sw);
    crc = crci ^ 0xffffffff;
    /* byte-at-a-time until next is 8-byte aligned */
    while (len && ((uintptr_t)next & 7) != 0) {
        crc = crc32c_table[0][(crc ^ *next++) & 0xff] ^ (crc >> 8);
        len--;
    }
    /* eight bytes per iteration using the slice-by-8 tables */
    while (len >= 8) {
        crc ^= *(uint64_t *)next;
        crc = crc32c_table[7][crc & 0xff] ^
              crc32c_table[6][(crc >> 8) & 0xff] ^
              crc32c_table[5][(crc >> 16) & 0xff] ^
              crc32c_table[4][(crc >> 24) & 0xff] ^
              crc32c_table[3][(crc >> 32) & 0xff] ^
              crc32c_table[2][(crc >> 40) & 0xff] ^
              crc32c_table[1][(crc >> 48) & 0xff] ^
              crc32c_table[0][crc >> 56];
        next += 8;
        len -= 8;
    }
    /* remaining 0-7 bytes */
    while (len) {
        crc = crc32c_table[0][(crc ^ *next++) & 0xff] ^ (crc >> 8);
        len--;
    }
    return (uint32_t)crc ^ 0xffffffff;
}

/* Multiply a matrix times a vector over the Galois field of two elements,
   GF(2).  Each element is a bit in an unsigned integer.  mat must have at
   least as many entries as the power of two for most significant one bit in
   vec. */
static inline uint32_t gf2_matrix_times(uint32_t *mat, uint32_t vec)
{
    uint32_t sum;

    sum = 0;
    while (vec) {
        if (vec & 1)
            sum ^= *mat;
        vec >>= 1;
        mat++;
    }
    return sum;
}

/* Multiply a matrix by itself over GF(2).  Both mat and square must have 32
   rows. */
static inline void gf2_matrix_square(uint32_t *square, uint32_t *mat)
{
    int n;

    for (n = 0; n < 32; n++)
        square[n] = gf2_matrix_times(mat, mat[n]);
}

/* Construct an operator to apply len zeros to a crc.  len must be a power of
   two.  If len is not a power of two, then the result is the same as for the
   largest power of two less than len.  The result for len == 0 is the same as
   for len == 1.  A version of this routine could be easily written for any
   len, but that is not needed for this application. */
static void crc32c_zeros_op(uint32_t *even, size_t len)
{
    int n;
    uint32_t row;
    uint32_t odd[32];       /* odd-power-of-two zeros operator */

    /* put operator for one zero bit in odd */
    odd[0] = POLY;              /* CRC-32C polynomial */
    row = 1;
    for (n = 1; n < 32; n++) {
        odd[n] = row;
        row <<= 1;
    }

    /* put operator for two zero bits in even */
    gf2_matrix_square(even, odd);

    /* put operator for four zero bits in odd */
    gf2_matrix_square(odd, even);

    /* first square will put the operator for one zero byte (eight zero bits),
       in even -- next square puts operator for two zero bytes in odd, and so
       on, until len has been rotated down to zero */
    do {
        gf2_matrix_square(even, odd);
        len >>= 1;
        if (len == 0)
            return;
        gf2_matrix_square(odd, even);
        len >>= 1;
    } while (len);

    /* answer ended up in odd -- copy to even */
    for (n = 0; n < 32; n++)
        even[n] = odd[n];
}

/* Take a length and build four lookup tables for applying the zeros operator
   for that length, byte-by-byte on the operand. */
static void crc32c_zeros(uint32_t zeros[][256], size_t len)
{
    uint32_t n;
    uint32_t op[32];

    crc32c_zeros_op(op, len);
    for (n = 0; n < 256; n++) {
        zeros[0][n] = gf2_matrix_times(op, n);
        zeros[1][n] = gf2_matrix_times(op, n << 8);
        zeros[2][n] = gf2_matrix_times(op, n << 16);
        zeros[3][n] = gf2_matrix_times(op, n << 24);
    }
}

/* Apply the zeros operator table to crc. */
static inline uint32_t crc32c_shift(uint32_t zeros[][256], uint32_t crc)
{
    return zeros[0][crc & 0xff] ^ zeros[1][(crc >> 8) & 0xff] ^
           zeros[2][(crc >> 16) & 0xff] ^ zeros[3][crc >> 24];
}

/* Block sizes for three-way parallel crc computation.  LONG and SHORT must
   both be powers of two.  The associated string constants must be set
   accordingly, for use in constructing the assembler instructions. */
#define LONG 8192
#define LONGx1 "8192"
#define LONGx2 "16384"
#define SHORT 256
#define SHORTx1 "256"
#define SHORTx2 "512"

/* Tables for hardware crc that shift a crc by LONG and SHORT zeros. */
static pthread_once_t crc32c_once_hw = PTHREAD_ONCE_INIT;
static uint32_t crc32c_long[4][256];
static uint32_t crc32c_short[4][256];

/* Initialize tables for shifting crcs. */
static void crc32c_init_hw(void)
{
    crc32c_zeros(crc32c_long, LONG);
    crc32c_zeros(crc32c_short, SHORT);
}

/* Compute CRC-32C using the Intel hardware instruction. */
static uint32_t crc32c_hw(uint32_t crc, const void *buf, size_t len)
{
    const unsigned char *next = buf;
    const unsigned char *end;
    uint64_t crc0, crc1, crc2;      /* need to be 64 bits for crc32q */

    /* populate shift tables the first time through */
    pthread_once(&crc32c_once_hw, crc32c_init_hw);

    /* pre-process the crc */
    crc0 = crc ^ 0xffffffff;

    /* compute the crc for up to seven leading bytes to bring the data pointer
       to an eight-byte boundary */
    while (len && ((uintptr_t)next & 7) != 0) {
        __asm__("crc32b\t" "(%1), %0"
                : "=r"(crc0)
                : "r"(next), "0"(crc0));
        next++;
        len--;
    }

    /* compute the crc on sets of LONG*3 bytes, executing three independent crc
       instructions, each on LONG bytes -- this is optimized for the Nehalem,
       Westmere, Sandy Bridge, and Ivy Bridge architectures, which have a
       throughput of one crc per cycle, but a latency of three cycles */
    while (len >= LONG*3) {
        crc1 = 0;
        crc2 = 0;
        end = next + LONG;
        do {
            __asm__("crc32q\t" "(%3), %0\n\t"
                    "crc32q\t" LONGx1 "(%3), %1\n\t"
                    "crc32q\t" LONGx2 "(%3), %2"
                    : "=r"(crc0), "=r"(crc1), "=r"(crc2)
                    : "r"(next), "0"(crc0), "1"(crc1), "2"(crc2));
            next += 8;
        } while (next < end);
        /* fold the three partial crcs together via the zeros operator */
        crc0 = crc32c_shift(crc32c_long, crc0) ^ crc1;
        crc0 = crc32c_shift(crc32c_long, crc0) ^ crc2;
        next += LONG*2;
        len -= LONG*3;
    }

    /* do the same thing, but now on SHORT*3 blocks for the remaining data less
       than a LONG*3 block */
    while (len >= SHORT*3) {
        crc1 = 0;
        crc2 = 0;
        end = next + SHORT;
        do {
            __asm__("crc32q\t" "(%3), %0\n\t"
                    "crc32q\t" SHORTx1 "(%3), %1\n\t"
                    "crc32q\t" SHORTx2 "(%3), %2"
                    : "=r"(crc0), "=r"(crc1), "=r"(crc2)
                    : "r"(next), "0"(crc0), "1"(crc1), "2"(crc2));
            next += 8;
        } while (next < end);
        crc0 = crc32c_shift(crc32c_short, crc0) ^ crc1;
        crc0 = crc32c_shift(crc32c_short, crc0) ^ crc2;
        next += SHORT*2;
        len -= SHORT*3;
    }

    /* compute the crc on the remaining eight-byte units less than a SHORT*3
       block */
    end = next + (len - (len & 7));
    while (next < end) {
        __asm__("crc32q\t" "(%1), %0"
                : "=r"(crc0)
                : "r"(next), "0"(crc0));
        next += 8;
    }
    len &= 7;

    /* compute the crc for up to seven trailing bytes */
    while (len) {
        __asm__("crc32b\t" "(%1), %0"
                : "=r"(crc0)
                : "r"(next), "0"(crc0));
        next++;
        len--;
    }

    /* return a post-processed crc */
    return (uint32_t)crc0 ^ 0xffffffff;
}

/* Check for SSE 4.2.  SSE 4.2 was first supported in Nehalem processors
   introduced in November, 2008.  This does not check for the existence of the
   cpuid instruction itself, which was introduced on the 486SL in 1992, so this
   will fail on earlier x86 processors.  cpuid works on all Pentium and later
   processors. */
#define SSE42(have) \
    do { \
        uint32_t eax, ecx; \
        eax = 1; \
        __asm__("cpuid" \
                : "=c"(ecx) \
                : "a"(eax) \
                : "%ebx", "%edx"); \
        (have) = (ecx >> 20) & 1; \
    } while (0)

/* Compute a CRC-32C.  If the crc32 instruction is available, use the hardware
   version.  Otherwise, use the software version.
   Selects the implementation once at startup by assigning the crc32c
   function pointer (presumably declared in crc32c.h — confirm there). */
void crc32c_init(void) {
    int sse42;
    SSE42(sse42);
    if (sse42) {
        crc32c = crc32c_hw;
    } else {
        crc32c = crc32c_sw;
    }
}
11,318
31.90407
79
c
null
NearPMSW-main/nearpm/shadow/TATP_SD/tableEntries.h
/* Author: Vaibhav Gogte <vgogte@umich.edu> Aasheesh Kolli <akolli@umich.edu> This file defines the table entries used by TATP. */ struct subscriber_entry { unsigned s_id; // Subscriber id char sub_nbr[15]; // Subscriber number, s_id in 15 digit string, zeros padded short bit_1, bit_2, bit_3, bit_4, bit_5, bit_6, bit_7, bit_8, bit_9, bit_10; // randomly generated values 0/1 short hex_1, hex_2, hex_3, hex_4, hex_5, hex_6, hex_7, hex_8, hex_9, hex_10; // randomly generated values 0->15 short byte2_1, byte2_2, byte2_3, byte2_4, byte2_5, byte2_6, byte2_7, byte2_8, byte2_9, byte2_10; // randomly generated values 0->255 unsigned msc_location; // Randomly generated value 1->((2^32)-1) unsigned vlr_location; // Randomly generated value 1->((2^32)-1) char padding[40]; }; struct access_info_entry { unsigned s_id; //Subscriber id short ai_type; // Random value 1->4. A subscriber can have a max of 4 and all unique short data_1, data_2; // Randomly generated values 0->255 char data_3[3]; // random 3 char string. All upper case alphabets char data_4[5]; // random 5 char string. All upper case alphabets bool valid; bool padding_1[7]; char padding_2[4+32]; }; struct special_facility_entry { unsigned s_id; //Subscriber id short sf_type; // Random value 1->4. A subscriber can have a max of 4 and all unique short is_active; // 0(15%)/1(85%) short error_cntrl; // Randomly generated values 0->255 short data_a; // Randomly generated values 0->255 char data_b[5]; // random 5 char string. All upper case alphabets char padding_1[7]; bool valid; bool padding_2[4+32]; }; struct call_forwarding_entry { unsigned s_id; // Subscriber id from special facility short sf_type; // sf_type from special facility table int start_time; // 0 or 8 or 16 int end_time; // start_time+N, N randomly generated 1->8 char numberx[15]; // randomly generated 15 digit string char padding_1[7]; bool valid; bool padding_2[24]; };
1,993
35.254545
134
h
null
NearPMSW-main/nearpm/shadow/TPCC_SD/tpcc_db.h
/*
 * Author: Vaibhav Gogte <vgogte@umich.edu>
 *         Aasheesh Kolli <akolli@umich.edu>
 *
 * This file declares the TPCC database and the accessor transactions.
 */

/* Include guard added: this header previously had none, so a double
 * include produced class/struct redefinition errors. */
#ifndef TPCC_DB_H
#define TPCC_DB_H

#include "table_entries.h"
#include <atomic>
#include "simple_queue.h"
#include <pthread.h>
#include <cstdlib>
#include "../include/txopt.h"

typedef simple_queue queue_t;

/*
 * Per-thread shadow/undo log used by new_order_tx: before-images of the
 * rows a transaction touches, each paired with a *_valid flag so recovery
 * can tell which entries were fully captured.
 */
struct backUpLog {
  struct district_entry district_back;
  // fill_new_order_entry
  struct new_order_entry new_order_entry_back;
  // update_order_entry
  struct order_entry order_entry_back;
  // update_stock_entry (one per order line, up to 15 items)
  struct stock_entry stock_entry_back[15];
  int fill_new_order_entry_indx = 0;
  int update_order_entry_indx = 0;
  int update_stock_entry_indx[16];
  uint64_t district_back_valid;
  uint64_t fill_new_order_entry_back_valid;
  uint64_t update_order_entry_back_valid;
  uint64_t update_stock_entry_num_valid;
  // global log valid
  uint64_t log_valid;
};

class TPCC_DB {
  private:
    // Tables with size dependent on num warehouses
    short num_warehouses;
    short random_3000[3000];
    warehouse_entry* warehouse;
    district_entry* district;
    customer_entry* customer;
    stock_entry* stock;

    // Tables with slight variation in sizes (due to inserts/deletes etc.)
    history_entry* history;
    order_entry* order;
    new_order_entry* new_order;
    order_line_entry* order_line;

    // Fixed size table
    item_entry* item;

    unsigned long* rndm_seeds;   // per-thread RNG seeds

    queue_t* perTxLocks; // Array of queues of locks held by active Tx
    pthread_mutex_t* locks; // Array of locks held by the TxEngn. RDSs acquire locks through the TxEngn

    unsigned g_seed;     // seed for fastrand()

  public:
    struct backUpLog * backUpInst;  // per-thread undo logs

    TPCC_DB();
    ~TPCC_DB();

    // Allocate tables/locks for _num_warehouses and numThreads workers.
    void initialize(int _num_warehouses, int numThreads);
    void populate_tables();

    // Row constructors used by populate_tables().
    void fill_item_entry(int _i_id);
    void fill_warehouse_entry(int _w_id);
    void fill_stock_entry(int _s_w_id, int s_i_id);
    void fill_district_entry(int _d_w_id, int _d_id);
    void fill_customer_entry(int _c_w_id, int _c_d_id, int _c_id);
    void fill_history_entry(int _h_c_w_id, int _h_c_d_id, int _h_c_id);
    void fill_order_entry(int _o_w_id, int _o_d_id, int _o_id);
    void fill_order_line_entry(int _ol_w_id, int _ol_d_id, int _ol_o_id, int _o_ol_cnt, long long _o_entry_d);
    void fill_new_order_entry(int _no_w_id, int _no_d_id, int _no_o_id, int threadId);

    // Random-data helpers for table population.
    void random_a_string(int min, int max, char* string_ptr);
    void random_n_string(int min, int max, char* string_ptr);
    void random_a_original_string(int min, int max, int probability, char* string_ptr);
    void random_zip(char* string_ptr);
    void fill_time(long long &time_slot);
    int rand_local(int min, int max);

    // The TPC-C NewOrder transaction.
    void new_order_tx(int threadId, int w_id, int d_id, int c_id);

    // Deep-copy helpers used for logging before-images.
    void copy_district_info(district_entry &dest, district_entry &source);
    void copy_customer_info(customer_entry &dest, customer_entry &source);
    void copy_new_order_info(new_order_entry &dest, new_order_entry &source);
    void copy_order_info(order_entry &dest, order_entry &source);
    void copy_stock_info(stock_entry &dest, stock_entry &source);
    void copy_order_line_info(order_line_entry &dest, order_line_entry &source);

    void update_order_entry(int _w_id, short _d_id, int _o_id, int _c_id, int _ol_cnt, int threadId);
    void update_stock_entry(int threadId, int _w_id, int _i_id, int _d_id, float &amount, int itr);

    unsigned long get_random(int thread_id, int min, int max);
    unsigned long get_random(int thread_id);
    void printStackPointer(int* sp, int thread_id);

    // Lock management for transactional execution.
    void acquire_locks(int thread_id, queue_t &reqLocks);
    void release_locks(int thread_id);

    unsigned fastrand();
};

#endif /* TPCC_DB_H */
3,755
30.041322
110
h
cba-pipeline-public
cba-pipeline-public-master/containernet/mnexec.c
/* mnexec: execution utility for mininet
 *
 * Starts up programs and does things that are slow or
 * difficult in Python, including:
 *
 *  - closing all file descriptors except stdin/out/error
 *  - detaching from a controlling tty using setsid
 *  - running in network and mount namespaces
 *  - printing out the pid of a process so we can identify it later
 *  - attaching to a namespace and cgroup
 *  - setting RT scheduling
 *
 * Partially based on public domain setsid(1)
*/

#define _GNU_SOURCE
#include <stdio.h>
#include <linux/sched.h>
#include <unistd.h>
#include <limits.h>
#include <syscall.h>
#include <fcntl.h>
#include <stdlib.h>
#include <sched.h>
#include <ctype.h>
#include <sys/mount.h>

#if !defined(VERSION)
#define VERSION "(devel)"
#endif

/* Print usage/help text for the utility. */
void usage(char *name)
{
    printf("Execution utility for Mininet\n\n"
           "Usage: %s [-cdnp] [-a pid] [-g group] [-r rtprio] cmd args...\n\n"
           "Options:\n"
           " -c: close all file descriptors except stdin/out/error\n"
           " -d: detach from tty by calling setsid()\n"
           " -n: run in new network and mount namespaces\n"
           " -p: print ^A + pid\n"
           " -a pid: attach to pid's network and mount namespaces\n"
           " -g group: add to cgroup\n"
           " -r rtprio: run with SCHED_RR (usually requires -g)\n"
           " -v: print version\n",
           name);
}

/* Raw setns(2) wrapper: glibc did not expose setns when this was written. */
int setns(int fd, int nstype)
{
    return syscall(__NR_setns, fd, nstype);
}

/* Validate alphanumeric path foo1/bar2/baz */
/* Exits the process on any character outside [A-Za-z0-9/]; in particular
 * '.' is rejected, so "../" traversal cannot reach outside the cgroup
 * hierarchy. */
void validate(char *path)
{
    char *s;
    for (s=path; *s; s++) {
        if (!isalnum(*s) && *s != '/') {
            fprintf(stderr, "invalid path: %s\n", path);
            exit(1);
        }
    }
}

/* Add our pid to cgroup */
/* Writes getpid() into the tasks file of gname under each of the cpu,
 * cpuacct and cpuset controllers; succeeds if at least one controller
 * accepted the pid, exits otherwise. */
void cgroup(char *gname)
{
    static char path[PATH_MAX];
    static char *groups[] = {
        "cpu", "cpuacct", "cpuset", NULL
    };
    char **gptr;
    pid_t pid = getpid();
    int count = 0;
    validate(gname);
    for (gptr = groups; *gptr; gptr++) {
        FILE *f;
        snprintf(path, PATH_MAX, "/sys/fs/cgroup/%s/%s/tasks", *gptr,
                 gname);
        f = fopen(path, "w");
        if (f) {
            count++;
            fprintf(f, "%d\n", pid);
            fclose(f);
        }
    }
    if (!count) {
        fprintf(stderr, "cgroup: could not add to cgroup %s\n",
            gname);
        exit(1);
    }
}

/* Option-driven main: each flag takes effect in command-line order, then
 * any remaining arguments are exec'd as the command to run. */
int main(int argc, char *argv[])
{
    int c;
    int fd;
    char path[PATH_MAX];
    int nsid;
    int pid;
    /* remember cwd so -a can chdir back after switching namespaces */
    char *cwd = get_current_dir_name();

    static struct sched_param sp;
    while ((c = getopt(argc, argv, "+cdnpa:g:r:vh")) != -1)
        switch(c) {
        case 'c':
            /* close file descriptors except stdin/out/error */
            for (fd = getdtablesize(); fd > 2; fd--)
                close(fd);
            break;
        case 'd':
            /* detach from tty */
            if (getpgrp() == getpid()) {
                switch(fork()) {
                    case -1:
                        perror("fork");
                        return 1;
                    case 0:     /* child */
                        break;
                    default:    /* parent */
                        return 0;
                }
            }
            setsid();
            break;
        case 'n':
            /* run in network and mount namespaces */
            if (unshare(CLONE_NEWNET|CLONE_NEWNS) == -1) {
                perror("unshare");
                return 1;
            }

            /* Mark our whole hierarchy recursively as private, so that our
             * mounts do not propagate to other processes.
             */

            if (mount("none", "/", NULL, MS_REC|MS_PRIVATE, NULL) == -1) {
                perror("remount");
                return 1;
            }

            /* mount sysfs to pick up the new network namespace */
            if (mount("sysfs", "/sys", "sysfs", MS_MGC_VAL, NULL) == -1) {
                perror("mount");
                return 1;
            }
            break;
        case 'p':
            /* print pid */
            printf("\001%d\n", getpid());
            fflush(stdout);
            break;
        case 'a':
            /* Attach to pid's network namespace and mount namespace */
            pid = atoi(optarg);
            sprintf(path, "/proc/%d/ns/net", pid);
            nsid = open(path, O_RDONLY);
            if (nsid < 0) {
                perror(path);
                return 1;
            }
            if (setns(nsid, 0) != 0) {
                perror("setns");
                return 1;
            }
            /* Plan A: call setns() to attach to mount namespace */
            sprintf(path, "/proc/%d/ns/mnt", pid);
            nsid = open(path, O_RDONLY);
            if (nsid < 0 || setns(nsid, 0) != 0) {
                /* Plan B: chroot/chdir into pid's root file system */
                sprintf(path, "/proc/%d/root", pid);
                if (chroot(path) < 0) {
                    perror(path);
                    return 1;
                }
            }
            /* chdir to correct working directory */
            if (chdir(cwd) != 0) {
                perror(cwd);
                return 1;
            }
            break;
        case 'g':
            /* Attach to cgroup */
            cgroup(optarg);
            break;
        case 'r':
            /* Set RT scheduling priority */
            sp.sched_priority = atoi(optarg);
            if (sched_setscheduler(getpid(), SCHED_RR, &sp) < 0) {
                perror("sched_setscheduler");
                return 1;
            }
            break;
        case 'v':
            printf("%s\n", VERSION);
            exit(0);
        case 'h':
            usage(argv[0]);
            exit(0);
        default:
            usage(argv[0]);
            exit(1);
        }

    if (optind < argc) {
        /* replace ourselves with the requested command */
        execvp(argv[optind], &argv[optind]);
        perror(argv[optind]);
        return 1;
    }

    usage(argv[0]);

    return 0;
}
6,024
26.764977
78
c
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/code/ndn-dash/libdash/libcurl/include/curl/multi.h
#ifndef __CURL_MULTI_H #define __CURL_MULTI_H /*************************************************************************** * _ _ ____ _ * Project ___| | | | _ \| | * / __| | | | |_) | | * | (__| |_| | _ <| |___ * \___|\___/|_| \_\_____| * * Copyright (C) 1998 - 2012, Daniel Stenberg, <daniel@haxx.se>, et al. * * This software is licensed as described in the file COPYING, which * you should have received as part of this distribution. The terms * are also available at http://curl.haxx.se/docs/copyright.html. * * You may opt to use, copy, modify, merge, publish, distribute and/or sell * copies of the Software, and permit persons to whom the Software is * furnished to do so, under the terms of the COPYING file. * * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY * KIND, either express or implied. * ***************************************************************************/ /* This is an "external" header file. Don't give away any internals here! GOALS o Enable a "pull" interface. The application that uses libcurl decides where and when to ask libcurl to get/send data. o Enable multiple simultaneous transfers in the same thread without making it complicated for the application. o Enable the application to select() on its own file descriptors and curl's file descriptors simultaneous easily. */ /* * This header file should not really need to include "curl.h" since curl.h * itself includes this file and we expect user applications to do #include * <curl/curl.h> without the need for especially including multi.h. * * For some reason we added this include here at one point, and rather than to * break existing (wrongly written) libcurl applications, we leave it as-is * but with this warning attached. 
*/ #include "curl.h" #ifdef __cplusplus extern "C" { #endif typedef void CURLM; typedef enum { CURLM_CALL_MULTI_PERFORM = -1, /* please call curl_multi_perform() or curl_multi_socket*() soon */ CURLM_OK, CURLM_BAD_HANDLE, /* the passed-in handle is not a valid CURLM handle */ CURLM_BAD_EASY_HANDLE, /* an easy handle was not good/valid */ CURLM_OUT_OF_MEMORY, /* if you ever get this, you're in deep sh*t */ CURLM_INTERNAL_ERROR, /* this is a libcurl bug */ CURLM_BAD_SOCKET, /* the passed in socket argument did not match */ CURLM_UNKNOWN_OPTION, /* curl_multi_setopt() with unsupported option */ CURLM_LAST } CURLMcode; /* just to make code nicer when using curl_multi_socket() you can now check for CURLM_CALL_MULTI_SOCKET too in the same style it works for curl_multi_perform() and CURLM_CALL_MULTI_PERFORM */ #define CURLM_CALL_MULTI_SOCKET CURLM_CALL_MULTI_PERFORM typedef enum { CURLMSG_NONE, /* first, not used */ CURLMSG_DONE, /* This easy handle has completed. 'result' contains the CURLcode of the transfer */ CURLMSG_LAST /* last, not used */ } CURLMSG; struct CURLMsg { CURLMSG msg; /* what this message means */ CURL *easy_handle; /* the handle it concerns */ union { void *whatever; /* message-specific data */ CURLcode result; /* return code for transfer */ } data; }; typedef struct CURLMsg CURLMsg; /* Based on poll(2) structure and values. * We don't use pollfd and POLL* constants explicitly * to cover platforms without poll(). */ #define CURL_WAIT_POLLIN 0x0001 #define CURL_WAIT_POLLPRI 0x0002 #define CURL_WAIT_POLLOUT 0x0004 struct curl_waitfd { curl_socket_t fd; short events; short revents; /* not supported yet */ }; /* * Name: curl_multi_init() * * Desc: inititalize multi-style curl usage * * Returns: a new CURLM handle to use in all 'curl_multi' functions. */ CURL_EXTERN CURLM *curl_multi_init(void); /* * Name: curl_multi_add_handle() * * Desc: add a standard curl handle to the multi stack * * Returns: CURLMcode type, general multi error code. 
*/ CURL_EXTERN CURLMcode curl_multi_add_handle(CURLM *multi_handle, CURL *curl_handle); /* * Name: curl_multi_remove_handle() * * Desc: removes a curl handle from the multi stack again * * Returns: CURLMcode type, general multi error code. */ CURL_EXTERN CURLMcode curl_multi_remove_handle(CURLM *multi_handle, CURL *curl_handle); /* * Name: curl_multi_fdset() * * Desc: Ask curl for its fd_set sets. The app can use these to select() or * poll() on. We want curl_multi_perform() called as soon as one of * them are ready. * * Returns: CURLMcode type, general multi error code. */ CURL_EXTERN CURLMcode curl_multi_fdset(CURLM *multi_handle, fd_set *read_fd_set, fd_set *write_fd_set, fd_set *exc_fd_set, int *max_fd); /* * Name: curl_multi_wait() * * Desc: Poll on all fds within a CURLM set as well as any * additional fds passed to the function. * * Returns: CURLMcode type, general multi error code. */ CURL_EXTERN CURLMcode curl_multi_wait(CURLM *multi_handle, struct curl_waitfd extra_fds[], unsigned int extra_nfds, int timeout_ms, int *ret); /* * Name: curl_multi_perform() * * Desc: When the app thinks there's data available for curl it calls this * function to read/write whatever there is right now. This returns * as soon as the reads and writes are done. This function does not * require that there actually is data available for reading or that * data can be written, it can be called just in case. It returns * the number of handles that still transfer data in the second * argument's integer-pointer. * * Returns: CURLMcode type, general multi error code. *NOTE* that this only * returns errors etc regarding the whole multi stack. There might * still have occurred problems on invidual transfers even when this * returns OK. */ CURL_EXTERN CURLMcode curl_multi_perform(CURLM *multi_handle, int *running_handles); /* * Name: curl_multi_cleanup() * * Desc: Cleans up and removes a whole multi stack. It does not free or * touch any individual easy handles in any way. 
We need to define * in what state those handles will be if this function is called * in the middle of a transfer. * * Returns: CURLMcode type, general multi error code. */ CURL_EXTERN CURLMcode curl_multi_cleanup(CURLM *multi_handle); /* * Name: curl_multi_info_read() * * Desc: Ask the multi handle if there's any messages/informationals from * the individual transfers. Messages include informationals such as * error code from the transfer or just the fact that a transfer is * completed. More details on these should be written down as well. * * Repeated calls to this function will return a new struct each * time, until a special "end of msgs" struct is returned as a signal * that there is no more to get at this point. * * The data the returned pointer points to will not survive calling * curl_multi_cleanup(). * * The 'CURLMsg' struct is meant to be very simple and only contain * very basic informations. If more involved information is wanted, * we will provide the particular "transfer handle" in that struct * and that should/could/would be used in subsequent * curl_easy_getinfo() calls (or similar). The point being that we * must never expose complex structs to applications, as then we'll * undoubtably get backwards compatibility problems in the future. * * Returns: A pointer to a filled-in struct, or NULL if it failed or ran out * of structs. It also writes the number of messages left in the * queue (after this read) in the integer the second argument points * to. */ CURL_EXTERN CURLMsg *curl_multi_info_read(CURLM *multi_handle, int *msgs_in_queue); /* * Name: curl_multi_strerror() * * Desc: The curl_multi_strerror function may be used to turn a CURLMcode * value into the equivalent human readable error string. This is * useful for printing meaningful error messages. * * Returns: A pointer to a zero-terminated error message. 
*/ CURL_EXTERN const char *curl_multi_strerror(CURLMcode); /* * Name: curl_multi_socket() and * curl_multi_socket_all() * * Desc: An alternative version of curl_multi_perform() that allows the * application to pass in one of the file descriptors that have been * detected to have "action" on them and let libcurl perform. * See man page for details. */ #define CURL_POLL_NONE 0 #define CURL_POLL_IN 1 #define CURL_POLL_OUT 2 #define CURL_POLL_INOUT 3 #define CURL_POLL_REMOVE 4 #define CURL_SOCKET_TIMEOUT CURL_SOCKET_BAD #define CURL_CSELECT_IN 0x01 #define CURL_CSELECT_OUT 0x02 #define CURL_CSELECT_ERR 0x04 typedef int (*curl_socket_callback)(CURL *easy, /* easy handle */ curl_socket_t s, /* socket */ int what, /* see above */ void *userp, /* private callback pointer */ void *socketp); /* private socket pointer */ /* * Name: curl_multi_timer_callback * * Desc: Called by libcurl whenever the library detects a change in the * maximum number of milliseconds the app is allowed to wait before * curl_multi_socket() or curl_multi_perform() must be called * (to allow libcurl's timed events to take place). * * Returns: The callback should return zero. 
*/ typedef int (*curl_multi_timer_callback)(CURLM *multi, /* multi handle */ long timeout_ms, /* see above */ void *userp); /* private callback pointer */ CURL_EXTERN CURLMcode curl_multi_socket(CURLM *multi_handle, curl_socket_t s, int *running_handles); CURL_EXTERN CURLMcode curl_multi_socket_action(CURLM *multi_handle, curl_socket_t s, int ev_bitmask, int *running_handles); CURL_EXTERN CURLMcode curl_multi_socket_all(CURLM *multi_handle, int *running_handles); #ifndef CURL_ALLOW_OLD_MULTI_SOCKET /* This macro below was added in 7.16.3 to push users who recompile to use the new curl_multi_socket_action() instead of the old curl_multi_socket() */ #define curl_multi_socket(x,y,z) curl_multi_socket_action(x,y,0,z) #endif /* * Name: curl_multi_timeout() * * Desc: Returns the maximum number of milliseconds the app is allowed to * wait before curl_multi_socket() or curl_multi_perform() must be * called (to allow libcurl's timed events to take place). * * Returns: CURLM error code. */ CURL_EXTERN CURLMcode curl_multi_timeout(CURLM *multi_handle, long *milliseconds); #undef CINIT /* re-using the same name as in curl.h */ #ifdef CURL_ISOCPP #define CINIT(name,type,num) CURLMOPT_ ## name = CURLOPTTYPE_ ## type + num #else /* The macro "##" is ISO C, we assume pre-ISO C doesn't support it. 
*/ #define LONG CURLOPTTYPE_LONG #define OBJECTPOINT CURLOPTTYPE_OBJECTPOINT #define FUNCTIONPOINT CURLOPTTYPE_FUNCTIONPOINT #define OFF_T CURLOPTTYPE_OFF_T #define CINIT(name,type,number) CURLMOPT_/**/name = type + number #endif typedef enum { /* This is the socket callback function pointer */ CINIT(SOCKETFUNCTION, FUNCTIONPOINT, 1), /* This is the argument passed to the socket callback */ CINIT(SOCKETDATA, OBJECTPOINT, 2), /* set to 1 to enable pipelining for this multi handle */ CINIT(PIPELINING, LONG, 3), /* This is the timer callback function pointer */ CINIT(TIMERFUNCTION, FUNCTIONPOINT, 4), /* This is the argument passed to the timer callback */ CINIT(TIMERDATA, OBJECTPOINT, 5), /* maximum number of entries in the connection cache */ CINIT(MAXCONNECTS, LONG, 6), CURLMOPT_LASTENTRY /* the last unused */ } CURLMoption; /* * Name: curl_multi_setopt() * * Desc: Sets options for the multi handle. * * Returns: CURLM error code. */ CURL_EXTERN CURLMcode curl_multi_setopt(CURLM *multi_handle, CURLMoption option, ...); /* * Name: curl_multi_assign() * * Desc: This function sets an association in the multi handle between the * given socket and a private pointer of the application. This is * (only) useful for curl_multi_socket uses. * * Returns: CURLM error code. */ CURL_EXTERN CURLMcode curl_multi_assign(CURLM *multi_handle, curl_socket_t sockfd, void *sockp); #ifdef __cplusplus } /* end of extern "C" */ #endif #endif
13,836
36.096515
79
h
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/code/ndn-dash/libdash/libcurl/include/curl/curlbuild.h
#ifndef __CURL_CURLBUILD_H #define __CURL_CURLBUILD_H /*************************************************************************** * _ _ ____ _ * Project ___| | | | _ \| | * / __| | | | |_) | | * | (__| |_| | _ <| |___ * \___|\___/|_| \_\_____| * * Copyright (C) 1998 - 2010, Daniel Stenberg, <daniel@haxx.se>, et al. * * This software is licensed as described in the file COPYING, which * you should have received as part of this distribution. The terms * are also available at http://curl.haxx.se/docs/copyright.html. * * You may opt to use, copy, modify, merge, publish, distribute and/or sell * copies of the Software, and permit persons to whom the Software is * furnished to do so, under the terms of the COPYING file. * * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY * KIND, either express or implied. * ***************************************************************************/ /* ================================================================ */ /* NOTES FOR CONFIGURE CAPABLE SYSTEMS */ /* ================================================================ */ /* * NOTE 1: * ------- * * See file include/curl/curlbuild.h.in, run configure, and forget * that this file exists it is only used for non-configure systems. * But you can keep reading if you want ;-) * */ /* ================================================================ */ /* NOTES FOR NON-CONFIGURE SYSTEMS */ /* ================================================================ */ /* * NOTE 1: * ------- * * Nothing in this file is intended to be modified or adjusted by the * curl library user nor by the curl library builder. 
* * If you think that something actually needs to be changed, adjusted * or fixed in this file, then, report it on the libcurl development * mailing list: http://cool.haxx.se/mailman/listinfo/curl-library/ * * Try to keep one section per platform, compiler and architecture, * otherwise, if an existing section is reused for a different one and * later on the original is adjusted, probably the piggybacking one can * be adversely changed. * * In order to differentiate between platforms/compilers/architectures * use only compiler built in predefined preprocessor symbols. * * This header file shall only export symbols which are 'curl' or 'CURL' * prefixed, otherwise public name space would be polluted. * * NOTE 2: * ------- * * For any given platform/compiler curl_off_t must be typedef'ed to a * 64-bit wide signed integral data type. The width of this data type * must remain constant and independent of any possible large file * support settings. * * As an exception to the above, curl_off_t shall be typedef'ed to a * 32-bit wide signed integral data type if there is no 64-bit type. * * As a general rule, curl_off_t shall not be mapped to off_t. This * rule shall only be violated if off_t is the only 64-bit data type * available and the size of off_t is independent of large file support * settings. Keep your build on the safe side avoiding an off_t gating. * If you have a 64-bit off_t then take for sure that another 64-bit * data type exists, dig deeper and you will find it. * * NOTE 3: * ------- * * Right now you might be staring at file include/curl/curlbuild.h.dist or * at file include/curl/curlbuild.h, this is due to the following reason: * file include/curl/curlbuild.h.dist is renamed to include/curl/curlbuild.h * when the libcurl source code distribution archive file is created. * * File include/curl/curlbuild.h.dist is not included in the distribution * archive. File include/curl/curlbuild.h is not present in the git tree. 
* * The distributed include/curl/curlbuild.h file is only intended to be used * on systems which can not run the also distributed configure script. * * On systems capable of running the configure script, the configure process * will overwrite the distributed include/curl/curlbuild.h file with one that * is suitable and specific to the library being configured and built, which * is generated from the include/curl/curlbuild.h.in template file. * * If you check out from git on a non-configure platform, you must run the * appropriate buildconf* script to set up curlbuild.h and other local files. * */ /* ================================================================ */ /* DEFINITION OF THESE SYMBOLS SHALL NOT TAKE PLACE ANYWHERE ELSE */ /* ================================================================ */ #ifdef CURL_SIZEOF_LONG # error "CURL_SIZEOF_LONG shall not be defined except in curlbuild.h" Error Compilation_aborted_CURL_SIZEOF_LONG_already_defined #endif #ifdef CURL_TYPEOF_CURL_SOCKLEN_T # error "CURL_TYPEOF_CURL_SOCKLEN_T shall not be defined except in curlbuild.h" Error Compilation_aborted_CURL_TYPEOF_CURL_SOCKLEN_T_already_defined #endif #ifdef CURL_SIZEOF_CURL_SOCKLEN_T # error "CURL_SIZEOF_CURL_SOCKLEN_T shall not be defined except in curlbuild.h" Error Compilation_aborted_CURL_SIZEOF_CURL_SOCKLEN_T_already_defined #endif #ifdef CURL_TYPEOF_CURL_OFF_T # error "CURL_TYPEOF_CURL_OFF_T shall not be defined except in curlbuild.h" Error Compilation_aborted_CURL_TYPEOF_CURL_OFF_T_already_defined #endif #ifdef CURL_FORMAT_CURL_OFF_T # error "CURL_FORMAT_CURL_OFF_T shall not be defined except in curlbuild.h" Error Compilation_aborted_CURL_FORMAT_CURL_OFF_T_already_defined #endif #ifdef CURL_FORMAT_CURL_OFF_TU # error "CURL_FORMAT_CURL_OFF_TU shall not be defined except in curlbuild.h" Error Compilation_aborted_CURL_FORMAT_CURL_OFF_TU_already_defined #endif #ifdef CURL_FORMAT_OFF_T # error "CURL_FORMAT_OFF_T shall not be defined except in curlbuild.h" Error 
Compilation_aborted_CURL_FORMAT_OFF_T_already_defined #endif #ifdef CURL_SIZEOF_CURL_OFF_T # error "CURL_SIZEOF_CURL_OFF_T shall not be defined except in curlbuild.h" Error Compilation_aborted_CURL_SIZEOF_CURL_OFF_T_already_defined #endif #ifdef CURL_SUFFIX_CURL_OFF_T # error "CURL_SUFFIX_CURL_OFF_T shall not be defined except in curlbuild.h" Error Compilation_aborted_CURL_SUFFIX_CURL_OFF_T_already_defined #endif #ifdef CURL_SUFFIX_CURL_OFF_TU # error "CURL_SUFFIX_CURL_OFF_TU shall not be defined except in curlbuild.h" Error Compilation_aborted_CURL_SUFFIX_CURL_OFF_TU_already_defined #endif /* ================================================================ */ /* EXTERNAL INTERFACE SETTINGS FOR NON-CONFIGURE SYSTEMS ONLY */ /* ================================================================ */ #if defined(__DJGPP__) || defined(__GO32__) # if defined(__DJGPP__) && (__DJGPP__ > 1) # define CURL_SIZEOF_LONG 4 # define CURL_TYPEOF_CURL_OFF_T long long # define CURL_FORMAT_CURL_OFF_T "lld" # define CURL_FORMAT_CURL_OFF_TU "llu" # define CURL_FORMAT_OFF_T "%lld" # define CURL_SIZEOF_CURL_OFF_T 8 # define CURL_SUFFIX_CURL_OFF_T LL # define CURL_SUFFIX_CURL_OFF_TU ULL # else # define CURL_SIZEOF_LONG 4 # define CURL_TYPEOF_CURL_OFF_T long # define CURL_FORMAT_CURL_OFF_T "ld" # define CURL_FORMAT_CURL_OFF_TU "lu" # define CURL_FORMAT_OFF_T "%ld" # define CURL_SIZEOF_CURL_OFF_T 4 # define CURL_SUFFIX_CURL_OFF_T L # define CURL_SUFFIX_CURL_OFF_TU UL # endif # define CURL_TYPEOF_CURL_SOCKLEN_T int # define CURL_SIZEOF_CURL_SOCKLEN_T 4 #elif defined(__SALFORDC__) # define CURL_SIZEOF_LONG 4 # define CURL_TYPEOF_CURL_OFF_T long # define CURL_FORMAT_CURL_OFF_T "ld" # define CURL_FORMAT_CURL_OFF_TU "lu" # define CURL_FORMAT_OFF_T "%ld" # define CURL_SIZEOF_CURL_OFF_T 4 # define CURL_SUFFIX_CURL_OFF_T L # define CURL_SUFFIX_CURL_OFF_TU UL # define CURL_TYPEOF_CURL_SOCKLEN_T int # define CURL_SIZEOF_CURL_SOCKLEN_T 4 #elif defined(__BORLANDC__) # if (__BORLANDC__ < 0x520) # define 
CURL_SIZEOF_LONG 4 # define CURL_TYPEOF_CURL_OFF_T long # define CURL_FORMAT_CURL_OFF_T "ld" # define CURL_FORMAT_CURL_OFF_TU "lu" # define CURL_FORMAT_OFF_T "%ld" # define CURL_SIZEOF_CURL_OFF_T 4 # define CURL_SUFFIX_CURL_OFF_T L # define CURL_SUFFIX_CURL_OFF_TU UL # else # define CURL_SIZEOF_LONG 4 # define CURL_TYPEOF_CURL_OFF_T __int64 # define CURL_FORMAT_CURL_OFF_T "I64d" # define CURL_FORMAT_CURL_OFF_TU "I64u" # define CURL_FORMAT_OFF_T "%I64d" # define CURL_SIZEOF_CURL_OFF_T 8 # define CURL_SUFFIX_CURL_OFF_T i64 # define CURL_SUFFIX_CURL_OFF_TU ui64 # endif # define CURL_TYPEOF_CURL_SOCKLEN_T int # define CURL_SIZEOF_CURL_SOCKLEN_T 4 #elif defined(__TURBOC__) # define CURL_SIZEOF_LONG 4 # define CURL_TYPEOF_CURL_OFF_T long # define CURL_FORMAT_CURL_OFF_T "ld" # define CURL_FORMAT_CURL_OFF_TU "lu" # define CURL_FORMAT_OFF_T "%ld" # define CURL_SIZEOF_CURL_OFF_T 4 # define CURL_SUFFIX_CURL_OFF_T L # define CURL_SUFFIX_CURL_OFF_TU UL # define CURL_TYPEOF_CURL_SOCKLEN_T int # define CURL_SIZEOF_CURL_SOCKLEN_T 4 #elif defined(__WATCOMC__) # if defined(__386__) # define CURL_SIZEOF_LONG 4 # define CURL_TYPEOF_CURL_OFF_T __int64 # define CURL_FORMAT_CURL_OFF_T "I64d" # define CURL_FORMAT_CURL_OFF_TU "I64u" # define CURL_FORMAT_OFF_T "%I64d" # define CURL_SIZEOF_CURL_OFF_T 8 # define CURL_SUFFIX_CURL_OFF_T i64 # define CURL_SUFFIX_CURL_OFF_TU ui64 # else # define CURL_SIZEOF_LONG 4 # define CURL_TYPEOF_CURL_OFF_T long # define CURL_FORMAT_CURL_OFF_T "ld" # define CURL_FORMAT_CURL_OFF_TU "lu" # define CURL_FORMAT_OFF_T "%ld" # define CURL_SIZEOF_CURL_OFF_T 4 # define CURL_SUFFIX_CURL_OFF_T L # define CURL_SUFFIX_CURL_OFF_TU UL # endif # define CURL_TYPEOF_CURL_SOCKLEN_T int # define CURL_SIZEOF_CURL_SOCKLEN_T 4 #elif defined(__POCC__) # if (__POCC__ < 280) # define CURL_SIZEOF_LONG 4 # define CURL_TYPEOF_CURL_OFF_T long # define CURL_FORMAT_CURL_OFF_T "ld" # define CURL_FORMAT_CURL_OFF_TU "lu" # define CURL_FORMAT_OFF_T "%ld" # define CURL_SIZEOF_CURL_OFF_T 4 # 
define CURL_SUFFIX_CURL_OFF_T L # define CURL_SUFFIX_CURL_OFF_TU UL # elif defined(_MSC_VER) # define CURL_SIZEOF_LONG 4 # define CURL_TYPEOF_CURL_OFF_T __int64 # define CURL_FORMAT_CURL_OFF_T "I64d" # define CURL_FORMAT_CURL_OFF_TU "I64u" # define CURL_FORMAT_OFF_T "%I64d" # define CURL_SIZEOF_CURL_OFF_T 8 # define CURL_SUFFIX_CURL_OFF_T i64 # define CURL_SUFFIX_CURL_OFF_TU ui64 # else # define CURL_SIZEOF_LONG 4 # define CURL_TYPEOF_CURL_OFF_T long long # define CURL_FORMAT_CURL_OFF_T "lld" # define CURL_FORMAT_CURL_OFF_TU "llu" # define CURL_FORMAT_OFF_T "%lld" # define CURL_SIZEOF_CURL_OFF_T 8 # define CURL_SUFFIX_CURL_OFF_T LL # define CURL_SUFFIX_CURL_OFF_TU ULL # endif # define CURL_TYPEOF_CURL_SOCKLEN_T int # define CURL_SIZEOF_CURL_SOCKLEN_T 4 #elif defined(__LCC__) # define CURL_SIZEOF_LONG 4 # define CURL_TYPEOF_CURL_OFF_T long # define CURL_FORMAT_CURL_OFF_T "ld" # define CURL_FORMAT_CURL_OFF_TU "lu" # define CURL_FORMAT_OFF_T "%ld" # define CURL_SIZEOF_CURL_OFF_T 4 # define CURL_SUFFIX_CURL_OFF_T L # define CURL_SUFFIX_CURL_OFF_TU UL # define CURL_TYPEOF_CURL_SOCKLEN_T int # define CURL_SIZEOF_CURL_SOCKLEN_T 4 #elif defined(__SYMBIAN32__) # if defined(__EABI__) /* Treat all ARM compilers equally */ # define CURL_SIZEOF_LONG 4 # define CURL_TYPEOF_CURL_OFF_T long long # define CURL_FORMAT_CURL_OFF_T "lld" # define CURL_FORMAT_CURL_OFF_TU "llu" # define CURL_FORMAT_OFF_T "%lld" # define CURL_SIZEOF_CURL_OFF_T 8 # define CURL_SUFFIX_CURL_OFF_T LL # define CURL_SUFFIX_CURL_OFF_TU ULL # elif defined(__CW32__) # pragma longlong on # define CURL_SIZEOF_LONG 4 # define CURL_TYPEOF_CURL_OFF_T long long # define CURL_FORMAT_CURL_OFF_T "lld" # define CURL_FORMAT_CURL_OFF_TU "llu" # define CURL_FORMAT_OFF_T "%lld" # define CURL_SIZEOF_CURL_OFF_T 8 # define CURL_SUFFIX_CURL_OFF_T LL # define CURL_SUFFIX_CURL_OFF_TU ULL # elif defined(__VC32__) # define CURL_SIZEOF_LONG 4 # define CURL_TYPEOF_CURL_OFF_T __int64 # define CURL_FORMAT_CURL_OFF_T "lld" # define 
CURL_FORMAT_CURL_OFF_TU "llu" # define CURL_FORMAT_OFF_T "%lld" # define CURL_SIZEOF_CURL_OFF_T 8 # define CURL_SUFFIX_CURL_OFF_T LL # define CURL_SUFFIX_CURL_OFF_TU ULL # endif # define CURL_TYPEOF_CURL_SOCKLEN_T unsigned int # define CURL_SIZEOF_CURL_SOCKLEN_T 4 #elif defined(__MWERKS__) # define CURL_SIZEOF_LONG 4 # define CURL_TYPEOF_CURL_OFF_T long long # define CURL_FORMAT_CURL_OFF_T "lld" # define CURL_FORMAT_CURL_OFF_TU "llu" # define CURL_FORMAT_OFF_T "%lld" # define CURL_SIZEOF_CURL_OFF_T 8 # define CURL_SUFFIX_CURL_OFF_T LL # define CURL_SUFFIX_CURL_OFF_TU ULL # define CURL_TYPEOF_CURL_SOCKLEN_T int # define CURL_SIZEOF_CURL_SOCKLEN_T 4 #elif defined(_WIN32_WCE) # define CURL_SIZEOF_LONG 4 # define CURL_TYPEOF_CURL_OFF_T __int64 # define CURL_FORMAT_CURL_OFF_T "I64d" # define CURL_FORMAT_CURL_OFF_TU "I64u" # define CURL_FORMAT_OFF_T "%I64d" # define CURL_SIZEOF_CURL_OFF_T 8 # define CURL_SUFFIX_CURL_OFF_T i64 # define CURL_SUFFIX_CURL_OFF_TU ui64 # define CURL_TYPEOF_CURL_SOCKLEN_T int # define CURL_SIZEOF_CURL_SOCKLEN_T 4 #elif defined(__MINGW32__) # define CURL_SIZEOF_LONG 4 # define CURL_TYPEOF_CURL_OFF_T long long # define CURL_FORMAT_CURL_OFF_T "I64d" # define CURL_FORMAT_CURL_OFF_TU "I64u" # define CURL_FORMAT_OFF_T "%I64d" # define CURL_SIZEOF_CURL_OFF_T 8 # define CURL_SUFFIX_CURL_OFF_T LL # define CURL_SUFFIX_CURL_OFF_TU ULL # define CURL_TYPEOF_CURL_SOCKLEN_T int # define CURL_SIZEOF_CURL_SOCKLEN_T 4 #elif defined(__VMS) # if defined(__VAX) # define CURL_SIZEOF_LONG 4 # define CURL_TYPEOF_CURL_OFF_T long # define CURL_FORMAT_CURL_OFF_T "ld" # define CURL_FORMAT_CURL_OFF_TU "lu" # define CURL_FORMAT_OFF_T "%ld" # define CURL_SIZEOF_CURL_OFF_T 4 # define CURL_SUFFIX_CURL_OFF_T L # define CURL_SUFFIX_CURL_OFF_TU UL # else # define CURL_SIZEOF_LONG 4 # define CURL_TYPEOF_CURL_OFF_T long long # define CURL_FORMAT_CURL_OFF_T "lld" # define CURL_FORMAT_CURL_OFF_TU "llu" # define CURL_FORMAT_OFF_T "%lld" # define CURL_SIZEOF_CURL_OFF_T 8 # define 
CURL_SUFFIX_CURL_OFF_T LL # define CURL_SUFFIX_CURL_OFF_TU ULL # endif # define CURL_TYPEOF_CURL_SOCKLEN_T unsigned int # define CURL_SIZEOF_CURL_SOCKLEN_T 4 #elif defined(__OS400__) # if defined(__ILEC400__) # define CURL_SIZEOF_LONG 4 # define CURL_TYPEOF_CURL_OFF_T long long # define CURL_FORMAT_CURL_OFF_T "lld" # define CURL_FORMAT_CURL_OFF_TU "llu" # define CURL_FORMAT_OFF_T "%lld" # define CURL_SIZEOF_CURL_OFF_T 8 # define CURL_SUFFIX_CURL_OFF_T LL # define CURL_SUFFIX_CURL_OFF_TU ULL # define CURL_TYPEOF_CURL_SOCKLEN_T socklen_t # define CURL_SIZEOF_CURL_SOCKLEN_T 4 # define CURL_PULL_SYS_TYPES_H 1 # define CURL_PULL_SYS_SOCKET_H 1 # endif #elif defined(__MVS__) # if defined(__IBMC__) || defined(__IBMCPP__) # if defined(_ILP32) # define CURL_SIZEOF_LONG 4 # elif defined(_LP64) # define CURL_SIZEOF_LONG 8 # endif # if defined(_LONG_LONG) # define CURL_TYPEOF_CURL_OFF_T long long # define CURL_FORMAT_CURL_OFF_T "lld" # define CURL_FORMAT_CURL_OFF_TU "llu" # define CURL_FORMAT_OFF_T "%lld" # define CURL_SIZEOF_CURL_OFF_T 8 # define CURL_SUFFIX_CURL_OFF_T LL # define CURL_SUFFIX_CURL_OFF_TU ULL # elif defined(_LP64) # define CURL_TYPEOF_CURL_OFF_T long # define CURL_FORMAT_CURL_OFF_T "ld" # define CURL_FORMAT_CURL_OFF_TU "lu" # define CURL_FORMAT_OFF_T "%ld" # define CURL_SIZEOF_CURL_OFF_T 8 # define CURL_SUFFIX_CURL_OFF_T L # define CURL_SUFFIX_CURL_OFF_TU UL # else # define CURL_TYPEOF_CURL_OFF_T long # define CURL_FORMAT_CURL_OFF_T "ld" # define CURL_FORMAT_CURL_OFF_TU "lu" # define CURL_FORMAT_OFF_T "%ld" # define CURL_SIZEOF_CURL_OFF_T 4 # define CURL_SUFFIX_CURL_OFF_T L # define CURL_SUFFIX_CURL_OFF_TU UL # endif # define CURL_TYPEOF_CURL_SOCKLEN_T socklen_t # define CURL_SIZEOF_CURL_SOCKLEN_T 4 # define CURL_PULL_SYS_TYPES_H 1 # define CURL_PULL_SYS_SOCKET_H 1 # endif #elif defined(__370__) # if defined(__IBMC__) || defined(__IBMCPP__) # if defined(_ILP32) # define CURL_SIZEOF_LONG 4 # elif defined(_LP64) # define CURL_SIZEOF_LONG 8 # endif # if 
defined(_LONG_LONG) # define CURL_TYPEOF_CURL_OFF_T long long # define CURL_FORMAT_CURL_OFF_T "lld" # define CURL_FORMAT_CURL_OFF_TU "llu" # define CURL_FORMAT_OFF_T "%lld" # define CURL_SIZEOF_CURL_OFF_T 8 # define CURL_SUFFIX_CURL_OFF_T LL # define CURL_SUFFIX_CURL_OFF_TU ULL # elif defined(_LP64) # define CURL_TYPEOF_CURL_OFF_T long # define CURL_FORMAT_CURL_OFF_T "ld" # define CURL_FORMAT_CURL_OFF_TU "lu" # define CURL_FORMAT_OFF_T "%ld" # define CURL_SIZEOF_CURL_OFF_T 8 # define CURL_SUFFIX_CURL_OFF_T L # define CURL_SUFFIX_CURL_OFF_TU UL # else # define CURL_TYPEOF_CURL_OFF_T long # define CURL_FORMAT_CURL_OFF_T "ld" # define CURL_FORMAT_CURL_OFF_TU "lu" # define CURL_FORMAT_OFF_T "%ld" # define CURL_SIZEOF_CURL_OFF_T 4 # define CURL_SUFFIX_CURL_OFF_T L # define CURL_SUFFIX_CURL_OFF_TU UL # endif # define CURL_TYPEOF_CURL_SOCKLEN_T socklen_t # define CURL_SIZEOF_CURL_SOCKLEN_T 4 # define CURL_PULL_SYS_TYPES_H 1 # define CURL_PULL_SYS_SOCKET_H 1 # endif #elif defined(TPF) # define CURL_SIZEOF_LONG 8 # define CURL_TYPEOF_CURL_OFF_T long # define CURL_FORMAT_CURL_OFF_T "ld" # define CURL_FORMAT_CURL_OFF_TU "lu" # define CURL_FORMAT_OFF_T "%ld" # define CURL_SIZEOF_CURL_OFF_T 8 # define CURL_SUFFIX_CURL_OFF_T L # define CURL_SUFFIX_CURL_OFF_TU UL # define CURL_TYPEOF_CURL_SOCKLEN_T int # define CURL_SIZEOF_CURL_SOCKLEN_T 4 /* ===================================== */ /* KEEP MSVC THE PENULTIMATE ENTRY */ /* ===================================== */ #elif defined(_MSC_VER) # if (_MSC_VER >= 900) && (_INTEGRAL_MAX_BITS >= 64) # define CURL_SIZEOF_LONG 4 # define CURL_TYPEOF_CURL_OFF_T __int64 # define CURL_FORMAT_CURL_OFF_T "I64d" # define CURL_FORMAT_CURL_OFF_TU "I64u" # define CURL_FORMAT_OFF_T "%I64d" # define CURL_SIZEOF_CURL_OFF_T 8 # define CURL_SUFFIX_CURL_OFF_T i64 # define CURL_SUFFIX_CURL_OFF_TU ui64 # else # define CURL_SIZEOF_LONG 4 # define CURL_TYPEOF_CURL_OFF_T long # define CURL_FORMAT_CURL_OFF_T "ld" # define CURL_FORMAT_CURL_OFF_TU "lu" # define 
CURL_FORMAT_OFF_T "%ld" # define CURL_SIZEOF_CURL_OFF_T 4 # define CURL_SUFFIX_CURL_OFF_T L # define CURL_SUFFIX_CURL_OFF_TU UL # endif # define CURL_TYPEOF_CURL_SOCKLEN_T int # define CURL_SIZEOF_CURL_SOCKLEN_T 4 /* ===================================== */ /* KEEP GENERIC GCC THE LAST ENTRY */ /* ===================================== */ #elif defined(__GNUC__) # if defined(__i386__) || defined(__ppc__) # define CURL_SIZEOF_LONG 4 # define CURL_TYPEOF_CURL_OFF_T long long # define CURL_FORMAT_CURL_OFF_T "lld" # define CURL_FORMAT_CURL_OFF_TU "llu" # define CURL_FORMAT_OFF_T "%lld" # define CURL_SIZEOF_CURL_OFF_T 8 # define CURL_SUFFIX_CURL_OFF_T LL # define CURL_SUFFIX_CURL_OFF_TU ULL # elif defined(__x86_64__) || defined(__ppc64__) # define CURL_SIZEOF_LONG 8 # define CURL_TYPEOF_CURL_OFF_T long # define CURL_FORMAT_CURL_OFF_T "ld" # define CURL_FORMAT_CURL_OFF_TU "lu" # define CURL_FORMAT_OFF_T "%ld" # define CURL_SIZEOF_CURL_OFF_T 8 # define CURL_SUFFIX_CURL_OFF_T L # define CURL_SUFFIX_CURL_OFF_TU UL # endif # define CURL_TYPEOF_CURL_SOCKLEN_T socklen_t # define CURL_SIZEOF_CURL_SOCKLEN_T 4 # define CURL_PULL_SYS_TYPES_H 1 # define CURL_PULL_SYS_SOCKET_H 1 #else # error "Unknown non-configure build target!" Error Compilation_aborted_Unknown_non_configure_build_target #endif /* CURL_PULL_SYS_TYPES_H is defined above when inclusion of header file */ /* sys/types.h is required here to properly make type definitions below. */ #ifdef CURL_PULL_SYS_TYPES_H # include <sys/types.h> #endif /* CURL_PULL_SYS_SOCKET_H is defined above when inclusion of header file */ /* sys/socket.h is required here to properly make type definitions below. */ #ifdef CURL_PULL_SYS_SOCKET_H # include <sys/socket.h> #endif /* Data type definition of curl_socklen_t. */ #ifdef CURL_TYPEOF_CURL_SOCKLEN_T typedef CURL_TYPEOF_CURL_SOCKLEN_T curl_socklen_t; #endif /* Data type definition of curl_off_t. 
*/ #ifdef CURL_TYPEOF_CURL_OFF_T typedef CURL_TYPEOF_CURL_OFF_T curl_off_t; #endif #endif /* __CURL_CURLBUILD_H */
22,192
37.001712
80
h
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/code/ndn-dash/libdash/libcurl/include/curl/curlrules.h
#ifndef __CURL_CURLRULES_H #define __CURL_CURLRULES_H /*************************************************************************** * _ _ ____ _ * Project ___| | | | _ \| | * / __| | | | |_) | | * | (__| |_| | _ <| |___ * \___|\___/|_| \_\_____| * * Copyright (C) 1998 - 2011, Daniel Stenberg, <daniel@haxx.se>, et al. * * This software is licensed as described in the file COPYING, which * you should have received as part of this distribution. The terms * are also available at http://curl.haxx.se/docs/copyright.html. * * You may opt to use, copy, modify, merge, publish, distribute and/or sell * copies of the Software, and permit persons to whom the Software is * furnished to do so, under the terms of the COPYING file. * * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY * KIND, either express or implied. * ***************************************************************************/ /* ================================================================ */ /* COMPILE TIME SANITY CHECKS */ /* ================================================================ */ /* * NOTE 1: * ------- * * All checks done in this file are intentionally placed in a public * header file which is pulled by curl/curl.h when an application is * being built using an already built libcurl library. Additionally * this file is also included and used when building the library. * * If compilation fails on this file it is certainly sure that the * problem is elsewhere. It could be a problem in the curlbuild.h * header file, or simply that you are using different compilation * settings than those used to build the library. * * Nothing in this file is intended to be modified or adjusted by the * curl library user nor by the curl library builder. * * Do not deactivate any check, these are done to make sure that the * library is properly built and used. 
* * You can find further help on the libcurl development mailing list: * http://cool.haxx.se/mailman/listinfo/curl-library/ * * NOTE 2 * ------ * * Some of the following compile time checks are based on the fact * that the dimension of a constant array can not be a negative one. * In this way if the compile time verification fails, the compilation * will fail issuing an error. The error description wording is compiler * dependent but it will be quite similar to one of the following: * * "negative subscript or subscript is too large" * "array must have at least one element" * "-1 is an illegal array size" * "size of array is negative" * * If you are building an application which tries to use an already * built libcurl library and you are getting this kind of errors on * this file, it is a clear indication that there is a mismatch between * how the library was built and how you are trying to use it for your * application. Your already compiled or binary library provider is the * only one who can give you the details you need to properly use it. */ /* * Verify that some macros are actually defined. */ #ifndef CURL_SIZEOF_LONG # error "CURL_SIZEOF_LONG definition is missing!" Error Compilation_aborted_CURL_SIZEOF_LONG_is_missing #endif #ifndef CURL_TYPEOF_CURL_SOCKLEN_T # error "CURL_TYPEOF_CURL_SOCKLEN_T definition is missing!" Error Compilation_aborted_CURL_TYPEOF_CURL_SOCKLEN_T_is_missing #endif #ifndef CURL_SIZEOF_CURL_SOCKLEN_T # error "CURL_SIZEOF_CURL_SOCKLEN_T definition is missing!" Error Compilation_aborted_CURL_SIZEOF_CURL_SOCKLEN_T_is_missing #endif #ifndef CURL_TYPEOF_CURL_OFF_T # error "CURL_TYPEOF_CURL_OFF_T definition is missing!" Error Compilation_aborted_CURL_TYPEOF_CURL_OFF_T_is_missing #endif #ifndef CURL_FORMAT_CURL_OFF_T # error "CURL_FORMAT_CURL_OFF_T definition is missing!" Error Compilation_aborted_CURL_FORMAT_CURL_OFF_T_is_missing #endif #ifndef CURL_FORMAT_CURL_OFF_TU # error "CURL_FORMAT_CURL_OFF_TU definition is missing!" 
Error Compilation_aborted_CURL_FORMAT_CURL_OFF_TU_is_missing #endif #ifndef CURL_FORMAT_OFF_T # error "CURL_FORMAT_OFF_T definition is missing!" Error Compilation_aborted_CURL_FORMAT_OFF_T_is_missing #endif #ifndef CURL_SIZEOF_CURL_OFF_T # error "CURL_SIZEOF_CURL_OFF_T definition is missing!" Error Compilation_aborted_CURL_SIZEOF_CURL_OFF_T_is_missing #endif #ifndef CURL_SUFFIX_CURL_OFF_T # error "CURL_SUFFIX_CURL_OFF_T definition is missing!" Error Compilation_aborted_CURL_SUFFIX_CURL_OFF_T_is_missing #endif #ifndef CURL_SUFFIX_CURL_OFF_TU # error "CURL_SUFFIX_CURL_OFF_TU definition is missing!" Error Compilation_aborted_CURL_SUFFIX_CURL_OFF_TU_is_missing #endif /* * Macros private to this header file. */ #define CurlchkszEQ(t, s) sizeof(t) == s ? 1 : -1 #define CurlchkszGE(t1, t2) sizeof(t1) >= sizeof(t2) ? 1 : -1 /* * Verify that the size previously defined and expected for long * is the same as the one reported by sizeof() at compile time. */ typedef char __curl_rule_01__ [CurlchkszEQ(long, CURL_SIZEOF_LONG)]; /* * Verify that the size previously defined and expected for * curl_off_t is actually the the same as the one reported * by sizeof() at compile time. */ typedef char __curl_rule_02__ [CurlchkszEQ(curl_off_t, CURL_SIZEOF_CURL_OFF_T)]; /* * Verify at compile time that the size of curl_off_t as reported * by sizeof() is greater or equal than the one reported for long * for the current compilation. */ typedef char __curl_rule_03__ [CurlchkszGE(curl_off_t, long)]; /* * Verify that the size previously defined and expected for * curl_socklen_t is actually the the same as the one reported * by sizeof() at compile time. */ typedef char __curl_rule_04__ [CurlchkszEQ(curl_socklen_t, CURL_SIZEOF_CURL_SOCKLEN_T)]; /* * Verify at compile time that the size of curl_socklen_t as reported * by sizeof() is greater or equal than the one reported for int for * the current compilation. 
*/ typedef char __curl_rule_05__ [CurlchkszGE(curl_socklen_t, int)]; /* ================================================================ */ /* EXTERNALLY AND INTERNALLY VISIBLE DEFINITIONS */ /* ================================================================ */ /* * CURL_ISOCPP and CURL_OFF_T_C definitions are done here in order to allow * these to be visible and exported by the external libcurl interface API, * while also making them visible to the library internals, simply including * setup.h, without actually needing to include curl.h internally. * If some day this section would grow big enough, all this should be moved * to its own header file. */ /* * Figure out if we can use the ## preprocessor operator, which is supported * by ISO/ANSI C and C++. Some compilers support it without setting __STDC__ * or __cplusplus so we need to carefully check for them too. */ #if defined(__STDC__) || defined(_MSC_VER) || defined(__cplusplus) || \ defined(__HP_aCC) || defined(__BORLANDC__) || defined(__LCC__) || \ defined(__POCC__) || defined(__SALFORDC__) || defined(__HIGHC__) || \ defined(__ILEC400__) /* This compiler is believed to have an ISO compatible preprocessor */ #define CURL_ISOCPP #else /* This compiler is believed NOT to have an ISO compatible preprocessor */ #undef CURL_ISOCPP #endif /* * Macros for minimum-width signed and unsigned curl_off_t integer constants. 
*/ #if defined(__BORLANDC__) && (__BORLANDC__ == 0x0551) # define __CURL_OFF_T_C_HLPR2(x) x # define __CURL_OFF_T_C_HLPR1(x) __CURL_OFF_T_C_HLPR2(x) # define CURL_OFF_T_C(Val) __CURL_OFF_T_C_HLPR1(Val) ## \ __CURL_OFF_T_C_HLPR1(CURL_SUFFIX_CURL_OFF_T) # define CURL_OFF_TU_C(Val) __CURL_OFF_T_C_HLPR1(Val) ## \ __CURL_OFF_T_C_HLPR1(CURL_SUFFIX_CURL_OFF_TU) #else # ifdef CURL_ISOCPP # define __CURL_OFF_T_C_HLPR2(Val,Suffix) Val ## Suffix # else # define __CURL_OFF_T_C_HLPR2(Val,Suffix) Val/**/Suffix # endif # define __CURL_OFF_T_C_HLPR1(Val,Suffix) __CURL_OFF_T_C_HLPR2(Val,Suffix) # define CURL_OFF_T_C(Val) __CURL_OFF_T_C_HLPR1(Val,CURL_SUFFIX_CURL_OFF_T) # define CURL_OFF_TU_C(Val) __CURL_OFF_T_C_HLPR1(Val,CURL_SUFFIX_CURL_OFF_TU) #endif /* * Get rid of macros private to this header file. */ #undef CurlchkszEQ #undef CurlchkszGE /* * Get rid of macros not intended to exist beyond this point. */ #undef CURL_PULL_WS2TCPIP_H #undef CURL_PULL_SYS_TYPES_H #undef CURL_PULL_SYS_SOCKET_H #undef CURL_PULL_STDINT_H #undef CURL_PULL_INTTYPES_H #undef CURL_TYPEOF_CURL_SOCKLEN_T #undef CURL_TYPEOF_CURL_OFF_T #ifdef CURL_NO_OLDIES #undef CURL_FORMAT_OFF_T /* not required since 7.19.0 - obsoleted in 7.20.0 */ #endif #endif /* __CURL_CURLRULES_H */
8,901
32.977099
78
h
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/code/ndn-dash/libdash/libcurl/include/curl/easy.h
#ifndef __CURL_EASY_H #define __CURL_EASY_H /*************************************************************************** * _ _ ____ _ * Project ___| | | | _ \| | * / __| | | | |_) | | * | (__| |_| | _ <| |___ * \___|\___/|_| \_\_____| * * Copyright (C) 1998 - 2008, Daniel Stenberg, <daniel@haxx.se>, et al. * * This software is licensed as described in the file COPYING, which * you should have received as part of this distribution. The terms * are also available at http://curl.haxx.se/docs/copyright.html. * * You may opt to use, copy, modify, merge, publish, distribute and/or sell * copies of the Software, and permit persons to whom the Software is * furnished to do so, under the terms of the COPYING file. * * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY * KIND, either express or implied. * ***************************************************************************/ #ifdef __cplusplus extern "C" { #endif CURL_EXTERN CURL *curl_easy_init(void); CURL_EXTERN CURLcode curl_easy_setopt(CURL *curl, CURLoption option, ...); CURL_EXTERN CURLcode curl_easy_perform(CURL *curl); CURL_EXTERN void curl_easy_cleanup(CURL *curl); /* * NAME curl_easy_getinfo() * * DESCRIPTION * * Request internal information from the curl session with this function. The * third argument MUST be a pointer to a long, a pointer to a char * or a * pointer to a double (as the documentation describes elsewhere). The data * pointed to will be filled in accordingly and can be relied upon only if the * function returns CURLE_OK. This function is intended to get used *AFTER* a * performed transfer, all results from this function are undefined until the * transfer is completed. */ CURL_EXTERN CURLcode curl_easy_getinfo(CURL *curl, CURLINFO info, ...); /* * NAME curl_easy_duphandle() * * DESCRIPTION * * Creates a new curl session handle with the same options set for the handle * passed in. 
Duplicating a handle could only be a matter of cloning data and * options, internal state info and things like persistent connections cannot * be transferred. It is useful in multithreaded applications when you can run * curl_easy_duphandle() for each new thread to avoid a series of identical * curl_easy_setopt() invokes in every thread. */ CURL_EXTERN CURL* curl_easy_duphandle(CURL *curl); /* * NAME curl_easy_reset() * * DESCRIPTION * * Re-initializes a CURL handle to the default values. This puts back the * handle to the same state as it was in when it was just created. * * It does keep: live connections, the Session ID cache, the DNS cache and the * cookies. */ CURL_EXTERN void curl_easy_reset(CURL *curl); /* * NAME curl_easy_recv() * * DESCRIPTION * * Receives data from the connected socket. Use after successful * curl_easy_perform() with CURLOPT_CONNECT_ONLY option. */ CURL_EXTERN CURLcode curl_easy_recv(CURL *curl, void *buffer, size_t buflen, size_t *n); /* * NAME curl_easy_send() * * DESCRIPTION * * Sends data over the connected socket. Use after successful * curl_easy_perform() with CURLOPT_CONNECT_ONLY option. */ CURL_EXTERN CURLcode curl_easy_send(CURL *curl, const void *buffer, size_t buflen, size_t *n); #ifdef __cplusplus } #endif #endif
3,472
32.718447
78
h
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/code/ndn-dash/libdash/libcurl/include/curl/stdcheaders.h
#ifndef __STDC_HEADERS_H #define __STDC_HEADERS_H /*************************************************************************** * _ _ ____ _ * Project ___| | | | _ \| | * / __| | | | |_) | | * | (__| |_| | _ <| |___ * \___|\___/|_| \_\_____| * * Copyright (C) 1998 - 2010, Daniel Stenberg, <daniel@haxx.se>, et al. * * This software is licensed as described in the file COPYING, which * you should have received as part of this distribution. The terms * are also available at http://curl.haxx.se/docs/copyright.html. * * You may opt to use, copy, modify, merge, publish, distribute and/or sell * copies of the Software, and permit persons to whom the Software is * furnished to do so, under the terms of the COPYING file. * * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY * KIND, either express or implied. * ***************************************************************************/ #include <sys/types.h> size_t fread (void *, size_t, size_t, FILE *); size_t fwrite (const void *, size_t, size_t, FILE *); int strcasecmp(const char *, const char *); int strncasecmp(const char *, const char *, size_t); #endif /* __STDC_HEADERS_H */
1,330
38.147059
77
h
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/code/ndn-dash/libdash/libcurl/include/curl/curlver.h
#ifndef __CURL_CURLVER_H #define __CURL_CURLVER_H /*************************************************************************** * _ _ ____ _ * Project ___| | | | _ \| | * / __| | | | |_) | | * | (__| |_| | _ <| |___ * \___|\___/|_| \_\_____| * * Copyright (C) 1998 - 2012, Daniel Stenberg, <daniel@haxx.se>, et al. * * This software is licensed as described in the file COPYING, which * you should have received as part of this distribution. The terms * are also available at http://curl.haxx.se/docs/copyright.html. * * You may opt to use, copy, modify, merge, publish, distribute and/or sell * copies of the Software, and permit persons to whom the Software is * furnished to do so, under the terms of the COPYING file. * * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY * KIND, either express or implied. * ***************************************************************************/ /* This header file contains nothing but libcurl version info, generated by a script at release-time. This was made its own header file in 7.11.2 */ /* This is the global package copyright */ #define LIBCURL_COPYRIGHT "1996 - 2012 Daniel Stenberg, <daniel@haxx.se>." /* This is the version number of the libcurl package from which this header file origins: */ #define LIBCURL_VERSION "7.28.1" /* The numeric version number is also available "in parts" by using these defines: */ #define LIBCURL_VERSION_MAJOR 7 #define LIBCURL_VERSION_MINOR 28 #define LIBCURL_VERSION_PATCH 1 /* This is the numeric version of the libcurl version number, meant for easier parsing and comparions by programs. The LIBCURL_VERSION_NUM define will always follow this syntax: 0xXXYYZZ Where XX, YY and ZZ are the main version, release and patch numbers in hexadecimal (using 8 bits each). All three numbers are always represented using two digits. 1.2 would appear as "0x010200" while version 9.11.7 appears as "0x090b07". 
This 6-digit (24 bits) hexadecimal number does not show pre-release number, and it is always a greater number in a more recent release. It makes comparisons with greater than and less than work. */ #define LIBCURL_VERSION_NUM 0x071c01 /* * This is the date and time when the full source package was created. The * timestamp is not stored in git, as the timestamp is properly set in the * tarballs by the maketgz script. * * The format of the date should follow this template: * * "Mon Feb 12 11:35:33 UTC 2007" */ #define LIBCURL_TIMESTAMP "Tue Nov 20 07:12:05 UTC 2012" #endif /* __CURL_CURLVER_H */
2,741
38.171429
78
h
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/code/ndn-dash/libdash/libcurl/include/curl/mprintf.h
#ifndef __CURL_MPRINTF_H #define __CURL_MPRINTF_H /*************************************************************************** * _ _ ____ _ * Project ___| | | | _ \| | * / __| | | | |_) | | * | (__| |_| | _ <| |___ * \___|\___/|_| \_\_____| * * Copyright (C) 1998 - 2006, Daniel Stenberg, <daniel@haxx.se>, et al. * * This software is licensed as described in the file COPYING, which * you should have received as part of this distribution. The terms * are also available at http://curl.haxx.se/docs/copyright.html. * * You may opt to use, copy, modify, merge, publish, distribute and/or sell * copies of the Software, and permit persons to whom the Software is * furnished to do so, under the terms of the COPYING file. * * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY * KIND, either express or implied. * ***************************************************************************/ #include <stdarg.h> #include <stdio.h> /* needed for FILE */ #include "curl.h" #ifdef __cplusplus extern "C" { #endif CURL_EXTERN int curl_mprintf(const char *format, ...); CURL_EXTERN int curl_mfprintf(FILE *fd, const char *format, ...); CURL_EXTERN int curl_msprintf(char *buffer, const char *format, ...); CURL_EXTERN int curl_msnprintf(char *buffer, size_t maxlength, const char *format, ...); CURL_EXTERN int curl_mvprintf(const char *format, va_list args); CURL_EXTERN int curl_mvfprintf(FILE *fd, const char *format, va_list args); CURL_EXTERN int curl_mvsprintf(char *buffer, const char *format, va_list args); CURL_EXTERN int curl_mvsnprintf(char *buffer, size_t maxlength, const char *format, va_list args); CURL_EXTERN char *curl_maprintf(const char *format, ...); CURL_EXTERN char *curl_mvaprintf(const char *format, va_list args); #ifdef _MPRINTF_REPLACE # undef printf # undef fprintf # undef sprintf # undef vsprintf # undef snprintf # undef vprintf # undef vfprintf # undef vsnprintf # undef aprintf # undef vaprintf # define printf curl_mprintf # define fprintf 
curl_mfprintf #ifdef CURLDEBUG /* When built with CURLDEBUG we define away the sprintf() functions since we don't want internal code to be using them */ # define sprintf sprintf_was_used # define vsprintf vsprintf_was_used #else # define sprintf curl_msprintf # define vsprintf curl_mvsprintf #endif # define snprintf curl_msnprintf # define vprintf curl_mvprintf # define vfprintf curl_mvfprintf # define vsnprintf curl_mvsnprintf # define aprintf curl_maprintf # define vaprintf curl_mvaprintf #endif #ifdef __cplusplus } #endif #endif /* __CURL_MPRINTF_H */
2,790
33.036585
79
h
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/code/ndn-dash/libdash/qtsampleplayer/UI/QtSamplePlayerGui.h
/* * qtsampleplayer.h ***************************************************************************** * Copyright (C) 2012, bitmovin Softwareentwicklung OG, All Rights Reserved * * Email: libdash-dev@vicky.bitmovin.net * * This source code and its use and distribution, is subject to the terms * and conditions of the applicable license agreement. *****************************************************************************/ #ifndef QTSAMPLEPLAYER_H #define QTSAMPLEPLAYER_H #include <QtMultimedia/qmediaplayer.h> #include <QtMultimediaWidgets/qvideowidget.h> #include <QtGui/QMovie> #include <QtWidgets/QMainWindow> #include <QtWidgets/QStackedWidget> #include <QMouseEvent> #include "ui_qtsampleplayer.h" #include "libdash.h" #include "../libdashframework/MPD/AdaptationSetHelper.h" #include "../libdashframework/Adaptation/IAdaptationLogic.h" #include "../libdashframework/Portable/MultiThreading.h" #include <QDialog> //#include "FullScreenDialog.h" namespace sampleplayer { class IDASHPlayerGuiObserver; class QtSamplePlayerGui : public QMainWindow { Q_OBJECT public: QtSamplePlayerGui (QWidget *parent = 0); virtual ~QtSamplePlayerGui (); void SetGuiFields (dash::mpd::IMPD* mpd); virtual void AddWidgetObserver (IDASHPlayerGuiObserver* observer); virtual void SetStatusBar (const std::string& text); virtual std::string GetUrl (); sampleplayer::renderer::QTGLRenderer* GetVideoElement (); void DisableUserActions (); //On Bitrate selection void EnableUserActions (); //On Bitrate selection void Reset (); void mouseDoubleClickEvent (QMouseEvent* event); void keyPressEvent (QKeyEvent* event); QStackedWidget* myStackedWidget; bool isRunning; void LockStartAndDownloadButton (); private slots: void on_cb_mpd_currentTextChanged (const QString &arg1); void on_cb_period_currentIndexChanged (int index); void on_cb_video_adaptationset_currentIndexChanged (int index); void on_cb_video_representation_currentIndexChanged (int index); void on_cb_audio_adaptationset_currentIndexChanged (int 
index); void on_cb_audio_representation_currentIndexChanged (int index); void on_button_mpd_clicked (); void on_button_start_clicked (); void on_button_stop_clicked (); public slots: virtual void SetVideoSegmentBufferFillState (int percentage); virtual void SetVideoBufferFillState (int percentage); virtual void SetAudioSegmentBufferFillState (int percentage); virtual void SetAudioBufferFillState (int percentage); int GetAdaptationLogic (); bool GetNDNStatus (); //check if NDN is enabled or classical TCP void SetNDNStatus (bool value); void SetUrl (const char* text); void SetAdaptationLogic (libdash::framework::adaptation::LogicType type); void showNormal (); private: std::map<std::string, std::string> keyValues; std::map<std::string, int> keyIndices; std::map<std::string, std::vector<std::string> > video; std::map<std::string, std::vector<std::string> > audio; CRITICAL_SECTION monitorMutex; Ui::QtSamplePlayerClass *ui; std::vector<IDASHPlayerGuiObserver *> observers; dash::mpd::IMPD *mpd; bool fullscreen; bool isPlaying; void LockUI (); void UnLockUI (); void SetPeriodComboBox (dash::mpd::IMPD *mpd, QComboBox *cb); void SetAdaptationLogicComboBox (dash::mpd::IMPD *mpd, QComboBox *cb); void SetAdaptationSetComboBox (dash::mpd::IPeriod *period, QComboBox *cb); void SetVideoAdaptationSetComboBox (dash::mpd::IPeriod *period, QComboBox *cb); void SetAudioAdaptationSetComboBox (dash::mpd::IPeriod *period, QComboBox *cb); void SetRepresentationComoboBox (dash::mpd::IAdaptationSet *adaptationSet, QComboBox *cb); void ClearComboBoxes (); void OnDoubleClick (); void NotifySettingsChanged (); void NotifyMPDDownloadPressed (const std::string &url); void NotifyStartButtonPressed (); void NotifyStopButtonPressed (); void NotifyPauseButtonPressed (); void NotifyFastForward (); void NotifyFastRewind (); }; class FullScreenDialog : public QDialog { public: FullScreenDialog(QtSamplePlayerGui *gui); ~FullScreenDialog(); void keyPressEvent (QKeyEvent* event); private: 
QtSamplePlayerGui *gui; }; } #endif // QTSAMPLEPLAYER_H
5,505
41.682171
111
h
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/code/ndn-dash/libdash/qtsampleplayer/libdashframework/helpers/TimingObject.h
/* * TimingObject.h ***************************************************************************** * Copyright (C) 2013, bitmovin Softwareentwicklung OG, All Rights Reserved * * Email: libdash-dev@vicky.bitmovin.net * * This source code and its use and distribution, is subject to the terms * and conditions of the applicable license agreement. *****************************************************************************/ #ifndef LIBDASH_FRAMEWORK_HELPERS_TIMINGOBJECT_H_ #define LIBDASH_FRAMEWORK_HELPERS_TIMINGOBJECT_H_ #include "config.h" #include <time.h> #include "Timing.h" namespace libdash { namespace framework { namespace helpers { class TimingObject { public: TimingObject (std::string desc); virtual ~TimingObject (); clock_t TimeStamp (); std::string Description (); private: clock_t timeStamp; std::string description; }; } } } #endif /* LIBDASH_FRAMEWORK_HELPERS_TIMINGOBJECT_H_ */
1,162
24.844444
79
h
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/code/ndn-dash/libdash/qtsampleplayer/libdashframework/helpers/Timing.h
/* * Timing.h ***************************************************************************** * Copyright (C) 2013, bitmovin Softwareentwicklung OG, All Rights Reserved * * Email: libdash-dev@vicky.bitmovin.net * * This source code and its use and distribution, is subject to the terms * and conditions of the applicable license agreement. *****************************************************************************/ #ifndef LIBDASH_FRAMEWORK_HELPERS_TIMING_H_ #define LIBDASH_FRAMEWORK_HELPERS_TIMING_H_ #include <time.h> #include "config.h" #include "TimingObject.h" #include <iomanip> #include <fstream> namespace libdash { namespace framework { namespace helpers { class Timing { public: static void AddTiming (void *timing); static clock_t GetCurrentUTCTimeInMsec (); static void WriteToFile (std::string filename); static void DisposeTimingObjects (); private: static float GetDifference (clock_t before, clock_t after); static std::string TimingsInBetweenList (); static std::string TimingsList (); static std::vector<void *> timingsInBetween; }; } } } #endif /* LIBDASH_FRAMEWORK_HELPERS_TIMING_H_ */
1,511
29.857143
100
h
cba-pipeline-public
cba-pipeline-public-master/containernet/ndn-containers/ndn_headless-player/code/ndn-dash/libdash/qtsampleplayer/libdashframework/Input/IDASHReceiverObserver.h
/* * IDASHReceiverObserver.h ***************************************************************************** * Copyright (C) 2012, bitmovin Softwareentwicklung OG, All Rights Reserved * * Email: libdash-dev@vicky.bitmovin.net * * This source code and its use and distribution, is subject to the terms * and conditions of the applicable license agreement. *****************************************************************************/ #ifndef LIBDASH_FRAMEWORK_INPUT_IDASHRECEIVEROBSERVER_H_ #define LIBDASH_FRAMEWORK_INPUT_IDASHRECEIVEROBSERVER_H_ namespace libdash { namespace framework { namespace input { class IDASHReceiverObserver { public: virtual ~IDASHReceiverObserver () {} virtual void OnSegmentDownloaded () = 0; }; } } } #endif /* LIBDASH_FRAMEWORK_INPUT_IDASHRECEIVEROBSERVER_H_ */
938
28.34375
79
h