Dataset schema: repo (string, 1-152 chars), file (string, 14-221 chars), code (string, 501-25k chars), file_length (int64, 501-25k), avg_line_length (float64, 20-99.5), max_line_length (int64, 21-134), extension_type (string, 2 classes: "c"/"h"), plus a trailing column that is null for every record.
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_constructor/obj_constructor.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2017, Intel Corporation */ /* * obj_constructor.c -- tests for constructor */ #include <stddef.h> #include "unittest.h" /* * Command line toggle indicating use of a bigger node structure for querying * pool size expressed in a number of possible allocations. A small node * structure results in a great number of allocations impossible to replicate * in assumed timeout. It is required by unit tests using remote replication to * pass on Travis. */ #define USE_BIG_ALLOC "--big-alloc" /* * Layout definition */ POBJ_LAYOUT_BEGIN(constr); POBJ_LAYOUT_ROOT(constr, struct root); POBJ_LAYOUT_TOID(constr, struct node); POBJ_LAYOUT_TOID(constr, struct node_big); POBJ_LAYOUT_END(constr); struct root { TOID(struct node) n; POBJ_LIST_HEAD(head, struct node) list; POBJ_LIST_HEAD(head_big, struct node_big) list_big; }; struct node { POBJ_LIST_ENTRY(struct node) next; }; struct node_big { POBJ_LIST_ENTRY(struct node_big) next; int weight[2048]; }; static int root_constr_cancel(PMEMobjpool *pop, void *ptr, void *arg) { return 1; } static int node_constr_cancel(PMEMobjpool *pop, void *ptr, void *arg) { return 1; } struct foo { int bar; }; static struct foo *Canceled_ptr; static int vg_test_save_ptr(PMEMobjpool *pop, void *ptr, void *arg) { Canceled_ptr = (struct foo *)ptr; return 1; } int main(int argc, char *argv[]) { START(argc, argv, "obj_constructor"); /* root doesn't count */ UT_COMPILE_ERROR_ON(POBJ_LAYOUT_TYPES_NUM(constr) != 2); int big = (argc == 3 && strcmp(argv[2], USE_BIG_ALLOC) == 0); size_t node_size; size_t next_off; if (big) { node_size = sizeof(struct node_big); next_off = offsetof(struct node_big, next); } else if (argc == 2) { node_size = sizeof(struct node); next_off = offsetof(struct node, next); } else { UT_FATAL("usage: %s file-name [ %s ]", argv[0], USE_BIG_ALLOC); } const char *path = argv[1]; PMEMobjpool *pop = NULL; int ret; TOID(struct root) root; TOID(struct node) node; TOID(struct node_big) node_big; if ((pop = pmemobj_create(path, POBJ_LAYOUT_NAME(constr), 0, S_IWUSR | S_IRUSR)) == NULL) UT_FATAL("!pmemobj_create: %s", path); errno = 0; root.oid = pmemobj_root_construct(pop, sizeof(struct root), root_constr_cancel, NULL); UT_ASSERT(TOID_IS_NULL(root)); UT_ASSERTeq(errno, ECANCELED); /* * Allocate memory until OOM, so we can check later if the alloc * cancellation didn't damage the heap in any way. */ int allocs = 0; while (pmemobj_alloc(pop, NULL, node_size, 1, NULL, NULL) == 0) allocs++; UT_ASSERTne(allocs, 0); PMEMoid oid; PMEMoid next; POBJ_FOREACH_SAFE(pop, oid, next) pmemobj_free(&oid); errno = 0; ret = pmemobj_alloc(pop, NULL, node_size, 1, node_constr_cancel, NULL); UT_ASSERTeq(ret, -1); UT_ASSERTeq(errno, ECANCELED); /* the same number of allocations should be possible. 
*/ while (pmemobj_alloc(pop, NULL, node_size, 1, NULL, NULL) == 0) allocs--; UT_ASSERT(allocs <= 0); POBJ_FOREACH_SAFE(pop, oid, next) pmemobj_free(&oid); root.oid = pmemobj_root_construct(pop, sizeof(struct root), NULL, NULL); UT_ASSERT(!TOID_IS_NULL(root)); errno = 0; if (big) { node_big.oid = pmemobj_list_insert_new(pop, next_off, &D_RW(root)->list_big, OID_NULL, 0, node_size, 1, node_constr_cancel, NULL); UT_ASSERT(TOID_IS_NULL(node_big)); } else { node.oid = pmemobj_list_insert_new(pop, next_off, &D_RW(root)->list, OID_NULL, 0, node_size, 1, node_constr_cancel, NULL); UT_ASSERT(TOID_IS_NULL(node)); } UT_ASSERTeq(errno, ECANCELED); pmemobj_alloc(pop, &oid, sizeof(struct foo), 1, vg_test_save_ptr, NULL); UT_ASSERTne(Canceled_ptr, NULL); /* this should generate a valgrind memcheck warning */ Canceled_ptr->bar = 5; pmemobj_persist(pop, &Canceled_ptr->bar, sizeof(Canceled_ptr->bar)); /* * Allocate and cancel a huge object. It should return back to the * heap and it should be possible to allocate it again. */ Canceled_ptr = NULL; ret = pmemobj_alloc(pop, &oid, sizeof(struct foo) + (1 << 22), 1, vg_test_save_ptr, NULL); UT_ASSERTne(Canceled_ptr, NULL); void *first_ptr = Canceled_ptr; Canceled_ptr = NULL; ret = pmemobj_alloc(pop, &oid, sizeof(struct foo) + (1 << 22), 1, vg_test_save_ptr, NULL); UT_ASSERTeq(first_ptr, Canceled_ptr); pmemobj_close(pop); DONE(NULL); }
4,369
22.621622
79
c
null
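The test above relies on libpmemobj's constructor callback contract: pmemobj_alloc() invokes the constructor on the freshly reserved object and treats a non-zero return value as cancellation, failing with errno set to ECANCELED. A minimal hypothetical sketch of that contract (struct counter and the helper names are illustrative, not from the repo):

#include <errno.h>
#include <libpmemobj.h>

struct counter {
	int value;
};

/* runs inside pmemobj_alloc(); return 0 to commit, non-zero to cancel */
static int
counter_construct(PMEMobjpool *pop, void *ptr, void *arg)
{
	struct counter *c = ptr;

	c->value = arg ? *(int *)arg : 0;
	pmemobj_persist(pop, &c->value, sizeof(c->value));
	return 0;
}

/* returns 0 on success; on cancellation returns -1 with errno == ECANCELED */
static int
allocate_counter(PMEMobjpool *pop, PMEMoid *oidp, int start)
{
	return pmemobj_alloc(pop, oidp, sizeof(struct counter), 0,
			counter_construct, &start);
}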
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/unittest/unittest.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2014-2020, Intel Corporation */ /* * unittest.h -- the mundane stuff shared by all unit tests * * we want unit tests to be very thorough and check absolutely everything * in order to nail down the test case as precisely as possible and flag * anything at all unexpected. as a result, most unit tests are 90% code * checking stuff that isn't really interesting to what is being tested. * to help address this, the macros defined here include all the boilerplate * error checking which prints information and exits on unexpected errors. * * the result changes this code: * * if ((buf = malloc(size)) == NULL) { * fprintf(stderr, "cannot allocate %d bytes for buf\n", size); * exit(1); * } * * into this code: * * buf = MALLOC(size); * * and the error message includes the calling context information (file:line). * in general, using the all-caps version of a call means you're using the * unittest.h version which does the most common checking for you. so * calling VMEM_CREATE() instead of vmem_create() returns the same * thing, but can never return an error since the unit test library checks for * it. * for routines like vmem_delete() there is no corresponding * VMEM_DELETE() because there's no error to check for. * * all unit tests should use the same initialization: * * START(argc, argv, "brief test description", ...); * * all unit tests should use these exit calls: * * DONE("message", ...); * UT_FATAL("message", ...); * * uniform stderr and stdout messages: * * UT_OUT("message", ...); * UT_ERR("message", ...); * * in all cases above, the message is printf-like, taking variable args. * the message can be NULL. it can start with "!" in which case the "!" is * skipped and the message gets the errno string appended to it, like this: * * if (somesyscall(..) < 0) * UT_FATAL("!my message"); */ #ifndef _UNITTEST_H #define _UNITTEST_H 1 #include <libpmem.h> #include <libpmem2.h> #include <libpmemblk.h> #include <libpmemlog.h> #include <libpmemobj.h> #include <libpmempool.h> #ifdef __cplusplus extern "C" { #endif #include <stdio.h> #include <stdlib.h> #include <unistd.h> #include <stdarg.h> #include <stdint.h> #include <string.h> #include <strings.h> #include <setjmp.h> #include <time.h> #include <sys/types.h> #include <sys/wait.h> #include <sys/stat.h> #include <sys/mman.h> #include <sys/file.h> #ifndef __FreeBSD__ #include <sys/mount.h> #endif #include <fcntl.h> #include <signal.h> #include <errno.h> #include <dirent.h> /* XXX: move OS abstraction layer out of common */ #include "os.h" #include "os_thread.h" #include "util.h" int ut_get_uuid_str(char *); #define UT_MAX_ERR_MSG 128 #define UT_POOL_HDR_UUID_STR_LEN 37 /* uuid string length */ #define UT_POOL_HDR_UUID_GEN_FILE "/proc/sys/kernel/random/uuid" /* XXX - fix this temp hack dup'ing util_strerror when we get mock for win */ void ut_strerror(int errnum, char *buff, size_t bufflen); /* XXX - eliminate duplicated definitions in unittest.h and util.h */ #ifdef _WIN32 static inline int ut_util_statW(const wchar_t *path, os_stat_t *st_bufp) { int retVal = _wstat64(path, st_bufp); /* clear unused bits to avoid confusion */ st_bufp->st_mode &= 0600; return retVal; } #endif /* * unit test support... */ void ut_start(const char *file, int line, const char *func, int argc, char * const argv[], const char *fmt, ...) __attribute__((format(printf, 6, 7))); void ut_startW(const char *file, int line, const char *func, int argc, wchar_t * const argv[], const char *fmt, ...) 
__attribute__((format(printf, 6, 7))); void NORETURN ut_done(const char *file, int line, const char *func, const char *fmt, ...) __attribute__((format(printf, 4, 5))); void NORETURN ut_fatal(const char *file, int line, const char *func, const char *fmt, ...) __attribute__((format(printf, 4, 5))); void NORETURN ut_end(const char *file, int line, const char *func, int ret); void ut_out(const char *file, int line, const char *func, const char *fmt, ...) __attribute__((format(printf, 4, 5))); void ut_err(const char *file, int line, const char *func, const char *fmt, ...) __attribute__((format(printf, 4, 5))); /* indicate the start of the test */ #ifndef _WIN32 #define START(argc, argv, ...)\ ut_start(__FILE__, __LINE__, __func__, argc, argv, __VA_ARGS__) #else #define START(argc, argv, ...)\ wchar_t **wargv = CommandLineToArgvW(GetCommandLineW(), &argc);\ for (int i = 0; i < argc; i++) {\ argv[i] = ut_toUTF8(wargv[i]);\ if (argv[i] == NULL) {\ for (i--; i >= 0; i--)\ free(argv[i]);\ UT_FATAL("Error during arguments conversion\n");\ }\ }\ ut_start(__FILE__, __LINE__, __func__, argc, argv, __VA_ARGS__) #endif /* indicate the start of the test */ #define STARTW(argc, argv, ...)\ ut_startW(__FILE__, __LINE__, __func__, argc, argv, __VA_ARGS__) /* normal exit from test */ #ifndef _WIN32 #define DONE(...)\ ut_done(__FILE__, __LINE__, __func__, __VA_ARGS__) #else #define DONE(...)\ for (int i = argc; i > 0; i--)\ free(argv[i - 1]);\ ut_done(__FILE__, __LINE__, __func__, __VA_ARGS__) #endif #define DONEW(...)\ ut_done(__FILE__, __LINE__, __func__, __VA_ARGS__) #define END(ret, ...)\ ut_end(__FILE__, __LINE__, __func__, ret) /* fatal error detected */ #define UT_FATAL(...)\ ut_fatal(__FILE__, __LINE__, __func__, __VA_ARGS__) /* normal output */ #define UT_OUT(...)\ ut_out(__FILE__, __LINE__, __func__, __VA_ARGS__) /* error output */ #define UT_ERR(...)\ ut_err(__FILE__, __LINE__, __func__, __VA_ARGS__) /* * assertions... */ /* assert a condition is true at runtime */ #define UT_ASSERT_rt(cnd)\ ((void)((cnd) || (ut_fatal(__FILE__, __LINE__, __func__,\ "assertion failure: %s", #cnd), 0))) /* assertion with extra info printed if assertion fails at runtime */ #define UT_ASSERTinfo_rt(cnd, info) \ ((void)((cnd) || (ut_fatal(__FILE__, __LINE__, __func__,\ "assertion failure: %s (%s)", #cnd, info), 0))) /* assert two integer values are equal at runtime */ #define UT_ASSERTeq_rt(lhs, rhs)\ ((void)(((lhs) == (rhs)) || (ut_fatal(__FILE__, __LINE__, __func__,\ "assertion failure: %s (0x%llx) == %s (0x%llx)", #lhs,\ (unsigned long long)(lhs), #rhs, (unsigned long long)(rhs)), 0))) /* assert two integer values are not equal at runtime */ #define UT_ASSERTne_rt(lhs, rhs)\ ((void)(((lhs) != (rhs)) || (ut_fatal(__FILE__, __LINE__, __func__,\ "assertion failure: %s (0x%llx) != %s (0x%llx)", #lhs,\ (unsigned long long)(lhs), #rhs, (unsigned long long)(rhs)), 0))) #if defined(__CHECKER__) #define UT_COMPILE_ERROR_ON(cond) #define UT_ASSERT_COMPILE_ERROR_ON(cond) #elif defined(_MSC_VER) #define UT_COMPILE_ERROR_ON(cond) C_ASSERT(!(cond)) /* XXX - can't be done with C_ASSERT() unless we have __builtin_constant_p() */ #define UT_ASSERT_COMPILE_ERROR_ON(cond) (void)(cond) #else #define UT_COMPILE_ERROR_ON(cond) ((void)sizeof(char[(cond) ? 
-1 : 1])) #ifndef __cplusplus #define UT_ASSERT_COMPILE_ERROR_ON(cond) UT_COMPILE_ERROR_ON(cond) #else /* __cplusplus */ /* * XXX - workaround for https://github.com/pmem/issues/issues/189 */ #define UT_ASSERT_COMPILE_ERROR_ON(cond) UT_ASSERT_rt(!(cond)) #endif /* __cplusplus */ #endif /* _MSC_VER */ /* assert a condition is true */ #define UT_ASSERT(cnd)\ do {\ /*\ * Detect useless asserts on always true expression. Please use\ * UT_COMPILE_ERROR_ON(!cnd) or UT_ASSERT_rt(cnd) in such\ * cases.\ */\ if (__builtin_constant_p(cnd))\ UT_ASSERT_COMPILE_ERROR_ON(cnd);\ UT_ASSERT_rt(cnd);\ } while (0) /* assertion with extra info printed if assertion fails */ #define UT_ASSERTinfo(cnd, info) \ do {\ /* See comment in UT_ASSERT. */\ if (__builtin_constant_p(cnd))\ UT_ASSERT_COMPILE_ERROR_ON(cnd);\ UT_ASSERTinfo_rt(cnd, info);\ } while (0) /* assert two integer values are equal */ #define UT_ASSERTeq(lhs, rhs)\ do {\ /* See comment in UT_ASSERT. */\ if (__builtin_constant_p(lhs) && __builtin_constant_p(rhs))\ UT_ASSERT_COMPILE_ERROR_ON((lhs) == (rhs));\ UT_ASSERTeq_rt(lhs, rhs);\ } while (0) /* assert two integer values are not equal */ #define UT_ASSERTne(lhs, rhs)\ do {\ /* See comment in UT_ASSERT. */\ if (__builtin_constant_p(lhs) && __builtin_constant_p(rhs))\ UT_ASSERT_COMPILE_ERROR_ON((lhs) != (rhs));\ UT_ASSERTne_rt(lhs, rhs);\ } while (0) /* assert pointer is fits range of [start, start + size) */ #define UT_ASSERTrange(ptr, start, size)\ ((void)(((uintptr_t)(ptr) >= (uintptr_t)(start) &&\ (uintptr_t)(ptr) < (uintptr_t)(start) + (uintptr_t)(size)) ||\ (ut_fatal(__FILE__, __LINE__, __func__,\ "assert failure: %s (%p) is outside range [%s (%p), %s (%p))", #ptr,\ (void *)(ptr), #start, (void *)(start), #start"+"#size,\ (void *)((uintptr_t)(start) + (uintptr_t)(size))), 0))) /* * memory allocation... 
*/ void *ut_malloc(const char *file, int line, const char *func, size_t size); void *ut_calloc(const char *file, int line, const char *func, size_t nmemb, size_t size); void ut_free(const char *file, int line, const char *func, void *ptr); void ut_aligned_free(const char *file, int line, const char *func, void *ptr); void *ut_realloc(const char *file, int line, const char *func, void *ptr, size_t size); char *ut_strdup(const char *file, int line, const char *func, const char *str); void *ut_pagealignmalloc(const char *file, int line, const char *func, size_t size); void *ut_memalign(const char *file, int line, const char *func, size_t alignment, size_t size); void *ut_mmap_anon_aligned(const char *file, int line, const char *func, size_t alignment, size_t size); int ut_munmap_anon_aligned(const char *file, int line, const char *func, void *start, size_t size); /* a malloc() that can't return NULL */ #define MALLOC(size)\ ut_malloc(__FILE__, __LINE__, __func__, size) /* a calloc() that can't return NULL */ #define CALLOC(nmemb, size)\ ut_calloc(__FILE__, __LINE__, __func__, nmemb, size) /* a malloc() of zeroed memory */ #define ZALLOC(size)\ ut_calloc(__FILE__, __LINE__, __func__, 1, size) #define FREE(ptr)\ ut_free(__FILE__, __LINE__, __func__, ptr) #define ALIGNED_FREE(ptr)\ ut_aligned_free(__FILE__, __LINE__, __func__, ptr) /* a realloc() that can't return NULL */ #define REALLOC(ptr, size)\ ut_realloc(__FILE__, __LINE__, __func__, ptr, size) /* a strdup() that can't return NULL */ #define STRDUP(str)\ ut_strdup(__FILE__, __LINE__, __func__, str) /* a malloc() that only returns page aligned memory */ #define PAGEALIGNMALLOC(size)\ ut_pagealignmalloc(__FILE__, __LINE__, __func__, size) /* a malloc() that returns memory with given alignment */ #define MEMALIGN(alignment, size)\ ut_memalign(__FILE__, __LINE__, __func__, alignment, size) /* * A mmap() that returns anonymous memory with given alignment and guard * pages. 
*/ #define MMAP_ANON_ALIGNED(size, alignment)\ ut_mmap_anon_aligned(__FILE__, __LINE__, __func__, alignment, size) #define MUNMAP_ANON_ALIGNED(start, size)\ ut_munmap_anon_aligned(__FILE__, __LINE__, __func__, start, size) /* * file operations */ int ut_open(const char *file, int line, const char *func, const char *path, int flags, ...); int ut_wopen(const char *file, int line, const char *func, const wchar_t *path, int flags, ...); int ut_close(const char *file, int line, const char *func, int fd); FILE *ut_fopen(const char *file, int line, const char *func, const char *path, const char *mode); int ut_fclose(const char *file, int line, const char *func, FILE *stream); int ut_unlink(const char *file, int line, const char *func, const char *path); size_t ut_write(const char *file, int line, const char *func, int fd, const void *buf, size_t len); size_t ut_read(const char *file, int line, const char *func, int fd, void *buf, size_t len); os_off_t ut_lseek(const char *file, int line, const char *func, int fd, os_off_t offset, int whence); int ut_posix_fallocate(const char *file, int line, const char *func, int fd, os_off_t offset, os_off_t len); int ut_stat(const char *file, int line, const char *func, const char *path, os_stat_t *st_bufp); int ut_statW(const char *file, int line, const char *func, const wchar_t *path, os_stat_t *st_bufp); int ut_fstat(const char *file, int line, const char *func, int fd, os_stat_t *st_bufp); void *ut_mmap(const char *file, int line, const char *func, void *addr, size_t length, int prot, int flags, int fd, os_off_t offset); int ut_munmap(const char *file, int line, const char *func, void *addr, size_t length); int ut_mprotect(const char *file, int line, const char *func, void *addr, size_t len, int prot); int ut_ftruncate(const char *file, int line, const char *func, int fd, os_off_t length); long long ut_strtoll(const char *file, int line, const char *func, const char *nptr, char **endptr, int base); long ut_strtol(const char *file, int line, const char *func, const char *nptr, char **endptr, int base); int ut_strtoi(const char *file, int line, const char *func, const char *nptr, char **endptr, int base); unsigned long long ut_strtoull(const char *file, int line, const char *func, const char *nptr, char **endptr, int base); unsigned long ut_strtoul(const char *file, int line, const char *func, const char *nptr, char **endptr, int base); unsigned ut_strtou(const char *file, int line, const char *func, const char *nptr, char **endptr, int base); int ut_snprintf(const char *file, int line, const char *func, char *str, size_t size, const char *format, ...); /* an open() that can't return < 0 */ #define OPEN(path, ...)\ ut_open(__FILE__, __LINE__, __func__, path, __VA_ARGS__) /* a _wopen() that can't return < 0 */ #define WOPEN(path, ...)\ ut_wopen(__FILE__, __LINE__, __func__, path, __VA_ARGS__) /* a close() that can't return -1 */ #define CLOSE(fd)\ ut_close(__FILE__, __LINE__, __func__, fd) /* an fopen() that can't return != 0 */ #define FOPEN(path, mode)\ ut_fopen(__FILE__, __LINE__, __func__, path, mode) /* a fclose() that can't return != 0 */ #define FCLOSE(stream)\ ut_fclose(__FILE__, __LINE__, __func__, stream) /* an unlink() that can't return -1 */ #define UNLINK(path)\ ut_unlink(__FILE__, __LINE__, __func__, path) /* a write() that can't return -1 */ #define WRITE(fd, buf, len)\ ut_write(__FILE__, __LINE__, __func__, fd, buf, len) /* a read() that can't return -1 */ #define READ(fd, buf, len)\ ut_read(__FILE__, __LINE__, __func__, fd, buf, len) /* a 
lseek() that can't return -1 */ #define LSEEK(fd, offset, whence)\ ut_lseek(__FILE__, __LINE__, __func__, fd, offset, whence) #define POSIX_FALLOCATE(fd, off, len)\ ut_posix_fallocate(__FILE__, __LINE__, __func__, fd, off, len) #define FSTAT(fd, st_bufp)\ ut_fstat(__FILE__, __LINE__, __func__, fd, st_bufp) /* a mmap() that can't return MAP_FAILED */ #define MMAP(addr, len, prot, flags, fd, offset)\ ut_mmap(__FILE__, __LINE__, __func__, addr, len, prot, flags, fd, offset); /* a munmap() that can't return -1 */ #define MUNMAP(addr, length)\ ut_munmap(__FILE__, __LINE__, __func__, addr, length); /* a mprotect() that can't return -1 */ #define MPROTECT(addr, len, prot)\ ut_mprotect(__FILE__, __LINE__, __func__, addr, len, prot); #define STAT(path, st_bufp)\ ut_stat(__FILE__, __LINE__, __func__, path, st_bufp) #define STATW(path, st_bufp)\ ut_statW(__FILE__, __LINE__, __func__, path, st_bufp) #define FTRUNCATE(fd, length)\ ut_ftruncate(__FILE__, __LINE__, __func__, fd, length) #define ATOU(nptr) STRTOU(nptr, NULL, 10) #define ATOUL(nptr) STRTOUL(nptr, NULL, 10) #define ATOULL(nptr) STRTOULL(nptr, NULL, 10) #define ATOI(nptr) STRTOI(nptr, NULL, 10) #define ATOL(nptr) STRTOL(nptr, NULL, 10) #define ATOLL(nptr) STRTOLL(nptr, NULL, 10) #define STRTOULL(nptr, endptr, base)\ ut_strtoull(__FILE__, __LINE__, __func__, nptr, endptr, base) #define STRTOUL(nptr, endptr, base)\ ut_strtoul(__FILE__, __LINE__, __func__, nptr, endptr, base) #define STRTOL(nptr, endptr, base)\ ut_strtol(__FILE__, __LINE__, __func__, nptr, endptr, base) #define STRTOLL(nptr, endptr, base)\ ut_strtoll(__FILE__, __LINE__, __func__, nptr, endptr, base) #define STRTOU(nptr, endptr, base)\ ut_strtou(__FILE__, __LINE__, __func__, nptr, endptr, base) #define STRTOI(nptr, endptr, base)\ ut_strtoi(__FILE__, __LINE__, __func__, nptr, endptr, base) #define SNPRINTF(str, size, format, ...) \ ut_snprintf(__FILE__, __LINE__, __func__, \ str, size, format, __VA_ARGS__) #ifndef _WIN32 #define ut_jmp_buf_t sigjmp_buf #define ut_siglongjmp(b) siglongjmp(b, 1) #define ut_sigsetjmp(b) sigsetjmp(b, 1) #else #define ut_jmp_buf_t jmp_buf #define ut_siglongjmp(b) longjmp(b, 1) #define ut_sigsetjmp(b) setjmp(b) #endif void ut_suppress_errmsg(void); void ut_unsuppress_errmsg(void); void ut_suppress_crt_assert(void); void ut_unsuppress_crt_assert(void); /* * signals... */ int ut_sigaction(const char *file, int line, const char *func, int signum, struct sigaction *act, struct sigaction *oldact); /* a sigaction() that can't return an error */ #define SIGACTION(signum, act, oldact)\ ut_sigaction(__FILE__, __LINE__, __func__, signum, act, oldact) /* * pthreads... */ int ut_thread_create(const char *file, int line, const char *func, os_thread_t *__restrict thread, const os_thread_attr_t *__restrict attr, void *(*start_routine)(void *), void *__restrict arg); int ut_thread_join(const char *file, int line, const char *func, os_thread_t *thread, void **value_ptr); /* a os_thread_create() that can't return an error */ #define THREAD_CREATE(thread, attr, start_routine, arg)\ ut_thread_create(__FILE__, __LINE__, __func__,\ thread, attr, start_routine, arg) /* a os_thread_join() that can't return an error */ #define THREAD_JOIN(thread, value_ptr)\ ut_thread_join(__FILE__, __LINE__, __func__, thread, value_ptr) /* * processes... */ #ifdef _WIN32 intptr_t ut_spawnv(int argc, const char **argv, ...); #endif /* * mocks... * * NOTE: On Linux, function mocking is implemented using wrapper functions. * See "--wrap" option of the GNU linker. 
* There is no such feature in VC++, so on Windows we do the mocking at * compile time, by redefining symbol names: * - all the references to <symbol> are replaced with <__wrap_symbol> * in all the compilation units, except the one where the <symbol> is * defined and the test source file * - the original definition of <symbol> is replaced with <__real_symbol> * - a wrapper function <__wrap_symbol> must be defined in the test program * (it may still call the original function via <__real_symbol>) * Such solution seems to be sufficient for the purpose of our tests, even * though it has some limitations. I.e. it does no work well with malloc/free, * so to wrap the system memory allocator functions, we use the built-in * feature of all the PMDK libraries, allowing to override default memory * allocator with the custom one. */ #ifndef _WIN32 #define _FUNC_REAL_DECL(name, ret_type, ...)\ ret_type __real_##name(__VA_ARGS__) __attribute__((unused)); #else #define _FUNC_REAL_DECL(name, ret_type, ...)\ ret_type name(__VA_ARGS__); #endif #ifndef _WIN32 #define _FUNC_REAL(name)\ __real_##name #else #define _FUNC_REAL(name)\ name #endif #define RCOUNTER(name)\ _rcounter##name #define FUNC_MOCK_RCOUNTER_SET(name, val)\ RCOUNTER(name) = val; #define FUNC_MOCK(name, ret_type, ...)\ _FUNC_REAL_DECL(name, ret_type, ##__VA_ARGS__)\ static unsigned RCOUNTER(name);\ ret_type __wrap_##name(__VA_ARGS__);\ ret_type __wrap_##name(__VA_ARGS__) {\ switch (util_fetch_and_add32(&RCOUNTER(name), 1)) { #define FUNC_MOCK_DLLIMPORT(name, ret_type, ...)\ __declspec(dllimport) _FUNC_REAL_DECL(name, ret_type, ##__VA_ARGS__)\ static unsigned RCOUNTER(name);\ ret_type __wrap_##name(__VA_ARGS__);\ ret_type __wrap_##name(__VA_ARGS__) {\ switch (util_fetch_and_add32(&RCOUNTER(name), 1)) { #define FUNC_MOCK_END\ }} #define FUNC_MOCK_RUN(run)\ case run: #define FUNC_MOCK_RUN_DEFAULT\ default: #define FUNC_MOCK_RUN_RET(run, ret)\ case run: return (ret); #define FUNC_MOCK_RUN_RET_DEFAULT_REAL(name, ...)\ default: return _FUNC_REAL(name)(__VA_ARGS__); #define FUNC_MOCK_RUN_RET_DEFAULT(ret)\ default: return (ret); #define FUNC_MOCK_RET_ALWAYS(name, ret_type, ret, ...)\ FUNC_MOCK(name, ret_type, __VA_ARGS__)\ FUNC_MOCK_RUN_RET_DEFAULT(ret);\ FUNC_MOCK_END #define FUNC_MOCK_RET_ALWAYS_VOID(name, ...)\ FUNC_MOCK(name, void, __VA_ARGS__)\ default: return;\ FUNC_MOCK_END extern unsigned long Ut_pagesize; extern unsigned long long Ut_mmap_align; extern os_mutex_t Sigactions_lock; void ut_dump_backtrace(void); void ut_sighandler(int); void ut_register_sighandlers(void); uint16_t ut_checksum(uint8_t *addr, size_t len); char *ut_toUTF8(const wchar_t *wstr); wchar_t *ut_toUTF16(const char *wstr); struct test_case { const char *name; int (*func)(const struct test_case *tc, int argc, char *argv[]); }; /* * get_tc -- return test case of specified name */ static inline const struct test_case * get_tc(const char *name, const struct test_case *test_cases, size_t ntests) { for (size_t i = 0; i < ntests; i++) { if (strcmp(name, test_cases[i].name) == 0) return &test_cases[i]; } return NULL; } static inline void TEST_CASE_PROCESS(int argc, char *argv[], const struct test_case *test_cases, size_t ntests) { if (argc < 2) UT_FATAL("usage: %s <test case> [<args>]", argv[0]); for (int i = 1; i < argc; i++) { char *str_test = argv[i]; const int args_off = i + 1; const struct test_case *tc = get_tc(str_test, test_cases, ntests); if (!tc) UT_FATAL("unknown test case -- '%s'", str_test); int ret = tc->func(tc, argc - args_off, &argv[args_off]); if (ret < 0) 
UT_FATAL("test return value cannot be negative"); i += ret; } } #define TEST_CASE_DECLARE(_name)\ int \ _name(const struct test_case *tc, int argc, char *argv[]) #define TEST_CASE(_name)\ {\ .name = #_name,\ .func = (_name),\ } #define STR(x) #x #define ASSERT_ALIGNED_BEGIN(type) do {\ size_t off = 0;\ const char *last = "(none)";\ type t; #define ASSERT_ALIGNED_FIELD(type, field) do {\ if (offsetof(type, field) != off)\ UT_FATAL("%s: padding, missing field or fields not in order between "\ "'%s' and '%s' -- offset %lu, real offset %lu",\ STR(type), last, STR(field), off, offsetof(type, field));\ off += sizeof(t.field);\ last = STR(field);\ } while (0) #define ASSERT_FIELD_SIZE(field, size) do {\ UT_COMPILE_ERROR_ON(size != sizeof(t.field));\ } while (0) #define ASSERT_OFFSET_CHECKPOINT(type, checkpoint) do {\ if (off != checkpoint)\ UT_FATAL("%s: violated offset checkpoint -- "\ "checkpoint %lu, real offset %lu",\ STR(type), checkpoint, off);\ } while (0) #define ASSERT_ALIGNED_CHECK(type)\ if (off != sizeof(type))\ UT_FATAL("%s: missing field or padding after '%s': "\ "sizeof(%s) = %lu, fields size = %lu",\ STR(type), last, STR(type), sizeof(type), off);\ } while (0) /* * AddressSanitizer */ #ifdef __clang__ #if __has_feature(address_sanitizer) #define UT_DEFINE_ASAN_POISON #endif #else #ifdef __SANITIZE_ADDRESS__ #define UT_DEFINE_ASAN_POISON #endif #endif #ifdef UT_DEFINE_ASAN_POISON void __asan_poison_memory_region(void const volatile *addr, size_t size); void __asan_unpoison_memory_region(void const volatile *addr, size_t size); #define ASAN_POISON_MEMORY_REGION(addr, size) \ __asan_poison_memory_region((addr), (size)) #define ASAN_UNPOISON_MEMORY_REGION(addr, size) \ __asan_unpoison_memory_region((addr), (size)) #else #define ASAN_POISON_MEMORY_REGION(addr, size) \ ((void)(addr), (void)(size)) #define ASAN_UNPOISON_MEMORY_REGION(addr, size) \ ((void)(addr), (void)(size)) #endif #ifdef __cplusplus } #endif #endif /* unittest.h */
23,907
29.769627
79
h
null
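A hypothetical minimal test (not part of the repo) showing the boilerplate the header comment above describes: START()/DONE() bracket the test, and the all-caps wrappers such as MALLOC() and UT_ASSERTeq() do the common error checking and report file:line context on failure.

#include "unittest.h"

int
main(int argc, char *argv[])
{
	START(argc, argv, "example_test");

	char *buf = MALLOC(128);	/* aborts the test instead of returning NULL */
	memset(buf, 0xab, 128);
	UT_ASSERTeq(buf[0], (char)0xab);
	FREE(buf);

	DONE(NULL);
}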
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/unittest/ut_fh.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2019-2020, Intel Corporation */ /* * ut_fh.c -- implementation of OS-independent file handle / file descriptor * interface */ /* for O_TMPFILE */ #define _GNU_SOURCE #include <fcntl.h> #include "ut_fh.h" #include "unittest.h" struct FHandle { int fd; #ifdef _WIN32 HANDLE h; #endif enum file_handle_type type; }; #ifdef _WIN32 #define HIDWORD(x) ((DWORD)((x) >> 32)) #define LODWORD(x) ((DWORD)((x) & 0xFFFFFFFF)) #endif static void check_invalid_flags(const char *file, int line, const char *func, int flags) { if ((flags & FH_EXCL) && !(flags & FH_CREAT)) { ut_fatal(file, line, func, "FH_EXCL without FH_CREAT is meaningless"); } if ((flags & FH_TRUNC) && (flags & FH_CREAT)) { /* because Windows doesn't support both */ ut_fatal(file, line, func, "FH_TRUNC with FH_CREAT is forbidden"); } } static int ut_fh_open_fd(const char *file, int line, const char *func, const char *path, int flags, mode_t mode) { int sflags = 0; check_invalid_flags(file, line, func, flags); if ((flags & (FH_CREAT | FH_EXCL)) == (FH_CREAT | FH_EXCL)) { flags &= ~(FH_CREAT | FH_EXCL); sflags |= O_CREAT | O_EXCL; } else if (flags & FH_CREAT) { flags &= ~FH_CREAT; sflags |= O_CREAT; /* Windows version doesn't support both O_TRUNC and O_CREAT */ } else if (flags & FH_TRUNC) { flags &= ~FH_TRUNC; sflags |= O_TRUNC; } int acc = flags & FH_ACCMODE; /* Linux version does not have FH_EXEC equivalent */ if ((acc & FH_WRITE) && (acc & FH_READ)) sflags |= O_RDWR; else if (acc & FH_WRITE) sflags |= O_WRONLY; else if (acc & FH_READ) sflags |= O_RDONLY; else ut_fatal(file, line, func, "unknown access mode %d", acc); flags &= ~FH_ACCMODE; if (flags & FH_DIRECTORY) { #ifdef _WIN32 ut_fatal(file, line, func, "FH_DIRECTORY is not supported on Windows using FD interface"); #else flags &= ~FH_DIRECTORY; sflags |= O_DIRECTORY; #endif } if (flags & FH_TMPFILE) { #ifdef O_TMPFILE flags &= ~FH_TMPFILE; sflags |= O_TMPFILE; #else ut_fatal(file, line, func, "FH_TMPFILE is not supported on this system for file descriptors"); #endif } if (flags) ut_fatal(file, line, func, "unsupported flag(s) 0%o", flags); return ut_open(file, line, func, path, sflags, mode); } #ifdef _WIN32 static HANDLE ut_fh_open_handle(const char *file, int line, const char *func, const char *path, int flags, mode_t mode) { DWORD dwDesiredAccess; /* do not allow delete, read or write from another process */ DWORD dwShareMode = 0; LPSECURITY_ATTRIBUTES lpSecurityAttributes = NULL; DWORD dwCreationDisposition; DWORD dwFlagsAndAttributes = FILE_ATTRIBUTE_NORMAL; HANDLE hTemplateFile = NULL; /* XXX sometimes doesn't work, ERROR_ACCESS_DENIED on AppVeyor */ #if 0 /* * FILE_FLAG_DELETE_ON_CLOSE needs a real file (FH_CREAT) * If it already exists refuse to use it (FH_EXCL), because this means * something weird is going on (either there's another process with * the same file opened or FILE_FLAG_DELETE_ON_CLOSE didn't actually * delete the file on close) */ if (flags & FH_TMPFILE) flags |= FH_CREAT | FH_EXCL; #else if (flags & FH_TMPFILE) ut_fatal(file, line, func, "FH_TMPFILE is not supported for file handles"); #endif check_invalid_flags(file, line, func, flags); /* only write permission can be taken out on Windows */ if (!(mode & _S_IWRITE)) dwFlagsAndAttributes |= FILE_ATTRIBUTE_READONLY; if ((flags & (FH_CREAT | FH_EXCL)) == (FH_CREAT | FH_EXCL)) { flags &= ~(FH_CREAT | FH_EXCL); dwCreationDisposition = CREATE_NEW; } else if (flags & FH_CREAT) { flags &= ~FH_CREAT; dwCreationDisposition = OPEN_ALWAYS; } else if (flags & FH_TRUNC) { 
flags &= ~FH_TRUNC; dwCreationDisposition = TRUNCATE_EXISTING; } else { dwCreationDisposition = OPEN_EXISTING; } int acc = flags & FH_ACCMODE; dwDesiredAccess = 0; if (acc & FH_READ) { dwDesiredAccess |= GENERIC_READ; acc &= ~FH_READ; } if (acc & FH_WRITE) { dwDesiredAccess |= GENERIC_WRITE; acc &= ~FH_WRITE; } if (acc & FH_EXEC) { dwDesiredAccess |= GENERIC_EXECUTE; acc &= ~FH_EXEC; } if (acc) ut_fatal(file, line, func, "unknown access mode %d", acc); flags &= ~FH_ACCMODE; if (flags & FH_DIRECTORY) { flags &= ~FH_DIRECTORY; /* GJ MS */ dwFlagsAndAttributes |= FILE_FLAG_BACKUP_SEMANTICS; } char *full_path = NULL; if (flags & FH_TMPFILE) { flags &= ~FH_TMPFILE; dwFlagsAndAttributes |= FILE_FLAG_DELETE_ON_CLOSE; /* * FILE_FLAG_DELETE_ON_CLOSE needs a real file, * not a directory */ full_path = MALLOC(strlen(path) + 1 + strlen("UT_FH_TMPFILE") + 1); sprintf(full_path, "%s\\UT_FH_TMPFILE", path); path = full_path; } if (flags) ut_fatal(file, line, func, "unsupported flag(s) 0%o", flags); wchar_t *wpath = util_toUTF16(path); if (wpath == NULL) ut_fatal(file, line, func, "conversion to utf16 failed"); HANDLE h = CreateFileW(wpath, dwDesiredAccess, dwShareMode, lpSecurityAttributes, dwCreationDisposition, dwFlagsAndAttributes, hTemplateFile); util_free_UTF16(wpath); if (h == INVALID_HANDLE_VALUE) { ut_fatal(file, line, func, "opening file %s failed: %d", path, GetLastError()); } if (full_path) free(full_path); return h; } #endif struct FHandle * ut_fh_open(const char *file, int line, const char *func, enum file_handle_type type, const char *path, int flags, ...) { struct FHandle *f = MALLOC(sizeof(*f)); mode_t mode = 0; va_list ap; va_start(ap, flags); if ((flags & FH_CREAT) || (flags & FH_TMPFILE)) mode = va_arg(ap, mode_t); va_end(ap); f->type = type; if (type == FH_FD) { f->fd = ut_fh_open_fd(file, line, func, path, flags, mode); } else if (type == FH_HANDLE) { #ifdef _WIN32 f->h = ut_fh_open_handle(file, line, func, path, flags, mode); #else ut_fatal(file, line, func, "FH_HANDLE not supported on !Windows"); #endif } else { ut_fatal(file, line, func, "unknown type value %d", type); } return f; } void ut_fh_truncate(const char *file, int line, const char *func, struct FHandle *f, os_off_t length) { if (f->type == FH_FD) { ut_ftruncate(file, line, func, f->fd, length); } else if (f->type == FH_HANDLE) { #ifdef _WIN32 LONG low = LODWORD(length); LONG high = HIDWORD(length); if (SetFilePointer(f->h, low, &high, FILE_BEGIN) == INVALID_SET_FILE_POINTER && GetLastError() != ERROR_SUCCESS) { ut_fatal(file, line, func, "SetFilePointer failed: %d", GetLastError()); } if (SetEndOfFile(f->h) == 0) { ut_fatal(file, line, func, "SetEndOfFile failed: %d", GetLastError()); } #else ut_fatal(file, line, func, "FH_HANDLE not supported on !Windows"); #endif } else { ut_fatal(file, line, func, "unknown type value %d", f->type); } } void ut_fh_close(const char *file, int line, const char *func, struct FHandle *f) { if (f->type == FH_FD) { CLOSE(f->fd); } else if (f->type == FH_HANDLE) { #ifdef _WIN32 CloseHandle(f->h); #else ut_fatal(file, line, func, "FH_HANDLE not supported on !Windows"); #endif } else { ut_fatal(file, line, func, "unknown type value %d", f->type); } memset(f, 0, sizeof(*f)); FREE(f); } int ut_fh_get_fd(const char *file, int line, const char *func, struct FHandle *f) { if (f->type == FH_FD) return f->fd; ut_fatal(file, line, func, "requested file descriptor on FHandle that doesn't contain it"); } #ifdef _WIN32 HANDLE ut_fh_get_handle(const char *file, int line, const char *func, struct FHandle 
*f) { if (f->type == FH_HANDLE) return f->h; ut_fatal(file, line, func, "requested file handle on FHandle that doesn't contain it"); } #endif enum file_handle_type ut_fh_get_handle_type(struct FHandle *fh) { return fh->type; }
7,734
22.158683
77
c
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/unittest/ut_pmem2_config.h
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2019-2020, Intel Corporation */

/*
 * ut_pmem2_config.h -- utility helper functions for libpmem2 config tests
 */

#ifndef UT_PMEM2_CONFIG_H
#define UT_PMEM2_CONFIG_H 1

#include "ut_fh.h"

/* a pmem2_config_new() that can't return NULL */
#define PMEM2_CONFIG_NEW(cfg) \
	ut_pmem2_config_new(__FILE__, __LINE__, __func__, cfg)

/* a pmem2_config_set_required_store_granularity() that can't return an error */
#define PMEM2_CONFIG_SET_GRANULARITY(cfg, g) \
	ut_pmem2_config_set_required_store_granularity \
	(__FILE__, __LINE__, __func__, cfg, g)

/* a pmem2_config_delete() that can't fail */
#define PMEM2_CONFIG_DELETE(cfg) \
	ut_pmem2_config_delete(__FILE__, __LINE__, __func__, cfg)

void ut_pmem2_config_new(const char *file, int line, const char *func,
	struct pmem2_config **cfg);

void ut_pmem2_config_set_required_store_granularity(const char *file,
	int line, const char *func, struct pmem2_config *cfg,
	enum pmem2_granularity g);

void ut_pmem2_config_delete(const char *file, int line, const char *func,
	struct pmem2_config **cfg);

#endif /* UT_PMEM2_CONFIG_H */
1,152
30.162162
77
h
null
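A hypothetical usage sketch for the wrappers above (PMEM2_GRANULARITY_PAGE is a standard libpmem2 granularity value, used elsewhere in this test suite):

#include <libpmem2.h>

#include "unittest.h"
#include "ut_pmem2_config.h"

static void
example_config_roundtrip(void)
{
	struct pmem2_config *cfg;

	PMEM2_CONFIG_NEW(&cfg);		/* aborts if pmem2_config_new() fails */
	PMEM2_CONFIG_SET_GRANULARITY(cfg, PMEM2_GRANULARITY_PAGE);
	PMEM2_CONFIG_DELETE(&cfg);	/* also asserts cfg was reset to NULL */
}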
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/unittest/ut_alloc.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2014-2017, Intel Corporation */ /* * ut_alloc.c -- unit test memory allocation routines */ #include "unittest.h" /* * ut_malloc -- a malloc that cannot return NULL */ void * ut_malloc(const char *file, int line, const char *func, size_t size) { void *retval = malloc(size); if (retval == NULL) ut_fatal(file, line, func, "cannot malloc %zu bytes", size); return retval; } /* * ut_calloc -- a calloc that cannot return NULL */ void * ut_calloc(const char *file, int line, const char *func, size_t nmemb, size_t size) { void *retval = calloc(nmemb, size); if (retval == NULL) ut_fatal(file, line, func, "cannot calloc %zu bytes", size); return retval; } /* * ut_free -- wrapper for free * * technically we don't need to wrap free since there's no return to * check. using this wrapper to add memory allocation tracking later. */ void ut_free(const char *file, int line, const char *func, void *ptr) { free(ptr); } /* * ut_aligned_free -- wrapper for aligned memory free */ void ut_aligned_free(const char *file, int line, const char *func, void *ptr) { #ifndef _WIN32 free(ptr); #else _aligned_free(ptr); #endif } /* * ut_realloc -- a realloc that cannot return NULL */ void * ut_realloc(const char *file, int line, const char *func, void *ptr, size_t size) { void *retval = realloc(ptr, size); if (retval == NULL) ut_fatal(file, line, func, "cannot realloc %zu bytes", size); return retval; } /* * ut_strdup -- a strdup that cannot return NULL */ char * ut_strdup(const char *file, int line, const char *func, const char *str) { char *retval = strdup(str); if (retval == NULL) ut_fatal(file, line, func, "cannot strdup %zu bytes", strlen(str)); return retval; } /* * ut_memalign -- like malloc but page-aligned memory */ void * ut_memalign(const char *file, int line, const char *func, size_t alignment, size_t size) { void *retval; #ifndef _WIN32 if ((errno = posix_memalign(&retval, alignment, size)) != 0) ut_fatal(file, line, func, "!memalign %zu bytes (%zu alignment)", size, alignment); #else retval = _aligned_malloc(size, alignment); if (!retval) { ut_fatal(file, line, func, "!memalign %zu bytes (%zu alignment)", size, alignment); } #endif return retval; } /* * ut_pagealignmalloc -- like malloc but page-aligned memory */ void * ut_pagealignmalloc(const char *file, int line, const char *func, size_t size) { return ut_memalign(file, line, func, (size_t)Ut_pagesize, size); } /* * ut_mmap_anon_aligned -- mmaps anonymous memory with specified (power of two, * multiple of page size) alignment and adds guard * pages around it */ void * ut_mmap_anon_aligned(const char *file, int line, const char *func, size_t alignment, size_t size) { char *d, *d_aligned; uintptr_t di, di_aligned; size_t sz; if (alignment == 0) alignment = Ut_mmap_align; /* alignment must be a multiple of page size */ if (alignment & (Ut_mmap_align - 1)) return NULL; /* power of two */ if (alignment & (alignment - 1)) return NULL; d = ut_mmap(file, line, func, NULL, size + 2 * alignment, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0); di = (uintptr_t)d; di_aligned = (di + alignment - 1) & ~(alignment - 1); if (di == di_aligned) di_aligned += alignment; d_aligned = (void *)di_aligned; sz = di_aligned - di; if (sz - Ut_mmap_align) ut_munmap(file, line, func, d, sz - Ut_mmap_align); /* guard page before */ ut_mprotect(file, line, func, d_aligned - Ut_mmap_align, Ut_mmap_align, PROT_NONE); /* guard page after */ ut_mprotect(file, line, func, d_aligned + size, Ut_mmap_align, PROT_NONE); sz = di + 
size + 2 * alignment - (di_aligned + size) - Ut_mmap_align; if (sz) ut_munmap(file, line, func, d_aligned + size + Ut_mmap_align, sz); return d_aligned; } /* * ut_munmap_anon_aligned -- unmaps anonymous memory allocated by * ut_mmap_anon_aligned */ int ut_munmap_anon_aligned(const char *file, int line, const char *func, void *start, size_t size) { return ut_munmap(file, line, func, (char *)start - Ut_mmap_align, size + 2 * Ut_mmap_align); }
4,238
20.963731
79
c
null
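A hypothetical fragment illustrating what ut_mmap_anon_aligned() above provides through the MMAP_ANON_ALIGNED() wrapper from unittest.h: an aligned anonymous mapping with PROT_NONE guard pages placed directly before and after the usable range, so out-of-bounds accesses fault immediately.

#include "unittest.h"

static void
example_guarded_buffer(void)
{
	size_t size = 2 * Ut_pagesize;

	/* 0 selects the default (page) alignment */
	char *buf = MMAP_ANON_ALIGNED(size, 0);

	buf[0] = 1;		/* first usable byte */
	buf[size - 1] = 1;	/* last usable byte */
	/* buf[-1] or buf[size] would hit a guard page and crash the test */

	MUNMAP_ANON_ALIGNED(buf, size);
}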
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/unittest/ut_pmem2_utils.c
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2019, Intel Corporation */

/*
 * ut_pmem2_utils.c -- utility helper functions for libpmem2 tests
 */

#include "unittest.h"
#include "ut_pmem2_utils.h"

/*
 * ut_pmem2_expect_return -- verifies the return code and prints an
 * appropriate error message in case of error
 */
void
ut_pmem2_expect_return(const char *file, int line, const char *func,
	int value, int expected)
{
	if (value != expected) {
		ut_fatal(file, line, func,
			"unexpected return code (got %d, expected: %d): %s",
			value, expected,
			(value == 0 ? "success" : pmem2_errormsg()));
	}
}
608
23.36
73
c
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/unittest/ut_pmem2_utils.h
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2019, Intel Corporation */

/*
 * ut_pmem2_utils.h -- utility helper functions for libpmem2 tests
 */

#ifndef UT_PMEM2_UTILS_H
#define UT_PMEM2_UTILS_H 1

/* verifies the return code and prints an appropriate error message on error */
#define UT_PMEM2_EXPECT_RETURN(value, expected) \
	ut_pmem2_expect_return(__FILE__, __LINE__, __func__, \
		value, expected)

void ut_pmem2_expect_return(const char *file, int line, const char *func,
	int value, int expected);

#endif /* UT_PMEM2_UTILS_H */
552
26.65
79
h
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/unittest/ut_fh.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2019-2020, Intel Corporation */ /* * ut_fh.h -- OS-independent file handle / file descriptor interface */ #ifndef UT_FH_H #define UT_FH_H #include "os.h" struct FHandle; enum file_handle_type { FH_FD, FH_HANDLE }; #define FH_ACCMODE (7) #define FH_READ (1 << 0) #define FH_WRITE (1 << 1) #define FH_RDWR (FH_READ | FH_WRITE) #define FH_EXEC (1 << 2) #define FH_CREAT (1 << 3) #define FH_EXCL (1 << 4) #define FH_TRUNC (1 << 5) /* needs directory, on Windows it creates publicly visible file */ #define FH_TMPFILE (1 << 6) #define FH_DIRECTORY (1 << 7) #define UT_FH_OPEN(type, path, flags, ...) \ ut_fh_open(__FILE__, __LINE__, __func__, type, path, \ flags, ##__VA_ARGS__) #define UT_FH_TRUNCATE(fhandle, size) \ ut_fh_truncate(__FILE__, __LINE__, __func__, fhandle, size) #define UT_FH_GET_FD(fhandle) \ ut_fh_get_fd(__FILE__, __LINE__, __func__, fhandle) #ifdef _WIN32 #define UT_FH_GET_HANDLE(fhandle) \ ut_fh_get_handle(__FILE__, __LINE__, __func__, fhandle) #endif #define UT_FH_CLOSE(fhandle) \ ut_fh_close(__FILE__, __LINE__, __func__, fhandle) struct FHandle *ut_fh_open(const char *file, int line, const char *func, enum file_handle_type type, const char *path, int flags, ...); void ut_fh_truncate(const char *file, int line, const char *func, struct FHandle *f, os_off_t length); void ut_fh_close(const char *file, int line, const char *func, struct FHandle *f); enum file_handle_type ut_fh_get_handle_type(struct FHandle *fh); int ut_fh_get_fd(const char *file, int line, const char *func, struct FHandle *f); #ifdef _WIN32 HANDLE ut_fh_get_handle(const char *file, int line, const char *func, struct FHandle *f); #endif #endif
1,761
24.536232
72
h
null
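A hypothetical usage sketch for the OS-independent handle interface above: create or open a file, resize it, fetch the raw descriptor, and close the handle; every wrapper aborts the test on error.

#include "unittest.h"
#include "ut_fh.h"

static void
example_fh_usage(const char *path)
{
	struct FHandle *fh = UT_FH_OPEN(FH_FD, path,
			FH_RDWR | FH_CREAT, 0644);

	UT_FH_TRUNCATE(fh, 4096);	/* grow the file to one page */

	int fd = UT_FH_GET_FD(fh);	/* raw descriptor, if needed */
	(void) fd;

	UT_FH_CLOSE(fh);
}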
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/unittest/ut_pmem2_source.c
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */

/*
 * ut_pmem2_source.c -- utility helper functions for libpmem2 source tests
 */

#include <libpmem2.h>

#include "unittest.h"
#include "ut_pmem2_source.h"
#include "ut_pmem2_utils.h"

/*
 * ut_pmem2_source_from_fd -- sets fd (cannot fail)
 */
void
ut_pmem2_source_from_fd(const char *file, int line, const char *func,
	struct pmem2_source **src, int fd)
{
	int ret = pmem2_source_from_fd(src, fd);
	ut_pmem2_expect_return(file, line, func, ret, 0);
}

/*
 * ut_pmem2_source_from_fh -- creates a source from a file handle
 * (cannot fail)
 */
void
ut_pmem2_source_from_fh(const char *file, int line, const char *func,
	struct pmem2_source **src, struct FHandle *f)
{
	enum file_handle_type type = ut_fh_get_handle_type(f);
	int ret;

	if (type == FH_FD) {
		int fd = ut_fh_get_fd(file, line, func, f);
#ifdef _WIN32
		ret = pmem2_source_from_handle(src,
			(HANDLE)_get_osfhandle(fd));
#else
		ret = pmem2_source_from_fd(src, fd);
#endif
	} else if (type == FH_HANDLE) {
#ifdef _WIN32
		HANDLE h = ut_fh_get_handle(file, line, func, f);
		ret = pmem2_source_from_handle(src, h);
#else
		ut_fatal(file, line, func,
			"FH_HANDLE not supported on !Windows");
#endif
	} else {
		ut_fatal(file, line, func,
			"unknown file handle type");
	}

	ut_pmem2_expect_return(file, line, func, ret, 0);
}

/*
 * ut_pmem2_source_alignment -- queries the source alignment (cannot fail)
 */
void
ut_pmem2_source_alignment(const char *file, int line, const char *func,
	struct pmem2_source *src, size_t *al)
{
	int ret = pmem2_source_alignment(src, al);
	ut_pmem2_expect_return(file, line, func, ret, 0);
}

/*
 * ut_pmem2_source_delete -- deletes the source (cannot fail)
 */
void
ut_pmem2_source_delete(const char *file, int line, const char *func,
	struct pmem2_source **src)
{
	int ret = pmem2_source_delete(src);
	ut_pmem2_expect_return(file, line, func, ret, 0);
	UT_ASSERTeq(*src, NULL);
}

/*
 * ut_pmem2_source_size -- queries the source size (cannot fail)
 */
void
ut_pmem2_source_size(const char *file, int line, const char *func,
	struct pmem2_source *src, size_t *size)
{
	int ret = pmem2_source_size(src, size);
	ut_pmem2_expect_return(file, line, func, ret, 0);
}
1,929
24.064935
74
c
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/unittest/ut_signal.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2014-2017, Intel Corporation */ /* * ut_signal.c -- unit test signal operations */ #include "unittest.h" #ifdef _WIN32 /* * On Windows, Access Violation exception does not raise SIGSEGV signal. * The trick is to catch the exception and... call the signal handler. */ /* * Sigactions[] - allows registering more than one signal/exception handler */ static struct sigaction Sigactions[NSIG]; /* * exception_handler -- called for unhandled exceptions */ static LONG CALLBACK exception_handler(_In_ PEXCEPTION_POINTERS ExceptionInfo) { DWORD excode = ExceptionInfo->ExceptionRecord->ExceptionCode; if (excode == EXCEPTION_ACCESS_VIOLATION) Sigactions[SIGSEGV].sa_handler(SIGSEGV); return EXCEPTION_CONTINUE_EXECUTION; } /* * signal_handler_wrapper -- (internal) wrapper for user-defined signal handler * * Before the specified handler function is executed, signal disposition * is reset to SIG_DFL. This wrapper allows to handle subsequent signals * without the need to set the signal disposition again. */ static void signal_handler_wrapper(int signum) { _crt_signal_t retval = signal(signum, signal_handler_wrapper); if (retval == SIG_ERR) UT_FATAL("!signal: %d", signum); if (Sigactions[signum].sa_handler) Sigactions[signum].sa_handler(signum); else UT_FATAL("handler for signal: %d is not defined", signum); } #endif /* * ut_sigaction -- a sigaction that cannot return < 0 */ int ut_sigaction(const char *file, int line, const char *func, int signum, struct sigaction *act, struct sigaction *oldact) { #ifndef _WIN32 int retval = sigaction(signum, act, oldact); if (retval != 0) ut_fatal(file, line, func, "!sigaction: %s", os_strsignal(signum)); return retval; #else UT_ASSERT(signum < NSIG); os_mutex_lock(&Sigactions_lock); if (oldact) *oldact = Sigactions[signum]; if (act) Sigactions[signum] = *act; os_mutex_unlock(&Sigactions_lock); if (signum == SIGABRT) { ut_suppress_errmsg(); } if (signum == SIGSEGV) { AddVectoredExceptionHandler(0, exception_handler); } _crt_signal_t retval = signal(signum, signal_handler_wrapper); if (retval == SIG_ERR) ut_fatal(file, line, func, "!signal: %d", signum); if (oldact != NULL) oldact->sa_handler = retval; return 0; #endif }
2,306
23.806452
79
c
null
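A hypothetical fragment showing how a test might register a handler through the SIGACTION() wrapper declared in unittest.h and recover from an expected fault with ut_sigsetjmp()/ut_siglongjmp(); on Windows the exception-handler plumbing above routes access violations to the same handler. This is a sketch of the pattern, not code from the repo.

#include "unittest.h"

static ut_jmp_buf_t Jmp;

static void
segv_handler(int sig)
{
	(void) sig;
	ut_siglongjmp(Jmp);	/* jump back into the test */
}

static void
example_expect_fault(char *bad_addr)
{
	struct sigaction act;

	memset(&act, 0, sizeof(act));
	act.sa_handler = segv_handler;
	SIGACTION(SIGSEGV, &act, NULL);

	if (!ut_sigsetjmp(Jmp))
		*bad_addr = 1;	/* expected to fault and long-jump back */
}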
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/unittest/ut_pthread.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2014-2017, Intel Corporation */ /* * ut_pthread.c -- unit test wrappers for pthread routines */ #include "unittest.h" /* * ut_thread_create -- a os_thread_create that cannot return an error */ int ut_thread_create(const char *file, int line, const char *func, os_thread_t *__restrict thread, const os_thread_attr_t *__restrict attr, void *(*start_routine)(void *), void *__restrict arg) { if ((errno = os_thread_create(thread, attr, start_routine, arg)) != 0) ut_fatal(file, line, func, "!os_thread_create"); return 0; } /* * ut_thread_join -- a os_thread_join that cannot return an error */ int ut_thread_join(const char *file, int line, const char *func, os_thread_t *thread, void **value_ptr) { if ((errno = os_thread_join(thread, value_ptr)) != 0) ut_fatal(file, line, func, "!os_thread_join"); return 0; }
901
23.378378
71
c
null
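A hypothetical round-trip using the THREAD_CREATE()/THREAD_JOIN() wrappers from unittest.h, which call the functions above and abort the test if the underlying os_thread calls report an error:

#include "unittest.h"

static void *
worker(void *arg)
{
	return arg;	/* echo the argument back to the joiner */
}

static void
example_thread_roundtrip(void)
{
	os_thread_t t;
	void *ret;

	THREAD_CREATE(&t, NULL, worker, (void *)0x42);
	THREAD_JOIN(&t, &ret);
	UT_ASSERTeq(ret, (void *)0x42);
}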
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/unittest/ut_pmem2_map.h
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */

/*
 * ut_pmem2_map.h -- utility helper functions for libpmem2 map tests
 */

#ifndef UT_PMEM2_MAP_H
#define UT_PMEM2_MAP_H 1

/* a pmem2_map() that can't return NULL */
#define PMEM2_MAP(cfg, src, map) \
	ut_pmem2_map(__FILE__, __LINE__, __func__, cfg, src, map)

void ut_pmem2_map(const char *file, int line, const char *func,
	struct pmem2_config *cfg, struct pmem2_source *src,
	struct pmem2_map **map);

#endif /* UT_PMEM2_MAP_H */
522
25.15
68
h
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/unittest/ut_pmem2_config.c
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2019-2020, Intel Corporation */

/*
 * ut_pmem2_config.c -- utility helper functions for libpmem2 config tests
 */

#include <libpmem2.h>

#include "unittest.h"
#include "ut_pmem2_config.h"
#include "ut_pmem2_utils.h"

/*
 * ut_pmem2_config_new -- allocates cfg (cannot fail)
 */
void
ut_pmem2_config_new(const char *file, int line, const char *func,
	struct pmem2_config **cfg)
{
	int ret = pmem2_config_new(cfg);
	ut_pmem2_expect_return(file, line, func, ret, 0);
	UT_ASSERTne(*cfg, NULL);
}

/*
 * ut_pmem2_config_set_required_store_granularity -- sets granularity
 * (cannot fail)
 */
void
ut_pmem2_config_set_required_store_granularity(const char *file, int line,
	const char *func, struct pmem2_config *cfg, enum pmem2_granularity g)
{
	int ret = pmem2_config_set_required_store_granularity(cfg, g);
	ut_pmem2_expect_return(file, line, func, ret, 0);
}

/*
 * ut_pmem2_config_delete -- deallocates cfg (cannot fail)
 */
void
ut_pmem2_config_delete(const char *file, int line, const char *func,
	struct pmem2_config **cfg)
{
	int ret = pmem2_config_delete(cfg);
	ut_pmem2_expect_return(file, line, func, ret, 0);
	UT_ASSERTeq(*cfg, NULL);
}
1,181
23.122449
74
c
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/unittest/ut_pmem2_setup_integration.c
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */

/*
 * ut_pmem2_setup_integration.c -- libpmem2 setup functions using the public
 * API (for integration tests)
 */

#include <libpmem2.h>

#include "ut_pmem2_config.h"
#include "ut_pmem2_setup_integration.h"
#include "ut_pmem2_source.h"
#include "unittest.h"

/*
 * ut_pmem2_prepare_config_integration -- fills pmem2_config with the minimal
 * required setup
 */
void
ut_pmem2_prepare_config_integration(const char *file, int line,
	const char *func, struct pmem2_config **cfg, struct pmem2_source **src,
	int fd, enum pmem2_granularity granularity)
{
	ut_pmem2_config_new(file, line, func, cfg);
	ut_pmem2_config_set_required_store_granularity(file, line, func,
		*cfg, granularity);
	ut_pmem2_source_from_fd(file, line, func, src, fd);
}
804
26.758621
76
c
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/unittest/ut_pmem2_source.h
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */

/*
 * ut_pmem2_source.h -- utility helper functions for libpmem2 source tests
 */

#ifndef UT_PMEM2_SOURCE_H
#define UT_PMEM2_SOURCE_H 1

#include "ut_fh.h"

/* a pmem2_source_from_fd() that can't fail */
#define PMEM2_SOURCE_FROM_FD(src, fd) \
	ut_pmem2_source_from_fd(__FILE__, __LINE__, __func__, src, fd)

/* creates a pmem2_source from an FHandle; can't fail */
#define PMEM2_SOURCE_FROM_FH(src, fh) \
	ut_pmem2_source_from_fh(__FILE__, __LINE__, __func__, src, fh)

/* a pmem2_source_alignment() that can't return an error */
#define PMEM2_SOURCE_ALIGNMENT(src, al) \
	ut_pmem2_source_alignment(__FILE__, __LINE__, __func__, src, al)

/* a pmem2_source_delete() that can't fail */
#define PMEM2_SOURCE_DELETE(src) \
	ut_pmem2_source_delete(__FILE__, __LINE__, __func__, src)

/* a pmem2_source_size() that can't fail */
#define PMEM2_SOURCE_SIZE(src, size) \
	ut_pmem2_source_size(__FILE__, __LINE__, __func__, src, size)

void ut_pmem2_source_from_fd(const char *file, int line, const char *func,
	struct pmem2_source **src, int fd);

void ut_pmem2_source_from_fh(const char *file, int line, const char *func,
	struct pmem2_source **src, struct FHandle *fhandle);

void ut_pmem2_source_alignment(const char *file, int line, const char *func,
	struct pmem2_source *src, size_t *alignment);

void ut_pmem2_source_delete(const char *file, int line, const char *func,
	struct pmem2_source **src);

void ut_pmem2_source_size(const char *file, int line, const char *func,
	struct pmem2_source *src, size_t *size);

#endif /* UT_PMEM2_SOURCE_H */
1,667
33.040816
76
h
null
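A hypothetical usage sketch for the wrappers above: build a pmem2 source from an already-open file descriptor, query its size and alignment, and delete it; each macro aborts the test on an unexpected return code.

#include "unittest.h"
#include "ut_pmem2_source.h"

static void
example_source_usage(int fd)
{
	struct pmem2_source *src;
	size_t size, alignment;

	PMEM2_SOURCE_FROM_FD(&src, fd);
	PMEM2_SOURCE_SIZE(src, &size);
	PMEM2_SOURCE_ALIGNMENT(src, &alignment);
	UT_OUT("size %zu alignment %zu", size, alignment);
	PMEM2_SOURCE_DELETE(&src);	/* also asserts src was reset to NULL */
}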
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/unittest/ut_pmem2_setup.c
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */

/*
 * ut_pmem2_setup.c -- libpmem2 setup functions using the non-public API
 * (only for unit tests)
 */

#include "../../libpmem2/config.h"
#include "ut_pmem2_source.h"
#include "ut_pmem2_setup.h"
#include "unittest.h"

/*
 * ut_pmem2_prepare_config -- fills pmem2_config; it cannot set a wrong value
 */
void
ut_pmem2_prepare_config(struct pmem2_config *cfg, struct pmem2_source **src,
	struct FHandle **fh, enum file_handle_type fh_type, const char *file,
	size_t length, size_t offset, int access)
{
	pmem2_config_init(cfg);
	cfg->offset = offset;
	cfg->length = length;
	cfg->requested_max_granularity = PMEM2_GRANULARITY_PAGE;

	*fh = UT_FH_OPEN(fh_type, file, access);
	PMEM2_SOURCE_FROM_FH(src, *fh);
}
805
25
76
c
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/unittest/ut_pmem2_map.c
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */

/*
 * ut_pmem2_map.c -- utility helper functions for libpmem2 map tests
 */

#include <libpmem2.h>

#include "unittest.h"
#include "ut_pmem2_map.h"
#include "ut_pmem2_utils.h"

/*
 * ut_pmem2_map -- allocates map (cannot fail)
 */
void
ut_pmem2_map(const char *file, int line, const char *func,
	struct pmem2_config *cfg, struct pmem2_source *src,
	struct pmem2_map **map)
{
	int ret = pmem2_map(cfg, src, map);
	ut_pmem2_expect_return(file, line, func, ret, 0);
	UT_ASSERTne(*map, NULL);
}
572
21.92
68
c
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/unittest/ut_pmem2_setup_integration.h
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */

/*
 * ut_pmem2_setup_integration.h -- libpmem2 setup functions using public API
 * (for integration tests)
 */

#ifndef UT_PMEM2_SETUP_INTEGRATION_H
#define UT_PMEM2_SETUP_INTEGRATION_H 1

#include "ut_fh.h"

/* a prepare_config() that can't set wrong value */
#define PMEM2_PREPARE_CONFIG_INTEGRATION(cfg, src, fd, g) \
	ut_pmem2_prepare_config_integration( \
		__FILE__, __LINE__, __func__, cfg, src, fd, g)

void ut_pmem2_prepare_config_integration(const char *file, int line,
	const char *func, struct pmem2_config **cfg, struct pmem2_source **src,
	int fd, enum pmem2_granularity granularity);

#endif /* UT_PMEM2_SETUP_INTEGRATION_H */
728
29.375
76
h
null
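A hypothetical integration-test fragment combining the helper above with the PMEM2_MAP() wrapper from ut_pmem2_map.h; cleanup of the mapping, config, and source is omitted, and pmem2_map_get_address() is assumed to be available in this libpmem2 snapshot.

#include <libpmem2.h>

#include "unittest.h"
#include "ut_pmem2_map.h"
#include "ut_pmem2_setup_integration.h"

static void
example_map_fd(int fd)
{
	struct pmem2_config *cfg;
	struct pmem2_source *src;
	struct pmem2_map *map;

	PMEM2_PREPARE_CONFIG_INTEGRATION(&cfg, &src, fd,
			PMEM2_GRANULARITY_PAGE);
	PMEM2_MAP(cfg, src, &map);	/* aborts on error, asserts map != NULL */

	void *addr = pmem2_map_get_address(map);
	UT_ASSERTne(addr, NULL);
}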
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/pmem_movnt_align/pmem_movnt_align.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2020, Intel Corporation */ /* * pmem_movnt_align.c -- unit test for functions with non-temporal stores * * usage: pmem_movnt_align [C|F|B|S] * * C - pmem_memcpy_persist() * B - pmem_memmove_persist() in backward direction * F - pmem_memmove_persist() in forward direction * S - pmem_memset_persist() */ #include <stdio.h> #include <string.h> #include <unistd.h> #include "libpmem.h" #include "unittest.h" #include "movnt_align_common.h" #define N_BYTES (Ut_pagesize * 2) static int Heavy; static void * pmem_memcpy_persist_wrapper(void *pmemdest, const void *src, size_t len, unsigned flags) { (void) flags; return pmem_memcpy_persist(pmemdest, src, len); } static void * pmem_memcpy_nodrain_wrapper(void *pmemdest, const void *src, size_t len, unsigned flags) { (void) flags; return pmem_memcpy_nodrain(pmemdest, src, len); } static void * pmem_memmove_persist_wrapper(void *pmemdest, const void *src, size_t len, unsigned flags) { (void) flags; return pmem_memmove_persist(pmemdest, src, len); } static void * pmem_memmove_nodrain_wrapper(void *pmemdest, const void *src, size_t len, unsigned flags) { (void) flags; return pmem_memmove_nodrain(pmemdest, src, len); } static void * pmem_memset_persist_wrapper(void *pmemdest, int c, size_t len, unsigned flags) { (void) flags; return pmem_memset_persist(pmemdest, c, len); } static void * pmem_memset_nodrain_wrapper(void *pmemdest, int c, size_t len, unsigned flags) { (void) flags; return pmem_memset_nodrain(pmemdest, c, len); } static void check_memmove_variants(size_t doff, size_t soff, size_t len) { check_memmove(doff, soff, len, pmem_memmove_persist_wrapper, 0); if (!Heavy) return; check_memmove(doff, soff, len, pmem_memmove_nodrain_wrapper, 0); for (int i = 0; i < ARRAY_SIZE(Flags); ++i) check_memmove(doff, soff, len, pmem_memmove, Flags[i]); } static void check_memcpy_variants(size_t doff, size_t soff, size_t len) { check_memcpy(doff, soff, len, pmem_memcpy_persist_wrapper, 0); if (!Heavy) return; check_memcpy(doff, soff, len, pmem_memcpy_nodrain_wrapper, 0); for (int i = 0; i < ARRAY_SIZE(Flags); ++i) check_memcpy(doff, soff, len, pmem_memcpy, Flags[i]); } static void check_memset_variants(size_t off, size_t len) { check_memset(off, len, pmem_memset_persist_wrapper, 0); if (!Heavy) return; check_memset(off, len, pmem_memset_nodrain_wrapper, 0); for (int i = 0; i < ARRAY_SIZE(Flags); ++i) check_memset(off, len, pmem_memset, Flags[i]); } int main(int argc, char *argv[]) { if (argc != 3) UT_FATAL("usage: %s type heavy=[0|1]", argv[0]); char type = argv[1][0]; Heavy = argv[2][0] == '1'; const char *thr = os_getenv("PMEM_MOVNT_THRESHOLD"); const char *avx = os_getenv("PMEM_AVX"); const char *avx512f = os_getenv("PMEM_AVX512F"); START(argc, argv, "pmem_movnt_align %c %s %savx %savx512f", type, thr ? thr : "default", avx ? "" : "!", avx512f ? 
"" : "!"); size_t page_size = Ut_pagesize; size_t s; switch (type) { case 'C': /* memcpy */ /* mmap with guard pages */ Src = MMAP_ANON_ALIGNED(N_BYTES, 0); Dst = MMAP_ANON_ALIGNED(N_BYTES, 0); if (Src == NULL || Dst == NULL) UT_FATAL("!mmap"); Scratch = MALLOC(N_BYTES); /* check memcpy with 0 size */ check_memcpy_variants(0, 0, 0); /* check memcpy with unaligned size */ for (s = 0; s < CACHELINE_SIZE; s++) check_memcpy_variants(0, 0, N_BYTES - s); /* check memcpy with unaligned begin */ for (s = 0; s < CACHELINE_SIZE; s++) check_memcpy_variants(s, 0, N_BYTES - s); /* check memcpy with unaligned begin and end */ for (s = 0; s < CACHELINE_SIZE; s++) check_memcpy_variants(s, s, N_BYTES - 2 * s); MUNMAP_ANON_ALIGNED(Src, N_BYTES); MUNMAP_ANON_ALIGNED(Dst, N_BYTES); FREE(Scratch); break; case 'B': /* memmove backward */ /* mmap with guard pages */ Src = MMAP_ANON_ALIGNED(2 * N_BYTES - page_size, 0); Dst = Src + N_BYTES - page_size; if (Src == NULL) UT_FATAL("!mmap"); /* check memmove in backward direction with 0 size */ check_memmove_variants(0, 0, 0); /* check memmove in backward direction with unaligned size */ for (s = 0; s < CACHELINE_SIZE; s++) check_memmove_variants(0, 0, N_BYTES - s); /* check memmove in backward direction with unaligned begin */ for (s = 0; s < CACHELINE_SIZE; s++) check_memmove_variants(s, 0, N_BYTES - s); /* * check memmove in backward direction with unaligned begin * and end */ for (s = 0; s < CACHELINE_SIZE; s++) check_memmove_variants(s, s, N_BYTES - 2 * s); MUNMAP_ANON_ALIGNED(Src, 2 * N_BYTES - page_size); break; case 'F': /* memmove forward */ /* mmap with guard pages */ Dst = MMAP_ANON_ALIGNED(2 * N_BYTES - page_size, 0); Src = Dst + N_BYTES - page_size; if (Src == NULL) UT_FATAL("!mmap"); /* check memmove in forward direction with 0 size */ check_memmove_variants(0, 0, 0); /* check memmove in forward direction with unaligned size */ for (s = 0; s < CACHELINE_SIZE; s++) check_memmove_variants(0, 0, N_BYTES - s); /* check memmove in forward direction with unaligned begin */ for (s = 0; s < CACHELINE_SIZE; s++) check_memmove_variants(s, 0, N_BYTES - s); /* * check memmove in forward direction with unaligned begin * and end */ for (s = 0; s < CACHELINE_SIZE; s++) check_memmove_variants(s, s, N_BYTES - 2 * s); MUNMAP_ANON_ALIGNED(Dst, 2 * N_BYTES - page_size); break; case 'S': /* memset */ /* mmap with guard pages */ Dst = MMAP_ANON_ALIGNED(N_BYTES, 0); if (Dst == NULL) UT_FATAL("!mmap"); Scratch = MALLOC(N_BYTES); /* check memset with 0 size */ check_memset_variants(0, 0); /* check memset with unaligned size */ for (s = 0; s < CACHELINE_SIZE; s++) check_memset_variants(0, N_BYTES - s); /* check memset with unaligned begin */ for (s = 0; s < CACHELINE_SIZE; s++) check_memset_variants(s, N_BYTES - s); /* check memset with unaligned begin and end */ for (s = 0; s < CACHELINE_SIZE; s++) check_memset_variants(s, N_BYTES - 2 * s); MUNMAP_ANON_ALIGNED(Dst, N_BYTES); FREE(Scratch); break; default: UT_FATAL("!wrong type of test"); break; } DONE(NULL); }
6,229
23.92
78
c
null
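The pmem_movnt_align test above exercises pmem_memcpy_persist(), pmem_memcpy_nodrain() and the flag-based variants across unaligned offsets. For context, a minimal sketch of how these public libpmem calls differ in ordinary application code; the path and sizes below are made-up placeholders, not values taken from the test:

#include <libpmem.h>

int
main(void)
{
	size_t mapped_len;
	int is_pmem;

	/* create a small pool file; the path is only an example */
	char *dst = pmem_map_file("/mnt/pmem/buf", 4096,
			PMEM_FILE_CREATE, 0666, &mapped_len, &is_pmem);
	if (dst == NULL)
		return 1;

	const char msg[] = "hello";

	/* copy + flush + drain in a single call */
	pmem_memcpy_persist(dst, msg, sizeof(msg));

	/* copy + flush only; the caller batches one final drain */
	pmem_memcpy_nodrain(dst + 64, msg, sizeof(msg));
	pmem_memcpy_nodrain(dst + 128, msg, sizeof(msg));
	pmem_drain();

	pmem_unmap(dst, mapped_len);
	return 0;
}

The nodrain variant is what the test wraps so that it can compare it against pmem_memcpy_persist() and the flag-based pmem_memcpy() under the same alignment cases.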
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_memblock/obj_memblock.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2016-2018, Intel Corporation */ /* * obj_memblock.c -- unit test for memblock interface */ #include "memblock.h" #include "memops.h" #include "obj.h" #include "unittest.h" #include "heap.h" #define NCHUNKS 10 static PMEMobjpool *pop; FUNC_MOCK(operation_add_typed_entry, int, struct operation_context *ctx, void *ptr, uint64_t value, ulog_operation_type type, enum operation_log_type en_type) FUNC_MOCK_RUN_DEFAULT { uint64_t *pval = ptr; switch (type) { case ULOG_OPERATION_SET: *pval = value; break; case ULOG_OPERATION_AND: *pval &= value; break; case ULOG_OPERATION_OR: *pval |= value; break; default: UT_ASSERT(0); } return 0; } FUNC_MOCK_END FUNC_MOCK(operation_add_entry, int, struct operation_context *ctx, void *ptr, uint64_t value, ulog_operation_type type) FUNC_MOCK_RUN_DEFAULT { /* just call the mock above - the entry type doesn't matter */ return operation_add_typed_entry(ctx, ptr, value, type, LOG_TRANSIENT); } FUNC_MOCK_END static void test_detect(void) { struct memory_block mhuge_used = { .chunk_id = 0, 0, 0, 0 }; struct memory_block mhuge_free = { .chunk_id = 1, 0, 0, 0 }; struct memory_block mrun = { .chunk_id = 2, 0, 0, 0 }; struct heap_layout *layout = pop->heap.layout; layout->zone0.chunk_headers[0].size_idx = 1; layout->zone0.chunk_headers[0].type = CHUNK_TYPE_USED; layout->zone0.chunk_headers[1].size_idx = 1; layout->zone0.chunk_headers[1].type = CHUNK_TYPE_FREE; layout->zone0.chunk_headers[2].size_idx = 1; layout->zone0.chunk_headers[2].type = CHUNK_TYPE_RUN; memblock_rebuild_state(&pop->heap, &mhuge_used); memblock_rebuild_state(&pop->heap, &mhuge_free); memblock_rebuild_state(&pop->heap, &mrun); UT_ASSERTeq(mhuge_used.type, MEMORY_BLOCK_HUGE); UT_ASSERTeq(mhuge_free.type, MEMORY_BLOCK_HUGE); UT_ASSERTeq(mrun.type, MEMORY_BLOCK_RUN); } static void test_block_size(void) { struct memory_block mhuge = { .chunk_id = 0, 0, 0, 0 }; struct memory_block mrun = { .chunk_id = 1, 0, 0, 0 }; struct palloc_heap *heap = &pop->heap; struct heap_layout *layout = heap->layout; layout->zone0.chunk_headers[0].size_idx = 1; layout->zone0.chunk_headers[0].type = CHUNK_TYPE_USED; layout->zone0.chunk_headers[1].size_idx = 1; layout->zone0.chunk_headers[1].type = CHUNK_TYPE_RUN; struct chunk_run *run = (struct chunk_run *) &layout->zone0.chunks[1]; run->hdr.block_size = 1234; memblock_rebuild_state(&pop->heap, &mhuge); memblock_rebuild_state(&pop->heap, &mrun); UT_ASSERTne(mhuge.m_ops, NULL); UT_ASSERTne(mrun.m_ops, NULL); UT_ASSERTeq(mhuge.m_ops->block_size(&mhuge), CHUNKSIZE); UT_ASSERTeq(mrun.m_ops->block_size(&mrun), 1234); } static void test_prep_hdr(void) { struct memory_block mhuge_used = { .chunk_id = 0, 0, .size_idx = 1, 0 }; struct memory_block mhuge_free = { .chunk_id = 1, 0, .size_idx = 1, 0 }; struct memory_block mrun_used = { .chunk_id = 2, 0, .size_idx = 4, .block_off = 0 }; struct memory_block mrun_free = { .chunk_id = 2, 0, .size_idx = 4, .block_off = 4 }; struct memory_block mrun_large_used = { .chunk_id = 2, 0, .size_idx = 64, .block_off = 64 }; struct memory_block mrun_large_free = { .chunk_id = 2, 0, .size_idx = 64, .block_off = 128 }; struct palloc_heap *heap = &pop->heap; struct heap_layout *layout = heap->layout; layout->zone0.chunk_headers[0].size_idx = 1; layout->zone0.chunk_headers[0].type = CHUNK_TYPE_USED; layout->zone0.chunk_headers[1].size_idx = 1; layout->zone0.chunk_headers[1].type = CHUNK_TYPE_FREE; layout->zone0.chunk_headers[2].size_idx = 1; layout->zone0.chunk_headers[2].type = CHUNK_TYPE_RUN; struct 
chunk_run *run = (struct chunk_run *)&layout->zone0.chunks[2]; run->hdr.block_size = 128; uint64_t *bitmap = (uint64_t *)run->content; bitmap[0] = 0b1111; bitmap[1] = ~0ULL; bitmap[2] = 0ULL; memblock_rebuild_state(heap, &mhuge_used); memblock_rebuild_state(heap, &mhuge_free); memblock_rebuild_state(heap, &mrun_used); memblock_rebuild_state(heap, &mrun_free); memblock_rebuild_state(heap, &mrun_large_used); memblock_rebuild_state(heap, &mrun_large_free); UT_ASSERTne(mhuge_used.m_ops, NULL); mhuge_used.m_ops->prep_hdr(&mhuge_used, MEMBLOCK_FREE, NULL); UT_ASSERTeq(layout->zone0.chunk_headers[0].type, CHUNK_TYPE_FREE); mhuge_free.m_ops->prep_hdr(&mhuge_free, MEMBLOCK_ALLOCATED, NULL); UT_ASSERTeq(layout->zone0.chunk_headers[1].type, CHUNK_TYPE_USED); mrun_used.m_ops->prep_hdr(&mrun_used, MEMBLOCK_FREE, NULL); UT_ASSERTeq(bitmap[0], 0ULL); mrun_free.m_ops->prep_hdr(&mrun_free, MEMBLOCK_ALLOCATED, NULL); UT_ASSERTeq(bitmap[0], 0b11110000); mrun_large_used.m_ops->prep_hdr(&mrun_large_used, MEMBLOCK_FREE, NULL); UT_ASSERTeq(bitmap[1], 0ULL); mrun_large_free.m_ops->prep_hdr(&mrun_large_free, MEMBLOCK_ALLOCATED, NULL); UT_ASSERTeq(bitmap[2], ~0ULL); } static int fake_persist(void *base, const void *addr, size_t size, unsigned flags) { return 0; } int main(int argc, char *argv[]) { START(argc, argv, "obj_memblock"); PMEMobjpool pool; pop = &pool; pop->heap.layout = ZALLOC(sizeof(struct heap_layout) + NCHUNKS * sizeof(struct chunk)); pop->heap.p_ops.persist = fake_persist; test_detect(); test_block_size(); test_prep_hdr(); FREE(pop->heap.layout); DONE(NULL); }
5,320
27.153439
77
c
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_memblock/mocks_windows.h
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016, Intel Corporation */

/*
 * mocks_windows.h -- redefinitions of memops functions
 *
 * This file is Windows-specific.
 *
 * This file should be included (i.e. using Forced Include) by libpmemobj
 * files, when compiled for the purpose of obj_memblock test.
 * It would replace default implementation with mocked functions defined
 * in obj_memblock.c.
 *
 * These defines could be also passed as preprocessor definitions.
 */

#ifndef WRAP_REAL
#define operation_add_typed_entry __wrap_operation_add_typed_entry
#define operation_add_entry __wrap_operation_add_entry
#endif
634
29.238095
73
h
null
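The header above swaps real functions for mocks using nothing more than #define renaming plus a forced include. A generic sketch of that technique follows; compute_checksum and its __wrap_ counterpart are invented names used only for illustration:

#include <stddef.h>

/* production code declares and calls this function */
int compute_checksum(const void *buf, size_t len);

/* forced-include header used only in the test build */
#ifndef WRAP_REAL
#define compute_checksum __wrap_compute_checksum
#endif

/* the test supplies the mock that the renamed calls now resolve to */
int
__wrap_compute_checksum(const void *buf, size_t len)
{
	(void) buf;
	(void) len;
	return 42;	/* deterministic value for assertions */
}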
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/pmreorder_flushes/pmreorder_flushes.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2019-2020, Intel Corporation */ /* * pmreorder_flushes.c -- test for store reordering with flushes * in different barriers * * usage: pmreorder_flushes g|c file * * g - write data in a specific manner - some flushes * of the stores are made in different barriers, * c - check data consistency - stores should be applied only * after flush - no matter in which barrier the flush will happen * */ #include "unittest.h" #include "util.h" #include "valgrind_internal.h" #define STORE_SIZE 64 static FILE *fp; struct stores_fields { char A[STORE_SIZE]; char B[STORE_SIZE]; char C[STORE_SIZE]; char D[STORE_SIZE]; char E[STORE_SIZE]; }; /* * write_consistent -- (internal) write data in a specific order */ static void write_consistent(struct stores_fields *sf) { /* * STORE (A) * STORE (B) * STORE (C) * * FLUSH (A, B) (no flush C) * FENCE */ pmem_memset(&sf->A, -1, sizeof(sf->A), PMEM_F_MEM_NODRAIN); pmem_memset(&sf->B, 2, sizeof(sf->B), PMEM_F_MEM_NODRAIN); pmem_memset(&sf->C, 3, sizeof(sf->C), PMEM_F_MEM_NOFLUSH); pmem_drain(); /* * STORE (A) * STORE (D) * * FLUSH (D) (no flush A, still no flush C) * FENCE */ pmem_memset(sf->A, 1, sizeof(sf->A), PMEM_F_MEM_NOFLUSH); pmem_memset(sf->D, 4, sizeof(sf->D), PMEM_F_MEM_NODRAIN); pmem_drain(); /* * There are two transitive stores now: A (which does not change * it's value) and C (which is modified). * * STORE (D) * STORE (C) * * FLUSH (D) (still no flush A and C) * FENCE */ pmem_memset(sf->D, 5, sizeof(sf->D), PMEM_F_MEM_NODRAIN); pmem_memset(sf->C, 8, sizeof(sf->C), PMEM_F_MEM_NOFLUSH); pmem_drain(); /* * E is modified just to add additional step to the log. * Values of A and C should still be -1, 2. * * STORE (E) * FLUSH (E) * FENCE */ pmem_memset(sf->E, 6, sizeof(sf->E), PMEM_F_MEM_NODRAIN); pmem_drain(); /* * FLUSH (A, C) * FENCE */ pmem_flush(sf->A, sizeof(sf->A)); pmem_flush(sf->C, sizeof(sf->C)); pmem_drain(); } /* * check_consistency -- (internal) check if stores are made in proper manner */ static int check_consistency(struct stores_fields *sf) { fprintf(fp, "A=%d B=%d C=%d D=%d E=%d\n", sf->A[0], sf->B[0], sf->C[0], sf->D[0], sf->E[0]); return 0; } int main(int argc, char *argv[]) { START(argc, argv, "pmreorder_flushes"); util_init(); if ((argc < 4) || (strchr("gc", argv[1][0]) == NULL) || argv[1][1] != '\0') UT_FATAL("usage: %s g|c file log_file", argv[0]); int fd = OPEN(argv[2], O_RDWR); size_t size; /* mmap and register in valgrind pmemcheck */ void *map = pmem_map_file(argv[2], 0, 0, 0, &size, NULL); UT_ASSERTne(map, NULL); struct stores_fields *sf = map; char opt = argv[1][0]; /* clear the struct to get a consistent start state for writing */ if (strchr("g", opt)) pmem_memset_persist(sf, 0, sizeof(*sf)); switch (opt) { case 'g': write_consistent(sf); break; case 'c': fp = os_fopen(argv[3], "a"); if (fp == NULL) UT_FATAL("!fopen"); int ret; ret = check_consistency(sf); fclose(fp); return ret; default: UT_FATAL("Unrecognized option %c", opt); } CLOSE(fd); DONE(NULL); }
3,207
20.105263
76
c
null
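pmreorder_flushes.c above separates stores from their flushes with the PMEM_F_MEM_* flags so that pmreorder can replay the possible orderings. A condensed, self-contained sketch of that pattern using the public libpmem flags; dst is assumed to point into a mapping obtained from pmem_map_file():

#include <libpmem.h>

static void
write_with_deferred_flush(char *dst)
{
	/* store only -- the data is not yet durable */
	pmem_memset(dst, 1, 64, PMEM_F_MEM_NOFLUSH);

	/* store + flush, with the fence deferred to pmem_drain() */
	pmem_memset(dst + 64, 2, 64, PMEM_F_MEM_NODRAIN);
	pmem_drain();

	/* flush and fence the first store explicitly, as the test does */
	pmem_flush(dst, 64);
	pmem_drain();
}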
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/libpmempool_api_win/libpmempool_test_win.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2017, Intel Corporation */ /* * libpmempool_test_win -- test of libpmempool. * */ #include <stddef.h> #include <unistd.h> #include <stdlib.h> #include <stdio.h> #include "unittest.h" /* * Exact copy of the struct pmempool_check_args from libpmempool 1.0 provided to * test libpmempool against various pmempool_check_args structure versions. */ struct pmempool_check_args_1_0 { const wchar_t *path; const wchar_t *backup_path; enum pmempool_pool_type pool_type; int flags; }; /* * check_pool -- check given pool */ static void check_pool(struct pmempool_check_argsW *args, size_t args_size) { const char *status2str[] = { [PMEMPOOL_CHECK_RESULT_CONSISTENT] = "consistent", [PMEMPOOL_CHECK_RESULT_NOT_CONSISTENT] = "not consistent", [PMEMPOOL_CHECK_RESULT_REPAIRED] = "repaired", [PMEMPOOL_CHECK_RESULT_CANNOT_REPAIR] = "cannot repair", [PMEMPOOL_CHECK_RESULT_ERROR] = "fatal", }; PMEMpoolcheck *ppc = pmempool_check_initW(args, args_size); if (!ppc) { char buff[UT_MAX_ERR_MSG]; ut_strerror(errno, buff, UT_MAX_ERR_MSG); UT_OUT("Error: %s", buff); return; } struct pmempool_check_statusW *status = NULL; while ((status = pmempool_checkW(ppc)) != NULL) { char *msg = ut_toUTF8(status->str.msg); switch (status->type) { case PMEMPOOL_CHECK_MSG_TYPE_ERROR: UT_OUT("%s", msg); break; case PMEMPOOL_CHECK_MSG_TYPE_INFO: UT_OUT("%s", msg); break; case PMEMPOOL_CHECK_MSG_TYPE_QUESTION: UT_OUT("%s", msg); status->str.answer = L"yes"; break; default: pmempool_check_end(ppc); free(msg); exit(EXIT_FAILURE); } free(msg); } enum pmempool_check_result ret = pmempool_check_end(ppc); UT_OUT("status = %s", status2str[ret]); } /* * print_usage -- print usage of program */ static void print_usage(wchar_t *name) { UT_OUT("Usage: %S [-t <pool_type>] [-r <repair>] [-d <dry_run>] " "[-y <always_yes>] [-f <flags>] [-a <advanced>] " "[-b <backup_path>] <pool_path>", name); } /* * set_flag -- parse the value and set the flag according to a obtained value */ static void set_flag(const wchar_t *value, int *flags, int flag) { if (_wtoi(value) > 0) *flags |= flag; else *flags &= ~flag; } int wmain(int argc, wchar_t *argv[]) { STARTW(argc, argv, "libpmempool_test_win"); struct pmempool_check_args_1_0 args = { .path = NULL, .backup_path = NULL, .pool_type = PMEMPOOL_POOL_TYPE_LOG, .flags = PMEMPOOL_CHECK_FORMAT_STR | PMEMPOOL_CHECK_REPAIR | PMEMPOOL_CHECK_VERBOSE }; size_t args_size = sizeof(struct pmempool_check_args_1_0); for (int i = 1; i < argc - 1; i += 2) { wchar_t *optarg = argv[i + 1]; if (wcscmp(L"-t", argv[i]) == 0) { if (wcscmp(optarg, L"blk") == 0) { args.pool_type = PMEMPOOL_POOL_TYPE_BLK; } else if (wcscmp(optarg, L"log") == 0) { args.pool_type = PMEMPOOL_POOL_TYPE_LOG; } else if (wcscmp(optarg, L"obj") == 0) { args.pool_type = PMEMPOOL_POOL_TYPE_OBJ; } else if (wcscmp(optarg, L"btt") == 0) { args.pool_type = PMEMPOOL_POOL_TYPE_BTT; } else { args.pool_type = (uint32_t)wcstoul(optarg, NULL, 0); } } else if (wcscmp(L"-r", argv[i]) == 0) { set_flag(optarg, &args.flags, PMEMPOOL_CHECK_REPAIR); } else if (wcscmp(L"-d", argv[i]) == 0) { set_flag(optarg, &args.flags, PMEMPOOL_CHECK_DRY_RUN); } else if (wcscmp(L"-a", argv[i]) == 0) { set_flag(optarg, &args.flags, PMEMPOOL_CHECK_ADVANCED); } else if (wcscmp(L"-y", argv[i]) == 0) { set_flag(optarg, &args.flags, PMEMPOOL_CHECK_ALWAYS_YES); } else if (wcscmp(L"-s", argv[i]) == 0) { args_size = wcstoul(optarg, NULL, 0); } else if (wcscmp(L"-b", argv[i]) == 0) { args.backup_path = optarg; } else { print_usage(argv[0]); UT_FATAL("unknown 
option: %c", argv[i][1]); } } args.path = argv[argc - 1]; check_pool((struct pmempool_check_argsW *)&args, args_size); DONEW(NULL); }
3,912
24.743421
80
c
null
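The test above drives the check API through the wide-character Windows entry points (pmempool_check_initW / pmempool_checkW). The narrow-character flow used on other platforms follows the same three steps; a rough sketch, with the pool path supplied by the caller and error handling trimmed:

#include <libpmempool.h>
#include <stdio.h>

static int
check_log_pool(const char *path)
{
	struct pmempool_check_args args = {
		.path = path,
		.backup_path = NULL,
		.pool_type = PMEMPOOL_POOL_TYPE_LOG,
		.flags = PMEMPOOL_CHECK_FORMAT_STR |
			PMEMPOOL_CHECK_REPAIR | PMEMPOOL_CHECK_VERBOSE,
	};

	PMEMpoolcheck *ppc = pmempool_check_init(&args, sizeof(args));
	if (ppc == NULL)
		return -1;

	struct pmempool_check_status *status;
	while ((status = pmempool_check(ppc)) != NULL) {
		printf("%s\n", status->str.msg);
		if (status->type == PMEMPOOL_CHECK_MSG_TYPE_QUESTION)
			status->str.answer = "yes";
	}

	enum pmempool_check_result res = pmempool_check_end(ppc);
	return (res == PMEMPOOL_CHECK_RESULT_CONSISTENT ||
		res == PMEMPOOL_CHECK_RESULT_REPAIRED) ? 0 : -1;
}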
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/pmem_is_pmem_windows/pmem_is_pmem_windows.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2014-2020, Intel Corporation */ /* * Copyright (c) 2015-2017, Microsoft Corporation. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * pmem_is_pmem_windows.c -- Windows specific unit test for is_pmem_detect() * * usage: pmem_is_pmem_windows file [env] */ #include "unittest.h" #include "pmem.h" #include "queue.h" #include "win_mmap.h" #include "util.h" #define NTHREAD 16 static void *Addr; static size_t Size; static int pmem_is_pmem_force = 0; enum test_mmap_scenarios { TEST_MMAP_SCENARIO_UNKNOWN, TEST_MMAP_SCENARIO_BEGIN_HOLE, TEST_MMAP_SCENARIO_END_HOLE, TEST_MMAP_SCENARIO_MIDDLE_HOLE, TEST_MMAP_SCENARIO_NO_HOLE }; enum test_mmap_scenarios get_mmap_scenarios(char *name) { if (stricmp(name, "nothing") == 0) return TEST_MMAP_SCENARIO_NO_HOLE; if (stricmp(name, "begin") == 0) return TEST_MMAP_SCENARIO_BEGIN_HOLE; if (stricmp(name, "end") == 0) return TEST_MMAP_SCENARIO_END_HOLE; if (stricmp(name, "middle") == 0) return TEST_MMAP_SCENARIO_MIDDLE_HOLE; return TEST_MMAP_SCENARIO_UNKNOWN; } /* * mmap_file_mapping_comparer -- (internal) compares the two file mapping * trackers */ static LONG_PTR mmap_file_mapping_comparer(PFILE_MAPPING_TRACKER a, PFILE_MAPPING_TRACKER b) { return ((LONG_PTR)a->BaseAddress - (LONG_PTR)b->BaseAddress); } /* * worker -- the work each thread performs */ static void * worker(void *arg) { int *ret = (int *)arg; /* * We honor the force just to let the scenarios that require pmem fs * work in the environment that forces pmem. * * NOTE: We can't use pmem_is_pmem instead of checking for the ENV * variable explicitly, because we want to call is_pmem_detect that is * defined in this test so that it will use the FileMappingQHead * that's defined here. Because we are crafting the Q in the test. 
*/ if (pmem_is_pmem_force) *ret = 1; else *ret = is_pmem_detect(Addr, Size); return NULL; } extern SRWLOCK FileMappingQLock; extern struct FMLHead FileMappingQHead; int main(int argc, char *argv[]) { HANDLE file_map; SIZE_T chunk_length; enum test_mmap_scenarios scenario; int still_holey = 1; int already_holey = 0; START(argc, argv, "pmem_is_pmem_windows"); if (argc != 3) UT_FATAL("usage: %s file {begin|end|middle|nothing}", argv[0]); util_init(); /* to initialize Mmap_align */ char *str_pmem_is_pmem_force = os_getenv("PMEM_IS_PMEM_FORCE"); if (str_pmem_is_pmem_force && atoi(str_pmem_is_pmem_force) == 1) pmem_is_pmem_force = 1; scenario = get_mmap_scenarios(argv[2]); UT_ASSERT(scenario != TEST_MMAP_SCENARIO_UNKNOWN); int fd = OPEN(argv[1], O_RDWR); os_stat_t stbuf; FSTAT(fd, &stbuf); Size = stbuf.st_size; chunk_length = Mmap_align; /* * We don't support too small a file size. */ UT_ASSERT(Size / 8 > chunk_length); file_map = CreateFileMapping((HANDLE)_get_osfhandle(fd), NULL, PAGE_READONLY, 0, 0, NULL); UT_ASSERT(file_map != NULL); Addr = MapViewOfFile(file_map, FILE_MAP_READ, 0, 0, 0); /* * let's setup FileMappingQHead such that, it appears to have lot of * DAX mapping created through our mmap. Here are our cases based * on the input: * - entire region in mapped through our mmap * - there is a region at the beginning that's not mapped through our * mmap * - there is a region at the end that's not mapped through our mmap * - there is a region in the middle that mapped through our mmap */ for (size_t offset = 0; offset < Size; offset += chunk_length) { void *base_address = (void *)((char *)Addr + offset); switch (scenario) { case TEST_MMAP_SCENARIO_BEGIN_HOLE: if (still_holey && ((offset == 0) || ((rand() % 2) == 0)) && (offset < (Size / 2))) continue; else still_holey = 0; break; case TEST_MMAP_SCENARIO_END_HOLE: if ((offset > (Size / 2)) && (already_holey || ((rand() % 2) == 0) || (offset >= (Size - chunk_length)))) { already_holey = 1; continue; } else UT_ASSERT(!already_holey); break; case TEST_MMAP_SCENARIO_MIDDLE_HOLE: if ((((offset > (Size / 8)) && ((rand() % 2) == 0)) || (offset > (Size / 8) * 6)) && (offset < (Size / 8) * 7)) continue; break; } PFILE_MAPPING_TRACKER mt = MALLOC(sizeof(struct FILE_MAPPING_TRACKER)); mt->Flags = FILE_MAPPING_TRACKER_FLAG_DIRECT_MAPPED; mt->FileHandle = (HANDLE)_get_osfhandle(fd); mt->FileMappingHandle = file_map; mt->BaseAddress = base_address; mt->EndAddress = (void *)((char *)base_address + chunk_length); mt->Access = FILE_MAP_READ; mt->Offset = offset; AcquireSRWLockExclusive(&FileMappingQLock); PMDK_SORTEDQ_INSERT(&FileMappingQHead, mt, ListEntry, FILE_MAPPING_TRACKER, mmap_file_mapping_comparer); ReleaseSRWLockExclusive(&FileMappingQLock); } CloseHandle(file_map); CLOSE(fd); os_thread_t threads[NTHREAD]; int ret[NTHREAD]; /* kick off NTHREAD threads */ for (int i = 0; i < NTHREAD; i++) THREAD_CREATE(&threads[i], NULL, worker, &ret[i]); /* wait for all the threads to complete */ for (int i = 0; i < NTHREAD; i++) THREAD_JOIN(&threads[i], NULL); /* verify that all the threads return the same value */ for (int i = 1; i < NTHREAD; i++) UT_ASSERTeq(ret[0], ret[i]); UT_OUT("%d", ret[0]); DONE(NULL); } /* * Since libpmem is linked statically, we need to invoke its ctor/dtor. */ MSVC_CONSTR(libpmem_init) MSVC_DESTR(libpmem_fini)
6,946
27.239837
76
c
null
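The Windows test above crafts the internal file-mapping queue and then calls is_pmem_detect() from many threads; applications get the same answer from the public pmem_is_pmem(), typically right after mapping a file:

#include <libpmem.h>
#include <stdio.h>

int
main(int argc, char *argv[])
{
	if (argc != 2)
		return 1;

	size_t len;
	int is_pmem;
	void *addr = pmem_map_file(argv[1], 0, 0, 0, &len, &is_pmem);
	if (addr == NULL)
		return 1;

	/* equivalent to the is_pmem hint already returned above */
	printf("%d\n", pmem_is_pmem(addr, len));

	pmem_unmap(addr, len);
	return 0;
}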
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_fragmentation2/obj_fragmentation2.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2017-2020, Intel Corporation */ /* * obj_fragmentation.c -- measures average heap external fragmentation * * This test is based on the workloads proposed in: * Log-structured Memory for DRAM-based Storage * by Stephen M. Rumble, Ankita Kejriwal, and John Ousterhout * * https://www.usenix.org/system/files/conference/fast14/fast14-paper_rumble.pdf */ #include <stdlib.h> #include <math.h> #include "rand.h" #include "unittest.h" #define LAYOUT_NAME "obj_fragmentation" #define MEGABYTE (1ULL << 20) #define GIGABYTE (1ULL << 30) #define RRAND(max, min)\ ((min) == (max) ? (min) : (rnd64() % ((max) - (min)) + (min))) static PMEMoid *objects; static size_t nobjects; static size_t allocated_current; #define MAX_OBJECTS (200ULL * 1000000) #define ALLOC_TOTAL (5000ULL * MEGABYTE) #define ALLOC_CURR (1000 * MEGABYTE) #define FREES_P 200 #define DEFAULT_FILE_SIZE (3 * GIGABYTE) static void shuffle_objects(size_t start, size_t end) { PMEMoid tmp; size_t dest; for (size_t n = start; n < end; ++n) { dest = RRAND(nobjects - 1, 0); tmp = objects[n]; objects[n] = objects[dest]; objects[dest] = tmp; } } static PMEMoid remove_last() { UT_ASSERT(nobjects > 0); PMEMoid obj = objects[--nobjects]; return obj; } static void delete_objects(PMEMobjpool *pop, float pct) { size_t nfree = (size_t)(nobjects * pct); PMEMoid oid = pmemobj_root(pop, 1); shuffle_objects(0, nobjects); while (nfree--) { oid = remove_last(); allocated_current -= pmemobj_alloc_usable_size(oid); pmemobj_free(&oid); } } /* * object_next_size -- generates random sizes in range with * exponential distribution */ static size_t object_next_size(size_t max, size_t min) { float fmax = (float)max; float fmin = (float)min; float n = (float)rnd64() / ((float)UINT64_MAX / 1.0f); return (size_t)(fmin + (fmax - fmin) * (float)exp(n * - 4.0)); } /* * allocate_exponential -- allocates objects from a large range of sizes. * * This is designed to stress the recycler subsystem that will have to * constantly look for freed/empty runs and reuse them. * * For small pools (single digit gigabytes), this test will show large * fragmentation because it can use a large number of runs - which is fine. 
*/ static void allocate_exponential(PMEMobjpool *pop, size_t size_min, size_t size_max) { size_t allocated_total = 0; PMEMoid oid; while (allocated_total < ALLOC_TOTAL) { size_t s = object_next_size(size_max, size_min); int ret = pmemobj_alloc(pop, &oid, s, 0, NULL, NULL); if (ret != 0) { /* delete a random percentage of allocated objects */ float delete_pct = (float)RRAND(90, 10) / 100.0f; delete_objects(pop, delete_pct); continue; } s = pmemobj_alloc_usable_size(oid); objects[nobjects++] = oid; UT_ASSERT(nobjects < MAX_OBJECTS); allocated_total += s; allocated_current += s; } } static void allocate_objects(PMEMobjpool *pop, size_t size_min, size_t size_max) { size_t allocated_total = 0; size_t sstart = 0; PMEMoid oid; while (allocated_total < ALLOC_TOTAL) { size_t s = RRAND(size_max, size_min); pmemobj_alloc(pop, &oid, s, 0, NULL, NULL); UT_ASSERTeq(OID_IS_NULL(oid), 0); s = pmemobj_alloc_usable_size(oid); objects[nobjects++] = oid; UT_ASSERT(nobjects < MAX_OBJECTS); allocated_total += s; allocated_current += s; if (allocated_current > ALLOC_CURR) { shuffle_objects(sstart, nobjects); for (int i = 0; i < FREES_P; ++i) { oid = remove_last(); allocated_current -= pmemobj_alloc_usable_size(oid); pmemobj_free(&oid); } sstart = nobjects; } } } typedef void workload(PMEMobjpool *pop); static void w0(PMEMobjpool *pop) { allocate_objects(pop, 100, 100); } static void w1(PMEMobjpool *pop) { allocate_objects(pop, 100, 100); allocate_objects(pop, 130, 130); } static void w2(PMEMobjpool *pop) { allocate_objects(pop, 100, 100); delete_objects(pop, 0.9F); allocate_objects(pop, 130, 130); } static void w3(PMEMobjpool *pop) { allocate_objects(pop, 100, 150); allocate_objects(pop, 200, 250); } static void w4(PMEMobjpool *pop) { allocate_objects(pop, 100, 150); delete_objects(pop, 0.9F); allocate_objects(pop, 200, 250); } static void w5(PMEMobjpool *pop) { allocate_objects(pop, 100, 200); delete_objects(pop, 0.5); allocate_objects(pop, 1000, 2000); } static void w6(PMEMobjpool *pop) { allocate_objects(pop, 1000, 2000); delete_objects(pop, 0.9F); allocate_objects(pop, 1500, 2500); } static void w7(PMEMobjpool *pop) { allocate_objects(pop, 50, 150); delete_objects(pop, 0.9F); allocate_objects(pop, 5000, 15000); } static void w8(PMEMobjpool *pop) { allocate_objects(pop, 2 * MEGABYTE, 2 * MEGABYTE); } static void w9(PMEMobjpool *pop) { allocate_exponential(pop, 1, 5 * MEGABYTE); } static workload *workloads[] = { w0, w1, w2, w3, w4, w5, w6, w7, w8, w9 }; static float workloads_target[] = { 0.01f, 0.01f, 0.01f, 0.9f, 0.8f, 0.7f, 0.3f, 0.8f, 0.73f, 3.0f }; static float workloads_defrag_target[] = { 0.01f, 0.01f, 0.01f, 0.01f, 0.01f, 0.05f, 0.09f, 0.13f, 0.01f, 0.16f }; /* * Last two workloads operates mostly on huge chunks, so run * stats are useless. */ static float workloads_stat_target[] = { 0.01f, 1.1f, 1.1f, 0.86f, 0.76f, 1.01f, 0.23f, 1.24f, 2100.f, 2100.f }; static float workloads_defrag_stat_target[] = { 0.01f, 0.01f, 0.01f, 0.02f, 0.02f, 0.04f, 0.08f, 0.12f, 2100.f, 2100.f }; int main(int argc, char *argv[]) { START(argc, argv, "obj_fragmentation2"); if (argc < 3) UT_FATAL("usage: %s filename workload [seed] [defrag]", argv[0]); const char *path = argv[1]; PMEMobjpool *pop = pmemobj_create(path, LAYOUT_NAME, DEFAULT_FILE_SIZE, S_IWUSR | S_IRUSR); if (pop == NULL) UT_FATAL("!pmemobj_create: %s", path); int w = atoi(argv[2]); if (argc > 3) randomize((unsigned)atoi(argv[3])); else randomize(0); int defrag = argc > 4 ? 
atoi(argv[4]) != 0 : 0; objects = ZALLOC(sizeof(PMEMoid) * MAX_OBJECTS); UT_ASSERTne(objects, NULL); workloads[w](pop); /* this is to trigger global recycling */ pmemobj_defrag(pop, NULL, 0, NULL); size_t active = 0; size_t allocated = 0; pmemobj_ctl_get(pop, "stats.heap.run_active", &active); pmemobj_ctl_get(pop, "stats.heap.run_allocated", &allocated); float stat_frag = 0; if (active != 0 && allocated != 0) { stat_frag = ((float)active / allocated) - 1.f; UT_ASSERT(stat_frag <= workloads_stat_target[w]); } if (defrag) { PMEMoid **objectsf = ZALLOC(sizeof(PMEMoid) * nobjects); for (size_t i = 0; i < nobjects; ++i) objectsf[i] = &objects[i]; pmemobj_defrag(pop, objectsf, nobjects, NULL); FREE(objectsf); active = 0; allocated = 0; /* this is to trigger global recycling */ pmemobj_defrag(pop, NULL, 0, NULL); pmemobj_ctl_get(pop, "stats.heap.run_active", &active); pmemobj_ctl_get(pop, "stats.heap.run_allocated", &allocated); if (active != 0 && allocated != 0) { stat_frag = ((float)active / allocated) - 1.f; UT_ASSERT(stat_frag <= workloads_defrag_stat_target[w]); } } PMEMoid oid; size_t remaining = 0; size_t chunk = (100); /* calc at chunk level */ while (pmemobj_alloc(pop, &oid, chunk, 0, NULL, NULL) == 0) remaining += pmemobj_alloc_usable_size(oid) + 16; size_t allocated_sum = 0; oid = pmemobj_root(pop, 1); for (size_t n = 0; n < nobjects; ++n) { if (OID_IS_NULL(objects[n])) continue; oid = objects[n]; allocated_sum += pmemobj_alloc_usable_size(oid) + 16; } size_t used = DEFAULT_FILE_SIZE - remaining; float frag = ((float)used / allocated_sum) - 1.f; UT_OUT("FRAG: %f\n", frag); UT_ASSERT(frag <= (defrag ? workloads_defrag_target[w] : workloads_target[w])); pmemobj_close(pop); FREE(objects); DONE(NULL); }
7,747
22.337349
80
c
null
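obj_fragmentation2.c above derives its fragmentation ratio from the stats.heap.run_active and stats.heap.run_allocated ctl counters and then calls pmemobj_defrag(). A trimmed sketch of those public calls; note that the stats.enabled ctl name and the field names of struct pobj_defrag_result are recalled from the library documentation rather than taken from the test, so treat them as assumptions:

#include <libpmemobj.h>
#include <stdio.h>
#include <stdlib.h>

static void
report_run_fragmentation(PMEMobjpool *pop, PMEMoid *oids, size_t noids)
{
	int enabled = 1;	/* statistics are off by default */
	pmemobj_ctl_set(pop, "stats.enabled", &enabled);

	/* ... workload allocates and frees objects here ... */

	size_t active = 0;
	size_t allocated = 0;
	pmemobj_ctl_get(pop, "stats.heap.run_active", &active);
	pmemobj_ctl_get(pop, "stats.heap.run_allocated", &allocated);
	if (allocated != 0)
		printf("run fragmentation: %f\n",
			(double)active / allocated - 1.0);

	/* pmemobj_defrag() takes an array of pointers to the oids */
	if (noids > 0) {
		PMEMoid **ptrs = malloc(noids * sizeof(*ptrs));
		if (ptrs == NULL)
			return;
		for (size_t i = 0; i < noids; ++i)
			ptrs[i] = &oids[i];

		struct pobj_defrag_result result;
		if (pmemobj_defrag(pop, ptrs, noids, &result) == 0)
			printf("relocated %zu of %zu\n",
				result.relocated, result.total);
		free(ptrs);
	}
}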
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/getopt/getopt.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2016-2019, Intel Corporation */ /* * getopt.c -- test for windows getopt() implementation */ #include <stdio.h> #include <stdlib.h> #include <getopt.h> #include "unittest.h" /* * long_options -- command line arguments */ static const struct option long_options[] = { { "arg_a", no_argument, NULL, 'a' }, { "arg_b", no_argument, NULL, 'b' }, { "arg_c", no_argument, NULL, 'c' }, { "arg_d", no_argument, NULL, 'd' }, { "arg_e", no_argument, NULL, 'e' }, { "arg_f", no_argument, NULL, 'f' }, { "arg_g", no_argument, NULL, 'g' }, { "arg_h", no_argument, NULL, 'h' }, { "arg_A", required_argument, NULL, 'A' }, { "arg_B", required_argument, NULL, 'B' }, { "arg_C", required_argument, NULL, 'C' }, { "arg_D", required_argument, NULL, 'D' }, { "arg_E", required_argument, NULL, 'E' }, { "arg_F", required_argument, NULL, 'F' }, { "arg_G", required_argument, NULL, 'G' }, { "arg_H", required_argument, NULL, 'H' }, { "arg_1", optional_argument, NULL, '1' }, { "arg_2", optional_argument, NULL, '2' }, { "arg_3", optional_argument, NULL, '3' }, { "arg_4", optional_argument, NULL, '4' }, { "arg_5", optional_argument, NULL, '5' }, { "arg_6", optional_argument, NULL, '6' }, { "arg_7", optional_argument, NULL, '7' }, { "arg_8", optional_argument, NULL, '8' }, { NULL, 0, NULL, 0 }, }; int main(int argc, char *argv[]) { int opt; int option_index; START(argc, argv, "getopt"); while ((opt = getopt_long(argc, argv, "abcdefghA:B:C:D:E:F:G::H1::2::3::4::5::6::7::8::", long_options, &option_index)) != -1) { switch (opt) { case '?': UT_OUT("unknown argument"); break; case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': case 'g': case 'h': UT_OUT("arg_%c", opt); break; case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': case 'G': case 'H': case '1': case '2': case '3': case '4': case '5': case '6': case '7': case '8': UT_OUT("arg_%c=%s", opt, optarg == NULL ? "null": optarg); break; } } while (optind < argc) { UT_OUT("%s", argv[optind++]); } DONE(NULL); }
2,159
21.736842
55
c
null
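The table-driven test above enumerates every option class the Windows getopt_long() implementation must handle. A minimal, self-contained loop with one option of each kind (the option names are examples):

#include <stdio.h>
#include <getopt.h>

int
main(int argc, char *argv[])
{
	static const struct option longopts[] = {
		{ "verbose", no_argument,       NULL, 'v' },
		{ "output",  required_argument, NULL, 'o' },
		{ "level",   optional_argument, NULL, 'l' },
		{ NULL, 0, NULL, 0 },
	};

	int opt;
	while ((opt = getopt_long(argc, argv, "vo:l::",
			longopts, NULL)) != -1) {
		switch (opt) {
		case 'v':
			printf("verbose\n");
			break;
		case 'o':
			printf("output=%s\n", optarg);
			break;
		case 'l':
			printf("level=%s\n", optarg ? optarg : "default");
			break;
		default:	/* '?' on unknown option */
			return 1;
		}
	}
	return 0;
}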
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/util_sds/util_sds.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2017-2020, Intel Corporation */ /* * util_sds.c -- unit test for shutdown state functions */ #include <stdlib.h> #include "unittest.h" #include "ut_pmem2.h" #include "shutdown_state.h" #include "set.h" #define PMEM_LEN 4096 static char **uids; static size_t uids_size; static size_t uid_it; static uint64_t *uscs; static size_t uscs_size; static size_t usc_it; static pmem2_persist_fn persist; #define FAIL(X, Y) \ if ((X) == (Y)) { \ goto out; \ } int main(int argc, char *argv[]) { START(argc, argv, "util_sds"); if (argc < 2) UT_FATAL("usage: %s init fail (file uuid usc)...", argv[0]); unsigned files = (unsigned)(argc - 2) / 3; char **pmemaddr = MALLOC(files * sizeof(char *)); int *fds = MALLOC(files * sizeof(fds[0])); struct pmem2_map **maps = MALLOC(files * sizeof(maps[0])); uids = MALLOC(files * sizeof(uids[0])); uscs = MALLOC(files * sizeof(uscs[0])); uids_size = files; uscs_size = files; int init = atoi(argv[1]); int fail_on = atoi(argv[2]); char **args = argv + 3; struct pmem2_config *cfg; PMEM2_CONFIG_NEW(&cfg); pmem2_config_set_required_store_granularity(cfg, PMEM2_GRANULARITY_PAGE); for (unsigned i = 0; i < files; i++) { fds[i] = OPEN(args[i * 3], O_CREAT | O_RDWR, 0666); POSIX_FALLOCATE(fds[i], 0, PMEM_LEN); struct pmem2_source *src; PMEM2_SOURCE_FROM_FD(&src, fds[i]); if (pmem2_map(cfg, src, &maps[i])) { UT_FATAL("pmem2_map: %s", pmem2_errormsg()); } pmemaddr[0] = pmem2_map_get_address(maps[i]); uids[i] = args[i * 3 + 1]; uscs[i] = strtoull(args[i * 3 + 2], NULL, 0); PMEM2_SOURCE_DELETE(&src); } persist = pmem2_get_persist_fn(maps[0]); FAIL(fail_on, 1); struct pool_replica *rep = MALLOC( sizeof(*rep) + sizeof(struct pool_set_part)); memset(rep, 0, sizeof(*rep) + sizeof(struct pool_set_part)); struct shutdown_state *pool_sds = (struct shutdown_state *)pmemaddr[0]; if (init) { /* initialize pool shutdown state */ shutdown_state_init(pool_sds, rep); FAIL(fail_on, 2); for (unsigned i = 0; i < files; i++) { if (shutdown_state_add_part(pool_sds, fds[i], rep)) UT_FATAL("shutdown_state_add_part"); FAIL(fail_on, 3); } } else { /* verify a shutdown state saved in the pool */ struct shutdown_state current_sds; shutdown_state_init(&current_sds, NULL); FAIL(fail_on, 2); for (unsigned i = 0; i < files; i++) { if (shutdown_state_add_part(&current_sds, fds[i], NULL)) UT_FATAL("shutdown_state_add_part"); FAIL(fail_on, 3); } if (shutdown_state_check(&current_sds, pool_sds, rep)) { UT_FATAL( "An ADR failure is detected, the pool might be corrupted"); } } FAIL(fail_on, 4); shutdown_state_set_dirty(pool_sds, rep); /* pool is open */ FAIL(fail_on, 5); /* close pool */ shutdown_state_clear_dirty(pool_sds, rep); FAIL(fail_on, 6); out: for (unsigned i = 0; i < files; i++) { pmem2_unmap(&maps[i]); CLOSE(fds[i]); } PMEM2_CONFIG_DELETE(&cfg); FREE(pmemaddr); FREE(uids); FREE(uscs); FREE(fds); FREE(maps); DONE(NULL); } FUNC_MOCK(pmem2_source_device_id, int, const struct pmem2_source *src, char *uid, size_t *len) FUNC_MOCK_RUN_DEFAULT { if (uid_it < uids_size) { if (uid != NULL) { strcpy(uid, uids[uid_it]); uid_it++; } else { *len = strlen(uids[uid_it]) + 1; } } else { return -1; } return 0; } FUNC_MOCK_END FUNC_MOCK(pmem2_source_device_usc, int, const struct pmem2_source *src, uint64_t *usc) FUNC_MOCK_RUN_DEFAULT { if (usc_it < uscs_size) { *usc = uscs[usc_it]; usc_it++; } else { return -1; } return 0; } FUNC_MOCK_END int os_part_deep_common(struct pool_replica *rep, unsigned partidx, void *addr, size_t len, int flush); /* * os_part_deep_common -- XXX 
temporally workaround until we will have pmem2 * integrated with common */ int os_part_deep_common(struct pool_replica *rep, unsigned partidx, void *addr, size_t len, int flush) { /* * this is test - we don't need to deep persist anything - * just call regular persist to make valgrind happy */ persist(addr, len); return 0; } #ifdef _MSC_VER MSVC_CONSTR(libpmem2_init) MSVC_DESTR(libpmem2_fini) #endif
4,175
21.572973
79
c
null
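util_sds.c above reaches the shutdown-state code only after setting up a pmem2 mapping. A sketch that mirrors the same sequence of pmem2 calls used in this source snapshot; the map/unmap signatures changed in later PMDK releases, so this follows the form seen in the test above, with error cleanup trimmed:

#include <libpmem2.h>
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

static int
map_and_store(const char *path)
{
	int fd = open(path, O_RDWR);
	if (fd < 0)
		return -1;

	struct pmem2_config *cfg;
	struct pmem2_source *src;
	struct pmem2_map *map;

	if (pmem2_config_new(&cfg) ||
	    pmem2_config_set_required_store_granularity(cfg,
			PMEM2_GRANULARITY_PAGE) ||
	    pmem2_source_from_fd(&src, fd) ||
	    pmem2_map(cfg, src, &map)) {
		close(fd);
		return -1;
	}

	char *addr = pmem2_map_get_address(map);
	pmem2_persist_fn persist = pmem2_get_persist_fn(map);

	strcpy(addr, "hello");
	persist(addr, sizeof("hello"));

	pmem2_unmap(&map);
	pmem2_source_delete(&src);
	pmem2_config_delete(&cfg);
	close(fd);
	return 0;
}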
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_recreate/obj_recreate.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2017, Intel Corporation */ /* * obj_recreate.c -- recreate pool on dirty file and check consistency */ #include "unittest.h" POBJ_LAYOUT_BEGIN(recreate); POBJ_LAYOUT_ROOT(recreate, struct root); POBJ_LAYOUT_TOID(recreate, struct foo); POBJ_LAYOUT_END(recreate); struct foo { int bar; }; struct root { TOID(struct foo) foo; }; #define LAYOUT_NAME "obj_recreate" #define N PMEMOBJ_MIN_POOL int main(int argc, char *argv[]) { START(argc, argv, "obj_recreate"); /* root doesn't count */ UT_COMPILE_ERROR_ON(POBJ_LAYOUT_TYPES_NUM(recreate) != 1); if (argc < 2) UT_FATAL("usage: %s file-name [trunc]", argv[0]); const char *path = argv[1]; PMEMobjpool *pop = NULL; /* create pool 2*N */ pop = pmemobj_create(path, LAYOUT_NAME, 2 * N, S_IWUSR | S_IRUSR); if (pop == NULL) UT_FATAL("!pmemobj_create: %s", path); /* allocate 1.5*N */ TOID(struct root) root = (TOID(struct root))pmemobj_root(pop, (size_t)(1.5 * N)); /* use root object for something */ POBJ_NEW(pop, &D_RW(root)->foo, struct foo, NULL, NULL); pmemobj_close(pop); int fd = OPEN(path, O_RDWR); if (argc >= 3 && strcmp(argv[2], "trunc") == 0) { UT_OUT("truncating"); /* shrink file to N */ FTRUNCATE(fd, N); } size_t zero_len = Ut_pagesize; /* zero first page */ void *p = MMAP(NULL, zero_len, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0); memset(p, 0, zero_len); MUNMAP(p, zero_len); CLOSE(fd); /* create pool on existing file */ pop = pmemobj_create(path, LAYOUT_NAME, 0, S_IWUSR | S_IRUSR); if (pop == NULL) UT_FATAL("!pmemobj_create: %s", path); /* try to allocate 0.7*N */ root = (TOID(struct root))pmemobj_root(pop, (size_t)(0.5 * N)); if (TOID_IS_NULL(root)) UT_FATAL("couldn't allocate root object"); /* validate root object is empty */ if (!TOID_IS_NULL(D_RW(root)->foo)) UT_FATAL("root object is already filled after pmemobj_create!"); pmemobj_close(pop); DONE(NULL); }
1,968
21.123596
73
c
null
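obj_recreate.c above recreates a pool on top of a zeroed header and checks that the root object comes back empty. The underlying create-and-root sequence consists of only a few public libpmemobj calls; a sketch with an example layout name and a caller-supplied path:

#include <libpmemobj.h>
#include <stdint.h>

struct my_root {
	uint64_t counter;
};

static int
create_pool(const char *path)
{
	PMEMobjpool *pop = pmemobj_create(path, "example_layout",
			PMEMOBJ_MIN_POOL, 0600);
	if (pop == NULL)
		return -1;

	/* allocated and zeroed on the first call for this pool */
	PMEMoid root = pmemobj_root(pop, sizeof(struct my_root));
	struct my_root *rootp = pmemobj_direct(root);

	rootp->counter = 1;
	pmemobj_persist(pop, &rootp->counter, sizeof(rootp->counter));

	pmemobj_close(pop);
	return 0;
}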
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/util_ctl/util_ctl.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2016-2020, Intel Corporation */ /* * util_ctl.c -- tests for the control module */ #include "unittest.h" #include "ctl.h" #include "out.h" #include "pmemcommon.h" #include "fault_injection.h" #define LOG_PREFIX "ut" #define LOG_LEVEL_VAR "TEST_LOG_LEVEL" #define LOG_FILE_VAR "TEST_LOG_FILE" #define MAJOR_VERSION 1 #define MINOR_VERSION 0 struct pool { struct ctl *ctl; }; static char *testconfig_path; static int test_config_written; static int CTL_READ_HANDLER(test_rw)(void *ctx, enum ctl_query_source source, void *arg, struct ctl_indexes *indexes) { UT_ASSERTeq(source, CTL_QUERY_PROGRAMMATIC); int *arg_rw = arg; *arg_rw = 0; return 0; } static int CTL_WRITE_HANDLER(test_rw)(void *ctx, enum ctl_query_source source, void *arg, struct ctl_indexes *indexes) { int *arg_rw = arg; *arg_rw = 1; test_config_written++; return 0; } static struct ctl_argument CTL_ARG(test_rw) = CTL_ARG_INT; static int CTL_WRITE_HANDLER(test_wo)(void *ctx, enum ctl_query_source source, void *arg, struct ctl_indexes *indexes) { int *arg_wo = arg; *arg_wo = 1; test_config_written++; return 0; } static struct ctl_argument CTL_ARG(test_wo) = CTL_ARG_INT; #define TEST_CONFIG_VALUE "abcd" static int CTL_WRITE_HANDLER(test_config)(void *ctx, enum ctl_query_source source, void *arg, struct ctl_indexes *indexes) { UT_ASSERTeq(source, CTL_QUERY_CONFIG_INPUT); char *config_value = arg; UT_ASSERTeq(strcmp(config_value, TEST_CONFIG_VALUE), 0); test_config_written++; return 0; } static struct ctl_argument CTL_ARG(test_config) = CTL_ARG_STRING(8); struct complex_arg { int a; char b[5]; long long c; int d; }; #define COMPLEX_ARG_TEST_A 12345 #define COMPLEX_ARG_TEST_B "abcd" #define COMPLEX_ARG_TEST_C 3147483647 #define COMPLEX_ARG_TEST_D 1 static int CTL_WRITE_HANDLER(test_config_complex_arg)(void *ctx, enum ctl_query_source source, void *arg, struct ctl_indexes *indexes) { UT_ASSERTeq(source, CTL_QUERY_CONFIG_INPUT); struct complex_arg *c = arg; UT_ASSERTeq(c->a, COMPLEX_ARG_TEST_A); UT_ASSERT(strcmp(COMPLEX_ARG_TEST_B, c->b) == 0); UT_ASSERTeq(c->c, COMPLEX_ARG_TEST_C); UT_ASSERTeq(c->d, COMPLEX_ARG_TEST_D); test_config_written++; return 0; } static struct ctl_argument CTL_ARG(test_config_complex_arg) = { .dest_size = sizeof(struct complex_arg), .parsers = { CTL_ARG_PARSER_STRUCT(struct complex_arg, a, ctl_arg_integer), CTL_ARG_PARSER_STRUCT(struct complex_arg, b, ctl_arg_string), CTL_ARG_PARSER_STRUCT(struct complex_arg, c, ctl_arg_integer), CTL_ARG_PARSER_STRUCT(struct complex_arg, d, ctl_arg_boolean), CTL_ARG_PARSER_END } }; static int CTL_READ_HANDLER(test_ro)(void *ctx, enum ctl_query_source source, void *arg, struct ctl_indexes *indexes) { UT_ASSERTeq(source, CTL_QUERY_PROGRAMMATIC); int *arg_ro = arg; *arg_ro = 0; return 0; } static int CTL_READ_HANDLER(index_value)(void *ctx, enum ctl_query_source source, void *arg, struct ctl_indexes *indexes) { UT_ASSERTeq(source, CTL_QUERY_PROGRAMMATIC); long *index_value = arg; struct ctl_index *idx = PMDK_SLIST_FIRST(indexes); UT_ASSERT(strcmp(idx->name, "test_index") == 0); *index_value = idx->value; return 0; } static int CTL_RUNNABLE_HANDLER(test_runnable)(void *ctx, enum ctl_query_source source, void *arg, struct ctl_indexes *indexes) { UT_ASSERTeq(source, CTL_QUERY_PROGRAMMATIC); int *arg_runnable = arg; *arg_runnable = 0; return 0; } static const struct ctl_node CTL_NODE(test_index)[] = { CTL_LEAF_RO(index_value), CTL_NODE_END }; static const struct ctl_node CTL_NODE(debug)[] = { CTL_LEAF_RO(test_ro), CTL_LEAF_WO(test_wo), 
CTL_LEAF_RUNNABLE(test_runnable), CTL_LEAF_RW(test_rw), CTL_INDEXED(test_index), CTL_LEAF_WO(test_config), CTL_LEAF_WO(test_config_complex_arg), CTL_NODE_END }; static int CTL_WRITE_HANDLER(gtest_config)(void *ctx, enum ctl_query_source source, void *arg, struct ctl_indexes *indexes) { UT_ASSERTeq(source, CTL_QUERY_CONFIG_INPUT); char *config_value = arg; UT_ASSERTeq(strcmp(config_value, TEST_CONFIG_VALUE), 0); test_config_written = 1; return 0; } static struct ctl_argument CTL_ARG(gtest_config) = CTL_ARG_STRING(8); static int CTL_READ_HANDLER(gtest_ro)(void *ctx, enum ctl_query_source source, void *arg, struct ctl_indexes *indexes) { UT_ASSERTeq(source, CTL_QUERY_PROGRAMMATIC); int *arg_ro = arg; *arg_ro = 0; return 0; } static const struct ctl_node CTL_NODE(global_debug)[] = { CTL_LEAF_RO(gtest_ro), CTL_LEAF_WO(gtest_config), CTL_NODE_END }; static int util_ctl_get(struct pool *pop, const char *name, void *arg) { LOG(3, "pop %p name %s arg %p", pop, name, arg); return ctl_query(pop ? pop->ctl : NULL, pop, CTL_QUERY_PROGRAMMATIC, name, CTL_QUERY_READ, arg); } static int util_ctl_set(struct pool *pop, const char *name, void *arg) { LOG(3, "pop %p name %s arg %p", pop, name, arg); return ctl_query(pop ? pop->ctl : NULL, pop, CTL_QUERY_PROGRAMMATIC, name, CTL_QUERY_WRITE, arg); } static int util_ctl_exec(struct pool *pop, const char *name, void *arg) { LOG(3, "pop %p name %s arg %p", pop, name, arg); return ctl_query(pop ? pop->ctl : NULL, pop, CTL_QUERY_PROGRAMMATIC, name, CTL_QUERY_RUNNABLE, arg); } static void test_ctl_parser(struct pool *pop) { errno = 0; int ret; ret = util_ctl_get(pop, NULL, NULL); UT_ASSERTne(ret, 0); UT_ASSERTne(errno, 0); errno = 0; ret = util_ctl_get(pop, "a.b.c.d", NULL); UT_ASSERTne(ret, 0); UT_ASSERTne(errno, 0); errno = 0; ret = util_ctl_get(pop, "", NULL); UT_ASSERTne(ret, 0); UT_ASSERTne(errno, 0); errno = 0; ret = util_ctl_get(pop, "debug.", NULL); UT_ASSERTne(ret, 0); UT_ASSERTne(errno, 0); errno = 0; ret = util_ctl_get(pop, ".", NULL); UT_ASSERTne(ret, 0); UT_ASSERTne(errno, 0); errno = 0; ret = util_ctl_get(pop, "..", NULL); UT_ASSERTne(ret, 0); UT_ASSERTne(errno, 0); errno = 0; ret = util_ctl_get(pop, "1.2.3.4", NULL); UT_ASSERTne(ret, 0); UT_ASSERTne(errno, 0); errno = 0; ret = util_ctl_get(pop, "debug.1.", NULL); UT_ASSERTne(ret, 0); UT_ASSERTne(errno, 0); errno = 0; ret = util_ctl_get(pop, "debug.1.invalid", NULL); UT_ASSERTne(ret, 0); UT_ASSERTne(errno, 0); /* test methods set read to 0 and write to 1 if successful */ int arg_read = 1; int arg_write = 0; errno = 0; /* correct name, wrong args */ ret = util_ctl_get(pop, "debug.test_rw", NULL); UT_ASSERTne(ret, 0); UT_ASSERTne(errno, 0); errno = 0; ret = util_ctl_set(pop, "debug.test_rw", NULL); UT_ASSERTne(ret, 0); UT_ASSERTne(errno, 0); errno = 0; ret = util_ctl_get(pop, "debug.test_wo", &arg_read); UT_ASSERTne(ret, 0); UT_ASSERTne(errno, 0); errno = 0; ret = util_ctl_get(pop, "debug.test_wo", NULL); UT_ASSERTne(ret, 0); UT_ASSERTne(errno, 0); errno = 0; ret = util_ctl_set(pop, "debug.test_ro", &arg_write); UT_ASSERTne(ret, 0); UT_ASSERTne(errno, 0); errno = 0; ret = util_ctl_set(pop, "debug.test_ro", NULL); UT_ASSERTne(ret, 0); UT_ASSERTne(errno, 0); errno = 0; ret = util_ctl_get(pop, "debug.test_rw", &arg_read); UT_ASSERTeq(ret, 0); UT_ASSERTeq(arg_read, 0); UT_ASSERTeq(arg_write, 0); UT_ASSERTeq(errno, 0); ret = util_ctl_set(pop, "debug.test_rw", &arg_write); UT_ASSERTeq(ret, 0); UT_ASSERTeq(arg_read, 0); UT_ASSERTeq(arg_write, 1); arg_read = 1; arg_write = 0; ret = util_ctl_get(pop, 
"debug.test_ro", &arg_read); UT_ASSERTeq(ret, 0); UT_ASSERTeq(arg_read, 0); UT_ASSERTeq(arg_write, 0); arg_read = 1; arg_write = 0; ret = util_ctl_set(pop, "debug.test_wo", &arg_write); UT_ASSERTeq(ret, 0); UT_ASSERTeq(arg_read, 1); UT_ASSERTeq(arg_write, 1); long index_value = 0; ret = util_ctl_get(pop, "debug.5.index_value", &index_value); UT_ASSERTeq(ret, 0); UT_ASSERTeq(index_value, 5); ret = util_ctl_get(pop, "debug.10.index_value", &index_value); UT_ASSERTeq(ret, 0); UT_ASSERTeq(index_value, 10); arg_read = 1; arg_write = 1; int arg_runnable = 1; ret = util_ctl_exec(pop, "debug.test_runnable", &arg_runnable); UT_ASSERTeq(ret, 0); UT_ASSERTeq(arg_read, 1); UT_ASSERTeq(arg_write, 1); UT_ASSERTeq(arg_runnable, 0); } static void test_string_config(struct pool *pop) { UT_ASSERTne(pop, NULL); int ret; test_config_written = 0; ret = ctl_load_config_from_string(pop->ctl, pop, ""); UT_ASSERTeq(ret, 0); UT_ASSERTeq(test_config_written, 0); test_config_written = 0; ret = ctl_load_config_from_string(pop->ctl, pop, ";;"); UT_ASSERTeq(ret, 0); UT_ASSERTeq(test_config_written, 0); test_config_written = 0; ret = ctl_load_config_from_string(pop->ctl, pop, ";=;"); UT_ASSERTeq(ret, -1); UT_ASSERTeq(test_config_written, 0); test_config_written = 0; ret = ctl_load_config_from_string(pop->ctl, pop, "="); UT_ASSERTeq(ret, -1); UT_ASSERTeq(test_config_written, 0); test_config_written = 0; ret = ctl_load_config_from_string(pop->ctl, pop, "debug.test_wo="); UT_ASSERTeq(ret, -1); UT_ASSERTeq(test_config_written, 0); test_config_written = 0; ret = ctl_load_config_from_string(pop->ctl, pop, "=b"); UT_ASSERTeq(ret, -1); UT_ASSERTeq(test_config_written, 0); test_config_written = 0; ret = ctl_load_config_from_string(pop->ctl, pop, "debug.test_wo=111=222"); UT_ASSERTeq(ret, -1); UT_ASSERTeq(test_config_written, 0); test_config_written = 0; ret = ctl_load_config_from_string(pop->ctl, pop, "debug.test_wo=333;debug.test_rw=444;"); UT_ASSERTeq(ret, 0); UT_ASSERTeq(test_config_written, 2); test_config_written = 0; ret = ctl_load_config_from_string(pop->ctl, pop, "debug.test_config="TEST_CONFIG_VALUE";"); UT_ASSERTeq(ret, 0); UT_ASSERTeq(test_config_written, 1); } static void config_file_create(const char *buf) { /* the test script will take care of removing this file for us */ FILE *f = os_fopen(testconfig_path, "w+"); fwrite(buf, sizeof(char), strlen(buf), f); fclose(f); } static void create_and_test_file_config(struct pool *pop, const char *buf, int ret, int result) { config_file_create(buf); test_config_written = 0; int r = ctl_load_config_from_file(pop ? 
pop->ctl : NULL, pop, testconfig_path); UT_ASSERTeq(r, ret); UT_ASSERTeq(test_config_written, result); } static void test_too_large_file(struct pool *pop) { char *too_large_buf = calloc(1, 1 << 21); UT_ASSERTne(too_large_buf, NULL); memset(too_large_buf, 0xc, (1 << 21) - 1); config_file_create(too_large_buf); int ret = ctl_load_config_from_file(pop->ctl, pop, testconfig_path); UT_ASSERTne(ret, 0); free(too_large_buf); } static void test_file_config(struct pool *pop) { create_and_test_file_config(pop, "debug.test_config="TEST_CONFIG_VALUE";", 0, 1); create_and_test_file_config(pop, "debug.test_config="TEST_CONFIG_VALUE";" "debug.test_config="TEST_CONFIG_VALUE";", 0, 2); create_and_test_file_config(pop, "#this is a comment\n" "debug.test_config="TEST_CONFIG_VALUE";", 0, 1); create_and_test_file_config(pop, "debug.#this is a comment\n" "test_config#this is a comment\n" "="TEST_CONFIG_VALUE";", 0, 1); create_and_test_file_config(pop, "debug.test_config="TEST_CONFIG_VALUE";#this is a comment", 0, 1); create_and_test_file_config(pop, "\n\n\ndebug\n.\ntest\t_\tconfig="TEST_CONFIG_VALUE";\n", 0, 1); create_and_test_file_config(pop, " d e b u g . t e s t _ c o n f i g = "TEST_CONFIG_VALUE";", 0, 1); create_and_test_file_config(pop, "#debug.test_config="TEST_CONFIG_VALUE";", 0, 0); create_and_test_file_config(pop, "debug.#this is a comment\n" "test_config#this is a not properly terminated comment" "="TEST_CONFIG_VALUE";", -1, 0); create_and_test_file_config(pop, "invalid", -1, 0); create_and_test_file_config(pop, "", 0, 0); create_and_test_file_config(pop, "debug.test_config_complex_arg=;", -1, 0); create_and_test_file_config(pop, "debug.test_config_complex_arg=1,2,3;", -1, 0); create_and_test_file_config(pop, "debug.test_config_complex_arg=12345,abcd,,1;", -1, 0); create_and_test_file_config(pop, "debug.test_config_complex_arg=12345,abcd,3147483647,1;", 0, 1); create_and_test_file_config(NULL, "global_debug.gtest_config="TEST_CONFIG_VALUE";", 0, 1); create_and_test_file_config(NULL, "private.missing.query=1;" "global_debug.gtest_config="TEST_CONFIG_VALUE";", 0, 1); test_too_large_file(pop); int ret = ctl_load_config_from_file(pop->ctl, pop, "does_not_exist"); UT_ASSERTne(ret, 0); } static void test_ctl_global_namespace(struct pool *pop) { int arg_read = 1; int ret = util_ctl_get(pop, "global_debug.gtest_ro", &arg_read); UT_ASSERTeq(ret, 0); UT_ASSERTeq(arg_read, 0); } static void test_ctl_arg_parsers() { char *input; input = ""; int boolean = -1; int ret = ctl_arg_boolean(input, &boolean, sizeof(int)); UT_ASSERTeq(ret, -1); UT_ASSERTeq(boolean, -1); input = "abcdefgh"; boolean = -1; ret = ctl_arg_boolean(input, &boolean, sizeof(int)); UT_ASSERTeq(ret, -1); UT_ASSERTeq(boolean, -1); input = "-999"; boolean = -1; ret = ctl_arg_boolean(input, &boolean, sizeof(int)); UT_ASSERTeq(ret, -1); UT_ASSERTeq(boolean, -1); input = "N"; boolean = -1; ret = ctl_arg_boolean(input, &boolean, sizeof(int)); UT_ASSERTeq(ret, 0); UT_ASSERTeq(boolean, 0); input = "0"; boolean = -1; ret = ctl_arg_boolean(input, &boolean, sizeof(int)); UT_ASSERTeq(ret, 0); UT_ASSERTeq(boolean, 0); input = "yes"; boolean = -1; ret = ctl_arg_boolean(input, &boolean, sizeof(int)); UT_ASSERTeq(ret, 0); UT_ASSERTeq(boolean, 1); input = "Yes"; boolean = -1; ret = ctl_arg_boolean(input, &boolean, sizeof(int)); UT_ASSERTeq(ret, 0); UT_ASSERTeq(boolean, 1); input = "1"; boolean = -1; ret = ctl_arg_boolean(input, &boolean, sizeof(int)); UT_ASSERTeq(ret, 0); UT_ASSERTeq(boolean, 1); input = "1234"; boolean = -1; ret = ctl_arg_boolean(input, 
&boolean, sizeof(int)); UT_ASSERTeq(ret, 0); UT_ASSERTeq(boolean, 1); input = ""; int small_int = -1; ret = ctl_arg_integer(input, &small_int, sizeof(small_int)); UT_ASSERTeq(ret, -1); UT_ASSERTeq(small_int, -1); input = "abcd"; small_int = -1; ret = ctl_arg_integer(input, &small_int, sizeof(small_int)); UT_ASSERTeq(ret, -1); UT_ASSERTeq(small_int, -1); input = "12345678901234567890"; small_int = -1; ret = ctl_arg_integer(input, &small_int, sizeof(small_int)); UT_ASSERTeq(ret, -1); UT_ASSERTeq(small_int, -1); input = "-12345678901234567890"; small_int = -1; ret = ctl_arg_integer(input, &small_int, sizeof(small_int)); UT_ASSERTeq(ret, -1); UT_ASSERTeq(small_int, -1); input = "2147483648"; /* INT_MAX + 1 */ small_int = -1; ret = ctl_arg_integer(input, &small_int, sizeof(small_int)); UT_ASSERTeq(ret, -1); UT_ASSERTeq(small_int, -1); input = "-2147483649"; /* INT_MIN - 2 */ small_int = -1; ret = ctl_arg_integer(input, &small_int, sizeof(small_int)); UT_ASSERTeq(ret, -1); UT_ASSERTeq(small_int, -1); input = "0"; small_int = -1; ret = ctl_arg_integer(input, &small_int, sizeof(small_int)); UT_ASSERTeq(ret, 0); UT_ASSERTeq(small_int, 0); input = "500"; small_int = -1; ret = ctl_arg_integer(input, &small_int, sizeof(small_int)); UT_ASSERTeq(ret, 0); UT_ASSERTeq(small_int, 500); input = "-500"; small_int = -1; ret = ctl_arg_integer(input, &small_int, sizeof(small_int)); UT_ASSERTeq(ret, 0); UT_ASSERTeq(small_int, -500); input = ""; long long ll_int = -1; ret = ctl_arg_integer(input, &ll_int, sizeof(ll_int)); UT_ASSERTeq(ret, -1); UT_ASSERTeq(ll_int, -1); input = "12345678901234567890"; ll_int = -1; ret = ctl_arg_integer(input, &ll_int, sizeof(ll_int)); UT_ASSERTeq(ret, -1); UT_ASSERTeq(ll_int, -1); input = "-12345678901234567890"; ll_int = -1; ret = ctl_arg_integer(input, &ll_int, sizeof(ll_int)); UT_ASSERTeq(ret, -1); UT_ASSERTeq(ll_int, -1); input = "2147483648"; ll_int = -1; ret = ctl_arg_integer(input, &ll_int, sizeof(ll_int)); UT_ASSERTeq(ret, 0); UT_ASSERTeq(ll_int, 2147483648); input = "-2147483649"; ll_int = -1; ret = ctl_arg_integer(input, &ll_int, sizeof(ll_int)); UT_ASSERTeq(ret, 0); UT_ASSERTeq(ll_int, -2147483649LL); input = ""; char string[1000] = {0}; ret = ctl_arg_string(input, string, 0); UT_ASSERTeq(ret, -1); input = "abcd"; ret = ctl_arg_string(input, string, 3); UT_ASSERTeq(ret, -1); input = "abcdefg"; ret = ctl_arg_string(input, string, 3); UT_ASSERTeq(ret, -1); input = "abcd"; ret = ctl_arg_string(input, string, 4); UT_ASSERTeq(ret, -1); input = "abc"; ret = ctl_arg_string(input, string, 4); UT_ASSERTeq(ret, 0); UT_ASSERT(strcmp(input, string) == 0); } static void test_fault_injection(struct pool *pop) { if (!core_fault_injection_enabled()) return; UT_ASSERTne(pop, NULL); core_inject_fault_at(PMEM_MALLOC, 1, "ctl_parse_args"); test_config_written = 0; int ret = ctl_load_config_from_string(pop->ctl, pop, "debug.test_wo=333;debug.test_rw=444;"); UT_ASSERTne(ret, 0); UT_ASSERTeq(errno, ENOMEM); } int main(int argc, char *argv[]) { START(argc, argv, "util_ctl"); common_init(LOG_PREFIX, LOG_LEVEL_VAR, LOG_FILE_VAR, MAJOR_VERSION, MINOR_VERSION); if (argc != 2) UT_FATAL("usage: %s testconfig", argv[0]); testconfig_path = argv[1]; CTL_REGISTER_MODULE(NULL, global_debug); test_ctl_global_namespace(NULL); struct pool *pop = malloc(sizeof(pop)); pop->ctl = ctl_new(); test_ctl_global_namespace(NULL); CTL_REGISTER_MODULE(pop->ctl, debug); test_ctl_global_namespace(pop); test_fault_injection(pop); test_ctl_parser(pop); test_string_config(pop); test_file_config(pop); test_ctl_arg_parsers(); 
ctl_delete(pop->ctl); free(pop); common_fini(); DONE(NULL); }
17,492
22.639189
72
c
null
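util_ctl.c above tests the internal parser behind the "name=value;" configuration strings. At the public libpmemobj level the same syntax is, to the best of my recollection, consumed from the PMEMOBJ_CONF environment variable, and single entries can be queried programmatically; prefault.at_create is used here only as an example ctl name:

/* shell: PMEMOBJ_CONF="prefault.at_create=1;" ./app */

#include <libpmemobj.h>
#include <stdio.h>

static void
show_prefault_setting(PMEMobjpool *pop)
{
	int at_create = 0;
	if (pmemobj_ctl_get(pop, "prefault.at_create", &at_create) == 0)
		printf("prefault.at_create=%d\n", at_create);
}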
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/pmem_has_auto_flush_win/mocks_windows.h
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2018, Intel Corporation */

/*
 * mocks_windows.h -- redefinitions of EnumSystemFirmwareTables and
 * GetSystemFirmwareTable
 *
 * This file is Windows-specific.
 *
 * This file should be included (i.e. using Forced Include) by libpmem
 * files, when compiled for the purpose of pmem_has_auto_flush_win test.
 * It would replace default implementation with mocked functions defined
 * in mocks_windows.c
 *
 * This WRAP_REAL define could be also passed as preprocessor definition.
 */

#include <windows.h>

#ifndef WRAP_REAL
#define EnumSystemFirmwareTables __wrap_EnumSystemFirmwareTables
#define GetSystemFirmwareTable __wrap_GetSystemFirmwareTable
UINT
__wrap_EnumSystemFirmwareTables(DWORD FirmwareTableProviderSignature,
	PVOID pFirmwareTableEnumBuffer, DWORD BufferSize);
UINT
__wrap_GetSystemFirmwareTable(DWORD FirmwareTableProviderSignature,
	DWORD FirmwareTableID, PVOID pFirmwareTableBuffer,
	DWORD BufferSize);
#endif
988
33.103448
73
h
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/pmem_has_auto_flush_win/mocks_windows.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2018-2019, Intel Corporation */ /* * mocks_windows.c -- mocked functions used in auto_flush_windows.c */ #include "util.h" #include "unittest.h" #include "set.h" #include "pmemcommon.h" #include "auto_flush_windows.h" #include "pmem_has_auto_flush_win.h" #include <errno.h> extern size_t Is_nfit; extern size_t Pc_type; extern size_t Pc_capabilities; FUNC_MOCK_DLLIMPORT(EnumSystemFirmwareTables, UINT, DWORD FirmwareTableProviderSignature, PVOID pFirmwareTableBuffer, DWORD BufferSize) FUNC_MOCK_RUN_DEFAULT { if (FirmwareTableProviderSignature != ACPI_SIGNATURE) return _FUNC_REAL(EnumSystemFirmwareTables) (FirmwareTableProviderSignature, pFirmwareTableBuffer, BufferSize); if (Is_nfit == 1 && pFirmwareTableBuffer != NULL && BufferSize != 0) { UT_OUT("Mock NFIT available"); strncpy(pFirmwareTableBuffer, NFIT_STR_SIGNATURE, BufferSize); } return NFIT_SIGNATURE_LEN + sizeof(struct nfit_header); } FUNC_MOCK_END FUNC_MOCK_DLLIMPORT(GetSystemFirmwareTable, UINT, DWORD FirmwareTableProviderSignature, DWORD FirmwareTableID, PVOID pFirmwareTableBuffer, DWORD BufferSize) FUNC_MOCK_RUN_DEFAULT { if (FirmwareTableProviderSignature != ACPI_SIGNATURE || FirmwareTableID != NFIT_REV_SIGNATURE) return _FUNC_REAL(GetSystemFirmwareTable) (FirmwareTableProviderSignature, FirmwareTableID, pFirmwareTableBuffer, BufferSize); if (pFirmwareTableBuffer == NULL && BufferSize == 0) { UT_OUT("GetSystemFirmwareTable mock"); return sizeof(struct platform_capabilities) + sizeof(struct nfit_header); } struct nfit_header nfit; struct platform_capabilities pc; /* fill nfit */ char sig[NFIT_SIGNATURE_LEN] = NFIT_STR_SIGNATURE; strncpy(nfit.signature, sig, NFIT_SIGNATURE_LEN); nfit.length = sizeof(nfit); memcpy(pFirmwareTableBuffer, &nfit, nfit.length); /* fill platform_capabilities */ pc.length = sizeof(pc); /* [...] 0000 0011 - proper capabilities bits combination */ pc.capabilities = (uint32_t)Pc_capabilities; pc.type = (uint16_t)Pc_type; memcpy((char *)pFirmwareTableBuffer + nfit.length, &pc, pc.length); return BufferSize; } FUNC_MOCK_END
2,173
28.378378
68
c
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/pmem_has_auto_flush_win/pmem_has_auto_flush_win.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2018-2019, Intel Corporation */ /* * pmem_has_auto_flush_win.c -- unit test for pmem_has_auto_flush_win() * * usage: pmem_has_auto_flush_win <nfit> <type> <capabilities> * options: * nfit: is the NFIT table available (y or n) * type: number of platform capabilities structure * capabilities: platform capabilities bits */ #include <stdbool.h> #include <errno.h> #include "unittest.h" #include "pmem.h" #include "pmemcommon.h" #include "set.h" #include "mocks_windows.h" #include "pmem_has_auto_flush_win.h" #include "util.h" #define LOG_PREFIX "ut" #define LOG_LEVEL_VAR "TEST_LOG_LEVEL" #define LOG_FILE_VAR "TEST_LOG_FILE" #define MAJOR_VERSION 1 #define MINOR_VERSION 0 size_t Is_nfit = 0; size_t Pc_type = 0; size_t Pc_capabilities = 3; int main(int argc, char *argv[]) { START(argc, argv, "pmem_has_auto_flush_win"); common_init(LOG_PREFIX, LOG_LEVEL_VAR, LOG_FILE_VAR, MAJOR_VERSION, MINOR_VERSION); if (argc < 4) UT_FATAL("usage: %s <nfit> <type> <capabilities>", argv[0]); pmem_init(); Pc_type = (size_t)atoi(argv[2]); Pc_capabilities = (size_t)atoi(argv[3]); Is_nfit = argv[1][0] == 'y'; int eADR = pmem_has_auto_flush(); UT_OUT("pmem_has_auto_flush ret: %d", eADR); common_fini(); DONE(NULL); }
1,305
21.517241
71
c
null
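Outside the mocked environment, the same check is a one-liner against the public libpmem API. A minimal sketch, assuming libpmem is installed:

/* eadr_check.c -- calling pmem_has_auto_flush() directly */
#include <libpmem.h>
#include <stdio.h>

int
main(void)
{
	int ret = pmem_has_auto_flush();
	if (ret < 0)
		perror("pmem_has_auto_flush");
	else
		printf("platform auto flush (eADR): %s\n",
			ret ? "supported" : "not supported");
	return 0;
}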
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_tx_alloc/obj_tx_alloc.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2020, Intel Corporation */ /* * obj_tx_alloc.c -- unit test for pmemobj_tx_alloc and pmemobj_tx_zalloc */ #include <assert.h> #include <sys/param.h> #include <string.h> #include "unittest.h" #include "libpmemobj.h" #include "util.h" #include "valgrind_internal.h" #define LAYOUT_NAME "tx_alloc" #define TEST_VALUE_1 1 #define TEST_VALUE_2 2 #define OBJ_SIZE (200 * 1024) enum type_number { TYPE_NO_TX, TYPE_COMMIT, TYPE_ABORT, TYPE_ZEROED_COMMIT, TYPE_ZEROED_ABORT, TYPE_XCOMMIT, TYPE_XABORT, TYPE_XZEROED_COMMIT, TYPE_XZEROED_ABORT, TYPE_XNOFLUSHED_COMMIT, TYPE_COMMIT_NESTED1, TYPE_COMMIT_NESTED2, TYPE_ABORT_NESTED1, TYPE_ABORT_NESTED2, TYPE_ABORT_AFTER_NESTED1, TYPE_ABORT_AFTER_NESTED2, TYPE_OOM, }; TOID_DECLARE(struct object, TYPE_OOM); struct object { size_t value; char data[OBJ_SIZE - sizeof(size_t)]; }; /* * do_tx_alloc_oom -- allocates objects until OOM */ static void do_tx_alloc_oom(PMEMobjpool *pop) { int do_alloc = 1; size_t alloc_cnt = 0; do { TX_BEGIN(pop) { TOID(struct object) obj = TX_NEW(struct object); D_RW(obj)->value = alloc_cnt; } TX_ONCOMMIT { alloc_cnt++; } TX_ONABORT { do_alloc = 0; } TX_END } while (do_alloc); size_t bitmap_size = howmany(alloc_cnt, 8); char *bitmap = (char *)MALLOC(bitmap_size); memset(bitmap, 0, bitmap_size); size_t obj_cnt = 0; TOID(struct object) i; POBJ_FOREACH_TYPE(pop, i) { UT_ASSERT(D_RO(i)->value < alloc_cnt); UT_ASSERT(!isset(bitmap, D_RO(i)->value)); setbit(bitmap, D_RO(i)->value); obj_cnt++; } FREE(bitmap); UT_ASSERTeq(obj_cnt, alloc_cnt); TOID(struct object) o = POBJ_FIRST(pop, struct object); while (!TOID_IS_NULL(o)) { TOID(struct object) next = POBJ_NEXT(o); POBJ_FREE(&o); o = next; } } /* * do_tx_alloc_abort_after_nested -- aborts transaction after allocation * in nested transaction */ static void do_tx_alloc_abort_after_nested(PMEMobjpool *pop) { TOID(struct object) obj1; TOID(struct object) obj2; TX_BEGIN(pop) { TOID_ASSIGN(obj1, pmemobj_tx_alloc(sizeof(struct object), TYPE_ABORT_AFTER_NESTED1)); UT_ASSERT(!TOID_IS_NULL(obj1)); D_RW(obj1)->value = TEST_VALUE_1; TX_BEGIN(pop) { TOID_ASSIGN(obj2, pmemobj_tx_zalloc( sizeof(struct object), TYPE_ABORT_AFTER_NESTED2)); UT_ASSERT(!TOID_IS_NULL(obj2)); UT_ASSERT(util_is_zeroed(D_RO(obj2), sizeof(struct object))); D_RW(obj2)->value = TEST_VALUE_2; } TX_ONCOMMIT { UT_ASSERTeq(D_RO(obj2)->value, TEST_VALUE_2); } TX_ONABORT { UT_ASSERT(0); } TX_END pmemobj_tx_abort(-1); } TX_ONCOMMIT { UT_ASSERT(0); } TX_ONABORT { TOID_ASSIGN(obj1, OID_NULL); TOID_ASSIGN(obj2, OID_NULL); } TX_END TOID(struct object) first; /* check the obj1 object */ UT_ASSERT(TOID_IS_NULL(obj1)); first.oid = POBJ_FIRST_TYPE_NUM(pop, TYPE_ABORT_AFTER_NESTED1); UT_ASSERT(TOID_IS_NULL(first)); /* check the obj2 object */ UT_ASSERT(TOID_IS_NULL(obj2)); first.oid = POBJ_FIRST_TYPE_NUM(pop, TYPE_ABORT_AFTER_NESTED2); UT_ASSERT(TOID_IS_NULL(first)); } /* * do_tx_alloc_abort_nested -- aborts transaction in nested transaction */ static void do_tx_alloc_abort_nested(PMEMobjpool *pop) { TOID(struct object) obj1; TOID(struct object) obj2; TX_BEGIN(pop) { TOID_ASSIGN(obj1, pmemobj_tx_alloc(sizeof(struct object), TYPE_ABORT_NESTED1)); UT_ASSERT(!TOID_IS_NULL(obj1)); D_RW(obj1)->value = TEST_VALUE_1; TX_BEGIN(pop) { TOID_ASSIGN(obj2, pmemobj_tx_zalloc( sizeof(struct object), TYPE_ABORT_NESTED2)); UT_ASSERT(!TOID_IS_NULL(obj2)); UT_ASSERT(util_is_zeroed(D_RO(obj2), sizeof(struct object))); D_RW(obj2)->value = TEST_VALUE_2; pmemobj_tx_abort(-1); } TX_ONCOMMIT { UT_ASSERT(0); } TX_ONABORT { 
TOID_ASSIGN(obj2, OID_NULL); } TX_END } TX_ONCOMMIT { UT_ASSERT(0); } TX_ONABORT { TOID_ASSIGN(obj1, OID_NULL); } TX_END TOID(struct object) first; /* check the obj1 object */ UT_ASSERT(TOID_IS_NULL(obj1)); first.oid = POBJ_FIRST_TYPE_NUM(pop, TYPE_ABORT_NESTED1); UT_ASSERT(TOID_IS_NULL(first)); /* check the obj2 object */ UT_ASSERT(TOID_IS_NULL(obj2)); first.oid = POBJ_FIRST_TYPE_NUM(pop, TYPE_ABORT_NESTED2); UT_ASSERT(TOID_IS_NULL(first)); } /* * do_tx_alloc_commit_nested -- allocates two objects, one in nested transaction */ static void do_tx_alloc_commit_nested(PMEMobjpool *pop) { TOID(struct object) obj1; TOID(struct object) obj2; TX_BEGIN(pop) { TOID_ASSIGN(obj1, pmemobj_tx_alloc(sizeof(struct object), TYPE_COMMIT_NESTED1)); UT_ASSERT(!TOID_IS_NULL(obj1)); D_RW(obj1)->value = TEST_VALUE_1; TX_BEGIN(pop) { TOID_ASSIGN(obj2, pmemobj_tx_zalloc( sizeof(struct object), TYPE_COMMIT_NESTED2)); UT_ASSERT(!TOID_IS_NULL(obj2)); UT_ASSERT(util_is_zeroed(D_RO(obj2), sizeof(struct object))); D_RW(obj2)->value = TEST_VALUE_2; } TX_ONCOMMIT { UT_ASSERTeq(D_RO(obj1)->value, TEST_VALUE_1); UT_ASSERTeq(D_RO(obj2)->value, TEST_VALUE_2); } TX_ONABORT { UT_ASSERT(0); } TX_END } TX_ONCOMMIT { UT_ASSERTeq(D_RO(obj1)->value, TEST_VALUE_1); UT_ASSERTeq(D_RO(obj2)->value, TEST_VALUE_2); } TX_ONABORT { UT_ASSERT(0); } TX_END TOID(struct object) first; TOID(struct object) next; /* check the obj1 object */ TOID_ASSIGN(first, POBJ_FIRST_TYPE_NUM(pop, TYPE_COMMIT_NESTED1)); UT_ASSERT(TOID_EQUALS(first, obj1)); UT_ASSERTeq(D_RO(first)->value, TEST_VALUE_1); TOID_ASSIGN(next, POBJ_NEXT_TYPE_NUM(first.oid)); UT_ASSERT(TOID_IS_NULL(next)); /* check the obj2 object */ TOID_ASSIGN(first, POBJ_FIRST_TYPE_NUM(pop, TYPE_COMMIT_NESTED2)); UT_ASSERT(TOID_EQUALS(first, obj2)); UT_ASSERTeq(D_RO(first)->value, TEST_VALUE_2); TOID_ASSIGN(next, POBJ_NEXT_TYPE_NUM(first.oid)); UT_ASSERT(TOID_IS_NULL(next)); } /* * do_tx_alloc_abort -- allocates an object and aborts the transaction */ static void do_tx_alloc_abort(PMEMobjpool *pop) { TOID(struct object) obj; TX_BEGIN(pop) { TOID_ASSIGN(obj, pmemobj_tx_alloc(sizeof(struct object), TYPE_ABORT)); UT_ASSERT(!TOID_IS_NULL(obj)); D_RW(obj)->value = TEST_VALUE_1; pmemobj_tx_abort(-1); } TX_ONCOMMIT { UT_ASSERT(0); } TX_ONABORT { TOID_ASSIGN(obj, OID_NULL); } TX_END UT_ASSERT(TOID_IS_NULL(obj)); TOID(struct object) first; TOID_ASSIGN(first, POBJ_FIRST_TYPE_NUM(pop, TYPE_ABORT)); UT_ASSERT(TOID_IS_NULL(first)); } /* * do_tx_alloc_zerolen -- allocates an object of zero size to trigger tx abort */ static void do_tx_alloc_zerolen(PMEMobjpool *pop) { TOID(struct object) obj; TX_BEGIN(pop) { TOID_ASSIGN(obj, pmemobj_tx_alloc(0, TYPE_ABORT)); UT_ASSERT(0); /* should not get to this point */ } TX_ONCOMMIT { UT_ASSERT(0); } TX_ONABORT { TOID_ASSIGN(obj, OID_NULL); } TX_END UT_ASSERT(TOID_IS_NULL(obj)); TOID(struct object) first; TOID_ASSIGN(first, POBJ_FIRST_TYPE_NUM(pop, TYPE_ABORT)); UT_ASSERT(TOID_IS_NULL(first)); } /* * do_tx_alloc_huge -- allocates a huge object to trigger tx abort */ static void do_tx_alloc_huge(PMEMobjpool *pop) { TOID(struct object) obj; TX_BEGIN(pop) { TOID_ASSIGN(obj, pmemobj_tx_alloc(PMEMOBJ_MAX_ALLOC_SIZE + 1, TYPE_ABORT)); UT_ASSERT(0); /* should not get to this point */ } TX_ONCOMMIT { UT_ASSERT(0); } TX_ONABORT { TOID_ASSIGN(obj, OID_NULL); } TX_END UT_ASSERT(TOID_IS_NULL(obj)); TOID(struct object) first; TOID_ASSIGN(first, POBJ_FIRST_TYPE_NUM(pop, TYPE_ABORT)); UT_ASSERT(TOID_IS_NULL(first)); } /* * do_tx_alloc_commit -- allocates and object */ static void 
do_tx_alloc_commit(PMEMobjpool *pop) { TOID(struct object) obj; TX_BEGIN(pop) { TOID_ASSIGN(obj, pmemobj_tx_alloc(sizeof(struct object), TYPE_COMMIT)); UT_ASSERT(!TOID_IS_NULL(obj)); D_RW(obj)->value = TEST_VALUE_1; } TX_ONCOMMIT { UT_ASSERTeq(D_RO(obj)->value, TEST_VALUE_1); } TX_ONABORT { UT_ASSERT(0); } TX_END TOID(struct object) first; TOID_ASSIGN(first, POBJ_FIRST_TYPE_NUM(pop, TYPE_COMMIT)); UT_ASSERT(TOID_EQUALS(first, obj)); UT_ASSERTeq(D_RO(first)->value, D_RO(obj)->value); TOID(struct object) next; next = POBJ_NEXT(first); UT_ASSERT(TOID_IS_NULL(next)); } /* * do_tx_zalloc_abort -- allocates a zeroed object and aborts the transaction */ static void do_tx_zalloc_abort(PMEMobjpool *pop) { TOID(struct object) obj; TX_BEGIN(pop) { TOID_ASSIGN(obj, pmemobj_tx_zalloc(sizeof(struct object), TYPE_ZEROED_ABORT)); UT_ASSERT(!TOID_IS_NULL(obj)); UT_ASSERT(util_is_zeroed(D_RO(obj), sizeof(struct object))); D_RW(obj)->value = TEST_VALUE_1; pmemobj_tx_abort(-1); } TX_ONCOMMIT { UT_ASSERT(0); } TX_ONABORT { TOID_ASSIGN(obj, OID_NULL); } TX_END UT_ASSERT(TOID_IS_NULL(obj)); TOID(struct object) first; TOID_ASSIGN(first, POBJ_FIRST_TYPE_NUM(pop, TYPE_ZEROED_ABORT)); UT_ASSERT(TOID_IS_NULL(first)); } /* * do_tx_zalloc_zerolen -- allocate an object of zero size to trigger tx abort */ static void do_tx_zalloc_zerolen(PMEMobjpool *pop) { TOID(struct object) obj; TX_BEGIN(pop) { TOID_ASSIGN(obj, pmemobj_tx_zalloc(0, TYPE_ZEROED_ABORT)); UT_ASSERT(0); /* should not get to this point */ } TX_ONCOMMIT { UT_ASSERT(0); } TX_ONABORT { TOID_ASSIGN(obj, OID_NULL); } TX_END UT_ASSERT(TOID_IS_NULL(obj)); TOID(struct object) first; TOID_ASSIGN(first, POBJ_FIRST_TYPE_NUM(pop, TYPE_ZEROED_ABORT)); UT_ASSERT(TOID_IS_NULL(first)); } /* * do_tx_zalloc_huge -- allocates a huge object to trigger tx abort */ static void do_tx_zalloc_huge(PMEMobjpool *pop) { TOID(struct object) obj; TX_BEGIN(pop) { TOID_ASSIGN(obj, pmemobj_tx_zalloc(PMEMOBJ_MAX_ALLOC_SIZE + 1, TYPE_ZEROED_ABORT)); UT_ASSERT(0); /* should not get to this point */ } TX_ONCOMMIT { UT_ASSERT(0); } TX_ONABORT { TOID_ASSIGN(obj, OID_NULL); } TX_END UT_ASSERT(TOID_IS_NULL(obj)); TOID(struct object) first; TOID_ASSIGN(first, POBJ_FIRST_TYPE_NUM(pop, TYPE_ZEROED_ABORT)); UT_ASSERT(TOID_IS_NULL(first)); } /* * do_tx_zalloc_commit -- allocates zeroed object */ static void do_tx_zalloc_commit(PMEMobjpool *pop) { TOID(struct object) obj; TX_BEGIN(pop) { TOID_ASSIGN(obj, pmemobj_tx_zalloc(sizeof(struct object), TYPE_ZEROED_COMMIT)); UT_ASSERT(!TOID_IS_NULL(obj)); UT_ASSERT(util_is_zeroed(D_RO(obj), sizeof(struct object))); D_RW(obj)->value = TEST_VALUE_1; } TX_ONCOMMIT { UT_ASSERTeq(D_RO(obj)->value, TEST_VALUE_1); } TX_ONABORT { UT_ASSERT(0); } TX_END TOID(struct object) first; TOID_ASSIGN(first, POBJ_FIRST_TYPE_NUM(pop, TYPE_ZEROED_COMMIT)); UT_ASSERT(TOID_EQUALS(first, obj)); UT_ASSERTeq(D_RO(first)->value, D_RO(obj)->value); TOID(struct object) next; next = POBJ_NEXT(first); UT_ASSERT(TOID_IS_NULL(next)); } /* * do_tx_xalloc_abort -- allocates a zeroed object and aborts the transaction */ static void do_tx_xalloc_abort(PMEMobjpool *pop) { /* xalloc 0 */ TOID(struct object) obj; TX_BEGIN(pop) { TOID_ASSIGN(obj, pmemobj_tx_xalloc(sizeof(struct object), TYPE_XABORT, 0)); UT_ASSERT(!TOID_IS_NULL(obj)); D_RW(obj)->value = TEST_VALUE_1; pmemobj_tx_abort(-1); } TX_ONCOMMIT { UT_ASSERT(0); } TX_ONABORT { TOID_ASSIGN(obj, OID_NULL); } TX_END UT_ASSERT(TOID_IS_NULL(obj)); TOID(struct object) first; TOID_ASSIGN(first, POBJ_FIRST_TYPE_NUM(pop, TYPE_XABORT)); 
UT_ASSERT(TOID_IS_NULL(first)); /* xalloc ZERO */ TX_BEGIN(pop) { TOID_ASSIGN(obj, pmemobj_tx_xalloc(sizeof(struct object), TYPE_XZEROED_ABORT, POBJ_XALLOC_ZERO)); UT_ASSERT(!TOID_IS_NULL(obj)); UT_ASSERT(util_is_zeroed(D_RO(obj), sizeof(struct object))); D_RW(obj)->value = TEST_VALUE_1; pmemobj_tx_abort(-1); } TX_ONCOMMIT { UT_ASSERT(0); } TX_ONABORT { TOID_ASSIGN(obj, OID_NULL); } TX_END UT_ASSERT(TOID_IS_NULL(obj)); TOID_ASSIGN(first, POBJ_FIRST_TYPE_NUM(pop, TYPE_XZEROED_ABORT)); UT_ASSERT(TOID_IS_NULL(first)); } /* * do_tx_xalloc_zerolen -- allocate an object of zero size to trigger tx abort */ static void do_tx_xalloc_zerolen(PMEMobjpool *pop) { /* xalloc 0 */ TOID(struct object) obj; TX_BEGIN(pop) { TOID_ASSIGN(obj, pmemobj_tx_xalloc(0, TYPE_XABORT, 0)); UT_ASSERT(0); /* should not get to this point */ } TX_ONCOMMIT { UT_ASSERT(0); } TX_ONABORT { TOID_ASSIGN(obj, OID_NULL); } TX_END UT_ASSERT(TOID_IS_NULL(obj)); /* xalloc 0 with POBJ_XALLOC_NO_ABORT flag */ TX_BEGIN(pop) { TOID_ASSIGN(obj, pmemobj_tx_xalloc(0, TYPE_XABORT, POBJ_XALLOC_NO_ABORT)); } TX_ONCOMMIT { TOID_ASSIGN(obj, OID_NULL); } TX_ONABORT { UT_ASSERT(0); /* should not get to this point */ } TX_END UT_ASSERT(TOID_IS_NULL(obj)); /* alloc 0 with pmemobj_tx_set_failure_behavior called */ TX_BEGIN(pop) { pmemobj_tx_set_failure_behavior(POBJ_TX_FAILURE_RETURN); TOID_ASSIGN(obj, pmemobj_tx_alloc(0, TYPE_XABORT)); } TX_ONCOMMIT { TOID_ASSIGN(obj, OID_NULL); } TX_ONABORT { UT_ASSERT(0); /* should not get to this point */ } TX_END UT_ASSERT(TOID_IS_NULL(obj)); /* xalloc 0 with pmemobj_tx_set_failure_behavior called */ TX_BEGIN(pop) { pmemobj_tx_set_failure_behavior(POBJ_TX_FAILURE_RETURN); TOID_ASSIGN(obj, pmemobj_tx_xalloc(0, TYPE_XABORT, 0)); } TX_ONCOMMIT { TOID_ASSIGN(obj, OID_NULL); } TX_ONABORT { UT_ASSERT(0); /* should not get to this point */ } TX_END UT_ASSERT(TOID_IS_NULL(obj)); /* zalloc 0 with pmemobj_tx_set_failure_behavior called */ TX_BEGIN(pop) { pmemobj_tx_set_failure_behavior(POBJ_TX_FAILURE_RETURN); TOID_ASSIGN(obj, pmemobj_tx_zalloc(0, TYPE_XABORT)); } TX_ONCOMMIT { TOID_ASSIGN(obj, OID_NULL); } TX_ONABORT { UT_ASSERT(0); /* should not get to this point */ } TX_END UT_ASSERT(TOID_IS_NULL(obj)); TOID(struct object) first; TOID_ASSIGN(first, POBJ_FIRST_TYPE_NUM(pop, TYPE_XABORT)); UT_ASSERT(TOID_IS_NULL(first)); /* xalloc ZERO */ TX_BEGIN(pop) { TOID_ASSIGN(obj, pmemobj_tx_xalloc(0, TYPE_XZEROED_ABORT, POBJ_XALLOC_ZERO)); UT_ASSERT(0); /* should not get to this point */ } TX_ONCOMMIT { UT_ASSERT(0); } TX_ONABORT { TOID_ASSIGN(obj, OID_NULL); } TX_END UT_ASSERT(TOID_IS_NULL(obj)); TOID_ASSIGN(first, POBJ_FIRST_TYPE_NUM(pop, TYPE_XZEROED_ABORT)); UT_ASSERT(TOID_IS_NULL(first)); } /* * do_tx_xalloc_huge -- allocates a huge object to trigger tx abort */ static void do_tx_xalloc_huge(PMEMobjpool *pop) { /* xalloc 0 */ TOID(struct object) obj; TX_BEGIN(pop) { TOID_ASSIGN(obj, pmemobj_tx_xalloc(PMEMOBJ_MAX_ALLOC_SIZE + 1, TYPE_XABORT, 0)); UT_ASSERT(0); /* should not get to this point */ } TX_ONCOMMIT { UT_ASSERT(0); } TX_ONABORT { TOID_ASSIGN(obj, OID_NULL); } TX_END UT_ASSERT(TOID_IS_NULL(obj)); TOID(struct object) first; TOID_ASSIGN(first, POBJ_FIRST_TYPE_NUM(pop, TYPE_XABORT)); UT_ASSERT(TOID_IS_NULL(first)); /* xalloc ZERO */ TX_BEGIN(pop) { TOID_ASSIGN(obj, pmemobj_tx_xalloc(PMEMOBJ_MAX_ALLOC_SIZE + 1, TYPE_XZEROED_ABORT, POBJ_XALLOC_ZERO)); UT_ASSERT(0); /* should not get to this point */ } TX_ONCOMMIT { UT_ASSERT(0); } TX_ONABORT { TOID_ASSIGN(obj, OID_NULL); } TX_END UT_ASSERT(TOID_IS_NULL(obj)); 
TOID_ASSIGN(first, POBJ_FIRST_TYPE_NUM(pop, TYPE_XZEROED_ABORT)); UT_ASSERT(TOID_IS_NULL(first)); /* * do xalloc until overfilled and then * free last successful allocation */ uint64_t tot_allocated = 0, alloc_size = (5 * 1024 *1024); int rc = 0; PMEMoid oid, prev_oid; POBJ_FOREACH_SAFE(pop, oid, prev_oid) { pmemobj_free(&oid); } TOID_ASSIGN(first, pmemobj_first(pop)); UT_ASSERT(TOID_IS_NULL(first)); TX_BEGIN(pop) { while (rc == 0) { oid = pmemobj_tx_xalloc(alloc_size, 0, POBJ_XALLOC_NO_ABORT); if (oid.off == 0) rc = -1; else { tot_allocated += alloc_size; prev_oid = oid; } } rc = pmemobj_tx_free(prev_oid); } TX_ONCOMMIT { UT_ASSERTeq(errno, ENOMEM); UT_ASSERTeq(rc, 0); } TX_ONABORT { UT_ASSERT(0); } TX_END } /* * do_tx_xalloc_commit -- allocates zeroed object */ static void do_tx_xalloc_commit(PMEMobjpool *pop) { /* xalloc 0 */ TOID(struct object) obj; TX_BEGIN(pop) { TOID_ASSIGN(obj, pmemobj_tx_xalloc(sizeof(struct object), TYPE_XCOMMIT, 0)); UT_ASSERT(!TOID_IS_NULL(obj)); D_RW(obj)->value = TEST_VALUE_1; } TX_ONCOMMIT { UT_ASSERTeq(D_RO(obj)->value, TEST_VALUE_1); } TX_ONABORT { UT_ASSERT(0); } TX_END TOID(struct object) first; TOID_ASSIGN(first, POBJ_FIRST_TYPE_NUM(pop, TYPE_XCOMMIT)); UT_ASSERT(TOID_EQUALS(first, obj)); UT_ASSERTeq(D_RO(first)->value, D_RO(obj)->value); TOID(struct object) next; TOID_ASSIGN(next, POBJ_NEXT_TYPE_NUM(first.oid)); UT_ASSERT(TOID_IS_NULL(next)); /* xalloc ZERO */ TX_BEGIN(pop) { TOID_ASSIGN(obj, pmemobj_tx_xalloc(sizeof(struct object), TYPE_XZEROED_COMMIT, POBJ_XALLOC_ZERO)); UT_ASSERT(!TOID_IS_NULL(obj)); UT_ASSERT(util_is_zeroed(D_RO(obj), sizeof(struct object))); D_RW(obj)->value = TEST_VALUE_1; } TX_ONCOMMIT { UT_ASSERTeq(D_RO(obj)->value, TEST_VALUE_1); } TX_ONABORT { UT_ASSERT(0); } TX_END TOID_ASSIGN(first, POBJ_FIRST_TYPE_NUM(pop, TYPE_XZEROED_COMMIT)); UT_ASSERT(TOID_EQUALS(first, obj)); UT_ASSERTeq(D_RO(first)->value, D_RO(obj)->value); TOID_ASSIGN(next, POBJ_NEXT_TYPE_NUM(first.oid)); UT_ASSERT(TOID_IS_NULL(next)); } /* * do_tx_xalloc_noflush -- allocates zeroed object */ static void do_tx_xalloc_noflush(PMEMobjpool *pop) { TOID(struct object) obj; TX_BEGIN(pop) { TOID_ASSIGN(obj, pmemobj_tx_xalloc(sizeof(struct object), TYPE_XNOFLUSHED_COMMIT, POBJ_XALLOC_NO_FLUSH)); UT_ASSERT(!TOID_IS_NULL(obj)); D_RW(obj)->data[OBJ_SIZE - sizeof(size_t) - 1] = TEST_VALUE_1; /* let pmemcheck find we didn't flush it */ } TX_ONCOMMIT { UT_ASSERTeq(D_RO(obj)->data[OBJ_SIZE - sizeof(size_t) - 1], TEST_VALUE_1); } TX_ONABORT { UT_ASSERT(0); } TX_END TOID(struct object) first; TOID_ASSIGN(first, POBJ_FIRST_TYPE_NUM(pop, TYPE_XNOFLUSHED_COMMIT)); UT_ASSERT(TOID_EQUALS(first, obj)); UT_ASSERTeq(D_RO(first)->data[OBJ_SIZE - sizeof(size_t) - 1], D_RO(obj)->data[OBJ_SIZE - sizeof(size_t) - 1]); TOID(struct object) next; TOID_ASSIGN(next, POBJ_NEXT_TYPE_NUM(first.oid)); UT_ASSERT(TOID_IS_NULL(next)); } /* * do_tx_root -- retrieve root inside of transaction */ static void do_tx_root(PMEMobjpool *pop) { size_t root_size = 24; TX_BEGIN(pop) { PMEMoid root = pmemobj_root(pop, root_size); UT_ASSERT(!OID_IS_NULL(root)); UT_ASSERT(util_is_zeroed(pmemobj_direct(root), root_size)); UT_ASSERTeq(root_size, pmemobj_root_size(pop)); } TX_ONABORT { UT_ASSERT(0); } TX_END } /* * do_tx_alloc_many -- allocates many objects inside of a single transaction */ static void do_tx_alloc_many(PMEMobjpool *pop) { #define TX_ALLOC_COUNT 70 /* bigger than max reservations */ PMEMoid oid, oid2; POBJ_FOREACH_SAFE(pop, oid, oid2) { pmemobj_free(&oid); } TOID(struct object) first; TOID_ASSIGN(first, 
pmemobj_first(pop)); UT_ASSERT(TOID_IS_NULL(first)); PMEMoid oids[TX_ALLOC_COUNT]; TX_BEGIN(pop) { for (int i = 0; i < TX_ALLOC_COUNT; ++i) { oids[i] = pmemobj_tx_alloc(1, 0); UT_ASSERT(!OID_IS_NULL(oids[i])); } } TX_ONABORT { UT_ASSERT(0); } TX_END TX_BEGIN(pop) { /* empty tx to make sure there's no leftover state */ } TX_ONABORT { UT_ASSERT(0); } TX_END TX_BEGIN(pop) { for (int i = 0; i < TX_ALLOC_COUNT; ++i) { pmemobj_tx_free(oids[i]); } } TX_ONABORT { UT_ASSERT(0); } TX_END TOID_ASSIGN(first, pmemobj_first(pop)); UT_ASSERT(TOID_IS_NULL(first)); #undef TX_ALLOC_COUNT } int main(int argc, char *argv[]) { START(argc, argv, "obj_tx_alloc"); util_init(); if (argc != 2) UT_FATAL("usage: %s [file]", argv[0]); PMEMobjpool *pop; if ((pop = pmemobj_create(argv[1], LAYOUT_NAME, 0, S_IWUSR | S_IRUSR)) == NULL) UT_FATAL("!pmemobj_create"); do_tx_root(pop); VALGRIND_WRITE_STATS; /* alloc */ do_tx_alloc_commit(pop); VALGRIND_WRITE_STATS; do_tx_alloc_abort(pop); VALGRIND_WRITE_STATS; do_tx_alloc_zerolen(pop); VALGRIND_WRITE_STATS; do_tx_alloc_huge(pop); VALGRIND_WRITE_STATS; /* zalloc */ do_tx_zalloc_commit(pop); VALGRIND_WRITE_STATS; do_tx_zalloc_abort(pop); VALGRIND_WRITE_STATS; do_tx_zalloc_zerolen(pop); VALGRIND_WRITE_STATS; do_tx_zalloc_huge(pop); VALGRIND_WRITE_STATS; /* xalloc */ do_tx_xalloc_commit(pop); VALGRIND_WRITE_STATS; do_tx_xalloc_abort(pop); VALGRIND_WRITE_STATS; do_tx_xalloc_zerolen(pop); VALGRIND_WRITE_STATS; do_tx_xalloc_huge(pop); VALGRIND_WRITE_STATS; /* alloc */ do_tx_alloc_commit_nested(pop); VALGRIND_WRITE_STATS; do_tx_alloc_abort_nested(pop); VALGRIND_WRITE_STATS; do_tx_alloc_abort_after_nested(pop); VALGRIND_WRITE_STATS; do_tx_alloc_oom(pop); VALGRIND_WRITE_STATS; do_tx_alloc_many(pop); VALGRIND_WRITE_STATS; do_tx_xalloc_noflush(pop); pmemobj_close(pop); DONE(NULL); }
20,667
21.862832
80
c
null
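For orientation, here is a condensed sketch of the commit/abort pattern every do_tx_* helper above is built around; the pool path "/pmem/txpool" and layout name "sketch" are placeholders.

/* tx_alloc_sketch.c -- minimal pmemobj_tx_alloc() commit/abort pattern */
#include <libpmemobj.h>
#include <stdio.h>

int
main(void)
{
	PMEMobjpool *pop = pmemobj_create("/pmem/txpool", "sketch",
			PMEMOBJ_MIN_POOL, 0666);
	if (pop == NULL) {
		perror("pmemobj_create");
		return 1;
	}

	TX_BEGIN(pop) {
		/* 0 is the type number; zero-sized or oversized requests
		 * abort the transaction instead of returning */
		PMEMoid oid = pmemobj_tx_alloc(128, 0);
		(void) oid;
	} TX_ONCOMMIT {
		printf("allocation committed\n");
	} TX_ONABORT {
		printf("allocation rolled back\n");
	} TX_END

	pmemobj_close(pop);
	return 0;
}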
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/blk_pool/blk_pool.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2019, Intel Corporation */ /* * blk_pool.c -- unit test for pmemblk_create() and pmemblk_open() * * usage: blk_pool op path bsize [poolsize mode] * * op can be: * c - create * o - open * f - do fault injection * * "poolsize" and "mode" arguments are ignored for "open" */ #include "unittest.h" #include "../libpmemblk/blk.h" #define MB ((size_t)1 << 20) static void do_fault_injection(const char *path, size_t bsize, size_t poolsize, unsigned mode) { if (!pmemblk_fault_injection_enabled()) return; pmemblk_inject_fault_at(PMEM_MALLOC, 1, "blk_runtime_init"); PMEMblkpool *pbp = pmemblk_create(path, bsize, poolsize, mode); UT_ASSERTeq(pbp, NULL); UT_ASSERTeq(errno, ENOMEM); } static void pool_create(const char *path, size_t bsize, size_t poolsize, unsigned mode) { PMEMblkpool *pbp = pmemblk_create(path, bsize, poolsize, mode); if (pbp == NULL) UT_OUT("!%s: pmemblk_create", path); else { os_stat_t stbuf; STAT(path, &stbuf); UT_OUT("%s: file size %zu usable blocks %zu mode 0%o", path, stbuf.st_size, pmemblk_nblock(pbp), stbuf.st_mode & 0777); pmemblk_close(pbp); int result = pmemblk_check(path, bsize); if (result < 0) UT_OUT("!%s: pmemblk_check", path); else if (result == 0) UT_OUT("%s: pmemblk_check: not consistent", path); else UT_ASSERTeq(pmemblk_check(path, bsize * 2), -1); } } static void pool_open(const char *path, size_t bsize) { PMEMblkpool *pbp = pmemblk_open(path, bsize); if (pbp == NULL) UT_OUT("!%s: pmemblk_open", path); else { UT_OUT("%s: pmemblk_open: Success", path); pmemblk_close(pbp); } } int main(int argc, char *argv[]) { START(argc, argv, "blk_pool"); if (argc < 4) UT_FATAL("usage: %s op path bsize [poolsize mode]", argv[0]); size_t bsize = strtoul(argv[3], NULL, 0); size_t poolsize; unsigned mode; switch (argv[1][0]) { case 'c': poolsize = strtoul(argv[4], NULL, 0) * MB; /* in megabytes */ mode = strtoul(argv[5], NULL, 8); pool_create(argv[2], bsize, poolsize, mode); break; case 'o': pool_open(argv[2], bsize); break; case 'f': poolsize = strtoul(argv[4], NULL, 0) * MB; /* in megabytes */ mode = strtoul(argv[5], NULL, 8); do_fault_injection(argv[2], bsize, poolsize, mode); break; default: UT_FATAL("unknown operation"); } DONE(NULL); }
2,377
20.423423
75
c
null
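A minimal pmemblk round trip using the same public entry points as pool_create() and pool_open() above; the path "/pmem/blkpool", the 512-byte block size and the mode are placeholders.

/* blk_roundtrip.c -- create a pool, write one block, read it back */
#include <libpmemblk.h>
#include <stdio.h>
#include <string.h>

#define BSIZE 512

int
main(void)
{
	PMEMblkpool *pbp = pmemblk_create("/pmem/blkpool", BSIZE,
			PMEMBLK_MIN_POOL, 0666);
	if (pbp == NULL) {
		perror("pmemblk_create");
		return 1;
	}

	char buf[BSIZE];
	memset(buf, 0xab, sizeof(buf));

	if (pmemblk_write(pbp, buf, 0) < 0)
		perror("pmemblk_write");

	memset(buf, 0, sizeof(buf));
	if (pmemblk_read(pbp, buf, 0) < 0)
		perror("pmemblk_read");

	printf("usable blocks: %zu\n", pmemblk_nblock(pbp));
	pmemblk_close(pbp);
	return 0;
}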
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/pmem_memcpy/pmem_memcpy.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2020, Intel Corporation */ /* * pmem_memcpy.c -- unit test for doing a memcpy * * usage: pmem_memcpy file destoff srcoff length * */ #include "unittest.h" #include "util_pmem.h" #include "file.h" #include "memcpy_common.h" static void * pmem_memcpy_persist_wrapper(void *pmemdest, const void *src, size_t len, unsigned flags) { (void) flags; return pmem_memcpy_persist(pmemdest, src, len); } static void * pmem_memcpy_nodrain_wrapper(void *pmemdest, const void *src, size_t len, unsigned flags) { (void) flags; return pmem_memcpy_nodrain(pmemdest, src, len); } static void do_persist_ddax(const void *ptr, size_t size) { util_persist_auto(1, ptr, size); } static void do_persist(const void *ptr, size_t size) { util_persist_auto(0, ptr, size); } /* * swap_mappings - swap given two mapped regions. * * Try swapping src and dest by unmapping src, mapping a new dest with * the original src address as a hint. If successful, unmap original dest. * Map a new src with the original dest as a hint. */ static void swap_mappings(char **dest, char **src, size_t size, int fd) { char *d = *dest; char *s = *src; char *td, *ts; MUNMAP(*src, size); /* mmap destination using src addr as a hint */ td = MMAP(s, size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0); MUNMAP(*dest, size); *dest = td; /* mmap src using original destination addr as a hint */ ts = MMAP(d, size, PROT_READ|PROT_WRITE, MAP_SHARED|MAP_ANONYMOUS, -1, 0); *src = ts; } /* * do_memcpy_variants -- do_memcpy wrapper that tests multiple variants * of memcpy functions */ static void do_memcpy_variants(int fd, char *dest, int dest_off, char *src, int src_off, size_t bytes, size_t mapped_len, const char *file_name, persist_fn p) { do_memcpy(fd, dest, dest_off, src, src_off, bytes, mapped_len, file_name, pmem_memcpy_persist_wrapper, 0, p); do_memcpy(fd, dest, dest_off, src, src_off, bytes, mapped_len, file_name, pmem_memcpy_nodrain_wrapper, 0, p); for (int i = 0; i < ARRAY_SIZE(Flags); ++i) { do_memcpy(fd, dest, dest_off, src, src_off, bytes, mapped_len, file_name, pmem_memcpy, Flags[i], p); } } int main(int argc, char *argv[]) { int fd; char *dest; char *src; char *dest_orig; char *src_orig; size_t mapped_len; if (argc != 5) UT_FATAL("usage: %s file srcoff destoff length", argv[0]); const char *thr = os_getenv("PMEM_MOVNT_THRESHOLD"); const char *avx = os_getenv("PMEM_AVX"); const char *avx512f = os_getenv("PMEM_AVX512F"); START(argc, argv, "pmem_memcpy %s %s %s %s %savx %savx512f", argv[2], argv[3], argv[4], thr ? thr : "default", avx ? "" : "!", avx512f ? "" : "!"); fd = OPEN(argv[1], O_RDWR); int dest_off = atoi(argv[2]); int src_off = atoi(argv[3]); size_t bytes = strtoul(argv[4], NULL, 0); /* src > dst */ dest_orig = dest = pmem_map_file(argv[1], 0, 0, 0, &mapped_len, NULL); if (dest == NULL) UT_FATAL("!could not map file: %s", argv[1]); src_orig = src = MMAP(dest + mapped_len, mapped_len, PROT_READ|PROT_WRITE, MAP_SHARED|MAP_ANONYMOUS, -1, 0); /* * Its very unlikely that src would not be > dest. pmem_map_file * chooses the first unused address >= 1TB, large * enough to hold the give range, and 1GB aligned. If the * addresses did not get swapped to allow src > dst, log error * and allow test to continue. */ if (src <= dest) { swap_mappings(&dest, &src, mapped_len, fd); if (src <= dest) UT_FATAL("cannot map files in memory order"); } enum file_type type = util_fd_get_type(fd); if (type < 0) UT_FATAL("cannot check type of file with fd %d", fd); persist_fn persist; persist = type == TYPE_DEVDAX ? 
do_persist_ddax : do_persist; memset(dest, 0, (2 * bytes)); persist(dest, 2 * bytes); memset(src, 0, (2 * bytes)); do_memcpy_variants(fd, dest, dest_off, src, src_off, bytes, 0, argv[1], persist); /* dest > src */ swap_mappings(&dest, &src, mapped_len, fd); if (dest <= src) UT_FATAL("cannot map files in memory order"); do_memcpy_variants(fd, dest, dest_off, src, src_off, bytes, 0, argv[1], persist); int ret = pmem_unmap(dest_orig, mapped_len); UT_ASSERTeq(ret, 0); MUNMAP(src_orig, mapped_len); CLOSE(fd); DONE(NULL); }
4,249
23.853801
76
c
null
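A minimal sketch of the libpmem calls the test drives through its wrappers (pmem_map_file, pmem_memcpy_persist); the file path and 4 KiB length are placeholders, and the msync branch follows the usual libpmem guidance for non-pmem mappings.

/* memcpy_sketch.c -- map a file and persist a small copy into it */
#include <libpmem.h>
#include <stdio.h>
#include <string.h>

int
main(void)
{
	size_t mapped_len;
	int is_pmem;
	char *dest = pmem_map_file("/pmem/testfile", 4096,
			PMEM_FILE_CREATE, 0666, &mapped_len, &is_pmem);
	if (dest == NULL) {
		perror("pmem_map_file");
		return 1;
	}

	const char src[] = "hello, persistent memory";

	if (is_pmem) {
		/* copy and flush to the persistence domain in one call */
		pmem_memcpy_persist(dest, src, sizeof(src));
	} else {
		/* regular memory-mapped file: fall back to msync */
		memcpy(dest, src, sizeof(src));
		pmem_msync(dest, sizeof(src));
	}

	pmem_unmap(dest, mapped_len);
	return 0;
}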
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_heap_interrupt/mocks_windows.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2016-2018, Intel Corporation */ /* * mocks_windows.h -- redefinitions of memops functions * * This file is Windows-specific. * * This file should be included (i.e. using Forced Include) by libpmemobj * files, when compiled for the purpose of obj_heap_interrupt test. * It would replace default implementation with mocked functions defined * in obj_heap_interrupt.c. * * These defines could be also passed as preprocessor definitions. */ #ifndef WRAP_REAL #define operation_finish __wrap_operation_finish #endif
578
27.95
73
h
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_pmalloc_mt/obj_pmalloc_mt.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2020, Intel Corporation */ /* * obj_pmalloc_mt.c -- multithreaded test of allocator */ #include <stdint.h> #include "file.h" #include "obj.h" #include "pmalloc.h" #include "sys_util.h" #include "unittest.h" #define MAX_THREADS 32 #define MAX_OPS_PER_THREAD 1000 #define ALLOC_SIZE 104 #define REALLOC_SIZE (ALLOC_SIZE * 3) #define MIX_RERUNS 2 #define CHUNKSIZE (1 << 18) #define CHUNKS_PER_THREAD 3 static unsigned Threads; static unsigned Ops_per_thread; static unsigned Tx_per_thread; struct action { struct pobj_action pact; os_mutex_t lock; os_cond_t cond; }; struct root { uint64_t offs[MAX_THREADS][MAX_OPS_PER_THREAD]; struct action actions[MAX_THREADS][MAX_OPS_PER_THREAD]; }; struct worker_args { PMEMobjpool *pop; struct root *r; unsigned idx; }; static void * alloc_worker(void *arg) { struct worker_args *a = arg; for (unsigned i = 0; i < Ops_per_thread; ++i) { pmalloc(a->pop, &a->r->offs[a->idx][i], ALLOC_SIZE, 0, 0); UT_ASSERTne(a->r->offs[a->idx][i], 0); } return NULL; } static void * realloc_worker(void *arg) { struct worker_args *a = arg; for (unsigned i = 0; i < Ops_per_thread; ++i) { prealloc(a->pop, &a->r->offs[a->idx][i], REALLOC_SIZE, 0, 0); UT_ASSERTne(a->r->offs[a->idx][i], 0); } return NULL; } static void * free_worker(void *arg) { struct worker_args *a = arg; for (unsigned i = 0; i < Ops_per_thread; ++i) { pfree(a->pop, &a->r->offs[a->idx][i]); UT_ASSERTeq(a->r->offs[a->idx][i], 0); } return NULL; } static void * mix_worker(void *arg) { struct worker_args *a = arg; /* * The mix scenario is ran twice to increase the chances of run * contention. */ for (unsigned j = 0; j < MIX_RERUNS; ++j) { for (unsigned i = 0; i < Ops_per_thread; ++i) { pmalloc(a->pop, &a->r->offs[a->idx][i], ALLOC_SIZE, 0, 0); UT_ASSERTne(a->r->offs[a->idx][i], 0); } for (unsigned i = 0; i < Ops_per_thread; ++i) { pfree(a->pop, &a->r->offs[a->idx][i]); UT_ASSERTeq(a->r->offs[a->idx][i], 0); } } return NULL; } static void * tx_worker(void *arg) { struct worker_args *a = arg; /* * Allocate objects until exhaustion, once that happens the transaction * will automatically abort and all of the objects will be freed. */ TX_BEGIN(a->pop) { for (unsigned n = 0; ; ++n) { /* this is NOT an infinite loop */ pmemobj_tx_alloc(ALLOC_SIZE, a->idx); if (Ops_per_thread != MAX_OPS_PER_THREAD && n == Ops_per_thread) { pmemobj_tx_abort(0); } } } TX_END return NULL; } static void * tx3_worker(void *arg) { struct worker_args *a = arg; /* * Allocate N objects, abort, repeat M times. Should reveal issues in * transaction abort handling. 
*/ for (unsigned n = 0; n < Tx_per_thread; ++n) { TX_BEGIN(a->pop) { for (unsigned i = 0; i < Ops_per_thread; ++i) { pmemobj_tx_alloc(ALLOC_SIZE, a->idx); } pmemobj_tx_abort(EINVAL); } TX_END } return NULL; } static void * alloc_free_worker(void *arg) { struct worker_args *a = arg; PMEMoid oid; for (unsigned i = 0; i < Ops_per_thread; ++i) { int err = pmemobj_alloc(a->pop, &oid, ALLOC_SIZE, 0, NULL, NULL); UT_ASSERTeq(err, 0); pmemobj_free(&oid); } return NULL; } #define OPS_PER_TX 10 #define STEP 8 #define TEST_LANES 4 static void * tx2_worker(void *arg) { struct worker_args *a = arg; for (unsigned n = 0; n < Tx_per_thread; ++n) { PMEMoid oids[OPS_PER_TX]; TX_BEGIN(a->pop) { for (int i = 0; i < OPS_PER_TX; ++i) { oids[i] = pmemobj_tx_alloc(ALLOC_SIZE, a->idx); for (unsigned j = 0; j < ALLOC_SIZE; j += STEP) { pmemobj_tx_add_range(oids[i], j, STEP); } } } TX_END TX_BEGIN(a->pop) { for (int i = 0; i < OPS_PER_TX; ++i) pmemobj_tx_free(oids[i]); } TX_ONABORT { UT_ASSERT(0); } TX_END } return NULL; } static void * action_cancel_worker(void *arg) { struct worker_args *a = arg; PMEMoid oid; for (unsigned i = 0; i < Ops_per_thread; ++i) { unsigned arr_id = a->idx / 2; struct action *act = &a->r->actions[arr_id][i]; if (a->idx % 2 == 0) { os_mutex_lock(&act->lock); oid = pmemobj_reserve(a->pop, &act->pact, ALLOC_SIZE, 0); UT_ASSERT(!OID_IS_NULL(oid)); os_cond_signal(&act->cond); os_mutex_unlock(&act->lock); } else { os_mutex_lock(&act->lock); while (act->pact.heap.offset == 0) os_cond_wait(&act->cond, &act->lock); pmemobj_cancel(a->pop, &act->pact, 1); os_mutex_unlock(&act->lock); } } return NULL; } static void * action_publish_worker(void *arg) { struct worker_args *a = arg; PMEMoid oid; for (unsigned i = 0; i < Ops_per_thread; ++i) { unsigned arr_id = a->idx / 2; struct action *act = &a->r->actions[arr_id][i]; if (a->idx % 2 == 0) { os_mutex_lock(&act->lock); oid = pmemobj_reserve(a->pop, &act->pact, ALLOC_SIZE, 0); UT_ASSERT(!OID_IS_NULL(oid)); os_cond_signal(&act->cond); os_mutex_unlock(&act->lock); } else { os_mutex_lock(&act->lock); while (act->pact.heap.offset == 0) os_cond_wait(&act->cond, &act->lock); pmemobj_publish(a->pop, &act->pact, 1); os_mutex_unlock(&act->lock); } } return NULL; } static void * action_mix_worker(void *arg) { struct worker_args *a = arg; PMEMoid oid; for (unsigned i = 0; i < Ops_per_thread; ++i) { unsigned arr_id = a->idx / 2; unsigned publish = i % 2; struct action *act = &a->r->actions[arr_id][i]; if (a->idx % 2 == 0) { os_mutex_lock(&act->lock); oid = pmemobj_reserve(a->pop, &act->pact, ALLOC_SIZE, 0); UT_ASSERT(!OID_IS_NULL(oid)); os_cond_signal(&act->cond); os_mutex_unlock(&act->lock); } else { os_mutex_lock(&act->lock); while (act->pact.heap.offset == 0) os_cond_wait(&act->cond, &act->lock); if (publish) pmemobj_publish(a->pop, &act->pact, 1); else pmemobj_cancel(a->pop, &act->pact, 1); os_mutex_unlock(&act->lock); } pmemobj_persist(a->pop, act, sizeof(*act)); } return NULL; } static void actions_clear(PMEMobjpool *pop, struct root *r) { for (unsigned i = 0; i < Threads; ++i) { for (unsigned j = 0; j < Ops_per_thread; ++j) { struct action *a = &r->actions[i][j]; util_mutex_destroy(&a->lock); util_mutex_init(&a->lock); util_cond_destroy(&a->cond); util_cond_init(&a->cond); memset(&a->pact, 0, sizeof(a->pact)); pmemobj_persist(pop, a, sizeof(*a)); } } } static void run_worker(void *(worker_func)(void *arg), struct worker_args args[]) { os_thread_t t[MAX_THREADS]; for (unsigned i = 0; i < Threads; ++i) THREAD_CREATE(&t[i], NULL, worker_func, &args[i]); for 
(unsigned i = 0; i < Threads; ++i) THREAD_JOIN(&t[i], NULL); } int main(int argc, char *argv[]) { START(argc, argv, "obj_pmalloc_mt"); if (argc != 5) UT_FATAL("usage: %s <threads> <ops/t> <tx/t> [file]", argv[0]); PMEMobjpool *pop; Threads = ATOU(argv[1]); if (Threads > MAX_THREADS) UT_FATAL("Threads %d > %d", Threads, MAX_THREADS); Ops_per_thread = ATOU(argv[2]); if (Ops_per_thread > MAX_OPS_PER_THREAD) UT_FATAL("Ops per thread %d > %d", Threads, MAX_THREADS); Tx_per_thread = ATOU(argv[3]); int exists = util_file_exists(argv[4]); if (exists < 0) UT_FATAL("!util_file_exists"); if (!exists) { pop = pmemobj_create(argv[4], "TEST", (PMEMOBJ_MIN_POOL) + (MAX_THREADS * CHUNKSIZE * CHUNKS_PER_THREAD), 0666); if (pop == NULL) UT_FATAL("!pmemobj_create"); } else { pop = pmemobj_open(argv[4], "TEST"); if (pop == NULL) UT_FATAL("!pmemobj_open"); } PMEMoid oid = pmemobj_root(pop, sizeof(struct root)); struct root *r = pmemobj_direct(oid); UT_ASSERTne(r, NULL); struct worker_args args[MAX_THREADS]; for (unsigned i = 0; i < Threads; ++i) { args[i].pop = pop; args[i].r = r; args[i].idx = i; for (unsigned j = 0; j < Ops_per_thread; ++j) { struct action *a = &r->actions[i][j]; util_mutex_init(&a->lock); util_cond_init(&a->cond); } } run_worker(alloc_worker, args); run_worker(realloc_worker, args); run_worker(free_worker, args); run_worker(mix_worker, args); run_worker(alloc_free_worker, args); run_worker(action_cancel_worker, args); actions_clear(pop, r); run_worker(action_publish_worker, args); actions_clear(pop, r); run_worker(action_mix_worker, args); /* * Reduce the number of lanes to a value smaller than the number of * threads. This will ensure that at least some of the state of the lane * will be shared between threads. Doing this might reveal bugs related * to runtime race detection instrumentation. */ unsigned old_nlanes = pop->lanes_desc.runtime_nlanes; pop->lanes_desc.runtime_nlanes = TEST_LANES; run_worker(tx2_worker, args); pop->lanes_desc.runtime_nlanes = old_nlanes; /* * This workload might create many allocation classes due to pvector, * keep it last. */ if (Threads == MAX_THREADS) /* don't run for short tests */ run_worker(tx_worker, args); run_worker(tx3_worker, args); pmemobj_close(pop); DONE(NULL); } #ifdef _MSC_VER /* * Since libpmemobj is linked statically, we need to invoke its ctor/dtor. */ MSVC_CONSTR(libpmemobj_init) MSVC_DESTR(libpmemobj_fini) #endif
9,123
21.09201
74
c
null
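A single-threaded sketch of the reserve/publish/cancel flow that action_cancel_worker, action_publish_worker and action_mix_worker interleave above; the pool path, layout name and 104-byte size (ALLOC_SIZE in the test) are placeholders.

/* action_sketch.c -- reserve two objects, publish one, cancel the other */
#include <libpmemobj.h>
#include <stdio.h>

int
main(void)
{
	PMEMobjpool *pop = pmemobj_create("/pmem/actpool", "sketch",
			PMEMOBJ_MIN_POOL, 0666);
	if (pop == NULL) {
		perror("pmemobj_create");
		return 1;
	}

	struct pobj_action acts[2];
	PMEMoid a = pmemobj_reserve(pop, &acts[0], 104, 0);
	PMEMoid b = pmemobj_reserve(pop, &acts[1], 104, 0);
	if (OID_IS_NULL(a) || OID_IS_NULL(b)) {
		perror("pmemobj_reserve");
		pmemobj_close(pop);
		return 1;
	}

	/* a reservation stays invisible until it is published... */
	pmemobj_publish(pop, &acts[0], 1);
	/* ...and leaves no trace if it is cancelled instead */
	pmemobj_cancel(pop, &acts[1], 1);

	pmemobj_close(pop);
	return 0;
}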
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_ctl_alignment/obj_ctl_alignment.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2018-2020, Intel Corporation */ /* * obj_ctl_alignment.c -- tests for the alloc class alignment */ #include "unittest.h" #define LAYOUT "obj_ctl_alignment" static PMEMobjpool *pop; static void test_fail(void) { struct pobj_alloc_class_desc ac; ac.header_type = POBJ_HEADER_NONE; ac.unit_size = 1024 - 1; ac.units_per_block = 100; ac.alignment = 512; int ret = pmemobj_ctl_set(pop, "heap.alloc_class.new.desc", &ac); UT_ASSERTeq(ret, -1); /* unit_size must be multiple of alignment */ } static void test_aligned_allocs(size_t size, size_t alignment, enum pobj_header_type htype) { struct pobj_alloc_class_desc ac; ac.header_type = htype; ac.unit_size = size; ac.units_per_block = 100; ac.alignment = alignment; int ret = pmemobj_ctl_set(pop, "heap.alloc_class.new.desc", &ac); UT_ASSERTeq(ret, 0); PMEMoid oid; ret = pmemobj_xalloc(pop, &oid, 1, 0, POBJ_CLASS_ID(ac.class_id), NULL, NULL); UT_ASSERTeq(ret, 0); UT_ASSERTeq(oid.off % alignment, 0); UT_ASSERTeq((uintptr_t)pmemobj_direct(oid) % alignment, 0); ret = pmemobj_xalloc(pop, &oid, 1, 0, POBJ_CLASS_ID(ac.class_id), NULL, NULL); UT_ASSERTeq(ret, 0); UT_ASSERTeq(oid.off % alignment, 0); UT_ASSERTeq((uintptr_t)pmemobj_direct(oid) % alignment, 0); char query[1024]; SNPRINTF(query, 1024, "heap.alloc_class.%u.desc", ac.class_id); struct pobj_alloc_class_desc read_ac; ret = pmemobj_ctl_get(pop, query, &read_ac); UT_ASSERTeq(ret, 0); UT_ASSERTeq(ac.alignment, read_ac.alignment); } int main(int argc, char *argv[]) { START(argc, argv, "obj_ctl_alignment"); if (argc != 2) UT_FATAL("usage: %s file-name", argv[0]); const char *path = argv[1]; if ((pop = pmemobj_create(path, LAYOUT, PMEMOBJ_MIN_POOL * 10, S_IWUSR | S_IRUSR)) == NULL) UT_FATAL("!pmemobj_create: %s", path); test_fail(); test_aligned_allocs(1024, 512, POBJ_HEADER_NONE); test_aligned_allocs(1024, 512, POBJ_HEADER_COMPACT); test_aligned_allocs(64, 64, POBJ_HEADER_COMPACT); pmemobj_close(pop); DONE(NULL); }
2,055
23.47619
79
c
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_list/mocks_windows.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2016-2018, Intel Corporation */ /* * mocks_windows.h -- redefinitions of obj list functions * * This file is Windows-specific. * * This file should be included (i.e. using Forced Include) by libpmemobj * files, when compiled for the purpose of obj_list test. * It would replace default implementation with mocked functions defined * in obj_list.c. * * These defines could be also passed as preprocessor definitions. */ #if defined(__cplusplus) extern "C" { #endif #ifdef WRAP_REAL #define WRAP_REAL_PMALLOC #define WRAP_REAL_ULOG #define WRAP_REAL_LANE #define WRAP_REAL_HEAP #define WRAP_REAL_PMEMOBJ #endif #ifndef WRAP_REAL_PMALLOC #define pmalloc __wrap_pmalloc #define pfree __wrap_pfree #define pmalloc_construct __wrap_pmalloc_construct #define prealloc __wrap_prealloc #define prealloc_construct __wrap_prealloc_construct #define palloc_usable_size __wrap_palloc_usable_size #define palloc_reserve __wrap_palloc_reserve #define palloc_publish __wrap_palloc_publish #define palloc_defer_free __wrap_palloc_defer_free #endif #ifndef WRAP_REAL_ULOG #define ulog_store __wrap_ulog_store #define ulog_process __wrap_ulog_process #endif #ifndef WRAP_REAL_LANE #define lane_hold __wrap_lane_hold #define lane_release __wrap_lane_release #define lane_recover_and_section_boot __wrap_lane_recover_and_section_boot #define lane_section_cleanup __wrap_lane_section_cleanup #endif #ifndef WRAP_REAL_HEAP #define heap_boot __wrap_heap_boot #endif #ifndef WRAP_REAL_PMEMOBJ #define pmemobj_alloc __wrap_pmemobj_alloc #define pmemobj_alloc_usable_size __wrap_pmemobj_alloc_usable_size #define pmemobj_openU __wrap_pmemobj_open #define pmemobj_close __wrap_pmemobj_close #define pmemobj_direct __wrap_pmemobj_direct #define pmemobj_pool_by_oid __wrap_pmemobj_pool_by_oid #define pmemobj_pool_by_ptr __wrap_pmemobj_pool_by_ptr #endif #if defined(__cplusplus) } #endif
1,933
26.628571
74
h
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_list/obj_list.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2018, Intel Corporation */ /* * obj_list.h -- unit tests for list module */ #include <stddef.h> #include <sys/param.h> #include "list.h" #include "obj.h" #include "lane.h" #include "unittest.h" #include "util.h" /* offset to "in band" item */ #define OOB_OFF (sizeof(struct oob_header)) /* pmemobj initial heap offset */ #define HEAP_OFFSET 8192 TOID_DECLARE(struct item, 0); TOID_DECLARE(struct list, 1); TOID_DECLARE(struct oob_list, 2); TOID_DECLARE(struct oob_item, 3); struct item { int id; POBJ_LIST_ENTRY(struct item) next; }; struct oob_header { char data[48]; }; struct oob_item { struct oob_header oob; struct item item; }; struct oob_list { struct list_head head; }; struct list { POBJ_LIST_HEAD(listhead, struct item) head; }; enum ulog_fail { /* don't fail at all */ NO_FAIL, /* fail after ulog_store */ FAIL_AFTER_FINISH, /* fail before ulog_store */ FAIL_BEFORE_FINISH, /* fail after process */ FAIL_AFTER_PROCESS }; /* global handle to pmemobj pool */ extern PMEMobjpool *Pop; /* pointer to heap offset */ extern uint64_t *Heap_offset; /* list lane section */ extern struct lane Lane; /* actual item id */ extern int *Id; /* fail event */ extern enum ulog_fail Ulog_fail; /* global "in band" lists */ extern TOID(struct list) List; extern TOID(struct list) List_sec; /* global "out of band" lists */ extern TOID(struct oob_list) List_oob; extern TOID(struct oob_list) List_oob_sec; extern TOID(struct oob_item) *Item; /* usage macros */ #define FATAL_USAGE()\ UT_FATAL("usage: obj_list <file> [PRnifr]") #define FATAL_USAGE_PRINT()\ UT_FATAL("usage: obj_list <file> P:<list>") #define FATAL_USAGE_PRINT_REVERSE()\ UT_FATAL("usage: obj_list <file> R:<list>") #define FATAL_USAGE_INSERT()\ UT_FATAL("usage: obj_list <file> i:<where>:<num>") #define FATAL_USAGE_INSERT_NEW()\ UT_FATAL("usage: obj_list <file> n:<where>:<num>:<value>") #define FATAL_USAGE_REMOVE_FREE()\ UT_FATAL("usage: obj_list <file> f:<list>:<num>:<from>") #define FATAL_USAGE_REMOVE()\ UT_FATAL("usage: obj_list <file> r:<num>") #define FATAL_USAGE_MOVE()\ UT_FATAL("usage: obj_list <file> m:<num>:<where>:<num>") #define FATAL_USAGE_FAIL()\ UT_FATAL("usage: obj_list <file> "\ "F:<after_finish|before_finish|after_process>")
2,314
21.475728
59
h
null
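obj_list.h above drives the internal list module; for contrast, here is a rough sketch of the public POBJ_LIST counterpart of the same item/list shape. The pool path "/pmem/listpool" and layout name are placeholders.

/* list_sketch.c -- insert one item into a persistent list and walk it */
#include <libpmemobj.h>
#include <stdio.h>

POBJ_LAYOUT_BEGIN(list_sketch);
POBJ_LAYOUT_ROOT(list_sketch, struct base);
POBJ_LAYOUT_TOID(list_sketch, struct item);
POBJ_LAYOUT_END(list_sketch);

struct item {
	int id;
	POBJ_LIST_ENTRY(struct item) next;
};

struct base {
	POBJ_LIST_HEAD(ihead, struct item) head;
};

/* constructor run atomically with the insert */
static int
item_init(PMEMobjpool *pop, void *ptr, void *arg)
{
	struct item *it = ptr;
	it->id = *(int *)arg;
	pmemobj_persist(pop, &it->id, sizeof(it->id));
	return 0;
}

int
main(void)
{
	PMEMobjpool *pop = pmemobj_create("/pmem/listpool",
			POBJ_LAYOUT_NAME(list_sketch), PMEMOBJ_MIN_POOL, 0666);
	if (pop == NULL) {
		perror("pmemobj_create");
		return 1;
	}

	TOID(struct base) base = POBJ_ROOT(pop, struct base);
	int id = 1;
	POBJ_LIST_INSERT_NEW_HEAD(pop, &D_RW(base)->head, next,
			sizeof(struct item), item_init, &id);

	TOID(struct item) it;
	POBJ_LIST_FOREACH(it, &D_RO(base)->head, next)
		printf("item %d\n", D_RO(it)->id);

	pmemobj_close(pop);
	return 0;
}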
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_list/obj_list_mocks.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2020, Intel Corporation */ /* * obj_list_mocks.c -- mocks for redo/lane/heap/obj modules */ #include <inttypes.h> #include "valgrind_internal.h" #include "obj_list.h" #include "set.h" /* * pmem_drain_nop -- no operation for drain on non-pmem memory */ static void pmem_drain_nop(void) { /* NOP */ } /* * obj_persist -- pmemobj version of pmem_persist w/o replication */ static int obj_persist(void *ctx, const void *addr, size_t len, unsigned flags) { PMEMobjpool *pop = (PMEMobjpool *)ctx; pop->persist_local(addr, len); return 0; } /* * obj_flush -- pmemobj version of pmem_flush w/o replication */ static int obj_flush(void *ctx, const void *addr, size_t len, unsigned flags) { PMEMobjpool *pop = (PMEMobjpool *)ctx; pop->flush_local(addr, len); return 0; } static uintptr_t Pool_addr; static size_t Pool_size; static void obj_msync_nofail(const void *addr, size_t size) { uintptr_t addr_ptrt = (uintptr_t)addr; /* * Verify msynced range is in the last mapped file range. Useful for * catching errors which normally would be caught only on Windows by * win_mmap.c. */ if (addr_ptrt < Pool_addr || addr_ptrt >= Pool_addr + Pool_size || addr_ptrt + size >= Pool_addr + Pool_size) UT_FATAL("<0x%" PRIxPTR ",0x%" PRIxPTR "> " "not in <0x%" PRIxPTR ",0x%" PRIxPTR "> range", addr_ptrt, addr_ptrt + size, Pool_addr, Pool_addr + Pool_size); if (pmem_msync(addr, size)) UT_FATAL("!pmem_msync"); } /* * obj_drain -- pmemobj version of pmem_drain w/o replication */ static void obj_drain(void *ctx) { PMEMobjpool *pop = (PMEMobjpool *)ctx; pop->drain_local(); } static void * obj_memcpy(void *ctx, void *dest, const void *src, size_t len, unsigned flags) { return pmem_memcpy(dest, src, len, flags); } static void * obj_memset(void *ctx, void *ptr, int c, size_t sz, unsigned flags) { return pmem_memset(ptr, c, sz, flags); } /* * linear_alloc -- allocates `size` bytes (rounded up to 8 bytes) and returns * offset to the allocated object */ static uint64_t linear_alloc(uint64_t *cur_offset, size_t size) { uint64_t ret = *cur_offset; *cur_offset += roundup(size, sizeof(uint64_t)); return ret; } /* * pmemobj_open -- pmemobj_open mock * * This function initializes the pmemobj pool for purposes of this * unittest. 
*/ FUNC_MOCK(pmemobj_open, PMEMobjpool *, const char *fname, const char *layout) FUNC_MOCK_RUN_DEFAULT { size_t size; int is_pmem; void *addr = pmem_map_file(fname, 0, 0, 0, &size, &is_pmem); if (!addr) { UT_OUT("!%s: pmem_map_file", fname); return NULL; } Pool_addr = (uintptr_t)addr; Pool_size = size; Pop = (PMEMobjpool *)addr; Pop->addr = Pop; Pop->is_pmem = is_pmem; Pop->rdonly = 0; Pop->uuid_lo = 0x12345678; VALGRIND_REMOVE_PMEM_MAPPING(&Pop->mutex_head, sizeof(Pop->mutex_head)); VALGRIND_REMOVE_PMEM_MAPPING(&Pop->rwlock_head, sizeof(Pop->rwlock_head)); VALGRIND_REMOVE_PMEM_MAPPING(&Pop->cond_head, sizeof(Pop->cond_head)); Pop->mutex_head = NULL; Pop->rwlock_head = NULL; Pop->cond_head = NULL; if (Pop->is_pmem) { Pop->persist_local = pmem_persist; Pop->flush_local = pmem_flush; Pop->drain_local = pmem_drain; Pop->memcpy_local = pmem_memcpy; Pop->memset_local = pmem_memset; } else { Pop->persist_local = obj_msync_nofail; Pop->flush_local = obj_msync_nofail; Pop->drain_local = pmem_drain_nop; Pop->memcpy_local = pmem_memcpy; Pop->memset_local = pmem_memset; } Pop->p_ops.persist = obj_persist; Pop->p_ops.flush = obj_flush; Pop->p_ops.drain = obj_drain; Pop->p_ops.memcpy = obj_memcpy; Pop->p_ops.memset = obj_memset; Pop->p_ops.base = Pop; struct pmem_ops *p_ops = &Pop->p_ops; Pop->heap_offset = HEAP_OFFSET; Pop->heap_size = size - Pop->heap_offset; uint64_t heap_offset = HEAP_OFFSET; Heap_offset = (uint64_t *)((uintptr_t)Pop + linear_alloc(&heap_offset, sizeof(*Heap_offset))); Id = (int *)((uintptr_t)Pop + linear_alloc(&heap_offset, sizeof(*Id))); /* Alloc lane layout */ Lane.layout = (struct lane_layout *)((uintptr_t)Pop + linear_alloc(&heap_offset, LANE_TOTAL_SIZE)); /* Alloc in band lists */ List.oid.pool_uuid_lo = Pop->uuid_lo; List.oid.off = linear_alloc(&heap_offset, sizeof(struct list)); List_sec.oid.pool_uuid_lo = Pop->uuid_lo; List_sec.oid.off = linear_alloc(&heap_offset, sizeof(struct list)); /* Alloc out of band lists */ List_oob.oid.pool_uuid_lo = Pop->uuid_lo; List_oob.oid.off = linear_alloc(&heap_offset, sizeof(struct oob_list)); List_oob_sec.oid.pool_uuid_lo = Pop->uuid_lo; List_oob_sec.oid.off = linear_alloc(&heap_offset, sizeof(struct oob_list)); Item = (union oob_item_toid *)((uintptr_t)Pop + linear_alloc(&heap_offset, sizeof(*Item))); Item->oid.pool_uuid_lo = Pop->uuid_lo; Item->oid.off = linear_alloc(&heap_offset, sizeof(struct oob_item)); pmemops_persist(p_ops, Item, sizeof(*Item)); if (*Heap_offset == 0) { *Heap_offset = heap_offset; pmemops_persist(p_ops, Heap_offset, sizeof(*Heap_offset)); } pmemops_persist(p_ops, Pop, HEAP_OFFSET); Pop->run_id += 2; pmemops_persist(p_ops, &Pop->run_id, sizeof(Pop->run_id)); Lane.external = operation_new((struct ulog *)&Lane.layout->external, LANE_REDO_EXTERNAL_SIZE, NULL, NULL, p_ops, LOG_TYPE_REDO); return Pop; } FUNC_MOCK_END /* * pmemobj_close -- pmemobj_close mock * * Just unmap the mapped area. */ FUNC_MOCK(pmemobj_close, void, PMEMobjpool *pop) FUNC_MOCK_RUN_DEFAULT { operation_delete(Lane.external); UT_ASSERTeq(pmem_unmap(Pop, Pop->heap_size + Pop->heap_offset), 0); Pop = NULL; Pool_addr = 0; Pool_size = 0; } FUNC_MOCK_END /* * pmemobj_pool_by_ptr -- pmemobj_pool_by_ptr mock * * Just return Pop. 
*/ FUNC_MOCK_RET_ALWAYS(pmemobj_pool_by_ptr, PMEMobjpool *, Pop, const void *ptr); /* * pmemobj_direct -- pmemobj_direct mock */ FUNC_MOCK(pmemobj_direct, void *, PMEMoid oid) FUNC_MOCK_RUN_DEFAULT { return (void *)((uintptr_t)Pop + oid.off); } FUNC_MOCK_END FUNC_MOCK_RET_ALWAYS(pmemobj_pool_by_oid, PMEMobjpool *, Pop, PMEMoid oid); /* * pmemobj_alloc_usable_size -- pmemobj_alloc_usable_size mock */ FUNC_MOCK(pmemobj_alloc_usable_size, size_t, PMEMoid oid) FUNC_MOCK_RUN_DEFAULT { size_t size = palloc_usable_size( &Pop->heap, oid.off - OOB_OFF); return size - OOB_OFF; } FUNC_MOCK_END /* * pmemobj_alloc -- pmemobj_alloc mock * * Allocates an object using pmalloc and return PMEMoid. */ FUNC_MOCK(pmemobj_alloc, int, PMEMobjpool *pop, PMEMoid *oidp, size_t size, uint64_t type_num, pmemobj_constr constructor, void *arg) FUNC_MOCK_RUN_DEFAULT { PMEMoid oid = {0, 0}; oid.pool_uuid_lo = 0; pmalloc(pop, &oid.off, size, 0, 0); if (oidp) { *oidp = oid; if (OBJ_PTR_FROM_POOL(pop, oidp)) pmemops_persist(&Pop->p_ops, oidp, sizeof(*oidp)); } return 0; } FUNC_MOCK_END /* * lane_hold -- lane_hold mock * * Returns pointer to list lane section. */ FUNC_MOCK(lane_hold, unsigned, PMEMobjpool *pop, struct lane **lane) FUNC_MOCK_RUN_DEFAULT { *lane = &Lane; return 0; } FUNC_MOCK_END /* * lane_release -- lane_release mock * * Always returns success. */ FUNC_MOCK_RET_ALWAYS_VOID(lane_release, PMEMobjpool *pop); /* * lane_recover_and_section_boot -- lane_recover_and_section_boot mock */ FUNC_MOCK(lane_recover_and_section_boot, int, PMEMobjpool *pop) FUNC_MOCK_RUN_DEFAULT { ulog_recover((struct ulog *)&Lane.layout->external, OBJ_OFF_IS_VALID_FROM_CTX, &pop->p_ops); return 0; } FUNC_MOCK_END /* * lane_section_cleanup -- lane_section_cleanup mock */ FUNC_MOCK(lane_section_cleanup, int, PMEMobjpool *pop) FUNC_MOCK_RUN_DEFAULT { return 0; } FUNC_MOCK_END /* * ulog_store_last -- ulog_store_last mock */ FUNC_MOCK(ulog_store, void, struct ulog *dest, struct ulog *src, size_t nbytes, size_t redo_base_nbytes, size_t ulog_base_capacity, struct ulog_next *next, const struct pmem_ops *p_ops) FUNC_MOCK_RUN_DEFAULT { switch (Ulog_fail) { case FAIL_AFTER_FINISH: _FUNC_REAL(ulog_store)(dest, src, nbytes, redo_base_nbytes, ulog_base_capacity, next, p_ops); DONEW(NULL); break; case FAIL_BEFORE_FINISH: DONEW(NULL); break; default: _FUNC_REAL(ulog_store)(dest, src, nbytes, redo_base_nbytes, ulog_base_capacity, next, p_ops); break; } } FUNC_MOCK_END /* * ulog_process -- ulog_process mock */ FUNC_MOCK(ulog_process, void, struct ulog *ulog, ulog_check_offset_fn check, const struct pmem_ops *p_ops) FUNC_MOCK_RUN_DEFAULT { _FUNC_REAL(ulog_process)(ulog, check, p_ops); if (Ulog_fail == FAIL_AFTER_PROCESS) { DONEW(NULL); } } FUNC_MOCK_END /* * heap_boot -- heap_boot mock * * Always returns success. */ FUNC_MOCK_RET_ALWAYS(heap_boot, int, 0, PMEMobjpool *pop);
8,765
22.691892
79
c
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_list/obj_list_mocks_palloc.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2019, Intel Corporation */ /* * obj_list_mocks_palloc.c -- mocks for palloc/pmalloc modules */ #include "obj_list.h" /* * pmalloc -- pmalloc mock * * Allocates the memory using linear allocator. * Prints the id of allocated struct oob_item for tracking purposes. */ FUNC_MOCK(pmalloc, int, PMEMobjpool *pop, uint64_t *ptr, size_t size, uint64_t extra_field, uint16_t flags) FUNC_MOCK_RUN_DEFAULT { struct pmem_ops *p_ops = &Pop->p_ops; size = size + OOB_OFF + sizeof(uint64_t) * 2; uint64_t *alloc_size = (uint64_t *)((uintptr_t)Pop + *Heap_offset); *alloc_size = size; pmemops_persist(p_ops, alloc_size, sizeof(*alloc_size)); *ptr = *Heap_offset + sizeof(uint64_t); if (OBJ_PTR_FROM_POOL(pop, ptr)) pmemops_persist(p_ops, ptr, sizeof(*ptr)); struct oob_item *item = (struct oob_item *)((uintptr_t)Pop + *ptr); *ptr += OOB_OFF; if (OBJ_PTR_FROM_POOL(pop, ptr)) pmemops_persist(p_ops, ptr, sizeof(*ptr)); item->item.id = *Id; pmemops_persist(p_ops, &item->item.id, sizeof(item->item.id)); (*Id)++; pmemops_persist(p_ops, Id, sizeof(*Id)); *Heap_offset = *Heap_offset + sizeof(uint64_t) + size + OOB_OFF; pmemops_persist(p_ops, Heap_offset, sizeof(*Heap_offset)); UT_OUT("pmalloc(id = %d)", item->item.id); return 0; } FUNC_MOCK_END /* * pfree -- pfree mock * * Just prints freeing struct oob_item id. Doesn't free the memory. */ FUNC_MOCK(pfree, void, PMEMobjpool *pop, uint64_t *ptr) FUNC_MOCK_RUN_DEFAULT { struct oob_item *item = (struct oob_item *)((uintptr_t)Pop + *ptr - OOB_OFF); UT_OUT("pfree(id = %d)", item->item.id); *ptr = 0; if (OBJ_PTR_FROM_POOL(pop, ptr)) pmemops_persist(&Pop->p_ops, ptr, sizeof(*ptr)); return; } FUNC_MOCK_END /* * pmalloc_construct -- pmalloc_construct mock * * Allocates the memory using linear allocator and invokes the constructor. * Prints the id of allocated struct oob_item for tracking purposes. 
*/ FUNC_MOCK(pmalloc_construct, int, PMEMobjpool *pop, uint64_t *off, size_t size, palloc_constr constructor, void *arg, uint64_t extra_field, uint16_t flags, uint16_t class_id) FUNC_MOCK_RUN_DEFAULT { struct pmem_ops *p_ops = &Pop->p_ops; size = size + OOB_OFF + sizeof(uint64_t) * 2; uint64_t *alloc_size = (uint64_t *)((uintptr_t)Pop + *Heap_offset); *alloc_size = size; pmemops_persist(p_ops, alloc_size, sizeof(*alloc_size)); *off = *Heap_offset + sizeof(uint64_t) + OOB_OFF; if (OBJ_PTR_FROM_POOL(pop, off)) pmemops_persist(p_ops, off, sizeof(*off)); *Heap_offset = *Heap_offset + sizeof(uint64_t) + size; pmemops_persist(p_ops, Heap_offset, sizeof(*Heap_offset)); void *ptr = (void *)((uintptr_t)Pop + *off); constructor(pop, ptr, size, arg); return 0; } FUNC_MOCK_END /* * prealloc -- prealloc mock */ FUNC_MOCK(prealloc, int, PMEMobjpool *pop, uint64_t *off, size_t size, uint64_t extra_field, uint16_t flags) FUNC_MOCK_RUN_DEFAULT { uint64_t *alloc_size = (uint64_t *)((uintptr_t)Pop + *off - sizeof(uint64_t)); struct item *item = (struct item *)((uintptr_t)Pop + *off + OOB_OFF); if (*alloc_size >= size) { *alloc_size = size; pmemops_persist(&Pop->p_ops, alloc_size, sizeof(*alloc_size)); UT_OUT("prealloc(id = %d, size = %zu) = true", item->id, (size - OOB_OFF) / sizeof(struct item)); return 0; } else { UT_OUT("prealloc(id = %d, size = %zu) = false", item->id, (size - OOB_OFF) / sizeof(struct item)); return -1; } } FUNC_MOCK_END /* * prealloc_construct -- prealloc_construct mock */ FUNC_MOCK(prealloc_construct, int, PMEMobjpool *pop, uint64_t *off, size_t size, palloc_constr constructor, void *arg, uint64_t extra_field, uint16_t flags, uint16_t class_id) FUNC_MOCK_RUN_DEFAULT { int ret = __wrap_prealloc(pop, off, size, 0, 0); if (!ret) { void *ptr = (void *)((uintptr_t)Pop + *off + OOB_OFF); constructor(pop, ptr, size, arg); } return ret; } FUNC_MOCK_END /* * palloc_reserve -- palloc_reserve mock */ FUNC_MOCK(palloc_reserve, int, struct palloc_heap *heap, size_t size, palloc_constr constructor, void *arg, uint64_t extra_field, uint16_t object_flags, uint16_t class_id, uint16_t arena_id, struct pobj_action *act) FUNC_MOCK_RUN_DEFAULT { struct pmem_ops *p_ops = &Pop->p_ops; size = size + OOB_OFF + sizeof(uint64_t) * 2; uint64_t *alloc_size = (uint64_t *)((uintptr_t)Pop + *Heap_offset); *alloc_size = size; pmemops_persist(p_ops, alloc_size, sizeof(*alloc_size)); act->heap.offset = *Heap_offset + sizeof(uint64_t); struct oob_item *item = (struct oob_item *)((uintptr_t)Pop + act->heap.offset); act->heap.offset += OOB_OFF; item->item.id = *Id; pmemops_persist(p_ops, &item->item.id, sizeof(item->item.id)); (*Id)++; pmemops_persist(p_ops, Id, sizeof(*Id)); *Heap_offset += sizeof(uint64_t) + size + OOB_OFF; pmemops_persist(p_ops, Heap_offset, sizeof(*Heap_offset)); UT_OUT("pmalloc(id = %d)", item->item.id); return 0; } FUNC_MOCK_END /* * palloc_publish -- mock publish, must process operation */ FUNC_MOCK(palloc_publish, void, struct palloc_heap *heap, struct pobj_action *actv, size_t actvcnt, struct operation_context *ctx) FUNC_MOCK_RUN_DEFAULT { operation_process(ctx); operation_finish(ctx, 0); } FUNC_MOCK_END /* * palloc_defer_free -- pfree mock * * Just prints freeing struct oob_item id. Doesn't free the memory. 
*/ FUNC_MOCK(palloc_defer_free, void, struct palloc_heap *heap, uint64_t off, struct pobj_action *act) FUNC_MOCK_RUN_DEFAULT { struct oob_item *item = (struct oob_item *)((uintptr_t)Pop + off - OOB_OFF); UT_OUT("pfree(id = %d)", item->item.id); act->heap.offset = off; return; } FUNC_MOCK_END /* * pmalloc_usable_size -- pmalloc_usable_size mock */ FUNC_MOCK(palloc_usable_size, size_t, struct palloc_heap *heap, uint64_t off) FUNC_MOCK_RUN_DEFAULT { uint64_t *alloc_size = (uint64_t *)((uintptr_t)Pop + off - sizeof(uint64_t)); return (size_t)*alloc_size; } FUNC_MOCK_END
6,050
26.756881
77
c
null
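The pmalloc/palloc mocks in obj_list_mocks_palloc.c above all reduce to the same bookkeeping: a linear (bump) allocator that stores a uint64_t size header just before the offset it hands out and only ever moves the heap cursor forward. The following is a minimal volatile sketch of that idea only; the names (toy_heap, toy_pmalloc) are hypothetical, and the persistence calls and OOB area handled by the real mock are deliberately omitted.

#include <stdint.h>
#include <stdio.h>

#define TOY_HEAP_SIZE (1 << 16)

struct toy_heap {
	uint8_t base[TOY_HEAP_SIZE];
	uint64_t heap_offset;	/* next free byte, like *Heap_offset above */
};

/* returns the offset of the usable area, 0 on failure */
static uint64_t
toy_pmalloc(struct toy_heap *h, uint64_t size)
{
	uint64_t need = sizeof(uint64_t) + size;
	if (h->heap_offset + need > TOY_HEAP_SIZE)
		return 0;

	/* store the allocation size right before the returned offset */
	uint64_t *alloc_size = (uint64_t *)(h->base + h->heap_offset);
	*alloc_size = size;

	uint64_t off = h->heap_offset + sizeof(uint64_t);
	h->heap_offset += need;	/* linear allocator: space is never reused */
	return off;
}

static uint64_t
toy_usable_size(struct toy_heap *h, uint64_t off)
{
	return *(uint64_t *)(h->base + off - sizeof(uint64_t));
}

int
main(void)
{
	static struct toy_heap h;
	h.heap_offset = sizeof(uint64_t);	/* keep offset 0 invalid */

	uint64_t off = toy_pmalloc(&h, 128);
	printf("off = %llu, usable = %llu\n", (unsigned long long)off,
	    (unsigned long long)toy_usable_size(&h, off));
	return 0;
}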
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/blk_rw_mt/blk_rw_mt.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2014-2020, Intel Corporation */ /* * Copyright (c) 2016, Microsoft Corporation. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * blk_rw_mt.c -- unit test for multi-threaded random I/O * * usage: blk_rw_mt bsize file seed nthread nops * */ #include "unittest.h" #include "rand.h" static size_t Bsize; /* all I/O below this LBA (increases collisions) */ static const unsigned Nblock = 100; static unsigned Seed; static unsigned Nthread; static unsigned Nops; static PMEMblkpool *Handle; /* * construct -- build a buffer for writing */ static void construct(int *ordp, unsigned char *buf) { for (int i = 0; i < Bsize; i++) buf[i] = *ordp; (*ordp)++; if (*ordp > 255) *ordp = 1; } /* * check -- check for torn buffers */ static void check(unsigned char *buf) { unsigned val = *buf; for (int i = 1; i < Bsize; i++) if (buf[i] != val) { UT_OUT("{%u} TORN at byte %d", val, i); break; } } /* * worker -- the work each thread performs */ static void * worker(void *arg) { uintptr_t mytid = (uintptr_t)arg; unsigned char *buf = MALLOC(Bsize); int ord = 1; rng_t rng; randomize_r(&rng, Seed + mytid); for (unsigned i = 0; i < Nops; i++) { os_off_t lba = (os_off_t)(rnd64_r(&rng) % Nblock); if (rnd64_r(&rng) % 2) { /* read */ if (pmemblk_read(Handle, buf, lba) < 0) UT_OUT("!read lba %zu", lba); else check(buf); } else { /* write */ construct(&ord, buf); if (pmemblk_write(Handle, buf, lba) < 0) UT_OUT("!write lba %zu", lba); } } FREE(buf); return NULL; } int main(int argc, char *argv[]) { START(argc, argv, "blk_rw_mt"); if (argc != 6) UT_FATAL("usage: %s bsize file seed nthread nops", argv[0]); Bsize = strtoul(argv[1], NULL, 0); const char *path = argv[2]; if ((Handle = pmemblk_create(path, Bsize, 0, S_IWUSR | S_IRUSR)) == NULL) UT_FATAL("!%s: pmemblk_create", path); Seed = strtoul(argv[3], NULL, 0); Nthread = strtoul(argv[4], NULL, 0); Nops = strtoul(argv[5], NULL, 0); UT_OUT("%s block size %zu usable blocks %u", argv[1], Bsize, Nblock); os_thread_t *threads = MALLOC(Nthread * sizeof(os_thread_t)); /* kick off nthread threads */ for 
(unsigned i = 0; i < Nthread; i++) THREAD_CREATE(&threads[i], NULL, worker, (void *)(intptr_t)i); /* wait for all the threads to complete */ for (unsigned i = 0; i < Nthread; i++) THREAD_JOIN(&threads[i], NULL); FREE(threads); pmemblk_close(Handle); /* XXX not ready to pass this part of the test yet */ int result = pmemblk_check(path, Bsize); if (result < 0) UT_OUT("!%s: pmemblk_check", path); else if (result == 0) UT_OUT("%s: pmemblk_check: not consistent", path); DONE(NULL); }
4,260
25.302469
74
c
null
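blk_rw_mt.c above hammers the same few libpmemblk entry points from many threads; the single-threaded pattern underneath is just create, write a block, read it back. A minimal sketch of that pattern follows, assuming a hypothetical pool path and block size and with error handling reduced to perror.

#include <libpmemblk.h>
#include <stdio.h>
#include <string.h>

#define POOL_PATH "/mnt/pmem/blk.pool"	/* hypothetical */
#define BSIZE 4096

int
main(void)
{
	PMEMblkpool *pbp = pmemblk_create(POOL_PATH, BSIZE,
	    PMEMBLK_MIN_POOL, 0666);
	if (pbp == NULL) {
		perror("pmemblk_create");
		return 1;
	}

	unsigned char buf[BSIZE];
	memset(buf, 0xab, sizeof(buf));

	/* write block 0 atomically, then read it back */
	if (pmemblk_write(pbp, buf, 0) < 0)
		perror("pmemblk_write");

	unsigned char out[BSIZE];
	if (pmemblk_read(pbp, out, 0) < 0)
		perror("pmemblk_read");

	printf("usable blocks: %zu, match: %d\n",
	    pmemblk_nblock(pbp), memcmp(buf, out, BSIZE) == 0);

	pmemblk_close(pbp);
	return 0;
}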
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_ctl_stats/obj_ctl_stats.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2017-2020, Intel Corporation */ /* * obj_ctl_stats.c -- tests for the libpmemobj statistics module */ #include "unittest.h" int main(int argc, char *argv[]) { START(argc, argv, "obj_ctl_stats"); if (argc != 2) UT_FATAL("usage: %s file-name", argv[0]); const char *path = argv[1]; PMEMobjpool *pop; if ((pop = pmemobj_create(path, "ctl", PMEMOBJ_MIN_POOL, S_IWUSR | S_IRUSR)) == NULL) UT_FATAL("!pmemobj_create: %s", path); int enabled; int ret = pmemobj_ctl_get(pop, "stats.enabled", &enabled); UT_ASSERTeq(enabled, 0); UT_ASSERTeq(ret, 0); ret = pmemobj_alloc(pop, NULL, 1, 0, NULL, NULL); UT_ASSERTeq(ret, 0); size_t allocated; ret = pmemobj_ctl_get(pop, "stats.heap.curr_allocated", &allocated); UT_ASSERTeq(allocated, 0); enabled = 1; ret = pmemobj_ctl_set(pop, "stats.enabled", &enabled); UT_ASSERTeq(ret, 0); PMEMoid oid; ret = pmemobj_alloc(pop, &oid, 1, 0, NULL, NULL); UT_ASSERTeq(ret, 0); size_t oid_size = pmemobj_alloc_usable_size(oid) + 16; ret = pmemobj_ctl_get(pop, "stats.heap.curr_allocated", &allocated); UT_ASSERTeq(ret, 0); UT_ASSERTeq(allocated, oid_size); size_t run_allocated = 0; ret = pmemobj_ctl_get(pop, "stats.heap.run_allocated", &run_allocated); UT_ASSERTeq(ret, 0); UT_ASSERT(run_allocated /* 2 allocs */ > allocated /* 1 alloc */); pmemobj_free(&oid); ret = pmemobj_ctl_get(pop, "stats.heap.curr_allocated", &allocated); UT_ASSERTeq(ret, 0); UT_ASSERTeq(allocated, 0); ret = pmemobj_ctl_get(pop, "stats.heap.run_allocated", &run_allocated); UT_ASSERTeq(ret, 0); UT_ASSERT(run_allocated /* 2 allocs */ > allocated /* 1 alloc */); TX_BEGIN(pop) { oid = pmemobj_tx_alloc(1, 0); } TX_ONABORT { UT_ASSERT(0); } TX_END oid_size = pmemobj_alloc_usable_size(oid) + 16; ret = pmemobj_ctl_get(pop, "stats.heap.curr_allocated", &allocated); UT_ASSERTeq(ret, 0); UT_ASSERTeq(allocated, oid_size); enum pobj_stats_enabled enum_enabled; ret = pmemobj_ctl_get(pop, "stats.enabled", &enum_enabled); UT_ASSERTeq(enabled, POBJ_STATS_ENABLED_BOTH); UT_ASSERTeq(ret, 0); run_allocated = 0; ret = pmemobj_ctl_get(pop, "stats.heap.run_allocated", &run_allocated); UT_ASSERTeq(ret, 0); enum_enabled = POBJ_STATS_ENABLED_PERSISTENT; /* transient disabled */ ret = pmemobj_ctl_set(pop, "stats.enabled", &enum_enabled); UT_ASSERTeq(ret, 0); ret = pmemobj_alloc(pop, &oid, 1, 0, NULL, NULL); UT_ASSERTeq(ret, 0); size_t tmp = 0; ret = pmemobj_ctl_get(pop, "stats.heap.run_allocated", &tmp); UT_ASSERTeq(ret, 0); UT_ASSERTeq(tmp, run_allocated); /* shouldn't change */ /* the deallocated object shouldn't be reflected in rebuilt stats */ pmemobj_free(&oid); pmemobj_close(pop); pop = pmemobj_open(path, "ctl"); UT_ASSERTne(pop, NULL); /* stats are rebuilt lazily, so initially this should be 0 */ tmp = 0; ret = pmemobj_ctl_get(pop, "stats.heap.run_allocated", &tmp); UT_ASSERTeq(ret, 0); UT_ASSERTeq(tmp, 0); ret = pmemobj_alloc(pop, NULL, 1, 0, NULL, NULL); UT_ASSERTeq(ret, 0); /* after first alloc, the previously allocated object will be found */ tmp = 0; ret = pmemobj_ctl_get(pop, "stats.heap.run_allocated", &tmp); UT_ASSERTeq(ret, 0); UT_ASSERTeq(tmp, run_allocated + oid_size); pmemobj_close(pop); DONE(NULL); }
3,299
25.829268
72
c
null
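obj_ctl_stats.c above checks the statistics counters through the CTL namespace; the basic usage it relies on is enable "stats.enabled", allocate, then query "stats.heap.curr_allocated". A condensed sketch of just that sequence follows, with a hypothetical pool path.

#include <libpmemobj.h>
#include <stdio.h>

int
main(void)
{
	PMEMobjpool *pop = pmemobj_create("/mnt/pmem/ctl.pool" /* hypothetical */,
	    "ctl", PMEMOBJ_MIN_POOL, 0666);
	if (pop == NULL) {
		perror("pmemobj_create");
		return 1;
	}

	int enabled = 1;
	if (pmemobj_ctl_set(pop, "stats.enabled", &enabled) != 0)
		perror("pmemobj_ctl_set");

	PMEMoid oid;
	if (pmemobj_alloc(pop, &oid, 64, 0, NULL, NULL) != 0)
		perror("pmemobj_alloc");

	size_t allocated = 0;
	if (pmemobj_ctl_get(pop, "stats.heap.curr_allocated", &allocated) == 0)
		printf("curr_allocated = %zu\n", allocated);

	pmemobj_free(&oid);
	pmemobj_close(pop);
	return 0;
}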
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/util_poolset_foreach/util_poolset_foreach.c
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2018, Intel Corporation */

/*
 * util_poolset_foreach.c -- unit test for util_poolset_foreach_part()
 *
 * usage: util_poolset_foreach file...
 */

#include "unittest.h"
#include "set.h"
#include "pmemcommon.h"
#include <errno.h>

#define LOG_PREFIX "ut"
#define LOG_LEVEL_VAR "TEST_LOG_LEVEL"
#define LOG_FILE_VAR "TEST_LOG_FILE"
#define MAJOR_VERSION 1
#define MINOR_VERSION 0

static int
cb(struct part_file *pf, void *arg)
{
	if (pf->is_remote) {
		/* remote replica */
		const char *node_addr = pf->remote->node_addr;
		const char *pool_desc = pf->remote->pool_desc;
		char *set_name = (char *)arg;

		UT_OUT("%s: %s %s", set_name, node_addr, pool_desc);
	} else {
		const char *name = pf->part->path;
		char *set_name = (char *)arg;

		UT_OUT("%s: %s", set_name, name);
	}

	return 0;
}

int
main(int argc, char *argv[])
{
	START(argc, argv, "util_poolset_foreach");

	common_init(LOG_PREFIX, LOG_LEVEL_VAR, LOG_FILE_VAR,
			MAJOR_VERSION, MINOR_VERSION);

	if (argc < 2)
		UT_FATAL("usage: %s file...", argv[0]);

	for (int i = 1; i < argc; i++) {
		char *fname = argv[i];
		int ret = util_poolset_foreach_part(fname, cb, fname);

		UT_OUT("util_poolset_foreach_part(%s): %d", fname, ret);
	}

	common_fini();

	DONE(NULL);
}
1,293
20.213115
70
c
null
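The callback API exercised by util_poolset_foreach.c above can also be used to aggregate information instead of printing it. The sketch below counts local parts versus remote replicas of a poolset file; it assumes the same PMDK-internal headers ("set.h", "pmemcommon.h") and initialization the test uses, and the poolset path is hypothetical.

#include "set.h"
#include "pmemcommon.h"
#include <stdio.h>

struct part_count {
	int local;
	int remote;
};

static int
count_cb(struct part_file *pf, void *arg)
{
	struct part_count *cnt = arg;

	if (pf->is_remote)
		cnt->remote++;
	else
		cnt->local++;

	return 0;	/* return 0 to keep iterating, as in the test above */
}

int
main(void)
{
	common_init("ut", "TEST_LOG_LEVEL", "TEST_LOG_FILE", 1, 0);

	struct part_count cnt = {0, 0};
	int ret = util_poolset_foreach_part("pool.set" /* hypothetical */,
	    count_cb, &cnt);
	printf("ret = %d, local parts = %d, remote replicas = %d\n",
	    ret, cnt.local, cnt.remote);

	common_fini();
	return 0;
}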
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/pmem2_map_prot/pmem2_map_prot.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020, Intel Corporation */ /* * pmem2_map_prot.c -- pmem2_map_prot unit tests */ #include <stdbool.h> #include <signal.h> #include <setjmp.h> #include "config.h" #include "source.h" #include "map.h" #include "out.h" #include "pmem2.h" #include "unittest.h" #include "ut_pmem2.h" #include "ut_pmem2_setup.h" #include "ut_fh.h" struct res { struct FHandle *fh; struct pmem2_config cfg; struct pmem2_source *src; }; /* * res_prepare -- set access mode and protection flags */ static void res_prepare(const char *file, struct res *res, int access, unsigned proto) { #ifdef _WIN32 enum file_handle_type fh_type = FH_HANDLE; #else enum file_handle_type fh_type = FH_FD; #endif ut_pmem2_prepare_config(&res->cfg, &res->src, &res->fh, fh_type, file, 0, 0, access); pmem2_config_set_protection(&res->cfg, proto); } /* * res_cleanup -- free resources */ static void res_cleanup(struct res *res) { PMEM2_SOURCE_DELETE(&res->src); UT_FH_CLOSE(res->fh); } static const char *word1 = "Persistent or nonpersistent: this is the question."; static ut_jmp_buf_t Jmp; /* * signal_handler -- called on SIGSEGV */ static void signal_handler(int sig) { ut_siglongjmp(Jmp); } /* * test_rw_mode_rw_prot -- test R/W protection * pmem2_map() - should success * memcpy() - should success */ static int test_rw_mode_rw_prot(const struct test_case *tc, int argc, char *argv[]) { if (argc < 1) UT_FATAL("usage: test_rw_mode_rw_prot <file>"); struct res res; /* read/write on file opened in read/write mode - should success */ res_prepare(argv[0], &res, FH_RDWR, PMEM2_PROT_READ | PMEM2_PROT_WRITE); struct pmem2_map *map; int ret = pmem2_map(&res.cfg, res.src, &map); UT_ASSERTeq(ret, 0); pmem2_memcpy_fn memcpy_fn = pmem2_get_memcpy_fn(map); void *addr_map = pmem2_map_get_address(map); memcpy_fn(addr_map, word1, strlen(word1), 0); UT_ASSERTeq(memcmp(addr_map, word1, strlen(word1)), 0); pmem2_unmap(&map); res_cleanup(&res); return 1; } /* * template_mode_prot_mismatch - try to map file with mutually exclusive FD * access and map protection */ static void template_mode_prot_mismatch(char *file, int access, unsigned prot) { struct res res; /* read/write on file opened in read-only mode - should fail */ res_prepare(file, &res, access, prot); struct pmem2_map *map; int ret = pmem2_map(&res.cfg, res.src, &map); UT_PMEM2_EXPECT_RETURN(ret, PMEM2_E_NO_ACCESS); res_cleanup(&res); } /* * test_r_mode_rw_prot -- test R/W protection * pmem2_map() - should fail */ static int test_r_mode_rw_prot(const struct test_case *tc, int argc, char *argv[]) { if (argc < 1) UT_FATAL("usage: test_r_mode_rw_prot <file>"); char *file = argv[0]; template_mode_prot_mismatch(file, FH_READ, PMEM2_PROT_WRITE | PMEM2_PROT_READ); return 1; } /* * test_rw_mode_rwx_prot - test R/W/X protection on R/W file * pmem2_map() - should fail */ static int test_rw_modex_rwx_prot(const struct test_case *tc, int argc, char *argv[]) { if (argc < 1) UT_FATAL("usage: test_rw_modex_rwx_prot <file>"); char *file = argv[0]; template_mode_prot_mismatch(file, FH_RDWR, PMEM2_PROT_EXEC |PMEM2_PROT_WRITE | PMEM2_PROT_READ); return 1; } /* * test_rw_modex_rx_prot - test R/X protection on R/W file * pmem2_map() - should fail */ static int test_rw_modex_rx_prot(const struct test_case *tc, int argc, char *argv[]) { if (argc < 1) UT_FATAL("usage: test_rw_modex_rx_prot <file>"); char *file = argv[0]; template_mode_prot_mismatch(file, FH_RDWR, PMEM2_PROT_EXEC | PMEM2_PROT_READ); return 1; } /* * test_rw_mode_r_prot -- test R/W protection * pmem2_map() - should 
success * memcpy() - should fail */ static int test_rw_mode_r_prot(const struct test_case *tc, int argc, char *argv[]) { if (argc < 1) UT_FATAL("usage: test_rw_mode_r_prot <file>"); /* arrange to catch SIGSEGV */ struct sigaction v; sigemptyset(&v.sa_mask); v.sa_flags = 0; v.sa_handler = signal_handler; SIGACTION(SIGSEGV, &v, NULL); struct res res; /* read-only on file opened in read/write mode - should success */ res_prepare(argv[0], &res, FH_RDWR, PMEM2_PROT_READ); struct pmem2_map *map; int ret = pmem2_map(&res.cfg, res.src, &map); UT_ASSERTeq(ret, 0); pmem2_memcpy_fn memcpy_fn = pmem2_get_memcpy_fn(map); void *addr_map = pmem2_map_get_address(map); if (!ut_sigsetjmp(Jmp)) { /* memcpy should now fail */ memcpy_fn(addr_map, word1, strlen(word1), 0); UT_FATAL("memcpy successful"); } pmem2_unmap(&map); res_cleanup(&res); signal(SIGSEGV, SIG_DFL); return 1; } /* * test_r_mode_r_prot -- test R/W protection * pmem2_map() - should success * memcpy() - should fail */ static int test_r_mode_r_prot(const struct test_case *tc, int argc, char *argv[]) { if (argc < 1) UT_FATAL("usage: test_r_mode_r_prot <file>"); /* arrange to catch SIGSEGV */ struct sigaction v; sigemptyset(&v.sa_mask); v.sa_flags = 0; v.sa_handler = signal_handler; SIGACTION(SIGSEGV, &v, NULL); struct res res; /* read-only on file opened in read-only mode - should succeed */ res_prepare(argv[0], &res, FH_READ, PMEM2_PROT_READ); struct pmem2_map *map; int ret = pmem2_map(&res.cfg, res.src, &map); UT_ASSERTeq(ret, 0); pmem2_memcpy_fn memcpy_fn = pmem2_get_memcpy_fn(map); void *addr_map = pmem2_map_get_address(map); if (!ut_sigsetjmp(Jmp)) { /* memcpy should now fail */ memcpy_fn(addr_map, word1, strlen(word1), 0); UT_FATAL("memcpy successful"); } pmem2_unmap(&map); res_cleanup(&res); signal(SIGSEGV, SIG_DFL); return 1; } /* * test_rw_mode_none_prot -- test R/W protection * pmem2_map() - should success * memcpy() - should fail */ static int test_rw_mode_none_prot(const struct test_case *tc, int argc, char *argv[]) { if (argc < 1) UT_FATAL("usage: test_rw_mode_none_prot <file>"); /* arrange to catch SIGSEGV */ struct sigaction v; sigemptyset(&v.sa_mask); v.sa_flags = 0; v.sa_handler = signal_handler; SIGACTION(SIGSEGV, &v, NULL); struct res res; /* none on file opened in read-only mode - should success */ res_prepare(argv[0], &res, FH_READ, PMEM2_PROT_NONE); struct pmem2_map *map; int ret = pmem2_map(&res.cfg, res.src, &map); UT_ASSERTeq(ret, 0); pmem2_memcpy_fn memcpy_fn = pmem2_get_memcpy_fn(map); void *addr_map = pmem2_map_get_address(map); if (!ut_sigsetjmp(Jmp)) { /* memcpy should now fail */ memcpy_fn(addr_map, word1, strlen(word1), 0); UT_FATAL("memcpy successful"); } pmem2_unmap(&map); res_cleanup(&res); signal(SIGSEGV, SIG_DFL); return 1; } /* * sum_asm[] --> simple program in assembly which calculates '2 + 2' and * returns the result */ static unsigned char sum_asm[] = { 0x55, /* push %rbp */ 0x48, 0x89, 0xe5, /* mov %rsp,%rbp */ 0xc7, 0x45, 0xf8, 0x02, 0x00, 0x00, 0x00, /* movl $0x2,-0x8(%rbp) */ 0x8b, 0x45, 0xf8, /* mov -0x8(%rbp),%eax */ 0x01, 0xc0, /* add %eax,%eax */ 0x89, 0x45, 0xfc, /* mov %eax,-0x4(%rbp) */ 0x8b, 0x45, 0xfc, /* mov -0x4(%rbp),%eax */ 0x5d, /* pop %rbp */ 0xc3, /* retq */ }; typedef int (*sum_fn)(void); /* * test_rx_mode_rx_prot_do_execute -- copy string with the program to mapped * memory to prepare memory, execute the program and verify result */ static int test_rx_mode_rx_prot_do_execute(const struct test_case *tc, int argc, char *argv[]) { if (argc < 1) UT_FATAL("usage: 
test_rx_mode_rx_prot_do_execute <file>"); char *file = argv[0]; struct res res; /* Windows does not support PMEM2_PROT_WRITE combination */ res_prepare(file, &res, FH_EXEC | FH_RDWR, PMEM2_PROT_WRITE | PMEM2_PROT_READ); struct pmem2_map *map; int ret = pmem2_map(&res.cfg, res.src, &map); UT_ASSERTeq(ret, 0); char *addr_map = pmem2_map_get_address(map); map->memcpy_fn(addr_map, sum_asm, sizeof(sum_asm), 0); pmem2_unmap(&map); /* Windows does not support PMEM2_PROT_EXEC combination */ pmem2_config_set_protection(&res.cfg, PMEM2_PROT_READ | PMEM2_PROT_EXEC); ret = pmem2_map(&res.cfg, res.src, &map); UT_ASSERTeq(ret, 0); sum_fn sum = (sum_fn)addr_map; int sum_result = sum(); UT_ASSERTeq(sum_result, 4); pmem2_unmap(&map); res_cleanup(&res); return 1; } /* * test_rwx_mode_rx_prot_do_write -- try to copy the string into mapped memory, * expect failure */ static int test_rwx_mode_rx_prot_do_write(const struct test_case *tc, int argc, char *argv[]) { if (argc < 2) UT_FATAL( "usage: test_rwx_mode_rx_prot_do_write <file> <if_sharing>"); struct sigaction v; sigemptyset(&v.sa_mask); v.sa_flags = 0; v.sa_handler = signal_handler; SIGACTION(SIGSEGV, &v, NULL); char *file = argv[0]; unsigned if_sharing = ATOU(argv[1]); struct res res; /* Windows does not support PMEM2_PROT_EXEC combination */ res_prepare(file, &res, FH_EXEC | FH_RDWR, PMEM2_PROT_READ | PMEM2_PROT_EXEC); if (if_sharing) pmem2_config_set_sharing(&res.cfg, PMEM2_PRIVATE); struct pmem2_map *map; int ret = pmem2_map(&res.cfg, res.src, &map); UT_ASSERTeq(ret, 0); char *addr_map = pmem2_map_get_address(map); if (!ut_sigsetjmp(Jmp)) { /* memcpy_fn should fail */ map->memcpy_fn(addr_map, sum_asm, sizeof(sum_asm), 0); } pmem2_unmap(&map); res_cleanup(&res); signal(SIGSEGV, SIG_DFL); return 2; } /* * test_rwx_mode_rwx_prot_do_execute -- copy string with the program to mapped * memory to prepare memory, execute the program and verify result */ static int test_rwx_mode_rwx_prot_do_execute(const struct test_case *tc, int argc, char *argv[]) { if (argc < 2) UT_FATAL( "usage: test_rwx_mode_rwx_prot_do_execute <file> <if_sharing>"); char *file = argv[0]; unsigned if_sharing = ATOU(argv[1]); struct res res; res_prepare(file, &res, FH_EXEC | FH_RDWR, PMEM2_PROT_EXEC | PMEM2_PROT_WRITE | PMEM2_PROT_READ); if (if_sharing) pmem2_config_set_sharing(&res.cfg, PMEM2_PRIVATE); struct pmem2_map *map; int ret = pmem2_map(&res.cfg, res.src, &map); UT_ASSERTeq(ret, 0); char *addr_map = pmem2_map_get_address(map); map->memcpy_fn(addr_map, sum_asm, sizeof(sum_asm), 0); sum_fn sum = (sum_fn)addr_map; int sum_result = sum(); UT_ASSERTeq(sum_result, 4); pmem2_unmap(&map); res_cleanup(&res); signal(SIGSEGV, SIG_DFL); return 2; } /* * test_rw_mode_rw_prot_do_execute -- copy string with the program to mapped * memory to prepare memory, and execute the program - should fail */ static int test_rw_mode_rw_prot_do_execute(const struct test_case *tc, int argc, char *argv[]) { if (argc < 2) UT_FATAL( "usage: test_rw_mode_rwx_prot_do_execute <file> <if_sharing>"); struct sigaction v; sigemptyset(&v.sa_mask); v.sa_flags = 0; v.sa_handler = signal_handler; SIGACTION(SIGSEGV, &v, NULL); char *file = argv[0]; unsigned if_sharing = ATOU(argv[1]); struct res res; res_prepare(file, &res, FH_RDWR, PMEM2_PROT_WRITE | PMEM2_PROT_READ); if (if_sharing) pmem2_config_set_sharing(&res.cfg, PMEM2_PRIVATE); struct pmem2_map *map; int ret = pmem2_map(&res.cfg, res.src, &map); UT_ASSERTeq(ret, 0); void *addr_map = pmem2_map_get_address(map); map->memcpy_fn(addr_map, sum_asm, sizeof(sum_asm), 0); 
sum_fn sum = (sum_fn)addr_map; if (!ut_sigsetjmp(Jmp)) { sum(); /* sum function should now fail */ } pmem2_unmap(&map); res_cleanup(&res); return 2; } static const char *initial_state = "No code."; /* * test_rwx_prot_map_priv_do_execute -- copy string with the program to * the mapped memory with MAP_PRIVATE to prepare memory, execute the program * and verify the result */ static int test_rwx_prot_map_priv_do_execute(const struct test_case *tc, int argc, char *argv[]) { if (argc < 1) UT_FATAL( "usage: test_rwx_prot_map_priv_do_execute <file> <if_sharing>"); char *file = argv[0]; struct res res; res_prepare(file, &res, FH_RDWR, PMEM2_PROT_WRITE | PMEM2_PROT_READ); struct pmem2_map *map; int ret = pmem2_map(&res.cfg, res.src, &map); UT_ASSERTeq(ret, 0); char *addr_map = pmem2_map_get_address(map); map->memcpy_fn(addr_map, initial_state, sizeof(initial_state), 0); pmem2_unmap(&map); res_cleanup(&res); res_prepare(file, &res, FH_READ | FH_EXEC, PMEM2_PROT_EXEC | PMEM2_PROT_WRITE | PMEM2_PROT_READ); pmem2_config_set_sharing(&res.cfg, PMEM2_PRIVATE); ret = pmem2_map(&res.cfg, res.src, &map); UT_ASSERTeq(ret, 0); addr_map = pmem2_map_get_address(map); map->memcpy_fn(addr_map, sum_asm, sizeof(sum_asm), 0); sum_fn sum = (sum_fn)addr_map; int sum_result = sum(); UT_ASSERTeq(sum_result, 4); pmem2_unmap(&map); ret = pmem2_map(&res.cfg, res.src, &map); UT_ASSERTeq(ret, 0); addr_map = pmem2_map_get_address(map); /* check if changes in private mapping affect initial state */ UT_ASSERTeq(memcmp(addr_map, initial_state, strlen(initial_state)), 0); pmem2_unmap(&map); res_cleanup(&res); return 1; } /* * test_cases -- available test cases */ static struct test_case test_cases[] = { TEST_CASE(test_rw_mode_rw_prot), TEST_CASE(test_r_mode_rw_prot), TEST_CASE(test_rw_modex_rwx_prot), TEST_CASE(test_rw_modex_rx_prot), TEST_CASE(test_rw_mode_r_prot), TEST_CASE(test_r_mode_r_prot), TEST_CASE(test_rw_mode_none_prot), TEST_CASE(test_rx_mode_rx_prot_do_execute), TEST_CASE(test_rwx_mode_rx_prot_do_write), TEST_CASE(test_rwx_mode_rwx_prot_do_execute), TEST_CASE(test_rw_mode_rw_prot_do_execute), TEST_CASE(test_rwx_prot_map_priv_do_execute), }; #define NTESTS (sizeof(test_cases) / sizeof(test_cases[0])) int main(int argc, char *argv[]) { START(argc, argv, "pmem2_map_prot"); util_init(); out_init("pmem2_map_prot", "TEST_LOG_LEVEL", "TEST_LOG_FILE", 0, 0); TEST_CASE_PROCESS(argc, argv, test_cases, NTESTS); out_fini(); DONE(NULL); } #ifdef _MSC_VER MSVC_CONSTR(libpmem2_init) MSVC_DESTR(libpmem2_fini) #endif
13,698
22.537801
80
c
null
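pmem2_map_prot.c above drives its mappings through test helpers; the underlying public-API pattern is open a file descriptor, build a config with a required granularity and a protection mask, then map. A condensed sketch of the read-only-protection case follows, using this source tree's pmem2_map() call as the test does (later PMDK releases renamed it pmem2_map_new()); the file path is hypothetical and error handling is minimal.

#include <libpmem2.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	int fd = open("/mnt/pmem/file" /* hypothetical */, O_RDWR);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	struct pmem2_config *cfg;
	struct pmem2_source *src;
	if (pmem2_config_new(&cfg) || pmem2_source_from_fd(&src, fd)) {
		fprintf(stderr, "%s\n", pmem2_errormsg());
		return 1;
	}

	pmem2_config_set_required_store_granularity(cfg,
	    PMEM2_GRANULARITY_PAGE);
	/* map read-only even though the fd was opened read/write */
	pmem2_config_set_protection(cfg, PMEM2_PROT_READ);

	struct pmem2_map *map;
	if (pmem2_map(cfg, src, &map)) {
		fprintf(stderr, "%s\n", pmem2_errormsg());
		return 1;
	}

	/* reads are fine; a store to this mapping would raise SIGSEGV */
	volatile char first = *(char *)pmem2_map_get_address(map);
	(void) first;

	pmem2_unmap(&map);
	pmem2_config_delete(&cfg);
	pmem2_source_delete(&src);
	close(fd);
	return 0;
}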
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_layout/obj_layout.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2016-2019, Intel Corporation */ /* * obj_layout.c -- unit test for layout * * This test should be modified after every layout change. It's here to prevent * any accidental layout changes. */ #include "util.h" #include "unittest.h" #include "sync.h" #include "heap_layout.h" #include "lane.h" #include "tx.h" #include "ulog.h" #include "list.h" #define SIZEOF_CHUNK_HEADER_V3 (8) #define MAX_CHUNK_V3 (65535 - 7) #define SIZEOF_CHUNK_V3 (1024ULL * 256) #define SIZEOF_CHUNK_RUN_HEADER_V3 (16) #define SIZEOF_ZONE_HEADER_V3 (64) #define SIZEOF_ZONE_METADATA_V3 (SIZEOF_ZONE_HEADER_V3 +\ SIZEOF_CHUNK_HEADER_V3 * MAX_CHUNK_V3) #define SIZEOF_HEAP_HDR_V3 (1024) #define SIZEOF_LEGACY_ALLOCATION_HEADER_V3 (64) #define SIZEOF_COMPACT_ALLOCATION_HEADER_V3 (16) #define SIZEOF_LOCK_V3 (64) #define SIZEOF_PMEMOID_V3 (16) #define SIZEOF_LIST_ENTRY_V3 (SIZEOF_PMEMOID_V3 * 2) #define SIZEOF_LIST_HEAD_V3 (SIZEOF_PMEMOID_V3 + SIZEOF_LOCK_V3) #define SIZEOF_LANE_SECTION_V3 (1024) #define SIZEOF_LANE_V3 (3 * SIZEOF_LANE_SECTION_V3) #define SIZEOF_ULOG_V4 (CACHELINE_SIZE) #define SIZEOF_ULOG_BASE_ENTRY_V4 (8) #define SIZEOF_ULOG_VAL_ENTRY_V4 (16) #define SIZEOF_ULOG_BUF_ENTRY_V4 (24) #if CACHELINE_SIZE == 128 #define SIZEOF_LANE_UNDO_SIZE (1920) #define SIZEOF_LANE_REDO_EXTERNAL_SIZE (640) #define SIZEOF_LANE_REDO_INTERNAL_SIZE (128) #elif CACHELINE_SIZE == 64 #define SIZEOF_LANE_UNDO_SIZE (2048) #define SIZEOF_LANE_REDO_EXTERNAL_SIZE (640) #define SIZEOF_LANE_REDO_INTERNAL_SIZE (192) #else #error "Unknown cacheline size" #endif POBJ_LAYOUT_BEGIN(layout); POBJ_LAYOUT_ROOT(layout, struct foo); POBJ_LAYOUT_END(layout); struct foo { POBJ_LIST_ENTRY(struct foo) f; }; POBJ_LIST_HEAD(foo_head, struct foo); int main(int argc, char *argv[]) { START(argc, argv, "obj_layout"); UT_COMPILE_ERROR_ON(CHUNKSIZE != SIZEOF_CHUNK_V3); ASSERT_ALIGNED_BEGIN(struct chunk); ASSERT_ALIGNED_FIELD(struct chunk, data); ASSERT_ALIGNED_CHECK(struct chunk); UT_COMPILE_ERROR_ON(sizeof(struct chunk_run) != SIZEOF_CHUNK_V3); ASSERT_ALIGNED_BEGIN(struct chunk_run_header); ASSERT_ALIGNED_FIELD(struct chunk_run_header, block_size); ASSERT_ALIGNED_FIELD(struct chunk_run_header, alignment); ASSERT_ALIGNED_CHECK(struct chunk_run_header); UT_COMPILE_ERROR_ON(sizeof(struct chunk_run_header) != SIZEOF_CHUNK_RUN_HEADER_V3); ASSERT_ALIGNED_BEGIN(struct chunk_run); ASSERT_ALIGNED_FIELD(struct chunk_run, hdr); ASSERT_ALIGNED_FIELD(struct chunk_run, content); ASSERT_ALIGNED_CHECK(struct chunk_run); UT_COMPILE_ERROR_ON(sizeof(struct chunk_run) != SIZEOF_CHUNK_V3); ASSERT_ALIGNED_BEGIN(struct chunk_header); ASSERT_ALIGNED_FIELD(struct chunk_header, type); ASSERT_ALIGNED_FIELD(struct chunk_header, flags); ASSERT_ALIGNED_FIELD(struct chunk_header, size_idx); ASSERT_ALIGNED_CHECK(struct chunk_header); UT_COMPILE_ERROR_ON(sizeof(struct chunk_header) != SIZEOF_CHUNK_HEADER_V3); ASSERT_ALIGNED_BEGIN(struct zone_header); ASSERT_ALIGNED_FIELD(struct zone_header, magic); ASSERT_ALIGNED_FIELD(struct zone_header, size_idx); ASSERT_ALIGNED_FIELD(struct zone_header, reserved); ASSERT_ALIGNED_CHECK(struct zone_header); UT_COMPILE_ERROR_ON(sizeof(struct zone_header) != SIZEOF_ZONE_HEADER_V3); ASSERT_ALIGNED_BEGIN(struct zone); ASSERT_ALIGNED_FIELD(struct zone, header); ASSERT_ALIGNED_FIELD(struct zone, chunk_headers); ASSERT_ALIGNED_CHECK(struct zone); UT_COMPILE_ERROR_ON(sizeof(struct zone) != SIZEOF_ZONE_METADATA_V3); ASSERT_ALIGNED_BEGIN(struct heap_header); ASSERT_ALIGNED_FIELD(struct heap_header, signature); 
ASSERT_ALIGNED_FIELD(struct heap_header, major); ASSERT_ALIGNED_FIELD(struct heap_header, minor); ASSERT_ALIGNED_FIELD(struct heap_header, unused); ASSERT_ALIGNED_FIELD(struct heap_header, chunksize); ASSERT_ALIGNED_FIELD(struct heap_header, chunks_per_zone); ASSERT_ALIGNED_FIELD(struct heap_header, reserved); ASSERT_ALIGNED_FIELD(struct heap_header, checksum); ASSERT_ALIGNED_CHECK(struct heap_header); UT_COMPILE_ERROR_ON(sizeof(struct heap_header) != SIZEOF_HEAP_HDR_V3); ASSERT_ALIGNED_BEGIN(struct allocation_header_legacy); ASSERT_ALIGNED_FIELD(struct allocation_header_legacy, unused); ASSERT_ALIGNED_FIELD(struct allocation_header_legacy, size); ASSERT_ALIGNED_FIELD(struct allocation_header_legacy, unused2); ASSERT_ALIGNED_FIELD(struct allocation_header_legacy, root_size); ASSERT_ALIGNED_FIELD(struct allocation_header_legacy, type_num); ASSERT_ALIGNED_CHECK(struct allocation_header_legacy); UT_COMPILE_ERROR_ON(sizeof(struct allocation_header_legacy) != SIZEOF_LEGACY_ALLOCATION_HEADER_V3); ASSERT_ALIGNED_BEGIN(struct allocation_header_compact); ASSERT_ALIGNED_FIELD(struct allocation_header_compact, size); ASSERT_ALIGNED_FIELD(struct allocation_header_compact, extra); ASSERT_ALIGNED_CHECK(struct allocation_header_compact); UT_COMPILE_ERROR_ON(sizeof(struct allocation_header_compact) != SIZEOF_COMPACT_ALLOCATION_HEADER_V3); ASSERT_ALIGNED_BEGIN(struct ulog); ASSERT_ALIGNED_FIELD(struct ulog, checksum); ASSERT_ALIGNED_FIELD(struct ulog, next); ASSERT_ALIGNED_FIELD(struct ulog, capacity); ASSERT_ALIGNED_FIELD(struct ulog, gen_num); ASSERT_ALIGNED_FIELD(struct ulog, flags); ASSERT_ALIGNED_FIELD(struct ulog, unused); ASSERT_ALIGNED_CHECK(struct ulog); UT_COMPILE_ERROR_ON(sizeof(struct ulog) != SIZEOF_ULOG_V4); ASSERT_ALIGNED_BEGIN(struct ulog_entry_base); ASSERT_ALIGNED_FIELD(struct ulog_entry_base, offset); ASSERT_ALIGNED_CHECK(struct ulog_entry_base); UT_COMPILE_ERROR_ON(sizeof(struct ulog_entry_base) != SIZEOF_ULOG_BASE_ENTRY_V4); ASSERT_ALIGNED_BEGIN(struct ulog_entry_val); ASSERT_ALIGNED_FIELD(struct ulog_entry_val, base); ASSERT_ALIGNED_FIELD(struct ulog_entry_val, value); ASSERT_ALIGNED_CHECK(struct ulog_entry_val); UT_COMPILE_ERROR_ON(sizeof(struct ulog_entry_val) != SIZEOF_ULOG_VAL_ENTRY_V4); ASSERT_ALIGNED_BEGIN(struct ulog_entry_buf); ASSERT_ALIGNED_FIELD(struct ulog_entry_buf, base); ASSERT_ALIGNED_FIELD(struct ulog_entry_buf, checksum); ASSERT_ALIGNED_FIELD(struct ulog_entry_buf, size); ASSERT_ALIGNED_CHECK(struct ulog_entry_buf); UT_COMPILE_ERROR_ON(sizeof(struct ulog_entry_buf) != SIZEOF_ULOG_BUF_ENTRY_V4); ASSERT_ALIGNED_BEGIN(PMEMoid); ASSERT_ALIGNED_FIELD(PMEMoid, pool_uuid_lo); ASSERT_ALIGNED_FIELD(PMEMoid, off); ASSERT_ALIGNED_CHECK(PMEMoid); UT_COMPILE_ERROR_ON(sizeof(PMEMoid) != SIZEOF_PMEMOID_V3); UT_COMPILE_ERROR_ON(sizeof(PMEMmutex) != SIZEOF_LOCK_V3); UT_COMPILE_ERROR_ON(sizeof(PMEMmutex) != sizeof(PMEMmutex_internal)); UT_COMPILE_ERROR_ON(util_alignof(PMEMmutex) != util_alignof(PMEMmutex_internal)); UT_COMPILE_ERROR_ON(util_alignof(PMEMmutex) != util_alignof(os_mutex_t)); UT_COMPILE_ERROR_ON(util_alignof(PMEMmutex) != util_alignof(uint64_t)); UT_COMPILE_ERROR_ON(sizeof(PMEMrwlock) != SIZEOF_LOCK_V3); UT_COMPILE_ERROR_ON(util_alignof(PMEMrwlock) != util_alignof(PMEMrwlock_internal)); UT_COMPILE_ERROR_ON(util_alignof(PMEMrwlock) != util_alignof(os_rwlock_t)); UT_COMPILE_ERROR_ON(util_alignof(PMEMrwlock) != util_alignof(uint64_t)); UT_COMPILE_ERROR_ON(sizeof(PMEMcond) != SIZEOF_LOCK_V3); UT_COMPILE_ERROR_ON(util_alignof(PMEMcond) != util_alignof(PMEMcond_internal)); 
UT_COMPILE_ERROR_ON(util_alignof(PMEMcond) != util_alignof(os_cond_t)); UT_COMPILE_ERROR_ON(util_alignof(PMEMcond) != util_alignof(uint64_t)); UT_COMPILE_ERROR_ON(sizeof(struct foo) != SIZEOF_LIST_ENTRY_V3); UT_COMPILE_ERROR_ON(sizeof(struct list_entry) != SIZEOF_LIST_ENTRY_V3); UT_COMPILE_ERROR_ON(sizeof(struct foo_head) != SIZEOF_LIST_HEAD_V3); UT_COMPILE_ERROR_ON(sizeof(struct list_head) != SIZEOF_LIST_HEAD_V3); ASSERT_ALIGNED_BEGIN(struct lane_layout); ASSERT_ALIGNED_FIELD(struct lane_layout, internal); ASSERT_ALIGNED_FIELD(struct lane_layout, external); ASSERT_ALIGNED_FIELD(struct lane_layout, undo); ASSERT_ALIGNED_CHECK(struct lane_layout); UT_COMPILE_ERROR_ON(sizeof(struct lane_layout) != SIZEOF_LANE_V3); UT_COMPILE_ERROR_ON(LANE_UNDO_SIZE != SIZEOF_LANE_UNDO_SIZE); UT_COMPILE_ERROR_ON(LANE_REDO_EXTERNAL_SIZE != SIZEOF_LANE_REDO_EXTERNAL_SIZE); UT_COMPILE_ERROR_ON(LANE_REDO_INTERNAL_SIZE != SIZEOF_LANE_REDO_INTERNAL_SIZE); DONE(NULL); } #ifdef _MSC_VER /* * Since libpmemobj is linked statically, we need to invoke its ctor/dtor. */ MSVC_CONSTR(libpmemobj_init) MSVC_DESTR(libpmemobj_fini) #endif
8,411
35.103004
79
c
null
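obj_layout.c above freezes the on-media layout by failing the build whenever a structure size or field offset drifts. The same technique can be expressed with standard C11 static assertions, as in the generic sketch below; the record struct and the expected constants are hypothetical, and the test itself uses the PMDK-internal UT_COMPILE_ERROR_ON/ASSERT_ALIGNED_* macros instead.

#include <stdint.h>
#include <stddef.h>
#include <assert.h>

struct my_record_v1 {		/* hypothetical persistent record */
	uint64_t checksum;
	uint64_t size;
	uint8_t data[48];
};

/* expected layout, fixed when the on-media format was released */
static_assert(sizeof(struct my_record_v1) == 64,
	"my_record_v1 size changed -- on-media layout break");
static_assert(offsetof(struct my_record_v1, data) == 16,
	"my_record_v1 data offset changed -- on-media layout break");

int
main(void)
{
	return 0;	/* nothing to run; the checks happen at compile time */
}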
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_tx_add_range_direct/obj_tx_add_range_direct.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2020, Intel Corporation */ /* * obj_tx_add_range_direct.c -- unit test for pmemobj_tx_add_range_direct */ #include <string.h> #include <stddef.h> #include "tx.h" #include "unittest.h" #include "util.h" #include "valgrind_internal.h" #define LAYOUT_NAME "tx_add_range_direct" #define OBJ_SIZE 1024 enum type_number { TYPE_OBJ, TYPE_OBJ_ABORT, }; TOID_DECLARE(struct object, 0); struct object { size_t value; unsigned char data[OBJ_SIZE - sizeof(size_t)]; }; #define VALUE_OFF (offsetof(struct object, value)) #define VALUE_SIZE (sizeof(size_t)) #define DATA_OFF (offsetof(struct object, data)) #define DATA_SIZE (OBJ_SIZE - sizeof(size_t)) #define TEST_VALUE_1 1 #define TEST_VALUE_2 2 /* * do_tx_zalloc -- do tx allocation with specified type number */ static PMEMoid do_tx_zalloc(PMEMobjpool *pop, unsigned type_num) { PMEMoid ret = OID_NULL; TX_BEGIN(pop) { ret = pmemobj_tx_zalloc(sizeof(struct object), type_num); } TX_END return ret; } /* * do_tx_alloc -- do tx allocation and initialize first num bytes */ static PMEMoid do_tx_alloc(PMEMobjpool *pop, uint64_t type_num, uint64_t init_num) { PMEMoid ret = OID_NULL; TX_BEGIN(pop) { ret = pmemobj_tx_alloc(sizeof(struct object), type_num); pmemobj_memset(pop, pmemobj_direct(ret), 0, init_num, 0); } TX_END return ret; } /* * do_tx_add_range_alloc_commit -- call add_range_direct on object allocated * within the same transaction and commit the transaction */ static void do_tx_add_range_alloc_commit(PMEMobjpool *pop) { int ret; TOID(struct object) obj; TX_BEGIN(pop) { TOID_ASSIGN(obj, do_tx_zalloc(pop, TYPE_OBJ)); UT_ASSERT(!TOID_IS_NULL(obj)); char *ptr = (char *)pmemobj_direct(obj.oid); ret = pmemobj_tx_add_range_direct(ptr + VALUE_OFF, VALUE_SIZE); UT_ASSERTeq(ret, 0); D_RW(obj)->value = TEST_VALUE_1; ret = pmemobj_tx_add_range_direct(ptr + DATA_OFF, DATA_SIZE); UT_ASSERTeq(ret, 0); pmemobj_memset_persist(pop, D_RW(obj)->data, TEST_VALUE_2, DATA_SIZE); } TX_ONABORT { UT_ASSERT(0); } TX_END UT_ASSERTeq(D_RO(obj)->value, TEST_VALUE_1); size_t i; for (i = 0; i < DATA_SIZE; i++) UT_ASSERTeq(D_RO(obj)->data[i], TEST_VALUE_2); } /* * do_tx_add_range_alloc_abort -- call add_range_direct on object allocated * within the same transaction and abort the transaction */ static void do_tx_add_range_alloc_abort(PMEMobjpool *pop) { int ret; TOID(struct object) obj; TX_BEGIN(pop) { TOID_ASSIGN(obj, do_tx_zalloc(pop, TYPE_OBJ_ABORT)); UT_ASSERT(!TOID_IS_NULL(obj)); char *ptr = (char *)pmemobj_direct(obj.oid); ret = pmemobj_tx_add_range_direct(ptr + VALUE_OFF, VALUE_SIZE); UT_ASSERTeq(ret, 0); D_RW(obj)->value = TEST_VALUE_1; ret = pmemobj_tx_add_range_direct(ptr + DATA_OFF, DATA_SIZE); UT_ASSERTeq(ret, 0); pmemobj_memset_persist(pop, D_RW(obj)->data, TEST_VALUE_2, DATA_SIZE); pmemobj_tx_abort(-1); } TX_ONCOMMIT { UT_ASSERT(0); } TX_END TOID_ASSIGN(obj, POBJ_FIRST_TYPE_NUM(pop, TYPE_OBJ_ABORT)); UT_ASSERT(TOID_IS_NULL(obj)); } /* * do_tx_add_range_twice_commit -- call add_range_direct one the same area * twice and commit the transaction */ static void do_tx_add_range_twice_commit(PMEMobjpool *pop) { int ret; TOID(struct object) obj; TOID_ASSIGN(obj, do_tx_zalloc(pop, TYPE_OBJ)); UT_ASSERT(!TOID_IS_NULL(obj)); TX_BEGIN(pop) { char *ptr = (char *)pmemobj_direct(obj.oid); ret = pmemobj_tx_add_range_direct(ptr + VALUE_OFF, VALUE_SIZE); UT_ASSERTeq(ret, 0); D_RW(obj)->value = TEST_VALUE_1; ret = pmemobj_tx_add_range_direct(ptr + VALUE_OFF, VALUE_SIZE); UT_ASSERTeq(ret, 0); D_RW(obj)->value = TEST_VALUE_2; } TX_ONABORT { 
UT_ASSERT(0); } TX_END UT_ASSERTeq(D_RO(obj)->value, TEST_VALUE_2); } /* * do_tx_add_range_twice_abort -- call add_range_direct one the same area * twice and abort the transaction */ static void do_tx_add_range_twice_abort(PMEMobjpool *pop) { int ret; TOID(struct object) obj; TOID_ASSIGN(obj, do_tx_zalloc(pop, TYPE_OBJ)); UT_ASSERT(!TOID_IS_NULL(obj)); TX_BEGIN(pop) { char *ptr = (char *)pmemobj_direct(obj.oid); ret = pmemobj_tx_add_range_direct(ptr + VALUE_OFF, VALUE_SIZE); UT_ASSERTeq(ret, 0); D_RW(obj)->value = TEST_VALUE_1; ret = pmemobj_tx_add_range_direct(ptr + VALUE_OFF, VALUE_SIZE); UT_ASSERTeq(ret, 0); D_RW(obj)->value = TEST_VALUE_2; pmemobj_tx_abort(-1); } TX_ONCOMMIT { UT_ASSERT(0); } TX_END UT_ASSERTeq(D_RO(obj)->value, 0); } /* * do_tx_add_range_abort_after_nested -- call add_range_direct and * commit the tx */ static void do_tx_add_range_abort_after_nested(PMEMobjpool *pop) { int ret; TOID(struct object) obj1; TOID(struct object) obj2; TOID_ASSIGN(obj1, do_tx_zalloc(pop, TYPE_OBJ)); TOID_ASSIGN(obj2, do_tx_zalloc(pop, TYPE_OBJ)); TX_BEGIN(pop) { char *ptr1 = (char *)pmemobj_direct(obj1.oid); ret = pmemobj_tx_add_range_direct(ptr1 + VALUE_OFF, VALUE_SIZE); UT_ASSERTeq(ret, 0); D_RW(obj1)->value = TEST_VALUE_1; TX_BEGIN(pop) { char *ptr2 = (char *)pmemobj_direct(obj2.oid); ret = pmemobj_tx_add_range_direct(ptr2 + DATA_OFF, DATA_SIZE); UT_ASSERTeq(ret, 0); pmemobj_memset_persist(pop, D_RW(obj2)->data, TEST_VALUE_2, DATA_SIZE); } TX_ONABORT { UT_ASSERT(0); } TX_END pmemobj_tx_abort(-1); } TX_ONCOMMIT { UT_ASSERT(0); } TX_END UT_ASSERTeq(D_RO(obj1)->value, 0); size_t i; for (i = 0; i < DATA_SIZE; i++) UT_ASSERTeq(D_RO(obj2)->data[i], 0); } /* * do_tx_add_range_abort_nested -- call add_range_direct and * commit the tx */ static void do_tx_add_range_abort_nested(PMEMobjpool *pop) { int ret; TOID(struct object) obj1; TOID(struct object) obj2; TOID_ASSIGN(obj1, do_tx_zalloc(pop, TYPE_OBJ)); TOID_ASSIGN(obj2, do_tx_zalloc(pop, TYPE_OBJ)); TX_BEGIN(pop) { char *ptr1 = (char *)pmemobj_direct(obj1.oid); ret = pmemobj_tx_add_range_direct(ptr1 + VALUE_OFF, VALUE_SIZE); UT_ASSERTeq(ret, 0); D_RW(obj1)->value = TEST_VALUE_1; TX_BEGIN(pop) { char *ptr2 = (char *)pmemobj_direct(obj2.oid); ret = pmemobj_tx_add_range_direct(ptr2 + DATA_OFF, DATA_SIZE); UT_ASSERTeq(ret, 0); pmemobj_memset_persist(pop, D_RW(obj2)->data, TEST_VALUE_2, DATA_SIZE); pmemobj_tx_abort(-1); } TX_ONCOMMIT { UT_ASSERT(0); } TX_END } TX_ONCOMMIT { UT_ASSERT(0); } TX_END UT_ASSERTeq(D_RO(obj1)->value, 0); size_t i; for (i = 0; i < DATA_SIZE; i++) UT_ASSERTeq(D_RO(obj2)->data[i], 0); } /* * do_tx_add_range_commit_nested -- call add_range_direct and commit the tx */ static void do_tx_add_range_commit_nested(PMEMobjpool *pop) { int ret; TOID(struct object) obj1; TOID(struct object) obj2; TOID_ASSIGN(obj1, do_tx_zalloc(pop, TYPE_OBJ)); TOID_ASSIGN(obj2, do_tx_zalloc(pop, TYPE_OBJ)); TX_BEGIN(pop) { char *ptr1 = (char *)pmemobj_direct(obj1.oid); ret = pmemobj_tx_add_range_direct(ptr1 + VALUE_OFF, VALUE_SIZE); UT_ASSERTeq(ret, 0); D_RW(obj1)->value = TEST_VALUE_1; TX_BEGIN(pop) { char *ptr2 = (char *)pmemobj_direct(obj2.oid); ret = pmemobj_tx_add_range_direct(ptr2 + DATA_OFF, DATA_SIZE); UT_ASSERTeq(ret, 0); pmemobj_memset_persist(pop, D_RW(obj2)->data, TEST_VALUE_2, DATA_SIZE); } TX_ONABORT { UT_ASSERT(0); } TX_END } TX_ONABORT { UT_ASSERT(0); } TX_END UT_ASSERTeq(D_RO(obj1)->value, TEST_VALUE_1); size_t i; for (i = 0; i < DATA_SIZE; i++) UT_ASSERTeq(D_RO(obj2)->data[i], TEST_VALUE_2); } /* * do_tx_add_range_abort -- call 
add_range_direct and abort the tx */ static void do_tx_add_range_abort(PMEMobjpool *pop) { int ret; TOID(struct object) obj; TOID_ASSIGN(obj, do_tx_zalloc(pop, TYPE_OBJ)); TX_BEGIN(pop) { char *ptr = (char *)pmemobj_direct(obj.oid); ret = pmemobj_tx_add_range_direct(ptr + VALUE_OFF, VALUE_SIZE); UT_ASSERTeq(ret, 0); D_RW(obj)->value = TEST_VALUE_1; pmemobj_tx_abort(-1); } TX_ONCOMMIT { UT_ASSERT(0); } TX_END UT_ASSERTeq(D_RO(obj)->value, 0); } /* * do_tx_add_range_commit -- call add_range_direct and commit tx */ static void do_tx_add_range_commit(PMEMobjpool *pop) { int ret; TOID(struct object) obj; TOID_ASSIGN(obj, do_tx_zalloc(pop, TYPE_OBJ)); TX_BEGIN(pop) { char *ptr = (char *)pmemobj_direct(obj.oid); ret = pmemobj_tx_add_range_direct(ptr + VALUE_OFF, VALUE_SIZE); UT_ASSERTeq(ret, 0); D_RW(obj)->value = TEST_VALUE_1; } TX_ONABORT { UT_ASSERT(0); } TX_END UT_ASSERTeq(D_RO(obj)->value, TEST_VALUE_1); } /* * do_tx_xadd_range_no_flush_commit -- call xadd_range_direct with * POBJ_XADD_NO_FLUSH flag set and commit tx */ static void do_tx_xadd_range_no_flush_commit(PMEMobjpool *pop) { int ret; TOID(struct object) obj; TOID_ASSIGN(obj, do_tx_zalloc(pop, TYPE_OBJ)); TX_BEGIN(pop) { char *ptr = (char *)pmemobj_direct(obj.oid); ret = pmemobj_tx_xadd_range_direct(ptr + VALUE_OFF, VALUE_SIZE, POBJ_XADD_NO_FLUSH); UT_ASSERTeq(ret, 0); D_RW(obj)->value = TEST_VALUE_1; /* let pmemcheck find we didn't flush it */ } TX_ONABORT { UT_ASSERT(0); } TX_END UT_ASSERTeq(D_RO(obj)->value, TEST_VALUE_1); } /* * do_tx_xadd_range_no_snapshot_commit -- call xadd_range_direct with * POBJ_XADD_NO_SNAPSHOT flag, commit the transaction */ static void do_tx_xadd_range_no_snapshot_commit(PMEMobjpool *pop) { int ret; TOID(struct object) obj; TOID_ASSIGN(obj, do_tx_zalloc(pop, TYPE_OBJ)); TX_BEGIN(pop) { char *ptr = (char *)pmemobj_direct(obj.oid); ret = pmemobj_tx_xadd_range_direct(ptr + VALUE_OFF, VALUE_SIZE, POBJ_XADD_NO_SNAPSHOT); UT_ASSERTeq(ret, 0); D_RW(obj)->value = TEST_VALUE_1; } TX_ONABORT { UT_ASSERT(0); } TX_END UT_ASSERTeq(D_RO(obj)->value, TEST_VALUE_1); } /* * do_tx_xadd_range_no_snapshot_abort -- call xadd_range_direct with * POBJ_XADD_NO_SNAPSHOT flag, modify the value, abort the transaction */ static void do_tx_xadd_range_no_snapshot_abort(PMEMobjpool *pop) { int ret; TOID(struct object) obj; TOID_ASSIGN(obj, do_tx_zalloc(pop, TYPE_OBJ)); D_RW(obj)->value = TEST_VALUE_1; TX_BEGIN(pop) { char *ptr = (char *)pmemobj_direct(obj.oid); ret = pmemobj_tx_xadd_range_direct(ptr + VALUE_OFF, VALUE_SIZE, POBJ_XADD_NO_SNAPSHOT); UT_ASSERTeq(ret, 0); D_RW(obj)->value = TEST_VALUE_2; pmemobj_tx_abort(-1); } TX_ONCOMMIT { UT_ASSERT(0); } TX_END /* * value added with NO_SNAPSHOT flag should NOT be rolled back * after abort */ UT_ASSERTeq(D_RO(obj)->value, TEST_VALUE_2); } /* * do_tx_xadd_range_no_uninit_check -- call xdd_range_direct for * initialized memory with POBJ_XADD_ASSUME_INITIALIZED flag set and commit the * tx */ static void do_tx_xadd_range_no_uninit_check_commit(PMEMobjpool *pop) { int ret; TOID(struct object) obj; TOID_ASSIGN(obj, do_tx_zalloc(pop, TYPE_OBJ)); TX_BEGIN(pop) { char *ptr = (char *)pmemobj_direct(obj.oid); ret = pmemobj_tx_xadd_range_direct(ptr + VALUE_OFF, VALUE_SIZE, POBJ_XADD_ASSUME_INITIALIZED); UT_ASSERTeq(ret, 0); D_RW(obj)->value = TEST_VALUE_1; } TX_ONABORT { UT_ASSERT(0); } TX_END UT_ASSERTeq(D_RO(obj)->value, TEST_VALUE_1); } /* * do_tx_xadd_range_no_uninit_check -- call xadd_range_direct for * uninitialized memory with POBJ_XADD_ASSUME_INITIALIZED flag set and commit * the tx */ 
static void do_tx_xadd_range_no_uninit_check_commit_uninit(PMEMobjpool *pop) { int ret; TOID(struct object) obj; TOID_ASSIGN(obj, do_tx_alloc(pop, TYPE_OBJ, 0)); TX_BEGIN(pop) { char *ptr = (char *)pmemobj_direct(obj.oid); ret = pmemobj_tx_xadd_range_direct(ptr + VALUE_OFF, VALUE_SIZE, POBJ_XADD_ASSUME_INITIALIZED); UT_ASSERTeq(ret, 0); ret = pmemobj_tx_xadd_range_direct(ptr + DATA_OFF, DATA_SIZE, POBJ_XADD_ASSUME_INITIALIZED); UT_ASSERTeq(ret, 0); D_RW(obj)->value = TEST_VALUE_1; D_RW(obj)->data[256] = TEST_VALUE_2; } TX_ONABORT { UT_ASSERT(0); } TX_END UT_ASSERTeq(D_RO(obj)->value, TEST_VALUE_1); UT_ASSERTeq(D_RO(obj)->data[256], TEST_VALUE_2); } /* * do_tx_xadd_range_no_uninit_check -- call xadd_range_direct for * partially uninitialized memory with POBJ_XADD_ASSUME_INITIALIZED flag set * only for uninitialized part and commit the tx */ static void do_tx_xadd_range_no_uninit_check_commit_part_uninit(PMEMobjpool *pop) { int ret; TOID(struct object) obj; TOID_ASSIGN(obj, do_tx_alloc(pop, TYPE_OBJ, VALUE_SIZE)); TX_BEGIN(pop) { char *ptr = (char *)pmemobj_direct(obj.oid); ret = pmemobj_tx_add_range_direct(ptr + VALUE_OFF, VALUE_SIZE); UT_ASSERTeq(ret, 0); ret = pmemobj_tx_xadd_range_direct(ptr + DATA_OFF, DATA_SIZE, POBJ_XADD_ASSUME_INITIALIZED); UT_ASSERTeq(ret, 0); D_RW(obj)->value = TEST_VALUE_1; D_RW(obj)->data[256] = TEST_VALUE_2; } TX_ONABORT { UT_ASSERT(0); } TX_END UT_ASSERTeq(D_RO(obj)->value, TEST_VALUE_1); UT_ASSERTeq(D_RO(obj)->data[256], TEST_VALUE_2); } /* * do_tx_add_range_no_uninit_check -- call add_range_direct for * partially uninitialized memory. */ static void do_tx_add_range_no_uninit_check_commit_no_flag(PMEMobjpool *pop) { int ret; TOID(struct object) obj; TOID_ASSIGN(obj, do_tx_alloc(pop, TYPE_OBJ, VALUE_SIZE)); TX_BEGIN(pop) { char *ptr = (char *)pmemobj_direct(obj.oid); ret = pmemobj_tx_add_range_direct(ptr + VALUE_OFF, VALUE_SIZE); UT_ASSERTeq(ret, 0); ret = pmemobj_tx_add_range_direct(ptr + DATA_OFF, DATA_SIZE); UT_ASSERTeq(ret, 0); D_RW(obj)->value = TEST_VALUE_1; D_RW(obj)->data[256] = TEST_VALUE_2; } TX_ONABORT { UT_ASSERT(0); } TX_END UT_ASSERTeq(D_RO(obj)->value, TEST_VALUE_1); UT_ASSERTeq(D_RO(obj)->data[256], TEST_VALUE_2); } /* * do_tx_xadd_range_no_uninit_check_abort -- call pmemobj_tx_range with * POBJ_XADD_ASSUME_INITIALIZED flag, modify the value inside aborted * transaction */ static void do_tx_xadd_range_no_uninit_check_abort(PMEMobjpool *pop) { int ret; TOID(struct object) obj; TOID_ASSIGN(obj, do_tx_alloc(pop, TYPE_OBJ, 0)); TX_BEGIN(pop) { char *ptr = (char *)pmemobj_direct(obj.oid); ret = pmemobj_tx_xadd_range_direct(ptr + VALUE_OFF, VALUE_SIZE, POBJ_XADD_ASSUME_INITIALIZED); UT_ASSERTeq(ret, 0); ret = pmemobj_tx_xadd_range_direct(ptr + DATA_OFF, DATA_SIZE, POBJ_XADD_ASSUME_INITIALIZED); UT_ASSERTeq(ret, 0); D_RW(obj)->value = TEST_VALUE_1; D_RW(obj)->data[256] = TEST_VALUE_2; pmemobj_tx_abort(-1); } TX_ONCOMMIT { UT_ASSERT(0); } TX_END } /* * do_tx_commit_and_abort -- use range cache, commit and then abort to make * sure that it won't affect previously modified data. 
*/ static void do_tx_commit_and_abort(PMEMobjpool *pop) { TOID(struct object) obj; TOID_ASSIGN(obj, do_tx_zalloc(pop, TYPE_OBJ)); TX_BEGIN(pop) { TX_SET(obj, value, TEST_VALUE_1); /* this will land in cache */ } TX_ONABORT { UT_ASSERT(0); } TX_END TX_BEGIN(pop) { pmemobj_tx_abort(-1); } TX_ONCOMMIT { UT_ASSERT(0); } TX_END UT_ASSERTeq(D_RO(obj)->value, TEST_VALUE_1); } /* * test_add_direct_macros -- test TX_ADD_DIRECT, TX_ADD_FIELD_DIRECT and * TX_SET_DIRECT */ static void test_add_direct_macros(PMEMobjpool *pop) { TOID(struct object) obj; TOID_ASSIGN(obj, do_tx_zalloc(pop, TYPE_OBJ)); TX_BEGIN(pop) { struct object *o = D_RW(obj); TX_SET_DIRECT(o, value, TEST_VALUE_1); } TX_ONABORT { UT_ASSERT(0); } TX_END UT_ASSERTeq(D_RO(obj)->value, TEST_VALUE_1); TX_BEGIN(pop) { struct object *o = D_RW(obj); TX_ADD_DIRECT(o); o->value = TEST_VALUE_2; } TX_ONABORT { UT_ASSERT(0); } TX_END UT_ASSERTeq(D_RO(obj)->value, TEST_VALUE_2); TX_BEGIN(pop) { struct object *o = D_RW(obj); TX_ADD_FIELD_DIRECT(o, value); o->value = TEST_VALUE_1; } TX_ONABORT { UT_ASSERT(0); } TX_END UT_ASSERTeq(D_RO(obj)->value, TEST_VALUE_1); } #define MAX_CACHED_RANGES 100 /* * test_tx_corruption_bug -- test whether tx_adds for small objects from one * transaction does NOT leak to the next transaction */ static void test_tx_corruption_bug(PMEMobjpool *pop) { TOID(struct object) obj; TOID_ASSIGN(obj, do_tx_zalloc(pop, TYPE_OBJ)); struct object *o = D_RW(obj); unsigned char i; UT_COMPILE_ERROR_ON(1.5 * MAX_CACHED_RANGES > 255); TX_BEGIN(pop) { for (i = 0; i < 1.5 * MAX_CACHED_RANGES; ++i) { TX_ADD_DIRECT(&o->data[i]); o->data[i] = i; } } TX_ONABORT { UT_ASSERT(0); } TX_END for (i = 0; i < 1.5 * MAX_CACHED_RANGES; ++i) UT_ASSERTeq((unsigned char)o->data[i], i); TX_BEGIN(pop) { for (i = 0; i < 0.1 * MAX_CACHED_RANGES; ++i) { TX_ADD_DIRECT(&o->data[i]); o->data[i] = i + 10; } pmemobj_tx_abort(EINVAL); } TX_ONCOMMIT { UT_ASSERT(0); } TX_END for (i = 0; i < 1.5 * MAX_CACHED_RANGES; ++i) UT_ASSERTeq((unsigned char)o->data[i], i); pmemobj_free(&obj.oid); } static void do_tx_add_range_too_large(PMEMobjpool *pop) { TOID(struct object) obj; TOID_ASSIGN(obj, do_tx_zalloc(pop, TYPE_OBJ)); int ret = 0; TX_BEGIN(pop) { ret = pmemobj_tx_add_range_direct(pmemobj_direct(obj.oid), PMEMOBJ_MAX_ALLOC_SIZE + 1); } TX_ONCOMMIT { UT_ASSERT(0); } TX_ONABORT { UT_ASSERTeq(errno, EINVAL); UT_ASSERTeq(ret, 0); } TX_END errno = 0; ret = 0; TX_BEGIN(pop) { ret = pmemobj_tx_xadd_range_direct(pmemobj_direct(obj.oid), PMEMOBJ_MAX_ALLOC_SIZE + 1, POBJ_XADD_NO_ABORT); } TX_ONCOMMIT { UT_ASSERTeq(errno, EINVAL); UT_ASSERTeq(ret, EINVAL); } TX_ONABORT { UT_ASSERT(0); } TX_END errno = 0; ret = 0; TX_BEGIN(pop) { pmemobj_tx_set_failure_behavior(POBJ_TX_FAILURE_RETURN); ret = pmemobj_tx_add_range_direct(pmemobj_direct(obj.oid), PMEMOBJ_MAX_ALLOC_SIZE + 1); } TX_ONCOMMIT { UT_ASSERTeq(errno, EINVAL); UT_ASSERTeq(ret, EINVAL); } TX_ONABORT { UT_ASSERT(0); } TX_END errno = 0; ret = 0; TX_BEGIN(pop) { pmemobj_tx_set_failure_behavior(POBJ_TX_FAILURE_RETURN); ret = pmemobj_tx_xadd_range_direct(pmemobj_direct(obj.oid), PMEMOBJ_MAX_ALLOC_SIZE + 1, 0); } TX_ONCOMMIT { UT_ASSERTeq(errno, EINVAL); UT_ASSERTeq(ret, EINVAL); } TX_ONABORT { UT_ASSERT(0); } TX_END errno = 0; } static void do_tx_add_range_lots_of_small_snapshots(PMEMobjpool *pop) { size_t s = TX_DEFAULT_RANGE_CACHE_SIZE * 2; size_t snapshot_s = 8; PMEMoid obj; int ret = pmemobj_zalloc(pop, &obj, s, 0); UT_ASSERTeq(ret, 0); TX_BEGIN(pop) { for (size_t n = 0; n < s; n += snapshot_s) { void *addr = (void 
*)((size_t)pmemobj_direct(obj) + n); pmemobj_tx_add_range_direct(addr, snapshot_s); } } TX_ONABORT { UT_ASSERT(0); } TX_END } static void do_tx_add_cache_overflowing_range(PMEMobjpool *pop) { /* * This test adds snapshot to the cache, but in way that results in * one of the add_range being split into two caches. */ size_t s = TX_DEFAULT_RANGE_CACHE_SIZE * 2; size_t snapshot_s = TX_DEFAULT_RANGE_CACHE_THRESHOLD - 8; PMEMoid obj; int ret = pmemobj_zalloc(pop, &obj, s, 0); UT_ASSERTeq(ret, 0); TX_BEGIN(pop) { size_t n = 0; while (n != s) { if (n + snapshot_s > s) snapshot_s = s - n; void *addr = (void *)((size_t)pmemobj_direct(obj) + n); pmemobj_tx_add_range_direct(addr, snapshot_s); memset(addr, 0xc, snapshot_s); n += snapshot_s; } pmemobj_tx_abort(0); } TX_ONCOMMIT { UT_ASSERT(0); } TX_END UT_ASSERT(util_is_zeroed(pmemobj_direct(obj), s)); UT_ASSERTne(errno, 0); errno = 0; pmemobj_free(&obj); } int main(int argc, char *argv[]) { START(argc, argv, "obj_tx_add_range_direct"); util_init(); if (argc != 2) UT_FATAL("usage: %s [file]", argv[0]); PMEMobjpool *pop; if ((pop = pmemobj_create(argv[1], LAYOUT_NAME, PMEMOBJ_MIN_POOL * 4, S_IWUSR | S_IRUSR)) == NULL) UT_FATAL("!pmemobj_create"); do_tx_add_range_commit(pop); VALGRIND_WRITE_STATS; do_tx_add_range_abort(pop); VALGRIND_WRITE_STATS; do_tx_add_range_commit_nested(pop); VALGRIND_WRITE_STATS; do_tx_add_range_abort_nested(pop); VALGRIND_WRITE_STATS; do_tx_add_range_abort_after_nested(pop); VALGRIND_WRITE_STATS; do_tx_add_range_twice_commit(pop); VALGRIND_WRITE_STATS; do_tx_add_range_twice_abort(pop); VALGRIND_WRITE_STATS; do_tx_add_range_alloc_commit(pop); VALGRIND_WRITE_STATS; do_tx_add_range_alloc_abort(pop); VALGRIND_WRITE_STATS; do_tx_commit_and_abort(pop); VALGRIND_WRITE_STATS; test_add_direct_macros(pop); VALGRIND_WRITE_STATS; test_tx_corruption_bug(pop); VALGRIND_WRITE_STATS; do_tx_add_range_too_large(pop); VALGRIND_WRITE_STATS; do_tx_add_range_lots_of_small_snapshots(pop); VALGRIND_WRITE_STATS; do_tx_add_cache_overflowing_range(pop); VALGRIND_WRITE_STATS; do_tx_xadd_range_no_snapshot_commit(pop); VALGRIND_WRITE_STATS; do_tx_xadd_range_no_snapshot_abort(pop); VALGRIND_WRITE_STATS; do_tx_xadd_range_no_uninit_check_commit(pop); VALGRIND_WRITE_STATS; do_tx_xadd_range_no_uninit_check_commit_uninit(pop); VALGRIND_WRITE_STATS; do_tx_xadd_range_no_uninit_check_commit_part_uninit(pop); VALGRIND_WRITE_STATS; do_tx_xadd_range_no_uninit_check_abort(pop); VALGRIND_WRITE_STATS; do_tx_add_range_no_uninit_check_commit_no_flag(pop); VALGRIND_WRITE_STATS; do_tx_xadd_range_no_flush_commit(pop); pmemobj_close(pop); DONE(NULL); }
20,975
22.177901
79
c
null
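The core pattern stressed by obj_tx_add_range_direct.c above is: snapshot a range through a direct pointer before modifying it inside a transaction, so that an abort rolls the change back. A minimal sketch of that pattern follows; the pool path and layout name are hypothetical.

#include <libpmemobj.h>
#include <stdio.h>

struct my_root {	/* hypothetical root object */
	uint64_t counter;
};

int
main(void)
{
	PMEMobjpool *pop = pmemobj_create("/mnt/pmem/tx.pool" /* hypothetical */,
	    "tx_direct", PMEMOBJ_MIN_POOL, 0666);
	if (pop == NULL) {
		perror("pmemobj_create");
		return 1;
	}

	PMEMoid root = pmemobj_root(pop, sizeof(struct my_root));
	struct my_root *rp = pmemobj_direct(root);

	TX_BEGIN(pop) {
		/* undo-log the field before writing through the raw pointer */
		pmemobj_tx_add_range_direct(&rp->counter, sizeof(rp->counter));
		rp->counter++;
	} TX_ONABORT {
		fprintf(stderr, "transaction aborted\n");
	} TX_END

	printf("counter = %lu\n", (unsigned long)rp->counter);

	pmemobj_close(pop);
	return 0;
}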
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_many_size_allocs/obj_many_size_allocs.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2018, Intel Corporation */ /* * obj_many_size_allocs.c -- allocation of many objects with different sizes * */ #include <stddef.h> #include "unittest.h" #include "heap.h" #define LAYOUT_NAME "many_size_allocs" #define TEST_ALLOC_SIZE 2048 #define LAZY_LOAD_SIZE 10 #define LAZY_LOAD_BIG_SIZE 150 struct cargs { size_t size; }; static int test_constructor(PMEMobjpool *pop, void *addr, void *args) { struct cargs *a = args; /* do not use pmem_memset_persit() here */ pmemobj_memset_persist(pop, addr, a->size % 256, a->size); return 0; } static PMEMobjpool * test_allocs(PMEMobjpool *pop, const char *path) { PMEMoid *oid = MALLOC(sizeof(PMEMoid) * TEST_ALLOC_SIZE); if (pmemobj_alloc(pop, &oid[0], 0, 0, NULL, NULL) == 0) UT_FATAL("pmemobj_alloc(0) succeeded"); for (unsigned i = 1; i < TEST_ALLOC_SIZE; ++i) { struct cargs args = { i }; if (pmemobj_alloc(pop, &oid[i], i, 0, test_constructor, &args) != 0) UT_FATAL("!pmemobj_alloc"); UT_ASSERT(!OID_IS_NULL(oid[i])); } pmemobj_close(pop); UT_ASSERT(pmemobj_check(path, LAYOUT_NAME) == 1); UT_ASSERT((pop = pmemobj_open(path, LAYOUT_NAME)) != NULL); for (int i = 1; i < TEST_ALLOC_SIZE; ++i) { pmemobj_free(&oid[i]); UT_ASSERT(OID_IS_NULL(oid[i])); } FREE(oid); return pop; } static PMEMobjpool * test_lazy_load(PMEMobjpool *pop, const char *path) { PMEMoid oid[3]; int ret = pmemobj_alloc(pop, &oid[0], LAZY_LOAD_SIZE, 0, NULL, NULL); UT_ASSERTeq(ret, 0); ret = pmemobj_alloc(pop, &oid[1], LAZY_LOAD_SIZE, 0, NULL, NULL); UT_ASSERTeq(ret, 0); ret = pmemobj_alloc(pop, &oid[2], LAZY_LOAD_SIZE, 0, NULL, NULL); UT_ASSERTeq(ret, 0); pmemobj_close(pop); UT_ASSERT((pop = pmemobj_open(path, LAYOUT_NAME)) != NULL); pmemobj_free(&oid[1]); ret = pmemobj_alloc(pop, &oid[1], LAZY_LOAD_BIG_SIZE, 0, NULL, NULL); UT_ASSERTeq(ret, 0); return pop; } #define ALLOC_BLOCK_SIZE 64 #define MAX_BUCKET_MAP_ENTRIES (RUN_DEFAULT_SIZE / ALLOC_BLOCK_SIZE) static void test_all_classes(PMEMobjpool *pop) { for (unsigned i = 1; i <= MAX_BUCKET_MAP_ENTRIES; ++i) { int err; int nallocs = 0; while ((err = pmemobj_alloc(pop, NULL, i * ALLOC_BLOCK_SIZE, 0, NULL, NULL)) == 0) { nallocs++; } UT_ASSERT(nallocs > 0); PMEMoid iter, niter; POBJ_FOREACH_SAFE(pop, iter, niter) { pmemobj_free(&iter); } } } int main(int argc, char *argv[]) { START(argc, argv, "obj_many_size_allocs"); if (argc != 2) UT_FATAL("usage: %s file-name", argv[0]); const char *path = argv[1]; PMEMobjpool *pop = NULL; if ((pop = pmemobj_create(path, LAYOUT_NAME, 0, S_IWUSR | S_IRUSR)) == NULL) UT_FATAL("!pmemobj_create: %s", path); pop = test_lazy_load(pop, path); pop = test_allocs(pop, path); test_all_classes(pop); pmemobj_close(pop); DONE(NULL); }
2,837
20.179104
76
c
null
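obj_many_size_allocs.c above allocates objects of many sizes through a constructor callback, which initializes and persists the new object before the allocation becomes visible. A minimal sketch of that constructor-based atomic allocation follows; the pool path, layout name, and init_args struct are hypothetical.

#include <libpmemobj.h>
#include <stdio.h>

struct init_args {	/* hypothetical constructor argument */
	unsigned char fill;
	size_t size;
};

static int
fill_constructor(PMEMobjpool *pop, void *addr, void *arg)
{
	struct init_args *a = arg;
	pmemobj_memset_persist(pop, addr, a->fill, a->size);
	return 0;	/* non-zero would cancel the allocation */
}

int
main(void)
{
	PMEMobjpool *pop = pmemobj_create("/mnt/pmem/alloc.pool" /* hypothetical */,
	    "many_size", PMEMOBJ_MIN_POOL, 0666);
	if (pop == NULL) {
		perror("pmemobj_create");
		return 1;
	}

	struct init_args args = { 0x5a, 256 };
	PMEMoid oid;
	if (pmemobj_alloc(pop, &oid, args.size, 0, fill_constructor, &args))
		perror("pmemobj_alloc");
	else
		printf("allocated %zu usable bytes\n",
		    pmemobj_alloc_usable_size(oid));

	pmemobj_free(&oid);
	pmemobj_close(pop);
	return 0;
}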
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/pmem2_integration/pmem2_integration.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2019-2020, Intel Corporation */ /* * pmem2_integration.c -- pmem2 integration tests */ #include "libpmem2.h" #include "unittest.h" #include "rand.h" #include "ut_pmem2.h" #include "ut_pmem2_setup_integration.h" #define N_GRANULARITIES 3 /* BYTE, CACHE_LINE, PAGE */ /* * map_invalid -- try to mapping memory with invalid config */ static void map_invalid(struct pmem2_config *cfg, struct pmem2_source *src, int result) { struct pmem2_map *map = (struct pmem2_map *)0x7; int ret = pmem2_map(cfg, src, &map); UT_PMEM2_EXPECT_RETURN(ret, result); UT_ASSERTeq(map, NULL); } /* * map_valid -- return valid mapped pmem2_map and validate mapped memory length */ static struct pmem2_map * map_valid(struct pmem2_config *cfg, struct pmem2_source *src, size_t size) { struct pmem2_map *map = NULL; int ret = pmem2_map(cfg, src, &map); UT_PMEM2_EXPECT_RETURN(ret, 0); UT_ASSERTne(map, NULL); UT_ASSERTeq(pmem2_map_get_size(map), size); return map; } /* * test_reuse_cfg -- map pmem2_map twice using the same pmem2_config */ static int test_reuse_cfg(const struct test_case *tc, int argc, char *argv[]) { if (argc < 1) UT_FATAL("usage: test_reuse_cfg <file>"); char *file = argv[0]; int fd = OPEN(file, O_RDWR); struct pmem2_config *cfg; struct pmem2_source *src; PMEM2_PREPARE_CONFIG_INTEGRATION(&cfg, &src, fd, PMEM2_GRANULARITY_PAGE); size_t size; UT_ASSERTeq(pmem2_source_size(src, &size), 0); struct pmem2_map *map1 = map_valid(cfg, src, size); struct pmem2_map *map2 = map_valid(cfg, src, size); /* cleanup after the test */ pmem2_unmap(&map2); pmem2_unmap(&map1); pmem2_config_delete(&cfg); pmem2_source_delete(&src); CLOSE(fd); return 1; } /* * test_reuse_cfg_with_diff_fd -- map pmem2_map using the same pmem2_config * with changed file descriptor */ static int test_reuse_cfg_with_diff_fd(const struct test_case *tc, int argc, char *argv[]) { if (argc < 2) UT_FATAL("usage: test_reuse_cfg_with_diff_fd <file> <file2>"); char *file1 = argv[0]; int fd1 = OPEN(file1, O_RDWR); struct pmem2_config *cfg; struct pmem2_source *src; PMEM2_PREPARE_CONFIG_INTEGRATION(&cfg, &src, fd1, PMEM2_GRANULARITY_PAGE); size_t size1; UT_ASSERTeq(pmem2_source_size(src, &size1), 0); struct pmem2_map *map1 = map_valid(cfg, src, size1); char *file2 = argv[1]; int fd2 = OPEN(file2, O_RDWR); /* set another valid file descriptor in source */ struct pmem2_source *src2; UT_ASSERTeq(pmem2_source_from_fd(&src2, fd2), 0); size_t size2; UT_ASSERTeq(pmem2_source_size(src2, &size2), 0); struct pmem2_map *map2 = map_valid(cfg, src2, size2); /* cleanup after the test */ pmem2_unmap(&map2); CLOSE(fd2); pmem2_unmap(&map1); pmem2_config_delete(&cfg); pmem2_source_delete(&src); pmem2_source_delete(&src2); CLOSE(fd1); return 2; } /* * test_register_pmem -- map, use and unmap memory */ static int test_register_pmem(const struct test_case *tc, int argc, char *argv[]) { if (argc < 1) UT_FATAL("usage: test_register_pmem <file>"); char *file = argv[0]; int fd = OPEN(file, O_RDWR); char *word = "XXXXXXXX"; struct pmem2_config *cfg; struct pmem2_source *src; PMEM2_PREPARE_CONFIG_INTEGRATION(&cfg, &src, fd, PMEM2_GRANULARITY_PAGE); size_t size; UT_ASSERTeq(pmem2_source_size(src, &size), 0); struct pmem2_map *map = map_valid(cfg, src, size); char *addr = pmem2_map_get_address(map); size_t length = strlen(word); /* write some data in mapped memory without persisting data */ memcpy(addr, word, length); /* cleanup after the test */ pmem2_unmap(&map); pmem2_config_delete(&cfg); pmem2_source_delete(&src); CLOSE(fd); return 1; } 
/* * test_use_misc_lens_and_offsets -- test with multiple offsets and lengths */ static int test_use_misc_lens_and_offsets(const struct test_case *tc, int argc, char *argv[]) { if (argc < 1) UT_FATAL("usage: test_use_misc_lens_and_offsets <file>"); char *file = argv[0]; int fd = OPEN(file, O_RDWR); struct pmem2_config *cfg; struct pmem2_source *src; PMEM2_PREPARE_CONFIG_INTEGRATION(&cfg, &src, fd, PMEM2_GRANULARITY_PAGE); size_t len; UT_ASSERTeq(pmem2_source_size(src, &len), 0); struct pmem2_map *map = map_valid(cfg, src, len); char *base = pmem2_map_get_address(map); pmem2_persist_fn persist_fn = pmem2_get_persist_fn(map); rng_t rng; randomize_r(&rng, 13); /* arbitrarily chosen value */ for (size_t i = 0; i < len; i++) base[i] = (char)rnd64_r(&rng); persist_fn(base, len); UT_ASSERTeq(len % Ut_mmap_align, 0); for (size_t l = len; l > 0; l -= Ut_mmap_align) { for (size_t off = 0; off < l; off += Ut_mmap_align) { size_t len2 = l - off; int ret = pmem2_config_set_length(cfg, len2); UT_PMEM2_EXPECT_RETURN(ret, 0); ret = pmem2_config_set_offset(cfg, off); UT_PMEM2_EXPECT_RETURN(ret, 0); struct pmem2_map *map2 = map_valid(cfg, src, len2); char *ptr = pmem2_map_get_address(map2); UT_ASSERTeq(ret = memcmp(base + off, ptr, len2), 0); pmem2_unmap(&map2); } } pmem2_unmap(&map); pmem2_config_delete(&cfg); pmem2_source_delete(&src); CLOSE(fd); return 1; } struct gran_test_ctx; typedef void(*map_func)(struct pmem2_config *cfg, struct pmem2_source *src, struct gran_test_ctx *ctx); /* * gran_test_ctx -- essential parameters used by granularity test */ struct gran_test_ctx { map_func map_with_expected_gran; enum pmem2_granularity expected_granularity; }; /* * map_with_avail_gran -- map the range with valid granularity, * includes cleanup */ static void map_with_avail_gran(struct pmem2_config *cfg, struct pmem2_source *src, struct gran_test_ctx *ctx) { struct pmem2_map *map; int ret = pmem2_map(cfg, src, &map); UT_PMEM2_EXPECT_RETURN(ret, 0); UT_ASSERTne(map, NULL); UT_ASSERTeq(ctx->expected_granularity, pmem2_map_get_store_granularity(map)); /* cleanup after the test */ pmem2_unmap(&map); } /* * map_with_unavail_gran -- map the range with invalid granularity * (unsuccessful) */ static void map_with_unavail_gran(struct pmem2_config *cfg, struct pmem2_source *src, struct gran_test_ctx *unused) { struct pmem2_map *map; int ret = pmem2_map(cfg, src, &map); UT_PMEM2_EXPECT_RETURN(ret, PMEM2_E_GRANULARITY_NOT_SUPPORTED); UT_ERR("%s", pmem2_errormsg()); UT_ASSERTeq(map, NULL); } static const map_func map_with_gran[N_GRANULARITIES][N_GRANULARITIES] = { /* requested granularity / available granularity */ /* -------------------------------------------------------------------- */ /* BYTE CACHE_LINE PAGE */ /* -------------------------------------------------------------------- */ /* BYTE */ {map_with_avail_gran, map_with_unavail_gran, map_with_unavail_gran}, /* CL */ {map_with_avail_gran, map_with_avail_gran, map_with_unavail_gran}, /* PAGE */ {map_with_avail_gran, map_with_avail_gran, map_with_avail_gran}}; static const enum pmem2_granularity gran_id2granularity[N_GRANULARITIES] = { PMEM2_GRANULARITY_BYTE, PMEM2_GRANULARITY_CACHE_LINE, PMEM2_GRANULARITY_PAGE}; /* * str2gran_id -- reads granularity id from the provided string */ static int str2gran_id(const char *in) { int gran = atoi(in); UT_ASSERT(gran >= 0 && gran < N_GRANULARITIES); return gran; } /* * test_granularity -- performs pmem2_map with certain expected granularity * in context of certain available granularity */ static int test_granularity(const struct 
test_case *tc, int argc, char *argv[]) { if (argc < 3) UT_FATAL( "usage: test_granularity <file>" " <available_granularity> <requested_granularity>"); struct gran_test_ctx ctx; int avail_gran_id = str2gran_id(argv[1]); int req_gran_id = str2gran_id(argv[2]); ctx.expected_granularity = gran_id2granularity[avail_gran_id]; ctx.map_with_expected_gran = map_with_gran[req_gran_id][avail_gran_id]; char *file = argv[0]; int fd = OPEN(file, O_RDWR); struct pmem2_config *cfg; struct pmem2_source *src; PMEM2_PREPARE_CONFIG_INTEGRATION(&cfg, &src, fd, gran_id2granularity[req_gran_id]); ctx.map_with_expected_gran(cfg, src, &ctx); pmem2_config_delete(&cfg); pmem2_source_delete(&src); CLOSE(fd); return 3; } /* * test_len_not_aligned -- try to use unaligned length */ static int test_len_not_aligned(const struct test_case *tc, int argc, char *argv[]) { if (argc < 1) UT_FATAL("usage: test_len_not_aligned <file>"); char *file = argv[0]; int fd = OPEN(file, O_RDWR); struct pmem2_config *cfg; struct pmem2_source *src; PMEM2_PREPARE_CONFIG_INTEGRATION(&cfg, &src, fd, PMEM2_GRANULARITY_PAGE); size_t len, alignment; int ret = pmem2_source_size(src, &len); UT_PMEM2_EXPECT_RETURN(ret, 0); PMEM2_SOURCE_ALIGNMENT(src, &alignment); UT_ASSERT(len > alignment); size_t aligned_len = ALIGN_DOWN(len, alignment); size_t unaligned_len = aligned_len - 1; ret = pmem2_config_set_length(cfg, unaligned_len); UT_PMEM2_EXPECT_RETURN(ret, 0); map_invalid(cfg, src, PMEM2_E_LENGTH_UNALIGNED); pmem2_config_delete(&cfg); pmem2_source_delete(&src); CLOSE(fd); return 1; } /* * test_len_aligned -- try to use aligned length */ static int test_len_aligned(const struct test_case *tc, int argc, char *argv[]) { if (argc < 1) UT_FATAL("usage: test_len_aligned <file>"); char *file = argv[0]; int fd = OPEN(file, O_RDWR); struct pmem2_config *cfg; struct pmem2_source *src; PMEM2_PREPARE_CONFIG_INTEGRATION(&cfg, &src, fd, PMEM2_GRANULARITY_PAGE); size_t len, alignment; int ret = pmem2_source_size(src, &len); UT_PMEM2_EXPECT_RETURN(ret, 0); PMEM2_SOURCE_ALIGNMENT(src, &alignment); UT_ASSERT(len > alignment); size_t aligned_len = ALIGN_DOWN(len, alignment); ret = pmem2_config_set_length(cfg, aligned_len); UT_PMEM2_EXPECT_RETURN(ret, 0); struct pmem2_map *map = map_valid(cfg, src, aligned_len); pmem2_unmap(&map); pmem2_config_delete(&cfg); pmem2_source_delete(&src); CLOSE(fd); return 1; } /* * test_offset_not_aligned -- try to map with unaligned offset */ static int test_offset_not_aligned(const struct test_case *tc, int argc, char *argv[]) { if (argc < 1) UT_FATAL("usage: test_offset_not_aligned <file>"); char *file = argv[0]; int fd = OPEN(file, O_RDWR); struct pmem2_config *cfg; struct pmem2_source *src; PMEM2_PREPARE_CONFIG_INTEGRATION(&cfg, &src, fd, PMEM2_GRANULARITY_PAGE); size_t len, alignment; int ret = pmem2_source_size(src, &len); UT_PMEM2_EXPECT_RETURN(ret, 0); PMEM2_SOURCE_ALIGNMENT(src, &alignment); /* break the offset */ size_t offset = alignment - 1; ret = pmem2_config_set_offset(cfg, offset); UT_PMEM2_EXPECT_RETURN(ret, 0); UT_ASSERT(len > alignment); /* in this case len has to be aligned, only offset will be unaligned */ size_t aligned_len = ALIGN_DOWN(len, alignment); ret = pmem2_config_set_length(cfg, aligned_len - alignment); UT_PMEM2_EXPECT_RETURN(ret, 0); map_invalid(cfg, src, PMEM2_E_OFFSET_UNALIGNED); pmem2_config_delete(&cfg); pmem2_source_delete(&src); CLOSE(fd); return 1; } /* * test_offset_aligned -- try to map with aligned offset */ static int test_offset_aligned(const struct test_case *tc, int argc, char *argv[]) { if 
(argc < 1) UT_FATAL("usage: test_offset_aligned <file>"); char *file = argv[0]; int fd = OPEN(file, O_RDWR); struct pmem2_config *cfg; struct pmem2_source *src; PMEM2_PREPARE_CONFIG_INTEGRATION(&cfg, &src, fd, PMEM2_GRANULARITY_PAGE); size_t len, alignment; int ret = pmem2_source_size(src, &len); UT_PMEM2_EXPECT_RETURN(ret, 0); PMEM2_SOURCE_ALIGNMENT(src, &alignment); /* set the aligned offset */ size_t offset = alignment; ret = pmem2_config_set_offset(cfg, offset); UT_PMEM2_EXPECT_RETURN(ret, 0); UT_ASSERT(len > alignment * 2); /* set the aligned len */ size_t map_len = ALIGN_DOWN(len / 2, alignment); ret = pmem2_config_set_length(cfg, map_len); UT_PMEM2_EXPECT_RETURN(ret, 0); struct pmem2_map *map = map_valid(cfg, src, map_len); pmem2_unmap(&map); pmem2_config_delete(&cfg); pmem2_source_delete(&src); CLOSE(fd); return 1; } /* * test_mem_move_cpy_set_with_map_private -- map O_RDONLY file and do * pmem2_[cpy|set|move]_fns with PMEM2_PRIVATE sharing */ static int test_mem_move_cpy_set_with_map_private(const struct test_case *tc, int argc, char *argv[]) { if (argc < 1) UT_FATAL( "usage: test_mem_move_cpy_set_with_map_private <file>"); char *file = argv[0]; int fd = OPEN(file, O_RDONLY); const char *word1 = "Persistent memory..."; const char *word2 = "Nonpersistent memory"; const char *word3 = "XXXXXXXXXXXXXXXXXXXX"; struct pmem2_config *cfg; struct pmem2_source *src; PMEM2_PREPARE_CONFIG_INTEGRATION(&cfg, &src, fd, PMEM2_GRANULARITY_PAGE); pmem2_config_set_sharing(cfg, PMEM2_PRIVATE); size_t size = 0; UT_ASSERTeq(pmem2_source_size(src, &size), 0); struct pmem2_map *map = map_valid(cfg, src, size); char *addr = pmem2_map_get_address(map); /* copy inital state */ char *initial_state = MALLOC(size); memcpy(initial_state, addr, size); pmem2_memmove_fn memmove_fn = pmem2_get_memmove_fn(map); pmem2_memcpy_fn memcpy_fn = pmem2_get_memcpy_fn(map); pmem2_memset_fn memset_fn = pmem2_get_memset_fn(map); memcpy_fn(addr, word1, strlen(word1), 0); UT_ASSERTeq(strcmp(addr, word1), 0); memmove_fn(addr, word2, strlen(word2), 0); UT_ASSERTeq(strcmp(addr, word2), 0); memset_fn(addr, 'X', strlen(word3), 0); UT_ASSERTeq(strcmp(addr, word3), 0); /* remap memory, and check that the data has not been saved */ pmem2_unmap(&map); map = map_valid(cfg, src, size); addr = pmem2_map_get_address(map); UT_ASSERTeq(strcmp(addr, initial_state), 0); /* cleanup after the test */ pmem2_unmap(&map); FREE(initial_state); pmem2_config_delete(&cfg); pmem2_source_delete(&src); CLOSE(fd); return 1; } /* * test_deep_flush_valid -- perform valid deep_flush for whole map */ static int test_deep_flush_valid(const struct test_case *tc, int argc, char *argv[]) { char *file = argv[0]; int fd = OPEN(file, O_RDWR); struct pmem2_config *cfg; struct pmem2_source *src; PMEM2_PREPARE_CONFIG_INTEGRATION(&cfg, &src, fd, PMEM2_GRANULARITY_PAGE); size_t len; PMEM2_SOURCE_SIZE(src, &len); struct pmem2_map *map = map_valid(cfg, src, len); char *addr = pmem2_map_get_address(map); pmem2_persist_fn persist_fn = pmem2_get_persist_fn(map); memset(addr, 0, len); persist_fn(addr, len); int ret = pmem2_deep_flush(map, addr, len); UT_PMEM2_EXPECT_RETURN(ret, 0); pmem2_unmap(&map); PMEM2_CONFIG_DELETE(&cfg); PMEM2_SOURCE_DELETE(&src); CLOSE(fd); return 1; } /* * test_deep_flush_e_range_behind -- try deep_flush for range behind a map */ static int test_deep_flush_e_range_behind(const struct test_case *tc, int argc, char *argv[]) { char *file = argv[0]; int fd = OPEN(file, O_RDWR); struct pmem2_config *cfg; struct pmem2_source *src; 
PMEM2_PREPARE_CONFIG_INTEGRATION(&cfg, &src, fd, PMEM2_GRANULARITY_PAGE); size_t len; PMEM2_SOURCE_SIZE(src, &len); struct pmem2_map *map = map_valid(cfg, src, len); size_t map_size = pmem2_map_get_size(map); char *addr = pmem2_map_get_address(map); pmem2_persist_fn persist_fn = pmem2_get_persist_fn(map); memset(addr, 0, len); persist_fn(addr, len); int ret = pmem2_deep_flush(map, addr + map_size + 1, 64); UT_PMEM2_EXPECT_RETURN(ret, PMEM2_E_DEEP_FLUSH_RANGE); pmem2_unmap(&map); PMEM2_CONFIG_DELETE(&cfg); PMEM2_SOURCE_DELETE(&src); CLOSE(fd); return 1; } /* * test_deep_flush_e_range_before -- try deep_flush for range before a map */ static int test_deep_flush_e_range_before(const struct test_case *tc, int argc, char *argv[]) { char *file = argv[0]; int fd = OPEN(file, O_RDWR); struct pmem2_config *cfg; struct pmem2_source *src; PMEM2_PREPARE_CONFIG_INTEGRATION(&cfg, &src, fd, PMEM2_GRANULARITY_PAGE); size_t len; PMEM2_SOURCE_SIZE(src, &len); struct pmem2_map *map = map_valid(cfg, src, len); size_t map_size = pmem2_map_get_size(map); char *addr = pmem2_map_get_address(map); pmem2_persist_fn persist_fn = pmem2_get_persist_fn(map); memset(addr, 0, len); persist_fn(addr, len); int ret = pmem2_deep_flush(map, addr - map_size, 64); UT_PMEM2_EXPECT_RETURN(ret, PMEM2_E_DEEP_FLUSH_RANGE); pmem2_unmap(&map); PMEM2_CONFIG_DELETE(&cfg); PMEM2_SOURCE_DELETE(&src); CLOSE(fd); return 1; } /* * test_deep_flush_slice -- try deep_flush for slice of a map */ static int test_deep_flush_slice(const struct test_case *tc, int argc, char *argv[]) { char *file = argv[0]; int fd = OPEN(file, O_RDWR); struct pmem2_config *cfg; struct pmem2_source *src; PMEM2_PREPARE_CONFIG_INTEGRATION(&cfg, &src, fd, PMEM2_GRANULARITY_PAGE); size_t len; PMEM2_SOURCE_SIZE(src, &len); struct pmem2_map *map = map_valid(cfg, src, len); size_t map_size = pmem2_map_get_size(map); size_t map_part = map_size / 4; char *addr = pmem2_map_get_address(map); pmem2_persist_fn persist_fn = pmem2_get_persist_fn(map); memset(addr, 0, map_part); persist_fn(addr, map_part); int ret = pmem2_deep_flush(map, addr + map_part, map_part); UT_PMEM2_EXPECT_RETURN(ret, 0); pmem2_unmap(&map); PMEM2_CONFIG_DELETE(&cfg); PMEM2_SOURCE_DELETE(&src); CLOSE(fd); return 1; } /* * test_deep_flush_overlap -- try deep_flush for range overlaping map */ static int test_deep_flush_overlap(const struct test_case *tc, int argc, char *argv[]) { char *file = argv[0]; int fd = OPEN(file, O_RDWR); struct pmem2_config *cfg; struct pmem2_source *src; PMEM2_PREPARE_CONFIG_INTEGRATION(&cfg, &src, fd, PMEM2_GRANULARITY_PAGE); size_t len; PMEM2_SOURCE_SIZE(src, &len); struct pmem2_map *map = map_valid(cfg, src, len); size_t map_size = pmem2_map_get_size(map); char *addr = pmem2_map_get_address(map); pmem2_persist_fn persist_fn = pmem2_get_persist_fn(map); memset(addr, 0, len); persist_fn(addr, len); int ret = pmem2_deep_flush(map, addr + 1024, map_size); UT_PMEM2_EXPECT_RETURN(ret, PMEM2_E_DEEP_FLUSH_RANGE); pmem2_unmap(&map); PMEM2_CONFIG_DELETE(&cfg); PMEM2_SOURCE_DELETE(&src); CLOSE(fd); return 1; } /* * test_source_anon -- tests map/config/source functions in combination * with anonymous source. 
*/ static int test_source_anon(enum pmem2_sharing_type sharing, enum pmem2_granularity granularity, size_t source_len, size_t map_len) { int ret = 0; struct pmem2_config *cfg; struct pmem2_source *src; struct pmem2_map *map; struct pmem2_badblock_context *bbctx; UT_ASSERTeq(pmem2_source_from_anon(&src, source_len), 0); UT_ASSERTeq(pmem2_source_device_id(src, NULL, NULL), PMEM2_E_NOSUPP); UT_ASSERTeq(pmem2_source_device_usc(src, NULL), PMEM2_E_NOSUPP); UT_ASSERTeq(pmem2_badblock_context_new(src, &bbctx), PMEM2_E_NOSUPP); size_t alignment; UT_ASSERTeq(pmem2_source_alignment(src, &alignment), 0); UT_ASSERT(alignment >= Ut_pagesize); size_t size; UT_ASSERTeq(pmem2_source_size(src, &size), 0); UT_ASSERTeq(size, source_len); PMEM2_CONFIG_NEW(&cfg); UT_ASSERTeq(pmem2_config_set_length(cfg, map_len), 0); UT_ASSERTeq(pmem2_config_set_offset(cfg, alignment), 0); /* ignored */ UT_ASSERTeq(pmem2_config_set_required_store_granularity(cfg, granularity), 0); UT_ASSERTeq(pmem2_config_set_sharing(cfg, sharing), 0); if ((ret = pmem2_map(cfg, src, &map)) != 0) goto map_fail; void *addr = pmem2_map_get_address(map); UT_ASSERTne(addr, NULL); UT_ASSERTeq(pmem2_map_get_size(map), map_len ? map_len : source_len); UT_ASSERTeq(pmem2_map_get_store_granularity(map), PMEM2_GRANULARITY_BYTE); UT_ASSERTeq(pmem2_deep_flush(map, addr, alignment), PMEM2_E_NOSUPP); UT_ASSERTeq(pmem2_unmap(&map), 0); map_fail: PMEM2_CONFIG_DELETE(&cfg); pmem2_source_delete(&src); return ret; } /* * test_source_anon_ok_private -- valid config /w private flag */ static int test_source_anon_private(const struct test_case *tc, int argc, char *argv[]) { int ret = test_source_anon(PMEM2_PRIVATE, PMEM2_GRANULARITY_BYTE, 1 << 30ULL, 1 << 20ULL); UT_ASSERTeq(ret, 0); return 1; } /* * test_source_anon_shared -- valid config /w shared flag */ static int test_source_anon_shared(const struct test_case *tc, int argc, char *argv[]) { int ret = test_source_anon(PMEM2_SHARED, PMEM2_GRANULARITY_BYTE, 1 << 30ULL, 1 << 20ULL); UT_ASSERTeq(ret, 0); return 1; } /* * test_source_anon_page -- valid config /w page granularity */ static int test_source_anon_page(const struct test_case *tc, int argc, char *argv[]) { int ret = test_source_anon(PMEM2_SHARED, PMEM2_GRANULARITY_PAGE, 1 << 30ULL, 1 << 20ULL); UT_ASSERTeq(ret, 0); return 1; } /* * test_source_anon_zero_len -- valid config /w zero (src inherited) map length */ static int test_source_anon_zero_len(const struct test_case *tc, int argc, char *argv[]) { int ret = test_source_anon(PMEM2_SHARED, PMEM2_GRANULARITY_BYTE, 1 << 30ULL, 0); UT_ASSERTeq(ret, 0); return 1; } /* * test_source_anon_too_small -- valid config /w small mapping length */ static int test_source_anon_too_small(const struct test_case *tc, int argc, char *argv[]) { int ret = test_source_anon(PMEM2_SHARED, PMEM2_GRANULARITY_BYTE, 1 << 30ULL, 1 << 10ULL); UT_ASSERTne(ret, 0); return 1; } /* * test_cases -- available test cases */ static struct test_case test_cases[] = { TEST_CASE(test_reuse_cfg), TEST_CASE(test_reuse_cfg_with_diff_fd), TEST_CASE(test_register_pmem), TEST_CASE(test_use_misc_lens_and_offsets), TEST_CASE(test_granularity), TEST_CASE(test_len_not_aligned), TEST_CASE(test_len_aligned), TEST_CASE(test_offset_not_aligned), TEST_CASE(test_offset_aligned), TEST_CASE(test_mem_move_cpy_set_with_map_private), TEST_CASE(test_deep_flush_valid), TEST_CASE(test_deep_flush_e_range_behind), TEST_CASE(test_deep_flush_e_range_before), TEST_CASE(test_deep_flush_slice), TEST_CASE(test_deep_flush_overlap), TEST_CASE(test_source_anon_private), 
TEST_CASE(test_source_anon_shared), TEST_CASE(test_source_anon_page), TEST_CASE(test_source_anon_too_small), TEST_CASE(test_source_anon_zero_len), }; #define NTESTS (sizeof(test_cases) / sizeof(test_cases[0])) int main(int argc, char *argv[]) { START(argc, argv, "pmem2_integration"); TEST_CASE_PROCESS(argc, argv, test_cases, NTESTS); DONE(NULL); }
22,113
23.736018
79
c
null
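For context, a minimal stand-alone sketch of the open/map/persist flow this integration test exercises. The file name pmem2_basic.c and the command-line argument are hypothetical; the sketch follows the pmem2_map(cfg, src, &map) argument order used in this source tree (later upstream releases expose the same step as pmem2_map_new()).

/* pmem2_basic.c -- minimal libpmem2 map/persist sketch (illustrative only) */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <libpmem2.h>

int
main(int argc, char *argv[])
{
	if (argc != 2) {
		fprintf(stderr, "usage: %s file\n", argv[0]);
		return 1;
	}

	int fd = open(argv[1], O_RDWR);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	struct pmem2_config *cfg;
	struct pmem2_source *src;
	struct pmem2_map *map;

	if (pmem2_source_from_fd(&src, fd))
		goto err;
	if (pmem2_config_new(&cfg))
		goto err;
	if (pmem2_config_set_required_store_granularity(cfg,
			PMEM2_GRANULARITY_PAGE))
		goto err;

	/* same argument order as used throughout the test above */
	if (pmem2_map(cfg, src, &map))
		goto err;

	char *addr = pmem2_map_get_address(map);
	pmem2_persist_fn persist = pmem2_get_persist_fn(map);

	strcpy(addr, "hello, persistent world");
	persist(addr, strlen(addr) + 1);

	pmem2_unmap(&map);
	pmem2_config_delete(&cfg);
	pmem2_source_delete(&src);
	close(fd);
	return 0;

err:
	fprintf(stderr, "%s\n", pmem2_errormsg());
	close(fd);
	return 1;
}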
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_ctl_alloc_class_config/obj_ctl_alloc_class_config.c
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2017, Intel Corporation */

/*
 * obj_ctl_alloc_class_config.c -- tests for the ctl alloc class config
 */

#include "unittest.h"

#define LAYOUT "obj_ctl_alloc_class_config"

int
main(int argc, char *argv[])
{
	START(argc, argv, "obj_ctl_alloc_class_config");

	if (argc != 2)
		UT_FATAL("usage: %s file-name", argv[0]);

	const char *path = argv[1];

	PMEMobjpool *pop;
	if ((pop = pmemobj_create(path, LAYOUT, PMEMOBJ_MIN_POOL,
		S_IWUSR | S_IRUSR)) == NULL)
		UT_FATAL("!pmemobj_create: %s", path);

	struct pobj_alloc_class_desc alloc_class;
	int ret;

	ret = pmemobj_ctl_get(pop, "heap.alloc_class.128.desc", &alloc_class);
	UT_ASSERTeq(ret, 0);
	UT_OUT("%d %lu %d", alloc_class.header_type, alloc_class.unit_size,
		alloc_class.units_per_block);

	ret = pmemobj_ctl_get(pop, "heap.alloc_class.129.desc", &alloc_class);
	UT_ASSERTeq(ret, 0);
	UT_OUT("%d %lu %d", alloc_class.header_type, alloc_class.unit_size,
		alloc_class.units_per_block);

	ret = pmemobj_ctl_get(pop, "heap.alloc_class.130.desc", &alloc_class);
	UT_ASSERTeq(ret, 0);
	UT_OUT("%d %lu %d", alloc_class.header_type, alloc_class.unit_size,
		alloc_class.units_per_block);

	pmemobj_close(pop);

	DONE(NULL);
}
1,242
22.45283
71
c
null
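The test above only reads back the descriptors of classes 128-130 (set up elsewhere through the ctl config interface). A hedged sketch of how such a class could be registered programmatically through the same ctl namespace and then used for an allocation; the size values and the helper name are illustrative only.

/* alloc_class_register.c -- sketch: register and use a custom allocation class */
#include <stdio.h>
#include <libpmemobj.h>

int
register_and_use_class(PMEMobjpool *pop)
{
	struct pobj_alloc_class_desc desc;
	desc.header_type = POBJ_HEADER_COMPACT;
	desc.unit_size = 256;		/* arbitrary example size */
	desc.units_per_block = 1024;
	desc.alignment = 0;

	/* class ids from 128 up are available for applications */
	if (pmemobj_ctl_set(pop, "heap.alloc_class.128.desc", &desc) != 0) {
		perror("pmemobj_ctl_set");
		return -1;
	}

	/* allocate one object from the freshly registered class */
	PMEMoid oid;
	if (pmemobj_xalloc(pop, &oid, desc.unit_size, 0, POBJ_CLASS_ID(128),
			NULL, NULL) != 0) {
		perror("pmemobj_xalloc");
		return -1;
	}

	pmemobj_free(&oid);
	return 0;
}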
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_action/obj_action.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2017-2019, Intel Corporation */ /* * obj_action.c -- test the action API */ #include <stdlib.h> #include "unittest.h" #define LAYOUT_NAME "obj_action" struct macro_reserve_s { PMEMoid oid; uint64_t value; }; TOID_DECLARE(struct macro_reserve_s, 1); struct foo { int bar; }; struct root { struct { PMEMoid oid; uint64_t value; } reserved; struct { PMEMoid oid; uint64_t value; } published; struct { PMEMoid oid; } tx_reserved; struct { PMEMoid oid; } tx_reserved_fulfilled; struct { PMEMoid oid; } tx_published; }; #define HUGE_ALLOC_SIZE ((1 << 20) * 3) #define MAX_ACTS 10 static void test_resv_cancel_huge(PMEMobjpool *pop) { PMEMoid oid; unsigned nallocs = 0; struct pobj_action *act = (struct pobj_action *) ZALLOC(sizeof(struct pobj_action) * MAX_ACTS); do { oid = pmemobj_reserve(pop, &act[nallocs++], HUGE_ALLOC_SIZE, 0); } while (!OID_IS_NULL(oid)); pmemobj_cancel(pop, act, nallocs - 1); unsigned nallocs2 = 0; do { oid = pmemobj_reserve(pop, &act[nallocs2++], HUGE_ALLOC_SIZE, 0); } while (!OID_IS_NULL(oid)); pmemobj_cancel(pop, act, nallocs2 - 1); UT_ASSERTeq(nallocs, nallocs2); FREE(act); } static void test_defer_free(PMEMobjpool *pop) { PMEMoid oid; int ret = pmemobj_alloc(pop, &oid, sizeof(struct foo), 0, NULL, NULL); UT_ASSERTeq(ret, 0); struct pobj_action act; pmemobj_defer_free(pop, oid, &act); pmemobj_publish(pop, &act, 1); struct foo *f = (struct foo *)pmemobj_direct(oid); f->bar = 5; /* should trigger memcheck error */ ret = pmemobj_alloc(pop, &oid, sizeof(struct foo), 0, NULL, NULL); UT_ASSERTeq(ret, 0); pmemobj_defer_free(pop, oid, &act); pmemobj_cancel(pop, &act, 1); f = (struct foo *)pmemobj_direct(oid); f->bar = 5; /* should NOT trigger memcheck error */ } /* * This function tests if macros included in action.h api compile and * allocate memory. 
*/ static void test_api_macros(PMEMobjpool *pop) { struct pobj_action macro_reserve_act[1]; TOID(struct macro_reserve_s) macro_reserve_p = POBJ_RESERVE_NEW(pop, struct macro_reserve_s, &macro_reserve_act[0]); UT_ASSERT(!OID_IS_NULL(macro_reserve_p.oid)); pmemobj_publish(pop, macro_reserve_act, 1); POBJ_FREE(&macro_reserve_p); macro_reserve_p = POBJ_RESERVE_ALLOC(pop, struct macro_reserve_s, sizeof(struct macro_reserve_s), &macro_reserve_act[0]); UT_ASSERT(!OID_IS_NULL(macro_reserve_p.oid)); pmemobj_publish(pop, macro_reserve_act, 1); POBJ_FREE(&macro_reserve_p); macro_reserve_p = POBJ_XRESERVE_NEW(pop, struct macro_reserve_s, &macro_reserve_act[0], 0); UT_ASSERT(!OID_IS_NULL(macro_reserve_p.oid)); pmemobj_publish(pop, macro_reserve_act, 1); POBJ_FREE(&macro_reserve_p); macro_reserve_p = POBJ_XRESERVE_ALLOC(pop, struct macro_reserve_s, sizeof(struct macro_reserve_s), &macro_reserve_act[0], 0); UT_ASSERT(!OID_IS_NULL(macro_reserve_p.oid)); pmemobj_publish(pop, macro_reserve_act, 1); POBJ_FREE(&macro_reserve_p); } #define POBJ_MAX_ACTIONS 60 static void test_many(PMEMobjpool *pop, size_t n) { struct pobj_action *act = (struct pobj_action *) MALLOC(sizeof(struct pobj_action) * n); PMEMoid *oid = (PMEMoid *) MALLOC(sizeof(PMEMoid) * n); for (int i = 0; i < n; ++i) { oid[i] = pmemobj_reserve(pop, &act[i], 1, 0); UT_ASSERT(!OID_IS_NULL(oid[i])); } UT_ASSERTeq(pmemobj_publish(pop, act, n), 0); for (int i = 0; i < n; ++i) { pmemobj_defer_free(pop, oid[i], &act[i]); } UT_ASSERTeq(pmemobj_publish(pop, act, n), 0); FREE(oid); FREE(act); } static void test_duplicate(PMEMobjpool *pop) { struct pobj_alloc_class_desc alloc_class_128; alloc_class_128.header_type = POBJ_HEADER_COMPACT; alloc_class_128.unit_size = 1024 * 100; alloc_class_128.units_per_block = 1; alloc_class_128.alignment = 0; int ret = pmemobj_ctl_set(pop, "heap.alloc_class.128.desc", &alloc_class_128); UT_ASSERTeq(ret, 0); struct pobj_action a[10]; PMEMoid oid[10]; oid[0] = pmemobj_xreserve(pop, &a[0], 1, 0, POBJ_CLASS_ID(128)); UT_ASSERT(!OID_IS_NULL(oid[0])); pmemobj_cancel(pop, a, 1); oid[0] = pmemobj_xreserve(pop, &a[0], 1, 0, POBJ_CLASS_ID(128)); UT_ASSERT(!OID_IS_NULL(oid[0])); oid[0] = pmemobj_xreserve(pop, &a[1], 1, 0, POBJ_CLASS_ID(128)); UT_ASSERT(!OID_IS_NULL(oid[0])); oid[0] = pmemobj_xreserve(pop, &a[2], 1, 0, POBJ_CLASS_ID(128)); UT_ASSERT(!OID_IS_NULL(oid[0])); pmemobj_cancel(pop, a, 3); oid[0] = pmemobj_xreserve(pop, &a[0], 1, 0, POBJ_CLASS_ID(128)); UT_ASSERT(!OID_IS_NULL(oid[0])); oid[0] = pmemobj_xreserve(pop, &a[1], 1, 0, POBJ_CLASS_ID(128)); UT_ASSERT(!OID_IS_NULL(oid[0])); oid[0] = pmemobj_xreserve(pop, &a[2], 1, 0, POBJ_CLASS_ID(128)); UT_ASSERT(!OID_IS_NULL(oid[0])); oid[0] = pmemobj_xreserve(pop, &a[3], 1, 0, POBJ_CLASS_ID(128)); UT_ASSERT(!OID_IS_NULL(oid[0])); oid[0] = pmemobj_xreserve(pop, &a[4], 1, 0, POBJ_CLASS_ID(128)); UT_ASSERT(!OID_IS_NULL(oid[0])); pmemobj_cancel(pop, a, 5); } static void test_many_sets(PMEMobjpool *pop, size_t n) { struct pobj_action *act = (struct pobj_action *) MALLOC(sizeof(struct pobj_action) * n); PMEMoid oid; pmemobj_alloc(pop, &oid, sizeof(uint64_t) * n, 0, NULL, NULL); UT_ASSERT(!OID_IS_NULL(oid)); uint64_t *values = (uint64_t *)pmemobj_direct(oid); for (uint64_t i = 0; i < n; ++i) pmemobj_set_value(pop, &act[i], values + i, i); UT_ASSERTeq(pmemobj_publish(pop, act, n), 0); for (uint64_t i = 0; i < n; ++i) UT_ASSERTeq(*(values + i), i); pmemobj_free(&oid); FREE(act); } int main(int argc, char *argv[]) { START(argc, argv, "obj_action"); if (argc < 2) UT_FATAL("usage: %s filename", 
argv[0]); const char *path = argv[1]; PMEMobjpool *pop = pmemobj_create(path, LAYOUT_NAME, PMEMOBJ_MIN_POOL, S_IWUSR | S_IRUSR); if (pop == NULL) UT_FATAL("!pmemobj_create: %s", path); PMEMoid root = pmemobj_root(pop, sizeof(struct root)); struct root *rootp = (struct root *)pmemobj_direct(root); struct pobj_action reserved[2]; struct pobj_action published[2]; struct pobj_action tx_reserved; struct pobj_action tx_reserved_fulfilled; struct pobj_action tx_published; rootp->reserved.oid = pmemobj_reserve(pop, &reserved[0], sizeof(struct foo), 0); pmemobj_set_value(pop, &reserved[1], &rootp->reserved.value, 1); rootp->tx_reserved.oid = pmemobj_reserve(pop, &tx_reserved, sizeof(struct foo), 0); rootp->tx_reserved_fulfilled.oid = pmemobj_reserve(pop, &tx_reserved_fulfilled, sizeof(struct foo), 0); rootp->tx_published.oid = pmemobj_reserve(pop, &tx_published, sizeof(struct foo), 0); rootp->published.oid = pmemobj_reserve(pop, &published[0], sizeof(struct foo), 0); TX_BEGIN(pop) { pmemobj_tx_publish(&tx_reserved, 1); pmemobj_tx_abort(EINVAL); } TX_ONCOMMIT { UT_ASSERT(0); } TX_END TX_BEGIN(pop) { pmemobj_tx_publish(&tx_reserved_fulfilled, 1); pmemobj_tx_publish(NULL, 0); /* this is to force resv fulfill */ pmemobj_tx_abort(EINVAL); } TX_ONCOMMIT { UT_ASSERT(0); } TX_END pmemobj_set_value(pop, &published[1], &rootp->published.value, 1); pmemobj_publish(pop, published, 2); TX_BEGIN(pop) { pmemobj_tx_publish(&tx_published, 1); } TX_ONABORT { UT_ASSERT(0); } TX_END pmemobj_persist(pop, rootp, sizeof(*rootp)); pmemobj_close(pop); UT_ASSERTeq(pmemobj_check(path, LAYOUT_NAME), 1); UT_ASSERTne(pop = pmemobj_open(path, LAYOUT_NAME), NULL); root = pmemobj_root(pop, sizeof(struct root)); rootp = (struct root *)pmemobj_direct(root); struct foo *reserved_foop = (struct foo *)pmemobj_direct(rootp->reserved.oid); reserved_foop->bar = 1; /* should trigger memcheck error */ UT_ASSERTeq(rootp->reserved.value, 0); struct foo *published_foop = (struct foo *)pmemobj_direct(rootp->published.oid); published_foop->bar = 1; /* should NOT trigger memcheck error */ UT_ASSERTeq(rootp->published.value, 1); struct foo *tx_reserved_foop = (struct foo *)pmemobj_direct(rootp->tx_reserved.oid); tx_reserved_foop->bar = 1; /* should trigger memcheck error */ struct foo *tx_reserved_fulfilled_foop = (struct foo *)pmemobj_direct(rootp->tx_reserved_fulfilled.oid); tx_reserved_fulfilled_foop->bar = 1; /* should trigger memcheck error */ struct foo *tx_published_foop = (struct foo *)pmemobj_direct(rootp->tx_published.oid); tx_published_foop->bar = 1; /* should NOT trigger memcheck error */ test_resv_cancel_huge(pop); test_defer_free(pop); test_api_macros(pop); test_many(pop, POBJ_MAX_ACTIONS * 2); test_many_sets(pop, POBJ_MAX_ACTIONS * 2); test_duplicate(pop); pmemobj_close(pop); DONE(NULL); }
8,548
23.286932
73
c
null
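A condensed sketch of the reserve/publish pattern the test above exercises: the allocation and a flag update become durable together only at pmemobj_publish(). The struct layout, the helper name and the dest_flag argument (assumed to point into the same pool) are hypothetical.

/* action_publish.c -- sketch: atomically publish an allocation plus a flag */
#include <libpmemobj.h>

struct record {
	char payload[64];
};

int
insert_record(PMEMobjpool *pop, uint64_t *dest_flag)
{
	struct pobj_action act[2];

	/* reservation: no persistent state is modified yet */
	PMEMoid oid = pmemobj_reserve(pop, &act[0], sizeof(struct record), 0);
	if (OID_IS_NULL(oid))
		return -1;

	/* initialize the reserved (not yet visible) object */
	struct record *r = pmemobj_direct(oid);
	r->payload[0] = 'x';
	pmemobj_persist(pop, r, sizeof(*r));

	/* queue a 64-bit store to be made durable together with the alloc */
	pmemobj_set_value(pop, &act[1], dest_flag, 1);

	/* both actions take effect atomically; pmemobj_cancel would drop them */
	return pmemobj_publish(pop, act, 2);
}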
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/pmem2_source_size/pmem2_source_size.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2019-2020, Intel Corporation */ /* * pmem2_source_size.c -- pmem2_source_size unittests */ #include <stdint.h> #include "fault_injection.h" #include "unittest.h" #include "ut_pmem2.h" #include "ut_fh.h" #include "config.h" #include "out.h" typedef void (*test_fun)(const char *path, os_off_t size); /* * test_normal_file - tests normal file (common) */ static void test_normal_file(const char *path, os_off_t expected_size, enum file_handle_type type) { struct FHandle *fh = UT_FH_OPEN(type, path, FH_RDWR); struct pmem2_source *src; PMEM2_SOURCE_FROM_FH(&src, fh); size_t size; int ret = pmem2_source_size(src, &size); UT_PMEM2_EXPECT_RETURN(ret, 0); UT_ASSERTeq(size, expected_size); PMEM2_SOURCE_DELETE(&src); UT_FH_CLOSE(fh); } /* * test_normal_file_fd - tests normal file using a file descriptor */ static int test_normal_file_fd(const struct test_case *tc, int argc, char *argv[]) { if (argc < 2) UT_FATAL("usage: test_normal_file_fd <file> <expected_size>"); char *path = argv[0]; os_off_t expected_size = ATOLL(argv[1]); test_normal_file(path, expected_size, FH_FD); return 2; } /* * test_normal_file_handle - tests normal file using a HANDLE */ static int test_normal_file_handle(const struct test_case *tc, int argc, char *argv[]) { if (argc < 2) UT_FATAL("usage: test_normal_file_handle" " <file> <expected_size>"); char *path = argv[0]; os_off_t expected_size = ATOLL(argv[1]); test_normal_file(path, expected_size, FH_HANDLE); return 2; } /* * test_tmpfile - tests temporary file */ static void test_tmpfile(const char *dir, os_off_t requested_size, enum file_handle_type type) { struct FHandle *fh = UT_FH_OPEN(type, dir, FH_RDWR | FH_TMPFILE); UT_FH_TRUNCATE(fh, requested_size); struct pmem2_source *src; PMEM2_SOURCE_FROM_FH(&src, fh); size_t size = SIZE_MAX; int ret = pmem2_source_size(src, &size); UT_PMEM2_EXPECT_RETURN(ret, 0); UT_ASSERTeq(size, requested_size); PMEM2_SOURCE_DELETE(&src); UT_FH_CLOSE(fh); } /* * test_tmpfile_fd - tests temporary file using file descriptor interface */ static int test_tmpfile_fd(const struct test_case *tc, int argc, char *argv[]) { if (argc < 2) UT_FATAL("usage: test_tmpfile_fd <file> <requested_size>"); char *dir = argv[0]; os_off_t requested_size = ATOLL(argv[1]); test_tmpfile(dir, requested_size, FH_FD); return 2; } /* * test_tmpfile_handle - tests temporary file using file handle interface */ static int test_tmpfile_handle(const struct test_case *tc, int argc, char *argv[]) { if (argc < 2) UT_FATAL("usage: test_tmpfile_handle <file> <requested_size>"); char *dir = argv[0]; os_off_t requested_size = ATOLL(argv[1]); test_tmpfile(dir, requested_size, FH_HANDLE); return 2; } /* * test_cases -- available test cases */ static struct test_case test_cases[] = { TEST_CASE(test_normal_file_fd), TEST_CASE(test_normal_file_handle), TEST_CASE(test_tmpfile_fd), TEST_CASE(test_tmpfile_handle), }; #define NTESTS (sizeof(test_cases) / sizeof(test_cases[0])) int main(int argc, char **argv) { START(argc, argv, "pmem2_source_size"); util_init(); out_init("pmem2_source_size", "TEST_LOG_LEVEL", "TEST_LOG_FILE", 0, 0); TEST_CASE_PROCESS(argc, argv, test_cases, NTESTS); out_fini(); DONE(NULL); }
3,326
20.191083
75
c
null
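For reference, a minimal stand-alone sketch of the pmem2_source_size() query that these cases validate; the file name source_size.c and the command-line argument are hypothetical.

/* source_size.c -- sketch: print the mapping size pmem2 reports for a file */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <libpmem2.h>

int
main(int argc, char *argv[])
{
	if (argc != 2) {
		fprintf(stderr, "usage: %s file\n", argv[0]);
		return 1;
	}

	int fd = open(argv[1], O_RDWR);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	struct pmem2_source *src;
	if (pmem2_source_from_fd(&src, fd)) {
		fprintf(stderr, "%s\n", pmem2_errormsg());
		close(fd);
		return 1;
	}

	size_t size;
	if (pmem2_source_size(src, &size) == 0)
		printf("%s: %zu bytes\n", argv[1], size);
	else
		fprintf(stderr, "%s\n", pmem2_errormsg());

	pmem2_source_delete(&src);
	close(fd);
	return 0;
}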
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/ex_linkedlist/ex_linkedlist.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2016-2017, Intel Corporation */ /* * ex_linkedlist.c - test of linkedlist example */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <unistd.h> #include "pmemobj_list.h" #include "unittest.h" #define ELEMENT_NO 10 #define PRINT_RES(res, struct_name) do {\ if ((res) == 0) {\ UT_OUT("Outcome for " #struct_name " is correct!");\ } else {\ UT_ERR("Outcome for " #struct_name\ " does not match expected result!!!");\ }\ } while (0) POBJ_LAYOUT_BEGIN(list); POBJ_LAYOUT_ROOT(list, struct base); POBJ_LAYOUT_TOID(list, struct tqueuehead); POBJ_LAYOUT_TOID(list, struct slisthead); POBJ_LAYOUT_TOID(list, struct tqnode); POBJ_LAYOUT_TOID(list, struct snode); POBJ_LAYOUT_END(list); POBJ_TAILQ_HEAD(tqueuehead, struct tqnode); struct tqnode { int data; POBJ_TAILQ_ENTRY(struct tqnode) tnd; }; POBJ_SLIST_HEAD(slisthead, struct snode); struct snode { int data; POBJ_SLIST_ENTRY(struct snode) snd; }; struct base { struct tqueuehead tqueue; struct slisthead slist; }; static const int expectedResTQ[] = { 111, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 222 }; static const int expectedResSL[] = { 111, 8, 222, 6, 5, 4, 3, 2, 1, 0, 333 }; /* * dump_tq -- dumps list on standard output */ static void dump_tq(struct tqueuehead *head, const char *str) { TOID(struct tqnode) var; UT_OUT("%s start", str); POBJ_TAILQ_FOREACH(var, head, tnd) UT_OUT("%d", D_RW(var)->data); UT_OUT("%s end", str); } /* * init_tqueue -- initialize tail queue */ static void init_tqueue(PMEMobjpool *pop, struct tqueuehead *head) { if (!POBJ_TAILQ_EMPTY(head)) return; TOID(struct tqnode) node; TOID(struct tqnode) middleNode; TOID(struct tqnode) node888; TOID(struct tqnode) tempNode; int i = 0; TX_BEGIN(pop) { POBJ_TAILQ_INIT(head); dump_tq(head, "after init"); for (i = 0; i < ELEMENT_NO; ++i) { node = TX_NEW(struct tqnode); D_RW(node)->data = i; if (0 == i) { middleNode = node; } POBJ_TAILQ_INSERT_HEAD(head, node, tnd); node = TX_NEW(struct tqnode); D_RW(node)->data = i; POBJ_TAILQ_INSERT_TAIL(head, node, tnd); } dump_tq(head, "after insert[head|tail]"); node = TX_NEW(struct tqnode); D_RW(node)->data = 666; POBJ_TAILQ_INSERT_AFTER(middleNode, node, tnd); dump_tq(head, "after insert_after1"); middleNode = POBJ_TAILQ_NEXT(middleNode, tnd); node = TX_NEW(struct tqnode); D_RW(node)->data = 888; node888 = node; POBJ_TAILQ_INSERT_BEFORE(middleNode, node, tnd); dump_tq(head, "after insert_before1"); node = TX_NEW(struct tqnode); D_RW(node)->data = 555; POBJ_TAILQ_INSERT_BEFORE(middleNode, node, tnd); dump_tq(head, "after insert_before2"); node = TX_NEW(struct tqnode); D_RW(node)->data = 111; tempNode = POBJ_TAILQ_FIRST(head); POBJ_TAILQ_INSERT_BEFORE(tempNode, node, tnd); dump_tq(head, "after insert_before3"); node = TX_NEW(struct tqnode); D_RW(node)->data = 222; tempNode = POBJ_TAILQ_LAST(head); POBJ_TAILQ_INSERT_AFTER(tempNode, node, tnd); dump_tq(head, "after insert_after2"); tempNode = middleNode; middleNode = POBJ_TAILQ_PREV(tempNode, tnd); POBJ_TAILQ_MOVE_ELEMENT_TAIL(head, middleNode, tnd); dump_tq(head, "after move_element_tail"); POBJ_TAILQ_MOVE_ELEMENT_HEAD(head, tempNode, tnd); dump_tq(head, "after move_element_head"); tempNode = POBJ_TAILQ_FIRST(head); POBJ_TAILQ_REMOVE(head, tempNode, tnd); dump_tq(head, "after remove1"); tempNode = POBJ_TAILQ_LAST(head); POBJ_TAILQ_REMOVE(head, tempNode, tnd); dump_tq(head, "after remove2"); POBJ_TAILQ_REMOVE(head, node888, tnd); dump_tq(head, "after remove3"); } TX_ONABORT { abort(); } TX_END } /* * dump_sl -- dumps 
list on standard output */ static void dump_sl(struct slisthead *head, const char *str) { TOID(struct snode) var; UT_OUT("%s start", str); POBJ_SLIST_FOREACH(var, head, snd) UT_OUT("%d", D_RW(var)->data); UT_OUT("%s end", str); } /* * init_slist -- initialize SLIST */ static void init_slist(PMEMobjpool *pop, struct slisthead *head) { if (!POBJ_SLIST_EMPTY(head)) return; TOID(struct snode) node; TOID(struct snode) tempNode; int i = 0; TX_BEGIN(pop) { POBJ_SLIST_INIT(head); dump_sl(head, "after init"); for (i = 0; i < ELEMENT_NO; ++i) { node = TX_NEW(struct snode); D_RW(node)->data = i; POBJ_SLIST_INSERT_HEAD(head, node, snd); } dump_sl(head, "after insert_head"); tempNode = POBJ_SLIST_FIRST(head); node = TX_NEW(struct snode); D_RW(node)->data = 111; POBJ_SLIST_INSERT_AFTER(tempNode, node, snd); dump_sl(head, "after insert_after1"); tempNode = POBJ_SLIST_NEXT(node, snd); node = TX_NEW(struct snode); D_RW(node)->data = 222; POBJ_SLIST_INSERT_AFTER(tempNode, node, snd); dump_sl(head, "after insert_after2"); tempNode = POBJ_SLIST_NEXT(node, snd); POBJ_SLIST_REMOVE_FREE(head, tempNode, snd); dump_sl(head, "after remove_free1"); POBJ_SLIST_REMOVE_HEAD(head, snd); dump_sl(head, "after remove_head"); TOID(struct snode) element = POBJ_SLIST_FIRST(head); while (!TOID_IS_NULL(D_RO(element)->snd.pe_next)) { element = D_RO(element)->snd.pe_next; } node = TX_NEW(struct snode); D_RW(node)->data = 333; POBJ_SLIST_INSERT_AFTER(element, node, snd); dump_sl(head, "after insert_after3"); element = node; node = TX_NEW(struct snode); D_RW(node)->data = 123; POBJ_SLIST_INSERT_AFTER(element, node, snd); dump_sl(head, "after insert_after4"); tempNode = POBJ_SLIST_NEXT(node, snd); POBJ_SLIST_REMOVE_FREE(head, node, snd); dump_sl(head, "after remove_free2"); } TX_ONABORT { abort(); } TX_END } int main(int argc, char *argv[]) { unsigned res = 0; PMEMobjpool *pop; const char *path; START(argc, argv, "ex_linkedlist"); /* root doesn't count */ UT_COMPILE_ERROR_ON(POBJ_LAYOUT_TYPES_NUM(list) != 4); if (argc != 2) { UT_FATAL("usage: %s file-name", argv[0]); } path = argv[1]; if (os_access(path, F_OK) != 0) { if ((pop = pmemobj_create(path, POBJ_LAYOUT_NAME(list), PMEMOBJ_MIN_POOL, 0666)) == NULL) { UT_FATAL("!pmemobj_create: %s", path); } } else { if ((pop = pmemobj_open(path, POBJ_LAYOUT_NAME(list))) == NULL) { UT_FATAL("!pmemobj_open: %s", path); } } TOID(struct base) base = POBJ_ROOT(pop, struct base); struct tqueuehead *tqhead = &D_RW(base)->tqueue; struct slisthead *slhead = &D_RW(base)->slist; init_tqueue(pop, tqhead); init_slist(pop, slhead); int i = 0; TOID(struct tqnode) tqelement; POBJ_TAILQ_FOREACH(tqelement, tqhead, tnd) { if (D_RO(tqelement)->data != expectedResTQ[i]) { res = 1; break; } i++; } PRINT_RES(res, tail queue); i = 0; res = 0; TOID(struct snode) slelement; POBJ_SLIST_FOREACH(slelement, slhead, snd) { if (D_RO(slelement)->data != expectedResSL[i]) { res = 1; break; } i++; } PRINT_RES(res, singly linked list); pmemobj_close(pop); DONE(NULL); }
6,919
22.862069
77
c
null
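The example above relies on the POBJ_TAILQ/POBJ_SLIST macros from the examples' pmemobj_list.h. For comparison, a sketch of a persistent list built only on libpmemobj's core non-transactional POBJ_LIST API; the layout name, struct names and helper functions are hypothetical, and the caller is assumed to pass a pool created with this layout.

/* plist_sketch.c -- sketch: persistent list via the built-in POBJ_LIST API */
#include <stdio.h>
#include <libpmemobj.h>

POBJ_LAYOUT_BEGIN(plist);
POBJ_LAYOUT_ROOT(plist, struct base);
POBJ_LAYOUT_TOID(plist, struct item);
POBJ_LAYOUT_END(plist);

struct item {
	int value;
	POBJ_LIST_ENTRY(struct item) entry;
};

struct base {
	POBJ_LIST_HEAD(items_head, struct item) items;
};

static int
item_init(PMEMobjpool *pop, void *ptr, void *arg)
{
	struct item *it = ptr;
	it->value = *(int *)arg;
	pmemobj_persist(pop, &it->value, sizeof(it->value));
	return 0;
}

void
append_and_dump(PMEMobjpool *pop)
{
	TOID(struct base) root = POBJ_ROOT(pop, struct base);

	for (int i = 0; i < 5; ++i) {
		/* atomic allocate-and-link, no explicit transaction needed */
		POBJ_LIST_INSERT_NEW_TAIL(pop, &D_RW(root)->items, entry,
				sizeof(struct item), item_init, &i);
	}

	TOID(struct item) it;
	POBJ_LIST_FOREACH(it, &D_RO(root)->items, entry)
		printf("%d\n", D_RO(it)->value);
}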
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_persist_count/mocks_windows.h
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2018, Intel Corporation */

/*
 * mocks_windows.h -- redefinitions of pmem functions
 *
 * This file is Windows-specific.
 *
 * This file should be included (i.e. using Forced Include) by libpmemobj
 * files, when compiled for the purpose of obj_persist_count test.
 * It would replace default implementation with mocked functions defined
 * in obj_persist_count.c.
 *
 * These defines could be also passed as preprocessor definitions.
 */

#ifndef WRAP_REAL
#define pmem_persist __wrap_pmem_persist
#define pmem_flush __wrap_pmem_flush
#define pmem_drain __wrap_pmem_drain
#define pmem_msync __wrap_pmem_msync
#define pmem_memcpy_persist __wrap_pmem_memcpy_persist
#define pmem_memcpy_nodrain __wrap_pmem_memcpy_nodrain
#define pmem_memcpy __wrap_pmem_memcpy
#define pmem_memmove_persist __wrap_pmem_memmove_persist
#define pmem_memmove_nodrain __wrap_pmem_memmove_nodrain
#define pmem_memmove __wrap_pmem_memmove
#define pmem_memset_persist __wrap_pmem_memset_persist
#define pmem_memset_nodrain __wrap_pmem_memset_nodrain
#define pmem_memset __wrap_pmem_memset
#endif
1,130
34.34375
73
h
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_persist_count/obj_persist_count.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2019, Intel Corporation */ /* * obj_persist_count.c -- counting number of persists */ #define _GNU_SOURCE #include "obj.h" #include "pmalloc.h" #include "unittest.h" struct ops_counter { unsigned n_cl_stores; unsigned n_drain; unsigned n_pmem_persist; unsigned n_pmem_msync; unsigned n_pmem_flush; unsigned n_pmem_drain; unsigned n_flush_from_pmem_memcpy; unsigned n_flush_from_pmem_memset; unsigned n_drain_from_pmem_memcpy; unsigned n_drain_from_pmem_memset; unsigned n_pot_cache_misses; }; static struct ops_counter ops_counter; static struct ops_counter tx_counter; #define FLUSH_ALIGN ((uintptr_t)64) #define MOVNT_THRESHOLD 256 static unsigned cl_flushed(const void *addr, size_t len, uintptr_t alignment) { uintptr_t start = (uintptr_t)addr & ~(alignment - 1); uintptr_t end = ((uintptr_t)addr + len + alignment - 1) & ~(alignment - 1); return (unsigned)(end - start) / FLUSH_ALIGN; } #define PMEM_F_MEM_MOVNT (PMEM_F_MEM_WC | PMEM_F_MEM_NONTEMPORAL) #define PMEM_F_MEM_MOV (PMEM_F_MEM_WB | PMEM_F_MEM_TEMPORAL) static unsigned bulk_cl_changed(const void *addr, size_t len, unsigned flags) { uintptr_t start = (uintptr_t)addr & ~(FLUSH_ALIGN - 1); uintptr_t end = ((uintptr_t)addr + len + FLUSH_ALIGN - 1) & ~(FLUSH_ALIGN - 1); unsigned cl_changed = (unsigned)(end - start) / FLUSH_ALIGN; int wc; /* write combining */ if (flags & PMEM_F_MEM_NOFLUSH) wc = 0; /* NOFLUSH always uses temporal instructions */ else if (flags & PMEM_F_MEM_MOVNT) wc = 1; else if (flags & PMEM_F_MEM_MOV) wc = 0; else if (len < MOVNT_THRESHOLD) wc = 0; else wc = 1; /* count number of potential cache misses */ if (!wc) { /* * When we don't use write combining, it means all * cache lines may be missing. */ ops_counter.n_pot_cache_misses += cl_changed; } else { /* * When we use write combining there won't be any cache misses, * with an exception of unaligned beginning or end. 
*/ if (start != (uintptr_t)addr) ops_counter.n_pot_cache_misses++; if (end != ((uintptr_t)addr + len) && start + FLUSH_ALIGN != end) ops_counter.n_pot_cache_misses++; } return cl_changed; } static void flush_cl(const void *addr, size_t len) { unsigned flushed = cl_flushed(addr, len, FLUSH_ALIGN); ops_counter.n_cl_stores += flushed; ops_counter.n_pot_cache_misses += flushed; } static void flush_msync(const void *addr, size_t len) { unsigned flushed = cl_flushed(addr, len, Pagesize); ops_counter.n_cl_stores += flushed; ops_counter.n_pot_cache_misses += flushed; } FUNC_MOCK(pmem_persist, void, const void *addr, size_t len) FUNC_MOCK_RUN_DEFAULT { ops_counter.n_pmem_persist++; flush_cl(addr, len); ops_counter.n_drain++; _FUNC_REAL(pmem_persist)(addr, len); } FUNC_MOCK_END FUNC_MOCK(pmem_msync, int, const void *addr, size_t len) FUNC_MOCK_RUN_DEFAULT { ops_counter.n_pmem_msync++; flush_msync(addr, len); ops_counter.n_drain++; return _FUNC_REAL(pmem_msync)(addr, len); } FUNC_MOCK_END FUNC_MOCK(pmem_flush, void, const void *addr, size_t len) FUNC_MOCK_RUN_DEFAULT { ops_counter.n_pmem_flush++; flush_cl(addr, len); _FUNC_REAL(pmem_flush)(addr, len); } FUNC_MOCK_END FUNC_MOCK(pmem_drain, void, void) FUNC_MOCK_RUN_DEFAULT { ops_counter.n_pmem_drain++; ops_counter.n_drain++; _FUNC_REAL(pmem_drain)(); } FUNC_MOCK_END static void memcpy_nodrain_count(void *dest, const void *src, size_t len, unsigned flags) { unsigned cl_stores = bulk_cl_changed(dest, len, flags); if (!(flags & PMEM_F_MEM_NOFLUSH)) ops_counter.n_flush_from_pmem_memcpy += cl_stores; ops_counter.n_cl_stores += cl_stores; } static void memcpy_persist_count(void *dest, const void *src, size_t len, unsigned flags) { memcpy_nodrain_count(dest, src, len, flags); ops_counter.n_drain_from_pmem_memcpy++; ops_counter.n_drain++; } FUNC_MOCK(pmem_memcpy_persist, void *, void *dest, const void *src, size_t len) FUNC_MOCK_RUN_DEFAULT { memcpy_persist_count(dest, src, len, 0); return _FUNC_REAL(pmem_memcpy_persist)(dest, src, len); } FUNC_MOCK_END FUNC_MOCK(pmem_memcpy_nodrain, void *, void *dest, const void *src, size_t len) FUNC_MOCK_RUN_DEFAULT { memcpy_nodrain_count(dest, src, len, 0); return _FUNC_REAL(pmem_memcpy_nodrain)(dest, src, len); } FUNC_MOCK_END static unsigned sanitize_flags(unsigned flags) { if (flags & PMEM_F_MEM_NOFLUSH) { /* NOFLUSH implies NODRAIN */ flags |= PMEM_F_MEM_NODRAIN; } return flags; } FUNC_MOCK(pmem_memcpy, void *, void *dest, const void *src, size_t len, unsigned flags) FUNC_MOCK_RUN_DEFAULT { flags = sanitize_flags(flags); if (flags & PMEM_F_MEM_NODRAIN) memcpy_nodrain_count(dest, src, len, flags); else memcpy_persist_count(dest, src, len, flags); return _FUNC_REAL(pmem_memcpy)(dest, src, len, flags); } FUNC_MOCK_END FUNC_MOCK(pmem_memmove_persist, void *, void *dest, const void *src, size_t len) FUNC_MOCK_RUN_DEFAULT { memcpy_persist_count(dest, src, len, 0); return _FUNC_REAL(pmem_memmove_persist)(dest, src, len); } FUNC_MOCK_END FUNC_MOCK(pmem_memmove_nodrain, void *, void *dest, const void *src, size_t len) FUNC_MOCK_RUN_DEFAULT { memcpy_nodrain_count(dest, src, len, 0); return _FUNC_REAL(pmem_memmove_nodrain)(dest, src, len); } FUNC_MOCK_END FUNC_MOCK(pmem_memmove, void *, void *dest, const void *src, size_t len, unsigned flags) FUNC_MOCK_RUN_DEFAULT { flags = sanitize_flags(flags); if (flags & PMEM_F_MEM_NODRAIN) memcpy_nodrain_count(dest, src, len, flags); else memcpy_persist_count(dest, src, len, flags); return _FUNC_REAL(pmem_memmove)(dest, src, len, flags); } FUNC_MOCK_END static void 
memset_nodrain_count(void *dest, size_t len, unsigned flags) { unsigned cl_set = bulk_cl_changed(dest, len, flags); if (!(flags & PMEM_F_MEM_NOFLUSH)) ops_counter.n_flush_from_pmem_memset += cl_set; ops_counter.n_cl_stores += cl_set; } static void memset_persist_count(void *dest, size_t len, unsigned flags) { memset_nodrain_count(dest, len, flags); ops_counter.n_drain_from_pmem_memset++; ops_counter.n_drain++; } FUNC_MOCK(pmem_memset_persist, void *, void *dest, int c, size_t len) FUNC_MOCK_RUN_DEFAULT { memset_persist_count(dest, len, 0); return _FUNC_REAL(pmem_memset_persist)(dest, c, len); } FUNC_MOCK_END FUNC_MOCK(pmem_memset_nodrain, void *, void *dest, int c, size_t len) FUNC_MOCK_RUN_DEFAULT { memset_nodrain_count(dest, len, 0); return _FUNC_REAL(pmem_memset_nodrain)(dest, c, len); } FUNC_MOCK_END FUNC_MOCK(pmem_memset, void *, void *dest, int c, size_t len, unsigned flags) FUNC_MOCK_RUN_DEFAULT { flags = sanitize_flags(flags); if (flags & PMEM_F_MEM_NODRAIN) memset_nodrain_count(dest, len, flags); else memset_persist_count(dest, len, flags); return _FUNC_REAL(pmem_memset)(dest, c, len, flags); } FUNC_MOCK_END /* * reset_counters -- zero all counters */ static void reset_counters(void) { memset(&ops_counter, 0, sizeof(ops_counter)); } /* * print_reset_counters -- print and then zero all counters */ static void print_reset_counters(const char *task, unsigned tx) { #define CNT(name) (ops_counter.name - tx * tx_counter.name) UT_OUT( "%-14s %-7d %-10d %-12d %-10d %-10d %-10d %-15d %-17d %-15d %-17d %-23d", task, CNT(n_cl_stores), CNT(n_drain), CNT(n_pmem_persist), CNT(n_pmem_msync), CNT(n_pmem_flush), CNT(n_pmem_drain), CNT(n_flush_from_pmem_memcpy), CNT(n_drain_from_pmem_memcpy), CNT(n_flush_from_pmem_memset), CNT(n_drain_from_pmem_memset), CNT(n_pot_cache_misses)); #undef CNT reset_counters(); } #define LARGE_SNAPSHOT ((1 << 10) * 10) struct foo_large { uint8_t snapshot[LARGE_SNAPSHOT]; }; struct foo { int val; uint64_t dest; PMEMoid bar; PMEMoid bar2; }; int main(int argc, char *argv[]) { START(argc, argv, "obj_persist_count"); if (argc != 2) UT_FATAL("usage: %s file-name", argv[0]); const char *path = argv[1]; PMEMobjpool *pop; if ((pop = pmemobj_create(path, "persist_count", PMEMOBJ_MIN_POOL, S_IWUSR | S_IRUSR)) == NULL) UT_FATAL("!pmemobj_create: %s", path); UT_OUT( "%-14s %-7s %-10s %-12s %-10s %-10s %-10s %-15s %-17s %-15s %-17s %-23s", "task", "cl(all)", "drain(all)", "pmem_persist", "pmem_msync", "pmem_flush", "pmem_drain", "pmem_memcpy_cls", "pmem_memcpy_drain", "pmem_memset_cls", "pmem_memset_drain", "potential_cache_misses"); print_reset_counters("pool_create", 0); /* allocate one structure to create a run */ pmemobj_alloc(pop, NULL, sizeof(struct foo), 0, NULL, NULL); reset_counters(); PMEMoid root = pmemobj_root(pop, sizeof(struct foo)); UT_ASSERT(!OID_IS_NULL(root)); print_reset_counters("root_alloc", 0); PMEMoid oid; int ret = pmemobj_alloc(pop, &oid, sizeof(struct foo), 0, NULL, NULL); UT_ASSERTeq(ret, 0); print_reset_counters("atomic_alloc", 0); pmemobj_free(&oid); print_reset_counters("atomic_free", 0); struct foo *f = pmemobj_direct(root); TX_BEGIN(pop) { } TX_END memcpy(&tx_counter, &ops_counter, sizeof(ops_counter)); print_reset_counters("tx_begin_end", 0); TX_BEGIN(pop) { f->bar = pmemobj_tx_alloc(sizeof(struct foo), 0); UT_ASSERT(!OID_IS_NULL(f->bar)); } TX_END print_reset_counters("tx_alloc", 1); TX_BEGIN(pop) { f->bar2 = pmemobj_tx_alloc(sizeof(struct foo), 0); UT_ASSERT(!OID_IS_NULL(f->bar2)); } TX_END print_reset_counters("tx_alloc_next", 1); TX_BEGIN(pop) { 
pmemobj_tx_free(f->bar); } TX_END print_reset_counters("tx_free", 1); TX_BEGIN(pop) { pmemobj_tx_free(f->bar2); } TX_END print_reset_counters("tx_free_next", 1); TX_BEGIN(pop) { pmemobj_tx_xadd_range_direct(&f->val, sizeof(f->val), POBJ_XADD_NO_FLUSH); } TX_END print_reset_counters("tx_add", 1); TX_BEGIN(pop) { pmemobj_tx_xadd_range_direct(&f->val, sizeof(f->val), POBJ_XADD_NO_FLUSH); } TX_END print_reset_counters("tx_add_next", 1); PMEMoid large_foo; pmemobj_zalloc(pop, &large_foo, sizeof(struct foo_large), 0); UT_ASSERT(!OID_IS_NULL(large_foo)); reset_counters(); struct foo_large *flarge = pmemobj_direct(large_foo); TX_BEGIN(pop) { pmemobj_tx_xadd_range_direct(&flarge->snapshot, sizeof(flarge->snapshot), POBJ_XADD_NO_FLUSH); } TX_END print_reset_counters("tx_add_large", 1); TX_BEGIN(pop) { pmemobj_tx_xadd_range_direct(&flarge->snapshot, sizeof(flarge->snapshot), POBJ_XADD_NO_FLUSH); } TX_END print_reset_counters("tx_add_lnext", 1); pmalloc(pop, &f->dest, sizeof(f->val), 0, 0); print_reset_counters("pmalloc", 0); pfree(pop, &f->dest); print_reset_counters("pfree", 0); uint64_t stack_var; pmalloc(pop, &stack_var, sizeof(f->val), 0, 0); print_reset_counters("pmalloc_stack", 0); pfree(pop, &stack_var); print_reset_counters("pfree_stack", 0); pmemobj_close(pop); DONE(NULL); } #ifdef _MSC_VER /* * Since libpmemobj is linked statically, we need to invoke its ctor/dtor. */ MSVC_CONSTR(libpmemobj_init) MSVC_DESTR(libpmemobj_fini) #endif
10,962
22.832609
80
c
null
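The counters above hinge on one piece of pointer arithmetic: how many 64-byte cache lines an address range touches (round the start down, the end up, divide by the line size). A stand-alone sketch of that calculation; file and function names are illustrative.

/* cl_count.c -- sketch: count 64-byte cache lines spanned by a range */
#include <stdint.h>
#include <stdio.h>

#define CACHELINE ((uintptr_t)64)

static unsigned
cachelines_spanned(const void *addr, size_t len)
{
	/* round the start down and the end up to cache-line boundaries */
	uintptr_t start = (uintptr_t)addr & ~(CACHELINE - 1);
	uintptr_t end = ((uintptr_t)addr + len + CACHELINE - 1) &
			~(CACHELINE - 1);

	return (unsigned)((end - start) / CACHELINE);
}

int
main(void)
{
	char buf[256];

	/* a single byte still touches one full cache line */
	printf("%u\n", cachelines_spanned(buf, 1));

	/* an unaligned 64-byte range may straddle two lines */
	printf("%u\n", cachelines_spanned(buf + 1, 64));

	return 0;
}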
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/rpmem_proto/rpmem_proto.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2016-2018, Intel Corporation */ /* * rpmem_proto.c -- unit test for rpmem_proto header * * The purpose of this test is to make sure the structures which describe * rpmem protocol messages does not have any padding. */ #include "unittest.h" #include "librpmem.h" #include "rpmem_proto.h" int main(int argc, char *argv[]) { START(argc, argv, "rpmem_proto"); ASSERT_ALIGNED_BEGIN(struct rpmem_msg_hdr); ASSERT_ALIGNED_FIELD(struct rpmem_msg_hdr, type); ASSERT_ALIGNED_FIELD(struct rpmem_msg_hdr, size); ASSERT_ALIGNED_CHECK(struct rpmem_msg_hdr); ASSERT_ALIGNED_BEGIN(struct rpmem_msg_hdr_resp); ASSERT_ALIGNED_FIELD(struct rpmem_msg_hdr_resp, status); ASSERT_ALIGNED_FIELD(struct rpmem_msg_hdr_resp, type); ASSERT_ALIGNED_FIELD(struct rpmem_msg_hdr_resp, size); ASSERT_ALIGNED_CHECK(struct rpmem_msg_hdr_resp); ASSERT_ALIGNED_BEGIN(struct rpmem_pool_attr); ASSERT_ALIGNED_FIELD(struct rpmem_pool_attr, signature); ASSERT_ALIGNED_FIELD(struct rpmem_pool_attr, major); ASSERT_ALIGNED_FIELD(struct rpmem_pool_attr, compat_features); ASSERT_ALIGNED_FIELD(struct rpmem_pool_attr, incompat_features); ASSERT_ALIGNED_FIELD(struct rpmem_pool_attr, ro_compat_features); ASSERT_ALIGNED_FIELD(struct rpmem_pool_attr, poolset_uuid); ASSERT_ALIGNED_FIELD(struct rpmem_pool_attr, uuid); ASSERT_ALIGNED_FIELD(struct rpmem_pool_attr, next_uuid); ASSERT_ALIGNED_FIELD(struct rpmem_pool_attr, prev_uuid); ASSERT_ALIGNED_FIELD(struct rpmem_pool_attr, user_flags); ASSERT_ALIGNED_CHECK(struct rpmem_pool_attr); ASSERT_ALIGNED_BEGIN(struct rpmem_pool_attr_packed); ASSERT_ALIGNED_FIELD(struct rpmem_pool_attr_packed, signature); ASSERT_ALIGNED_FIELD(struct rpmem_pool_attr_packed, major); ASSERT_ALIGNED_FIELD(struct rpmem_pool_attr_packed, compat_features); ASSERT_ALIGNED_FIELD(struct rpmem_pool_attr_packed, incompat_features); ASSERT_ALIGNED_FIELD(struct rpmem_pool_attr_packed, ro_compat_features); ASSERT_ALIGNED_FIELD(struct rpmem_pool_attr_packed, poolset_uuid); ASSERT_ALIGNED_FIELD(struct rpmem_pool_attr_packed, uuid); ASSERT_ALIGNED_FIELD(struct rpmem_pool_attr_packed, next_uuid); ASSERT_ALIGNED_FIELD(struct rpmem_pool_attr_packed, prev_uuid); ASSERT_ALIGNED_FIELD(struct rpmem_pool_attr_packed, user_flags); ASSERT_ALIGNED_CHECK(struct rpmem_pool_attr_packed); ASSERT_ALIGNED_BEGIN(struct rpmem_msg_ibc_attr); ASSERT_ALIGNED_FIELD(struct rpmem_msg_ibc_attr, port); ASSERT_ALIGNED_FIELD(struct rpmem_msg_ibc_attr, persist_method); ASSERT_ALIGNED_FIELD(struct rpmem_msg_ibc_attr, rkey); ASSERT_ALIGNED_FIELD(struct rpmem_msg_ibc_attr, raddr); ASSERT_ALIGNED_FIELD(struct rpmem_msg_ibc_attr, nlanes); ASSERT_ALIGNED_CHECK(struct rpmem_msg_ibc_attr); ASSERT_ALIGNED_BEGIN(struct rpmem_msg_common); ASSERT_ALIGNED_FIELD(struct rpmem_msg_common, major); ASSERT_ALIGNED_FIELD(struct rpmem_msg_common, minor); ASSERT_ALIGNED_FIELD(struct rpmem_msg_common, pool_size); ASSERT_ALIGNED_FIELD(struct rpmem_msg_common, nlanes); ASSERT_ALIGNED_FIELD(struct rpmem_msg_common, provider); ASSERT_ALIGNED_FIELD(struct rpmem_msg_common, buff_size); ASSERT_ALIGNED_CHECK(struct rpmem_msg_common); ASSERT_ALIGNED_BEGIN(struct rpmem_msg_pool_desc); ASSERT_ALIGNED_FIELD(struct rpmem_msg_pool_desc, size); ASSERT_ALIGNED_CHECK(struct rpmem_msg_pool_desc); ASSERT_ALIGNED_BEGIN(struct rpmem_msg_create); ASSERT_ALIGNED_FIELD(struct rpmem_msg_create, hdr); ASSERT_ALIGNED_FIELD(struct rpmem_msg_create, c); ASSERT_ALIGNED_FIELD(struct rpmem_msg_create, pool_attr); ASSERT_ALIGNED_FIELD(struct rpmem_msg_create, pool_desc); 
ASSERT_ALIGNED_CHECK(struct rpmem_msg_create); ASSERT_ALIGNED_BEGIN(struct rpmem_msg_create_resp); ASSERT_ALIGNED_FIELD(struct rpmem_msg_create_resp, hdr); ASSERT_ALIGNED_FIELD(struct rpmem_msg_create_resp, ibc); ASSERT_ALIGNED_CHECK(struct rpmem_msg_create_resp); ASSERT_ALIGNED_BEGIN(struct rpmem_msg_open); ASSERT_ALIGNED_FIELD(struct rpmem_msg_open, hdr); ASSERT_ALIGNED_FIELD(struct rpmem_msg_open, c); ASSERT_ALIGNED_FIELD(struct rpmem_msg_open, pool_desc); ASSERT_ALIGNED_CHECK(struct rpmem_msg_open); ASSERT_ALIGNED_BEGIN(struct rpmem_msg_open_resp); ASSERT_ALIGNED_FIELD(struct rpmem_msg_open_resp, hdr); ASSERT_ALIGNED_FIELD(struct rpmem_msg_open_resp, ibc); ASSERT_ALIGNED_FIELD(struct rpmem_msg_open_resp, pool_attr); ASSERT_ALIGNED_CHECK(struct rpmem_msg_open_resp); ASSERT_ALIGNED_BEGIN(struct rpmem_msg_close); ASSERT_ALIGNED_FIELD(struct rpmem_msg_close, hdr); ASSERT_ALIGNED_FIELD(struct rpmem_msg_close, flags); ASSERT_ALIGNED_CHECK(struct rpmem_msg_close); ASSERT_ALIGNED_BEGIN(struct rpmem_msg_close_resp); ASSERT_ALIGNED_FIELD(struct rpmem_msg_close_resp, hdr); ASSERT_ALIGNED_CHECK(struct rpmem_msg_close_resp); ASSERT_ALIGNED_BEGIN(struct rpmem_msg_persist); ASSERT_ALIGNED_FIELD(struct rpmem_msg_persist, flags); ASSERT_ALIGNED_FIELD(struct rpmem_msg_persist, lane); ASSERT_ALIGNED_FIELD(struct rpmem_msg_persist, addr); ASSERT_ALIGNED_FIELD(struct rpmem_msg_persist, size); ASSERT_ALIGNED_CHECK(struct rpmem_msg_persist); ASSERT_ALIGNED_BEGIN(struct rpmem_msg_persist_resp); ASSERT_ALIGNED_FIELD(struct rpmem_msg_persist_resp, flags); ASSERT_ALIGNED_FIELD(struct rpmem_msg_persist_resp, lane); ASSERT_ALIGNED_CHECK(struct rpmem_msg_persist_resp); ASSERT_ALIGNED_BEGIN(struct rpmem_msg_set_attr); ASSERT_ALIGNED_FIELD(struct rpmem_msg_set_attr, hdr); ASSERT_ALIGNED_FIELD(struct rpmem_msg_set_attr, pool_attr); ASSERT_ALIGNED_CHECK(struct rpmem_msg_set_attr); ASSERT_ALIGNED_BEGIN(struct rpmem_msg_set_attr_resp); ASSERT_ALIGNED_FIELD(struct rpmem_msg_set_attr_resp, hdr); ASSERT_ALIGNED_CHECK(struct rpmem_msg_set_attr_resp); DONE(NULL); }
5,733
41.474074
73
c
null
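The no-padding property that the ASSERT_ALIGNED_* macros verify at runtime can also be expressed with plain offsetof()/sizeof arithmetic at compile time. A minimal sketch on a hypothetical wire-format struct (not one of the rpmem messages):

/* padding_check.c -- sketch: detect padding in a wire-format struct */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

struct wire_hdr {
	uint32_t type;
	uint32_t status;
	uint64_t size;
};

/* each field must start exactly where the previous one ended ... */
static_assert(offsetof(struct wire_hdr, type) == 0, "padding before type");
static_assert(offsetof(struct wire_hdr, status) ==
		offsetof(struct wire_hdr, type) + sizeof(uint32_t),
		"padding before status");
static_assert(offsetof(struct wire_hdr, size) ==
		offsetof(struct wire_hdr, status) + sizeof(uint32_t),
		"padding before size");

/* ... and the struct must end right after the last field */
static_assert(sizeof(struct wire_hdr) ==
		offsetof(struct wire_hdr, size) + sizeof(uint64_t),
		"trailing padding");

int main(void) { return 0; }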
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/compat_incompat_features/pool_open.c
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2017-2018, Intel Corporation */

/*
 * pool_open.c -- a tool for verifying that an obj/blk/log pool opens correctly
 *
 * usage: pool_open <obj|blk|log> <path>
 */
#include "unittest.h"

int
main(int argc, char *argv[])
{
	START(argc, argv, "compat_incompat_features");
	if (argc < 3)
		UT_FATAL("usage: %s <obj|blk|log> <path>", argv[0]);

	char *type = argv[1];
	char *path = argv[2];

	if (strcmp(type, "obj") == 0) {
		PMEMobjpool *pop = pmemobj_open(path, "");
		if (pop == NULL) {
			UT_FATAL("!%s: pmemobj_open failed", path);
		} else {
			UT_OUT("%s: pmemobj_open succeeded", path);
			pmemobj_close(pop);
		}
	} else if (strcmp(type, "blk") == 0) {
		PMEMblkpool *pop = pmemblk_open(path, 0);
		if (pop == NULL) {
			UT_FATAL("!%s: pmemblk_open failed", path);
		} else {
			UT_OUT("%s: pmemblk_open succeeded", path);
			pmemblk_close(pop);
		}
	} else if (strcmp(type, "log") == 0) {
		PMEMlogpool *pop = pmemlog_open(path);
		if (pop == NULL) {
			UT_FATAL("!%s: pmemlog_open failed", path);
		} else {
			UT_OUT("%s: pmemlog_open succeeded", path);
			pmemlog_close(pop);
		}
	} else {
		UT_FATAL("usage: %s <obj|blk|log> <path>", argv[0]);
	}

	DONE(NULL);
}
1,237
23.27451
79
c
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/util_poolset/mocks_windows.h
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2017, Intel Corporation */

/*
 * mocks_windows.h -- redefinitions of libc functions used in util_poolset
 *
 * This file is Windows-specific.
 *
 * This file should be included (i.e. using Forced Include) by libpmem
 * files, when compiled for the purpose of util_poolset test.
 * It would replace default implementation with mocked functions defined
 * in util_poolset.c.
 *
 * These defines could be also passed as preprocessor definitions.
 */

#ifndef WRAP_REAL_OPEN
#define os_open __wrap_os_open
#endif

#ifndef WRAP_REAL_FALLOCATE
#define os_posix_fallocate __wrap_os_posix_fallocate
#endif

#ifndef WRAP_REAL_PMEM
#define pmem_is_pmem __wrap_pmem_is_pmem
#endif
730
25.107143
74
h
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/util_poolset/util_poolset.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2020, Intel Corporation */ /* * util_poolset.c -- unit test for util_pool_create() / util_pool_open() * * usage: util_poolset cmd minlen hdrsize [mockopts] setfile ... */ #include <stdbool.h> #include "unittest.h" #include "pmemcommon.h" #include "set.h" #include <errno.h> #include "mocks.h" #include "fault_injection.h" #define LOG_PREFIX "ut" #define LOG_LEVEL_VAR "TEST_LOG_LEVEL" #define LOG_FILE_VAR "TEST_LOG_FILE" #define MAJOR_VERSION 1 #define MINOR_VERSION 0 #define SIG "PMEMXXX" #define MIN_PART ((size_t)(1024 * 1024 * 2)) /* 2 MiB */ #define TEST_FORMAT_INCOMPAT_DEFAULT POOL_FEAT_CKSUM_2K #define TEST_FORMAT_INCOMPAT_CHECK POOL_FEAT_INCOMPAT_VALID static size_t Extend_size = MIN_PART * 2; const char *Open_path = ""; os_off_t Fallocate_len = -1; size_t Is_pmem_len = 0; /* * poolset_info -- (internal) dumps poolset info and checks its integrity * * Performs the following checks: * - part_size[i] == rounddown(file_size - pool_hdr_size, Mmap_align) * - replica_size == sum(part_size) * - pool_size == min(replica_size) */ static void poolset_info(const char *fname, struct pool_set *set, int o) { if (o) UT_OUT("%s: opened: nreps %d poolsize %zu rdonly %d", fname, set->nreplicas, set->poolsize, set->rdonly); else UT_OUT("%s: created: nreps %d poolsize %zu zeroed %d", fname, set->nreplicas, set->poolsize, set->zeroed); size_t poolsize = SIZE_MAX; for (unsigned r = 0; r < set->nreplicas; r++) { struct pool_replica *rep = set->replica[r]; size_t repsize = 0; UT_OUT(" replica[%d]: nparts %d nhdrs %d repsize %zu " "is_pmem %d", r, rep->nparts, rep->nhdrs, rep->repsize, rep->is_pmem); for (unsigned i = 0; i < rep->nparts; i++) { struct pool_set_part *part = &rep->part[i]; UT_OUT(" part[%d] path %s filesize %zu size %zu", i, part->path, part->filesize, part->size); size_t partsize = (part->filesize & ~(Ut_mmap_align - 1)); repsize += partsize; if (i > 0 && (set->options & OPTION_SINGLEHDR) == 0) UT_ASSERTeq(part->size, partsize - Ut_mmap_align); /* XXX */ } repsize -= (rep->nhdrs - 1) * Ut_mmap_align; UT_ASSERTeq(rep->repsize, repsize); UT_ASSERT(rep->resvsize >= repsize); if (rep->repsize < poolsize) poolsize = rep->repsize; } UT_ASSERTeq(set->poolsize, poolsize); } /* * mock_options -- (internal) parse mock options and enable mocked functions */ static int mock_options(const char *arg) { /* reset to defaults */ Open_path = ""; Fallocate_len = -1; Is_pmem_len = 0; if (arg[0] != '-' || arg[1] != 'm') return 0; switch (arg[2]) { case 'n': /* do nothing */ break; case 'o': /* open */ Open_path = &arg[4]; break; case 'f': /* fallocate */ Fallocate_len = ATOLL(&arg[4]); break; case 'p': /* is_pmem */ Is_pmem_len = ATOULL(&arg[4]); break; default: UT_FATAL("unknown mock option: %c", arg[2]); } return 1; } int main(int argc, char *argv[]) { START(argc, argv, "util_poolset"); common_init(LOG_PREFIX, LOG_LEVEL_VAR, LOG_FILE_VAR, MAJOR_VERSION, MINOR_VERSION); if (argc < 3) UT_FATAL("usage: %s cmd minsize [mockopts] " "setfile ...", argv[0]); char *fname; struct pool_set *set; int ret; size_t minsize = strtoul(argv[2], &fname, 0); for (int arg = 3; arg < argc; arg++) { arg += mock_options(argv[arg]); fname = argv[arg]; struct pool_attr attr; memset(&attr, 0, sizeof(attr)); memcpy(attr.signature, SIG, sizeof(SIG)); attr.major = 1; switch (argv[1][0]) { case 'c': attr.features.incompat = TEST_FORMAT_INCOMPAT_DEFAULT; ret = util_pool_create(&set, fname, 0, minsize, MIN_PART, &attr, NULL, REPLICAS_ENABLED); if (ret == -1) UT_OUT("!%s: 
util_pool_create", fname); else { /* * XXX: On Windows pool files are created with * R/W permissions, so no need for chmod(). */ #ifndef _WIN32 util_poolset_chmod(set, S_IWUSR | S_IRUSR); #endif poolset_info(fname, set, 0); util_poolset_close(set, DO_NOT_DELETE_PARTS); } break; case 'o': attr.features.incompat = TEST_FORMAT_INCOMPAT_CHECK; ret = util_pool_open(&set, fname, MIN_PART, &attr, NULL, NULL, 0 /* flags */); if (ret == -1) UT_OUT("!%s: util_pool_open", fname); else { poolset_info(fname, set, 1); util_poolset_close(set, DO_NOT_DELETE_PARTS); } break; case 'e': attr.features.incompat = TEST_FORMAT_INCOMPAT_CHECK; ret = util_pool_open(&set, fname, MIN_PART, &attr, NULL, NULL, 0 /* flags */); UT_ASSERTeq(ret, 0); size_t esize = Extend_size; void *nptr = util_pool_extend(set, &esize, MIN_PART); if (nptr == NULL) UT_OUT("!%s: util_pool_extend", fname); else { poolset_info(fname, set, 1); } util_poolset_close(set, DO_NOT_DELETE_PARTS); break; case 'f': if (!core_fault_injection_enabled()) break; attr.features.incompat = TEST_FORMAT_INCOMPAT_CHECK; ret = util_pool_open(&set, fname, MIN_PART, &attr, NULL, NULL, 0 /* flags */); UT_ASSERTeq(ret, 0); size_t fsize = Extend_size; core_inject_fault_at(PMEM_MALLOC, 2, "util_poolset_append_new_part"); void *fnptr = util_pool_extend(set, &fsize, MIN_PART); UT_ASSERTeq(fnptr, NULL); UT_ASSERTeq(errno, ENOMEM); util_poolset_close(set, DO_NOT_DELETE_PARTS); break; } } common_fini(); DONE(NULL); }
5,390
23.843318
76
c
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_tx_user_data/obj_tx_user_data.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2019-2020, Intel Corporation */ /* * obj_tx_user_data.c -- unit test for pmemobj_tx_(get/set)_user_data */ #include "unittest.h" #define LAYOUT_NAME "tx_user_data" #define USER_DATA_V1 (void *) 123456789ULL #define USER_DATA_V2 (void *) 987654321ULL /* * do_tx_set_get_user_data_nested -- do set and verify user data in a tx */ static void do_tx_set_get_user_data_nested(PMEMobjpool *pop) { TX_BEGIN(pop) { pmemobj_tx_set_user_data(USER_DATA_V1); UT_ASSERTeq(USER_DATA_V1, pmemobj_tx_get_user_data()); TX_BEGIN(pop) { UT_ASSERTeq(USER_DATA_V1, pmemobj_tx_get_user_data()); pmemobj_tx_set_user_data(USER_DATA_V2); UT_ASSERTeq(USER_DATA_V2, pmemobj_tx_get_user_data()); } TX_ONABORT { UT_ASSERT(0); } TX_END } TX_ONCOMMIT { UT_ASSERTeq(USER_DATA_V2, pmemobj_tx_get_user_data()); } TX_ONABORT { UT_ASSERT(0); } TX_END TX_BEGIN(pop) { UT_ASSERTeq(NULL, pmemobj_tx_get_user_data()); } TX_ONABORT { UT_ASSERT(0); } TX_END } /* * do_tx_set_get_user_data_abort -- do set and verify user data in a tx after * tx abort */ static void do_tx_set_get_user_data_abort(PMEMobjpool *pop) { TX_BEGIN(pop) { pmemobj_tx_set_user_data(USER_DATA_V1); UT_ASSERTeq(USER_DATA_V1, pmemobj_tx_get_user_data()); pmemobj_tx_abort(-1); } TX_ONCOMMIT { UT_ASSERT(0); } TX_ONABORT { UT_ASSERTeq(USER_DATA_V1, pmemobj_tx_get_user_data()); } TX_END TX_BEGIN(pop) { UT_ASSERTeq(NULL, pmemobj_tx_get_user_data()); } TX_ONABORT { UT_ASSERT(0); } TX_END } int main(int argc, char *argv[]) { START(argc, argv, "obj_tx_user_data"); if (argc != 2) UT_FATAL("usage: %s [file]", argv[0]); PMEMobjpool *pop; if ((pop = pmemobj_create(argv[1], LAYOUT_NAME, PMEMOBJ_MIN_POOL, S_IWUSR | S_IRUSR)) == NULL) UT_FATAL("!pmemobj_create"); do_tx_set_get_user_data_nested(pop); do_tx_set_get_user_data_abort(pop); pmemobj_close(pop); DONE(NULL); }
1,948
20.655556
77
c
null
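The test above exercises pmemobj_tx_set_user_data()/pmemobj_tx_get_user_data(). A minimal usage sketch of the same pair, assuming the caller just wants to attach an arbitrary context pointer to the running transaction:

#include <libpmemobj.h>

static void
tag_transaction(PMEMobjpool *pop, void *tag)
{
	TX_BEGIN(pop) {
		pmemobj_tx_set_user_data(tag);

		/* ... transactional work; any helper called from here can
		 * read the tag back without extra plumbing ... */
		void *ctx = pmemobj_tx_get_user_data();
		(void) ctx;
	} TX_END
}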
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/pmem_memset/pmem_memset.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2020, Intel Corporation */ /* * pmem_memset.c -- unit test for doing a memset * * usage: pmem_memset file offset length */ #include "unittest.h" #include "util_pmem.h" #include "file.h" #include "memset_common.h" typedef void *pmem_memset_fn(void *pmemdest, int c, size_t len, unsigned flags); static void * pmem_memset_persist_wrapper(void *pmemdest, int c, size_t len, unsigned flags) { (void) flags; return pmem_memset_persist(pmemdest, c, len); } static void * pmem_memset_nodrain_wrapper(void *pmemdest, int c, size_t len, unsigned flags) { (void) flags; return pmem_memset_nodrain(pmemdest, c, len); } static void do_memset_variants(int fd, char *dest, const char *file_name, size_t dest_off, size_t bytes, persist_fn p) { do_memset(fd, dest, file_name, dest_off, bytes, pmem_memset_persist_wrapper, 0, p); do_memset(fd, dest, file_name, dest_off, bytes, pmem_memset_nodrain_wrapper, 0, p); for (int i = 0; i < ARRAY_SIZE(Flags); ++i) { do_memset(fd, dest, file_name, dest_off, bytes, pmem_memset, Flags[i], p); if (Flags[i] & PMEMOBJ_F_MEM_NOFLUSH) pmem_persist(dest, bytes); } } static void do_persist_ddax(const void *ptr, size_t size) { util_persist_auto(1, ptr, size); } static void do_persist(const void *ptr, size_t size) { util_persist_auto(0, ptr, size); } int main(int argc, char *argv[]) { int fd; size_t mapped_len; char *dest; if (argc != 4) UT_FATAL("usage: %s file offset length", argv[0]); const char *thr = os_getenv("PMEM_MOVNT_THRESHOLD"); const char *avx = os_getenv("PMEM_AVX"); const char *avx512f = os_getenv("PMEM_AVX512F"); START(argc, argv, "pmem_memset %s %s %s %savx %savx512f", argv[2], argv[3], thr ? thr : "default", avx ? "" : "!", avx512f ? "" : "!"); fd = OPEN(argv[1], O_RDWR); /* open a pmem file and memory map it */ if ((dest = pmem_map_file(argv[1], 0, 0, 0, &mapped_len, NULL)) == NULL) UT_FATAL("!Could not mmap %s\n", argv[1]); size_t dest_off = strtoul(argv[2], NULL, 0); size_t bytes = strtoul(argv[3], NULL, 0); enum file_type type = util_fd_get_type(fd); if (type < 0) UT_FATAL("cannot check type of file with fd %d", fd); persist_fn p; p = type == TYPE_DEVDAX ? do_persist_ddax : do_persist; do_memset_variants(fd, dest, argv[1], dest_off, bytes, p); UT_ASSERTeq(pmem_unmap(dest, mapped_len), 0); CLOSE(fd); DONE(NULL); }
2,428
22.355769
80
c
null
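pmem_memset.c drives the flag variants through a shared helper; underneath it is the plain libpmem pattern of mapping a file and calling one of the memset flavours. A minimal sketch (path, offset and length are placeholders):

#include <libpmem.h>

static int
fill_range(const char *path, int c, size_t off, size_t len)
{
	size_t mapped_len;
	int is_pmem;
	char *dest = pmem_map_file(path, 0, 0, 0, &mapped_len, &is_pmem);
	if (dest == NULL)
		return -1;

	/* memset plus flush-to-persistence in one call */
	pmem_memset_persist(dest + off, c, len);

	pmem_unmap(dest, mapped_len);
	return 0;
}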
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_fragmentation/obj_fragmentation.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2016-2018, Intel Corporation */ /* * obj_fragmentation.c -- measures average heap fragmentation * * A pretty simplistic test that measures internal fragmentation of the * allocator for the given size. */ #include <stdlib.h> #include "unittest.h" #define LAYOUT_NAME "obj_fragmentation" #define OBJECT_OVERHEAD 64 /* account for the header added to each object */ #define MAX_OVERALL_OVERHEAD 0.10f /* * For the best accuracy fragmentation should be measured for one full zone * because the metadata is preallocated. For reasonable test duration a smaller * size must be used. */ #define DEFAULT_FILE_SIZE ((size_t)(1ULL << 28)) /* 256 megabytes */ int main(int argc, char *argv[]) { START(argc, argv, "obj_fragmentation"); if (argc < 3) UT_FATAL("usage: %s allocsize filename [filesize]", argv[0]); size_t file_size; if (argc == 4) file_size = ATOUL(argv[3]); else file_size = DEFAULT_FILE_SIZE; size_t alloc_size = ATOUL(argv[1]); const char *path = argv[2]; PMEMobjpool *pop = pmemobj_create(path, LAYOUT_NAME, file_size, S_IWUSR | S_IRUSR); if (pop == NULL) UT_FATAL("!pmemobj_create: %s", path); size_t allocated = 0; int err = 0; do { PMEMoid oid; err = pmemobj_alloc(pop, &oid, alloc_size, 0, NULL, NULL); if (err == 0) allocated += pmemobj_alloc_usable_size(oid) + OBJECT_OVERHEAD; } while (err == 0); float allocated_pct = ((float)allocated / file_size); float overhead_pct = 1.f - allocated_pct; UT_ASSERT(overhead_pct <= MAX_OVERALL_OVERHEAD); pmemobj_close(pop); DONE(NULL); }
1,607
23.738462
79
c
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_recovery/obj_recovery.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2019, Intel Corporation */ /* * obj_recovery.c -- unit test for pool recovery */ #include "unittest.h" #include "valgrind_internal.h" #if VG_PMEMCHECK_ENABLED #define VALGRIND_PMEMCHECK_END_TX VALGRIND_PMC_END_TX #else #define VALGRIND_PMEMCHECK_END_TX #endif POBJ_LAYOUT_BEGIN(recovery); POBJ_LAYOUT_ROOT(recovery, struct root); POBJ_LAYOUT_TOID(recovery, struct foo); POBJ_LAYOUT_END(recovery); #define MB (1 << 20) struct foo { int bar; }; struct root { PMEMmutex lock; TOID(struct foo) foo; char large_data[MB]; }; #define BAR_VALUE 5 int main(int argc, char *argv[]) { START(argc, argv, "obj_recovery"); /* root doesn't count */ UT_COMPILE_ERROR_ON(POBJ_LAYOUT_TYPES_NUM(recovery) != 1); if (argc != 5) UT_FATAL("usage: %s [file] [lock: y/n] " "[cmd: c/o] [type: n/f/s/l]", argv[0]); const char *path = argv[1]; PMEMobjpool *pop = NULL; int exists = argv[3][0] == 'o'; enum { TEST_NEW, TEST_FREE, TEST_SET, TEST_LARGE } type; if (argv[4][0] == 'n') type = TEST_NEW; else if (argv[4][0] == 'f') type = TEST_FREE; else if (argv[4][0] == 's') type = TEST_SET; else if (argv[4][0] == 'l') type = TEST_LARGE; else UT_FATAL("invalid type"); if (!exists) { if ((pop = pmemobj_create(path, POBJ_LAYOUT_NAME(recovery), 0, S_IWUSR | S_IRUSR)) == NULL) { UT_FATAL("failed to create pool\n"); } } else { if ((pop = pmemobj_open(path, POBJ_LAYOUT_NAME(recovery))) == NULL) { UT_FATAL("failed to open pool\n"); } } TOID(struct root) root = POBJ_ROOT(pop, struct root); int lock_type = TX_PARAM_NONE; void *lock = NULL; if (argv[2][0] == 'y') { lock_type = TX_PARAM_MUTEX; lock = &D_RW(root)->lock; } if (type == TEST_SET) { if (!exists) { TX_BEGIN_PARAM(pop, lock_type, lock) { TX_ADD(root); TOID(struct foo) f = TX_NEW(struct foo); D_RW(root)->foo = f; D_RW(f)->bar = BAR_VALUE; } TX_END TX_BEGIN_PARAM(pop, lock_type, lock) { TX_ADD_FIELD(D_RW(root)->foo, bar); D_RW(D_RW(root)->foo)->bar = BAR_VALUE * 2; /* * Even though flushes are not required inside * of a transaction, this is done here to * suppress irrelevant pmemcheck issues, because * we exit the program before the data is * flushed, while preserving any real ones. */ pmemobj_persist(pop, &D_RW(D_RW(root)->foo)->bar, sizeof(int)); /* * We also need to cleanup the transaction state * of pmemcheck. 
*/ VALGRIND_PMEMCHECK_END_TX; exit(0); /* simulate a crash */ } TX_END } else { UT_ASSERT(D_RW(D_RW(root)->foo)->bar == BAR_VALUE); } } else if (type == TEST_LARGE) { if (!exists) { TX_BEGIN(pop) { TX_MEMSET(D_RW(root)->large_data, 0xc, MB); pmemobj_persist(pop, D_RW(root)->large_data, MB); VALGRIND_PMEMCHECK_END_TX; exit(0); } TX_END } else { UT_ASSERT(util_is_zeroed(D_RW(root)->large_data, MB)); TX_BEGIN(pop) { /* we should be able to start TX */ TX_MEMSET(D_RW(root)->large_data, 0xc, MB); pmemobj_persist(pop, D_RW(root)->large_data, MB); VALGRIND_PMEMCHECK_END_TX; pmemobj_tx_abort(0); } TX_END } } else if (type == TEST_NEW) { if (!exists) { TX_BEGIN_PARAM(pop, lock_type, lock) { TOID(struct foo) f = TX_NEW(struct foo); TX_SET(root, foo, f); pmemobj_persist(pop, &D_RW(root)->foo, sizeof(PMEMoid)); VALGRIND_PMEMCHECK_END_TX; exit(0); /* simulate a crash */ } TX_END } else { UT_ASSERT(TOID_IS_NULL(D_RW(root)->foo)); } } else { /* TEST_FREE */ if (!exists) { TX_BEGIN_PARAM(pop, lock_type, lock) { TX_ADD(root); TOID(struct foo) f = TX_NEW(struct foo); D_RW(root)->foo = f; D_RW(f)->bar = BAR_VALUE; } TX_END TX_BEGIN_PARAM(pop, lock_type, lock) { TX_ADD(root); TX_FREE(D_RW(root)->foo); D_RW(root)->foo = TOID_NULL(struct foo); pmemobj_persist(pop, &D_RW(root)->foo, sizeof(PMEMoid)); VALGRIND_PMEMCHECK_END_TX; exit(0); /* simulate a crash */ } TX_END } else { UT_ASSERT(!TOID_IS_NULL(D_RW(root)->foo)); } } UT_ASSERT(pmemobj_check(path, POBJ_LAYOUT_NAME(recovery))); pmemobj_close(pop); DONE(NULL); }
4,244
20.994819
61
c
null
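obj_recovery.c simulates crashes inside transactions that are optionally guarded by a PMEMmutex via TX_BEGIN_PARAM(pop, TX_PARAM_MUTEX, &lock). A minimal sketch of that locking pattern, with a hypothetical root layout holding the lock and a counter:

#include <stdint.h>
#include <libpmemobj.h>

struct my_root {		/* hypothetical layout */
	PMEMmutex lock;
	uint64_t counter;
};

static void
bump_counter(PMEMobjpool *pop, struct my_root *rootp)
{
	TX_BEGIN_PARAM(pop, TX_PARAM_MUTEX, &rootp->lock) {
		/* snapshot the field, then modify it transactionally */
		pmemobj_tx_add_range_direct(&rootp->counter,
				sizeof(rootp->counter));
		rootp->counter += 1;
	} TX_END	/* the mutex is released on commit or abort */
}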
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/pmem2_perror/pmem2_perror.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020, Intel Corporation */ /* * pmem2_perror.c -- pmem2_perror unittests */ #include "libpmem2.h" #include "unittest.h" #include "out.h" #include "config.h" #include "source.h" /* * test_fail_pmem2_func_simple - simply check print message when func * from pmem2 API fails */ static int test_fail_pmem2_func_simple(const struct test_case *tc, int argc, char *argv[]) { struct pmem2_config cfg; size_t offset = (size_t)INT64_MAX + 1; /* "randomly" chosen function to be failed */ int ret = pmem2_config_set_offset(&cfg, offset); UT_ASSERTne(ret, 0); pmem2_perror("pmem2_config_set_offset"); return 0; } /* * test_fail_pmem2_func_format - check print message when func * from pmem2 API fails and ellipsis operator is used */ static int test_fail_pmem2_func_format(const struct test_case *tc, int argc, char *argv[]) { struct pmem2_config cfg; size_t offset = (size_t)INT64_MAX + 1; /* "randomly" chosen function to be failed */ int ret = pmem2_config_set_offset(&cfg, offset); UT_ASSERTne(ret, 0); pmem2_perror("pmem2_config_set_offset %d", 123); return 0; } /* * test_fail_system_func_simple - check print message when directly called * system func fails */ static int test_fail_system_func_simple(const struct test_case *tc, int argc, char *argv[]) { /* "randomly" chosen function to be failed */ int ret = os_open("XXX", O_RDONLY); UT_ASSERTeq(ret, -1); ERR("!open"); pmem2_perror("test"); return 0; } /* * test_fail_system_func_format - check print message when directly called * system func fails and ellipsis operator is used */ static int test_fail_system_func_format(const struct test_case *tc, int argc, char *argv[]) { /* "randomly" chosen function to be failed */ int ret = os_open("XXX", O_RDONLY); UT_ASSERTeq(ret, -1); ERR("!open"); pmem2_perror("test %d", 123); return 0; } /* * test_fail_pmem2_syscall_simple - check print message when system func * fails through pmem2_source_size func */ static int test_fail_pmem2_syscall_simple(const struct test_case *tc, int argc, char *argv[]) { struct pmem2_source src; size_t size; #ifdef _WIN32 src.type = PMEM2_SOURCE_HANDLE; src.value.handle = INVALID_HANDLE_VALUE; #else src.type = PMEM2_SOURCE_FD; src.value.fd = -1; #endif /* "randomly" chosen function to be failed */ int ret = pmem2_source_size(&src, &size); ASSERTne(ret, 0); pmem2_perror("test"); return 0; } /* * test_fail_pmem2_syscall_simple - check print message when system func * fails through pmem2_source_size func and ellipsis operator is used */ static int test_fail_pmem2_syscall_format(const struct test_case *tc, int argc, char *argv[]) { struct pmem2_source src; size_t size; #ifdef _WIN32 src.type = PMEM2_SOURCE_HANDLE; src.value.handle = INVALID_HANDLE_VALUE; #else src.type = PMEM2_SOURCE_FD; src.value.fd = -1; #endif /* "randomly" chosen function to be failed */ int ret = pmem2_source_size(&src, &size); ASSERTne(ret, 0); pmem2_perror("test %d", 123); return 0; } /* * test_simple_err_to_errno_check - check if conversion * from pmem2 err value to errno works fine */ static int test_simple_err_to_errno_check(const struct test_case *tc, int argc, char *argv[]) { int ret_errno = pmem2_err_to_errno(PMEM2_E_NOSUPP); UT_ASSERTeq(ret_errno, ENOTSUP); ret_errno = pmem2_err_to_errno(PMEM2_E_UNKNOWN); UT_ASSERTeq(ret_errno, EINVAL); ret_errno = pmem2_err_to_errno(-ENOTSUP); UT_ASSERTeq(ret_errno, ENOTSUP); return 0; } /* * test_cases -- available test cases */ static struct test_case test_cases[] = { TEST_CASE(test_fail_pmem2_func_simple), 
TEST_CASE(test_fail_pmem2_func_format), TEST_CASE(test_fail_system_func_simple), TEST_CASE(test_fail_system_func_format), TEST_CASE(test_fail_pmem2_syscall_simple), TEST_CASE(test_fail_pmem2_syscall_format), TEST_CASE(test_simple_err_to_errno_check), }; #define NTESTS (sizeof(test_cases) / sizeof(test_cases[0])) int main(int argc, char **argv) { START(argc, argv, "pmem2_perror"); util_init(); out_init("pmem2_perror", "TEST_LOG_LEVEL", "TEST_LOG_FILE", 0, 0); TEST_CASE_PROCESS(argc, argv, test_cases, NTESTS); out_fini(); DONE(NULL); }
4,205
21.253968
80
c
null
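The test above checks the text printed by pmem2_perror() for both libpmem2 and OS-level failures. A minimal sketch of a typical call site, reusing the same pmem2_config_set_offset() failure the test relies on:

#include <libpmem2.h>

static void
report_offset_error(struct pmem2_config *cfg, size_t offset)
{
	int ret = pmem2_config_set_offset(cfg, offset);
	if (ret)
		/* prints the given prefix plus the last libpmem2 error */
		pmem2_perror("pmem2_config_set_offset(%zu)", offset);
}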
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/pmem2_mem_ext/pmem2_mem_ext.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020, Intel Corporation */ /* * pmem2_mem_ext.c -- test for low level functions from libpmem2 */ #include "unittest.h" #include "file.h" #include "ut_pmem2.h" #include "valgrind_internal.h" typedef void *(*memmove_fn)(void *pmemdest, const void *src, size_t len, unsigned flags); typedef void *(*memcpy_fn)(void *pmemdest, const void *src, size_t len, unsigned flags); typedef void *(*memset_fn)(void *pmemdest, int c, size_t len, unsigned flags); static unsigned Flags[] = { 0, PMEM_F_MEM_NONTEMPORAL, PMEM_F_MEM_TEMPORAL, PMEM_F_MEM_NONTEMPORAL | PMEM_F_MEM_TEMPORAL, PMEM_F_MEM_WC, PMEM_F_MEM_WB, PMEM_F_MEM_NOFLUSH, PMEM_F_MEM_NODRAIN | PMEM_F_MEM_NOFLUSH | PMEM_F_MEM_NONTEMPORAL | PMEM_F_MEM_TEMPORAL | PMEM_F_MEM_WC | PMEM_F_MEM_WB, }; /* * do_memcpy_with_flag -- pmem2 memcpy with specified flag amd size */ static void do_memcpy_with_flag(char *addr, size_t data_size, memcpy_fn cpy_fn, int flag) { char *addr2 = addr + data_size; cpy_fn(addr2, addr, data_size, Flags[flag]); } /* * do_memmove_with_flag -- pmem2 memmove with specified flag and size */ static void do_memmove_with_flag(char *addr, size_t data_size, memmove_fn mov_fn, int flag) { char *addr2 = addr + data_size; mov_fn(addr2, addr, data_size, Flags[flag]); } /* * do_memset_with_flag -- pmem2 memset with specified flag and size */ static void do_memset_with_flag(char *addr, size_t data_size, memset_fn set_fn, int flag) { set_fn(addr, 1, data_size, Flags[flag]); if (Flags[flag] & PMEM2_F_MEM_NOFLUSH) VALGRIND_DO_PERSIST(addr, data_size); } int main(int argc, char *argv[]) { int fd; char *addr; size_t mapped_len; struct pmem2_config *cfg; struct pmem2_source *src; struct pmem2_map *map; if (argc != 5) UT_FATAL("usage: %s file type size flag", argv[0]); const char *thr = os_getenv("PMEM_MOVNT_THRESHOLD"); const char *avx = os_getenv("PMEM_AVX"); const char *avx512f = os_getenv("PMEM_AVX512F"); START(argc, argv, "pmem2_mem_ext %s %savx %savx512f", thr ? thr : "default", avx ? "" : "!", avx512f ? "" : "!"); util_init(); char type = argv[2][0]; size_t data_size = strtoul(argv[3], NULL, 0); int flag = atoi(argv[4]); UT_ASSERT(flag < ARRAY_SIZE(Flags)); fd = OPEN(argv[1], O_RDWR); UT_ASSERT(fd != -1); PMEM2_CONFIG_NEW(&cfg); PMEM2_SOURCE_FROM_FD(&src, fd); PMEM2_CONFIG_SET_GRANULARITY(cfg, PMEM2_GRANULARITY_PAGE); int ret = pmem2_map(cfg, src, &map); UT_PMEM2_EXPECT_RETURN(ret, 0); PMEM2_CONFIG_DELETE(&cfg); PMEM2_SOURCE_DELETE(&src); mapped_len = pmem2_map_get_size(map); UT_ASSERT(data_size * 2 < mapped_len); addr = pmem2_map_get_address(map); if (addr == NULL) UT_FATAL("!could not map file: %s", argv[1]); switch (type) { case 'C': { pmem2_memcpy_fn memcpy_fn = pmem2_get_memcpy_fn(map); do_memcpy_with_flag(addr, data_size, memcpy_fn, flag); break; } case 'S': { pmem2_memset_fn memset_fn = pmem2_get_memset_fn(map); do_memset_with_flag(addr, data_size, memset_fn, flag); break; } case 'M': { pmem2_memmove_fn memmove_fn = pmem2_get_memmove_fn(map); do_memmove_with_flag(addr, data_size, memmove_fn, flag); break; } default: UT_FATAL("!wrong type of test %c", type); break; } ret = pmem2_unmap(&map); UT_ASSERTeq(ret, 0); CLOSE(fd); DONE(NULL); }
3,349
22.426573
79
c
null
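pmem2_mem_ext.c looks the copy routines up from the mapping and passes PMEM2_F_MEM_* flags to them. A minimal sketch of that lookup-and-call pattern for memcpy, assuming the map was already created as in the test; the returned function flushes and drains as required unless PMEM2_F_MEM_NOFLUSH/NODRAIN is requested:

#include <libpmem2.h>

static void
copy_nontemporal(struct pmem2_map *map, size_t dst_off, size_t src_off,
		size_t len)
{
	char *base = pmem2_map_get_address(map);
	pmem2_memcpy_fn memcpy_fn = pmem2_get_memcpy_fn(map);

	memcpy_fn(base + dst_off, base + src_off, len,
			PMEM2_F_MEM_NONTEMPORAL);
}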
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/rpmemd_util/rpmemd_util_test.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2017-2018, Intel Corporation */ /* * rpmemd_util_test.c -- unit tests for rpmemd_util module */ #include "unittest.h" #include "rpmem_common.h" #include "rpmemd_log.h" #include "rpmemd_util.h" #include "util.h" /* structure to store results */ struct result { int ret; enum rpmem_persist_method persist_method; int (*persist)(const void *addr, size_t len); void *(*memcpy_persist)(void *pmemdest, const void *src, size_t len); }; /* all values to test */ static const enum rpmem_persist_method pms[] = {RPMEM_PM_GPSPM, RPMEM_PM_APM, MAX_RPMEM_PM}; static const int is_pmems[] = {0, 1}; enum mode { MODE_VALID, MODE_INVALID, MODE_MAX }; static const int ranges[2][2][2] = { [MODE_VALID] = { {0, ARRAY_SIZE(pms) - 1}, {0, ARRAY_SIZE(is_pmems)} }, [MODE_INVALID] = { {ARRAY_SIZE(pms) - 1, ARRAY_SIZE(pms)}, {0, ARRAY_SIZE(is_pmems)} } }; /* expected results */ static const struct result exp_results[3][2] = { { /* GPSPM and is_pmem == false */ {0, RPMEM_PM_GPSPM, pmem_msync, memcpy}, /* GPSPM and is_pmem == true */ {0, RPMEM_PM_GPSPM, rpmemd_pmem_persist, pmem_memcpy_persist} }, { /* APM and is_pmem == false */ {0, RPMEM_PM_GPSPM, pmem_msync, memcpy}, /* APM and is_pmem == true */ {0, RPMEM_PM_APM, rpmemd_flush_fatal, pmem_memcpy_persist} }, { /* persistency method outside of the range */ {1, 0, 0, 0}, {1, 0, 0, 0} } }; static void test_apply_pm_policy(struct result *result, int is_pmem) { if (rpmemd_apply_pm_policy(&result->persist_method, &result->persist, &result->memcpy_persist, is_pmem)) { goto err; } result->ret = 0; return; err: result->ret = 1; } #define USAGE() do {\ UT_ERR("usage: %s valid|invalid", argv[0]);\ } while (0) static void test(const int pm_range[2], const int is_pmem_range[2]) { rpmemd_log_level = RPD_LOG_NOTICE; int ret = rpmemd_log_init("rpmemd_log", NULL, 0); UT_ASSERTeq(ret, 0); struct result result; const struct result *exp_result; for (int pm_ind = pm_range[0]; pm_ind < pm_range[1]; ++pm_ind) { for (int is_pmem_ind = is_pmem_range[0]; is_pmem_ind < is_pmem_range[1]; ++is_pmem_ind) { result.persist_method = pms[pm_ind]; exp_result = &exp_results[pm_ind][is_pmem_ind]; test_apply_pm_policy(&result, is_pmems[is_pmem_ind]); UT_ASSERTeq(result.ret, exp_result->ret); if (exp_result->ret == 0) { UT_ASSERTeq(result.persist_method, exp_result->persist_method); UT_ASSERTeq(result.persist, exp_result->persist); } } } rpmemd_log_close(); } int main(int argc, char *argv[]) { START(argc, argv, "rpmemd_util"); if (argc < 2) { USAGE(); return 1; } const char *mode_str = argv[1]; enum mode mode = MODE_MAX; if (strcmp(mode_str, "valid") == 0) { mode = MODE_VALID; } else if (strcmp(mode_str, "invalid") == 0) { mode = MODE_INVALID; } else { USAGE(); return 1; } UT_ASSERTne(mode, MODE_MAX); test(ranges[mode][0], ranges[mode][1]); DONE(NULL); }
3,027
20.027778
70
c
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_defrag/obj_defrag.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2019, Intel Corporation */ /* * obj_defrag.c -- unit test for pmemobj_defrag */ #include "unittest.h" #include <limits.h> #define OBJECT_SIZE 100 static void defrag_basic(PMEMobjpool *pop) { int ret; PMEMoid oid1; PMEMoid oid2; PMEMoid oid3; ret = pmemobj_zalloc(pop, &oid1, OBJECT_SIZE, 0); UT_ASSERTeq(ret, 0); ret = pmemobj_zalloc(pop, &oid2, OBJECT_SIZE, 0); UT_ASSERTeq(ret, 0); ret = pmemobj_zalloc(pop, &oid3, OBJECT_SIZE, 0); UT_ASSERTeq(ret, 0); char *buff = (char *)MALLOC(OBJECT_SIZE); memset(buff, 0xc, OBJECT_SIZE); char *foop = (char *)pmemobj_direct(oid3); pmemobj_memcpy_persist(pop, foop, buff, OBJECT_SIZE); UT_ASSERT(memcmp(foop, buff, OBJECT_SIZE) == 0); pmemobj_free(&oid1); PMEMoid oid4 = oid3; PMEMoid *oids[] = {&oid2, &oid3, &oid4}; struct pobj_defrag_result result; ret = pmemobj_defrag(pop, oids, 3, &result); UT_ASSERTeq(ret, 0); UT_ASSERTeq(result.total, 2); UT_ASSERTeq(result.relocated, 2); /* the object at higher location should move into the freed oid1 pos */ foop = (char *)pmemobj_direct(oid3); UT_ASSERT(oid3.off < oid2.off); UT_ASSERTeq(oid3.off, oid4.off); UT_ASSERT(memcmp(foop, buff, OBJECT_SIZE) == 0); pmemobj_free(&oid2); pmemobj_free(&oid3); FREE(buff); } struct test_object { PMEMoid a; PMEMoid b; PMEMoid c; }; static void defrag_nested_pointers(PMEMobjpool *pop) { int ret; /* * This is done so that the oids below aren't allocated literally in the * ideal position in the heap (chunk 0, offset 0). */ #define EXTRA_ALLOCS 100 for (int i = 0; i < EXTRA_ALLOCS; ++i) { PMEMoid extra; ret = pmemobj_zalloc(pop, &extra, OBJECT_SIZE, 0); UT_ASSERTeq(ret, 0); pmemobj_free(&extra); } #undef EXTRA_ALLOCS PMEMoid oid1; PMEMoid oid2; PMEMoid oid3; ret = pmemobj_zalloc(pop, &oid1, OBJECT_SIZE, 0); UT_ASSERTeq(ret, 0); ret = pmemobj_zalloc(pop, &oid2, OBJECT_SIZE, 0); UT_ASSERTeq(ret, 0); ret = pmemobj_zalloc(pop, &oid3, OBJECT_SIZE, 0); UT_ASSERTeq(ret, 0); struct test_object *oid1p = (struct test_object *)pmemobj_direct(oid1); struct test_object *oid2p = (struct test_object *)pmemobj_direct(oid2); struct test_object *oid3p = (struct test_object *)pmemobj_direct(oid3); oid1p->a = OID_NULL; oid1p->b = oid2; oid1p->c = oid1; pmemobj_persist(pop, oid1p, sizeof(*oid1p)); oid2p->a = oid1; oid2p->b = OID_NULL; oid2p->c = oid3; pmemobj_persist(pop, oid2p, sizeof(*oid2p)); oid3p->a = oid2; oid3p->b = oid2; oid3p->c = oid1; pmemobj_persist(pop, oid3p, sizeof(*oid3p)); #define OID_PTRS 12 #define EXTRA_OID_PTRS 60 #define OIDS_ALL (EXTRA_OID_PTRS + OID_PTRS) PMEMoid **oids = (PMEMoid **)MALLOC(sizeof(PMEMoid *) * OIDS_ALL); PMEMoid *oid3pprs = (PMEMoid *)MALLOC(sizeof(PMEMoid) * EXTRA_OID_PTRS); int i; for (i = 0; i < EXTRA_OID_PTRS; ++i) { oid3pprs[i] = oid3; oids[i] = &oid3pprs[i]; } oids[i + 0] = &oid1; oids[i + 1] = &oid2; oids[i + 2] = &oid3; oids[i + 3] = &oid1p->a; oids[i + 4] = &oid1p->b; oids[i + 5] = &oid1p->c; oids[i + 6] = &oid2p->a; oids[i + 7] = &oid2p->b; oids[i + 8] = &oid2p->c; oids[i + 9] = &oid3p->a; oids[i + 10] = &oid3p->b; oids[i + 11] = &oid3p->c; struct pobj_defrag_result result; ret = pmemobj_defrag(pop, oids, OIDS_ALL, &result); UT_ASSERTeq(result.total, 3); UT_ASSERTeq(result.relocated, 3); UT_ASSERTeq(ret, 0); oid1p = (struct test_object *)pmemobj_direct(oid1); oid2p = (struct test_object *)pmemobj_direct(oid2); oid3p = (struct test_object *)pmemobj_direct(oid3); for (int i = 0; i < EXTRA_OID_PTRS; ++i) { UT_ASSERTeq(oid3pprs[i].off, oid3.off); } UT_ASSERTeq(oid1p->a.off, 0); UT_ASSERTeq(oid1p->b.off, 
oid2.off); UT_ASSERTeq(oid1p->c.off, oid1.off); UT_ASSERTeq(oid2p->a.off, oid1.off); UT_ASSERTeq(oid2p->b.off, 0); UT_ASSERTeq(oid2p->c.off, oid3.off); UT_ASSERTeq(oid3p->a.off, oid2.off); UT_ASSERTeq(oid3p->b.off, oid2.off); UT_ASSERTeq(oid3p->c.off, oid1.off); pmemobj_free(&oid1); pmemobj_free(&oid2); pmemobj_free(&oid3); FREE(oids); FREE(oid3pprs); } int main(int argc, char *argv[]) { START(argc, argv, "obj_defrag"); const char *path = argv[1]; PMEMobjpool *pop = NULL; pop = pmemobj_create(path, POBJ_LAYOUT_NAME(basic), PMEMOBJ_MIN_POOL * 2, S_IWUSR | S_IRUSR); if (pop == NULL) UT_FATAL("!pmemobj_create: %s", path); defrag_basic(pop); defrag_nested_pointers(pop); pmemobj_close(pop); DONE(NULL); }
4,429
22.817204
73
c
null
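obj_defrag.c shows the calling convention for pmemobj_defrag(): an array of pointers to every PMEMoid the caller holds (so they can be updated in place), plus an optional result struct. A minimal sketch with two objects:

#include <stdio.h>
#include <libpmemobj.h>

static int
defrag_two(PMEMobjpool *pop, PMEMoid *a, PMEMoid *b)
{
	PMEMoid *oids[] = {a, b};	/* defrag may rewrite these in place */
	struct pobj_defrag_result result;

	int ret = pmemobj_defrag(pop, oids, 2, &result);
	if (ret == 0)
		printf("processed %zu objects, relocated %zu\n",
				result.total, result.relocated);
	return ret;
}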
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/pmem2_deep_flush/pmem2_deep_flush.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020, Intel Corporation */ /* * pmem2_deep_flush.c -- unit test for pmem_deep_flush() * * usage: pmem2_deep_flush file deep_persist_size offset * * pmem2_deep_flush depending on the mapping granularity is performed using one * of the following paths: * - page: NOP * - cache: pmem2_deep_flush_dax * - byte: pmem2_persist_cpu_cache + pmem2_deep_flush_dax * * Where pmem2_deep_flush_dax: * - pmem2_get_type_from_stat is used to determine a file type * - for regular files performs pmem2_flush_file_buffers_os OR * - for Device DAX: * - is looking for Device DAX region (pmem2_get_region_id) * - is constructing the region deep flush file paths * - opens deep_flush file (os_open) * - reads deep_flush file (read) * - performs a write to it (write) * * Where pmem2_persist_cpu_cache performs: * - flush (replaced by mock_flush) AND * - drain (replaced by mock_drain) * * Additionally, for the sake of this test, the following functions are * replaced: * - pmem2_get_type_from_stat (to control perceived file type) * - pmem2_flush_file_buffers_os (for counting calls) * - pmem2_get_region_id (to prevent reading sysfs in search for non * existing Device DAXes) * or mocked: * - os_open (to prevent opening non existing * /sys/bus/nd/devices/region[0-9]+/deep_flush files) * - write (for counting writes to non-existing * /sys/bus/nd/devices/region[0-9]+/deep_flush files) * * NOTE: In normal usage the persist function precedes any call to * pmem2_deep_flush. This test aims to validate the pmem2_deep_flush * function and so the persist function is omitted. */ #include "source.h" #ifndef _WIN32 #include <sys/sysmacros.h> #endif #include "mmap.h" #include "persist.h" #include "pmem2_arch.h" #include "pmem2_utils.h" #include "region_namespace.h" #include "unittest.h" static int n_file_buffs_flushes = 0; static int n_fences = 0; static int n_flushes = 0; static int n_writes = 0; static int n_reads = 0; static enum pmem2_file_type *ftype_value; static int read_invalid = 0; static int deep_flush_not_needed = 0; #ifndef _WIN32 #define MOCK_FD 999 #define MOCK_REG_ID 888 #define MOCK_BUS_DEVICE_PATH "/sys/bus/nd/devices/region888/deep_flush" #define MOCK_DEV_ID 777UL /* * pmem2_get_region_id -- redefine libpmem2 function */ int pmem2_get_region_id(const struct pmem2_source *src, unsigned *region_id) { *region_id = MOCK_REG_ID; return 0; } /* * os_open -- os_open mock */ FUNC_MOCK(os_open, int, const char *path, int flags, ...) 
FUNC_MOCK_RUN_DEFAULT { if (strcmp(path, MOCK_BUS_DEVICE_PATH) == 0) return MOCK_FD; va_list ap; va_start(ap, flags); int mode = va_arg(ap, int); va_end(ap); return _FUNC_REAL(os_open)(path, flags, mode); } FUNC_MOCK_END /* * write -- write mock */ FUNC_MOCK(write, int, int fd, const void *buffer, size_t count) FUNC_MOCK_RUN_DEFAULT { UT_ASSERTeq(*(char *)buffer, '1'); UT_ASSERTeq(count, 1); UT_ASSERTeq(fd, MOCK_FD); ++n_writes; return 1; } FUNC_MOCK_END /* * read -- read mock */ FUNC_MOCK(read, int, int fd, void *buffer, size_t nbytes) FUNC_MOCK_RUN_DEFAULT { UT_ASSERTeq(nbytes, 2); UT_ASSERTeq(fd, MOCK_FD); UT_OUT("mocked read, fd %d", fd); char pattern[2] = {'1', '\n'}; int ret = sizeof(pattern); if (deep_flush_not_needed) pattern[0] = '0'; if (read_invalid) { ret = 0; goto end; } memcpy(buffer, pattern, sizeof(pattern)); end: ++n_reads; return ret; } FUNC_MOCK_END #endif /* not _WIN32 */ /* * mock_flush -- count flush calls in the test */ static void mock_flush(const void *addr, size_t len) { ++n_flushes; } /* * mock_drain -- count drain calls in the test */ static void mock_drain(void) { ++n_fences; } /* * pmem2_arch_init -- attach flush and drain functions replacements */ void pmem2_arch_init(struct pmem2_arch_info *info) { info->flush = mock_flush; info->fence = mock_drain; } /* * pmem2_map_find -- redefine libpmem2 function, redefinition is needed * for a proper compilation of the test. NOTE: this function is not used * in the test. */ struct pmem2_map * pmem2_map_find(const void *addr, size_t len) { UT_ASSERT(0); return NULL; } /* * pmem2_flush_file_buffers_os -- redefine libpmem2 function */ int pmem2_flush_file_buffers_os(struct pmem2_map *map, const void *addr, size_t len, int autorestart) { ++n_file_buffs_flushes; return 0; } /* * map_init -- fill pmem2_map in minimal scope */ static void map_init(struct pmem2_map *map) { const size_t length = 8 * MEGABYTE; map->content_length = length; /* * The test needs to allocate more memory because some test cases * validate behavior with address beyond mapping. 
*/ map->addr = MALLOC(2 * length); #ifndef _WIN32 map->source.type = PMEM2_SOURCE_FD; /* mocked device ID for device DAX */ map->source.value.st_rdev = MOCK_DEV_ID; #else map->source.type = PMEM2_SOURCE_HANDLE; #endif ftype_value = &map->source.value.ftype; } /* * counters_check_n_reset -- check numbers of uses of deep-flushing elements * and reset them */ static void counters_check_n_reset(int msynces, int flushes, int fences, int writes, int reads) { UT_ASSERTeq(n_file_buffs_flushes, msynces); UT_ASSERTeq(n_flushes, flushes); UT_ASSERTeq(n_fences, fences); UT_ASSERTeq(n_writes, writes); UT_ASSERTeq(n_reads, reads); n_file_buffs_flushes = 0; n_flushes = 0; n_fences = 0; n_writes = 0; n_reads = 0; read_invalid = 0; deep_flush_not_needed = 0; } /* * test_deep_flush_func -- test pmem2_deep_flush for all granularity options */ static int test_deep_flush_func(const struct test_case *tc, int argc, char *argv[]) { struct pmem2_map map; map_init(&map); *ftype_value = PMEM2_FTYPE_REG; void *addr = map.addr; size_t len = map.content_length; map.effective_granularity = PMEM2_GRANULARITY_PAGE; pmem2_set_flush_fns(&map); int ret = pmem2_deep_flush(&map, addr, len); UT_ASSERTeq(ret, 0); counters_check_n_reset(0, 0, 0, 0, 0); map.effective_granularity = PMEM2_GRANULARITY_CACHE_LINE; pmem2_set_flush_fns(&map); ret = pmem2_deep_flush(&map, addr, len); UT_ASSERTeq(ret, 0); counters_check_n_reset(1, 0, 0, 0, 0); map.effective_granularity = PMEM2_GRANULARITY_BYTE; pmem2_set_flush_fns(&map); ret = pmem2_deep_flush(&map, addr, len); UT_ASSERTeq(ret, 0); counters_check_n_reset(1, 0, 0, 0, 0); FREE(map.addr); return 0; } /* * test_deep_flush_func_devdax -- test pmem2_deep_flush with mocked DAX devices */ static int test_deep_flush_func_devdax(const struct test_case *tc, int argc, char *argv[]) { struct pmem2_map map; map_init(&map); void *addr = map.addr; size_t len = map.content_length; *ftype_value = PMEM2_FTYPE_DEVDAX; map.effective_granularity = PMEM2_GRANULARITY_CACHE_LINE; pmem2_set_flush_fns(&map); int ret = pmem2_deep_flush(&map, addr, len); UT_ASSERTeq(ret, 0); counters_check_n_reset(0, 1, 1, 1, 1); deep_flush_not_needed = 1; ret = pmem2_deep_flush(&map, addr, len); UT_ASSERTeq(ret, 0); counters_check_n_reset(0, 1, 1, 0, 1); read_invalid = 1; ret = pmem2_deep_flush(&map, addr, len); UT_ASSERTeq(ret, 0); counters_check_n_reset(0, 1, 1, 0, 1); map.effective_granularity = PMEM2_GRANULARITY_BYTE; pmem2_set_flush_fns(&map); ret = pmem2_deep_flush(&map, addr, len); UT_ASSERTeq(ret, 0); counters_check_n_reset(0, 1, 1, 1, 1); deep_flush_not_needed = 1; ret = pmem2_deep_flush(&map, addr, len); UT_ASSERTeq(ret, 0); counters_check_n_reset(0, 1, 1, 0, 1); read_invalid = 1; ret = pmem2_deep_flush(&map, addr, len); UT_ASSERTeq(ret, 0); counters_check_n_reset(0, 1, 1, 0, 1); FREE(map.addr); return 0; } /* * test_deep_flush_range_beyond_mapping -- test pmem2_deep_flush with * the address that goes beyond mapping */ static int test_deep_flush_range_beyond_mapping(const struct test_case *tc, int argc, char *argv[]) { struct pmem2_map map; map_init(&map); /* set address completely beyond mapping */ void *addr = (void *)((uintptr_t)map.addr + map.content_length); size_t len = map.content_length; int ret = pmem2_deep_flush(&map, addr, len); UT_ASSERTeq(ret, PMEM2_E_DEEP_FLUSH_RANGE); /* * set address in the middle of mapping, which makes range partially * beyond mapping */ addr = (void *)((uintptr_t)map.addr + map.content_length / 2); ret = pmem2_deep_flush(&map, addr, len); UT_ASSERTeq(ret, PMEM2_E_DEEP_FLUSH_RANGE); 
FREE(map.addr); return 0; } /* * test_cases -- available test cases */ static struct test_case test_cases[] = { TEST_CASE(test_deep_flush_func), TEST_CASE(test_deep_flush_func_devdax), TEST_CASE(test_deep_flush_range_beyond_mapping), }; #define NTESTS (sizeof(test_cases) / sizeof(test_cases[0])) int main(int argc, char *argv[]) { START(argc, argv, "pmem2_deep_flush"); pmem2_persist_init(); util_init(); TEST_CASE_PROCESS(argc, argv, test_cases, NTESTS); DONE(NULL); }
8,865
22.270341
80
c
null
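As the comment block in the test notes, pmem2_deep_flush() normally follows a regular persist of the same range. A minimal sketch of that sequence, assuming a valid mapping and an in-range offset:

#include <libpmem2.h>

static int
store_and_deep_flush(struct pmem2_map *map, size_t off, char value)
{
	char *base = pmem2_map_get_address(map);
	pmem2_persist_fn persist = pmem2_get_persist_fn(map);

	base[off] = value;
	persist(base + off, 1);		/* regular persistence first */

	/* then ask for flushing all the way down to the media */
	return pmem2_deep_flush(map, base + off, 1);
}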
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_direct/obj_direct.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2020, Intel Corporation */ /* * obj_direct.c -- unit test for pmemobj_direct() */ #include "obj.h" #include "obj_direct.h" #include "sys_util.h" #include "unittest.h" #define MAX_PATH_LEN 255 #define LAYOUT_NAME "direct" static os_mutex_t lock1; static os_mutex_t lock2; static os_cond_t sync_cond1; static os_cond_t sync_cond2; static int cond1; static int cond2; static PMEMoid thread_oid; static void * obj_direct(PMEMoid oid) { void *ptr1 = obj_direct_inline(oid); void *ptr2 = obj_direct_non_inline(oid); UT_ASSERTeq(ptr1, ptr2); return ptr1; } static void * test_worker(void *arg) { /* check before pool is closed, then let main continue */ UT_ASSERTne(obj_direct(thread_oid), NULL); util_mutex_lock(&lock1); cond1 = 1; os_cond_signal(&sync_cond1); util_mutex_unlock(&lock1); /* wait for main thread to free & close, then check */ util_mutex_lock(&lock2); while (!cond2) os_cond_wait(&sync_cond2, &lock2); util_mutex_unlock(&lock2); UT_ASSERTeq(obj_direct(thread_oid), NULL); return NULL; } int main(int argc, char *argv[]) { START(argc, argv, "obj_direct"); if (argc != 3) UT_FATAL("usage: %s [directory] [# of pools]", argv[0]); unsigned npools = ATOU(argv[2]); const char *dir = argv[1]; int r; util_mutex_init(&lock1); util_mutex_init(&lock2); util_cond_init(&sync_cond1); util_cond_init(&sync_cond2); cond1 = cond2 = 0; PMEMobjpool **pops = MALLOC(npools * sizeof(PMEMobjpool *)); UT_ASSERTne(pops, NULL); size_t length = strlen(dir) + MAX_PATH_LEN; char *path = MALLOC(length); for (unsigned i = 0; i < npools; ++i) { int ret = snprintf(path, length, "%s"OS_DIR_SEP_STR"testfile%d", dir, i); if (ret < 0 || ret >= length) UT_FATAL("snprintf: %d", ret); pops[i] = pmemobj_create(path, LAYOUT_NAME, PMEMOBJ_MIN_POOL, S_IWUSR | S_IRUSR); if (pops[i] == NULL) UT_FATAL("!pmemobj_create"); } PMEMoid *oids = MALLOC(npools * sizeof(PMEMoid)); UT_ASSERTne(oids, NULL); PMEMoid *tmpoids = MALLOC(npools * sizeof(PMEMoid)); UT_ASSERTne(tmpoids, NULL); oids[0] = OID_NULL; UT_ASSERTeq(obj_direct(oids[0]), NULL); for (unsigned i = 0; i < npools; ++i) { oids[i] = (PMEMoid) {pops[i]->uuid_lo, 0}; UT_ASSERTeq(obj_direct(oids[i]), NULL); uint64_t off = pops[i]->heap_offset; oids[i] = (PMEMoid) {pops[i]->uuid_lo, off}; UT_ASSERTeq((char *)obj_direct(oids[i]) - off, (char *)pops[i]); r = pmemobj_alloc(pops[i], &tmpoids[i], 100, 1, NULL, NULL); UT_ASSERTeq(r, 0); } r = pmemobj_alloc(pops[0], &thread_oid, 100, 2, NULL, NULL); UT_ASSERTeq(r, 0); UT_ASSERTne(obj_direct(thread_oid), NULL); os_thread_t t; THREAD_CREATE(&t, NULL, test_worker, NULL); /* wait for the worker thread to perform the first check */ util_mutex_lock(&lock1); while (!cond1) os_cond_wait(&sync_cond1, &lock1); util_mutex_unlock(&lock1); for (unsigned i = 0; i < npools; ++i) { UT_ASSERTne(obj_direct(tmpoids[i]), NULL); pmemobj_free(&tmpoids[i]); UT_ASSERTeq(obj_direct(tmpoids[i]), NULL); pmemobj_close(pops[i]); UT_ASSERTeq(obj_direct(oids[i]), NULL); } /* signal the worker that we're free and closed */ util_mutex_lock(&lock2); cond2 = 1; os_cond_signal(&sync_cond2); util_mutex_unlock(&lock2); THREAD_JOIN(&t, NULL); util_cond_destroy(&sync_cond1); util_cond_destroy(&sync_cond2); util_mutex_destroy(&lock1); util_mutex_destroy(&lock2); FREE(pops); FREE(tmpoids); FREE(oids); DONE(NULL); }
3,476
22.653061
66
c
null
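obj_direct.c verifies that pmemobj_direct() returns NULL both for OID_NULL and for OIDs whose pool has been closed. A minimal guard pattern built on that behaviour (the int payload is an assumption):

#include <libpmemobj.h>

static int
read_value(PMEMoid oid, int fallback)
{
	/* valid only while the owning pool is open */
	int *p = pmemobj_direct(oid);
	if (p == NULL)		/* OID_NULL or pool not (or no longer) open */
		return fallback;
	return *p;
}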
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_memcheck/obj_memcheck.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2018, Intel Corporation */ #include "unittest.h" #include "valgrind_internal.h" /* * Layout definition */ POBJ_LAYOUT_BEGIN(mc); POBJ_LAYOUT_ROOT(mc, struct root); POBJ_LAYOUT_TOID(mc, struct struct1); POBJ_LAYOUT_END(mc); struct struct1 { int fld; int dyn[]; }; struct root { TOID(struct struct1) s1; TOID(struct struct1) s2; }; static void test_memcheck_bug(void) { #if VG_MEMCHECK_ENABLED volatile char tmp[100]; VALGRIND_CREATE_MEMPOOL(tmp, 0, 0); VALGRIND_MEMPOOL_ALLOC(tmp, tmp + 8, 16); VALGRIND_MEMPOOL_FREE(tmp, tmp + 8); VALGRIND_MEMPOOL_ALLOC(tmp, tmp + 8, 16); VALGRIND_MAKE_MEM_NOACCESS(tmp, 8); tmp[7] = 0x66; #endif } static void test_memcheck_bug2(void) { #if VG_MEMCHECK_ENABLED volatile char tmp[1000]; VALGRIND_CREATE_MEMPOOL(tmp, 0, 0); VALGRIND_MEMPOOL_ALLOC(tmp, tmp + 128, 128); VALGRIND_MEMPOOL_FREE(tmp, tmp + 128); VALGRIND_MEMPOOL_ALLOC(tmp, tmp + 256, 128); VALGRIND_MEMPOOL_FREE(tmp, tmp + 256); /* * This should produce warning: * Address ... is 0 bytes inside a block of size 128 bytes freed. * instead, it produces a warning: * Address ... is 0 bytes after a block of size 128 freed */ int *data = (int *)(tmp + 256); *data = 0x66; #endif } static void test_everything(const char *path) { PMEMobjpool *pop = NULL; if ((pop = pmemobj_create(path, POBJ_LAYOUT_NAME(mc), PMEMOBJ_MIN_POOL, S_IWUSR | S_IRUSR)) == NULL) UT_FATAL("!pmemobj_create: %s", path); struct root *rt = D_RW(POBJ_ROOT(pop, struct root)); POBJ_ALLOC(pop, &rt->s1, struct struct1, sizeof(struct struct1), NULL, NULL); struct struct1 *s1 = D_RW(rt->s1); struct struct1 *s2; POBJ_ALLOC(pop, &rt->s2, struct struct1, sizeof(struct struct1), NULL, NULL); s2 = D_RW(rt->s2); POBJ_FREE(&rt->s2); /* read of uninitialized variable */ if (s1->fld) UT_OUT("%d", 1); /* write to freed object */ s2->fld = 7; pmemobj_persist(pop, s2, sizeof(*s2)); POBJ_ALLOC(pop, &rt->s2, struct struct1, sizeof(struct struct1), NULL, NULL); s2 = D_RW(rt->s2); memset(s2, 0, pmemobj_alloc_usable_size(rt->s2.oid)); s2->fld = 12; /* ok */ /* invalid write */ s2->dyn[100000] = 9; /* invalid write */ s2->dyn[1000] = 9; pmemobj_persist(pop, s2, sizeof(struct struct1)); POBJ_REALLOC(pop, &rt->s2, struct struct1, sizeof(struct struct1) + 100 * sizeof(int)); s2 = D_RW(rt->s2); s2->dyn[0] = 9; /* ok */ pmemobj_persist(pop, s2, sizeof(struct struct1) + 100 * sizeof(int)); POBJ_FREE(&rt->s2); /* invalid write to REALLOCated and FREEd object */ s2->dyn[0] = 9; pmemobj_persist(pop, s2, sizeof(struct struct1) + 100 * sizeof(int)); POBJ_ALLOC(pop, &rt->s2, struct struct1, sizeof(struct struct1), NULL, NULL); POBJ_REALLOC(pop, &rt->s2, struct struct1, sizeof(struct struct1) + 30 * sizeof(int)); s2 = D_RW(rt->s2); s2->dyn[0] = 0; s2->dyn[29] = 29; pmemobj_persist(pop, s2, sizeof(struct struct1) + 30 * sizeof(int)); POBJ_FREE(&rt->s2); s2->dyn[0] = 9; pmemobj_persist(pop, s2, sizeof(struct struct1) + 30 * sizeof(int)); pmemobj_close(pop); } static void usage(const char *a) { UT_FATAL("usage: %s [m|t] file-name", a); } int main(int argc, char *argv[]) { START(argc, argv, "obj_memcheck"); /* root doesn't count */ UT_COMPILE_ERROR_ON(POBJ_LAYOUT_TYPES_NUM(mc) != 1); if (argc < 2) usage(argv[0]); if (strcmp(argv[1], "m") == 0) test_memcheck_bug(); else if (strcmp(argv[1], "t") == 0) { if (argc < 3) usage(argv[0]); test_everything(argv[2]); } else usage(argv[0]); test_memcheck_bug2(); DONE(NULL); }
3,591
20.769697
70
c
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_defrag_advanced/obj_defrag_advanced.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020, Intel Corporation */ /* * obj_defrag_advanced.c -- test for libpmemobj defragmentation feature */ #include <stdio.h> #include <stdlib.h> #include <time.h> #include <stddef.h> #include <unistd.h> #include <stdlib.h> #include "rand.h" #include "vgraph.h" #include "pgraph.h" #include "os_thread.h" #include "unittest.h" struct create_params_t { uint64_t seed; rng_t rng; struct vgraph_params vparams; struct pgraph_params pparams; }; /* * graph_create -- create a graph * - generate an intermediate volatile graph representation * - use the volatile graph to allocate a persistent one */ static void graph_create(struct create_params_t *task, PMEMobjpool *pop, PMEMoid *oidp, rng_t *rngp) { struct vgraph_t *vgraph = vgraph_new(&task->vparams, rngp); pgraph_new(pop, oidp, vgraph, &task->pparams, rngp); vgraph_delete(vgraph); } /* * graph_defrag -- defragment the pool * - collect pointers to all PMEMoids * - do a sanity checks * - call pmemobj_defrag * - return # of relocated objects */ static size_t graph_defrag(PMEMobjpool *pop, PMEMoid oid) { struct pgraph_t *pgraph = (struct pgraph_t *)pmemobj_direct(oid); /* count number of oids */ unsigned oidcnt = pgraph->nodes_num; for (unsigned i = 0; i < pgraph->nodes_num; ++i) { struct pnode_t *pnode = (struct pnode_t *)pmemobj_direct (pgraph->nodes[i]); oidcnt += pnode->edges_num; } /* create array of oid pointers */ PMEMoid **oidv = (PMEMoid **)MALLOC(sizeof(PMEMoid *) * oidcnt); unsigned oidi = 0; for (unsigned i = 0; i < pgraph->nodes_num; ++i) { oidv[oidi++] = &pgraph->nodes[i]; struct pnode_t *pnode = (struct pnode_t *)pmemobj_direct (pgraph->nodes[i]); for (unsigned j = 0; j < pnode->edges_num; ++j) { oidv[oidi++] = &pnode->edges[j]; } } UT_ASSERTeq(oidi, oidcnt); /* check if all oids are valid */ for (unsigned i = 0; i < oidcnt; ++i) { void *ptr = pmemobj_direct(*oidv[i]); UT_ASSERTne(ptr, NULL); } /* check if all oids appear only once */ for (unsigned i = 0; i < oidcnt - 1; ++i) { for (unsigned j = i + 1; j < oidcnt; ++j) { UT_ASSERTne(oidv[i], oidv[j]); } } struct pobj_defrag_result result; int ret = pmemobj_defrag(pop, oidv, oidcnt, &result); UT_ASSERTeq(ret, 0); UT_ASSERTeq(result.total, pgraph->nodes_num); FREE(oidv); return result.relocated; } /* * graph_defrag_ntimes -- defragment the graph N times * - where N <= max_rounds * - it stops defrag if # of relocated objects == 0 */ static void graph_defrag_ntimes(PMEMobjpool *pop, PMEMoid oid, unsigned max_rounds) { size_t relocated; unsigned rounds = 0; do { relocated = graph_defrag(pop, oid); ++rounds; } while (relocated > 0 && rounds < max_rounds); } #define HAS_TO_EXIST (1) /* * graph_dump -- dump a graph from the pool to a text file */ static void graph_dump(PMEMoid oid, const char *path, int has_exist) { struct pgraph_t *pgraph = (struct pgraph_t *)pmemobj_direct(oid); if (has_exist) UT_ASSERTne(pgraph, NULL); if (pgraph) pgraph_print(pgraph, path); } #define FGETS_BUFF_LEN 1024 /* * dump_compare -- compare graph dumps * Test fails if the contents of dumps do not match */ static void dump_compare(const char *path1, const char *path2) { FILE *dump1 = FOPEN(path1, "r"); FILE *dump2 = FOPEN(path2, "r"); char buff1[FGETS_BUFF_LEN]; char buff2[FGETS_BUFF_LEN]; char *sret1, *sret2; do { sret1 = fgets(buff1, FGETS_BUFF_LEN, dump1); sret2 = fgets(buff2, FGETS_BUFF_LEN, dump2); /* both files have to end at the same time */ if (!sret1) { UT_ASSERTeq(sret2, NULL); FCLOSE(dump1); FCLOSE(dump2); return; } UT_ASSERTeq(sret1, buff1); 
UT_ASSERTeq(sret2, buff2); UT_ASSERTeq(strcmp(buff1, buff2), 0); } while (1); } /* * create_params_init -- initialize create params */ static void create_params_init(struct create_params_t *params) { params->seed = 1; /* good enough defaults - no magic here */ params->vparams.max_nodes = 50; params->vparams.max_edges = 10; params->vparams.range_nodes = 10; params->vparams.range_edges = 10; params->vparams.min_pattern_size = 8; params->vparams.max_pattern_size = 1024; params->pparams.graph_copies = 10; } /* global state */ static struct global_t { PMEMobjpool *pop; } global; /* * PMEMobj root object structure */ struct root_t { unsigned graphs_num; PMEMoid graphs[]; }; /* * root_size -- calculate a root object size */ static inline size_t root_size(unsigned graph_num, size_t min_root_size) { size_t size = sizeof(struct root_t) + sizeof(PMEMoid) * graph_num; return MAX(size, min_root_size); } #define QUERY_GRAPHS_NUM UINT_MAX static struct root_t * get_root(unsigned graphs_num, size_t min_root_size) { PMEMoid roid; struct root_t *root; if (graphs_num == QUERY_GRAPHS_NUM) { /* allocate a root object without graphs */ roid = pmemobj_root(global.pop, root_size(0, 0)); if (OID_IS_NULL(roid)) UT_FATAL("!pmemobj_root:"); root = (struct root_t *)pmemobj_direct(roid); UT_ASSERTne(root, NULL); graphs_num = root->graphs_num; } UT_ASSERT(graphs_num > 0); /* reallocate a root object with all known graphs */ roid = pmemobj_root(global.pop, root_size(graphs_num, min_root_size)); if (OID_IS_NULL(roid)) UT_FATAL("!pmemobj_root:"); root = (struct root_t *)pmemobj_direct(roid); UT_ASSERTne(root, NULL); return root; } /* * parse_nonzero -- parse non-zero unsigned */ static void parse_nonzero(unsigned *var, const char *arg) { unsigned long v = STRTOUL(arg, NULL, 10); UT_ASSERTne(v, 0); UT_ASSERT(v < UINT_MAX); *var = v; } #define GRAPH_LAYOUT POBJ_LAYOUT_NAME(graph) /* * op_pool_create -- create a pool */ static int op_pool_create(const struct test_case *tc, int argc, char *argv[]) { if (argc < 1) UT_FATAL("usage: %s <path>", tc->name); /* parse arguments */ const char *path = argv[0]; /* open a pool */ global.pop = pmemobj_create(path, GRAPH_LAYOUT, 0, S_IWUSR | S_IRUSR); if (global.pop == NULL) { UT_FATAL("!pmemobj_create: %s", path); } return 1; } /* * op_pool_close -- close the poll */ static int op_pool_close(const struct test_case *tc, int argc, char *argv[]) { pmemobj_close(global.pop); global.pop = NULL; return 0; } /* * op_graph_create -- create a graph */ static int op_graph_create(const struct test_case *tc, int argc, char *argv[]) { if (argc < 4) UT_FATAL("usage: %s <max-nodes> <max-edges> <graph-copies>" " <min-root-size>", tc->name); /* parse arguments */ struct create_params_t cparams; create_params_init(&cparams); parse_nonzero(&cparams.vparams.max_nodes, argv[0]); parse_nonzero(&cparams.vparams.max_edges, argv[1]); parse_nonzero(&cparams.pparams.graph_copies, argv[2]); size_t min_root_size = STRTOULL(argv[3], NULL, 10); struct root_t *root = get_root(1, min_root_size); randomize(cparams.seed); /* generate a single graph */ graph_create(&cparams, global.pop, &root->graphs[0], NULL); root->graphs_num = 1; pmemobj_persist(global.pop, root, root_size(1, min_root_size)); return 4; } /* * op_graph_dump -- dump the graph */ static int op_graph_dump(const struct test_case *tc, int argc, char *argv[]) { if (argc < 1) UT_FATAL("usage: %s <dump>", tc->name); /* parse arguments */ const char *dump = argv[0]; struct root_t *root = get_root(QUERY_GRAPHS_NUM, 0); UT_ASSERTeq(root->graphs_num, 1); /* dump the 
graph before defrag */ graph_dump(root->graphs[0], dump, HAS_TO_EXIST); return 1; } /* * op_graph_defrag -- defrag the graph */ static int op_graph_defrag(const struct test_case *tc, int argc, char *argv[]) { if (argc < 1) UT_FATAL("usage: %s <max-rounds>", tc->name); /* parse arguments */ unsigned max_rounds; parse_nonzero(&max_rounds, argv[0]); struct root_t *root = get_root(QUERY_GRAPHS_NUM, 0); UT_ASSERTeq(root->graphs_num, 1); /* do the defrag */ graph_defrag_ntimes(global.pop, root->graphs[0], max_rounds); return 1; } /* * op_dump_compare -- compare dumps */ static int op_dump_compare(const struct test_case *tc, int argc, char *argv[]) { if (argc < 2) UT_FATAL("usage: %s <dump1> <dump2>", tc->name); /* parse arguments */ const char *dump1 = argv[0]; const char *dump2 = argv[1]; dump_compare(dump1, dump2); return 2; } struct create_n_defrag_params_t { char dump1[PATH_MAX]; char dump2[PATH_MAX]; struct create_params_t cparams; PMEMobjpool *pop; PMEMoid *oidp; unsigned max_rounds; unsigned ncycles; }; /* * create_n_defrag_thread -- create and defrag graphs mutiple times */ static void * create_n_defrag_thread(void *arg) { struct create_n_defrag_params_t *params = (struct create_n_defrag_params_t *)arg; struct create_params_t *cparams = &params->cparams; for (unsigned i = 0; i < params->ncycles; ++i) { graph_create(cparams, global.pop, params->oidp, &cparams->rng); graph_dump(*params->oidp, params->dump1, HAS_TO_EXIST); graph_defrag_ntimes(params->pop, *params->oidp, params->max_rounds); graph_dump(*params->oidp, params->dump2, HAS_TO_EXIST); dump_compare(params->dump1, params->dump2); pgraph_delete(params->oidp); } return NULL; } /* * op_graph_create_n_defrag_mt -- multi-threaded graphs creation & defrag */ static int op_graph_create_n_defrag_mt(const struct test_case *tc, int argc, char *argv[]) { if (argc < 8) UT_FATAL("usage: %s <max-nodes> <max-edges> <graph-copies>" " <min-root-size> <max-defrag-rounds> <n-threads>" "<n-create-defrag-cycles> <dump-suffix>", tc->name); /* parse arguments */ struct create_params_t cparams; create_params_init(&cparams); parse_nonzero(&cparams.vparams.max_nodes, argv[0]); parse_nonzero(&cparams.vparams.max_edges, argv[1]); parse_nonzero(&cparams.pparams.graph_copies, argv[2]); size_t min_root_size = STRTOULL(argv[3], NULL, 10); unsigned max_rounds; parse_nonzero(&max_rounds, argv[4]); unsigned nthreads; parse_nonzero(&nthreads, argv[5]); unsigned ncycles; parse_nonzero(&ncycles, argv[6]); char *dump_suffix = argv[7]; struct root_t *root = get_root(nthreads, min_root_size); root->graphs_num = nthreads; pmemobj_persist(global.pop, root, sizeof(*root)); /* prepare threads params */ struct create_n_defrag_params_t *paramss = (struct create_n_defrag_params_t *)MALLOC( sizeof(*paramss) * nthreads); for (unsigned i = 0; i < nthreads; ++i) { struct create_n_defrag_params_t *params = &paramss[i]; SNPRINTF(params->dump1, PATH_MAX, "dump_1_th%u_%s.log", i, dump_suffix); SNPRINTF(params->dump2, PATH_MAX, "dump_2_th%u_%s.log", i, dump_suffix); memcpy(&params->cparams, &cparams, sizeof(cparams)); params->cparams.seed += i; randomize_r(&params->cparams.rng, params->cparams.seed); params->pop = global.pop; params->oidp = &root->graphs[i]; params->max_rounds = max_rounds; params->ncycles = ncycles; } /* spawn threads */ os_thread_t *threads = (os_thread_t *)MALLOC( sizeof(*threads) * nthreads); for (unsigned i = 0; i < nthreads; ++i) THREAD_CREATE(&threads[i], NULL, create_n_defrag_thread, &paramss[i]); /* join all threads */ void *ret = NULL; for (unsigned i = 0; i < 
nthreads; ++i) { THREAD_JOIN(&threads[i], &ret); UT_ASSERTeq(ret, NULL); } FREE(threads); FREE(paramss); return 8; } /* * op_pool_open -- open the pool */ static int op_pool_open(const struct test_case *tc, int argc, char *argv[]) { if (argc < 1) UT_FATAL("usage: %s <path>", tc->name); /* parse arguments */ const char *path = argv[0]; /* open a pool */ global.pop = pmemobj_open(path, GRAPH_LAYOUT); if (global.pop == NULL) UT_FATAL("!pmemobj_create: %s", path); return 1; } /* * op_graph_dump_all -- dump all graphs */ static int op_graph_dump_all(const struct test_case *tc, int argc, char *argv[]) { if (argc < 1) UT_FATAL("usage: %s <dump-prefix>", tc->name); /* parse arguments */ const char *dump_prefix = argv[0]; struct root_t *root = get_root(QUERY_GRAPHS_NUM, 0); char dump[PATH_MAX]; for (unsigned i = 0; i < root->graphs_num; ++i) { SNPRINTF(dump, PATH_MAX, "%s_%u.log", dump_prefix, i); graph_dump(root->graphs[i], dump, HAS_TO_EXIST); } return 1; } /* * ops -- available ops */ static struct test_case ops[] = { TEST_CASE(op_pool_create), TEST_CASE(op_pool_close), TEST_CASE(op_graph_create), TEST_CASE(op_graph_dump), TEST_CASE(op_graph_defrag), TEST_CASE(op_dump_compare), TEST_CASE(op_graph_create_n_defrag_mt), /* for pool validation only */ TEST_CASE(op_pool_open), TEST_CASE(op_graph_dump_all), }; #define NOPS ARRAY_SIZE(ops) #define TEST_NAME "obj_defrag_advanced" int main(int argc, char *argv[]) { START(argc, argv, TEST_NAME); TEST_CASE_PROCESS(argc, argv, ops, NOPS); DONE(NULL); }
12,707
21.452297
79
c
null
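The test above drives defragmentation through its graph_defrag_ntimes() helper; the public entry point underneath is pmemobj_defrag(). The sketch below is not part of the test: it walks an already-populated pool and defragments everything it finds. Note that the oid array here is volatile, so unlike the test (which passes pointers into root->graphs[]) relocated references would not be fixed up in persistent memory.

#include <libpmemobj.h>
#include <stdio.h>
#include <stdlib.h>

/* sketch: defragment every object reachable by pool iteration */
static int
defrag_all(PMEMobjpool *pop)
{
	/* count the objects first */
	size_t cnt = 0;
	for (PMEMoid oid = pmemobj_first(pop); !OID_IS_NULL(oid);
			oid = pmemobj_next(oid))
		cnt++;
	if (cnt == 0)
		return 0;

	PMEMoid *oids = malloc(cnt * sizeof(PMEMoid));
	PMEMoid **oidv = malloc(cnt * sizeof(PMEMoid *));
	if (oids == NULL || oidv == NULL) {
		free(oids);
		free(oidv);
		return -1;
	}

	size_t i = 0;
	for (PMEMoid oid = pmemobj_first(pop); !OID_IS_NULL(oid);
			oid = pmemobj_next(oid), i++) {
		oids[i] = oid;
		oidv[i] = &oids[i];
	}

	struct pobj_defrag_result res;
	int ret = pmemobj_defrag(pop, oidv, cnt, &res);
	if (ret == 0)
		printf("relocated %zu of %zu objects\n",
				res.relocated, res.total);

	free(oidv);
	free(oids);
	return ret;
}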
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_defrag_advanced/pgraph.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020, Intel Corporation */ /* * pgraph.c -- persistent graph representation */ #include <inttypes.h> #include "unittest.h" #include "vgraph.h" #include "pgraph.h" #define PATTERN 'g' /* * pnode_size -- return the entire of node size */ static size_t pnode_size(unsigned edges_num, size_t pattern_size) { size_t node_size = sizeof(struct pnode_t); node_size += sizeof(PMEMoid) * edges_num; node_size += pattern_size; return node_size; } /* * pnode_init -- initialize the node */ static void pnode_init(PMEMobjpool *pop, PMEMoid pnode_oid, struct vnode_t *vnode, PMEMoid pnodes[]) { struct pnode_t *pnode = (struct pnode_t *)pmemobj_direct(pnode_oid); pnode->node_id = vnode->node_id; pnode->size = vnode->psize; /* set edges */ pnode->edges_num = vnode->edges_num; for (unsigned i = 0; i < vnode->edges_num; ++i) pnode->edges[i] = pnodes[vnode->edges[i]]; /* initialize pattern */ pnode->pattern_size = vnode->pattern_size; void *pattern = (void *)&pnode->edges[pnode->edges_num]; pmemobj_memset(pop, pattern, PATTERN, pnode->pattern_size, PMEMOBJ_F_MEM_NOFLUSH); /* persist the whole node state */ pmemobj_persist(pop, (const void *)pnode, pnode->size); } /* * order_shuffle -- shuffle the nodes in graph */ static void order_shuffle(unsigned *order, unsigned num, rng_t *rngp) { for (unsigned i = 0; i < num; ++i) { unsigned j = rand_range(0, num, rngp); unsigned temp = order[j]; order[j] = order[i]; order[i] = temp; } } /* * order_new -- generate the sequence of the graph nodes allocation */ static unsigned * order_new(struct vgraph_t *vgraph, rng_t *rngp) { unsigned *order = (unsigned *)MALLOC(sizeof(unsigned) * vgraph->nodes_num); /* initialize id list */ for (unsigned i = 0; i < vgraph->nodes_num; ++i) order[i] = i; order_shuffle(order, vgraph->nodes_num, rngp); return order; } /* * pgraph_copy_new -- allocate a persistent copy of the volatile graph */ static PMEMoid * pgraph_copy_new(PMEMobjpool *pop, struct vgraph_t *vgraph, rng_t *rngp) { /* to be returned array of PMEMoids to raw nodes allocations */ PMEMoid *nodes = (PMEMoid *)MALLOC(sizeof(PMEMoid) * vgraph->nodes_num); /* generates random order of nodes allocation */ unsigned *order = order_new(vgraph, rngp); /* allocate the nodes in the random order */ int ret; for (unsigned i = 0; i < vgraph->nodes_num; ++i) { struct vnode_t vnode = vgraph->node[order[i]]; PMEMoid *node = &nodes[order[i]]; ret = pmemobj_alloc(pop, node, vnode.psize, 0, NULL, NULL); UT_ASSERTeq(ret, 0); } FREE(order); return nodes; } /* * pgraph_copy_delete -- free copies of the graph */ static void pgraph_copy_delete(PMEMoid *nodes, unsigned num) { for (unsigned i = 0; i < num; ++i) { if (OID_IS_NULL(nodes[i])) continue; pmemobj_free(&nodes[i]); } FREE(nodes); } /* * pgraph_size -- return the struct pgraph_t size */ static size_t pgraph_size(unsigned nodes_num) { return sizeof(struct pgraph_t) + sizeof(PMEMoid) * nodes_num; } /* * pgraph_new -- allocate a new persistent graph in such a way * that the fragmentation is as large as possible */ void pgraph_new(PMEMobjpool *pop, PMEMoid *oidp, struct vgraph_t *vgraph, struct pgraph_params *params, rng_t *rngp) { int ret = pmemobj_alloc(pop, oidp, pgraph_size(vgraph->nodes_num), 0, NULL, NULL); UT_ASSERTeq(ret, 0); struct pgraph_t *pgraph = (struct pgraph_t *)pmemobj_direct(*oidp); pgraph->nodes_num = vgraph->nodes_num; pmemobj_persist(pop, pgraph, sizeof(*pgraph)); /* calculate size of pnodes */ for (unsigned i = 0; i < vgraph->nodes_num; ++i) { struct vnode_t *vnode = 
&vgraph->node[i]; vnode->psize = pnode_size(vnode->edges_num, vnode->pattern_size); } /* prepare multiple copies of the nodes */ unsigned copies_num = rand_range(1, params->graph_copies, rngp); PMEMoid **copies = (PMEMoid **)MALLOC(sizeof(PMEMoid *) * copies_num); for (unsigned i = 0; i < copies_num; ++i) copies[i] = pgraph_copy_new(pop, vgraph, rngp); /* peek exactly the one copy of each node */ for (unsigned i = 0; i < pgraph->nodes_num; ++i) { unsigned copy_id = rand_range(0, copies_num, rngp); pgraph->nodes[i] = copies[copy_id][i]; copies[copy_id][i] = OID_NULL; } pmemobj_persist(pop, pgraph->nodes, sizeof(PMEMoid) * pgraph->nodes_num); /* free unused copies of the nodes */ for (unsigned i = 0; i < copies_num; ++i) pgraph_copy_delete(copies[i], vgraph->nodes_num); FREE(copies); /* initialize pnodes */ for (unsigned i = 0; i < pgraph->nodes_num; ++i) pnode_init(pop, pgraph->nodes[i], &vgraph->node[i], pgraph->nodes); } /* * pgraph_delete -- free the persistent graph */ void pgraph_delete(PMEMoid *oidp) { struct pgraph_t *pgraph = (struct pgraph_t *)pmemobj_direct(*oidp); /* free pnodes */ for (unsigned i = 0; i < pgraph->nodes_num; ++i) pmemobj_free(&pgraph->nodes[i]); pmemobj_free(oidp); } /* * pgraph_print -- print graph in human readable format */ void pgraph_print(struct pgraph_t *pgraph, const char *dump) { UT_ASSERTne(dump, NULL); FILE *out = FOPEN(dump, "w"); /* print the graph statistics */ fprintf(out, "# of nodes: %u\n", pgraph->nodes_num); uint64_t total_edges_num = 0; for (unsigned i = 0; i < pgraph->nodes_num; ++i) { PMEMoid node_oid = pgraph->nodes[i]; struct pnode_t *pnode = (struct pnode_t *)pmemobj_direct(node_oid); total_edges_num += pnode->edges_num; } fprintf(out, "Total # of edges: %" PRIu64 "\n\n", total_edges_num); /* print the graph itself */ for (unsigned i = 0; i < pgraph->nodes_num; ++i) { PMEMoid node_oid = pgraph->nodes[i]; struct pnode_t *pnode = (struct pnode_t *)pmemobj_direct(node_oid); fprintf(out, "%u:", pnode->node_id); for (unsigned j = 0; j < pnode->edges_num; ++j) { PMEMoid edge_oid = pnode->edges[j]; struct pnode_t *edge = (struct pnode_t *)pmemobj_direct(edge_oid); UT_ASSERT(edge->node_id < pgraph->nodes_num); fprintf(out, "%u, ", edge->node_id); } fprintf(out, "\n"); } FCLOSE(out); }
6,058
23.934156
73
c
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_defrag_advanced/vgraph.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020, Intel Corporation */ /* * vgraph.c -- volatile graph representation */ #include <stdlib.h> #include <stdio.h> #include "rand.h" #include "unittest.h" #include "vgraph.h" /* * rand_range -- generate pseudo-random number from given interval [min, max] */ unsigned rand_range(unsigned min, unsigned max, rng_t *rngp) { if (min == max) return min; if (min > max) UT_FATAL("!rand_range"); unsigned ret; if (rngp) ret = (unsigned)rnd64_r(rngp); else ret = (unsigned)rnd64(); return ((unsigned)ret % (max - min)) + min; } /* * vnode_new -- allocate a new volatile node */ static void vnode_new(struct vnode_t *node, unsigned v, struct vgraph_params *params, rng_t *rngp) { unsigned min_edges = 1; if (params->max_edges > params->range_edges) min_edges = params->max_edges - params->range_edges; unsigned edges_num = rand_range(min_edges, params->max_edges, rngp); node->node_id = v; node->edges_num = edges_num; node->edges = (unsigned *)MALLOC(sizeof(int) * edges_num); node->pattern_size = rand_range(params->min_pattern_size, params->max_pattern_size, rngp); } /* * vnode_delete -- free a volatile node */ static void vnode_delete(struct vnode_t *node) { FREE(node->edges); } /* * vgraph_get_node -- return node in graph based on given id_node */ static struct vnode_t * vgraph_get_node(struct vgraph_t *graph, unsigned id_node) { struct vnode_t *node; node = &graph->node[id_node]; return node; } /* * vgraph_add_edges -- randomly assign destination nodes to the edges */ static void vgraph_add_edges(struct vgraph_t *graph, rng_t *rngp) { unsigned nodes_count = 0; unsigned edges_count = 0; struct vnode_t *node; for (nodes_count = 0; nodes_count < graph->nodes_num; nodes_count++) { node = vgraph_get_node(graph, nodes_count); unsigned edges_num = node->edges_num; for (edges_count = 0; edges_count < edges_num; edges_count++) { unsigned node_link = rand_range(0, graph->nodes_num, rngp); node->edges[edges_count] = node_link; } } } /* * vgraph_new -- allocate a new volatile graph */ struct vgraph_t * vgraph_new(struct vgraph_params *params, rng_t *rngp) { unsigned min_nodes = 1; if (params->max_nodes > params->range_nodes) min_nodes = params->max_nodes - params->range_nodes; unsigned nodes_num = rand_range(min_nodes, params->max_nodes, rngp); struct vgraph_t *graph = (struct vgraph_t *)MALLOC(sizeof(struct vgraph_t) + sizeof(struct vnode_t) * nodes_num); graph->nodes_num = nodes_num; for (unsigned i = 0; i < nodes_num; i++) { vnode_new(&graph->node[i], i, params, rngp); } vgraph_add_edges(graph, rngp); return graph; } /* * vgraph_delete -- free the volatile graph */ void vgraph_delete(struct vgraph_t *graph) { for (unsigned i = 0; i < graph->nodes_num; i++) vnode_delete(&graph->node[i]); FREE(graph); }
2,894
21.099237
77
c
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_defrag_advanced/vgraph.h
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */

/*
 * vgraph.h -- volatile graph representation
 */

#ifndef OBJ_DEFRAG_ADV_VGRAPH
#define OBJ_DEFRAG_ADV_VGRAPH

#include "rand.h"

struct vgraph_params {
	unsigned max_nodes; /* max # of nodes per graph */
	unsigned max_edges; /* max # of edges per node */
	/* # of nodes is between [max_nodes - range_nodes, max_nodes] */
	unsigned range_nodes;
	/* # of edges is between [max_edges - range_edges, max_edges] */
	unsigned range_edges;
	unsigned min_pattern_size;
	unsigned max_pattern_size;
};

struct vnode_t {
	unsigned node_id;
	unsigned edges_num; /* # of edges starting from this node */
	unsigned *edges; /* ids of nodes the edges are pointing to */

	/* the persistent node attributes */
	size_t pattern_size; /* size of the pattern allocated after the node */
	size_t psize; /* the total size of the node */
};

struct vgraph_t {
	unsigned nodes_num;
	struct vnode_t node[];
};

unsigned rand_range(unsigned min, unsigned max, rng_t *rngp);

struct vgraph_t *vgraph_new(struct vgraph_params *params, rng_t *rngp);
void vgraph_delete(struct vgraph_t *graph);

#endif
1,158
23.145833
72
h
null
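A short usage sketch for the interface declared above (host-side only, no pmem involved); the randomize_r() seeding call mirrors what the test driver does and is an assumption about the test's rand.h:

#include <stdio.h>

#include "rand.h"
#include "vgraph.h"

int
main(void)
{
	struct vgraph_params params = {
		.max_nodes = 16,
		.max_edges = 4,
		.range_nodes = 8,	/* nodes_num in [8, 16] */
		.range_edges = 2,	/* edges_num in [2, 4] */
		.min_pattern_size = 8,
		.max_pattern_size = 64,
	};

	rng_t rng;
	randomize_r(&rng, 0);	/* assumed seeding helper from rand.h */

	struct vgraph_t *g = vgraph_new(&params, &rng);
	for (unsigned i = 0; i < g->nodes_num; ++i)
		printf("node %u: %u edges\n", g->node[i].node_id,
				g->node[i].edges_num);

	vgraph_delete(g);
	return 0;
}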
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_mem/obj_mem.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2018, Intel Corporation */ /* * obj_mem.c -- simple test for pmemobj_memcpy, pmemobj_memmove and * pmemobj_memset that verifies nothing blows up on pmemobj side. * Real consistency tests are for libpmem. */ #include "unittest.h" static unsigned Flags[] = { 0, PMEMOBJ_F_MEM_NODRAIN, PMEMOBJ_F_MEM_NONTEMPORAL, PMEMOBJ_F_MEM_TEMPORAL, PMEMOBJ_F_MEM_NONTEMPORAL | PMEMOBJ_F_MEM_TEMPORAL, PMEMOBJ_F_MEM_NONTEMPORAL | PMEMOBJ_F_MEM_NODRAIN, PMEMOBJ_F_MEM_WC, PMEMOBJ_F_MEM_WB, PMEMOBJ_F_MEM_NOFLUSH, /* all possible flags */ PMEMOBJ_F_MEM_NODRAIN | PMEMOBJ_F_MEM_NOFLUSH | PMEMOBJ_F_MEM_NONTEMPORAL | PMEMOBJ_F_MEM_TEMPORAL | PMEMOBJ_F_MEM_WC | PMEMOBJ_F_MEM_WB, }; int main(int argc, char *argv[]) { START(argc, argv, "obj_mem"); if (argc != 2) UT_FATAL("usage: %s [directory]", argv[0]); PMEMobjpool *pop = pmemobj_create(argv[1], "obj_mem", 0, S_IWUSR | S_IRUSR); if (!pop) UT_FATAL("!pmemobj_create"); struct root { char c[4096]; }; struct root *r = pmemobj_direct(pmemobj_root(pop, sizeof(struct root))); for (int i = 0; i < ARRAY_SIZE(Flags); ++i) { unsigned f = Flags[i]; pmemobj_memset(pop, &r->c[0], 0x77, 2048, f); pmemobj_memset(pop, &r->c[2048], 0xff, 2048, f); pmemobj_memcpy(pop, &r->c[2048 + 7], &r->c[0], 100, f); pmemobj_memcpy(pop, &r->c[2048 + 1024], &r->c[0] + 17, 128, f); pmemobj_memmove(pop, &r->c[125], &r->c[150], 100, f); pmemobj_memmove(pop, &r->c[350], &r->c[325], 100, f); if (f & PMEMOBJ_F_MEM_NOFLUSH) pmemobj_persist(pop, r, sizeof(*r)); } pmemobj_close(pop); DONE(NULL); }
1,644
22.84058
73
c
null
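The loop above only checks that every flag combination survives; the practical use of PMEMOBJ_F_MEM_NODRAIN is to batch several copies and pay for a single drain at the end. A minimal sketch (the buffer layout is hypothetical):

#include <libpmemobj.h>

static void
copy_batch(PMEMobjpool *pop, char *dst, const char *src, size_t chunk,
		size_t nchunks)
{
	/* each memcpy flushes its range but skips the store fence */
	for (size_t i = 0; i < nchunks; ++i)
		pmemobj_memcpy(pop, dst + i * chunk, src + i * chunk,
				chunk, PMEMOBJ_F_MEM_NODRAIN);

	/* one fence for the whole batch */
	pmemobj_drain(pop);
}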
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/pmem_valgr_simple/pmem_valgr_simple.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2016, Intel Corporation */ /* * pmem_valgr_simple.c -- simple unit test using pmemcheck * * usage: pmem_valgr_simple file */ #include "unittest.h" int main(int argc, char *argv[]) { size_t mapped_len; char *dest; int is_pmem; START(argc, argv, "pmem_valgr_simple"); if (argc != 4) UT_FATAL("usage: %s file offset length", argv[0]); int dest_off = atoi(argv[2]); size_t bytes = strtoul(argv[3], NULL, 0); dest = pmem_map_file(argv[1], 0, 0, 0, &mapped_len, &is_pmem); if (dest == NULL) UT_FATAL("!Could not mmap %s\n", argv[1]); /* these will not be made persistent */ *(int *)dest = 4; /* this will be made persistent */ uint64_t *tmp64dst = (void *)((uintptr_t)dest + 4096); *tmp64dst = 50; if (is_pmem) { pmem_persist(tmp64dst, sizeof(*tmp64dst)); } else { UT_ASSERTeq(pmem_msync(tmp64dst, sizeof(*tmp64dst)), 0); } uint16_t *tmp16dst = (void *)((uintptr_t)dest + 1024); *tmp16dst = 21; /* will appear as flushed/fenced in valgrind log */ pmem_flush(tmp16dst, sizeof(*tmp16dst)); /* shows strange behavior of memset in some cases */ memset(dest + dest_off, 0, bytes); UT_ASSERTeq(pmem_unmap(dest, mapped_len), 0); DONE(NULL); }
1,240
21.160714
63
c
null
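The test maps an existing file (len == 0 keeps the file's size); the more common application pattern lets libpmem create the file and chooses persist vs. msync from the is_pmem flag, as sketched below (path and length are examples only):

#include <libpmem.h>
#include <stdio.h>
#include <string.h>

int
main(void)
{
	size_t mapped_len;
	int is_pmem;
	char *addr = pmem_map_file("/mnt/pmem/example", 4096,
			PMEM_FILE_CREATE, 0600, &mapped_len, &is_pmem);
	if (addr == NULL) {
		perror("pmem_map_file");
		return 1;
	}

	strcpy(addr, "hello");

	/* persist directly on real pmem, fall back to msync otherwise */
	if (is_pmem)
		pmem_persist(addr, 6);
	else
		pmem_msync(addr, 6);

	pmem_unmap(addr, mapped_len);
	return 0;
}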
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/libpmempool_check_version/libpmempool_check_version.c
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2019, Intel Corporation */

/*
 * libpmempool_check_version -- a unittest for libpmempool_check_version.
 *
 */

#include "unittest.h"
#include "libpmempool.h"

int
main(int argc, char *argv[])
{
	START(argc, argv, "libpmempool_check_version");

	UT_ASSERTne(pmempool_check_version(0, 0), NULL);

	UT_ASSERTne(NULL, pmempool_check_version(PMEMPOOL_MAJOR_VERSION - 1,
		PMEMPOOL_MINOR_VERSION));

	if (PMEMPOOL_MINOR_VERSION > 0) {
		UT_ASSERTeq(NULL, pmempool_check_version(PMEMPOOL_MAJOR_VERSION,
			PMEMPOOL_MINOR_VERSION - 1));
	}

	UT_ASSERTeq(NULL, pmempool_check_version(PMEMPOOL_MAJOR_VERSION,
		PMEMPOOL_MINOR_VERSION));

	UT_ASSERTne(NULL, pmempool_check_version(PMEMPOOL_MAJOR_VERSION + 1,
		PMEMPOOL_MINOR_VERSION));

	UT_ASSERTne(NULL, pmempool_check_version(PMEMPOOL_MAJOR_VERSION,
		PMEMPOOL_MINOR_VERSION + 1));

	DONE(NULL);
}
897
22.631579
73
c
null
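The assertions above pin down the contract: pmempool_check_version() returns NULL when the requested version is supported and a human-readable error string otherwise. The usual application-side check is a sketch like this:

#include <libpmempool.h>
#include <stdio.h>

int
main(void)
{
	const char *msg = pmempool_check_version(PMEMPOOL_MAJOR_VERSION,
			PMEMPOOL_MINOR_VERSION);
	if (msg != NULL) {
		fprintf(stderr, "libpmempool version mismatch: %s\n", msg);
		return 1;
	}
	return 0;
}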
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/win_mmap_dtor/win_mmap_dtor.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2018, Intel Corporation */ /* * win_mmap_dtor.c -- unit test for windows mmap destructor */ #include "unittest.h" #include "os.h" #include "win_mmap.h" #define KILOBYTE (1 << 10) #define MEGABYTE (1 << 20) unsigned long long Mmap_align; int main(int argc, char *argv[]) { START(argc, argv, "win_mmap_dtor"); if (argc != 2) UT_FATAL("usage: %s path", argv[0]); SYSTEM_INFO si; GetSystemInfo(&si); /* set pagesize for mmap */ Mmap_align = si.dwAllocationGranularity; const char *path = argv[1]; int fd = os_open(path, O_RDWR); UT_ASSERTne(fd, -1); /* * Input file has size equal to 2MB, but the mapping is 3MB. * In this case mmap should map whole file and reserve 1MB * of virtual address space for remaining part of the mapping. */ void *addr = mmap(NULL, 3 * MEGABYTE, PROT_READ, MAP_SHARED, fd, 0); UT_ASSERTne(addr, MAP_FAILED); MEMORY_BASIC_INFORMATION basic_info; SIZE_T bytes_returned; bytes_returned = VirtualQuery(addr, &basic_info, sizeof(basic_info)); UT_ASSERTeq(bytes_returned, sizeof(basic_info)); UT_ASSERTeq(basic_info.RegionSize, 2 * MEGABYTE); UT_ASSERTeq(basic_info.State, MEM_COMMIT); bytes_returned = VirtualQuery((char *)addr + 2 * MEGABYTE, &basic_info, sizeof(basic_info)); UT_ASSERTeq(bytes_returned, sizeof(basic_info)); UT_ASSERTeq(basic_info.RegionSize, MEGABYTE); UT_ASSERTeq(basic_info.State, MEM_RESERVE); win_mmap_fini(); bytes_returned = VirtualQuery((char *)addr + 2 * MEGABYTE, &basic_info, sizeof(basic_info)); UT_ASSERTeq(bytes_returned, sizeof(basic_info)); /* * region size can be bigger than 1MB because there was probably * free space after this mapping */ UT_ASSERTeq(basic_info.State, MEM_FREE); DONE(NULL); }
1,778
22.72
69
c
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/pmem_map_file_win/mocks_windows.h
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2017, Intel Corporation */

/*
 * mocks_windows.h -- redefinitions of libc functions
 *
 * This file is Windows-specific.
 *
 * This file should be included (i.e. using Forced Include) by libpmem
 * files, when compiled for the purpose of pmem_map_file test.
 * It would replace default implementation with mocked functions defined
 * in pmem_map_file.c.
 *
 * These defines could be also passed as preprocessor definitions.
 */

#ifndef WRAP_REAL
#define os_posix_fallocate __wrap_os_posix_fallocate
#define os_ftruncate __wrap_os_ftruncate
#endif
608
28
72
h
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/pmem_map_file_win/mocks_windows.c
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2017, Intel Corporation */

/*
 * mocks_windows.c -- mocked functions used in pmem_map_file.c
 * (Windows-specific)
 */

#include "unittest.h"

#define MAX_LEN (4 * 1024 * 1024)

/*
 * posix_fallocate -- interpose on libc posix_fallocate()
 */
FUNC_MOCK(os_posix_fallocate, int, int fd, os_off_t offset, os_off_t len)
FUNC_MOCK_RUN_DEFAULT {
	UT_OUT("posix_fallocate: off %ju len %ju", offset, len);
	if (len > MAX_LEN)
		return ENOSPC;
	return _FUNC_REAL(os_posix_fallocate)(fd, offset, len);
}
FUNC_MOCK_END

/*
 * ftruncate -- interpose on libc ftruncate()
 */
FUNC_MOCK(os_ftruncate, int, int fd, os_off_t len)
FUNC_MOCK_RUN_DEFAULT {
	UT_OUT("ftruncate: len %ju", len);
	if (len > MAX_LEN) {
		errno = ENOSPC;
		return -1;
	}
	return _FUNC_REAL(os_ftruncate)(fd, len);
}
FUNC_MOCK_END
868
21.868421
73
c
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_tx_locks/obj_tx_locks.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2020, Intel Corporation */ /* * obj_tx_locks.c -- unit test for transaction locks */ #include "unittest.h" #define LAYOUT_NAME "direct" #define NUM_LOCKS 2 #define NUM_THREADS 10 #define TEST_VALUE_A 5 #define TEST_VALUE_B 10 #define TEST_VALUE_C 15 #define BEGIN_TX(pop, mutexes, rwlocks)\ TX_BEGIN_PARAM((pop), TX_PARAM_MUTEX,\ &(mutexes)[0], TX_PARAM_MUTEX, &(mutexes)[1], TX_PARAM_RWLOCK,\ &(rwlocks)[0], TX_PARAM_RWLOCK, &(rwlocks)[1], TX_PARAM_NONE) #define BEGIN_TX_OLD(pop, mutexes, rwlocks)\ TX_BEGIN_LOCK((pop), TX_LOCK_MUTEX,\ &(mutexes)[0], TX_LOCK_MUTEX, &(mutexes)[1], TX_LOCK_RWLOCK,\ &(rwlocks)[0], TX_LOCK_RWLOCK, &(rwlocks)[1], TX_LOCK_NONE) struct transaction_data { PMEMmutex mutexes[NUM_LOCKS]; PMEMrwlock rwlocks[NUM_LOCKS]; int a; int b; int c; }; static PMEMobjpool *Pop; /* * do_tx -- (internal) thread-friendly transaction */ static void * do_tx(void *arg) { struct transaction_data *data = arg; BEGIN_TX(Pop, data->mutexes, data->rwlocks) { data->a = TEST_VALUE_A; } TX_ONCOMMIT { UT_ASSERT(data->a == TEST_VALUE_A); data->b = TEST_VALUE_B; } TX_ONABORT { /* not called */ data->a = TEST_VALUE_B; } TX_FINALLY { UT_ASSERT(data->b == TEST_VALUE_B); data->c = TEST_VALUE_C; } TX_END return NULL; } /* * do_tx_old -- (internal) thread-friendly transaction, tests deprecated macros */ static void * do_tx_old(void *arg) { struct transaction_data *data = arg; BEGIN_TX_OLD(Pop, data->mutexes, data->rwlocks) { data->a = TEST_VALUE_A; } TX_ONCOMMIT { UT_ASSERT(data->a == TEST_VALUE_A); data->b = TEST_VALUE_B; } TX_ONABORT { /* not called */ data->a = TEST_VALUE_B; } TX_FINALLY { UT_ASSERT(data->b == TEST_VALUE_B); data->c = TEST_VALUE_C; } TX_END return NULL; } /* * do_aborted_tx -- (internal) thread-friendly aborted transaction */ static void * do_aborted_tx(void *arg) { struct transaction_data *data = arg; BEGIN_TX(Pop, data->mutexes, data->rwlocks) { data->a = TEST_VALUE_A; pmemobj_tx_abort(EINVAL); data->a = TEST_VALUE_B; } TX_ONCOMMIT { /* not called */ data->a = TEST_VALUE_B; } TX_ONABORT { UT_ASSERT(data->a == TEST_VALUE_A); data->b = TEST_VALUE_B; } TX_FINALLY { UT_ASSERT(data->b == TEST_VALUE_B); data->c = TEST_VALUE_C; } TX_END return NULL; } /* * do_nested_tx-- (internal) thread-friendly nested transaction */ static void * do_nested_tx(void *arg) { struct transaction_data *data = arg; BEGIN_TX(Pop, data->mutexes, data->rwlocks) { BEGIN_TX(Pop, data->mutexes, data->rwlocks) { data->a = TEST_VALUE_A; } TX_ONCOMMIT { UT_ASSERT(data->a == TEST_VALUE_A); data->b = TEST_VALUE_B; } TX_END } TX_ONCOMMIT { data->c = TEST_VALUE_C; } TX_END return NULL; } /* * do_aborted_nested_tx -- (internal) thread-friendly aborted nested transaction */ static void * do_aborted_nested_tx(void *arg) { struct transaction_data *data = arg; BEGIN_TX(Pop, data->mutexes, data->rwlocks) { data->a = TEST_VALUE_C; BEGIN_TX(Pop, data->mutexes, data->rwlocks) { data->a = TEST_VALUE_A; pmemobj_tx_abort(EINVAL); data->a = TEST_VALUE_B; } TX_ONCOMMIT { /* not called */ data->a = TEST_VALUE_C; } TX_ONABORT { UT_ASSERT(data->a == TEST_VALUE_A); data->b = TEST_VALUE_B; } TX_FINALLY { UT_ASSERT(data->b == TEST_VALUE_B); data->c = TEST_VALUE_C; } TX_END data->a = TEST_VALUE_B; } TX_ONCOMMIT { /* not called */ UT_ASSERT(data->a == TEST_VALUE_A); data->c = TEST_VALUE_C; } TX_ONABORT { UT_ASSERT(data->a == TEST_VALUE_A); UT_ASSERT(data->b == TEST_VALUE_B); UT_ASSERT(data->c == TEST_VALUE_C); data->a = TEST_VALUE_B; } TX_FINALLY { UT_ASSERT(data->a == TEST_VALUE_B); 
data->b = TEST_VALUE_A; } TX_END return NULL; } static void run_mt_test(void *(*worker)(void *), void *arg) { os_thread_t thread[NUM_THREADS]; for (int i = 0; i < NUM_THREADS; ++i) { THREAD_CREATE(&thread[i], NULL, worker, arg); } for (int i = 0; i < NUM_THREADS; ++i) { THREAD_JOIN(&thread[i], NULL); } } int main(int argc, char *argv[]) { START(argc, argv, "obj_tx_locks"); if (argc > 3) UT_FATAL("usage: %s <file> [m]", argv[0]); if ((Pop = pmemobj_create(argv[1], LAYOUT_NAME, PMEMOBJ_MIN_POOL, S_IWUSR | S_IRUSR)) == NULL) UT_FATAL("!pmemobj_create"); int multithread = 0; if (argc == 3) { multithread = (argv[2][0] == 'm'); if (!multithread) UT_FATAL("wrong test type supplied %c", argv[1][0]); } PMEMoid root = pmemobj_root(Pop, sizeof(struct transaction_data)); struct transaction_data *test_obj = (struct transaction_data *)pmemobj_direct(root); if (multithread) { run_mt_test(do_tx, test_obj); } else { do_tx(test_obj); do_tx(test_obj); } UT_ASSERT(test_obj->a == TEST_VALUE_A); UT_ASSERT(test_obj->b == TEST_VALUE_B); UT_ASSERT(test_obj->c == TEST_VALUE_C); if (multithread) { run_mt_test(do_aborted_tx, test_obj); } else { do_aborted_tx(test_obj); do_aborted_tx(test_obj); } UT_ASSERT(test_obj->a == TEST_VALUE_A); UT_ASSERT(test_obj->b == TEST_VALUE_B); UT_ASSERT(test_obj->c == TEST_VALUE_C); if (multithread) { run_mt_test(do_nested_tx, test_obj); } else { do_nested_tx(test_obj); do_nested_tx(test_obj); } UT_ASSERT(test_obj->a == TEST_VALUE_A); UT_ASSERT(test_obj->b == TEST_VALUE_B); UT_ASSERT(test_obj->c == TEST_VALUE_C); if (multithread) { run_mt_test(do_aborted_nested_tx, test_obj); } else { do_aborted_nested_tx(test_obj); do_aborted_nested_tx(test_obj); } UT_ASSERT(test_obj->a == TEST_VALUE_B); UT_ASSERT(test_obj->b == TEST_VALUE_A); UT_ASSERT(test_obj->c == TEST_VALUE_C); /* test that deprecated macros still work */ UT_COMPILE_ERROR_ON((int)TX_LOCK_NONE != (int)TX_PARAM_NONE); UT_COMPILE_ERROR_ON((int)TX_LOCK_MUTEX != (int)TX_PARAM_MUTEX); UT_COMPILE_ERROR_ON((int)TX_LOCK_RWLOCK != (int)TX_PARAM_RWLOCK); if (multithread) { run_mt_test(do_tx_old, test_obj); } else { do_tx_old(test_obj); do_tx_old(test_obj); } UT_ASSERT(test_obj->a == TEST_VALUE_A); UT_ASSERT(test_obj->b == TEST_VALUE_B); UT_ASSERT(test_obj->c == TEST_VALUE_C); pmemobj_close(Pop); DONE(NULL); }
6,164
21.918216
80
c
null
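The macros exercised above reduce to one rule: locks listed in TX_BEGIN_PARAM are taken when the transaction starts and released when it ends, whichever stage path it takes. A minimal sketch with a single pmem-resident mutex (the counter layout is hypothetical, not the test's):

#include <libpmemobj.h>

struct counter {
	PMEMmutex lock;
	int value;
};

static void
counter_bump(PMEMobjpool *pop, struct counter *c)
{
	/* the mutex is held for the whole transaction */
	TX_BEGIN_PARAM(pop, TX_PARAM_MUTEX, &c->lock, TX_PARAM_NONE) {
		/* undo-log the field before modifying it */
		pmemobj_tx_add_range_direct(&c->value, sizeof(c->value));
		c->value++;
	} TX_END
}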
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/blk_recovery/blk_recovery.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2014-2019, Intel Corporation */ /* * Copyright (c) 2016, Microsoft Corporation. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * blk_recovery.c -- unit test for pmemblk recovery * * usage: blk_recovery bsize file first_lba lba * */ #include "unittest.h" #include <sys/param.h> #include "blk.h" #include "btt_layout.h" #include <endian.h> static size_t Bsize; /* * construct -- build a buffer for writing */ static void construct(unsigned char *buf) { static int ord = 1; for (int i = 0; i < Bsize; i++) buf[i] = ord; ord++; if (ord > 255) ord = 1; } /* * ident -- identify what a buffer holds */ static char * ident(unsigned char *buf) { static char descr[100]; unsigned val = *buf; for (int i = 1; i < Bsize; i++) if (buf[i] != val) { sprintf(descr, "{%u} TORN at byte %d", val, i); return descr; } sprintf(descr, "{%u}", val); return descr; } int main(int argc, char *argv[]) { START(argc, argv, "blk_recovery"); if (argc != 5 && argc != 3) UT_FATAL("usage: %s bsize file [first_lba lba]", argv[0]); Bsize = strtoul(argv[1], NULL, 0); const char *path = argv[2]; if (argc > 3) { PMEMblkpool *handle; if ((handle = pmemblk_create(path, Bsize, 0, S_IWUSR | S_IRUSR)) == NULL) UT_FATAL("!%s: pmemblk_create", path); UT_OUT("%s block size %zu usable blocks %zu", argv[1], Bsize, pmemblk_nblock(handle)); /* write the first lba */ os_off_t lba = STRTOL(argv[3], NULL, 0); unsigned char *buf = MALLOC(Bsize); construct(buf); if (pmemblk_write(handle, buf, lba) < 0) UT_FATAL("!write lba %zu", lba); UT_OUT("write lba %zu: %s", lba, ident(buf)); /* reach into the layout and write-protect the map */ struct btt_info *infop = (void *)((char *)handle + roundup(sizeof(struct pmemblk), BLK_FORMAT_DATA_ALIGN)); char *mapaddr = (char *)infop + le32toh(infop->mapoff); char *flogaddr = (char *)infop + le32toh(infop->flogoff); UT_OUT("write-protecting map, length %zu", (size_t)(flogaddr - mapaddr)); MPROTECT(mapaddr, (size_t)(flogaddr - mapaddr), PROT_READ); /* map each file argument with the given map type */ lba = STRTOL(argv[4], NULL, 0); 
construct(buf); if (pmemblk_write(handle, buf, lba) < 0) UT_FATAL("!write lba %zu", lba); else UT_FATAL("write lba %zu: %s", lba, ident(buf)); } else { int result = pmemblk_check(path, Bsize); if (result < 0) UT_OUT("!%s: pmemblk_check", path); else if (result == 0) UT_OUT("%s: pmemblk_check: not consistent", path); else UT_OUT("%s: consistent", path); } DONE(NULL); }
4,164
26.766667
74
c
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/out_err_mt/out_err_mt.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2020, Intel Corporation */ /* * out_err_mt.c -- unit test for error messages */ #include <sys/types.h> #include <stdarg.h> #include <errno.h> #include "unittest.h" #include "valgrind_internal.h" #include "util.h" #define NUM_THREADS 16 static void print_errors(const char *msg) { UT_OUT("%s", msg); UT_OUT("PMEM: %s", pmem_errormsg()); UT_OUT("PMEMOBJ: %s", pmemobj_errormsg()); UT_OUT("PMEMLOG: %s", pmemlog_errormsg()); UT_OUT("PMEMBLK: %s", pmemblk_errormsg()); UT_OUT("PMEMPOOL: %s", pmempool_errormsg()); } static void check_errors(unsigned ver) { int ret; int err_need; int err_found; ret = sscanf(pmem_errormsg(), "libpmem major version mismatch (need %d, found %d)", &err_need, &err_found); UT_ASSERTeq(ret, 2); UT_ASSERTeq(err_need, ver); UT_ASSERTeq(err_found, PMEM_MAJOR_VERSION); ret = sscanf(pmemobj_errormsg(), "libpmemobj major version mismatch (need %d, found %d)", &err_need, &err_found); UT_ASSERTeq(ret, 2); UT_ASSERTeq(err_need, ver); UT_ASSERTeq(err_found, PMEMOBJ_MAJOR_VERSION); ret = sscanf(pmemlog_errormsg(), "libpmemlog major version mismatch (need %d, found %d)", &err_need, &err_found); UT_ASSERTeq(ret, 2); UT_ASSERTeq(err_need, ver); UT_ASSERTeq(err_found, PMEMLOG_MAJOR_VERSION); ret = sscanf(pmemblk_errormsg(), "libpmemblk major version mismatch (need %d, found %d)", &err_need, &err_found); UT_ASSERTeq(ret, 2); UT_ASSERTeq(err_need, ver); UT_ASSERTeq(err_found, PMEMBLK_MAJOR_VERSION); ret = sscanf(pmempool_errormsg(), "libpmempool major version mismatch (need %d, found %d)", &err_need, &err_found); UT_ASSERTeq(ret, 2); UT_ASSERTeq(err_need, ver); UT_ASSERTeq(err_found, PMEMPOOL_MAJOR_VERSION); } static void * do_test(void *arg) { unsigned ver = *(unsigned *)arg; pmem_check_version(ver, 0); pmemobj_check_version(ver, 0); pmemlog_check_version(ver, 0); pmemblk_check_version(ver, 0); pmempool_check_version(ver, 0); check_errors(ver); return NULL; } static void run_mt_test(void *(*worker)(void *)) { os_thread_t thread[NUM_THREADS]; unsigned ver[NUM_THREADS]; for (unsigned i = 0; i < NUM_THREADS; ++i) { ver[i] = 10000 + i; THREAD_CREATE(&thread[i], NULL, worker, &ver[i]); } for (unsigned i = 0; i < NUM_THREADS; ++i) { THREAD_JOIN(&thread[i], NULL); } } int main(int argc, char *argv[]) { START(argc, argv, "out_err_mt"); if (argc != 6) UT_FATAL("usage: %s file1 file2 file3 file4 dir", argv[0]); print_errors("start"); PMEMobjpool *pop = pmemobj_create(argv[1], "test", PMEMOBJ_MIN_POOL, 0666); PMEMlogpool *plp = pmemlog_create(argv[2], PMEMLOG_MIN_POOL, 0666); PMEMblkpool *pbp = pmemblk_create(argv[3], 128, PMEMBLK_MIN_POOL, 0666); util_init(); pmem_check_version(10000, 0); pmemobj_check_version(10001, 0); pmemlog_check_version(10002, 0); pmemblk_check_version(10003, 0); pmempool_check_version(10006, 0); print_errors("version check"); void *ptr = NULL; /* * We are testing library error reporting and we don't want this test * to fail under memcheck. 
*/ VALGRIND_DO_DISABLE_ERROR_REPORTING; pmem_msync(ptr, 1); VALGRIND_DO_ENABLE_ERROR_REPORTING; print_errors("pmem_msync"); int ret; PMEMoid oid; ret = pmemobj_alloc(pop, &oid, 0, 0, NULL, NULL); UT_ASSERTeq(ret, -1); print_errors("pmemobj_alloc"); pmemlog_append(plp, NULL, PMEMLOG_MIN_POOL); print_errors("pmemlog_append"); size_t nblock = pmemblk_nblock(pbp); pmemblk_set_error(pbp, (long long)nblock + 1); print_errors("pmemblk_set_error"); run_mt_test(do_test); pmemobj_close(pop); pmemlog_close(plp); pmemblk_close(pbp); PMEMpoolcheck *ppc; struct pmempool_check_args args = {NULL, }; ppc = pmempool_check_init(&args, sizeof(args) / 2); UT_ASSERTeq(ppc, NULL); print_errors("pmempool_check_init"); DONE(NULL); }
3,840
22.278788
70
c
null
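Each library keeps a per-thread error string, which is what the test prints; in application code it is typically read right after a failed call, e.g. (sketch only):

#include <libpmemobj.h>
#include <stdio.h>
#include <stdlib.h>

static PMEMobjpool *
open_or_die(const char *path, const char *layout)
{
	PMEMobjpool *pop = pmemobj_open(path, layout);
	if (pop == NULL) {
		/* thread-local error string set by the failed call */
		fprintf(stderr, "pmemobj_open failed: %s\n",
				pmemobj_errormsg());
		exit(1);
	}
	return pop;
}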
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/libpmempool_api/libpmempool_test.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2016, Intel Corporation */ /* * libpmempool_test -- test of libpmempool. * */ #include <stddef.h> #include <unistd.h> #include <stdlib.h> #include <stdio.h> #include <getopt.h> #include "unittest.h" /* * Exact copy of the struct pmempool_check_args from libpmempool 1.0 provided to * test libpmempool against various pmempool_check_args structure versions. */ struct pmempool_check_args_1_0 { const char *path; const char *backup_path; enum pmempool_pool_type pool_type; int flags; }; /* * check_pool -- check given pool */ static void check_pool(struct pmempool_check_args *args, size_t args_size) { const char *status2str[] = { [PMEMPOOL_CHECK_RESULT_CONSISTENT] = "consistent", [PMEMPOOL_CHECK_RESULT_NOT_CONSISTENT] = "not consistent", [PMEMPOOL_CHECK_RESULT_REPAIRED] = "repaired", [PMEMPOOL_CHECK_RESULT_CANNOT_REPAIR] = "cannot repair", [PMEMPOOL_CHECK_RESULT_ERROR] = "fatal", }; PMEMpoolcheck *ppc = pmempool_check_init(args, args_size); if (!ppc) { char buff[UT_MAX_ERR_MSG]; ut_strerror(errno, buff, UT_MAX_ERR_MSG); UT_OUT("Error: %s", buff); return; } struct pmempool_check_status *status = NULL; while ((status = pmempool_check(ppc)) != NULL) { switch (status->type) { case PMEMPOOL_CHECK_MSG_TYPE_ERROR: UT_OUT("%s", status->str.msg); break; case PMEMPOOL_CHECK_MSG_TYPE_INFO: UT_OUT("%s", status->str.msg); break; case PMEMPOOL_CHECK_MSG_TYPE_QUESTION: UT_OUT("%s", status->str.msg); status->str.answer = "yes"; break; default: pmempool_check_end(ppc); exit(EXIT_FAILURE); } } enum pmempool_check_result ret = pmempool_check_end(ppc); UT_OUT("status = %s", status2str[ret]); } /* * print_usage -- print usage of program */ static void print_usage(char *name) { UT_OUT("Usage: %s [-t <pool_type>] [-r <repair>] [-d <dry_run>] " "[-y <always_yes>] [-f <flags>] [-a <advanced>] " "[-b <backup_path>] <pool_path>", name); } /* * set_flag -- parse the value and set the flag according to a obtained value */ static void set_flag(const char *value, int *flags, int flag) { if (atoi(value) > 0) *flags |= flag; else *flags &= ~flag; } int main(int argc, char *argv[]) { START(argc, argv, "libpmempool_test"); int opt; struct pmempool_check_args_1_0 args = { .path = NULL, .backup_path = NULL, .pool_type = PMEMPOOL_POOL_TYPE_LOG, .flags = PMEMPOOL_CHECK_FORMAT_STR | PMEMPOOL_CHECK_REPAIR | PMEMPOOL_CHECK_VERBOSE }; size_t args_size = sizeof(struct pmempool_check_args_1_0); while ((opt = getopt(argc, argv, "t:r:d:a:y:s:b:")) != -1) { switch (opt) { case 't': if (strcmp(optarg, "blk") == 0) { args.pool_type = PMEMPOOL_POOL_TYPE_BLK; } else if (strcmp(optarg, "log") == 0) { args.pool_type = PMEMPOOL_POOL_TYPE_LOG; } else if (strcmp(optarg, "obj") == 0) { args.pool_type = PMEMPOOL_POOL_TYPE_OBJ; } else if (strcmp(optarg, "btt") == 0) { args.pool_type = PMEMPOOL_POOL_TYPE_BTT; } else { args.pool_type = (uint32_t)strtoul(optarg, NULL, 0); } break; case 'r': set_flag(optarg, &args.flags, PMEMPOOL_CHECK_REPAIR); break; case 'd': set_flag(optarg, &args.flags, PMEMPOOL_CHECK_DRY_RUN); break; case 'a': set_flag(optarg, &args.flags, PMEMPOOL_CHECK_ADVANCED); break; case 'y': set_flag(optarg, &args.flags, PMEMPOOL_CHECK_ALWAYS_YES); break; case 's': args_size = strtoul(optarg, NULL, 0); break; case 'b': args.backup_path = optarg; break; default: print_usage(argv[0]); UT_FATAL("unknown option: %c", opt); } } if (optind < argc) { args.path = argv[optind]; } check_pool((struct pmempool_check_args *)&args, args_size); DONE(NULL); }
3,753
22.31677
80
c
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/blk_nblock/blk_nblock.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2014-2017, Intel Corporation */ /* * blk_nblock.c -- unit test for pmemblk_nblock() * * usage: blk_nblock bsize:file... * */ #include "unittest.h" int main(int argc, char *argv[]) { START(argc, argv, "blk_nblock"); if (argc < 2) UT_FATAL("usage: %s bsize:file...", argv[0]); /* map each file argument with the given map type */ for (int arg = 1; arg < argc; arg++) { char *fname; size_t bsize = strtoul(argv[arg], &fname, 0); if (*fname != ':') UT_FATAL("usage: %s bsize:file...", argv[0]); fname++; PMEMblkpool *handle; handle = pmemblk_create(fname, bsize, 0, S_IWUSR | S_IRUSR); if (handle == NULL) { UT_OUT("!%s: pmemblk_create", fname); } else { UT_OUT("%s: block size %zu usable blocks: %zu", fname, bsize, pmemblk_nblock(handle)); UT_ASSERTeq(pmemblk_bsize(handle), bsize); pmemblk_close(handle); int result = pmemblk_check(fname, bsize); if (result < 0) UT_OUT("!%s: pmemblk_check", fname); else if (result == 0) UT_OUT("%s: pmemblk_check: not consistent", fname); else { UT_ASSERTeq(pmemblk_check(fname, bsize + 1), -1); UT_ASSERTeq(pmemblk_check(fname, 0), 1); handle = pmemblk_open(fname, 0); UT_ASSERTeq(pmemblk_bsize(handle), bsize); pmemblk_close(handle); } } } DONE(NULL); }
1,358
22.431034
62
c
null
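For contrast with the create/check loop above, a minimal libpmemblk write/read round trip looks like the sketch below (pool path, block size and mode are examples):

#include <libpmemblk.h>
#include <stdio.h>
#include <string.h>

#define BSIZE 512

int
main(void)
{
	PMEMblkpool *pbp = pmemblk_create("/mnt/pmem/blkpool", BSIZE,
			PMEMBLK_MIN_POOL, 0600);
	if (pbp == NULL) {
		perror("pmemblk_create");
		return 1;
	}

	unsigned char in[BSIZE];
	unsigned char out[BSIZE];
	memset(in, 0xab, sizeof(in));

	/* write block 0, then read it back */
	if (pmemblk_write(pbp, in, 0) < 0)
		perror("pmemblk_write");
	if (pmemblk_read(pbp, out, 0) < 0)
		perror("pmemblk_read");

	printf("usable blocks: %zu\n", pmemblk_nblock(pbp));
	pmemblk_close(pbp);
	return 0;
}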
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_tx_invalid/obj_tx_invalid.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2016-2018, Intel Corporation */ /* * obj_tx_invalid.c -- tests which transactional functions are available in * which transaction stages */ #include <stddef.h> #include "file.h" #include "unittest.h" /* * Layout definition */ POBJ_LAYOUT_BEGIN(tx_invalid); POBJ_LAYOUT_ROOT(tx_invalid, struct dummy_root); POBJ_LAYOUT_TOID(tx_invalid, struct dummy_node); POBJ_LAYOUT_END(tx_invalid); struct dummy_node { int value; }; struct dummy_root { TOID(struct dummy_node) node; }; int main(int argc, char *argv[]) { if (argc != 3) UT_FATAL("usage: %s file-name op", argv[0]); START(argc, argv, "obj_tx_invalid %s", argv[2]); /* root doesn't count */ UT_COMPILE_ERROR_ON(POBJ_LAYOUT_TYPES_NUM(tx_invalid) != 1); PMEMobjpool *pop; const char *path = argv[1]; int exists = util_file_exists(path); if (exists < 0) UT_FATAL("!util_file_exists"); if (!exists) { if ((pop = pmemobj_create(path, POBJ_LAYOUT_NAME(tx_invalid), PMEMOBJ_MIN_POOL, S_IWUSR | S_IRUSR)) == NULL) { UT_FATAL("!pmemobj_create %s", path); } } else { if ((pop = pmemobj_open(path, POBJ_LAYOUT_NAME(tx_invalid))) == NULL) { UT_FATAL("!pmemobj_open %s", path); } } PMEMoid oid = pmemobj_first(pop); if (OID_IS_NULL(oid)) { if (pmemobj_alloc(pop, &oid, 10, 1, NULL, NULL)) UT_FATAL("!pmemobj_alloc"); } else { UT_ASSERTeq(pmemobj_type_num(oid), 1); } if (strcmp(argv[2], "alloc") == 0) pmemobj_tx_alloc(10, 1); else if (strcmp(argv[2], "alloc-in-work") == 0) { TX_BEGIN(pop) { pmemobj_tx_alloc(10, 1); } TX_END } else if (strcmp(argv[2], "alloc-in-abort") == 0) { TX_BEGIN(pop) { pmemobj_tx_abort(ENOMEM); } TX_ONABORT { pmemobj_tx_alloc(10, 1); } TX_END } else if (strcmp(argv[2], "alloc-in-commit") == 0) { TX_BEGIN(pop) { } TX_ONCOMMIT { pmemobj_tx_alloc(10, 1); } TX_END } else if (strcmp(argv[2], "alloc-in-finally") == 0) { TX_BEGIN(pop) { } TX_FINALLY { pmemobj_tx_alloc(10, 1); } TX_END } else if (strcmp(argv[2], "alloc-after-tx") == 0) { TX_BEGIN(pop) { } TX_END pmemobj_tx_alloc(10, 1); } else if (strcmp(argv[2], "zalloc") == 0) pmemobj_tx_zalloc(10, 1); else if (strcmp(argv[2], "zalloc-in-work") == 0) { TX_BEGIN(pop) { pmemobj_tx_zalloc(10, 1); } TX_END } else if (strcmp(argv[2], "zalloc-in-abort") == 0) { TX_BEGIN(pop) { pmemobj_tx_abort(ENOMEM); } TX_ONABORT { pmemobj_tx_zalloc(10, 1); } TX_END } else if (strcmp(argv[2], "zalloc-in-commit") == 0) { TX_BEGIN(pop) { } TX_ONCOMMIT { pmemobj_tx_zalloc(10, 1); } TX_END } else if (strcmp(argv[2], "zalloc-in-finally") == 0) { TX_BEGIN(pop) { } TX_FINALLY { pmemobj_tx_zalloc(10, 1); } TX_END } else if (strcmp(argv[2], "zalloc-after-tx") == 0) { TX_BEGIN(pop) { } TX_END pmemobj_tx_zalloc(10, 1); } else if (strcmp(argv[2], "strdup") == 0) pmemobj_tx_strdup("aaa", 1); else if (strcmp(argv[2], "strdup-in-work") == 0) { TX_BEGIN(pop) { pmemobj_tx_strdup("aaa", 1); } TX_END } else if (strcmp(argv[2], "strdup-in-abort") == 0) { TX_BEGIN(pop) { pmemobj_tx_abort(ENOMEM); } TX_ONABORT { pmemobj_tx_strdup("aaa", 1); } TX_END } else if (strcmp(argv[2], "strdup-in-commit") == 0) { TX_BEGIN(pop) { } TX_ONCOMMIT { pmemobj_tx_strdup("aaa", 1); } TX_END } else if (strcmp(argv[2], "strdup-in-finally") == 0) { TX_BEGIN(pop) { } TX_FINALLY { pmemobj_tx_strdup("aaa", 1); } TX_END } else if (strcmp(argv[2], "strdup-after-tx") == 0) { TX_BEGIN(pop) { } TX_END pmemobj_tx_strdup("aaa", 1); } else if (strcmp(argv[2], "realloc") == 0) pmemobj_tx_realloc(oid, 10, 1); else if (strcmp(argv[2], "realloc-in-work") == 0) { TX_BEGIN(pop) { pmemobj_tx_realloc(oid, 10, 1); } TX_END } else if 
(strcmp(argv[2], "realloc-in-abort") == 0) { TX_BEGIN(pop) { pmemobj_tx_abort(ENOMEM); } TX_ONABORT { pmemobj_tx_realloc(oid, 10, 1); } TX_END } else if (strcmp(argv[2], "realloc-in-commit") == 0) { TX_BEGIN(pop) { } TX_ONCOMMIT { pmemobj_tx_realloc(oid, 10, 1); } TX_END } else if (strcmp(argv[2], "realloc-in-finally") == 0) { TX_BEGIN(pop) { } TX_FINALLY { pmemobj_tx_realloc(oid, 10, 1); } TX_END } else if (strcmp(argv[2], "realloc-after-tx") == 0) { TX_BEGIN(pop) { } TX_END pmemobj_tx_realloc(oid, 10, 1); } else if (strcmp(argv[2], "zrealloc") == 0) pmemobj_tx_zrealloc(oid, 10, 1); else if (strcmp(argv[2], "zrealloc-in-work") == 0) { TX_BEGIN(pop) { pmemobj_tx_zrealloc(oid, 10, 1); } TX_END } else if (strcmp(argv[2], "zrealloc-in-abort") == 0) { TX_BEGIN(pop) { pmemobj_tx_abort(ENOMEM); } TX_ONABORT { pmemobj_tx_zrealloc(oid, 10, 1); } TX_END } else if (strcmp(argv[2], "zrealloc-in-commit") == 0) { TX_BEGIN(pop) { } TX_ONCOMMIT { pmemobj_tx_zrealloc(oid, 10, 1); } TX_END } else if (strcmp(argv[2], "zrealloc-in-finally") == 0) { TX_BEGIN(pop) { } TX_FINALLY { pmemobj_tx_zrealloc(oid, 10, 1); } TX_END } else if (strcmp(argv[2], "zrealloc-after-tx") == 0) { TX_BEGIN(pop) { } TX_END pmemobj_tx_zrealloc(oid, 10, 1); } else if (strcmp(argv[2], "free") == 0) pmemobj_tx_free(oid); else if (strcmp(argv[2], "free-in-work") == 0) { TX_BEGIN(pop) { pmemobj_tx_free(oid); } TX_END } else if (strcmp(argv[2], "free-in-abort") == 0) { TX_BEGIN(pop) { pmemobj_tx_abort(ENOMEM); } TX_ONABORT { pmemobj_tx_free(oid); } TX_END } else if (strcmp(argv[2], "free-in-commit") == 0) { TX_BEGIN(pop) { } TX_ONCOMMIT { pmemobj_tx_free(oid); } TX_END } else if (strcmp(argv[2], "free-in-finally") == 0) { TX_BEGIN(pop) { } TX_FINALLY { pmemobj_tx_free(oid); } TX_END } else if (strcmp(argv[2], "free-after-tx") == 0) { TX_BEGIN(pop) { } TX_END pmemobj_tx_free(oid); } else if (strcmp(argv[2], "add_range") == 0) pmemobj_tx_add_range(oid, 0, 10); else if (strcmp(argv[2], "add_range-in-work") == 0) { TX_BEGIN(pop) { pmemobj_tx_add_range(oid, 0, 10); } TX_END } else if (strcmp(argv[2], "add_range-in-abort") == 0) { TX_BEGIN(pop) { pmemobj_tx_abort(ENOMEM); } TX_ONABORT { pmemobj_tx_add_range(oid, 0, 10); } TX_END } else if (strcmp(argv[2], "add_range-in-commit") == 0) { TX_BEGIN(pop) { } TX_ONCOMMIT { pmemobj_tx_add_range(oid, 0, 10); } TX_END } else if (strcmp(argv[2], "add_range-in-finally") == 0) { TX_BEGIN(pop) { } TX_FINALLY { pmemobj_tx_add_range(oid, 0, 10); } TX_END } else if (strcmp(argv[2], "add_range-after-tx") == 0) { TX_BEGIN(pop) { } TX_END pmemobj_tx_add_range(oid, 0, 10); } else if (strcmp(argv[2], "add_range_direct") == 0) pmemobj_tx_add_range_direct(pmemobj_direct(oid), 10); else if (strcmp(argv[2], "add_range_direct-in-work") == 0) { TX_BEGIN(pop) { pmemobj_tx_add_range_direct(pmemobj_direct(oid), 10); } TX_END } else if (strcmp(argv[2], "add_range_direct-in-abort") == 0) { TX_BEGIN(pop) { pmemobj_tx_abort(ENOMEM); } TX_ONABORT { pmemobj_tx_add_range_direct(pmemobj_direct(oid), 10); } TX_END } else if (strcmp(argv[2], "add_range_direct-in-commit") == 0) { TX_BEGIN(pop) { } TX_ONCOMMIT { pmemobj_tx_add_range_direct(pmemobj_direct(oid), 10); } TX_END } else if (strcmp(argv[2], "add_range_direct-in-finally") == 0) { TX_BEGIN(pop) { } TX_FINALLY { pmemobj_tx_add_range_direct(pmemobj_direct(oid), 10); } TX_END } else if (strcmp(argv[2], "add_range_direct-after-tx") == 0) { TX_BEGIN(pop) { } TX_END pmemobj_tx_add_range_direct(pmemobj_direct(oid), 10); } else if (strcmp(argv[2], "abort") == 0) 
pmemobj_tx_abort(ENOMEM); else if (strcmp(argv[2], "abort-in-work") == 0) { TX_BEGIN(pop) { pmemobj_tx_abort(ENOMEM); } TX_END } else if (strcmp(argv[2], "abort-in-abort") == 0) { TX_BEGIN(pop) { pmemobj_tx_abort(ENOMEM); } TX_ONABORT { pmemobj_tx_abort(ENOMEM); } TX_END } else if (strcmp(argv[2], "abort-in-commit") == 0) { TX_BEGIN(pop) { } TX_ONCOMMIT { pmemobj_tx_abort(ENOMEM); } TX_END } else if (strcmp(argv[2], "abort-in-finally") == 0) { TX_BEGIN(pop) { } TX_FINALLY { pmemobj_tx_abort(ENOMEM); } TX_END } else if (strcmp(argv[2], "abort-after-tx") == 0) { TX_BEGIN(pop) { } TX_END pmemobj_tx_abort(ENOMEM); } else if (strcmp(argv[2], "commit") == 0) pmemobj_tx_commit(); else if (strcmp(argv[2], "commit-in-work") == 0) { TX_BEGIN(pop) { pmemobj_tx_commit(); } TX_END } else if (strcmp(argv[2], "commit-in-abort") == 0) { TX_BEGIN(pop) { pmemobj_tx_abort(ENOMEM); } TX_ONABORT { pmemobj_tx_commit(); } TX_END } else if (strcmp(argv[2], "commit-in-commit") == 0) { TX_BEGIN(pop) { } TX_ONCOMMIT { pmemobj_tx_commit(); } TX_END } else if (strcmp(argv[2], "commit-in-finally") == 0) { TX_BEGIN(pop) { } TX_FINALLY { pmemobj_tx_commit(); } TX_END } else if (strcmp(argv[2], "commit-after-tx") == 0) { TX_BEGIN(pop) { } TX_END pmemobj_tx_commit(); } else if (strcmp(argv[2], "end") == 0) pmemobj_tx_end(); else if (strcmp(argv[2], "end-in-work") == 0) { TX_BEGIN(pop) { pmemobj_tx_end(); } TX_END } else if (strcmp(argv[2], "end-in-abort") == 0) { TX_BEGIN(pop) { pmemobj_tx_abort(ENOMEM); } TX_ONABORT { pmemobj_tx_end(); pmemobj_close(pop); exit(0); } TX_END } else if (strcmp(argv[2], "end-in-commit") == 0) { TX_BEGIN(pop) { } TX_ONCOMMIT { pmemobj_tx_end(); pmemobj_close(pop); exit(0); } TX_END } else if (strcmp(argv[2], "end-in-finally") == 0) { TX_BEGIN(pop) { } TX_FINALLY { pmemobj_tx_end(); pmemobj_close(pop); exit(0); } TX_END } else if (strcmp(argv[2], "end-after-tx") == 0) { TX_BEGIN(pop) { } TX_END pmemobj_tx_end(); } else if (strcmp(argv[2], "process") == 0) pmemobj_tx_process(); else if (strcmp(argv[2], "process-in-work") == 0) { TX_BEGIN(pop) { pmemobj_tx_process(); } TX_END } else if (strcmp(argv[2], "process-in-abort") == 0) { TX_BEGIN(pop) { pmemobj_tx_abort(ENOMEM); } TX_ONABORT { pmemobj_tx_process(); } TX_END } else if (strcmp(argv[2], "process-in-commit") == 0) { TX_BEGIN(pop) { } TX_ONCOMMIT { pmemobj_tx_process(); } TX_END } else if (strcmp(argv[2], "process-in-finally") == 0) { TX_BEGIN(pop) { } TX_FINALLY { pmemobj_tx_process(); pmemobj_tx_end(); pmemobj_close(pop); exit(0); } TX_END } else if (strcmp(argv[2], "process-after-tx") == 0) { TX_BEGIN(pop) { } TX_END pmemobj_tx_process(); } else if (strcmp(argv[2], "begin") == 0) { TX_BEGIN(pop) { } TX_END } else if (strcmp(argv[2], "begin-in-work") == 0) { TX_BEGIN(pop) { TX_BEGIN(pop) { } TX_END } TX_END } else if (strcmp(argv[2], "begin-in-abort") == 0) { TX_BEGIN(pop) { pmemobj_tx_abort(ENOMEM); } TX_ONABORT { TX_BEGIN(pop) { } TX_END } TX_END } else if (strcmp(argv[2], "begin-in-commit") == 0) { TX_BEGIN(pop) { } TX_ONCOMMIT { TX_BEGIN(pop) { } TX_END } TX_END } else if (strcmp(argv[2], "begin-in-finally") == 0) { TX_BEGIN(pop) { } TX_FINALLY { TX_BEGIN(pop) { } TX_END } TX_END } else if (strcmp(argv[2], "begin-after-tx") == 0) { TX_BEGIN(pop) { } TX_END TX_BEGIN(pop) { } TX_END } pmemobj_close(pop); DONE(NULL); }
11,213
23.809735
75
c
null
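The long if/else chain above probes every stage; the rule it encodes is that allocating calls such as pmemobj_tx_alloc() are valid only in TX_STAGE_WORK, i.e. the body of TX_BEGIN. A valid-usage sketch:

#include <stdint.h>

#include <libpmemobj.h>

static PMEMoid
alloc_in_tx(PMEMobjpool *pop, size_t size, uint64_t type_num)
{
	PMEMoid oid = OID_NULL;

	TX_BEGIN(pop) {
		oid = pmemobj_tx_alloc(size, type_num); /* TX_STAGE_WORK */
	} TX_ONABORT {
		oid = OID_NULL;	/* allocation is not allowed in this stage */
	} TX_END

	return oid;
}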
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/pmem_has_auto_flush/mocks_posix.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2018, Intel Corporation */ /* * mocks_posix.c -- mocked functions used in pmem_has_auto_flush.c */ #include <fts.h> #include "fs.h" #include "unittest.h" #define BUS_DEVICE_PATH "/sys/bus/nd/devices" /* * open -- open mock */ FUNC_MOCK(open, int, const char *path, int flags, ...) FUNC_MOCK_RUN_DEFAULT { va_list ap; va_start(ap, flags); int mode = va_arg(ap, int); va_end(ap); if (!strstr(path, BUS_DEVICE_PATH)) return _FUNC_REAL(open)(path, flags, mode); const char *prefix = os_getenv("BUS_DEVICE_PATH"); char path2[PATH_MAX] = { 0 }; strcat(path2, prefix); strcat(path2, path + strlen(BUS_DEVICE_PATH)); return _FUNC_REAL(open)(path2, flags, mode); } FUNC_MOCK_END struct fs { FTS *ft; struct fs_entry entry; }; /* * fs_new -- creates fs traversal instance */ FUNC_MOCK(fs_new, struct fs *, const char *path) FUNC_MOCK_RUN_DEFAULT { if (!strstr(path, BUS_DEVICE_PATH)) return _FUNC_REAL(fs_new)(path); const char *prefix = os_getenv("BUS_DEVICE_PATH"); char path2[PATH_MAX] = { 0 }; strcat(path2, prefix); strcat(path2, path + strlen(BUS_DEVICE_PATH)); return _FUNC_REAL(fs_new)(path2); } FUNC_MOCK_END /* * os_stat -- os_stat mock to handle sysfs path */ FUNC_MOCK(os_stat, int, const char *path, os_stat_t *buf) FUNC_MOCK_RUN_DEFAULT { if (!strstr(path, BUS_DEVICE_PATH)) return _FUNC_REAL(os_stat)(path, buf); const char *prefix = os_getenv("BUS_DEVICE_PATH"); char path2[PATH_MAX] = { 0 }; strcat(path2, prefix); strcat(path2, path + strlen(BUS_DEVICE_PATH)); return _FUNC_REAL(os_stat)(path2, buf); } FUNC_MOCK_END
1,627
22.257143
66
c
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/pmem2_badblock_mocks/mocks_ndctl.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020, Intel Corporation */ /* * mocks_ndctl.c -- mocked ndctl functions used * indirectly in pmem2_badblock_mocks.c */ #include <sys/stat.h> #include <ndctl/libndctl.h> #include "unittest.h" #include "pmem2_badblock_mocks.h" #define RESOURCE_ADDRESS 0x1000 /* any non-zero value */ #define UINT(ptr) (unsigned)((uintptr_t)ptr) /* index of bad blocks */ static unsigned i_bb; /* * ndctl_namespace_get_mode - mock ndctl_namespace_get_mode */ FUNC_MOCK(ndctl_namespace_get_mode, enum ndctl_namespace_mode, struct ndctl_namespace *ndns) FUNC_MOCK_RUN_DEFAULT { if (IS_MODE_NAMESPACE((uintptr_t)ndns)) /* namespace mode */ return NDCTL_NS_MODE_FSDAX; /* raw mode */ return NDCTL_NS_MODE_RAW; } FUNC_MOCK_END /* * ndctl_namespace_get_pfn - mock ndctl_namespace_get_pfn */ FUNC_MOCK(ndctl_namespace_get_pfn, struct ndctl_pfn *, struct ndctl_namespace *ndns) FUNC_MOCK_RUN_DEFAULT { if (IS_MODE_NAMESPACE((uintptr_t)ndns)) /* namespace mode */ return (struct ndctl_pfn *)ndns; return NULL; } FUNC_MOCK_END /* * ndctl_namespace_get_dax - mock ndctl_namespace_get_dax */ FUNC_MOCK(ndctl_namespace_get_dax, struct ndctl_dax *, struct ndctl_namespace *ndns) FUNC_MOCK_RUN_DEFAULT { if (IS_MODE_REGION((uintptr_t)ndns)) /* region mode */ return (struct ndctl_dax *)ndns; return NULL; } FUNC_MOCK_END /* * ndctl_pfn_get_resource - mock ndctl_pfn_get_resource */ FUNC_MOCK(ndctl_pfn_get_resource, unsigned long long, struct ndctl_pfn *pfn) FUNC_MOCK_RUN_DEFAULT { return RESOURCE_ADDRESS; } FUNC_MOCK_END /* * ndctl_pfn_get_size - mock ndctl_pfn_get_size */ FUNC_MOCK(ndctl_pfn_get_size, unsigned long long, struct ndctl_pfn *pfn) FUNC_MOCK_RUN_DEFAULT { return DEV_SIZE_1GB; /* 1 GiB */ } FUNC_MOCK_END /* * ndctl_dax_get_resource - mock ndctl_dax_get_resource */ FUNC_MOCK(ndctl_dax_get_resource, unsigned long long, struct ndctl_dax *dax) FUNC_MOCK_RUN_DEFAULT { return RESOURCE_ADDRESS; } FUNC_MOCK_END /* * ndctl_dax_get_size - mock ndctl_dax_get_size */ FUNC_MOCK(ndctl_dax_get_size, unsigned long long, struct ndctl_dax *dax) FUNC_MOCK_RUN_DEFAULT { return DEV_SIZE_1GB; /* 1 GiB */ } FUNC_MOCK_END /* * ndctl_namespace_get_resource - mock ndctl_namespace_get_resource */ FUNC_MOCK(ndctl_namespace_get_resource, unsigned long long, struct ndctl_namespace *ndns) FUNC_MOCK_RUN_DEFAULT { return RESOURCE_ADDRESS; } FUNC_MOCK_END /* * ndctl_namespace_get_size - mock ndctl_namespace_get_size */ FUNC_MOCK(ndctl_namespace_get_size, unsigned long long, struct ndctl_namespace *ndns) FUNC_MOCK_RUN_DEFAULT { return DEV_SIZE_1GB; /* 1 GiB */ } FUNC_MOCK_END /* * ndctl_region_get_resource - mock ndctl_region_get_resource */ FUNC_MOCK(ndctl_region_get_resource, unsigned long long, struct ndctl_region *region) FUNC_MOCK_RUN_DEFAULT { return RESOURCE_ADDRESS; } FUNC_MOCK_END /* * ndctl_region_get_bus - mock ndctl_region_get_bus */ FUNC_MOCK(ndctl_region_get_bus, struct ndctl_bus *, struct ndctl_region *region) FUNC_MOCK_RUN_DEFAULT { return (struct ndctl_bus *)region; } FUNC_MOCK_END /* * ndctl_namespace_get_first_badblock - mock ndctl_namespace_get_first_badblock */ FUNC_MOCK(ndctl_namespace_get_first_badblock, struct badblock *, struct ndctl_namespace *ndns) FUNC_MOCK_RUN_DEFAULT { i_bb = 0; return get_nth_hw_badblock(UINT(ndns), &i_bb); } FUNC_MOCK_END /* * ndctl_namespace_get_next_badblock - mock ndctl_namespace_get_next_badblock */ FUNC_MOCK(ndctl_namespace_get_next_badblock, struct badblock *, struct ndctl_namespace *ndns) FUNC_MOCK_RUN_DEFAULT { return get_nth_hw_badblock(UINT(ndns), &i_bb); } 
FUNC_MOCK_END /* * ndctl_region_get_first_badblock - mock ndctl_region_get_first_badblock */ FUNC_MOCK(ndctl_region_get_first_badblock, struct badblock *, struct ndctl_region *region) FUNC_MOCK_RUN_DEFAULT { i_bb = 0; return get_nth_hw_badblock(UINT(region), &i_bb); } FUNC_MOCK_END /* * ndctl_region_get_next_badblock - mock ndctl_region_get_next_badblock */ FUNC_MOCK(ndctl_region_get_next_badblock, struct badblock *, struct ndctl_region *region) FUNC_MOCK_RUN_DEFAULT { return get_nth_hw_badblock(UINT(region), &i_bb); } FUNC_MOCK_END static struct ndctl_data { uintptr_t bus; unsigned long long address; unsigned long long length; } data; /* * ndctl_bus_cmd_new_ars_cap - mock ndctl_bus_cmd_new_ars_cap */ FUNC_MOCK(ndctl_bus_cmd_new_ars_cap, struct ndctl_cmd *, struct ndctl_bus *bus, unsigned long long address, unsigned long long len) FUNC_MOCK_RUN_DEFAULT { data.bus = (uintptr_t)bus; data.address = address; data.length = len; return (struct ndctl_cmd *)&data; } FUNC_MOCK_END /* * ndctl_cmd_submit - mock ndctl_cmd_submit */ FUNC_MOCK(ndctl_cmd_submit, int, struct ndctl_cmd *cmd) FUNC_MOCK_RUN_DEFAULT { return 0; } FUNC_MOCK_END /* * ndctl_cmd_ars_cap_get_range - mock ndctl_cmd_ars_cap_get_range */ FUNC_MOCK(ndctl_cmd_ars_cap_get_range, int, struct ndctl_cmd *ars_cap, struct ndctl_range *range) FUNC_MOCK_RUN_DEFAULT { return 0; } FUNC_MOCK_END /* * ndctl_bus_cmd_new_clear_error - mock ndctl_bus_cmd_new_clear_error */ FUNC_MOCK(ndctl_bus_cmd_new_clear_error, struct ndctl_cmd *, unsigned long long address, unsigned long long len, struct ndctl_cmd *ars_cap) FUNC_MOCK_RUN_DEFAULT { return ars_cap; } FUNC_MOCK_END /* * ndctl_cmd_clear_error_get_cleared - mock ndctl_cmd_clear_error_get_cleared */ FUNC_MOCK(ndctl_cmd_clear_error_get_cleared, unsigned long long, struct ndctl_cmd *clear_err) FUNC_MOCK_RUN_DEFAULT { struct ndctl_data *pdata = (struct ndctl_data *)clear_err; UT_OUT("ndctl_clear_error(%lu, %llu, %llu)", pdata->bus, pdata->address, pdata->length); return pdata->length; } FUNC_MOCK_END /* * ndctl_cmd_unref - mock ndctl_cmd_unref */ FUNC_MOCK(ndctl_cmd_unref, void, struct ndctl_cmd *cmd) FUNC_MOCK_RUN_DEFAULT { } FUNC_MOCK_END
5,900
22.050781
79
c
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/pmem2_badblock_mocks/pmem2_badblock_mocks.h
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */

/*
 * pmem2_badblock_mocks.h -- definitions for pmem2_badblock_mocks test
 */

#include "extent.h"

/* fd bits 6-8: type of device */
#define FD_REG_FILE (1 << 6) /* regular file */
#define FD_CHR_DEV (2 << 6) /* character device */
#define FD_DIRECTORY (3 << 6) /* directory */
#define FD_BLK_DEV (4 << 6) /* block device */

/* fd bits 4-5: ndctl mode */
#define MODE_NO_DEVICE (1 << 4) /* did not find any matching device */
#define MODE_NAMESPACE (2 << 4) /* namespace mode */
#define MODE_REGION (3 << 4) /* region mode */

/* fd bits 0-3: number of test */

/* masks */
#define MASK_DEVICE 0b0111000000 /* bits 6-8: device mask */
#define MASK_MODE 0b0000110000 /* bits 4-5: mode mask */
#define MASK_TEST 0b0000001111 /* bits 0-3: test mask */

/* checks */
#define IS_MODE_NO_DEVICE(x) ((x & MASK_MODE) == MODE_NO_DEVICE)
#define IS_MODE_NAMESPACE(x) ((x & MASK_MODE) == MODE_NAMESPACE)
#define IS_MODE_REGION(x) ((x & MASK_MODE) == MODE_REGION)

/* default block size: 1kB */
#define BLK_SIZE_1KB 1024

/* default size of device: 1 GiB */
#define DEV_SIZE_1GB (1024 * 1024 * 1024)

struct badblock *get_nth_hw_badblock(unsigned test, unsigned *i_bb);
int get_extents(int fd, struct extents **exts);
1,290
31.275
71
h
null
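The mocked file descriptors double as parameter blocks; the masks above slice out the device type, the ndctl mode and the bad-block set. A purely illustrative decoder (not used by the test):

#include <stdio.h>

#include "pmem2_badblock_mocks.h"

static void
describe_fd(int fd)
{
	printf("device bits 0x%x, mode bits 0x%x, test #%d\n",
			fd & MASK_DEVICE, fd & MASK_MODE, fd & MASK_TEST);

	if (IS_MODE_NAMESPACE(fd))
		printf("ndctl reports a namespace match\n");
	else if (IS_MODE_REGION(fd))
		printf("ndctl reports a region match\n");
	else if (IS_MODE_NO_DEVICE(fd))
		printf("no matching device\n");
}

/* example: regular file, namespace mode, bad-block set #3 */
/* describe_fd(FD_REG_FILE | MODE_NAMESPACE | 2); */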
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/pmem2_badblock_mocks/mocks_pmem2.c
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */

/*
 * mocks_pmem2.c -- mocked pmem2 functions used
 * indirectly in pmem2_badblock_mocks.c
 */

#include <ndctl/libndctl.h>

#include "unittest.h"
#include "out.h"
#include "extent.h"
#include "source.h"
#include "pmem2_utils.h"
#include "pmem2_badblock_mocks.h"

/*
 * pmem2_region_namespace - mock pmem2_region_namespace
 */
FUNC_MOCK(pmem2_region_namespace, int,
		struct ndctl_ctx *ctx,
		const struct pmem2_source *src,
		struct ndctl_region **pregion,
		struct ndctl_namespace **pndns)
FUNC_MOCK_RUN_DEFAULT {
	UT_ASSERTne(pregion, NULL);

	dev_t st_rdev = src->value.st_rdev;
	*pregion = (void *)st_rdev;

	if (pndns == NULL)
		return 0;

	UT_ASSERT(src->value.ftype == PMEM2_FTYPE_REG ||
		src->value.ftype == PMEM2_FTYPE_DEVDAX);

	if (IS_MODE_NO_DEVICE(st_rdev)) {
		/* did not find any matching device */
		*pndns = NULL;
		return 0;
	}

	*pndns = (void *)st_rdev;

	return 0;
}
FUNC_MOCK_END

/*
 * pmem2_extents_create_get -- allocate extents structure and get extents
 * of the given file
 */
FUNC_MOCK(pmem2_extents_create_get, int, int fd, struct extents **exts)
FUNC_MOCK_RUN_DEFAULT {
	return get_extents(fd, exts);
}
FUNC_MOCK_END
1,279
20.694915
73
c
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/pmem2_badblock_mocks/pmem2_badblock_mocks.c
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */

/*
 * pmem2_badblock_mocks.c -- unit test for pmem2_badblock_*()
 */

#include <ndctl/libndctl.h>

#include "unittest.h"
#include "out.h"
#include "source.h"
#include "badblocks.h"
#include "pmem2_badblock_mocks.h"

#define BAD_BLOCKS_NUMBER 10
#define EXTENTS_NUMBER 8

#define MAX_BB_SET_STR "4"
#define MAX_BB_SET 4
#define DEFAULT_BB_SET 1

#define USAGE_MSG \
"Usage: pmem2_badblock_mocks <test_case> <file_type> <mode> [bad_blocks_set]\n"\
"Possible values of arguments:\n"\
" test_case : test_basic, test_read_clear_bb \n"\
" file_type : reg_file, chr_dev\n"\
" mode : no_device, namespace, region\n"\
" bad_blocks_set : 1-"MAX_BB_SET_STR"\n\n"

/* indexes of arguments */
enum args_t {
	ARG_TEST_CASE = 1,
	ARG_FILE_TYPE,
	ARG_MODE,
	ARG_BB_SET,
	/* it always has to be the last one */
	ARG_NUMBER, /* number of arguments */
};

typedef int test_fn(struct pmem2_source *src);

typedef struct badblock bad_blocks_array[BAD_BLOCKS_NUMBER];

/* HW bad blocks expressed in 512b sectors */
static bad_blocks_array hw_bad_blocks[] = {
	/* test #1 - no bad blocks */
	{ {0, 0} },
	/* test #2 - 1 HW bad block */
	{ {1, 1}, {0, 0} },
	/* test #3 - 6 HW bad blocks */
	{ {4, 10}, {16, 10}, {28, 2}, {32, 4}, {40, 4}, {50, 2}, {0, 0} },
	/* test #4 - 7 HW bad blocks */
	{ {2, 4}, {8, 2}, {12, 6}, {20, 2}, {24, 10}, {38, 4}, {46, 2}, \
		{0, 0} },
};

/* file's bad blocks expressed in 512b sectors */
static bad_blocks_array file_bad_blocks[] = {
	/* test #1 - no bad blocks */
	{ {0, 0} },
	/* test #2 - 1 file bad block */
	{ {0, 2}, {0, 0} },
	/* test #3 - 9 file bad blocks */
	{ {4, 2}, {8, 2}, {12, 2}, {16, 2}, {20, 2}, {24, 2}, {28, 2}, \
		{32, 2}, {40, 2}, {0, 0} },
	/* test #4 - 9 file bad blocks */
	{ {4, 2}, {8, 2}, {12, 2}, {16, 2}, {20, 2}, {24, 2}, {28, 2}, \
		{32, 2}, {40, 2}, {0, 0} },
};

/* file's extents expressed in 512b sectors */
static struct extent files_extents[][EXTENTS_NUMBER] = {
	/* test #1 - no extents */
	{ {0, 0, 0} },
	/* test #2 - 1 extent */
	{ {0, 0, 2}, {0, 0, 0} },
	/* test #3 - 7 extents */
	{ {2, 2, 4}, {8, 8, 2}, {12, 12, 6}, {20, 20, 2}, {24, 24, 10}, \
		{38, 38, 4}, {46, 46, 2}, {0, 0, 0} },
	/* test #4 - 6 extents */
	{ {4, 4, 10}, {16, 16, 10}, {28, 28, 2}, {32, 32, 4}, {40, 40, 4}, \
		{50, 50, 2}, {0, 0, 0} },
};

/*
 * map_test_to_set -- map number of a test to an index of bad blocks' set
 */
static inline unsigned
map_test_to_set(unsigned test)
{
	return test & MASK_TEST;
}

/*
 * get_nth_typed_badblock -- get next typed badblock
 */
static struct badblock *
get_nth_typed_badblock(unsigned test, unsigned *i_bb,
		bad_blocks_array bad_blocks[])
{
	unsigned set = map_test_to_set(test);
	struct badblock *bb = &bad_blocks[set][*i_bb];
	if (bb->offset == 0 && bb->len == 0)
		bb = NULL; /* no more bad blocks */
	else
		(*i_bb)++;

	return bb;
}

/*
 * get_nth_hw_badblock -- get next HW badblock
 */
struct badblock *
get_nth_hw_badblock(unsigned test, unsigned *i_bb)
{
	return get_nth_typed_badblock(test, i_bb, hw_bad_blocks);
}

/*
 * get_nth_file_badblock -- get next file's badblock
 */
static struct badblock *
get_nth_file_badblock(unsigned test, unsigned *i_bb)
{
	return get_nth_typed_badblock(test, i_bb, file_bad_blocks);
}

/*
 * get_nth_badblock -- get next badblock
 */
static struct badblock *
get_nth_badblock(int fd, unsigned *i_bb)
{
	UT_ASSERT(fd >= 0);

	if ((fd & MASK_MODE) == MODE_NO_DEVICE)
		/* no matching device found */
		return NULL;

	switch (fd & MASK_DEVICE) {
	case FD_REG_FILE: /* regular file */
		return get_nth_file_badblock((unsigned)fd, i_bb);
	case FD_CHR_DEV: /* character device */
		return get_nth_hw_badblock((unsigned)fd, i_bb);
	case FD_DIRECTORY:
	case FD_BLK_DEV:
		break;
	}

	/* no bad blocks found */
	return NULL;
}

/*
 * get_extents -- get file's extents
 */
int
get_extents(int fd, struct extents **exts)
{
	unsigned set = map_test_to_set((unsigned)fd);

	*exts = ZALLOC(sizeof(struct extents));
	struct extents *pexts = *exts;

	/* set block size */
	pexts->blksize = BLK_SIZE_1KB;

	if ((fd & MASK_DEVICE) != FD_REG_FILE) {
		/* not a regular file */
		return 0;
	}

	/* count extents (length > 0) */
	while (files_extents[set][pexts->extents_count].length)
		pexts->extents_count++;

	/*
	 * It will be freed internally by libpmem2
	 * (pmem2_badblock_context_delete)
	 */
	pexts->extents = MALLOC(pexts->extents_count * sizeof(struct extent));

	for (int i = 0; i < pexts->extents_count; i++) {
		struct extent ext = files_extents[set][i];
		uint64_t off_phy = ext.offset_physical;
		uint64_t off_log = ext.offset_logical;
		uint64_t len = ext.length;

		/* check alignment */
		UT_ASSERTeq(SEC2B(off_phy) % pexts->blksize, 0);
		UT_ASSERTeq(SEC2B(off_log) % pexts->blksize, 0);
		UT_ASSERTeq(SEC2B(len) % pexts->blksize, 0);

		pexts->extents[i].offset_physical = SEC2B(off_phy);
		pexts->extents[i].offset_logical = SEC2B(off_log);
		pexts->extents[i].length = SEC2B(len);
	}

	return 0;
}

/*
 * test_basic -- basic test
 */
static int
test_basic(struct pmem2_source *src)
{
	UT_OUT("TEST: test_basic: 0x%x", src->value.fd);

	struct pmem2_badblock_context *bbctx;
	struct pmem2_badblock bb;
	int ret;

	ret = pmem2_badblock_context_new(src, &bbctx);
	if (ret)
		return ret;

	ret = pmem2_badblock_next(bbctx, &bb);

	pmem2_badblock_context_delete(&bbctx);

	return ret;
}

/*
 * test_read_clear_bb -- test reading and clearing bad blocks
 */
static int
test_read_clear_bb(struct pmem2_source *src)
{
	UT_OUT("TEST: test_read_clear_bb: 0x%x", src->value.fd);

	struct pmem2_badblock_context *bbctx;
	struct pmem2_badblock bb;
	struct badblock *bb2;
	unsigned i_bb;
	int ret;

	ret = pmem2_badblock_context_new(src, &bbctx);
	if (ret)
		return ret;

	i_bb = 0;
	while ((ret = pmem2_badblock_next(bbctx, &bb)) == 0) {
		bb2 = get_nth_badblock(src->value.fd, &i_bb);
		UT_ASSERTne(bb2, NULL);
		UT_ASSERTeq(bb.offset, SEC2B(bb2->offset));
		UT_ASSERTeq(bb.length, SEC2B(bb2->len));
		ret = pmem2_badblock_clear(bbctx, &bb);
		if (ret)
			goto exit_free;
	}

	bb2 = get_nth_badblock(src->value.fd, &i_bb);
	UT_ASSERTeq(bb2, NULL);

exit_free:
	pmem2_badblock_context_delete(&bbctx);

	return ret;
}

static void
parse_arguments(int argc, char *argv[], int *test, enum pmem2_file_type *ftype,
		test_fn **test_func)
{
	if (argc < (ARG_NUMBER - 1) || argc > ARG_NUMBER) {
		UT_OUT(USAGE_MSG);
		if (argc > ARG_NUMBER)
			UT_FATAL("too many arguments");
		else
			UT_FATAL("missing required argument(s)");
	}

	char *test_case = argv[ARG_TEST_CASE];
	char *file_type = argv[ARG_FILE_TYPE];
	char *mode = argv[ARG_MODE];

	*test = 0;
	*test_func = NULL;

	if (strcmp(test_case, "test_basic") == 0) {
		*test_func = test_basic;
	} else if (strcmp(test_case, "test_read_clear_bb") == 0) {
		*test_func = test_read_clear_bb;
	} else {
		UT_OUT(USAGE_MSG);
		UT_FATAL("wrong test case: %s", test_case);
	}

	if (strcmp(file_type, "reg_file") == 0) {
		*test |= FD_REG_FILE;
		*ftype = PMEM2_FTYPE_REG;
	} else if (strcmp(file_type, "chr_dev") == 0) {
		*test |= FD_CHR_DEV;
		*ftype = PMEM2_FTYPE_DEVDAX;
	} else {
		UT_OUT(USAGE_MSG);
		UT_FATAL("wrong file type: %s", file_type);
	}

	if (strcmp(mode, "no_device") == 0) {
		*test |= MODE_NO_DEVICE;
	} else if (strcmp(mode, "namespace") == 0) {
		*test |= MODE_NAMESPACE;
	} else if (strcmp(mode, "region") == 0) {
		*test |= MODE_REGION;
	} else {
		UT_OUT(USAGE_MSG);
		UT_FATAL("wrong mode: %s", mode);
	}

	int bad_blocks_set = (argc == 5) ? atoi(argv[ARG_BB_SET]) :
		DEFAULT_BB_SET;

	if (bad_blocks_set >= 1 && bad_blocks_set <= MAX_BB_SET) {
		*test |= (bad_blocks_set - 1);
	} else {
		UT_OUT(USAGE_MSG);
		UT_FATAL("wrong bad_blocks_set: %i", bad_blocks_set);
	}
}

int
main(int argc, char *argv[])
{
	START(argc, argv, "pmem2_badblock_mocks");

	/* sanity check of defines */
	UT_ASSERTeq(atoi(MAX_BB_SET_STR), MAX_BB_SET);

	struct pmem2_source src;
	test_fn *test_func;

	src.type = PMEM2_SOURCE_FD;

	parse_arguments(argc, argv, &src.value.fd, &src.value.ftype,
			&test_func);

	src.value.st_rdev = (dev_t)src.value.fd;

	int result = test_func(&src);

	UT_ASSERTeq(result, PMEM2_E_NO_BAD_BLOCK_FOUND);

	DONE(NULL);
}
8,239
22.815029
80
c
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/pmem2_api/pmem2_api.c
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2020, Intel Corporation */

/*
 * pmem2_api.c -- PMEM2_API_[START|END] unittests
 */

#include "unittest.h"
#include "ut_pmem2.h"
#include "ut_pmem2_setup_integration.h"

/*
 * map_valid -- return valid mapped pmem2_map and validate mapped memory length
 */
static struct pmem2_map *
map_valid(struct pmem2_config *cfg, struct pmem2_source *src, size_t size)
{
	struct pmem2_map *map = NULL;
	PMEM2_MAP(cfg, src, &map);
	UT_ASSERTeq(pmem2_map_get_size(map), size);

	return map;
}

/*
 * test_pmem2_api_logs -- map O_RDWR file and do pmem2_[cpy|set|move]_fns
 */
static int
test_pmem2_api_logs(const struct test_case *tc, int argc, char *argv[])
{
	if (argc < 1)
		UT_FATAL(
			"usage: test_mem_move_cpy_set_with_map_private <file>");

	char *file = argv[0];
	int fd = OPEN(file, O_RDWR);
	const char *word1 = "Persistent memory...";
	const char *word2 = "Nonpersistent memory";
	const char *word3 = "XXXXXXXXXXXXXXXXXXXX";

	struct pmem2_config *cfg;
	struct pmem2_source *src;
	PMEM2_PREPARE_CONFIG_INTEGRATION(&cfg, &src, fd,
			PMEM2_GRANULARITY_PAGE);

	size_t size = 0;
	PMEM2_SOURCE_SIZE(src, &size);

	struct pmem2_map *map = map_valid(cfg, src, size);
	char *addr = pmem2_map_get_address(map);

	pmem2_memmove_fn memmove_fn = pmem2_get_memmove_fn(map);
	pmem2_memcpy_fn memcpy_fn = pmem2_get_memcpy_fn(map);
	pmem2_memset_fn memset_fn = pmem2_get_memset_fn(map);

	memcpy_fn(addr, word1, strlen(word1), 0);
	UT_ASSERTeq(strcmp(addr, word1), 0);

	memmove_fn(addr, word2, strlen(word2), 0);
	UT_ASSERTeq(strcmp(addr, word2), 0);

	memset_fn(addr, 'X', strlen(word3), 0);
	UT_ASSERTeq(strcmp(addr, word3), 0);

	/* cleanup after the test */
	pmem2_unmap(&map);
	pmem2_config_delete(&cfg);
	pmem2_source_delete(&src);
	CLOSE(fd);

	return 1;
}

/*
 * test_cases -- available test cases
 */
static struct test_case test_cases[] = {
	TEST_CASE(test_pmem2_api_logs),
};

#define NTESTS (sizeof(test_cases) / sizeof(test_cases[0]))

int
main(int argc, char *argv[])
{
	START(argc, argv, "pmem2_api");
	TEST_CASE_PROCESS(argc, argv, test_cases, NTESTS);
	DONE(NULL);
}
2,130
22.94382
79
c
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/rpmemd_obc/rpmemd_obc_test_open.c
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2018, Intel Corporation */

/*
 * rpmemd_obc_test_open.c -- test cases for open request message
 */

#include "rpmemd_obc_test_common.h"

/*
 * Number of cases for checking open request message. Must be kept in sync
 * with client_bad_msg_open function.
 */
#define BAD_MSG_OPEN_COUNT 11

/*
 * client_bad_msg_open -- check if server detects invalid open request
 * messages
 */
static void
client_bad_msg_open(const char *ctarget)
{
	char *target = STRDUP(ctarget);
	size_t msg_size = sizeof(OPEN_MSG) + POOL_DESC_SIZE;
	struct rpmem_msg_open *msg = MALLOC(msg_size);

	for (int i = 0; i < BAD_MSG_OPEN_COUNT; i++) {
		struct rpmem_ssh *ssh = clnt_connect(target);

		*msg = OPEN_MSG;
		msg->hdr.size = msg_size;
		memcpy(msg->pool_desc.desc, POOL_DESC, POOL_DESC_SIZE);

		switch (i) {
		case 0:
			msg->c.provider = 0;
			break;
		case 1:
			msg->c.provider = MAX_RPMEM_PROV;
			break;
		case 2:
			msg->pool_desc.size -= 1;
			break;
		case 3:
			msg->pool_desc.size += 1;
			break;
		case 4:
			msg->pool_desc.size = 0;
			msg->hdr.size = sizeof(OPEN_MSG) +
				msg->pool_desc.size;
			break;
		case 5:
			msg->pool_desc.size = 1;
			msg->hdr.size = sizeof(OPEN_MSG) +
				msg->pool_desc.size;
			break;
		case 6:
			msg->pool_desc.desc[0] = '\0';
			break;
		case 7:
			msg->pool_desc.desc[POOL_DESC_SIZE / 2] = '\0';
			break;
		case 8:
			msg->pool_desc.desc[POOL_DESC_SIZE - 1] = 'E';
			break;
		case 9:
			msg->c.major = RPMEM_PROTO_MAJOR + 1;
			break;
		case 10:
			msg->c.minor = RPMEM_PROTO_MINOR + 1;
			break;
		default:
			UT_ASSERT(0);
		}

		rpmem_hton_msg_open(msg);

		clnt_send(ssh, msg, msg_size);
		clnt_wait_disconnect(ssh);
		clnt_close(ssh);
	}

	FREE(msg);
	FREE(target);
}

/*
 * client_msg_open_noresp -- send open request message and don't expect a
 * response
 */
static void
client_msg_open_noresp(const char *ctarget)
{
	char *target = STRDUP(ctarget);
	size_t msg_size = sizeof(OPEN_MSG) + POOL_DESC_SIZE;
	struct rpmem_msg_open *msg = MALLOC(msg_size);
	struct rpmem_ssh *ssh = clnt_connect(target);

	*msg = OPEN_MSG;
	msg->hdr.size = msg_size;
	memcpy(msg->pool_desc.desc, POOL_DESC, POOL_DESC_SIZE);

	rpmem_hton_msg_open(msg);

	clnt_send(ssh, msg, msg_size);
	clnt_wait_disconnect(ssh);
	clnt_close(ssh);

	FREE(msg);
	FREE(target);
}

/*
 * client_msg_open_resp -- send open request message and expect a response
 * with specified status. If status is 0, validate open request response
 * message
 */
static void
client_msg_open_resp(const char *ctarget, int status)
{
	char *target = STRDUP(ctarget);
	size_t msg_size = sizeof(OPEN_MSG) + POOL_DESC_SIZE;
	struct rpmem_msg_open *msg = MALLOC(msg_size);
	struct rpmem_msg_open_resp resp;
	struct rpmem_ssh *ssh = clnt_connect(target);

	*msg = OPEN_MSG;
	msg->hdr.size = msg_size;
	memcpy(msg->pool_desc.desc, POOL_DESC, POOL_DESC_SIZE);

	rpmem_hton_msg_open(msg);

	clnt_send(ssh, msg, msg_size);
	clnt_recv(ssh, &resp, sizeof(resp));
	rpmem_ntoh_msg_open_resp(&resp);

	if (status) {
		UT_ASSERTeq(resp.hdr.status, (uint32_t)status);
	} else {
		UT_ASSERTeq(resp.hdr.type, RPMEM_MSG_TYPE_OPEN_RESP);
		UT_ASSERTeq(resp.hdr.size,
				sizeof(struct rpmem_msg_open_resp));
		UT_ASSERTeq(resp.hdr.status, (uint32_t)status);
		UT_ASSERTeq(resp.ibc.port, PORT);
		UT_ASSERTeq(resp.ibc.rkey, RKEY);
		UT_ASSERTeq(resp.ibc.raddr, RADDR);
		UT_ASSERTeq(resp.ibc.persist_method, PERSIST_METHOD);
	}

	clnt_close(ssh);

	FREE(msg);
	FREE(target);
}

/*
 * client_open -- test case for open request message - client side
 */
int
client_open(const struct test_case *tc, int argc, char *argv[])
{
	if (argc < 1)
		UT_FATAL("usage: %s <addr>[:<port>]", tc->name);

	char *target = argv[0];

	set_rpmem_cmd("server_bad_msg");
	client_bad_msg_open(target);

	set_rpmem_cmd("server_msg_noresp %d", RPMEM_MSG_TYPE_OPEN);
	client_msg_open_noresp(target);

	set_rpmem_cmd("server_msg_resp %d %d", RPMEM_MSG_TYPE_OPEN, 0);
	client_msg_open_resp(target, 0);

	set_rpmem_cmd("server_msg_resp %d %d", RPMEM_MSG_TYPE_OPEN, 1);
	client_msg_open_resp(target, 1);

	return 1;
}
4,105
21.56044
74
c
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/rpmemd_obc/rpmemd_obc_test_create.c
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2018, Intel Corporation */

/*
 * rpmemd_obc_test_create.c -- test cases for create request message
 */

#include "rpmemd_obc_test_common.h"

/*
 * Number of cases for checking create request message. Must be kept in sync
 * with client_bad_msg_create function.
 */
#define BAD_MSG_CREATE_COUNT 11

/*
 * client_bad_msg_create -- check if server detects invalid create request
 * messages
 */
static void
client_bad_msg_create(const char *ctarget)
{
	char *target = STRDUP(ctarget);
	size_t msg_size = sizeof(CREATE_MSG) + POOL_DESC_SIZE;
	struct rpmem_msg_create *msg = MALLOC(msg_size);

	for (int i = 0; i < BAD_MSG_CREATE_COUNT; i++) {
		struct rpmem_ssh *ssh = clnt_connect(target);

		*msg = CREATE_MSG;
		msg->hdr.size = msg_size;
		memcpy(msg->pool_desc.desc, POOL_DESC, POOL_DESC_SIZE);

		switch (i) {
		case 0:
			msg->c.provider = 0;
			break;
		case 1:
			msg->c.provider = MAX_RPMEM_PROV;
			break;
		case 2:
			msg->pool_desc.size -= 1;
			break;
		case 3:
			msg->pool_desc.size += 1;
			break;
		case 4:
			msg->pool_desc.size = 0;
			msg->hdr.size = sizeof(CREATE_MSG) +
				msg->pool_desc.size;
			break;
		case 5:
			msg->pool_desc.size = 1;
			msg->hdr.size = sizeof(CREATE_MSG) +
				msg->pool_desc.size;
			break;
		case 6:
			msg->pool_desc.desc[0] = '\0';
			break;
		case 7:
			msg->pool_desc.desc[POOL_DESC_SIZE / 2] = '\0';
			break;
		case 8:
			msg->pool_desc.desc[POOL_DESC_SIZE - 1] = 'E';
			break;
		case 9:
			msg->c.major = RPMEM_PROTO_MAJOR + 1;
			break;
		case 10:
			msg->c.minor = RPMEM_PROTO_MINOR + 1;
			break;
		default:
			UT_ASSERT(0);
		}

		rpmem_hton_msg_create(msg);

		clnt_send(ssh, msg, msg_size);
		clnt_wait_disconnect(ssh);
		clnt_close(ssh);
	}

	FREE(msg);
	FREE(target);
}

/*
 * client_msg_create_noresp -- send create request message and don't expect
 * a response
 */
static void
client_msg_create_noresp(const char *ctarget)
{
	char *target = STRDUP(ctarget);
	size_t msg_size = sizeof(CREATE_MSG) + POOL_DESC_SIZE;
	struct rpmem_msg_create *msg = MALLOC(msg_size);
	struct rpmem_ssh *ssh = clnt_connect(target);

	*msg = CREATE_MSG;
	msg->hdr.size = msg_size;
	memcpy(msg->pool_desc.desc, POOL_DESC, POOL_DESC_SIZE);

	rpmem_hton_msg_create(msg);

	clnt_send(ssh, msg, msg_size);
	clnt_close(ssh);

	FREE(msg);
	FREE(target);
}

/*
 * client_msg_create_resp -- send create request message and expect a response
 * with specified status. If status is 0, validate create request response
 * message
 */
static void
client_msg_create_resp(const char *ctarget, int status)
{
	char *target = STRDUP(ctarget);
	size_t msg_size = sizeof(CREATE_MSG) + POOL_DESC_SIZE;
	struct rpmem_msg_create *msg = MALLOC(msg_size);
	struct rpmem_msg_create_resp resp;
	struct rpmem_ssh *ssh = clnt_connect(target);

	*msg = CREATE_MSG;
	msg->hdr.size = msg_size;
	memcpy(msg->pool_desc.desc, POOL_DESC, POOL_DESC_SIZE);

	rpmem_hton_msg_create(msg);

	clnt_send(ssh, msg, msg_size);
	clnt_recv(ssh, &resp, sizeof(resp));
	rpmem_ntoh_msg_create_resp(&resp);

	if (status) {
		UT_ASSERTeq(resp.hdr.status, (uint32_t)status);
	} else {
		UT_ASSERTeq(resp.hdr.type, RPMEM_MSG_TYPE_CREATE_RESP);
		UT_ASSERTeq(resp.hdr.size,
				sizeof(struct rpmem_msg_create_resp));
		UT_ASSERTeq(resp.hdr.status, (uint32_t)status);
		UT_ASSERTeq(resp.ibc.port, PORT);
		UT_ASSERTeq(resp.ibc.rkey, RKEY);
		UT_ASSERTeq(resp.ibc.raddr, RADDR);
		UT_ASSERTeq(resp.ibc.persist_method, PERSIST_METHOD);
	}

	clnt_close(ssh);

	FREE(msg);
	FREE(target);
}

/*
 * client_create -- test case for create request message - client side
 */
int
client_create(const struct test_case *tc, int argc, char *argv[])
{
	if (argc < 1)
		UT_FATAL("usage: %s <addr>[:<port>]", tc->name);

	char *target = argv[0];

	set_rpmem_cmd("server_bad_msg");
	client_bad_msg_create(target);

	set_rpmem_cmd("server_msg_noresp %d", RPMEM_MSG_TYPE_CREATE);
	client_msg_create_noresp(target);

	set_rpmem_cmd("server_msg_resp %d %d", RPMEM_MSG_TYPE_CREATE, 0);
	client_msg_create_resp(target, 0);

	set_rpmem_cmd("server_msg_resp %d %d", RPMEM_MSG_TYPE_CREATE, 1);
	client_msg_create_resp(target, 1);

	return 1;
}
4,165
22.016575
78
c
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/rpmemd_obc/rpmemd_obc_test_set_attr.c
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2017, Intel Corporation */

/*
 * rpmemd_obc_test_set_attr.c -- test cases for set attributes request message
 */

#include "rpmemd_obc_test_common.h"

/*
 * client_msg_set_attr_noresp -- send set attributes request message and don't
 * expect a response
 */
static void
client_msg_set_attr_noresp(const char *ctarget)
{
	char *target = STRDUP(ctarget);
	size_t msg_size = sizeof(SET_ATTR_MSG);
	struct rpmem_msg_set_attr *msg = MALLOC(msg_size);
	struct rpmem_ssh *ssh = clnt_connect(target);

	*msg = SET_ATTR_MSG;

	rpmem_hton_msg_set_attr(msg);

	clnt_send(ssh, msg, msg_size);
	clnt_wait_disconnect(ssh);
	clnt_close(ssh);

	FREE(msg);
	FREE(target);
}

/*
 * client_msg_set_attr_resp -- send set attributes request message and expect
 * a response with specified status. If status is 0, validate set attributes
 * request response message
 */
static void
client_msg_set_attr_resp(const char *ctarget, int status)
{
	char *target = STRDUP(ctarget);
	size_t msg_size = sizeof(SET_ATTR_MSG);
	struct rpmem_msg_set_attr *msg = MALLOC(msg_size);
	struct rpmem_msg_set_attr_resp resp;
	struct rpmem_ssh *ssh = clnt_connect(target);

	*msg = SET_ATTR_MSG;

	rpmem_hton_msg_set_attr(msg);

	clnt_send(ssh, msg, msg_size);
	clnt_recv(ssh, &resp, sizeof(resp));
	rpmem_ntoh_msg_set_attr_resp(&resp);

	if (status) {
		UT_ASSERTeq(resp.hdr.status, (uint32_t)status);
	} else {
		UT_ASSERTeq(resp.hdr.type, RPMEM_MSG_TYPE_SET_ATTR_RESP);
		UT_ASSERTeq(resp.hdr.size,
				sizeof(struct rpmem_msg_set_attr_resp));
		UT_ASSERTeq(resp.hdr.status, (uint32_t)status);
	}

	clnt_close(ssh);

	FREE(msg);
	FREE(target);
}

/*
 * client_set_attr -- test case for set attributes request message - client
 * side
 */
int
client_set_attr(const struct test_case *tc, int argc, char *argv[])
{
	if (argc < 1)
		UT_FATAL("usage: %s <addr>[:<port>]", tc->name);

	char *target = argv[0];

	set_rpmem_cmd("server_msg_noresp %d", RPMEM_MSG_TYPE_SET_ATTR);
	client_msg_set_attr_noresp(target);

	set_rpmem_cmd("server_msg_resp %d %d", RPMEM_MSG_TYPE_SET_ATTR, 0);
	client_msg_set_attr_resp(target, 0);

	set_rpmem_cmd("server_msg_resp %d %d", RPMEM_MSG_TYPE_SET_ATTR, 1);
	client_msg_set_attr_resp(target, 1);

	return 1;
}
2,255
22.5
78
c
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/rpmemd_obc/rpmemd_obc_test_close.c
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016, Intel Corporation */

/*
 * rpmemd_obc_test_close.c -- test cases for close request message
 */

#include "rpmemd_obc_test_common.h"

/*
 * client_msg_close_noresp -- send close request message and don't expect a
 * response
 */
static void
client_msg_close_noresp(const char *ctarget)
{
	char *target = STRDUP(ctarget);
	struct rpmem_msg_close msg = CLOSE_MSG;
	rpmem_hton_msg_close(&msg);

	struct rpmem_ssh *ssh = clnt_connect(target);

	clnt_send(ssh, &msg, sizeof(msg));
	clnt_wait_disconnect(ssh);
	clnt_close(ssh);

	FREE(target);
}

/*
 * client_msg_close_resp -- send close request message and expect a response
 * with specified status. If status is 0, validate close request response
 * message
 */
static void
client_msg_close_resp(const char *ctarget, int status)
{
	char *target = STRDUP(ctarget);
	struct rpmem_msg_close msg = CLOSE_MSG;
	rpmem_hton_msg_close(&msg);
	struct rpmem_msg_close_resp resp;

	struct rpmem_ssh *ssh = clnt_connect(target);

	clnt_send(ssh, &msg, sizeof(msg));
	clnt_recv(ssh, &resp, sizeof(resp));
	rpmem_ntoh_msg_close_resp(&resp);

	if (status)
		UT_ASSERTeq(resp.hdr.status, (uint32_t)status);

	clnt_close(ssh);

	FREE(target);
}

/*
 * client_close -- test case for close request message - client side
 */
int
client_close(const struct test_case *tc, int argc, char *argv[])
{
	if (argc < 1)
		UT_FATAL("usage: %s <addr>[:<port>]", tc->name);

	char *target = argv[0];

	set_rpmem_cmd("server_msg_noresp %d", RPMEM_MSG_TYPE_CLOSE);
	client_msg_close_noresp(target);

	set_rpmem_cmd("server_msg_resp %d %d", RPMEM_MSG_TYPE_CLOSE, 0);
	client_msg_close_resp(target, 0);

	set_rpmem_cmd("server_msg_resp %d %d", RPMEM_MSG_TYPE_CLOSE, 1);
	client_msg_close_resp(target, 1);

	return 1;
}
1,791
21.683544
76
c
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/rpmemd_obc/rpmemd_obc_test_common.h
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016-2018, Intel Corporation */

/*
 * rpmemd_obc_test_common.h -- common declarations for rpmemd_obc test
 */

#include "unittest.h"
#include "librpmem.h"
#include "rpmem_proto.h"
#include "rpmem_common.h"
#include "rpmem_ssh.h"
#include "rpmem_util.h"
#include "rpmemd_log.h"
#include "rpmemd_obc.h"

#define PORT 1234
#define RKEY 0x0123456789abcdef
#define RADDR 0xfedcba9876543210
#define PERSIST_METHOD RPMEM_PM_APM

#define POOL_ATTR_INIT {\
	.signature = "<RPMEM>",\
	.major = 1,\
	.compat_features = 2,\
	.incompat_features = 3,\
	.ro_compat_features = 4,\
	.poolset_uuid = "POOLSET_UUID0123",\
	.uuid = "UUID0123456789AB",\
	.next_uuid = "NEXT_UUID0123456",\
	.prev_uuid = "PREV_UUID0123456",\
	.user_flags = "USER_FLAGS012345",\
}

#define POOL_ATTR_ALT {\
	.signature = "<ALT>",\
	.major = 5,\
	.compat_features = 6,\
	.incompat_features = 7,\
	.ro_compat_features = 8,\
	.poolset_uuid = "UUID_POOLSET_ALT",\
	.uuid = "ALT_UUIDCDEFFEDC",\
	.next_uuid = "456UUID_NEXT_ALT",\
	.prev_uuid = "UUID012_ALT_PREV",\
	.user_flags = "012345USER_FLAGS",\
}

#define POOL_SIZE 0x0001234567abcdef
#define NLANES 0x123
#define NLANES_RESP 16
#define PROVIDER RPMEM_PROV_LIBFABRIC_SOCKETS
#define POOL_DESC "pool.set"
#define BUFF_SIZE 8192

static const char pool_desc[] = POOL_DESC;
#define POOL_DESC_SIZE (sizeof(pool_desc) / sizeof(pool_desc[0]))

struct rpmem_ssh *clnt_connect(char *target);
void clnt_wait_disconnect(struct rpmem_ssh *ssh);
void clnt_send(struct rpmem_ssh *ssh, const void *buff, size_t len);
void clnt_recv(struct rpmem_ssh *ssh, void *buff, size_t len);
void clnt_close(struct rpmem_ssh *ssh);

enum conn_wait_close {
	CONN_CLOSE,
	CONN_WAIT_CLOSE,
};

void set_rpmem_cmd(const char *fmt, ...);

extern struct rpmemd_obc_requests REQ_CB;

struct req_cb_arg {
	int resp;
	unsigned long long types;
	int force_ret;
	int ret;
	int status;
};

static const struct rpmem_msg_hdr MSG_HDR = {
	.type = RPMEM_MSG_TYPE_CLOSE,
	.size = sizeof(struct rpmem_msg_hdr),
};

static const struct rpmem_msg_create CREATE_MSG = {
	.hdr = {
		.type = RPMEM_MSG_TYPE_CREATE,
		.size = sizeof(struct rpmem_msg_create),
	},
	.c = {
		.major = RPMEM_PROTO_MAJOR,
		.minor = RPMEM_PROTO_MINOR,
		.pool_size = POOL_SIZE,
		.nlanes = NLANES,
		.provider = PROVIDER,
		.buff_size = BUFF_SIZE,
	},
	.pool_attr = POOL_ATTR_INIT,
	.pool_desc = {
		.size = POOL_DESC_SIZE,
	},
};

static const struct rpmem_msg_open OPEN_MSG = {
	.hdr = {
		.type = RPMEM_MSG_TYPE_OPEN,
		.size = sizeof(struct rpmem_msg_open),
	},
	.c = {
		.major = RPMEM_PROTO_MAJOR,
		.minor = RPMEM_PROTO_MINOR,
		.pool_size = POOL_SIZE,
		.nlanes = NLANES,
		.provider = PROVIDER,
		.buff_size = BUFF_SIZE,
	},
	.pool_desc = {
		.size = POOL_DESC_SIZE,
	},
};

static const struct rpmem_msg_close CLOSE_MSG = {
	.hdr = {
		.type = RPMEM_MSG_TYPE_CLOSE,
		.size = sizeof(struct rpmem_msg_close),
	},
};

static const struct rpmem_msg_set_attr SET_ATTR_MSG = {
	.hdr = {
		.type = RPMEM_MSG_TYPE_SET_ATTR,
		.size = sizeof(struct rpmem_msg_set_attr),
	},
	.pool_attr = POOL_ATTR_ALT,
};

TEST_CASE_DECLARE(server_accept_sim);
TEST_CASE_DECLARE(server_accept_sim_fork);
TEST_CASE_DECLARE(client_accept_sim);
TEST_CASE_DECLARE(server_accept_seq);
TEST_CASE_DECLARE(server_accept_seq_fork);
TEST_CASE_DECLARE(client_accept_seq);

TEST_CASE_DECLARE(client_bad_msg_hdr);
TEST_CASE_DECLARE(server_bad_msg);
TEST_CASE_DECLARE(server_msg_noresp);
TEST_CASE_DECLARE(server_msg_resp);

TEST_CASE_DECLARE(client_econnreset);
TEST_CASE_DECLARE(server_econnreset);

TEST_CASE_DECLARE(client_create);
TEST_CASE_DECLARE(server_open);
TEST_CASE_DECLARE(client_close);
TEST_CASE_DECLARE(server_close);

TEST_CASE_DECLARE(client_open);
TEST_CASE_DECLARE(client_set_attr);
3,791
23
70
h
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/ctl_prefault/ctl_prefault.c
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2018, Intel Corporation */

/*
 * ctl_prefault.c -- tests for the ctl entry points: prefault
 */

#include <stdlib.h>
#include <string.h>
#include <sys/resource.h>
#include "unittest.h"

#define OBJ_STR "obj"
#define BLK_STR "blk"
#define LOG_STR "log"

#define BSIZE 20
#define LAYOUT "obj_ctl_prefault"

#ifdef __FreeBSD__
typedef char vec_t;
#else
typedef unsigned char vec_t;
#endif

typedef int (*fun)(void *, const char *, void *);

/*
 * prefault_fun -- function ctl_get/set testing
 */
static void
prefault_fun(int prefault, fun get_func, fun set_func)
{
	int ret;
	int arg;
	int arg_read;

	if (prefault == 1) { /* prefault at open */
		arg_read = -1;
		ret = get_func(NULL, "prefault.at_open", &arg_read);
		UT_ASSERTeq(ret, 0);
		UT_ASSERTeq(arg_read, 0);

		arg = 1;
		ret = set_func(NULL, "prefault.at_open", &arg);
		UT_ASSERTeq(ret, 0);
		UT_ASSERTeq(arg, 1);

		arg_read = -1;
		ret = get_func(NULL, "prefault.at_open", &arg_read);
		UT_ASSERTeq(ret, 0);
		UT_ASSERTeq(arg_read, 1);
	} else if (prefault == 2) { /* prefault at create */
		arg_read = -1;
		ret = get_func(NULL, "prefault.at_create", &arg_read);
		UT_ASSERTeq(ret, 0);
		UT_ASSERTeq(arg_read, 0);

		arg = 1;
		ret = set_func(NULL, "prefault.at_create", &arg);
		UT_ASSERTeq(ret, 0);
		UT_ASSERTeq(arg, 1);

		arg_read = -1;
		ret = get_func(NULL, "prefault.at_create", &arg_read);
		UT_ASSERTeq(ret, 0);
		UT_ASSERTeq(arg_read, 1);
	}
}

/*
 * count_resident_pages -- count resident_pages
 */
static size_t
count_resident_pages(void *pool, size_t length)
{
	size_t arr_len = (length + Ut_pagesize - 1) / Ut_pagesize;
	vec_t *vec = MALLOC(sizeof(*vec) * arr_len);

	int ret = mincore(pool, length, vec);
	UT_ASSERTeq(ret, 0);

	size_t resident_pages = 0;
	for (size_t i = 0; i < arr_len; ++i)
		resident_pages += vec[i] & 0x1;

	FREE(vec);

	return resident_pages;
}

/*
 * test_obj -- open/create PMEMobjpool
 */
static void
test_obj(const char *path, int open)
{
	PMEMobjpool *pop;
	if (open) {
		if ((pop = pmemobj_open(path, LAYOUT)) == NULL)
			UT_FATAL("!pmemobj_open: %s", path);
	} else {
		if ((pop = pmemobj_create(path, LAYOUT, PMEMOBJ_MIN_POOL,
				S_IWUSR | S_IRUSR)) == NULL)
			UT_FATAL("!pmemobj_create: %s", path);
	}

	size_t resident_pages = count_resident_pages(pop, PMEMOBJ_MIN_POOL);

	pmemobj_close(pop);

	UT_OUT("%ld", resident_pages);
}

/*
 * test_blk -- open/create PMEMblkpool
 */
static void
test_blk(const char *path, int open)
{
	PMEMblkpool *pbp;
	if (open) {
		if ((pbp = pmemblk_open(path, BSIZE)) == NULL)
			UT_FATAL("!pmemblk_open: %s", path);
	} else {
		if ((pbp = pmemblk_create(path, BSIZE, PMEMBLK_MIN_POOL,
				S_IWUSR | S_IRUSR)) == NULL)
			UT_FATAL("!pmemblk_create: %s", path);
	}

	size_t resident_pages = count_resident_pages(pbp, PMEMBLK_MIN_POOL);

	pmemblk_close(pbp);

	UT_OUT("%ld", resident_pages);
}

/*
 * test_log -- open/create PMEMlogpool
 */
static void
test_log(const char *path, int open)
{
	PMEMlogpool *plp;

	/*
	 * To test prefaulting, pool must have size at least equal to 2 pages.
	 * If 2MB huge pages are used this is at least 4MB.
	 */
	size_t pool_size = 2 * PMEMLOG_MIN_POOL;

	if (open) {
		if ((plp = pmemlog_open(path)) == NULL)
			UT_FATAL("!pmemlog_open: %s", path);
	} else {
		if ((plp = pmemlog_create(path, pool_size,
				S_IWUSR | S_IRUSR)) == NULL)
			UT_FATAL("!pmemlog_create: %s", path);
	}

	size_t resident_pages = count_resident_pages(plp, pool_size);

	pmemlog_close(plp);

	UT_OUT("%ld", resident_pages);
}

#define USAGE() do {\
	UT_FATAL("usage: %s file-name type(obj/blk/log) prefault(0/1/2) "\
		"open(0/1)", argv[0]);\
} while (0)

int
main(int argc, char *argv[])
{
	START(argc, argv, "ctl_prefault");

	if (argc != 5)
		USAGE();

	char *type = argv[1];
	const char *path = argv[2];
	int prefault = atoi(argv[3]);
	int open = atoi(argv[4]);

	if (strcmp(type, OBJ_STR) == 0) {
		prefault_fun(prefault, (fun)pmemobj_ctl_get,
				(fun)pmemobj_ctl_set);
		test_obj(path, open);
	} else if (strcmp(type, BLK_STR) == 0) {
		prefault_fun(prefault, (fun)pmemblk_ctl_get,
				(fun)pmemblk_ctl_set);
		test_blk(path, open);
	} else if (strcmp(type, LOG_STR) == 0) {
		prefault_fun(prefault, (fun)pmemlog_ctl_get,
				(fun)pmemlog_ctl_set);
		test_log(path, open);
	} else
		USAGE();

	DONE(NULL);
}
4,326
20.527363
71
c
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/pmem2_memcpy/memcpy_common.c
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2015-2020, Intel Corporation */

/*
 * memcpy_common.c -- common part for tests doing a persistent memcpy
 */

#include "unittest.h"
#include "memcpy_common.h"
#include "valgrind_internal.h"

/*
 * do_memcpy: Worker function for memcpy
 *
 * Always work within the boundary of bytes. Fill in 1/2 of the src
 * memory with the pattern we want to write. This allows us to check
 * that we did not overwrite anything we were not supposed to in the
 * dest. Use the non pmem version of the memset/memcpy commands
 * so as not to introduce any possible side affects.
 */
void
do_memcpy(int fd, char *dest, int dest_off, char *src, int src_off,
		size_t bytes, size_t mapped_len, const char *file_name,
		memcpy_fn fn, unsigned flags, persist_fn persist)
{
	void *ret;
	char *buf = MALLOC(bytes);

	memset(buf, 0, bytes);
	memset(dest, 0, bytes);
	persist(dest, bytes);

	memset(src, 0, bytes);
	persist(src, bytes);

	memset(src, 0x5A, bytes / 4);
	persist(src, bytes / 4);
	memset(src + bytes / 4, 0x46, bytes / 4);
	persist(src + bytes / 4, bytes / 4);

	/* dest == src */
	ret = fn(dest + dest_off, dest + dest_off, bytes / 2, flags);
	UT_ASSERTeq(ret, dest + dest_off);
	UT_ASSERTeq(*(char *)(dest + dest_off), 0);

	/* len == 0 */
	ret = fn(dest + dest_off, src, 0, flags);
	UT_ASSERTeq(ret, dest + dest_off);
	UT_ASSERTeq(*(char *)(dest + dest_off), 0);

	ret = fn(dest + dest_off, src + src_off, bytes / 2, flags);
	if (flags & PMEM2_F_MEM_NOFLUSH)
		VALGRIND_DO_PERSIST((dest + dest_off), bytes / 2);
	UT_ASSERTeq(ret, dest + dest_off);

	/* memcmp will validate that what I expect in memory. */
	if (memcmp(src + src_off, dest + dest_off, bytes / 2))
		UT_FATAL("%s: first %zu bytes do not match",
			file_name, bytes / 2);

	/* Now validate the contents of the file */
	LSEEK(fd, (os_off_t)(dest_off + (int)(mapped_len / 2)), SEEK_SET);
	if (READ(fd, buf, bytes / 2) == bytes / 2) {
		if (memcmp(src + src_off, buf, bytes / 2))
			UT_FATAL("%s: first %zu bytes do not match",
				file_name, bytes / 2);
	}

	FREE(buf);
}

unsigned Flags[] = {
	0,
	PMEM_F_MEM_NODRAIN,
	PMEM_F_MEM_NONTEMPORAL,
	PMEM_F_MEM_TEMPORAL,
	PMEM_F_MEM_NONTEMPORAL | PMEM_F_MEM_TEMPORAL,
	PMEM_F_MEM_NONTEMPORAL | PMEM_F_MEM_NODRAIN,
	PMEM_F_MEM_WC,
	PMEM_F_MEM_WB,
	PMEM_F_MEM_NOFLUSH,
	/* all possible flags */
	PMEM_F_MEM_NODRAIN | PMEM_F_MEM_NOFLUSH |
		PMEM_F_MEM_NONTEMPORAL | PMEM_F_MEM_TEMPORAL |
		PMEM_F_MEM_WC | PMEM_F_MEM_WB,
};
2,491
27.643678
73
c