Dataset columns (name · dtype · value range):
    repo              string   lengths 1 – 152
    file              string   lengths 14 – 221
    code              string   lengths 501 – 25k
    file_length       int64    501 – 25k
    avg_line_length   float64  20 – 99.5
    max_line_length   int64    21 – 134
    extension_type    string   2 classes
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/examples/libpmemobj/libart/art.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2019, Intel Corporation */ /* * Copyright 2016, FUJITSU TECHNOLOGY SOLUTIONS GMBH * Copyright 2012, Armon Dadgar. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * =========================================================================== * * Filename: art.h * * Description: header file for art tree on pmem implementation * * Author: Andreas Bluemle, Dieter Kasper * Andreas.Bluemle.external@ts.fujitsu.com * dieter.kasper@ts.fujitsu.com * * Organization: FUJITSU TECHNOLOGY SOLUTIONS GMBH * * =========================================================================== */ /* * based on https://github.com/armon/libart/src/art.h */ #ifndef _ART_H #define _ART_H #ifdef __cplusplus extern "C" { #endif #define MAX_PREFIX_LEN 10 typedef enum { NODE4 = 0, NODE16 = 1, NODE48 = 2, NODE256 = 3, art_leaf_t = 4, art_node_types = 5 /* number of different art_nodes */ } art_node_type; char *art_node_names[] = { "art_node4", "art_node16", "art_node48", "art_node256", "art_leaf" }; /* * forward declarations; these are required when typedef shall be * used instead of struct */ struct _art_node_u; typedef struct _art_node_u art_node_u; struct _art_node; typedef struct _art_node art_node; struct _art_node4; typedef struct _art_node4 art_node4; struct _art_node16; typedef struct _art_node16 art_node16; struct _art_node48; typedef struct _art_node48 art_node48; struct _art_node256; typedef struct _art_node256 art_node256; struct _art_leaf; typedef struct _art_leaf art_leaf; struct _var_string; typedef struct _var_string var_string; POBJ_LAYOUT_BEGIN(arttree_tx); POBJ_LAYOUT_ROOT(arttree_tx, struct art_tree_root); POBJ_LAYOUT_TOID(arttree_tx, art_node_u); POBJ_LAYOUT_TOID(arttree_tx, art_node4); POBJ_LAYOUT_TOID(arttree_tx, art_node16); POBJ_LAYOUT_TOID(arttree_tx, art_node48); POBJ_LAYOUT_TOID(arttree_tx, art_node256); POBJ_LAYOUT_TOID(arttree_tx, art_leaf); POBJ_LAYOUT_TOID(arttree_tx, var_string); POBJ_LAYOUT_END(arttree_tx); struct _var_string { size_t len; unsigned char s[]; }; /* * This struct is included as part of all the 
various node sizes */ struct _art_node { uint8_t num_children; uint32_t partial_len; unsigned char partial[MAX_PREFIX_LEN]; }; /* * Small node with only 4 children */ struct _art_node4 { art_node n; unsigned char keys[4]; TOID(art_node_u) children[4]; }; /* * Node with 16 children */ struct _art_node16 { art_node n; unsigned char keys[16]; TOID(art_node_u) children[16]; }; /* * Node with 48 children, but a full 256 byte field. */ struct _art_node48 { art_node n; unsigned char keys[256]; TOID(art_node_u) children[48]; }; /* * Full node with 256 children */ struct _art_node256 { art_node n; TOID(art_node_u) children[256]; }; /* * Represents a leaf. These are of arbitrary size, as they include the key. */ struct _art_leaf { TOID(var_string) value; TOID(var_string) key; }; struct _art_node_u { uint8_t art_node_type; uint8_t art_node_tag; union { TOID(art_node4) an4; /* starts with art_node */ TOID(art_node16) an16; /* starts with art_node */ TOID(art_node48) an48; /* starts with art_node */ TOID(art_node256) an256; /* starts with art_node */ TOID(art_leaf) al; } u; }; struct art_tree_root { int size; TOID(art_node_u) root; }; typedef struct _cb_data { TOID(art_node_u) node; int child_idx; } cb_data; /* * Macros to manipulate art_node tags */ #define IS_LEAF(x) (((x)->art_node_type == art_leaf_t)) #define SET_LEAF(x) (((x)->art_node_tag = art_leaf_t)) #define COPY_BLOB(_obj, _blob, _len) \ D_RW(_obj)->len = _len; \ TX_MEMCPY(D_RW(_obj)->s, _blob, _len); \ D_RW(_obj)->s[(_len) - 1] = '\0'; typedef int(*art_callback)(void *data, const unsigned char *key, uint32_t key_len, const unsigned char *value, uint32_t val_len); extern int art_tree_init(PMEMobjpool *pop, int *newpool); extern uint64_t art_size(PMEMobjpool *pop); extern int art_iter(PMEMobjpool *pop, art_callback cb, void *data); extern TOID(var_string) art_insert(PMEMobjpool *pop, const unsigned char *key, int key_len, void *value, int val_len); extern TOID(var_string) art_search(PMEMobjpool *pop, const unsigned char *key, int key_len); extern TOID(var_string) art_delete(PMEMobjpool *pop, const unsigned char *key, int key_len); #ifdef __cplusplus } #endif #endif /* _ART_H */
file_length: 5,998 | avg_line_length: 26.773148 | max_line_length: 78 | extension_type: h
null
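A minimal usage sketch for the public API declared in art.h above; the pool path, key, and value are placeholders, and the convention that key_len/val_len include the terminating '\0' is an assumption inferred from the COPY_BLOB macro.

#include <stdio.h>
#include <string.h>
#include <libpmemobj.h>
#include "art.h"

int
main(void)
{
	/* hypothetical pool path; the layout name comes from POBJ_LAYOUT_BEGIN(arttree_tx) */
	PMEMobjpool *pop = pmemobj_create("/mnt/pmem/artpool",
	    POBJ_LAYOUT_NAME(arttree_tx), PMEMOBJ_MIN_POOL, 0666);
	if (pop == NULL)
		return 1;

	int newpool = 1;
	if (art_tree_init(pop, &newpool) != 0)
		return 1;

	const unsigned char *key = (const unsigned char *)"hello";
	char value[] = "world";
	/* assumption: lengths include the trailing '\0', matching COPY_BLOB's s[len - 1] = '\0' */
	art_insert(pop, key, (int)strlen((const char *)key) + 1,
	    value, (int)sizeof(value));

	TOID(var_string) found = art_search(pop, key,
	    (int)strlen((const char *)key) + 1);
	if (!TOID_IS_NULL(found))
		printf("found: %s\n", (const char *)D_RO(found)->s);

	pmemobj_close(pop);
	return 0;
}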
NearPMSW-main/nearpm/shadow/pmdk-sd/src/examples/libpmemobj/libart/arttree_search.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2016-2017, Intel Corporation */ /* * Copyright 2016, FUJITSU TECHNOLOGY SOLUTIONS GMBH * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * =========================================================================== * * Filename: arttree_search.c * * Description: implementation of search function for ART tree * * Author: Andreas Bluemle, Dieter Kasper * Andreas.Bluemle.external@ts.fujitsu.com * dieter.kasper@ts.fujitsu.com * * Organization: FUJITSU TECHNOLOGY SOLUTIONS GMBH * * =========================================================================== */ #include <stdio.h> #include <inttypes.h> #include <libgen.h> #include <string.h> #include <unistd.h> #include <stdlib.h> #include <getopt.h> #include <stdint.h> #include <stdbool.h> #include <assert.h> #include <sys/mman.h> #include "arttree_structures.h" /* * search context */ struct search_ctx { struct pmem_context *pmem_ctx; unsigned char *search_key; int32_t hexdump; }; static struct search_ctx *s_ctx = NULL; struct search { const char *name; const char *brief; char *(*func)(char *, struct search_ctx *); void (*help)(char *); }; /* local functions */ static int search_parse_args(char *appname, int ac, char *av[], struct search_ctx *s_ctx); static struct search *get_search(char *type_name); static void print_usage(char *appname); static void dump_PMEMoid(char *prefix, PMEMoid *oid); static char *search_key(char *appname, struct search_ctx *ctx); static int leaf_matches(struct search_ctx *ctx, art_leaf *n, unsigned char *key, size_t key_len, int depth); static int check_prefix(art_node *an, unsigned char *key, int key_len, int depth); static uint64_t find_child(art_node *n, int node_type, unsigned char key); static void *get_node(struct search_ctx *ctx, int node_type, uint64_t off); static uint64_t get_offset_an(art_node_u *au); static void dump_PMEMoid(char *prefix, PMEMoid *oid); static void dump_art_tree_root(char *prefix, uint64_t off, void *p); /* global visible interface */ void arttree_search_help(char *appname); int arttree_search_func(char *appname, struct pmem_context 
*ctx, int ac, char *av[]); static const char *arttree_search_help_str = "Search for key in ART tree\n" "Arguments: <key>\n" " <key> key\n" ; static const struct option long_options[] = { {"hexdump", no_argument, NULL, 'x'}, {NULL, 0, NULL, 0 }, }; static struct search s_funcs[] = { { .name = "key", .brief = "search for key", .func = search_key, .help = NULL, } }; /* Simple inlined function */ static inline int min(int a, int b) { return (a < b) ? b : a; } /* * number of arttree examine commands */ #define COMMANDS_NUMBER (sizeof(s_funcs) / sizeof(s_funcs[0])) void arttree_search_help(char *appname) { printf("%s %s\n", appname, arttree_search_help_str); } int arttree_search_func(char *appname, struct pmem_context *ctx, int ac, char *av[]) { int errors = 0; struct search *s; char *value; value = NULL; if (ctx == NULL) { return -1; } if (s_ctx == NULL) { s_ctx = (struct search_ctx *)malloc(sizeof(struct search_ctx)); if (s_ctx == NULL) { return -1; } memset(s_ctx, 0, sizeof(struct search_ctx)); } if (ctx->art_tree_root_offset == 0) { fprintf(stderr, "search functions require knowledge" " about the art_tree_root.\n"); fprintf(stderr, "Use \"set_root <offset>\"" " to define where the \nart_tree_root object" " resides in the pmem file.\n"); errors++; } s_ctx->pmem_ctx = ctx; if (search_parse_args(appname, ac, av, s_ctx) != 0) { fprintf(stderr, "%s::%s: error parsing arguments\n", appname, __FUNCTION__); errors++; } if (!errors) { s = get_search("key"); if (s != NULL) { value = s->func(appname, s_ctx); } if (value != NULL) { printf("key [%s] found, value [%s]\n", s_ctx->search_key, value); } else { printf("key [%s] not found\n", s_ctx->search_key); } } if (s_ctx->search_key != NULL) { free(s_ctx->search_key); } free(s_ctx); return errors; } static int search_parse_args(char *appname, int ac, char *av[], struct search_ctx *s_ctx) { int ret = 0; int opt; optind = 0; while ((opt = getopt_long(ac, av, "x", long_options, NULL)) != -1) { switch (opt) { case 'x': s_ctx->hexdump = 1; break; default: print_usage(appname); ret = 1; } } if (ret == 0) { s_ctx->search_key = (unsigned char *)strdup(av[optind + 0]); } return ret; } static void print_usage(char *appname) { printf("%s: search <key>\n", appname); } /* * get_search -- returns command for specified command name */ static struct search * get_search(char *type_name) { if (type_name == NULL) { return NULL; } for (size_t i = 0; i < COMMANDS_NUMBER; i++) { if (strcmp(type_name, s_funcs[i].name) == 0) return &s_funcs[i]; } return NULL; } static void * get_node(struct search_ctx *ctx, int node_type, uint64_t off) { if (!VALID_NODE_TYPE(node_type)) return NULL; printf("%s at off 0x%" PRIx64 "\n", art_node_names[node_type], off); return ctx->pmem_ctx->addr + off; } static int leaf_matches(struct search_ctx *ctx, art_leaf *n, unsigned char *key, size_t key_len, int depth) { var_string *n_key; (void) depth; n_key = (var_string *)get_node(ctx, VAR_STRING, n->key.oid.off); if (n_key == NULL) return 1; // HACK for stupid null-terminated strings.... 
// else if (n_key->len != key_len) // ret = 1; if (n_key->len != key_len + 1) return 1; return memcmp(n_key->s, key, key_len); } static int check_prefix(art_node *n, unsigned char *key, int key_len, int depth) { int max_cmp = min(min(n->partial_len, MAX_PREFIX_LEN), key_len - depth); int idx; for (idx = 0; idx < max_cmp; idx++) { if (n->partial[idx] != key[depth + idx]) return idx; } return idx; } static uint64_t find_child(art_node *n, int node_type, unsigned char c) { int i; union { art_node4 *p1; art_node16 *p2; art_node48 *p3; art_node256 *p4; } p; printf("[%s] children %d search key %c [", art_node_names[node_type], n->num_children, c); switch (node_type) { case ART_NODE4: p.p1 = (art_node4 *)n; for (i = 0; i < n->num_children; i++) { printf("%c ", p.p1->keys[i]); if (p.p1->keys[i] == c) { printf("]\n"); return p.p1->children[i].oid.off; } } break; case ART_NODE16: p.p2 = (art_node16 *)n; for (i = 0; i < n->num_children; i++) { printf("%c ", p.p2->keys[i]); if (p.p2->keys[i] == c) { printf("]\n"); return p.p2->children[i].oid.off; } } break; case ART_NODE48: p.p3 = (art_node48 *)n; i = p.p3->keys[c]; printf("%d ", p.p3->keys[c]); if (i) { printf("]\n"); return p.p3->children[i - 1].oid.off; } break; case ART_NODE256: p.p4 = (art_node256 *)n; printf("0x%" PRIx64, p.p4->children[c].oid.off); if (p.p4->children[c].oid.off != 0) { printf("]\n"); return p.p4->children[c].oid.off; } break; default: abort(); } printf("]\n"); return 0; } static uint64_t get_offset_an(art_node_u *au) { uint64_t offset = 0; switch (au->art_node_type) { case ART_NODE4: offset = au->u.an4.oid.off; break; case ART_NODE16: offset = au->u.an16.oid.off; break; case ART_NODE48: offset = au->u.an48.oid.off; break; case ART_NODE256: offset = au->u.an256.oid.off; break; case ART_LEAF: offset = au->u.al.oid.off; break; default: break; } return offset; } static char * search_key(char *appname, struct search_ctx *ctx) { int errors = 0; void *p; /* something */ off_t p_off; art_node_u *p_au; /* art_node_u */ off_t p_au_off; void *p_an; /* specific art node from art_node_u */ off_t p_an_off; art_node *an; /* art node */ var_string *n_value; char *value; int prefix_len; int depth = 0; int key_len; uint64_t child_off; key_len = strlen((char *)(ctx->search_key)); value = NULL; p_off = ctx->pmem_ctx->art_tree_root_offset; p = get_node(ctx, ART_TREE_ROOT, p_off); assert(p != NULL); dump_art_tree_root("art_tree_root", p_off, p); p_au_off = ((art_tree_root *)p)->root.oid.off; p_au = (art_node_u *)get_node(ctx, ART_NODE_U, p_au_off); if (p_au == NULL) errors++; if (!errors) { while (p_au) { p_an_off = get_offset_an(p_au); p_an = get_node(ctx, p_au->art_node_type, p_an_off); assert(p_an != NULL); if (p_au->art_node_type == ART_LEAF) { if (!leaf_matches(ctx, (art_leaf *)p_an, ctx->search_key, key_len, depth)) { n_value = (var_string *) get_node(ctx, VAR_STRING, ((art_leaf *)p_an)->value.oid.off); return (char *)(n_value->s); } } an = (art_node *)p_an; if (an->partial_len) { prefix_len = check_prefix(an, ctx->search_key, key_len, depth); if (prefix_len != min(MAX_PREFIX_LEN, an->partial_len)) { return NULL; } depth = depth + an->partial_len; } child_off = find_child(an, p_au->art_node_type, ctx->search_key[depth]); if (child_off != 0) { p_au_off = child_off; p_au = get_node(ctx, ART_NODE_U, p_au_off); } else { p_au = NULL; } depth++; } } if (errors) { return NULL; } else { return value; } } static void dump_art_tree_root(char *prefix, uint64_t off, void *p) { art_tree_root *tree_root; tree_root = (art_tree_root *)p; printf("at offset 
0x%" PRIx64 ", art_tree_root {\n", off); printf(" size %d\n", tree_root->size); dump_PMEMoid(" art_node_u", (PMEMoid *)&(tree_root->root)); printf("\n};\n"); } static void dump_PMEMoid(char *prefix, PMEMoid *oid) { printf("%s { PMEMoid pool_uuid_lo %" PRIx64 " off 0x%" PRIx64 " = %" PRId64 " }\n", prefix, oid->pool_uuid_lo, oid->off, oid->off); }
file_length: 11,265 | avg_line_length: 22.717895 | max_line_length: 80 | extension_type: c
null
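The search loop above prunes a branch as soon as the stored prefix diverges from the search key. Below is a standalone sketch of that comparison (min_int and prefix_match_len are illustrative names); note that the helper has to return the smaller of its arguments, whereas the min() defined in this file returns the larger one.

#include <stdint.h>

#define MAX_PREFIX_LEN 10

/* smaller of two ints; check_prefix() relies on this being a true minimum */
static inline int
min_int(int a, int b)
{
	return (a < b) ? a : b;
}

/*
 * Count how many leading bytes of a node's partial prefix match the search
 * key at the given depth; mirrors check_prefix() above.
 */
static int
prefix_match_len(const unsigned char *partial, uint32_t partial_len,
    const unsigned char *key, int key_len, int depth)
{
	int max_cmp = min_int(min_int((int)partial_len, MAX_PREFIX_LEN),
	    key_len - depth);
	int idx;

	for (idx = 0; idx < max_cmp; idx++) {
		if (partial[idx] != key[depth + idx])
			return idx;
	}
	return idx;
}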
NearPMSW-main/nearpm/shadow/pmdk-sd/src/examples/libpmemobj/libart/arttree.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2017, Intel Corporation */ /* * Copyright 2016, FUJITSU TECHNOLOGY SOLUTIONS GMBH * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * =========================================================================== * * Filename: arttree.h * * Description: header file for art tree on pmem implementation * * Author: Andreas Bluemle, Dieter Kasper * Andreas.Bluemle.external@ts.fujitsu.com * dieter.kasper@ts.fujitsu.com * * Organization: FUJITSU TECHNOLOGY SOLUTIONS GMBH * * =========================================================================== */ #ifndef _ARTTREE_H #define _ARTTREE_H #ifdef __cplusplus extern "C" { #endif #include "art.h" #ifdef __cplusplus } #endif #endif /* _ARTTREE_H */
file_length: 2,337 | avg_line_length: 34.969231 | max_line_length: 78 | extension_type: h
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/examples/libpmemobj/linkedlist/pmemobj_list.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2016, Intel Corporation */ /* * pmemobj_list.h -- macro definitions for persistent * singly linked list and tail queue */ #ifndef PMEMOBJ_LISTS_H #define PMEMOBJ_LISTS_H #include <libpmemobj.h> /* * This file defines two types of persistent data structures: * singly-linked lists and tail queue. * * All macros defined in this file must be used within libpmemobj * transactional API. Following snippet presents example of usage: * * TX_BEGIN(pop) { * POBJ_TAILQ_INIT(head); * } TX_ONABORT { * abort(); * } TX_END * * SLIST TAILQ * _HEAD + + * _ENTRY + + * _INIT + + * _EMPTY + + * _FIRST + + * _NEXT + + * _PREV - + * _LAST - + * _FOREACH + + * _FOREACH_REVERSE - + * _INSERT_HEAD + + * _INSERT_BEFORE - + * _INSERT_AFTER + + * _INSERT_TAIL - + * _MOVE_ELEMENT_HEAD - + * _MOVE_ELEMENT_TAIL - + * _REMOVE_HEAD + - * _REMOVE + + * _REMOVE_FREE + + * _SWAP_HEAD_TAIL - + */ /* * Singly-linked List definitions. */ #define POBJ_SLIST_HEAD(name, type)\ struct name {\ TOID(type) pe_first;\ } #define POBJ_SLIST_ENTRY(type)\ struct {\ TOID(type) pe_next;\ } /* * Singly-linked List access methods. */ #define POBJ_SLIST_EMPTY(head) (TOID_IS_NULL((head)->pe_first)) #define POBJ_SLIST_FIRST(head) ((head)->pe_first) #define POBJ_SLIST_NEXT(elm, field) (D_RO(elm)->field.pe_next) /* * Singly-linked List functions. */ #define POBJ_SLIST_INIT(head) do {\ TX_ADD_DIRECT(&(head)->pe_first);\ TOID_ASSIGN((head)->pe_first, OID_NULL);\ } while (0) #define POBJ_SLIST_INSERT_HEAD(head, elm, field) do {\ TOID_TYPEOF(elm) *elm_ptr = D_RW(elm);\ TX_ADD_DIRECT(&elm_ptr->field.pe_next);\ elm_ptr->field.pe_next = (head)->pe_first;\ TX_SET_DIRECT(head, pe_first, elm);\ } while (0) #define POBJ_SLIST_INSERT_AFTER(slistelm, elm, field) do {\ TOID_TYPEOF(slistelm) *slistelm_ptr = D_RW(slistelm);\ TOID_TYPEOF(elm) *elm_ptr = D_RW(elm);\ TX_ADD_DIRECT(&elm_ptr->field.pe_next);\ elm_ptr->field.pe_next = slistelm_ptr->field.pe_next;\ TX_ADD_DIRECT(&slistelm_ptr->field.pe_next);\ slistelm_ptr->field.pe_next = elm;\ } while (0) #define POBJ_SLIST_REMOVE_HEAD(head, field) do {\ TX_ADD_DIRECT(&(head)->pe_first);\ (head)->pe_first = D_RO((head)->pe_first)->field.pe_next;\ } while (0) #define POBJ_SLIST_REMOVE(head, elm, field) do {\ if (TOID_EQUALS((head)->pe_first, elm)) {\ POBJ_SLIST_REMOVE_HEAD(head, field);\ } else {\ TOID_TYPEOF(elm) *curelm_ptr = D_RW((head)->pe_first);\ while (!TOID_EQUALS(curelm_ptr->field.pe_next, elm))\ curelm_ptr = D_RW(curelm_ptr->field.pe_next);\ TX_ADD_DIRECT(&curelm_ptr->field.pe_next);\ curelm_ptr->field.pe_next = D_RO(elm)->field.pe_next;\ }\ } while (0) #define POBJ_SLIST_REMOVE_FREE(head, elm, field) do {\ POBJ_SLIST_REMOVE(head, elm, field);\ TX_FREE(elm);\ } while (0) #define POBJ_SLIST_FOREACH(var, head, field)\ for ((var) = POBJ_SLIST_FIRST(head);\ !TOID_IS_NULL(var);\ var = POBJ_SLIST_NEXT(var, field)) /* * Tail-queue definitions. */ #define POBJ_TAILQ_ENTRY(type)\ struct {\ TOID(type) pe_next;\ TOID(type) pe_prev;\ } #define POBJ_TAILQ_HEAD(name, type)\ struct name {\ TOID(type) pe_first;\ TOID(type) pe_last;\ } /* * Tail-queue access methods. */ #define POBJ_TAILQ_FIRST(head) ((head)->pe_first) #define POBJ_TAILQ_LAST(head) ((head)->pe_last) #define POBJ_TAILQ_EMPTY(head) (TOID_IS_NULL((head)->pe_first)) #define POBJ_TAILQ_NEXT(elm, field) (D_RO(elm)->field.pe_next) #define POBJ_TAILQ_PREV(elm, field) (D_RO(elm)->field.pe_prev) /* * Tail-queue List internal methods. 
*/ #define _POBJ_SWAP_PTR(elm, field) do {\ TOID_TYPEOF(elm) *elm_ptr = D_RW(elm);\ TX_ADD_DIRECT(&elm_ptr->field);\ __typeof__(elm) temp = elm_ptr->field.pe_prev;\ elm_ptr->field.pe_prev = elm_ptr->field.pe_next;\ elm_ptr->field.pe_next = temp;\ } while (0) /* * Tail-queue functions. */ #define POBJ_TAILQ_SWAP_HEAD_TAIL(head, field) do {\ __typeof__((head)->pe_first) temp = (head)->pe_first;\ TX_ADD_DIRECT(head);\ (head)->pe_first = (head)->pe_last;\ (head)->pe_last = temp;\ } while (0) #define POBJ_TAILQ_FOREACH(var, head, field)\ for ((var) = POBJ_TAILQ_FIRST(head);\ !TOID_IS_NULL(var);\ var = POBJ_TAILQ_NEXT(var, field)) #define POBJ_TAILQ_FOREACH_REVERSE(var, head, field)\ for ((var) = POBJ_TAILQ_LAST(head);\ !TOID_IS_NULL(var);\ var = POBJ_TAILQ_PREV(var, field)) #define POBJ_TAILQ_INIT(head) do {\ TX_ADD_FIELD_DIRECT(head, pe_first);\ TOID_ASSIGN((head)->pe_first, OID_NULL);\ TX_ADD_FIELD_DIRECT(head, pe_last);\ TOID_ASSIGN((head)->pe_last, OID_NULL);\ } while (0) #define POBJ_TAILQ_INSERT_HEAD(head, elm, field) do {\ TOID_TYPEOF(elm) *elm_ptr = D_RW(elm);\ if (TOID_IS_NULL((head)->pe_first)) {\ TX_ADD_DIRECT(&elm_ptr->field);\ elm_ptr->field.pe_prev = (head)->pe_first;\ elm_ptr->field.pe_next = (head)->pe_first;\ TX_ADD_DIRECT(head);\ (head)->pe_first = elm;\ (head)->pe_last = elm;\ } else {\ TOID_TYPEOF(elm) *first = D_RW((head)->pe_first);\ TX_ADD_DIRECT(&elm_ptr->field);\ elm_ptr->field.pe_next = (head)->pe_first;\ elm_ptr->field.pe_prev = first->field.pe_prev;\ TX_ADD_DIRECT(&first->field.pe_prev);\ first->field.pe_prev = elm;\ TX_SET_DIRECT(head, pe_first, elm);\ }\ } while (0) #define POBJ_TAILQ_INSERT_TAIL(head, elm, field) do {\ TOID_TYPEOF(elm) *elm_ptr = D_RW(elm);\ if (TOID_IS_NULL((head)->pe_last)) {\ TX_ADD_DIRECT(&elm_ptr->field);\ elm_ptr->field.pe_prev = (head)->pe_last;\ elm_ptr->field.pe_next = (head)->pe_last;\ TX_ADD_DIRECT(head);\ (head)->pe_first = elm;\ (head)->pe_last = elm;\ } else {\ TOID_TYPEOF(elm) *last = D_RW((head)->pe_last);\ TX_ADD_DIRECT(&elm_ptr->field);\ elm_ptr->field.pe_prev = (head)->pe_last;\ elm_ptr->field.pe_next = last->field.pe_next;\ TX_ADD_DIRECT(&last->field.pe_next);\ last->field.pe_next = elm;\ TX_SET_DIRECT(head, pe_last, elm);\ }\ } while (0) #define POBJ_TAILQ_INSERT_AFTER(listelm, elm, field) do {\ TOID_TYPEOF(elm) *elm_ptr = D_RW(elm);\ TOID_TYPEOF(listelm) *listelm_ptr = D_RW(listelm);\ TX_ADD_DIRECT(&elm_ptr->field);\ elm_ptr->field.pe_prev = listelm;\ elm_ptr->field.pe_next = listelm_ptr->field.pe_next;\ if (TOID_IS_NULL(listelm_ptr->field.pe_next)) {\ TX_SET_DIRECT(head, pe_last, elm);\ } else {\ TOID_TYPEOF(elm) *next = D_RW(listelm_ptr->field.pe_next);\ TX_ADD_DIRECT(&next->field.pe_prev);\ next->field.pe_prev = elm;\ }\ TX_ADD_DIRECT(&listelm_ptr->field.pe_next);\ listelm_ptr->field.pe_next = elm;\ } while (0) #define POBJ_TAILQ_INSERT_BEFORE(listelm, elm, field) do {\ TOID_TYPEOF(elm) *elm_ptr = D_RW(elm);\ TOID_TYPEOF(listelm) *listelm_ptr = D_RW(listelm);\ TX_ADD_DIRECT(&elm_ptr->field);\ elm_ptr->field.pe_next = listelm;\ elm_ptr->field.pe_prev = listelm_ptr->field.pe_prev;\ if (TOID_IS_NULL(listelm_ptr->field.pe_prev)) {\ TX_SET_DIRECT(head, pe_first, elm);\ } else {\ TOID_TYPEOF(elm) *prev = D_RW(listelm_ptr->field.pe_prev);\ TX_ADD_DIRECT(&prev->field.pe_next);\ prev->field.pe_next = elm; \ }\ TX_ADD_DIRECT(&listelm_ptr->field.pe_prev);\ listelm_ptr->field.pe_prev = elm;\ } while (0) #define POBJ_TAILQ_REMOVE(head, elm, field) do {\ TOID_TYPEOF(elm) *elm_ptr = D_RW(elm);\ if (TOID_IS_NULL(elm_ptr->field.pe_prev) 
&&\ TOID_IS_NULL(elm_ptr->field.pe_next)) {\ TX_ADD_DIRECT(head);\ (head)->pe_first = elm_ptr->field.pe_prev;\ (head)->pe_last = elm_ptr->field.pe_next;\ } else {\ if (TOID_IS_NULL(elm_ptr->field.pe_prev)) {\ TX_SET_DIRECT(head, pe_first, elm_ptr->field.pe_next);\ TOID_TYPEOF(elm) *next = D_RW(elm_ptr->field.pe_next);\ TX_ADD_DIRECT(&next->field.pe_prev);\ next->field.pe_prev = elm_ptr->field.pe_prev;\ } else {\ TOID_TYPEOF(elm) *prev = D_RW(elm_ptr->field.pe_prev);\ TX_ADD_DIRECT(&prev->field.pe_next);\ prev->field.pe_next = elm_ptr->field.pe_next;\ }\ if (TOID_IS_NULL(elm_ptr->field.pe_next)) {\ TX_SET_DIRECT(head, pe_last, elm_ptr->field.pe_prev);\ TOID_TYPEOF(elm) *prev = D_RW(elm_ptr->field.pe_prev);\ TX_ADD_DIRECT(&prev->field.pe_next);\ prev->field.pe_next = elm_ptr->field.pe_next;\ } else {\ TOID_TYPEOF(elm) *next = D_RW(elm_ptr->field.pe_next);\ TX_ADD_DIRECT(&next->field.pe_prev);\ next->field.pe_prev = elm_ptr->field.pe_prev;\ }\ }\ } while (0) #define POBJ_TAILQ_REMOVE_FREE(head, elm, field) do {\ POBJ_TAILQ_REMOVE(head, elm, field);\ TX_FREE(elm);\ } while (0) /* * 2 cases: only two elements, the rest possibilities * including that elm is the last one */ #define POBJ_TAILQ_MOVE_ELEMENT_HEAD(head, elm, field) do {\ TOID_TYPEOF(elm) *elm_ptr = D_RW(elm);\ if (TOID_EQUALS((head)->pe_last, elm) &&\ TOID_EQUALS(D_RO((head)->pe_first)->field.pe_next, elm)) {\ _POBJ_SWAP_PTR(elm, field);\ _POBJ_SWAP_PTR((head)->pe_first, field);\ POBJ_TAILQ_SWAP_HEAD_TAIL(head, field);\ } else {\ TOID_TYPEOF(elm) *prev = D_RW(elm_ptr->field.pe_prev);\ TX_ADD_DIRECT(&prev->field.pe_next);\ prev->field.pe_next = elm_ptr->field.pe_next;\ if (TOID_EQUALS((head)->pe_last, elm)) {\ TX_SET_DIRECT(head, pe_last, elm_ptr->field.pe_prev);\ } else {\ TOID_TYPEOF(elm) *next = D_RW(elm_ptr->field.pe_next);\ TX_ADD_DIRECT(&next->field.pe_prev);\ next->field.pe_prev = elm_ptr->field.pe_prev;\ }\ TX_ADD_DIRECT(&elm_ptr->field);\ elm_ptr->field.pe_prev = D_RO((head)->pe_first)->field.pe_prev;\ elm_ptr->field.pe_next = (head)->pe_first;\ TOID_TYPEOF(elm) *first = D_RW((head)->pe_first);\ TX_ADD_DIRECT(&first->field.pe_prev);\ first->field.pe_prev = elm;\ TX_SET_DIRECT(head, pe_first, elm);\ }\ } while (0) #define POBJ_TAILQ_MOVE_ELEMENT_TAIL(head, elm, field) do {\ TOID_TYPEOF(elm) *elm_ptr = D_RW(elm);\ if (TOID_EQUALS((head)->pe_first, elm) &&\ TOID_EQUALS(D_RO((head)->pe_last)->field.pe_prev, elm)) {\ _POBJ_SWAP_PTR(elm, field);\ _POBJ_SWAP_PTR((head)->pe_last, field);\ POBJ_TAILQ_SWAP_HEAD_TAIL(head, field);\ } else {\ TOID_TYPEOF(elm) *next = D_RW(elm_ptr->field.pe_next);\ TX_ADD_DIRECT(&next->field.pe_prev);\ next->field.pe_prev = elm_ptr->field.pe_prev;\ if (TOID_EQUALS((head)->pe_first, elm)) {\ TX_SET_DIRECT(head, pe_first, elm_ptr->field.pe_next);\ } else { \ TOID_TYPEOF(elm) *prev = D_RW(elm_ptr->field.pe_prev);\ TX_ADD_DIRECT(&prev->field.pe_next);\ prev->field.pe_next = elm_ptr->field.pe_next;\ }\ TX_ADD_DIRECT(&elm_ptr->field);\ elm_ptr->field.pe_prev = (head)->pe_last;\ elm_ptr->field.pe_next = D_RO((head)->pe_last)->field.pe_next;\ __typeof__(elm_ptr) last = D_RW((head)->pe_last);\ TX_ADD_DIRECT(&last->field.pe_next);\ last->field.pe_next = elm;\ TX_SET_DIRECT(head, pe_last, elm);\ } \ } while (0) #endif /* PMEMOBJ_LISTS_H */
file_length: 11,243 | avg_line_length: 30.762712 | max_line_length: 66 | extension_type: h
null
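The header's own snippet only shows POBJ_TAILQ_INIT, and fifo.c below exercises the tail queue; a minimal sketch of the singly-linked-list side could look like the following (layout name and pool path are placeholders), with every list macro issued inside a transaction as the comment above requires.

#include <stdio.h>
#include <libpmemobj.h>
#include "pmemobj_list.h"

POBJ_LAYOUT_BEGIN(slist_example);
POBJ_LAYOUT_ROOT(slist_example, struct slist_root);
POBJ_LAYOUT_TOID(slist_example, struct snode);
POBJ_LAYOUT_END(slist_example);

POBJ_SLIST_HEAD(snodehead, struct snode);

struct snode {
	int data;
	POBJ_SLIST_ENTRY(struct snode) entry;
};

struct slist_root {
	struct snodehead head;
};

int
main(void)
{
	/* hypothetical pool path */
	PMEMobjpool *pop = pmemobj_create("/mnt/pmem/slist",
	    POBJ_LAYOUT_NAME(slist_example), PMEMOBJ_MIN_POOL, 0666);
	if (pop == NULL)
		return 1;

	TOID(struct slist_root) root = POBJ_ROOT(pop, struct slist_root);
	struct snodehead *head = &D_RW(root)->head;
	TOID(struct snode) node;

	/* all pmemobj_list.h macros must run inside a libpmemobj transaction */
	TX_BEGIN(pop) {
		POBJ_SLIST_INIT(head);
		node = TX_NEW(struct snode);
		D_RW(node)->data = 42;
		POBJ_SLIST_INSERT_HEAD(head, node, entry);
	} TX_END

	POBJ_SLIST_FOREACH(node, head, entry)
		printf("%d\n", D_RO(node)->data);

	pmemobj_close(pop);
	return 0;
}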
NearPMSW-main/nearpm/shadow/pmdk-sd/src/examples/libpmemobj/linkedlist/fifo.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2016, Intel Corporation */ /* * fifo.c - example of tail queue usage */ #include <ex_common.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include "pmemobj_list.h" POBJ_LAYOUT_BEGIN(list); POBJ_LAYOUT_ROOT(list, struct fifo_root); POBJ_LAYOUT_TOID(list, struct tqnode); POBJ_LAYOUT_END(list); POBJ_TAILQ_HEAD(tqueuehead, struct tqnode); struct fifo_root { struct tqueuehead head; }; struct tqnode { char data; POBJ_TAILQ_ENTRY(struct tqnode) tnd; }; static void print_help(void) { printf("usage: fifo <pool> <option> [<type>]\n"); printf("\tAvailable options:\n"); printf("\tinsert, <character> Insert character into FIFO\n"); printf("\tremove, Remove element from FIFO\n"); printf("\tprint, Print all FIFO elements\n"); } int main(int argc, const char *argv[]) { PMEMobjpool *pop; const char *path; if (argc < 3) { print_help(); return 0; } path = argv[1]; if (file_exists(path) != 0) { if ((pop = pmemobj_create(path, POBJ_LAYOUT_NAME(list), PMEMOBJ_MIN_POOL, 0666)) == NULL) { perror("failed to create pool\n"); return -1; } } else { if ((pop = pmemobj_open(path, POBJ_LAYOUT_NAME(list))) == NULL) { perror("failed to open pool\n"); return -1; } } TOID(struct fifo_root) root = POBJ_ROOT(pop, struct fifo_root); struct tqueuehead *tqhead = &D_RW(root)->head; TOID(struct tqnode) node; double totaltime = 0; if (strcmp(argv[2], "insert") == 0) { if (argc == 4) { totaltime = 0; for(int i=0;i<1000000;i++){ clock_t start, end; start = clock(); TX_BEGIN(pop) { node = TX_NEW(struct tqnode); D_RW(node)->data = *argv[3]; POBJ_TAILQ_INSERT_HEAD(tqhead, node, tnd); } TX_ONABORT { abort(); } TX_END end = clock(); totaltime += ((double) (end - start)) / CLOCKS_PER_SEC; } printf("TX/s = %f %f\n",1000000/totaltime, totaltime); printf("Added %c to FIFO\n", *argv[3]); } else { print_help(); } } else if (strcmp(argv[2], "remove") == 0) { totaltime = 0; for(int i=0;i<1000000;i++){ clock_t start, end; start = clock(); if (POBJ_TAILQ_EMPTY(tqhead)) { printf("FIFO is empty\n"); } else { node = POBJ_TAILQ_LAST(tqhead); TX_BEGIN(pop) { POBJ_TAILQ_REMOVE_FREE(tqhead, node, tnd); } TX_ONABORT { abort(); } TX_END printf("Removed element from FIFO\n"); } end = clock(); totaltime += ((double) (end - start)) / CLOCKS_PER_SEC; } printf("TX/s = %f %f\n",1000000/totaltime, totaltime); } else if (strcmp(argv[2], "print") == 0) { printf("Elements in FIFO:\n"); POBJ_TAILQ_FOREACH(node, tqhead, tnd) { printf("%c\t", D_RO(node)->data); } printf("\n"); } else { print_help(); } pmemobj_close(pop); return 0; }
file_length: 2,782 | avg_line_length: 21.264 | max_line_length: 64 | extension_type: c
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/examples/libpmemobj/map/map_hashmap_tx.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2017, Intel Corporation */ /* * map_hashmap_tx.c -- common interface for maps */ #include <map.h> #include <hashmap_tx.h> #include "map_hashmap_tx.h" /* * map_hm_tx_check -- wrapper for hm_tx_check */ static int map_hm_tx_check(PMEMobjpool *pop, TOID(struct map) map) { TOID(struct hashmap_tx) hashmap_tx; TOID_ASSIGN(hashmap_tx, map.oid); return hm_tx_check(pop, hashmap_tx); } /* * map_hm_tx_count -- wrapper for hm_tx_count */ static size_t map_hm_tx_count(PMEMobjpool *pop, TOID(struct map) map) { TOID(struct hashmap_tx) hashmap_tx; TOID_ASSIGN(hashmap_tx, map.oid); return hm_tx_count(pop, hashmap_tx); } /* * map_hm_tx_init -- wrapper for hm_tx_init */ static int map_hm_tx_init(PMEMobjpool *pop, TOID(struct map) map) { TOID(struct hashmap_tx) hashmap_tx; TOID_ASSIGN(hashmap_tx, map.oid); return hm_tx_init(pop, hashmap_tx); } /* * map_hm_tx_create -- wrapper for hm_tx_create */ static int map_hm_tx_create(PMEMobjpool *pop, TOID(struct map) *map, void *arg) { TOID(struct hashmap_tx) *hashmap_tx = (TOID(struct hashmap_tx) *)map; return hm_tx_create(pop, hashmap_tx, arg); } /* * map_hm_tx_insert -- wrapper for hm_tx_insert */ static int map_hm_tx_insert(PMEMobjpool *pop, TOID(struct map) map, uint64_t key, PMEMoid value) { TOID(struct hashmap_tx) hashmap_tx; TOID_ASSIGN(hashmap_tx, map.oid); return hm_tx_insert(pop, hashmap_tx, key, value); } /* * map_hm_tx_remove -- wrapper for hm_tx_remove */ static PMEMoid map_hm_tx_remove(PMEMobjpool *pop, TOID(struct map) map, uint64_t key) { TOID(struct hashmap_tx) hashmap_tx; TOID_ASSIGN(hashmap_tx, map.oid); return hm_tx_remove(pop, hashmap_tx, key); } /* * map_hm_tx_get -- wrapper for hm_tx_get */ static PMEMoid map_hm_tx_get(PMEMobjpool *pop, TOID(struct map) map, uint64_t key) { TOID(struct hashmap_tx) hashmap_tx; TOID_ASSIGN(hashmap_tx, map.oid); return hm_tx_get(pop, hashmap_tx, key); } /* * map_hm_tx_lookup -- wrapper for hm_tx_lookup */ static int map_hm_tx_lookup(PMEMobjpool *pop, TOID(struct map) map, uint64_t key) { TOID(struct hashmap_tx) hashmap_tx; TOID_ASSIGN(hashmap_tx, map.oid); return hm_tx_lookup(pop, hashmap_tx, key); } /* * map_hm_tx_foreach -- wrapper for hm_tx_foreach */ static int map_hm_tx_foreach(PMEMobjpool *pop, TOID(struct map) map, int (*cb)(uint64_t key, PMEMoid value, void *arg), void *arg) { TOID(struct hashmap_tx) hashmap_tx; TOID_ASSIGN(hashmap_tx, map.oid); return hm_tx_foreach(pop, hashmap_tx, cb, arg); } /* * map_hm_tx_cmd -- wrapper for hm_tx_cmd */ static int map_hm_tx_cmd(PMEMobjpool *pop, TOID(struct map) map, unsigned cmd, uint64_t arg) { TOID(struct hashmap_tx) hashmap_tx; TOID_ASSIGN(hashmap_tx, map.oid); return hm_tx_cmd(pop, hashmap_tx, cmd, arg); } struct map_ops hashmap_tx_ops = { /* .check = */ map_hm_tx_check, /* .create = */ map_hm_tx_create, /* .delete = */ NULL, /* .init = */ map_hm_tx_init, /* .insert = */ map_hm_tx_insert, /* .insert_new = */ NULL, /* .remove = */ map_hm_tx_remove, /* .remove_free = */ NULL, /* .clear = */ NULL, /* .get = */ map_hm_tx_get, /* .lookup = */ map_hm_tx_lookup, /* .foreach = */ map_hm_tx_foreach, /* .is_empty = */ NULL, /* .count = */ map_hm_tx_count, /* .cmd = */ map_hm_tx_cmd, };
file_length: 3,316 | avg_line_length: 20.966887 | max_line_length: 70 | extension_type: c
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/examples/libpmemobj/map/kv_server.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2018, Intel Corporation */ /* * kv_server.c -- persistent tcp key-value store server */ #include <uv.h> #include <assert.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <unistd.h> #include "libpmemobj.h" #include "map.h" #include "map_ctree.h" #include "map_btree.h" #include "map_rtree.h" #include "map_rbtree.h" #include "map_hashmap_atomic.h" #include "map_hashmap_tx.h" #include "map_hashmap_rp.h" #include "map_skiplist.h" #include "kv_protocol.h" #define COUNT_OF(x) (sizeof(x) / sizeof(0[x])) #define COMPILE_ERROR_ON(cond) ((void)sizeof(char[(cond) ? -1 : 1])) int use_ndp_redo = 0; POBJ_LAYOUT_BEGIN(kv_server); POBJ_LAYOUT_ROOT(kv_server, struct root); POBJ_LAYOUT_TOID(kv_server, struct map_value); POBJ_LAYOUT_TOID(kv_server, uint64_t); POBJ_LAYOUT_END(kv_server); struct map_value { uint64_t len; char buf[]; }; struct root { TOID(struct map) map; }; static struct map_ctx *mapc; static PMEMobjpool *pop; static TOID(struct map) map; static uv_tcp_t server; static uv_loop_t *loop; typedef int (*msg_handler)(uv_stream_t *client, const char *msg, size_t len); struct write_req { uv_write_t req; uv_buf_t buf; }; struct client_data { char *buf; /* current message, always NULL terminated */ size_t buf_len; /* sizeof(buf) */ size_t len; /* actual length of the message (while parsing) */ }; /* * djb2_hash -- string hashing function by Dan Bernstein */ static uint32_t djb2_hash(const char *str) { uint32_t hash = 5381; int c; while ((c = *str++)) hash = ((hash << 5) + hash) + c; return hash; } /* * write_done_cb -- callback after message write completes */ static void write_done_cb(uv_write_t *req, int status) { struct write_req *wr = (struct write_req *)req; free(wr); if (status == -1) { printf("response failed"); } } /* * client_close_cb -- callback after client tcp connection closes */ static void client_close_cb(uv_handle_t *handle) { struct client_data *d = handle->data; free(d->buf); free(handle->data); free(handle); } /* * response_write -- response writing helper */ static void response_write(uv_stream_t *client, char *resp, size_t len) { struct write_req *wr = malloc(sizeof(struct write_req)); assert(wr != NULL); wr->buf = uv_buf_init(resp, len); uv_write(&wr->req, client, &wr->buf, 1, write_done_cb); } /* * response_msg -- predefined message writing helper */ static void response_msg(uv_stream_t *client, enum resp_messages msg) { response_write(client, (char *)resp_msg[msg], strlen(resp_msg[msg])); } /* * cmsg_insert_handler -- handler of INSERT client message */ static int cmsg_insert_handler(uv_stream_t *client, const char *msg, size_t len) { int result = 0; TX_BEGIN(pop) { /* * For simplicity sake the length of the value buffer is just * a length of the message. 
*/ TOID(struct map_value) val = TX_ZALLOC(struct map_value, sizeof(struct map_value) + len); char key[MAX_KEY_LEN]; int ret = sscanf(msg, "INSERT %254s %s\n", key, D_RW(val)->buf); assert(ret == 2); D_RW(val)->len = len; /* properly terminate the value */ D_RW(val)->buf[strlen(D_RO(val)->buf)] = '\n'; map_insert(mapc, map, djb2_hash(key), val.oid); } TX_ONABORT { result = 1; } TX_END response_msg(client, result); return 0; } /* * cmsg_remove_handler -- handler of REMOVE client message */ static int cmsg_remove_handler(uv_stream_t *client, const char *msg, size_t len) { char key[MAX_KEY_LEN] = {0}; /* check if the constant used in sscanf() below has the correct value */ COMPILE_ERROR_ON(MAX_KEY_LEN - 1 != 254); int ret = sscanf(msg, "REMOVE %254s\n", key); assert(ret == 1); int result = map_remove_free(mapc, map, djb2_hash(key)); response_msg(client, result); return 0; } /* * cmsg_get_handler -- handler of GET client message */ static int cmsg_get_handler(uv_stream_t *client, const char *msg, size_t len) { char key[MAX_KEY_LEN]; /* check if the constant used in sscanf() below has the correct value */ COMPILE_ERROR_ON(MAX_KEY_LEN - 1 != 254); int ret = sscanf(msg, "GET %254s\n", key); assert(ret == 1); TOID(struct map_value) value; TOID_ASSIGN(value, map_get(mapc, map, djb2_hash(key))); if (TOID_IS_NULL(value)) { response_msg(client, RESP_MSG_NULL); } else { response_write(client, D_RW(value)->buf, D_RO(value)->len); } return 0; } /* * cmsg_bye_handler -- handler of BYE client message */ static int cmsg_bye_handler(uv_stream_t *client, const char *msg, size_t len) { uv_close((uv_handle_t *)client, client_close_cb); return 0; } /* * cmsg_bye_handler -- handler of KILL client message */ static int cmsg_kill_handler(uv_stream_t *client, const char *msg, size_t len) { uv_close((uv_handle_t *)client, client_close_cb); uv_close((uv_handle_t *)&server, NULL); return 0; } /* kv protocol implementation */ static msg_handler protocol_impl[MAX_CMSG] = { cmsg_insert_handler, cmsg_remove_handler, cmsg_get_handler, cmsg_bye_handler, cmsg_kill_handler }; /* * cmsg_handle -- handles current client message */ static int cmsg_handle(uv_stream_t *client, struct client_data *data) { int ret = 0; int i; for (i = 0; i < MAX_CMSG; ++i) if (strncmp(kv_cmsg_token[i], data->buf, strlen(kv_cmsg_token[i])) == 0) break; if (i == MAX_CMSG) { response_msg(client, RESP_MSG_UNKNOWN); } else { ret = protocol_impl[i](client, data->buf, data->len); } data->len = 0; /* reset the message length */ return ret; } /* * cmsg_handle_stream -- handle incoming tcp stream from clients */ static int cmsg_handle_stream(uv_stream_t *client, struct client_data *data, const char *buf, ssize_t nread) { char *last; int ret; size_t len; /* * A single read operation can contain zero or more operations, so this * has to be handled appropriately. Client messages are terminated by * newline character. 
*/ while ((last = memchr(buf, '\n', nread)) != NULL) { len = last - buf + 1; nread -= len; assert(data->len + len <= data->buf_len); memcpy(data->buf + data->len, buf, len); data->len += len; if ((ret = cmsg_handle(client, data)) != 0) return ret; buf = last + 1; } if (nread != 0) { memcpy(data->buf + data->len, buf, nread); data->len += nread; } return 0; } static uv_buf_t msg_buf = {0}; /* * get_read_buf_cb -- returns buffer for incoming client message */ static void get_read_buf_cb(uv_handle_t *handle, size_t size, uv_buf_t *buf) { buf->base = msg_buf.base; buf->len = msg_buf.len; } /* * read_cb -- async tcp read from clients */ static void read_cb(uv_stream_t *client, ssize_t nread, const uv_buf_t *buf) { if (nread <= 0) { printf("client connection closed\n"); uv_close((uv_handle_t *)client, client_close_cb); return; } struct client_data *d = client->data; if (d->buf_len < (d->len + nread + 1)) { char *cbuf = realloc(d->buf, d->buf_len + nread + 1); assert(cbuf != NULL); /* zero only the new memory */ memset(cbuf + d->buf_len, 0, nread + 1); d->buf_len += nread + 1; d->buf = cbuf; } if (cmsg_handle_stream(client, client->data, buf->base, nread)) { printf("client disconnect\n"); uv_close((uv_handle_t *)client, client_close_cb); } } /* * connection_cb -- async incoming client request */ static void connection_cb(uv_stream_t *server, int status) { if (status != 0) { printf("client connect error\n"); return; } printf("new client\n"); uv_tcp_t *client = malloc(sizeof(uv_tcp_t)); assert(client != NULL); client->data = calloc(1, sizeof(struct client_data)); assert(client->data != NULL); uv_tcp_init(loop, client); if (uv_accept(server, (uv_stream_t *)client) == 0) { uv_read_start((uv_stream_t *)client, get_read_buf_cb, read_cb); } else { uv_close((uv_handle_t *)client, client_close_cb); } } static const struct { struct map_ops *ops; const char *name; } maps[] = { {MAP_HASHMAP_TX, "hashmap_tx"}, {MAP_HASHMAP_ATOMIC, "hashmap_atomic"}, {MAP_HASHMAP_RP, "hashmap_rp"}, {MAP_CTREE, "ctree"}, {MAP_BTREE, "btree"}, {MAP_RTREE, "rtree"}, {MAP_RBTREE, "rbtree"}, {MAP_SKIPLIST, "skiplist"} }; /* * get_map_ops_by_string -- parse the type string and return the associated ops */ static const struct map_ops * get_map_ops_by_string(const char *type) { for (int i = 0; i < COUNT_OF(maps); ++i) if (strcmp(maps[i].name, type) == 0) return maps[i].ops; return NULL; } #define KV_SIZE (PMEMOBJ_MIN_POOL) #define MAX_READ_LEN (64 * 1024) /* 64 kilobytes */ int main(int argc, char *argv[]) { if (argc < 4) { printf("usage: %s hashmap_tx|hashmap_atomic|hashmap_rp|" "ctree|btree|rtree|rbtree|skiplist file-name port\n", argv[0]); return 1; } const char *path = argv[2]; const char *type = argv[1]; int port = atoi(argv[3]); /* use only a single buffer for all incoming data */ void *read_buf = malloc(MAX_READ_LEN); assert(read_buf != NULL); msg_buf = uv_buf_init(read_buf, MAX_READ_LEN); if (access(path, F_OK) != 0) { pop = pmemobj_create(path, POBJ_LAYOUT_NAME(kv_server), KV_SIZE, 0666); if (pop == NULL) { fprintf(stderr, "failed to create pool: %s\n", pmemobj_errormsg()); return 1; } } else { pop = pmemobj_open(path, POBJ_LAYOUT_NAME(kv_server)); if (pop == NULL) { fprintf(stderr, "failed to open pool: %s\n", pmemobj_errormsg()); return 1; } } /* map context initialization */ mapc = map_ctx_init(get_map_ops_by_string(type), pop); if (!mapc) { pmemobj_close(pop); fprintf(stderr, "map_ctx_init failed (wrong type?)\n"); return 1; } /* initialize the actual map */ TOID(struct root) root = POBJ_ROOT(pop, struct root); if 
(TOID_IS_NULL(D_RO(root)->map)) { /* create new if it doesn't exist (a fresh pool) */ map_create(mapc, &D_RW(root)->map, NULL); } map = D_RO(root)->map; loop = uv_default_loop(); /* tcp server initialization */ uv_tcp_init(loop, &server); struct sockaddr_in bind_addr; uv_ip4_addr("0.0.0.0", port, &bind_addr); int ret = uv_tcp_bind(&server, (const struct sockaddr *)&bind_addr, 0); assert(ret == 0); ret = uv_listen((uv_stream_t *)&server, SOMAXCONN, connection_cb); assert(ret == 0); ret = uv_run(loop, UV_RUN_DEFAULT); assert(ret == 0); /* no more events in the loop, release resources and quit */ uv_loop_delete(loop); map_ctx_free(mapc); pmemobj_close(pop); free(read_buf); return 0; }
file_length: 10,374 | avg_line_length: 20.524896 | max_line_length: 79 | extension_type: c
null
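Client keys arrive as strings while the map interface is keyed by 64-bit integers; kv_server.c bridges the two with djb2_hash(). For reference, the same hash as a standalone function, where (hash << 5) + hash is hash * 33:

#include <stdint.h>
#include <stdio.h>

/* djb2: hash = hash * 33 + c, seeded with 5381 (same as kv_server.c) */
static uint32_t
djb2_hash(const char *str)
{
	uint32_t hash = 5381;
	int c;

	while ((c = *str++))
		hash = ((hash << 5) + hash) + c;
	return hash;
}

int
main(void)
{
	/* this value is what the INSERT/GET/REMOVE handlers pass to map_insert()/map_get() */
	printf("djb2(\"name\") = %u\n", djb2_hash("name"));
	return 0;
}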
NearPMSW-main/nearpm/shadow/pmdk-sd/src/examples/libpmemobj/map/map.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2018, Intel Corporation */ /* * map.h -- common interface for maps */ #ifndef MAP_H #define MAP_H #include <libpmemobj.h> #ifdef __cplusplus extern "C" { #endif #ifndef MAP_TYPE_OFFSET #define MAP_TYPE_OFFSET 1000 #endif TOID_DECLARE(struct map, MAP_TYPE_OFFSET + 0); struct map; struct map_ctx; struct map_ops { int(*check)(PMEMobjpool *pop, TOID(struct map) map); int(*create)(PMEMobjpool *pop, TOID(struct map) *map, void *arg); int(*destroy)(PMEMobjpool *pop, TOID(struct map) *map); int(*init)(PMEMobjpool *pop, TOID(struct map) map); int(*insert)(PMEMobjpool *pop, TOID(struct map) map, uint64_t key, PMEMoid value); int(*insert_new)(PMEMobjpool *pop, TOID(struct map) map, uint64_t key, size_t size, unsigned type_num, void(*constructor)(PMEMobjpool *pop, void *ptr, void *arg), void *arg); PMEMoid(*remove)(PMEMobjpool *pop, TOID(struct map) map, uint64_t key); int(*remove_free)(PMEMobjpool *pop, TOID(struct map) map, uint64_t key); int(*clear)(PMEMobjpool *pop, TOID(struct map) map); PMEMoid(*get)(PMEMobjpool *pop, TOID(struct map) map, uint64_t key); int(*lookup)(PMEMobjpool *pop, TOID(struct map) map, uint64_t key); int(*foreach)(PMEMobjpool *pop, TOID(struct map) map, int(*cb)(uint64_t key, PMEMoid value, void *arg), void *arg); int(*is_empty)(PMEMobjpool *pop, TOID(struct map) map); size_t(*count)(PMEMobjpool *pop, TOID(struct map) map); int(*cmd)(PMEMobjpool *pop, TOID(struct map) map, unsigned cmd, uint64_t arg); }; struct map_ctx { PMEMobjpool *pop; const struct map_ops *ops; }; struct map_ctx *map_ctx_init(const struct map_ops *ops, PMEMobjpool *pop); void map_ctx_free(struct map_ctx *mapc); int map_check(struct map_ctx *mapc, TOID(struct map) map); int map_create(struct map_ctx *mapc, TOID(struct map) *map, void *arg); int map_destroy(struct map_ctx *mapc, TOID(struct map) *map); int map_init(struct map_ctx *mapc, TOID(struct map) map); int map_insert(struct map_ctx *mapc, TOID(struct map) map, uint64_t key, PMEMoid value); int map_insert_new(struct map_ctx *mapc, TOID(struct map) map, uint64_t key, size_t size, unsigned type_num, void(*constructor)(PMEMobjpool *pop, void *ptr, void *arg), void *arg); PMEMoid map_remove(struct map_ctx *mapc, TOID(struct map) map, uint64_t key); int map_remove_free(struct map_ctx *mapc, TOID(struct map) map, uint64_t key); int map_clear(struct map_ctx *mapc, TOID(struct map) map); PMEMoid map_get(struct map_ctx *mapc, TOID(struct map) map, uint64_t key); int map_lookup(struct map_ctx *mapc, TOID(struct map) map, uint64_t key); int map_foreach(struct map_ctx *mapc, TOID(struct map) map, int(*cb)(uint64_t key, PMEMoid value, void *arg), void *arg); int map_is_empty(struct map_ctx *mapc, TOID(struct map) map); size_t map_count(struct map_ctx *mapc, TOID(struct map) map); int map_cmd(struct map_ctx *mapc, TOID(struct map) map, unsigned cmd, uint64_t arg); #ifdef __cplusplus } #endif #endif /* MAP_H */
file_length: 3,010 | avg_line_length: 31.728261 | max_line_length: 78 | extension_type: h
null
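A minimal sketch of driving a container through this interface, using MAP_HASHMAP_TX the same way kv_server.c and data_store.c do; the pool path and layout name are placeholders, and a real program would keep the TOID(struct map) handle in a pmem root object rather than on the stack.

#include <stdio.h>
#include <libpmemobj.h>
#include "map.h"
#include "map_hashmap_tx.h"

int
main(void)
{
	/* hypothetical pool path and layout name */
	PMEMobjpool *pop = pmemobj_create("/mnt/pmem/mappool", "map_example",
	    PMEMOBJ_MIN_POOL, 0666);
	if (pop == NULL)
		return 1;

	struct map_ctx *mapc = map_ctx_init(MAP_HASHMAP_TX, pop);
	if (mapc == NULL) {
		pmemobj_close(pop);
		return 1;
	}

	TOID(struct map) map;	/* volatile handle, for brevity only */

	TX_BEGIN(pop) {
		map_create(mapc, &map, NULL);
		map_insert(mapc, map, 42, OID_NULL);
	} TX_END

	printf("lookup(42) = %d, count = %zu\n",
	    map_lookup(mapc, map, 42), map_count(mapc, map));

	map_ctx_free(mapc);
	pmemobj_close(pop);
	return 0;
}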
NearPMSW-main/nearpm/shadow/pmdk-sd/src/examples/libpmemobj/map/data_store.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2018, Intel Corporation */ /* * data_store.c -- tree_map example usage */ #include <ex_common.h> #include <stdio.h> #include <sys/stat.h> #include <stdlib.h> #include <string.h> #include <time.h> #include <assert.h> #include "map.h" #include "map_ctree.h" #include "map_btree.h" #include "map_rbtree.h" #include "map_hashmap_atomic.h" #include "map_hashmap_tx.h" #include "map_hashmap_rp.h" #include "map_skiplist.h" POBJ_LAYOUT_BEGIN(data_store); POBJ_LAYOUT_ROOT(data_store, struct store_root); POBJ_LAYOUT_TOID(data_store, struct store_item); POBJ_LAYOUT_END(data_store); /////////////////Page fault handling///////////////// #include <bits/types/sig_atomic_t.h> #include <bits/types/sigset_t.h> #include <signal.h> #include <unistd.h> #include <sys/mman.h> #include <fcntl.h> #define SIGSTKSZ 8192 #define SA_SIGINFO 4 #define SA_ONSTACK 0x08000000 /* Use signal stack by using `sa_restorer'. */ #define SA_RESTART 0x10000000 /* Restart syscall on signal return. */ #define SA_NODEFER 0x40000000 /* Don't automatically block the signal when*/ stack_t _sigstk; int updated_page_count = 0; int all_updates = 0; int start_timing = 0; void * checkpoint_start; void * page[50]; PMEMobjpool *pop; void * device; int tot_data_counter=0; #define CHPSIZE 2048 void cmd_issue( uint32_t opcode, uint32_t TXID, uint32_t TID, uint32_t OID, uint64_t data_addr, uint32_t data_size, void * ptr){ //command with thread id encoded as first 8 bits of each word uint32_t issue_cmd[7]; issue_cmd[0] = (TID<<24)|(opcode<<16)|(TXID<<8)|TID; issue_cmd[1] = (TID<<24)|(OID<<16)|(data_addr>>48); issue_cmd[2] = (TID<<24)|((data_addr & 0x0000FFFFFFFFFFFF)>>24); issue_cmd[3] = (TID<<24)|(data_addr & 0x0000000000FFFFFF); issue_cmd[4] = (TID<<24)|(data_size<<8); issue_cmd[5] = (TID<<24)|(0X00FFFFFF>>16); issue_cmd[6] = (TID<<24)|((0X00FFFFFF & 0x0000FFFF)<<8); for(int i=0;i<7;i++){ // printf("%08x\n",issue_cmd[i]); *((u_int32_t *) ptr) = issue_cmd[i]; } } static inline uint64_t getCycle(){ uint32_t cycles_high, cycles_low, pid; asm volatile ("RDTSCP\n\t" // rdtscp into eax and edx "mov %%edx, %0\n\t" "mov %%eax, %1\n\t" "mov %%ecx, %2\n\t" :"=r" (cycles_high), "=r" (cycles_low), "=r" (pid) //store in vars :// no input :"%eax", "%edx", "%ecx" // clobbered by rdtscp ); return((uint64_t)cycles_high << 32) | cycles_low; } /// @brief Signal handler to trap SEGVs. static void segvHandle(int signum, siginfo_t * siginfo, void * context) { #define CPTIME #ifdef CPTIME uint64_t endCycles, startCycles,totalCycles; startCycles = getCycle(); #endif void * addr = siginfo->si_addr; // address of access uint64_t pageNo = ((uint64_t)addr)/4096; unsigned long * pageStart = (unsigned long *)(pageNo*4096); // Check if this was a SEGV that we are supposed to trap. 
if (siginfo->si_code == SEGV_ACCERR) { mprotect(pageStart, 4096, PROT_READ|PROT_WRITE); if(all_updates >= 0 || updated_page_count == 50){ for(int i=0;i<updated_page_count;i++){ //memcpy(checkpoint_start + 4096, pageStart,4096); //pmemobj_persist(pop, checkpoint_start + 4096,4096); cmd_issue(2,0,0,0, pageStart + i*4096,4096,device); tot_data_counter++; page[updated_page_count] = 0; } updated_page_count = 0; all_updates = 0; } all_updates ++; //printf("te\n"); for(int i=0; i<updated_page_count; i++){ if(page[i] == pageStart){ #ifdef CPTIME endCycles = getCycle(); totalCycles = endCycles - startCycles; double totTime = ((double)totalCycles)/2000000000; printf("cp %f\n", totTime); #endif return;} } page[updated_page_count] = pageStart; //printf("test1 %lx %d %d\n",page[updated_page_count],updated_page_count,all_updates); updated_page_count++; #ifdef CPTIME endCycles = getCycle(); totalCycles = endCycles - startCycles; double totTime = ((double)totalCycles)/2000000000; printf("cp %f\n", totTime); #endif //*((int *)checkpoint_start) = 10; //test++; //printf("test1 %lx %d\n",updated_page_count); } else if (siginfo->si_code == SEGV_MAPERR) { fprintf (stderr, "%d : map error with addr %p!\n", getpid(), addr); abort(); } else { fprintf (stderr, "%d : other access error with addr %p.\n", getpid(), addr); abort(); } } static void installSignalHandler(void) { // Set up an alternate signal stack. printf("page fault handler initialized!!\n"); _sigstk.ss_sp = mmap(NULL, SIGSTKSZ, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0); _sigstk.ss_size = SIGSTKSZ; _sigstk.ss_flags = 0; sigaltstack(&_sigstk, (stack_t *) 0); // Now set up a signal handler for SIGSEGV events. struct sigaction siga; sigemptyset(&siga.sa_mask); // Set the following signals to a set sigaddset(&siga.sa_mask, SIGSEGV); sigaddset(&siga.sa_mask, SIGALRM); sigprocmask(SIG_BLOCK, &siga.sa_mask, NULL); // Point to the handler function. 
siga.sa_flags = SA_SIGINFO | SA_ONSTACK | SA_RESTART | SA_NODEFER; siga.sa_sigaction = segvHandle; if (sigaction(SIGSEGV, &siga, NULL) == -1) { perror("sigaction(SIGSEGV)"); exit(-1); } sigprocmask(SIG_UNBLOCK, &siga.sa_mask, NULL); return; } static void setpage(void * addr){ uint64_t pageNo = ((uint64_t)addr)/4096; unsigned long * pageStart = (unsigned long *)(pageNo*4096); mprotect(pageStart, 4096, PROT_READ); return; } static void resetpage(void * addr){ uint64_t pageNo = ((uint64_t)addr)/4096; unsigned long * pageStart = (unsigned long *)(pageNo*4096); mprotect(pageStart, 4096, PROT_READ|PROT_WRITE); return; } void* open_device(const char* pathname) { //int fd = os_open("/sys/devices/pci0000:00/0000:00:00.2/iommu/ivhd0/devices/0000:0a:00.0/resource0",O_RDWR|O_SYNC); int fd = open(pathname,O_RDWR|O_SYNC); if(fd == -1) { printf("Couldnt opene file!!\n"); exit(0); } void * ptr = mmap(0,4096,PROT_READ|PROT_WRITE, MAP_SHARED,fd,0); if(ptr == (void *)-1) { printf("Could not map memory!!\n"); exit(0); } printf("opened device without error!!\n"); return ptr; } /////////////////////////////////////////////////////////////// #define MAX_INSERTS 100000 int use_ndp_redo = 0; static uint64_t nkeys; static uint64_t keys[MAX_INSERTS]; //int page_skip_counter = 0; TOID_DECLARE(struct page_checkpoint, 0); struct page_checkpoint{ char page[50][4096]; }; struct store_item { uint64_t item_data; }; struct store_root { TOID(struct map) map; }; /* * new_store_item -- transactionally creates and initializes new item */ static TOID(struct store_item) new_store_item(void) { TOID(struct store_item) item = TX_NEW(struct store_item); D_RW(item)->item_data = rand(); return item; } /* * get_keys -- inserts the keys of the items by key order (sorted, descending) */ static int get_keys(uint64_t key, PMEMoid value, void *arg) { keys[nkeys++] = key; return 0; } /* * dec_keys -- decrements the keys count for every item */ static int dec_keys(uint64_t key, PMEMoid value, void *arg) { nkeys--; return 0; } /* * parse_map_type -- parse type of map */ static const struct map_ops * parse_map_type(const char *type) { if (strcmp(type, "ctree") == 0) return MAP_CTREE; else if (strcmp(type, "btree") == 0) return MAP_BTREE; else if (strcmp(type, "rbtree") == 0) return MAP_RBTREE; else if (strcmp(type, "hashmap_atomic") == 0) return MAP_HASHMAP_ATOMIC; else if (strcmp(type, "hashmap_tx") == 0) return MAP_HASHMAP_TX; else if (strcmp(type, "hashmap_rp") == 0) return MAP_HASHMAP_RP; else if (strcmp(type, "skiplist") == 0) return MAP_SKIPLIST; return NULL; } void installSignalHandler (void) __attribute__ ((constructor)); int current_tx1; int main(int argc, const char *argv[]) { if (argc < 3) { printf("usage: %s " "<ctree|btree|rbtree|hashmap_atomic|hashmap_rp|" "hashmap_tx|skiplist> file-name [nops]\n", argv[0]); return 1; } const char *type = argv[1]; const char *path = argv[2]; const struct map_ops *map_ops = parse_map_type(type); if (!map_ops) { fprintf(stderr, "invalid container type -- '%s'\n", type); return 1; } int nops = MAX_INSERTS; if (argc > 3) { nops = atoi(argv[3]); if (nops <= 0 || nops > MAX_INSERTS) { fprintf(stderr, "number of operations must be " "in range 1..%d\n", MAX_INSERTS); return 1; } } //PMEMobjpool *pop; srand((unsigned)time(NULL)); if (file_exists(path) != 0) { if ((pop = pmemobj_create(path, POBJ_LAYOUT_NAME(data_store), (1024*1024*512), 0666)) == NULL) { perror("failed to create pool\n"); return 1; } } else { if ((pop = pmemobj_open(path, POBJ_LAYOUT_NAME(data_store))) == NULL) { perror("failed to open 
pool\n"); return 1; } } device = open_device("/sys/devices/pci0000:00/0000:00:00.2/iommu/ivhd0/devices/0000:0a:00.0/resource0"); //TOID(struct store_root) root = (TOID(struct store_root))pmemobj_root(pop, 1024*1024*512); //struct queue *qu = pmemobj_direct(root); //checkpoint_start = (void *)(qu + ); TOID(struct store_root) root = POBJ_ROOT(pop, struct store_root); //checkpoint_start = D_RW(root) + (1024*1024*256); TX_BEGIN(pop) { checkpoint_start = D_RW(TX_NEW(struct page_checkpoint))->page; } TX_END struct map_ctx *mapc = map_ctx_init(map_ops, pop); if (!mapc) { perror("cannot allocate map context\n"); return 1; } /* delete the map if it exists */ if (!map_check(mapc, D_RW(root)->map)) map_destroy(mapc, &D_RW(root)->map); /* insert random items in a transaction */ int aborted = 0; uint64_t endCycles, startCycles,totalCycles; TX_BEGIN(pop) { map_create(mapc, &D_RW(root)->map, NULL); } TX_END //warmup database /*for (int i = 0; i < 10000; ++i) { TX_BEGIN(pop) { int keyused = rand(); map_insert(mapc, D_RW(root)->map, keyused, new_store_item().oid); } TX_ONABORT { perror("transaction aborted y\n"); map_ctx_free(mapc); aborted = 1; } TX_END }*/ int keyread[10000]; startCycles = getCycle(); PMEMoid readval; int readCount = 0; for (int i = 0; i < nops; ++i) { start_timing = 1; TX_BEGIN(pop) { if(i<(10000 -readCount)){ int keyused = rand(); keyread[i]= keyused; map_insert(mapc, D_RW(root)->map, keyused, new_store_item().oid); } else { if(readCount == 7500) readval = map_get(mapc, D_RW(root)->map, keyread[rand()%2500]); else readval = map_get(mapc, D_RW(root)->map, keyread[rand()%(10000 - readCount)]); } } TX_ONABORT { perror("transaction aborted y\n"); map_ctx_free(mapc); aborted = 1; } TX_END //updated_page_count = 0; } /* for (int i = 0; i < nops; ++i) { current_tx1 = 1; TX_BEGIN(pop) { int keyused = rand(); map_insert(mapc, D_RW(root)->map, keyused, new_store_item().oid); } TX_ONABORT { perror("transaction aborted y\n"); map_ctx_free(mapc); aborted = 1; } TX_END //updated_page_count = 0; } */ endCycles = getCycle(); totalCycles = endCycles - startCycles; double totTime = ((double)totalCycles)/2000000000; printf("TX/s %f\ntottime %f\n", nops/totTime, totTime);//RUN_COUNT/totTime, totTime); map_ctx_free(mapc); pmemobj_close(pop); return 0; }
11,362
23.38412
117
c
null
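The benchmark above installs a SIGSEGV handler and flips page protection with setpage()/resetpage(), which is the standard way to intercept the first store to a page so its old contents can be shadow-copied before being overwritten. The handler itself (segvHandle) is defined earlier in the file and not shown here, so the following stand-alone program is only a minimal sketch of that pattern, under the assumption that the handler copies the faulting page into a checkpoint buffer; the names (on_segv, shadow_copy, tracked) are hypothetical and calling memcpy/mprotect inside a signal handler is a simplification rather than strictly async-signal-safe practice.

#define _GNU_SOURCE
#include <signal.h>
#include <stdint.h>
#include <string.h>
#include <sys/mman.h>

#define PG_SIZE 4096UL

static char shadow_copy[PG_SIZE];   /* checkpoint destination */

/* First store to a read-only page lands here: save the old contents,
 * re-enable writes, and let the faulting instruction retry. */
static void on_segv(int sig, siginfo_t *si, void *ctx)
{
	(void)sig; (void)ctx;
	void *page = (void *)((uintptr_t)si->si_addr & ~(PG_SIZE - 1));
	memcpy(shadow_copy, page, PG_SIZE);
	mprotect(page, PG_SIZE, PROT_READ | PROT_WRITE);
}

int main(void)
{
	struct sigaction sa;
	memset(&sa, 0, sizeof(sa));
	sa.sa_flags = SA_SIGINFO;
	sa.sa_sigaction = on_segv;
	sigaction(SIGSEGV, &sa, NULL);

	char *tracked = mmap(NULL, PG_SIZE, PROT_READ | PROT_WRITE,
			MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	memset(tracked, 0xAB, PG_SIZE);

	/* arm tracking, as setpage() does in the benchmark */
	mprotect(tracked, PG_SIZE, PROT_READ);

	tracked[0] = 1;   /* faults once, page is shadow-copied, write retries */

	return shadow_copy[1] == (char)0xAB ? 0 : 1;
}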
NearPMSW-main/nearpm/shadow/pmdk-sd/src/examples/libpmemobj/map/map_rtree.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2016, Intel Corporation */ /* * map_rtree.c -- common interface for maps */ #include <rtree_map.h> #include "map_rtree.h" /* * map_rtree_check -- wrapper for rtree_map_check */ static int map_rtree_check(PMEMobjpool *pop, TOID(struct map) map) { TOID(struct rtree_map) rtree_map; TOID_ASSIGN(rtree_map, map.oid); return rtree_map_check(pop, rtree_map); } /* * map_rtree_create -- wrapper for rtree_map_new */ static int map_rtree_create(PMEMobjpool *pop, TOID(struct map) *map, void *arg) { TOID(struct rtree_map) *rtree_map = (TOID(struct rtree_map) *)map; return rtree_map_create(pop, rtree_map, arg); } /* * map_rtree_destroy -- wrapper for rtree_map_delete */ static int map_rtree_destroy(PMEMobjpool *pop, TOID(struct map) *map) { TOID(struct rtree_map) *rtree_map = (TOID(struct rtree_map) *)map; return rtree_map_destroy(pop, rtree_map); } /* * map_rtree_insert -- wrapper for rtree_map_insert */ static int map_rtree_insert(PMEMobjpool *pop, TOID(struct map) map, uint64_t key, PMEMoid value) { TOID(struct rtree_map) rtree_map; TOID_ASSIGN(rtree_map, map.oid); return rtree_map_insert(pop, rtree_map, (unsigned char *)&key, sizeof(key), value); } /* * map_rtree_insert_new -- wrapper for rtree_map_insert_new */ static int map_rtree_insert_new(PMEMobjpool *pop, TOID(struct map) map, uint64_t key, size_t size, unsigned type_num, void (*constructor)(PMEMobjpool *pop, void *ptr, void *arg), void *arg) { TOID(struct rtree_map) rtree_map; TOID_ASSIGN(rtree_map, map.oid); return rtree_map_insert_new(pop, rtree_map, (unsigned char *)&key, sizeof(key), size, type_num, constructor, arg); } /* * map_rtree_remove -- wrapper for rtree_map_remove */ static PMEMoid map_rtree_remove(PMEMobjpool *pop, TOID(struct map) map, uint64_t key) { TOID(struct rtree_map) rtree_map; TOID_ASSIGN(rtree_map, map.oid); return rtree_map_remove(pop, rtree_map, (unsigned char *)&key, sizeof(key)); } /* * map_rtree_remove_free -- wrapper for rtree_map_remove_free */ static int map_rtree_remove_free(PMEMobjpool *pop, TOID(struct map) map, uint64_t key) { TOID(struct rtree_map) rtree_map; TOID_ASSIGN(rtree_map, map.oid); return rtree_map_remove_free(pop, rtree_map, (unsigned char *)&key, sizeof(key)); } /* * map_rtree_clear -- wrapper for rtree_map_clear */ static int map_rtree_clear(PMEMobjpool *pop, TOID(struct map) map) { TOID(struct rtree_map) rtree_map; TOID_ASSIGN(rtree_map, map.oid); return rtree_map_clear(pop, rtree_map); } /* * map_rtree_get -- wrapper for rtree_map_get */ static PMEMoid map_rtree_get(PMEMobjpool *pop, TOID(struct map) map, uint64_t key) { TOID(struct rtree_map) rtree_map; TOID_ASSIGN(rtree_map, map.oid); return rtree_map_get(pop, rtree_map, (unsigned char *)&key, sizeof(key)); } /* * map_rtree_lookup -- wrapper for rtree_map_lookup */ static int map_rtree_lookup(PMEMobjpool *pop, TOID(struct map) map, uint64_t key) { TOID(struct rtree_map) rtree_map; TOID_ASSIGN(rtree_map, map.oid); return rtree_map_lookup(pop, rtree_map, (unsigned char *)&key, sizeof(key)); } struct cb_arg2 { int (*cb)(uint64_t key, PMEMoid value, void *arg); void *arg; }; /* * map_rtree_foreach_cb -- wrapper for callback */ static int map_rtree_foreach_cb(const unsigned char *key, uint64_t key_size, PMEMoid value, void *arg2) { const struct cb_arg2 *const a2 = (const struct cb_arg2 *)arg2; const uint64_t *const k2 = (uint64_t *)key; return a2->cb(*k2, value, a2->arg); } /* * map_rtree_foreach -- wrapper for rtree_map_foreach */ static int map_rtree_foreach(PMEMobjpool *pop, TOID(struct 
map) map, int (*cb)(uint64_t key, PMEMoid value, void *arg), void *arg) { struct cb_arg2 arg2 = {cb, arg}; TOID(struct rtree_map) rtree_map; TOID_ASSIGN(rtree_map, map.oid); return rtree_map_foreach(pop, rtree_map, map_rtree_foreach_cb, &arg2); } /* * map_rtree_is_empty -- wrapper for rtree_map_is_empty */ static int map_rtree_is_empty(PMEMobjpool *pop, TOID(struct map) map) { TOID(struct rtree_map) rtree_map; TOID_ASSIGN(rtree_map, map.oid); return rtree_map_is_empty(pop, rtree_map); } struct map_ops rtree_map_ops = { /* .check = */map_rtree_check, /* .create = */map_rtree_create, /* .destroy = */map_rtree_destroy, /* .init = */NULL, /* .insert = */map_rtree_insert, /* .insert_new = */map_rtree_insert_new, /* .remove = */map_rtree_remove, /* .remove_free = */map_rtree_remove_free, /* .clear = */map_rtree_clear, /* .get = */map_rtree_get, /* .lookup = */map_rtree_lookup, /* .foreach = */map_rtree_foreach, /* .is_empty = */map_rtree_is_empty, /* .count = */NULL, /* .cmd = */NULL, };
4,700
21.710145
75
c
null
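map_rtree.c, like the other map_*.c files that follow, only adapts one concrete container to the common struct map_ops table declared in map.h; the generic map_insert()/map_get() calls in the benchmark presumably reach a backend through one indirect call per operation (the dispatching map.c itself is not part of this excerpt). The toy program below sketches that vtable pattern with hypothetical names and just two backends and one operation; it is not the real map.h interface.

#include <stdio.h>
#include <stdint.h>

/* hypothetical, reduced "ops table": the real struct map_ops has many
 * more function pointers (check, create, remove, foreach, ...) */
struct toy_ops {
	const char *name;
	int (*insert)(uint64_t key, uint64_t value);
};

static int tree_insert(uint64_t key, uint64_t value)
{
	printf("tree backend: insert %llu -> %llu\n",
		(unsigned long long)key, (unsigned long long)value);
	return 0;
}

static int hash_insert(uint64_t key, uint64_t value)
{
	printf("hash backend: insert %llu -> %llu\n",
		(unsigned long long)key, (unsigned long long)value);
	return 0;
}

static const struct toy_ops tree_ops = { "tree", tree_insert };
static const struct toy_ops hash_ops = { "hash", hash_insert };

/* generic front end, analogous to map_insert() dispatching through map_ctx */
static int generic_insert(const struct toy_ops *ops, uint64_t k, uint64_t v)
{
	return ops->insert(k, v);
}

int main(void)
{
	generic_insert(&tree_ops, 1, 100);
	generic_insert(&hash_ops, 2, 200);
	return 0;
}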
NearPMSW-main/nearpm/shadow/pmdk-sd/src/examples/libpmemobj/map/map_hashmap_rp.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2018, Intel Corporation */ /* * map_hashmap_rp.c -- common interface for maps */ #include <map.h> #include <hashmap_rp.h> #include "map_hashmap_rp.h" /* * map_hm_rp_check -- wrapper for hm_rp_check */ static int map_hm_rp_check(PMEMobjpool *pop, TOID(struct map) map) { TOID(struct hashmap_rp) hashmap_rp; TOID_ASSIGN(hashmap_rp, map.oid); return hm_rp_check(pop, hashmap_rp); } /* * map_hm_rp_count -- wrapper for hm_rp_count */ static size_t map_hm_rp_count(PMEMobjpool *pop, TOID(struct map) map) { TOID(struct hashmap_rp) hashmap_rp; TOID_ASSIGN(hashmap_rp, map.oid); return hm_rp_count(pop, hashmap_rp); } /* * map_hm_rp_init -- wrapper for hm_rp_init */ static int map_hm_rp_init(PMEMobjpool *pop, TOID(struct map) map) { TOID(struct hashmap_rp) hashmap_rp; TOID_ASSIGN(hashmap_rp, map.oid); return hm_rp_init(pop, hashmap_rp); } /* * map_hm_rp_create -- wrapper for hm_rp_create */ static int map_hm_rp_create(PMEMobjpool *pop, TOID(struct map) *map, void *arg) { TOID(struct hashmap_rp) *hashmap_rp = (TOID(struct hashmap_rp) *)map; return hm_rp_create(pop, hashmap_rp, arg); } /* * map_hm_rp_insert -- wrapper for hm_rp_insert */ static int map_hm_rp_insert(PMEMobjpool *pop, TOID(struct map) map, uint64_t key, PMEMoid value) { TOID(struct hashmap_rp) hashmap_rp; TOID_ASSIGN(hashmap_rp, map.oid); return hm_rp_insert(pop, hashmap_rp, key, value); } /* * map_hm_rp_remove -- wrapper for hm_rp_remove */ static PMEMoid map_hm_rp_remove(PMEMobjpool *pop, TOID(struct map) map, uint64_t key) { TOID(struct hashmap_rp) hashmap_rp; TOID_ASSIGN(hashmap_rp, map.oid); return hm_rp_remove(pop, hashmap_rp, key); } /* * map_hm_rp_get -- wrapper for hm_rp_get */ static PMEMoid map_hm_rp_get(PMEMobjpool *pop, TOID(struct map) map, uint64_t key) { TOID(struct hashmap_rp) hashmap_rp; TOID_ASSIGN(hashmap_rp, map.oid); return hm_rp_get(pop, hashmap_rp, key); } /* * map_hm_rp_lookup -- wrapper for hm_rp_lookup */ static int map_hm_rp_lookup(PMEMobjpool *pop, TOID(struct map) map, uint64_t key) { TOID(struct hashmap_rp) hashmap_rp; TOID_ASSIGN(hashmap_rp, map.oid); return hm_rp_lookup(pop, hashmap_rp, key); } /* * map_hm_rp_foreach -- wrapper for hm_rp_foreach */ static int map_hm_rp_foreach(PMEMobjpool *pop, TOID(struct map) map, int (*cb)(uint64_t key, PMEMoid value, void *arg), void *arg) { TOID(struct hashmap_rp) hashmap_rp; TOID_ASSIGN(hashmap_rp, map.oid); return hm_rp_foreach(pop, hashmap_rp, cb, arg); } /* * map_hm_rp_cmd -- wrapper for hm_rp_cmd */ static int map_hm_rp_cmd(PMEMobjpool *pop, TOID(struct map) map, unsigned cmd, uint64_t arg) { TOID(struct hashmap_rp) hashmap_rp; TOID_ASSIGN(hashmap_rp, map.oid); return hm_rp_cmd(pop, hashmap_rp, cmd, arg); } struct map_ops hashmap_rp_ops = { /* .check = */ map_hm_rp_check, /* .create = */ map_hm_rp_create, /* .destroy = */ NULL, /* .init = */ map_hm_rp_init, /* .insert = */ map_hm_rp_insert, /* .insert_new = */ NULL, /* .remove = */ map_hm_rp_remove, /* .remove_free = */ NULL, /* .clear = */ NULL, /* .get = */ map_hm_rp_get, /* .lookup = */ map_hm_rp_lookup, /* .foreach = */ map_hm_rp_foreach, /* .is_empty = */ NULL, /* .count = */ map_hm_rp_count, /* .cmd = */ map_hm_rp_cmd, };
3,315
20.532468
70
c
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/examples/libpmemobj/map/hashmap_tx.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2019, Intel Corporation */ /* integer hash set implementation which uses only transaction APIs */ #include <stdlib.h> #include <stdio.h> #include <errno.h> #include <inttypes.h> #include <libpmemobj.h> #include "hashmap_tx.h" #include "hashmap_internal.h" /* layout definition */ TOID_DECLARE(struct buckets, HASHMAP_TX_TYPE_OFFSET + 1); TOID_DECLARE(struct entry, HASHMAP_TX_TYPE_OFFSET + 2); struct entry { uint64_t key; PMEMoid value; /* next entry list pointer */ TOID(struct entry) next; }; struct buckets { /* number of buckets */ size_t nbuckets; /* array of lists */ TOID(struct entry) bucket[]; }; struct hashmap_tx { /* random number generator seed */ uint32_t seed; /* hash function coefficients */ uint32_t hash_fun_a; uint32_t hash_fun_b; uint64_t hash_fun_p; /* number of values inserted */ uint64_t count; /* buckets */ TOID(struct buckets) buckets; }; /* * create_hashmap -- hashmap initializer */ static void create_hashmap(PMEMobjpool *pop, TOID(struct hashmap_tx) hashmap, uint32_t seed) { size_t len = INIT_BUCKETS_NUM; size_t sz = sizeof(struct buckets) + len * sizeof(TOID(struct entry)); TX_BEGIN(pop) { TX_ADD(hashmap); D_RW(hashmap)->seed = seed; do { D_RW(hashmap)->hash_fun_a = (uint32_t)rand(); } while (D_RW(hashmap)->hash_fun_a == 0); D_RW(hashmap)->hash_fun_b = (uint32_t)rand(); D_RW(hashmap)->hash_fun_p = HASH_FUNC_COEFF_P; D_RW(hashmap)->buckets = TX_ZALLOC(struct buckets, sz); D_RW(D_RW(hashmap)->buckets)->nbuckets = len; } TX_ONABORT { fprintf(stderr, "%s: transaction aborted: %s\n", __func__, pmemobj_errormsg()); abort(); } TX_END } /* * hash -- the simplest hashing function, * see https://en.wikipedia.org/wiki/Universal_hashing#Hashing_integers */ static uint64_t hash(const TOID(struct hashmap_tx) *hashmap, const TOID(struct buckets) *buckets, uint64_t value) { uint32_t a = D_RO(*hashmap)->hash_fun_a; uint32_t b = D_RO(*hashmap)->hash_fun_b; uint64_t p = D_RO(*hashmap)->hash_fun_p; size_t len = D_RO(*buckets)->nbuckets; return ((a * value + b) % p) % len; } /* * hm_tx_rebuild -- rebuilds the hashmap with a new number of buckets */ static void hm_tx_rebuild(PMEMobjpool *pop, TOID(struct hashmap_tx) hashmap, size_t new_len) { TOID(struct buckets) buckets_old = D_RO(hashmap)->buckets; if (new_len == 0) new_len = D_RO(buckets_old)->nbuckets; size_t sz_old = sizeof(struct buckets) + D_RO(buckets_old)->nbuckets * sizeof(TOID(struct entry)); size_t sz_new = sizeof(struct buckets) + new_len * sizeof(TOID(struct entry)); TX_BEGIN(pop) { TX_ADD_FIELD(hashmap, buckets); TOID(struct buckets) buckets_new = TX_ZALLOC(struct buckets, sz_new); D_RW(buckets_new)->nbuckets = new_len; pmemobj_tx_add_range(buckets_old.oid, 0, sz_old); for (size_t i = 0; i < D_RO(buckets_old)->nbuckets; ++i) { while (!TOID_IS_NULL(D_RO(buckets_old)->bucket[i])) { TOID(struct entry) en = D_RO(buckets_old)->bucket[i]; uint64_t h = hash(&hashmap, &buckets_new, D_RO(en)->key); D_RW(buckets_old)->bucket[i] = D_RO(en)->next; TX_ADD_FIELD(en, next); D_RW(en)->next = D_RO(buckets_new)->bucket[h]; D_RW(buckets_new)->bucket[h] = en; } } D_RW(hashmap)->buckets = buckets_new; TX_FREE(buckets_old); } TX_ONABORT { fprintf(stderr, "%s: transaction aborted: %s\n", __func__, pmemobj_errormsg()); /* * We don't need to do anything here, because everything is * consistent. The only thing affected is performance. 
*/ } TX_END } /* * hm_tx_insert -- inserts specified value into the hashmap, * returns: * - 0 if successful, * - 1 if value already existed, * - -1 if something bad happened */ int hm_tx_insert(PMEMobjpool *pop, TOID(struct hashmap_tx) hashmap, uint64_t key, PMEMoid value) { TOID(struct buckets) buckets = D_RO(hashmap)->buckets; TOID(struct entry) var; uint64_t h = hash(&hashmap, &buckets, key); int num = 0; for (var = D_RO(buckets)->bucket[h]; !TOID_IS_NULL(var); var = D_RO(var)->next) { if (D_RO(var)->key == key) return 1; num++; } int ret = 0; TX_BEGIN(pop) { TX_ADD_FIELD(D_RO(hashmap)->buckets, bucket[h]); TX_ADD_FIELD(hashmap, count); TOID(struct entry) e = TX_NEW(struct entry); D_RW(e)->key = key; D_RW(e)->value = value; D_RW(e)->next = D_RO(buckets)->bucket[h]; D_RW(buckets)->bucket[h] = e; D_RW(hashmap)->count++; num++; } TX_ONABORT { fprintf(stderr, "transaction aborted: %s\n", pmemobj_errormsg()); ret = -1; } TX_END if (ret) return ret; if (num > MAX_HASHSET_THRESHOLD || (num > MIN_HASHSET_THRESHOLD && D_RO(hashmap)->count > 2 * D_RO(buckets)->nbuckets)) hm_tx_rebuild(pop, hashmap, D_RO(buckets)->nbuckets * 2); return 0; } /* * hm_tx_remove -- removes specified value from the hashmap, * returns: * - key's value if successful, * - OID_NULL if value didn't exist or if something bad happened */ PMEMoid hm_tx_remove(PMEMobjpool *pop, TOID(struct hashmap_tx) hashmap, uint64_t key) { TOID(struct buckets) buckets = D_RO(hashmap)->buckets; TOID(struct entry) var, prev = TOID_NULL(struct entry); uint64_t h = hash(&hashmap, &buckets, key); for (var = D_RO(buckets)->bucket[h]; !TOID_IS_NULL(var); prev = var, var = D_RO(var)->next) { if (D_RO(var)->key == key) break; } if (TOID_IS_NULL(var)) return OID_NULL; int ret = 0; PMEMoid retoid = D_RO(var)->value; TX_BEGIN(pop) { if (TOID_IS_NULL(prev)) TX_ADD_FIELD(D_RO(hashmap)->buckets, bucket[h]); else TX_ADD_FIELD(prev, next); TX_ADD_FIELD(hashmap, count); if (TOID_IS_NULL(prev)) D_RW(buckets)->bucket[h] = D_RO(var)->next; else D_RW(prev)->next = D_RO(var)->next; D_RW(hashmap)->count--; TX_FREE(var); } TX_ONABORT { fprintf(stderr, "transaction aborted: %s\n", pmemobj_errormsg()); ret = -1; } TX_END if (ret) return OID_NULL; if (D_RO(hashmap)->count < D_RO(buckets)->nbuckets) hm_tx_rebuild(pop, hashmap, D_RO(buckets)->nbuckets / 2); return retoid; } /* * hm_tx_foreach -- prints all values from the hashmap */ int hm_tx_foreach(PMEMobjpool *pop, TOID(struct hashmap_tx) hashmap, int (*cb)(uint64_t key, PMEMoid value, void *arg), void *arg) { TOID(struct buckets) buckets = D_RO(hashmap)->buckets; TOID(struct entry) var; int ret = 0; for (size_t i = 0; i < D_RO(buckets)->nbuckets; ++i) { if (TOID_IS_NULL(D_RO(buckets)->bucket[i])) continue; for (var = D_RO(buckets)->bucket[i]; !TOID_IS_NULL(var); var = D_RO(var)->next) { ret = cb(D_RO(var)->key, D_RO(var)->value, arg); if (ret) break; } } return ret; } /* * hm_tx_debug -- prints complete hashmap state */ static void hm_tx_debug(PMEMobjpool *pop, TOID(struct hashmap_tx) hashmap, FILE *out) { TOID(struct buckets) buckets = D_RO(hashmap)->buckets; TOID(struct entry) var; fprintf(out, "a: %u b: %u p: %" PRIu64 "\n", D_RO(hashmap)->hash_fun_a, D_RO(hashmap)->hash_fun_b, D_RO(hashmap)->hash_fun_p); fprintf(out, "count: %" PRIu64 ", buckets: %zu\n", D_RO(hashmap)->count, D_RO(buckets)->nbuckets); for (size_t i = 0; i < D_RO(buckets)->nbuckets; ++i) { if (TOID_IS_NULL(D_RO(buckets)->bucket[i])) continue; int num = 0; fprintf(out, "%zu: ", i); for (var = D_RO(buckets)->bucket[i]; !TOID_IS_NULL(var); var = 
D_RO(var)->next) { fprintf(out, "%" PRIu64 " ", D_RO(var)->key); num++; } fprintf(out, "(%d)\n", num); } } /* * hm_tx_get -- checks whether specified value is in the hashmap */ PMEMoid hm_tx_get(PMEMobjpool *pop, TOID(struct hashmap_tx) hashmap, uint64_t key) { TOID(struct buckets) buckets = D_RO(hashmap)->buckets; TOID(struct entry) var; uint64_t h = hash(&hashmap, &buckets, key); for (var = D_RO(buckets)->bucket[h]; !TOID_IS_NULL(var); var = D_RO(var)->next) if (D_RO(var)->key == key) return D_RO(var)->value; return OID_NULL; } /* * hm_tx_lookup -- checks whether specified value exists */ int hm_tx_lookup(PMEMobjpool *pop, TOID(struct hashmap_tx) hashmap, uint64_t key) { TOID(struct buckets) buckets = D_RO(hashmap)->buckets; TOID(struct entry) var; uint64_t h = hash(&hashmap, &buckets, key); for (var = D_RO(buckets)->bucket[h]; !TOID_IS_NULL(var); var = D_RO(var)->next) if (D_RO(var)->key == key) return 1; return 0; } /* * hm_tx_count -- returns number of elements */ size_t hm_tx_count(PMEMobjpool *pop, TOID(struct hashmap_tx) hashmap) { return D_RO(hashmap)->count; } /* * hm_tx_init -- recovers hashmap state, called after pmemobj_open */ int hm_tx_init(PMEMobjpool *pop, TOID(struct hashmap_tx) hashmap) { srand(D_RO(hashmap)->seed); return 0; } /* * hm_tx_create -- allocates new hashmap */ int hm_tx_create(PMEMobjpool *pop, TOID(struct hashmap_tx) *map, void *arg) { struct hashmap_args *args = (struct hashmap_args *)arg; int ret = 0; TX_BEGIN(pop) { TX_ADD_DIRECT(map); *map = TX_ZNEW(struct hashmap_tx); uint32_t seed = args ? args->seed : 0; create_hashmap(pop, *map, seed); } TX_ONABORT { ret = -1; } TX_END return ret; } /* * hm_tx_check -- checks if specified persistent object is an * instance of hashmap */ int hm_tx_check(PMEMobjpool *pop, TOID(struct hashmap_tx) hashmap) { return TOID_IS_NULL(hashmap) || !TOID_VALID(hashmap); } /* * hm_tx_cmd -- execute cmd for hashmap */ int hm_tx_cmd(PMEMobjpool *pop, TOID(struct hashmap_tx) hashmap, unsigned cmd, uint64_t arg) { switch (cmd) { case HASHMAP_CMD_REBUILD: hm_tx_rebuild(pop, hashmap, arg); return 0; case HASHMAP_CMD_DEBUG: if (!arg) return -EINVAL; hm_tx_debug(pop, hashmap, (FILE *)arg); return 0; default: return -EINVAL; } }
9,692
22.078571
80
c
null
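hashmap_tx computes its bucket index with the textbook universal-hash form h(k) = ((a*k + b) mod p) mod nbuckets, where a and b are randomized per map and p is the prime HASH_FUNC_COEFF_P defined in hashmap_internal.h (not shown in this excerpt). The stand-alone sketch below mirrors that arithmetic so the rehashing behaviour is easy to see; the prime used here is only a placeholder, and the helper name bucket_of is hypothetical.

#include <stdio.h>
#include <stdint.h>

static uint64_t
bucket_of(uint32_t a, uint32_t b, uint64_t p, size_t nbuckets, uint64_t key)
{
	/* same shape as hash() in hashmap_tx.c */
	return ((a * key + b) % p) % nbuckets;
}

int main(void)
{
	const uint64_t p = 10000000033ULL;	/* placeholder prime */
	const size_t nbuckets = 16;

	/* two different (a, b) pairs spread the same keys differently,
	 * which is why create_hashmap() draws them at random */
	for (uint64_t key = 1; key <= 4; key++)
		printf("key %llu -> bucket %llu (a=3,b=7) / bucket %llu (a=11,b=5)\n",
			(unsigned long long)key,
			(unsigned long long)bucket_of(3, 7, p, nbuckets, key),
			(unsigned long long)bucket_of(11, 5, p, nbuckets, key));
	return 0;
}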
NearPMSW-main/nearpm/shadow/pmdk-sd/src/examples/libpmemobj/map/map_skiplist.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2016, Intel Corporation */ /* * map_skiplist.c -- common interface for maps */ #include <map.h> #include <skiplist_map.h> #include "map_skiplist.h" /* * map_skiplist_check -- wrapper for skiplist_map_check */ static int map_skiplist_check(PMEMobjpool *pop, TOID(struct map) map) { TOID(struct skiplist_map_node) skiplist_map; TOID_ASSIGN(skiplist_map, map.oid); return skiplist_map_check(pop, skiplist_map); } /* * map_skiplist_create -- wrapper for skiplist_map_new */ static int map_skiplist_create(PMEMobjpool *pop, TOID(struct map) *map, void *arg) { TOID(struct skiplist_map_node) *skiplist_map = (TOID(struct skiplist_map_node) *)map; return skiplist_map_create(pop, skiplist_map, arg); } /* * map_skiplist_destroy -- wrapper for skiplist_map_delete */ static int map_skiplist_destroy(PMEMobjpool *pop, TOID(struct map) *map) { TOID(struct skiplist_map_node) *skiplist_map = (TOID(struct skiplist_map_node) *)map; return skiplist_map_destroy(pop, skiplist_map); } /* * map_skiplist_insert -- wrapper for skiplist_map_insert */ static int map_skiplist_insert(PMEMobjpool *pop, TOID(struct map) map, uint64_t key, PMEMoid value) { TOID(struct skiplist_map_node) skiplist_map; TOID_ASSIGN(skiplist_map, map.oid); return skiplist_map_insert(pop, skiplist_map, key, value); } /* * map_skiplist_insert_new -- wrapper for skiplist_map_insert_new */ static int map_skiplist_insert_new(PMEMobjpool *pop, TOID(struct map) map, uint64_t key, size_t size, unsigned type_num, void (*constructor)(PMEMobjpool *pop, void *ptr, void *arg), void *arg) { TOID(struct skiplist_map_node) skiplist_map; TOID_ASSIGN(skiplist_map, map.oid); return skiplist_map_insert_new(pop, skiplist_map, key, size, type_num, constructor, arg); } /* * map_skiplist_remove -- wrapper for skiplist_map_remove */ static PMEMoid map_skiplist_remove(PMEMobjpool *pop, TOID(struct map) map, uint64_t key) { TOID(struct skiplist_map_node) skiplist_map; TOID_ASSIGN(skiplist_map, map.oid); return skiplist_map_remove(pop, skiplist_map, key); } /* * map_skiplist_remove_free -- wrapper for skiplist_map_remove_free */ static int map_skiplist_remove_free(PMEMobjpool *pop, TOID(struct map) map, uint64_t key) { TOID(struct skiplist_map_node) skiplist_map; TOID_ASSIGN(skiplist_map, map.oid); return skiplist_map_remove_free(pop, skiplist_map, key); } /* * map_skiplist_clear -- wrapper for skiplist_map_clear */ static int map_skiplist_clear(PMEMobjpool *pop, TOID(struct map) map) { TOID(struct skiplist_map_node) skiplist_map; TOID_ASSIGN(skiplist_map, map.oid); return skiplist_map_clear(pop, skiplist_map); } /* * map_skiplist_get -- wrapper for skiplist_map_get */ static PMEMoid map_skiplist_get(PMEMobjpool *pop, TOID(struct map) map, uint64_t key) { TOID(struct skiplist_map_node) skiplist_map; TOID_ASSIGN(skiplist_map, map.oid); return skiplist_map_get(pop, skiplist_map, key); } /* * map_skiplist_lookup -- wrapper for skiplist_map_lookup */ static int map_skiplist_lookup(PMEMobjpool *pop, TOID(struct map) map, uint64_t key) { TOID(struct skiplist_map_node) skiplist_map; TOID_ASSIGN(skiplist_map, map.oid); return skiplist_map_lookup(pop, skiplist_map, key); } /* * map_skiplist_foreach -- wrapper for skiplist_map_foreach */ static int map_skiplist_foreach(PMEMobjpool *pop, TOID(struct map) map, int (*cb)(uint64_t key, PMEMoid value, void *arg), void *arg) { TOID(struct skiplist_map_node) skiplist_map; TOID_ASSIGN(skiplist_map, map.oid); return skiplist_map_foreach(pop, skiplist_map, cb, arg); } /* * 
map_skiplist_is_empty -- wrapper for skiplist_map_is_empty */ static int map_skiplist_is_empty(PMEMobjpool *pop, TOID(struct map) map) { TOID(struct skiplist_map_node) skiplist_map; TOID_ASSIGN(skiplist_map, map.oid); return skiplist_map_is_empty(pop, skiplist_map); } struct map_ops skiplist_map_ops = { /* .check = */ map_skiplist_check, /* .create = */ map_skiplist_create, /* .destroy = */ map_skiplist_destroy, /* .init = */ NULL, /* .insert = */ map_skiplist_insert, /* .insert_new = */ map_skiplist_insert_new, /* .remove = */ map_skiplist_remove, /* .remove_free = */ map_skiplist_remove_free, /* .clear = */ map_skiplist_clear, /* .get = */ map_skiplist_get, /* .lookup = */ map_skiplist_lookup, /* .foreach = */ map_skiplist_foreach, /* .is_empty = */ map_skiplist_is_empty, /* .count = */ NULL, /* .cmd = */ NULL, };
4,488
23.664835
78
c
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/examples/libpmemobj/map/map_ctree.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2017, Intel Corporation */ /* * map_ctree.c -- common interface for maps */ #include <map.h> #include <ctree_map.h> #include "map_ctree.h" /* * map_ctree_check -- wrapper for ctree_map_check */ static int map_ctree_check(PMEMobjpool *pop, TOID(struct map) map) { TOID(struct ctree_map) ctree_map; TOID_ASSIGN(ctree_map, map.oid); return ctree_map_check(pop, ctree_map); } /* * map_ctree_create -- wrapper for ctree_map_create */ static int map_ctree_create(PMEMobjpool *pop, TOID(struct map) *map, void *arg) { TOID(struct ctree_map) *ctree_map = (TOID(struct ctree_map) *)map; return ctree_map_create(pop, ctree_map, arg); } /* * map_ctree_destroy -- wrapper for ctree_map_destroy */ static int map_ctree_destroy(PMEMobjpool *pop, TOID(struct map) *map) { TOID(struct ctree_map) *ctree_map = (TOID(struct ctree_map) *)map; return ctree_map_destroy(pop, ctree_map); } /* * map_ctree_insert -- wrapper for ctree_map_insert */ static int map_ctree_insert(PMEMobjpool *pop, TOID(struct map) map, uint64_t key, PMEMoid value) { TOID(struct ctree_map) ctree_map; TOID_ASSIGN(ctree_map, map.oid); return ctree_map_insert(pop, ctree_map, key, value); } /* * map_ctree_insert_new -- wrapper for ctree_map_insert_new */ static int map_ctree_insert_new(PMEMobjpool *pop, TOID(struct map) map, uint64_t key, size_t size, unsigned type_num, void (*constructor)(PMEMobjpool *pop, void *ptr, void *arg), void *arg) { TOID(struct ctree_map) ctree_map; TOID_ASSIGN(ctree_map, map.oid); return ctree_map_insert_new(pop, ctree_map, key, size, type_num, constructor, arg); } /* * map_ctree_remove -- wrapper for ctree_map_remove */ static PMEMoid map_ctree_remove(PMEMobjpool *pop, TOID(struct map) map, uint64_t key) { TOID(struct ctree_map) ctree_map; TOID_ASSIGN(ctree_map, map.oid); return ctree_map_remove(pop, ctree_map, key); } /* * map_ctree_remove_free -- wrapper for ctree_map_remove_free */ static int map_ctree_remove_free(PMEMobjpool *pop, TOID(struct map) map, uint64_t key) { TOID(struct ctree_map) ctree_map; TOID_ASSIGN(ctree_map, map.oid); return ctree_map_remove_free(pop, ctree_map, key); } /* * map_ctree_clear -- wrapper for ctree_map_clear */ static int map_ctree_clear(PMEMobjpool *pop, TOID(struct map) map) { TOID(struct ctree_map) ctree_map; TOID_ASSIGN(ctree_map, map.oid); return ctree_map_clear(pop, ctree_map); } /* * map_ctree_get -- wrapper for ctree_map_get */ static PMEMoid map_ctree_get(PMEMobjpool *pop, TOID(struct map) map, uint64_t key) { TOID(struct ctree_map) ctree_map; TOID_ASSIGN(ctree_map, map.oid); return ctree_map_get(pop, ctree_map, key); } /* * map_ctree_lookup -- wrapper for ctree_map_lookup */ static int map_ctree_lookup(PMEMobjpool *pop, TOID(struct map) map, uint64_t key) { TOID(struct ctree_map) ctree_map; TOID_ASSIGN(ctree_map, map.oid); return ctree_map_lookup(pop, ctree_map, key); } /* * map_ctree_foreach -- wrapper for ctree_map_foreach */ static int map_ctree_foreach(PMEMobjpool *pop, TOID(struct map) map, int (*cb)(uint64_t key, PMEMoid value, void *arg), void *arg) { TOID(struct ctree_map) ctree_map; TOID_ASSIGN(ctree_map, map.oid); return ctree_map_foreach(pop, ctree_map, cb, arg); } /* * map_ctree_is_empty -- wrapper for ctree_map_is_empty */ static int map_ctree_is_empty(PMEMobjpool *pop, TOID(struct map) map) { TOID(struct ctree_map) ctree_map; TOID_ASSIGN(ctree_map, map.oid); return ctree_map_is_empty(pop, ctree_map); } struct map_ops ctree_map_ops = { /* .check = */ map_ctree_check, /* .create = */ map_ctree_create, /* 
.destroy = */ map_ctree_destroy, /* .init = */ NULL, /* .insert = */ map_ctree_insert, /* .insert_new = */ map_ctree_insert_new, /* .remove = */ map_ctree_remove, /* .remove_free = */ map_ctree_remove_free, /* .clear = */ map_ctree_clear, /* .get = */ map_ctree_get, /* .lookup = */ map_ctree_lookup, /* .foreach = */ map_ctree_foreach, /* .is_empty = */ map_ctree_is_empty, /* .count = */ NULL, /* .cmd = */ NULL, };
4,091
21.483516
75
c
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/examples/libpmemobj/map/map_btree.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2017, Intel Corporation */ /* * map_btree.c -- common interface for maps */ #include <map.h> #include <btree_map.h> #include "map_btree.h" /* * map_btree_check -- wrapper for btree_map_check */ static int map_btree_check(PMEMobjpool *pop, TOID(struct map) map) { TOID(struct btree_map) btree_map; TOID_ASSIGN(btree_map, map.oid); return btree_map_check(pop, btree_map); } /* * map_btree_create -- wrapper for btree_map_create */ static int map_btree_create(PMEMobjpool *pop, TOID(struct map) *map, void *arg) { TOID(struct btree_map) *btree_map = (TOID(struct btree_map) *)map; return btree_map_create(pop, btree_map, arg); } /* * map_btree_destroy -- wrapper for btree_map_destroy */ static int map_btree_destroy(PMEMobjpool *pop, TOID(struct map) *map) { TOID(struct btree_map) *btree_map = (TOID(struct btree_map) *)map; return btree_map_destroy(pop, btree_map); } /* * map_btree_insert -- wrapper for btree_map_insert */ static int map_btree_insert(PMEMobjpool *pop, TOID(struct map) map, uint64_t key, PMEMoid value) { TOID(struct btree_map) btree_map; TOID_ASSIGN(btree_map, map.oid); return btree_map_insert(pop, btree_map, key, value); } /* * map_btree_insert_new -- wrapper for btree_map_insert_new */ static int map_btree_insert_new(PMEMobjpool *pop, TOID(struct map) map, uint64_t key, size_t size, unsigned type_num, void (*constructor)(PMEMobjpool *pop, void *ptr, void *arg), void *arg) { TOID(struct btree_map) btree_map; TOID_ASSIGN(btree_map, map.oid); return btree_map_insert_new(pop, btree_map, key, size, type_num, constructor, arg); } /* * map_btree_remove -- wrapper for btree_map_remove */ static PMEMoid map_btree_remove(PMEMobjpool *pop, TOID(struct map) map, uint64_t key) { TOID(struct btree_map) btree_map; TOID_ASSIGN(btree_map, map.oid); return btree_map_remove(pop, btree_map, key); } /* * map_btree_remove_free -- wrapper for btree_map_remove_free */ static int map_btree_remove_free(PMEMobjpool *pop, TOID(struct map) map, uint64_t key) { TOID(struct btree_map) btree_map; TOID_ASSIGN(btree_map, map.oid); return btree_map_remove_free(pop, btree_map, key); } /* * map_btree_clear -- wrapper for btree_map_clear */ static int map_btree_clear(PMEMobjpool *pop, TOID(struct map) map) { TOID(struct btree_map) btree_map; TOID_ASSIGN(btree_map, map.oid); return btree_map_clear(pop, btree_map); } /* * map_btree_get -- wrapper for btree_map_get */ static PMEMoid map_btree_get(PMEMobjpool *pop, TOID(struct map) map, uint64_t key) { TOID(struct btree_map) btree_map; TOID_ASSIGN(btree_map, map.oid); return btree_map_get(pop, btree_map, key); } /* * map_btree_lookup -- wrapper for btree_map_lookup */ static int map_btree_lookup(PMEMobjpool *pop, TOID(struct map) map, uint64_t key) { TOID(struct btree_map) btree_map; TOID_ASSIGN(btree_map, map.oid); return btree_map_lookup(pop, btree_map, key); } /* * map_btree_foreach -- wrapper for btree_map_foreach */ static int map_btree_foreach(PMEMobjpool *pop, TOID(struct map) map, int (*cb)(uint64_t key, PMEMoid value, void *arg), void *arg) { TOID(struct btree_map) btree_map; TOID_ASSIGN(btree_map, map.oid); return btree_map_foreach(pop, btree_map, cb, arg); } /* * map_btree_is_empty -- wrapper for btree_map_is_empty */ static int map_btree_is_empty(PMEMobjpool *pop, TOID(struct map) map) { TOID(struct btree_map) btree_map; TOID_ASSIGN(btree_map, map.oid); return btree_map_is_empty(pop, btree_map); } struct map_ops btree_map_ops = { /* .check = */ map_btree_check, /* .create = */ map_btree_create, /* 
.destroy = */ map_btree_destroy, /* .init = */ NULL, /* .insert = */ map_btree_insert, /* .insert_new = */ map_btree_insert_new, /* .remove = */ map_btree_remove, /* .remove_free = */ map_btree_remove_free, /* .clear = */ map_btree_clear, /* .get = */ map_btree_get, /* .lookup = */ map_btree_lookup, /* .foreach = */ map_btree_foreach, /* .is_empty = */ map_btree_is_empty, /* .count = */ NULL, /* .cmd = */ NULL, };
4,091
21.483516
75
c
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/examples/libpmemobj/map/map_hashmap_atomic.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2017, Intel Corporation */ /* * map_hashmap_atomic.c -- common interface for maps */ #include <map.h> #include <hashmap_atomic.h> #include "map_hashmap_atomic.h" /* * map_hm_atomic_check -- wrapper for hm_atomic_check */ static int map_hm_atomic_check(PMEMobjpool *pop, TOID(struct map) map) { TOID(struct hashmap_atomic) hashmap_atomic; TOID_ASSIGN(hashmap_atomic, map.oid); return hm_atomic_check(pop, hashmap_atomic); } /* * map_hm_atomic_count -- wrapper for hm_atomic_count */ static size_t map_hm_atomic_count(PMEMobjpool *pop, TOID(struct map) map) { TOID(struct hashmap_atomic) hashmap_atomic; TOID_ASSIGN(hashmap_atomic, map.oid); return hm_atomic_count(pop, hashmap_atomic); } /* * map_hm_atomic_init -- wrapper for hm_atomic_init */ static int map_hm_atomic_init(PMEMobjpool *pop, TOID(struct map) map) { TOID(struct hashmap_atomic) hashmap_atomic; TOID_ASSIGN(hashmap_atomic, map.oid); return hm_atomic_init(pop, hashmap_atomic); } /* * map_hm_atomic_new -- wrapper for hm_atomic_create */ static int map_hm_atomic_create(PMEMobjpool *pop, TOID(struct map) *map, void *arg) { TOID(struct hashmap_atomic) *hashmap_atomic = (TOID(struct hashmap_atomic) *)map; return hm_atomic_create(pop, hashmap_atomic, arg); } /* * map_hm_atomic_insert -- wrapper for hm_atomic_insert */ static int map_hm_atomic_insert(PMEMobjpool *pop, TOID(struct map) map, uint64_t key, PMEMoid value) { TOID(struct hashmap_atomic) hashmap_atomic; TOID_ASSIGN(hashmap_atomic, map.oid); return hm_atomic_insert(pop, hashmap_atomic, key, value); } /* * map_hm_atomic_remove -- wrapper for hm_atomic_remove */ static PMEMoid map_hm_atomic_remove(PMEMobjpool *pop, TOID(struct map) map, uint64_t key) { TOID(struct hashmap_atomic) hashmap_atomic; TOID_ASSIGN(hashmap_atomic, map.oid); return hm_atomic_remove(pop, hashmap_atomic, key); } /* * map_hm_atomic_get -- wrapper for hm_atomic_get */ static PMEMoid map_hm_atomic_get(PMEMobjpool *pop, TOID(struct map) map, uint64_t key) { TOID(struct hashmap_atomic) hashmap_atomic; TOID_ASSIGN(hashmap_atomic, map.oid); return hm_atomic_get(pop, hashmap_atomic, key); } /* * map_hm_atomic_lookup -- wrapper for hm_atomic_lookup */ static int map_hm_atomic_lookup(PMEMobjpool *pop, TOID(struct map) map, uint64_t key) { TOID(struct hashmap_atomic) hashmap_atomic; TOID_ASSIGN(hashmap_atomic, map.oid); return hm_atomic_lookup(pop, hashmap_atomic, key); } /* * map_hm_atomic_foreach -- wrapper for hm_atomic_foreach */ static int map_hm_atomic_foreach(PMEMobjpool *pop, TOID(struct map) map, int (*cb)(uint64_t key, PMEMoid value, void *arg), void *arg) { TOID(struct hashmap_atomic) hashmap_atomic; TOID_ASSIGN(hashmap_atomic, map.oid); return hm_atomic_foreach(pop, hashmap_atomic, cb, arg); } /* * map_hm_atomic_cmd -- wrapper for hm_atomic_cmd */ static int map_hm_atomic_cmd(PMEMobjpool *pop, TOID(struct map) map, unsigned cmd, uint64_t arg) { TOID(struct hashmap_atomic) hashmap_atomic; TOID_ASSIGN(hashmap_atomic, map.oid); return hm_atomic_cmd(pop, hashmap_atomic, cmd, arg); } struct map_ops hashmap_atomic_ops = { /* .check = */ map_hm_atomic_check, /* .create = */ map_hm_atomic_create, /* .destroy = */ NULL, /* .init = */ map_hm_atomic_init, /* .insert = */ map_hm_atomic_insert, /* .insert_new = */ NULL, /* .remove = */ map_hm_atomic_remove, /* .remove_free = */ NULL, /* .clear = */ NULL, /* .get = */ map_hm_atomic_get, /* .lookup = */ map_hm_atomic_lookup, /* .foreach = */ map_hm_atomic_foreach, /* .is_empty = */ NULL, /* .count = */ 
map_hm_atomic_count, /* .cmd = */ map_hm_atomic_cmd, };
3,693
22.987013
74
c
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/examples/libpmemobj/map/map_rbtree.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2017, Intel Corporation */ /* * map_rbtree.c -- common interface for maps */ #include <map.h> #include <rbtree_map.h> #include "map_rbtree.h" /* * map_rbtree_check -- wrapper for rbtree_map_check */ static int map_rbtree_check(PMEMobjpool *pop, TOID(struct map) map) { TOID(struct rbtree_map) rbtree_map; TOID_ASSIGN(rbtree_map, map.oid); return rbtree_map_check(pop, rbtree_map); } /* * map_rbtree_create -- wrapper for rbtree_map_new */ static int map_rbtree_create(PMEMobjpool *pop, TOID(struct map) *map, void *arg) { TOID(struct rbtree_map) *rbtree_map = (TOID(struct rbtree_map) *)map; return rbtree_map_create(pop, rbtree_map, arg); } /* * map_rbtree_destroy -- wrapper for rbtree_map_delete */ static int map_rbtree_destroy(PMEMobjpool *pop, TOID(struct map) *map) { TOID(struct rbtree_map) *rbtree_map = (TOID(struct rbtree_map) *)map; return rbtree_map_destroy(pop, rbtree_map); } /* * map_rbtree_insert -- wrapper for rbtree_map_insert */ static int map_rbtree_insert(PMEMobjpool *pop, TOID(struct map) map, uint64_t key, PMEMoid value) { TOID(struct rbtree_map) rbtree_map; TOID_ASSIGN(rbtree_map, map.oid); return rbtree_map_insert(pop, rbtree_map, key, value); } /* * map_rbtree_insert_new -- wrapper for rbtree_map_insert_new */ static int map_rbtree_insert_new(PMEMobjpool *pop, TOID(struct map) map, uint64_t key, size_t size, unsigned type_num, void (*constructor)(PMEMobjpool *pop, void *ptr, void *arg), void *arg) { TOID(struct rbtree_map) rbtree_map; TOID_ASSIGN(rbtree_map, map.oid); return rbtree_map_insert_new(pop, rbtree_map, key, size, type_num, constructor, arg); } /* * map_rbtree_remove -- wrapper for rbtree_map_remove */ static PMEMoid map_rbtree_remove(PMEMobjpool *pop, TOID(struct map) map, uint64_t key) { TOID(struct rbtree_map) rbtree_map; TOID_ASSIGN(rbtree_map, map.oid); return rbtree_map_remove(pop, rbtree_map, key); } /* * map_rbtree_remove_free -- wrapper for rbtree_map_remove_free */ static int map_rbtree_remove_free(PMEMobjpool *pop, TOID(struct map) map, uint64_t key) { TOID(struct rbtree_map) rbtree_map; TOID_ASSIGN(rbtree_map, map.oid); return rbtree_map_remove_free(pop, rbtree_map, key); } /* * map_rbtree_clear -- wrapper for rbtree_map_clear */ static int map_rbtree_clear(PMEMobjpool *pop, TOID(struct map) map) { TOID(struct rbtree_map) rbtree_map; TOID_ASSIGN(rbtree_map, map.oid); return rbtree_map_clear(pop, rbtree_map); } /* * map_rbtree_get -- wrapper for rbtree_map_get */ static PMEMoid map_rbtree_get(PMEMobjpool *pop, TOID(struct map) map, uint64_t key) { TOID(struct rbtree_map) rbtree_map; TOID_ASSIGN(rbtree_map, map.oid); return rbtree_map_get(pop, rbtree_map, key); } /* * map_rbtree_lookup -- wrapper for rbtree_map_lookup */ static int map_rbtree_lookup(PMEMobjpool *pop, TOID(struct map) map, uint64_t key) { TOID(struct rbtree_map) rbtree_map; TOID_ASSIGN(rbtree_map, map.oid); return rbtree_map_lookup(pop, rbtree_map, key); } /* * map_rbtree_foreach -- wrapper for rbtree_map_foreach */ static int map_rbtree_foreach(PMEMobjpool *pop, TOID(struct map) map, int (*cb)(uint64_t key, PMEMoid value, void *arg), void *arg) { TOID(struct rbtree_map) rbtree_map; TOID_ASSIGN(rbtree_map, map.oid); return rbtree_map_foreach(pop, rbtree_map, cb, arg); } /* * map_rbtree_is_empty -- wrapper for rbtree_map_is_empty */ static int map_rbtree_is_empty(PMEMobjpool *pop, TOID(struct map) map) { TOID(struct rbtree_map) rbtree_map; TOID_ASSIGN(rbtree_map, map.oid); return rbtree_map_is_empty(pop, rbtree_map); } struct map_ops 
rbtree_map_ops = { /* .check = */ map_rbtree_check, /* .create = */ map_rbtree_create, /* .destroy = */ map_rbtree_destroy, /* .init = */ NULL, /* .insert = */ map_rbtree_insert, /* .insert_new = */ map_rbtree_insert_new, /* .remove = */ map_rbtree_remove, /* .remove_free = */ map_rbtree_remove_free, /* .clear = */ map_rbtree_clear, /* .get = */ map_rbtree_get, /* .lookup = */ map_rbtree_lookup, /* .foreach = */ map_rbtree_foreach, /* .is_empty = */ map_rbtree_is_empty, /* .count = */ NULL, /* .cmd = */ NULL, };
4,199
22.076923
76
c
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/examples/libpmemobj/tree_map/ctree_map.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2019, Intel Corporation */ /* * ctree_map.c -- Crit-bit trie implementation */ #include <ex_common.h> #include <assert.h> #include <errno.h> #include <stdlib.h> #include <stdio.h> #include "ctree_map.h" #define BIT_IS_SET(n, i) (!!((n) & (1ULL << (i)))) #include <x86intrin.h> static inline uint64_t getCycle(){ uint32_t cycles_high, cycles_low, pid; asm volatile ("RDTSCP\n\t" // rdtscp into eax and edx "mov %%edx, %0\n\t" "mov %%eax, %1\n\t" "mov %%ecx, %2\n\t" :"=r" (cycles_high), "=r" (cycles_low), "=r" (pid) //store in vars :// no input :"%eax", "%edx", "%ecx" // clobbered by rdtscp ); return((uint64_t)cycles_high << 32) | cycles_low; } TOID_DECLARE(struct tree_map_node, CTREE_MAP_TYPE_OFFSET + 1); static void setpage(void * addr){ uint64_t pageNo = ((uint64_t)addr)/4096; unsigned long * pageStart = (unsigned long *)(pageNo*4096); mprotect(pageStart, 4096, PROT_READ); return; } struct tree_map_entry { uint64_t key; PMEMoid slot; }; struct tree_map_node { int diff; /* most significant differing bit */ struct tree_map_entry entries[2]; }; struct ctree_map { struct tree_map_entry root; }; /* * find_crit_bit -- (internal) finds the most significant differing bit */ static int find_crit_bit(uint64_t lhs, uint64_t rhs) { return find_last_set_64(lhs ^ rhs); } /* * ctree_map_create -- allocates a new crit-bit tree instance */ int ctree_map_create(PMEMobjpool *pop, TOID(struct ctree_map) *map, void *arg) { int ret = 0; TX_BEGIN(pop) { pmemobj_tx_add_range_direct(map, sizeof(*map)); *map = TX_ZNEW(struct ctree_map); } TX_ONABORT { ret = 1; } TX_END return ret; } /* * ctree_map_clear_node -- (internal) clears this node and its children */ static void ctree_map_clear_node(PMEMoid p) { if (OID_IS_NULL(p)) return; if (OID_INSTANCEOF(p, struct tree_map_node)) { TOID(struct tree_map_node) node; TOID_ASSIGN(node, p); ctree_map_clear_node(D_RW(node)->entries[0].slot); ctree_map_clear_node(D_RW(node)->entries[1].slot); } pmemobj_tx_free(p); } /* * ctree_map_clear -- removes all elements from the map */ int ctree_map_clear(PMEMobjpool *pop, TOID(struct ctree_map) map) { TX_BEGIN(pop) { ctree_map_clear_node(D_RW(map)->root.slot); TX_ADD_FIELD(map, root); D_RW(map)->root.slot = OID_NULL; } TX_END return 0; } /* * ctree_map_destroy -- cleanups and frees crit-bit tree instance */ int ctree_map_destroy(PMEMobjpool *pop, TOID(struct ctree_map) *map) { int ret = 0; TX_BEGIN(pop) { ctree_map_clear(pop, *map); pmemobj_tx_add_range_direct(map, sizeof(*map)); TX_FREE(*map); *map = TOID_NULL(struct ctree_map); } TX_ONABORT { ret = 1; } TX_END return ret; } /* * ctree_map_insert_leaf -- (internal) inserts a new leaf at the position */ static void ctree_map_insert_leaf(struct tree_map_entry *p, struct tree_map_entry e, int diff) { TOID(struct tree_map_node) new_node = TX_NEW(struct tree_map_node); D_RW(new_node)->diff = diff; int d = BIT_IS_SET(e.key, D_RO(new_node)->diff); /* insert the leaf at the direction based on the critical bit */ D_RW(new_node)->entries[d] = e; /* find the appropriate position in the tree to insert the node */ TOID(struct tree_map_node) node; while (OID_INSTANCEOF(p->slot, struct tree_map_node)) { TOID_ASSIGN(node, p->slot); /* the critical bits have to be sorted */ if (D_RO(node)->diff < D_RO(new_node)->diff) break; p = &D_RW(node)->entries[BIT_IS_SET(e.key, D_RO(node)->diff)]; } pmemobj_tx_add_range_direct(p, sizeof(*p)); /* insert the found destination in the other slot */ D_RW(new_node)->entries[!d] = *p; p->key = 0; p->slot = 
new_node.oid; } /* * ctree_map_insert_new -- allocates a new object and inserts it into the tree */ int ctree_map_insert_new(PMEMobjpool *pop, TOID(struct ctree_map) map, uint64_t key, size_t size, unsigned type_num, void (*constructor)(PMEMobjpool *pop, void *ptr, void *arg), void *arg) { int ret = 0; TX_BEGIN(pop) { PMEMoid n = pmemobj_tx_alloc(size, type_num); constructor(pop, pmemobj_direct(n), arg); ctree_map_insert(pop, map, key, n); } TX_ONABORT { ret = 1; } TX_END return ret; } /* * ctree_map_insert -- inserts a new key-value pair into the map */ #ifdef GET_NDP_BREAKDOWN uint64_t ulogCycles; uint64_t waitCycles; uint64_t ulogcount; #endif int ctree_map_insert(PMEMobjpool *pop, TOID(struct ctree_map) map, uint64_t key, PMEMoid value) { int ret = 0; #ifdef GET_NDP_BREAKDOWN ulogCycles = 0; waitCycles = 0; //ulogcount = 0; //uint64_t maxulogcount=0; #endif #ifdef GET_NDP_PERFORMENCE uint64_t btreetxCycles = 0; uint64_t endCycles, startCycles; for(int i=0;i<RUN_COUNT;i++){ #endif struct tree_map_entry *p = &D_RW(map)->root; /* descend the path until a best matching key is found */ TOID(struct tree_map_node) node; while (!OID_IS_NULL(p->slot) && OID_INSTANCEOF(p->slot, struct tree_map_node)) { TOID_ASSIGN(node, p->slot); p = &D_RW(node)->entries[BIT_IS_SET(key, D_RW(node)->diff)]; } struct tree_map_entry e = {key, value}; #ifdef GET_NDP_PERFORMENCE startCycles = getCycle(); #endif //uint64_t startCycles1,endCycles1; TX_BEGIN(pop) { if (p->key == 0 || p->key == key) { pmemobj_tx_add_range_direct(p, sizeof(*p)); *p = e; } else { ctree_map_insert_leaf(&D_RW(map)->root, e, find_crit_bit(p->key, key)); } } TX_ONABORT { ret = 1; } TX_END //if( maxulogcount < ulogcount) // maxulogcount = ulogcount; //ulogcount = 0; #ifdef GET_NDP_PERFORMENCE endCycles = getCycle(); btreetxCycles += endCycles - startCycles; } double totTime = ((double)btreetxCycles)/2000000000; printf("ctree TX/s = %f\nctree tx total time = %f\n",RUN_COUNT/totTime,totTime); #endif #ifdef GET_NDP_BREAKDOWN printf("ctree tx cmd issue total time = %f\n", (((double)ulogCycles)/2000000000)); printf("ctree tx total wait time = %f\n", (((double)waitCycles)/2000000000)); //printf("maxulogs = %ld\n", maxulogcount); #endif return ret; } /* * ctree_map_get_leaf -- (internal) searches for a leaf of the key */ static struct tree_map_entry * ctree_map_get_leaf(TOID(struct ctree_map) map, uint64_t key, struct tree_map_entry **parent) { struct tree_map_entry *n = &D_RW(map)->root; struct tree_map_entry *p = NULL; TOID(struct tree_map_node) node; while (!OID_IS_NULL(n->slot) && OID_INSTANCEOF(n->slot, struct tree_map_node)) { TOID_ASSIGN(node, n->slot); p = n; n = &D_RW(node)->entries[BIT_IS_SET(key, D_RW(node)->diff)]; } if (n->key == key) { if (parent) *parent = p; return n; } return NULL; } /* * ctree_map_remove_free -- removes and frees an object from the tree */ int ctree_map_remove_free(PMEMobjpool *pop, TOID(struct ctree_map) map, uint64_t key) { int ret = 0; TX_BEGIN(pop) { PMEMoid val = ctree_map_remove(pop, map, key); pmemobj_tx_free(val); } TX_ONABORT { ret = 1; } TX_END return ret; } /* * ctree_map_remove -- removes key-value pair from the map */ PMEMoid ctree_map_remove(PMEMobjpool *pop, TOID(struct ctree_map) map, uint64_t key) { PMEMoid ret; #ifdef GET_NDP_BREAKDOWN ulogCycles = 0; waitCycles = 0; #endif #ifdef GET_NDP_PERFORMENCE uint64_t btreetxCycles = 0; uint64_t endCycles, startCycles; for(int i=0;i<RUN_COUNT;i++){ startCycles = getCycle(); #endif struct tree_map_entry *parent = NULL; struct tree_map_entry *leaf = 
ctree_map_get_leaf(map, key, &parent); if (leaf == NULL) return OID_NULL; ret = leaf->slot; if (parent == NULL) { /* root */ TX_BEGIN(pop) { pmemobj_tx_add_range_direct(leaf, sizeof(*leaf)); leaf->key = 0; leaf->slot = OID_NULL; } TX_END } else { /* * In this situation: * parent * / \ * LEFT RIGHT * there's no point in leaving the parent internal node * so it's swapped with the remaining node and then also freed. */ #ifdef GET_NDP_PERFORMENCE startCycles = getCycle(); #endif TX_BEGIN(pop) { struct tree_map_entry *dest = parent; TOID(struct tree_map_node) node; TOID_ASSIGN(node, parent->slot); pmemobj_tx_add_range_direct(dest, sizeof(*dest)); *dest = D_RW(node)->entries[ D_RO(node)->entries[0].key == leaf->key]; TX_FREE(node); } TX_END #ifdef GET_NDP_PERFORMENCE endCycles = getCycle(); btreetxCycles += endCycles - startCycles; } double totTime = ((double)btreetxCycles)/2000000000; printf("btree TX/s = %f\nctree tx total time = %f\n",RUN_COUNT/totTime,totTime); #endif #ifdef GET_NDP_BREAKDOWN printf("ctree ulog total time = %f\n", (((double)ulogCycles)/2000000000)); printf("ctree total wait time = %f\n", (((double)waitCycles)/2000000000)); #endif } return ret; } /* * ctree_map_get -- searches for a value of the key */ PMEMoid ctree_map_get(PMEMobjpool *pop, TOID(struct ctree_map) map, uint64_t key) { struct tree_map_entry *entry = ctree_map_get_leaf(map, key, NULL); return entry ? entry->slot : OID_NULL; } /* * ctree_map_lookup -- searches if a key exists */ int ctree_map_lookup(PMEMobjpool *pop, TOID(struct ctree_map) map, uint64_t key) { struct tree_map_entry *entry = ctree_map_get_leaf(map, key, NULL); return entry != NULL; } /* * ctree_map_foreach_node -- (internal) recursively traverses tree */ static int ctree_map_foreach_node(struct tree_map_entry e, int (*cb)(uint64_t key, PMEMoid value, void *arg), void *arg) { int ret = 0; if (OID_INSTANCEOF(e.slot, struct tree_map_node)) { TOID(struct tree_map_node) node; TOID_ASSIGN(node, e.slot); if (ctree_map_foreach_node(D_RO(node)->entries[0], cb, arg) == 0) ctree_map_foreach_node(D_RO(node)->entries[1], cb, arg); } else { /* leaf */ ret = cb(e.key, e.slot, arg); } return ret; } /* * ctree_map_foreach -- initiates recursive traversal */ int ctree_map_foreach(PMEMobjpool *pop, TOID(struct ctree_map) map, int (*cb)(uint64_t key, PMEMoid value, void *arg), void *arg) { if (OID_IS_NULL(D_RO(map)->root.slot)) return 0; return ctree_map_foreach_node(D_RO(map)->root, cb, arg); } /* * ctree_map_is_empty -- checks whether the tree map is empty */ int ctree_map_is_empty(PMEMobjpool *pop, TOID(struct ctree_map) map) { return D_RO(map)->root.key == 0; } /* * ctree_map_check -- check if given persistent object is a tree map */ int ctree_map_check(PMEMobjpool *pop, TOID(struct ctree_map) map) { return TOID_IS_NULL(map) || !TOID_VALID(map); }
10,423
22.011038
83
c
null
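ctree_map.c descends by the most significant differing bit: find_crit_bit() XORs the two keys and takes the highest set bit via find_last_set_64() from ex_common.h, and BIT_IS_SET() at that position selects which of the two child entries to follow. The snippet below reproduces that calculation with a compiler builtin, on the assumption that find_last_set_64() returns the 0-based index of the highest set bit; the two sample keys are arbitrary.

#include <stdio.h>
#include <stdint.h>

#define BIT_IS_SET(n, i) (!!((n) & (1ULL << (i))))

/* same semantics as find_crit_bit() in ctree_map.c; the keys must differ,
 * since __builtin_clzll(0) is undefined */
static int find_crit_bit(uint64_t lhs, uint64_t rhs)
{
	return 63 - __builtin_clzll(lhs ^ rhs);
}

int main(void)
{
	uint64_t a = 0x2A;	/* 0b101010 */
	uint64_t b = 0x0A;	/* 0b001010 */

	int diff = find_crit_bit(a, b);		/* bit 5 is the first difference */
	printf("crit bit = %d\n", diff);
	printf("a goes to slot %d, b goes to slot %d\n",
		BIT_IS_SET(a, diff), BIT_IS_SET(b, diff));
	return 0;
}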
NearPMSW-main/nearpm/shadow/pmdk-sd/src/examples/libpmemobj/tree_map/ctree_map.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2017, Intel Corporation */ /* * ctree_map.h -- TreeMap sorted collection implementation */ #ifndef CTREE_MAP_H #define CTREE_MAP_H #include <libpmemobj.h> #ifndef CTREE_MAP_TYPE_OFFSET #define CTREE_MAP_TYPE_OFFSET 1008 #endif struct ctree_map; TOID_DECLARE(struct ctree_map, CTREE_MAP_TYPE_OFFSET + 0); int ctree_map_check(PMEMobjpool *pop, TOID(struct ctree_map) map); int ctree_map_create(PMEMobjpool *pop, TOID(struct ctree_map) *map, void *arg); int ctree_map_destroy(PMEMobjpool *pop, TOID(struct ctree_map) *map); int ctree_map_insert(PMEMobjpool *pop, TOID(struct ctree_map) map, uint64_t key, PMEMoid value); int ctree_map_insert_new(PMEMobjpool *pop, TOID(struct ctree_map) map, uint64_t key, size_t size, unsigned type_num, void (*constructor)(PMEMobjpool *pop, void *ptr, void *arg), void *arg); PMEMoid ctree_map_remove(PMEMobjpool *pop, TOID(struct ctree_map) map, uint64_t key); int ctree_map_remove_free(PMEMobjpool *pop, TOID(struct ctree_map) map, uint64_t key); int ctree_map_clear(PMEMobjpool *pop, TOID(struct ctree_map) map); PMEMoid ctree_map_get(PMEMobjpool *pop, TOID(struct ctree_map) map, uint64_t key); int ctree_map_lookup(PMEMobjpool *pop, TOID(struct ctree_map) map, uint64_t key); int ctree_map_foreach(PMEMobjpool *pop, TOID(struct ctree_map) map, int (*cb)(uint64_t key, PMEMoid value, void *arg), void *arg); int ctree_map_is_empty(PMEMobjpool *pop, TOID(struct ctree_map) map); #endif /* CTREE_MAP_H */
1,523
34.44186
79
h
null
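A minimal usage sketch for the interface declared above, assuming libpmemobj and these example sources are compiled together; the pool path, the PMEMOBJ_MIN_POOL size, the layout name ctree_demo, and the use of OID_NULL as a stand-in value are placeholders chosen for illustration rather than anything prescribed by the header.

#include <stdio.h>
#include <libpmemobj.h>
#include "ctree_map.h"

POBJ_LAYOUT_BEGIN(ctree_demo);
POBJ_LAYOUT_ROOT(ctree_demo, struct demo_root);
POBJ_LAYOUT_END(ctree_demo);

struct demo_root {
	TOID(struct ctree_map) map;
};

int main(void)
{
	PMEMobjpool *pop = pmemobj_create("/tmp/ctree_demo.pool",
		POBJ_LAYOUT_NAME(ctree_demo), PMEMOBJ_MIN_POOL, 0666);
	if (pop == NULL)
		return 1;

	TOID(struct demo_root) root = POBJ_ROOT(pop, struct demo_root);

	/* create the map inside the root object, then insert and look up */
	ctree_map_create(pop, &D_RW(root)->map, NULL);
	ctree_map_insert(pop, D_RO(root)->map, 42, OID_NULL);
	printf("42 present: %d\n", ctree_map_lookup(pop, D_RO(root)->map, 42));

	pmemobj_close(pop);
	return 0;
}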
NearPMSW-main/nearpm/shadow/pmdk-sd/src/examples/libpmemobj/tree_map/rtree_map.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2016-2019, Intel Corporation */ /* * rtree_map.c -- implementation of rtree */ #include <ex_common.h> #include <assert.h> #include <errno.h> #include <limits.h> #include <stdlib.h> #include <stdbool.h> #include <stdio.h> #include "rtree_map.h" #include <x86intrin.h> static inline uint64_t getCycle(){ uint32_t cycles_high, cycles_low, pid; asm volatile ("RDTSCP\n\t" // rdtscp into eax and edx "mov %%edx, %0\n\t" "mov %%eax, %1\n\t" "mov %%ecx, %2\n\t" :"=r" (cycles_high), "=r" (cycles_low), "=r" (pid) //store in vars :// no input :"%eax", "%edx", "%ecx" // clobbered by rdtscp ); return((uint64_t)cycles_high << 32) | cycles_low; } TOID_DECLARE(struct tree_map_node, RTREE_MAP_TYPE_OFFSET + 1); /* Good values: 0x10 an 0x100, but implementation is bound to 0x100 */ #ifndef ALPHABET_SIZE #define ALPHABET_SIZE 0x100 #endif struct tree_map_node { TOID(struct tree_map_node) slots[ALPHABET_SIZE]; unsigned has_value; PMEMoid value; uint64_t key_size; unsigned char key[]; }; struct rtree_map { TOID(struct tree_map_node) root; }; /* * rtree_map_create -- allocates a new rtree instance */ int rtree_map_create(PMEMobjpool *pop, TOID(struct rtree_map) *map, void *arg) { int ret = 0; TX_BEGIN(pop) { TX_ADD_DIRECT(map); *map = TX_ZNEW(struct rtree_map); } TX_ONABORT { ret = 1; } TX_END return ret; } /* * rtree_map_clear_node -- (internal) removes all elements from the node */ static void rtree_map_clear_node(TOID(struct tree_map_node) node) { for (unsigned i = 0; i < ALPHABET_SIZE; i++) { rtree_map_clear_node(D_RO(node)->slots[i]); } pmemobj_tx_add_range(node.oid, 0, sizeof(struct tree_map_node) + D_RO(node)->key_size); TX_FREE(node); } /* * rtree_map_clear -- removes all elements from the map */ int rtree_map_clear(PMEMobjpool *pop, TOID(struct rtree_map) map) { int ret = 0; TX_BEGIN(pop) { rtree_map_clear_node(D_RO(map)->root); TX_ADD_FIELD(map, root); D_RW(map)->root = TOID_NULL(struct tree_map_node); } TX_ONABORT { ret = 1; } TX_END return ret; } /* * rtree_map_destroy -- cleanups and frees rtree instance */ int rtree_map_destroy(PMEMobjpool *pop, TOID(struct rtree_map) *map) { int ret = 0; TX_BEGIN(pop) { rtree_map_clear(pop, *map); TX_ADD_DIRECT(map); TX_FREE(*map); *map = TOID_NULL(struct rtree_map); } TX_ONABORT { ret = 1; } TX_END return ret; } /* * rtree_new_node -- (internal) inserts a node into an empty map */ static TOID(struct tree_map_node) rtree_new_node(const unsigned char *key, uint64_t key_size, PMEMoid value, unsigned has_value) { TOID(struct tree_map_node) node; node = TX_ZALLOC(struct tree_map_node, sizeof(struct tree_map_node) + key_size); /* * !!! Here should be: D_RO(node)->value * ... 
because we don't change map */ D_RW(node)->value = value; D_RW(node)->has_value = has_value; D_RW(node)->key_size = key_size; memcpy(D_RW(node)->key, key, key_size); return node; } /* * rtree_map_insert_empty -- (internal) inserts a node into an empty map */ static void rtree_map_insert_empty(TOID(struct rtree_map) map, const unsigned char *key, uint64_t key_size, PMEMoid value) { TX_ADD_FIELD(map, root); D_RW(map)->root = rtree_new_node(key, key_size, value, 1); } /* * key_comm_len -- (internal) calculate the len of common part of keys */ static unsigned key_comm_len(TOID(struct tree_map_node) node, const unsigned char *key, uint64_t key_size) { unsigned i; for (i = 0; i < MIN(key_size, D_RO(node)->key_size) && key[i] == D_RO(node)->key[i]; i++) ; return i; } /* * rtree_map_insert_value -- (internal) inserts a pair into a tree */ static void rtree_map_insert_value(TOID(struct tree_map_node) *node, const unsigned char *key, uint64_t key_size, PMEMoid value) { unsigned i; if (TOID_IS_NULL(*node)) { TX_ADD_DIRECT(node); *node = rtree_new_node(key, key_size, value, 1); return; } i = key_comm_len(*node, key, key_size); if (i != D_RO(*node)->key_size) { /* Node does not exist. Let's add. */ TOID(struct tree_map_node) orig_node = *node; TX_ADD_DIRECT(node); if (i != key_size) { *node = rtree_new_node(D_RO(orig_node)->key, i, OID_NULL, 0); } else { *node = rtree_new_node(D_RO(orig_node)->key, i, value, 1); } D_RW(*node)->slots[D_RO(orig_node)->key[i]] = orig_node; TX_ADD_FIELD(orig_node, key_size); D_RW(orig_node)->key_size -= i; pmemobj_tx_add_range_direct(D_RW(orig_node)->key, D_RO(orig_node)->key_size); memmove(D_RW(orig_node)->key, D_RO(orig_node)->key + i, D_RO(orig_node)->key_size); if (i != key_size) { D_RW(*node)->slots[key[i]] = rtree_new_node(key + i, key_size - i, value, 1); } return; } if (i == key_size) { if (OID_IS_NULL(D_RO(*node)->value) || D_RO(*node)->has_value) { /* Just replace old value with new */ TX_ADD_FIELD(*node, value); TX_ADD_FIELD(*node, has_value); D_RW(*node)->value = value; D_RW(*node)->has_value = 1; } else { /* * Ignore. By the fact current value should be * removed in advance, or handled in a different way. 
*/ } } else { /* Recurse deeply */ return rtree_map_insert_value(&D_RW(*node)->slots[key[i]], key + i, key_size - i, value); } } /* * rtree_map_is_empty -- checks whether the tree map is empty */ int rtree_map_is_empty(PMEMobjpool *pop, TOID(struct rtree_map) map) { return TOID_IS_NULL(D_RO(map)->root); } /* * rtree_map_insert -- inserts a new key-value pair into the map */ #ifdef GET_NDP_BREAKDOWN uint64_t ulogCycles; uint64_t waitCycles; uint64_t resetCycles; #endif int rtree_map_insert(PMEMobjpool *pop, TOID(struct rtree_map) map, const unsigned char *key, uint64_t key_size, PMEMoid value) { int ret = 0; #ifdef GET_NDP_BREAKDOWN ulogCycles = 0; waitCycles = 0; #endif #ifdef GET_NDP_PERFORMENCE uint64_t btreetxCycles = 0; uint64_t endCycles, startCycles; for(int i=0;i<RUN_COUNT;i++){ startCycles = getCycle(); #endif TX_BEGIN(pop) { if (rtree_map_is_empty(pop, map)) { rtree_map_insert_empty(map, key, key_size, value); } else { rtree_map_insert_value(&D_RW(map)->root, key, key_size, value); } } TX_ONABORT { ret = 1; } TX_END #ifdef GET_NDP_PERFORMENCE endCycles = getCycle(); btreetxCycles += endCycles - startCycles; } double totTime = ((double)btreetxCycles)/2000000000; printf("ctree TX/s = %f\nctree tx total time = %f\n",RUN_COUNT/totTime,totTime); #endif #ifdef GET_NDP_BREAKDOWN printf("ctree tx cmd issue total time = %f\n", (((double)ulogCycles)/2000000000)); printf("ctree tx total wait time = %f\n", (((double)waitCycles)/2000000000)); #endif return ret; } /* * rtree_map_insert_new -- allocates a new object and inserts it into the tree */ int rtree_map_insert_new(PMEMobjpool *pop, TOID(struct rtree_map) map, const unsigned char *key, uint64_t key_size, size_t size, unsigned type_num, void (*constructor)(PMEMobjpool *pop, void *ptr, void *arg), void *arg) { int ret = 0; TX_BEGIN(pop) { PMEMoid n = pmemobj_tx_alloc(size, type_num); constructor(pop, pmemobj_direct(n), arg); rtree_map_insert(pop, map, key, key_size, n); } TX_ONABORT { ret = 1; } TX_END return ret; } /* * is_leaf -- (internal) check a node for zero qty of children */ static bool is_leaf(TOID(struct tree_map_node) node) { unsigned j; for (j = 0; j < ALPHABET_SIZE && TOID_IS_NULL(D_RO(node)->slots[j]); j++) ; return (j == ALPHABET_SIZE); } /* * has_only_one_child -- (internal) check a node for qty of children */ static bool has_only_one_child(TOID(struct tree_map_node) node, unsigned *child_idx) { unsigned j, child_qty; for (j = 0, child_qty = 0; j < ALPHABET_SIZE; j++) if (!TOID_IS_NULL(D_RO(node)->slots[j])) { child_qty++; *child_idx = j; } return (1 == child_qty); } /* * remove_extra_node -- (internal) remove unneeded extra node */ static void remove_extra_node(TOID(struct tree_map_node) *node) { unsigned child_idx = UINT_MAX; TOID(struct tree_map_node) tmp, tmp_child; /* Our node has child with only one child. */ tmp = *node; has_only_one_child(tmp, &child_idx); assert(child_idx != UINT_MAX); tmp_child = D_RO(tmp)->slots[child_idx]; /* * That child's incoming label is appended to the ours incoming label * and the child is removed. 
*/ uint64_t new_key_size = D_RO(tmp)->key_size + D_RO(tmp_child)->key_size; unsigned char *new_key = (unsigned char *)malloc(new_key_size); assert(new_key != NULL); memcpy(new_key, D_RO(tmp)->key, D_RO(tmp)->key_size); memcpy(new_key + D_RO(tmp)->key_size, D_RO(tmp_child)->key, D_RO(tmp_child)->key_size); TX_ADD_DIRECT(node); *node = rtree_new_node(new_key, new_key_size, D_RO(tmp_child)->value, D_RO(tmp_child)->has_value); free(new_key); TX_FREE(tmp); memcpy(D_RW(*node)->slots, D_RO(tmp_child)->slots, sizeof(D_RO(tmp_child)->slots)); TX_FREE(tmp_child); } /* * rtree_map_remove_node -- (internal) removes node from tree */ static PMEMoid rtree_map_remove_node(TOID(struct rtree_map) map, TOID(struct tree_map_node) *node, const unsigned char *key, uint64_t key_size, bool *check_for_child) { bool c4c; unsigned i, child_idx; PMEMoid ret = OID_NULL; *check_for_child = false; if (TOID_IS_NULL(*node)) return OID_NULL; i = key_comm_len(*node, key, key_size); if (i != D_RO(*node)->key_size) /* Node does not exist */ return OID_NULL; if (i == key_size) { if (0 == D_RO(*node)->has_value) return OID_NULL; /* Node is found */ ret = D_RO(*node)->value; /* delete node from tree */ TX_ADD_FIELD((*node), value); TX_ADD_FIELD((*node), has_value); D_RW(*node)->value = OID_NULL; D_RW(*node)->has_value = 0; if (is_leaf(*node)) { pmemobj_tx_add_range(node->oid, 0, sizeof(*node) + D_RO(*node)->key_size); TX_FREE(*node); TX_ADD_DIRECT(node); (*node) = TOID_NULL(struct tree_map_node); } return ret; } /* Recurse deeply */ ret = rtree_map_remove_node(map, &D_RW(*node)->slots[key[i]], key + i, key_size - i, &c4c); if (c4c) { /* Our node has child with only one child. Remove. */ remove_extra_node(&D_RW(*node)->slots[key[i]]); return ret; } if (has_only_one_child(*node, &child_idx) && (0 == D_RO(*node)->has_value)) { *check_for_child = true; } return ret; } /* * rtree_map_remove -- removes key-value pair from the map */ PMEMoid rtree_map_remove(PMEMobjpool *pop, TOID(struct rtree_map) map, const unsigned char *key, uint64_t key_size) { PMEMoid ret; #ifdef GET_NDP_BREAKDOWN ulogCycles = 0; waitCycles = 0; #endif #ifdef GET_NDP_PERFORMENCE uint64_t btreetxCycles = 0; uint64_t endCycles, startCycles; for(int i=0;i<RUN_COUNT;i++){ #endif ret = OID_NULL; bool check_for_child; if (TOID_IS_NULL(map)) return OID_NULL; #ifdef GET_NDP_PERFORMENCE startCycles = getCycle(); #endif TX_BEGIN(pop) { ret = rtree_map_remove_node(map, &D_RW(map)->root, key, key_size, &check_for_child); if (check_for_child) { /* Our root node has only one child. Remove. 
*/ remove_extra_node(&D_RW(map)->root); } } TX_END #ifdef GET_NDP_PERFORMENCE endCycles = getCycle(); btreetxCycles += endCycles - startCycles; } double totTime = ((double)btreetxCycles)/2000000000; printf("ctree TX/s = %f\nctree tx total time = %f\n",RUN_COUNT/totTime,totTime); #endif #ifdef GET_NDP_BREAKDOWN printf("ctree tx cmd issue total time = %f\n", (((double)ulogCycles)/2000000000)); printf("ctree tx total wait time = %f\n", (((double)waitCycles)/2000000000)); #endif return ret; } /* * rtree_map_remove_free -- removes and frees an object from the tree */ int rtree_map_remove_free(PMEMobjpool *pop, TOID(struct rtree_map) map, const unsigned char *key, uint64_t key_size) { int ret = 0; if (TOID_IS_NULL(map)) return 1; TX_BEGIN(pop) { pmemobj_tx_free(rtree_map_remove(pop, map, key, key_size)); } TX_ONABORT { ret = 1; } TX_END return ret; } /* * rtree_map_get_in_node -- (internal) searches for a value in the node */ static PMEMoid rtree_map_get_in_node(TOID(struct tree_map_node) node, const unsigned char *key, uint64_t key_size) { unsigned i; if (TOID_IS_NULL(node)) return OID_NULL; i = key_comm_len(node, key, key_size); if (i != D_RO(node)->key_size) /* Node does not exist */ return OID_NULL; if (i == key_size) { /* Node is found */ return D_RO(node)->value; } else { /* Recurse deeply */ return rtree_map_get_in_node(D_RO(node)->slots[key[i]], key + i, key_size - i); } } /* * rtree_map_get -- searches for a value of the key */ PMEMoid rtree_map_get(PMEMobjpool *pop, TOID(struct rtree_map) map, const unsigned char *key, uint64_t key_size) { if (TOID_IS_NULL(D_RO(map)->root)) return OID_NULL; return rtree_map_get_in_node(D_RO(map)->root, key, key_size); } /* * rtree_map_lookup_in_node -- (internal) searches for key if exists */ static int rtree_map_lookup_in_node(TOID(struct tree_map_node) node, const unsigned char *key, uint64_t key_size) { unsigned i; if (TOID_IS_NULL(node)) return 0; i = key_comm_len(node, key, key_size); if (i != D_RO(node)->key_size) /* Node does not exist */ return 0; if (i == key_size) { /* Node is found */ return 1; } /* Recurse deeply */ return rtree_map_lookup_in_node(D_RO(node)->slots[key[i]], key + i, key_size - i); } /* * rtree_map_lookup -- searches if key exists */ int rtree_map_lookup(PMEMobjpool *pop, TOID(struct rtree_map) map, const unsigned char *key, uint64_t key_size) { if (TOID_IS_NULL(D_RO(map)->root)) return 0; return rtree_map_lookup_in_node(D_RO(map)->root, key, key_size); } /* * rtree_map_foreach_node -- (internal) recursively traverses tree */ static int rtree_map_foreach_node(const TOID(struct tree_map_node) node, int (*cb)(const unsigned char *key, uint64_t key_size, PMEMoid, void *arg), void *arg) { unsigned i; if (TOID_IS_NULL(node)) return 0; for (i = 0; i < ALPHABET_SIZE; i++) { if (rtree_map_foreach_node(D_RO(node)->slots[i], cb, arg) != 0) return 1; } if (NULL != cb) { if (cb(D_RO(node)->key, D_RO(node)->key_size, D_RO(node)->value, arg) != 0) return 1; } return 0; } /* * rtree_map_foreach -- initiates recursive traversal */ int rtree_map_foreach(PMEMobjpool *pop, TOID(struct rtree_map) map, int (*cb)(const unsigned char *key, uint64_t key_size, PMEMoid value, void *arg), void *arg) { return rtree_map_foreach_node(D_RO(map)->root, cb, arg); } /* * ctree_map_check -- check if given persistent object is a tree map */ int rtree_map_check(PMEMobjpool *pop, TOID(struct rtree_map) map) { return TOID_IS_NULL(map) || !TOID_VALID(map); }
14,705
21.081081
83
c
null
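The radix-tree map above exposes a byte-string keyed API: rtree_map_insert(), rtree_map_lookup() and rtree_map_remove() all identify keys by a (pointer, key_size) pair, and values are PMEMoid handles. The sketch below is a minimal, hypothetical usage example and not part of the repository; the pool path, the layout name "rtree_example" and the my_root struct are assumptions introduced only for illustration.

#include <libpmemobj.h>
#include "rtree_map.h"

/* hypothetical root object holding the map handle (assumption) */
struct my_root {
	TOID(struct rtree_map) map;
};

POBJ_LAYOUT_BEGIN(rtree_example);
POBJ_LAYOUT_ROOT(rtree_example, struct my_root);
POBJ_LAYOUT_END(rtree_example);

static int
run_rtree_example(const char *path)
{
	PMEMobjpool *pop = pmemobj_create(path,
		POBJ_LAYOUT_NAME(rtree_example), PMEMOBJ_MIN_POOL, 0666);
	if (pop == NULL)
		return 1;

	TOID(struct my_root) root = POBJ_ROOT(pop, struct my_root);

	/* create an empty map rooted in the pool's root object */
	if (rtree_map_create(pop, &D_RW(root)->map, NULL))
		goto err;

	/* keys are arbitrary byte strings identified by (pointer, length) */
	const unsigned char key[] = "hello";
	if (rtree_map_insert(pop, D_RO(root)->map, key, sizeof(key), OID_NULL))
		goto err;

	int found = rtree_map_lookup(pop, D_RO(root)->map, key, sizeof(key));

	pmemobj_close(pop);
	return found ? 0 : 1;
err:
	pmemobj_close(pop);
	return 1;
}
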
NearPMSW-main/nearpm/shadow/pmdk-sd/src/examples/libpmemobj/tree_map/btree_map.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2019, Intel Corporation */ /* * btree_map.c -- textbook implementation of btree /w preemptive splitting */ #include <assert.h> #include <errno.h> #include <stdio.h> #include "btree_map.h" #ifdef GET_NDP_PERFORMENCE #include <x86intrin.h> static inline uint64_t getCycle(){ uint32_t cycles_high, cycles_low, pid; asm volatile ("RDTSCP\n\t" // rdtscp into eax and edx "mov %%edx, %0\n\t" "mov %%eax, %1\n\t" "mov %%ecx, %2\n\t" :"=r" (cycles_high), "=r" (cycles_low), "=r" (pid) //store in vars :// no input :"%eax", "%edx", "%ecx" // clobbered by rdtscp ); return((uint64_t)cycles_high << 32) | cycles_low; } #endif static void setpage(void * addr){ uint64_t pageNo = ((uint64_t)addr)/4096; unsigned long * pageStart = (unsigned long *)(pageNo*4096); mprotect(pageStart, 4096, PROT_READ); return; } TOID_DECLARE(struct tree_map_node, BTREE_MAP_TYPE_OFFSET + 1); #define BTREE_ORDER 8 /* can't be odd */ #define BTREE_MIN ((BTREE_ORDER / 2) - 1) /* min number of keys per node */ struct tree_map_node_item { uint64_t key; PMEMoid value; }; struct tree_map_node { int n; /* number of occupied slots */ struct tree_map_node_item items[BTREE_ORDER - 1]; TOID(struct tree_map_node) slots[BTREE_ORDER]; }; struct btree_map { TOID(struct tree_map_node) root; }; /* * set_empty_item -- (internal) sets null to the item */ static void set_empty_item(struct tree_map_node_item *item) { item->key = 0; item->value = OID_NULL; } /* * btree_map_create -- allocates a new btree instance */ int btree_map_create(PMEMobjpool *pop, TOID(struct btree_map) *map, void *arg) { int ret = 0; TX_BEGIN(pop) { pmemobj_tx_add_range_direct(map, sizeof(*map)); *map = TX_ZNEW(struct btree_map); } TX_ONABORT { ret = 1; } TX_END return ret; } /* * btree_map_clear_node -- (internal) removes all elements from the node */ static void btree_map_clear_node(TOID(struct tree_map_node) node) { for (int i = 0; i < D_RO(node)->n; ++i) { btree_map_clear_node(D_RO(node)->slots[i]); } TX_FREE(node); } /* * btree_map_clear -- removes all elements from the map */ int btree_map_clear(PMEMobjpool *pop, TOID(struct btree_map) map) { int ret = 0; TX_BEGIN(pop) { btree_map_clear_node(D_RO(map)->root); TX_ADD_FIELD(map, root); D_RW(map)->root = TOID_NULL(struct tree_map_node); } TX_ONABORT { ret = 1; } TX_END return ret; } /* * btree_map_destroy -- cleanups and frees btree instance */ int btree_map_destroy(PMEMobjpool *pop, TOID(struct btree_map) *map) { int ret = 0; TX_BEGIN(pop) { btree_map_clear(pop, *map); pmemobj_tx_add_range_direct(map, sizeof(*map)); TX_FREE(*map); *map = TOID_NULL(struct btree_map); } TX_ONABORT { ret = 1; } TX_END return ret; } /* * btree_map_insert_item_at -- (internal) inserts an item at position */ static void btree_map_insert_item_at(TOID(struct tree_map_node) node, int pos, struct tree_map_node_item item) { D_RW(node)->items[pos] = item; D_RW(node)->n += 1; } /* * btree_map_insert_empty -- (internal) inserts an item into an empty node */ static void btree_map_insert_empty(TOID(struct btree_map) map, struct tree_map_node_item item) { TX_ADD_FIELD(map, root); D_RW(map)->root = TX_ZNEW(struct tree_map_node); btree_map_insert_item_at(D_RO(map)->root, 0, item); } /* * btree_map_insert_node -- (internal) inserts and makes space for new node */ static void btree_map_insert_node(TOID(struct tree_map_node) node, int p, struct tree_map_node_item item, TOID(struct tree_map_node) left, TOID(struct tree_map_node) right) { TX_ADD(node); if (D_RO(node)->items[p].key != 0) { /* move all existing 
data */ memmove(&D_RW(node)->items[p + 1], &D_RW(node)->items[p], sizeof(struct tree_map_node_item) * ((BTREE_ORDER - 2 - p))); memmove(&D_RW(node)->slots[p + 1], &D_RW(node)->slots[p], sizeof(TOID(struct tree_map_node)) * ((BTREE_ORDER - 1 - p))); } D_RW(node)->slots[p] = left; D_RW(node)->slots[p + 1] = right; btree_map_insert_item_at(node, p, item); } /* * btree_map_create_split_node -- (internal) splits a node into two */ static TOID(struct tree_map_node) btree_map_create_split_node(TOID(struct tree_map_node) node, struct tree_map_node_item *m) { TOID(struct tree_map_node) right = TX_ZNEW(struct tree_map_node); int c = (BTREE_ORDER / 2); *m = D_RO(node)->items[c - 1]; /* select median item */ TX_ADD(node); set_empty_item(&D_RW(node)->items[c - 1]); /* move everything right side of median to the new node */ for (int i = c; i < BTREE_ORDER; ++i) { if (i != BTREE_ORDER - 1) { D_RW(right)->items[D_RW(right)->n++] = D_RO(node)->items[i]; set_empty_item(&D_RW(node)->items[i]); } D_RW(right)->slots[i - c] = D_RO(node)->slots[i]; D_RW(node)->slots[i] = TOID_NULL(struct tree_map_node); } D_RW(node)->n = c - 1; return right; } /* * btree_map_find_dest_node -- (internal) finds a place to insert the new key at */ static TOID(struct tree_map_node) btree_map_find_dest_node(TOID(struct btree_map) map, TOID(struct tree_map_node) n, TOID(struct tree_map_node) parent, uint64_t key, int *p) { if (D_RO(n)->n == BTREE_ORDER - 1) { /* node is full, perform a split */ struct tree_map_node_item m; TOID(struct tree_map_node) right = btree_map_create_split_node(n, &m); if (!TOID_IS_NULL(parent)) { btree_map_insert_node(parent, *p, m, n, right); if (key > m.key) /* select node to continue search */ n = right; } else { /* replacing root node, the tree grows in height */ TOID(struct tree_map_node) up = TX_ZNEW(struct tree_map_node); D_RW(up)->n = 1; D_RW(up)->items[0] = m; D_RW(up)->slots[0] = n; D_RW(up)->slots[1] = right; TX_ADD_FIELD(map, root); D_RW(map)->root = up; n = up; } } int i; for (i = 0; i < BTREE_ORDER - 1; ++i) { *p = i; /* * The key either fits somewhere in the middle or at the * right edge of the node. */ if (D_RO(n)->n == i || D_RO(n)->items[i].key > key) { return TOID_IS_NULL(D_RO(n)->slots[i]) ? n : btree_map_find_dest_node(map, D_RO(n)->slots[i], n, key, p); } } /* * The key is bigger than the last node element, go one level deeper * in the rightmost child. 
*/ return btree_map_find_dest_node(map, D_RO(n)->slots[i], n, key, p); } /* * btree_map_insert_item -- (internal) inserts and makes space for new item */ static void btree_map_insert_item(TOID(struct tree_map_node) node, int p, struct tree_map_node_item item) { TX_ADD(node); if (D_RO(node)->items[p].key != 0) { memmove(&D_RW(node)->items[p + 1], &D_RW(node)->items[p], sizeof(struct tree_map_node_item) * ((BTREE_ORDER - 2 - p))); } btree_map_insert_item_at(node, p, item); } /* * btree_map_is_empty -- checks whether the tree map is empty */ int btree_map_is_empty(PMEMobjpool *pop, TOID(struct btree_map) map) { return TOID_IS_NULL(D_RO(map)->root) || D_RO(D_RO(map)->root)->n == 0; } /* * btree_map_insert -- inserts a new key-value pair into the map */ #ifdef GET_NDP_BREAKDOWN uint64_t ulogCycles; uint64_t waitCycles; #endif int btree_map_insert(PMEMobjpool *pop, TOID(struct btree_map) map, uint64_t key, PMEMoid value) { #ifdef GET_NDP_BREAKDOWN ulogCycles = 0; waitCycles = 0; #endif #ifdef GET_NDP_PERFORMENCE uint64_t btreetxCycles = 0; uint64_t endCycles, startCycles; for(int i=0;i<RUN_COUNT;i++){ #endif struct tree_map_node_item item = {key, value}; #ifdef GET_NDP_PERFORMENCE startCycles = getCycle(); #endif TX_BEGIN(pop) { if (btree_map_is_empty(pop, map)) { btree_map_insert_empty(map, item); } else { int p; /* position at the dest node to insert */ TOID(struct tree_map_node) parent = TOID_NULL(struct tree_map_node); TOID(struct tree_map_node) dest = btree_map_find_dest_node(map, D_RW(map)->root, parent, key, &p); btree_map_insert_item(dest, p, item); } } TX_END #ifdef GET_NDP_PERFORMENCE endCycles = getCycle(); btreetxCycles += endCycles - startCycles; } double totTime = ((double)btreetxCycles)/2000000000; printf("btree TX/s = %f\nbtree 1 tx total time = %f\n",RUN_COUNT/totTime,totTime); #endif #ifdef GET_NDP_BREAKDOWN printf("btree 1 tx ulog time = %f\n", (((double)ulogCycles)/2000000000)); printf("btree 1 tx wait time = %f\n", (((double)waitCycles)/2000000000)); #endif return 0; } /* * btree_map_rotate_right -- (internal) takes one element from right sibling */ static void btree_map_rotate_right(TOID(struct tree_map_node) rsb, TOID(struct tree_map_node) node, TOID(struct tree_map_node) parent, int p) { /* move the separator from parent to the deficient node */ struct tree_map_node_item sep = D_RO(parent)->items[p]; btree_map_insert_item(node, D_RO(node)->n, sep); /* the first element of the right sibling is the new separator */ TX_ADD_FIELD(parent, items[p]); D_RW(parent)->items[p] = D_RO(rsb)->items[0]; /* the nodes are not necessarily leafs, so copy also the slot */ TX_ADD_FIELD(node, slots[D_RO(node)->n]); D_RW(node)->slots[D_RO(node)->n] = D_RO(rsb)->slots[0]; TX_ADD(rsb); D_RW(rsb)->n -= 1; /* it loses one element, but still > min */ /* move all existing elements back by one array slot */ memmove(D_RW(rsb)->items, D_RO(rsb)->items + 1, sizeof(struct tree_map_node_item) * (D_RO(rsb)->n)); memmove(D_RW(rsb)->slots, D_RO(rsb)->slots + 1, sizeof(TOID(struct tree_map_node)) * (D_RO(rsb)->n + 1)); } /* * btree_map_rotate_left -- (internal) takes one element from left sibling */ static void btree_map_rotate_left(TOID(struct tree_map_node) lsb, TOID(struct tree_map_node) node, TOID(struct tree_map_node) parent, int p) { /* move the separator from parent to the deficient node */ struct tree_map_node_item sep = D_RO(parent)->items[p - 1]; btree_map_insert_item(node, 0, sep); /* the last element of the left sibling is the new separator */ TX_ADD_FIELD(parent, items[p - 1]); 
D_RW(parent)->items[p - 1] = D_RO(lsb)->items[D_RO(lsb)->n - 1]; /* rotate the node children */ memmove(D_RW(node)->slots + 1, D_RO(node)->slots, sizeof(TOID(struct tree_map_node)) * (D_RO(node)->n)); /* the nodes are not necessarily leafs, so copy also the slot */ D_RW(node)->slots[0] = D_RO(lsb)->slots[D_RO(lsb)->n]; TX_ADD_FIELD(lsb, n); D_RW(lsb)->n -= 1; /* it loses one element, but still > min */ } /* * btree_map_merge -- (internal) merges node and right sibling */ static void btree_map_merge(TOID(struct btree_map) map, TOID(struct tree_map_node) rn, TOID(struct tree_map_node) node, TOID(struct tree_map_node) parent, int p) { struct tree_map_node_item sep = D_RO(parent)->items[p]; TX_ADD(node); /* add separator to the deficient node */ D_RW(node)->items[D_RW(node)->n++] = sep; /* copy right sibling data to node */ memcpy(&D_RW(node)->items[D_RO(node)->n], D_RO(rn)->items, sizeof(struct tree_map_node_item) * D_RO(rn)->n); memcpy(&D_RW(node)->slots[D_RO(node)->n], D_RO(rn)->slots, sizeof(TOID(struct tree_map_node)) * (D_RO(rn)->n + 1)); D_RW(node)->n += D_RO(rn)->n; TX_FREE(rn); /* right node is now empty */ TX_ADD(parent); D_RW(parent)->n -= 1; /* move everything to the right of the separator by one array slot */ memmove(D_RW(parent)->items + p, D_RW(parent)->items + p + 1, sizeof(struct tree_map_node_item) * (D_RO(parent)->n - p)); memmove(D_RW(parent)->slots + p + 1, D_RW(parent)->slots + p + 2, sizeof(TOID(struct tree_map_node)) * (D_RO(parent)->n - p + 1)); /* if the parent is empty then the tree shrinks in height */ if (D_RO(parent)->n == 0 && TOID_EQUALS(parent, D_RO(map)->root)) { TX_ADD(map); TX_FREE(D_RO(map)->root); D_RW(map)->root = node; } } /* * btree_map_rebalance -- (internal) performs tree rebalance */ static void btree_map_rebalance(TOID(struct btree_map) map, TOID(struct tree_map_node) node, TOID(struct tree_map_node) parent, int p) { TOID(struct tree_map_node) rsb = p >= D_RO(parent)->n ? TOID_NULL(struct tree_map_node) : D_RO(parent)->slots[p + 1]; TOID(struct tree_map_node) lsb = p == 0 ? 
TOID_NULL(struct tree_map_node) : D_RO(parent)->slots[p - 1]; if (!TOID_IS_NULL(rsb) && D_RO(rsb)->n > BTREE_MIN) btree_map_rotate_right(rsb, node, parent, p); else if (!TOID_IS_NULL(lsb) && D_RO(lsb)->n > BTREE_MIN) btree_map_rotate_left(lsb, node, parent, p); else if (TOID_IS_NULL(rsb)) /* always merge with rightmost node */ btree_map_merge(map, node, lsb, parent, p - 1); else btree_map_merge(map, rsb, node, parent, p); } /* * btree_map_get_leftmost_leaf -- (internal) searches for the successor */ static TOID(struct tree_map_node) btree_map_get_leftmost_leaf(TOID(struct btree_map) map, TOID(struct tree_map_node) n, TOID(struct tree_map_node) *p) { if (TOID_IS_NULL(D_RO(n)->slots[0])) return n; *p = n; return btree_map_get_leftmost_leaf(map, D_RO(n)->slots[0], p); } /* * btree_map_remove_from_node -- (internal) removes element from node */ static void btree_map_remove_from_node(TOID(struct btree_map) map, TOID(struct tree_map_node) node, TOID(struct tree_map_node) parent, int p) { if (TOID_IS_NULL(D_RO(node)->slots[0])) { /* leaf */ TX_ADD(node); if (D_RO(node)->n == 1 || p == BTREE_ORDER - 2) { set_empty_item(&D_RW(node)->items[p]); } else if (D_RO(node)->n != 1) { memmove(&D_RW(node)->items[p], &D_RW(node)->items[p + 1], sizeof(struct tree_map_node_item) * (D_RO(node)->n - p)); } D_RW(node)->n -= 1; return; } /* can't delete from non-leaf nodes, remove successor */ TOID(struct tree_map_node) rchild = D_RW(node)->slots[p + 1]; TOID(struct tree_map_node) lp = node; TOID(struct tree_map_node) lm = btree_map_get_leftmost_leaf(map, rchild, &lp); TX_ADD_FIELD(node, items[p]); D_RW(node)->items[p] = D_RO(lm)->items[0]; btree_map_remove_from_node(map, lm, lp, 0); if (D_RO(lm)->n < BTREE_MIN) /* right child can be deficient now */ btree_map_rebalance(map, lm, lp, TOID_EQUALS(lp, node) ? 
p + 1 : 0); } #define NODE_CONTAINS_ITEM(_n, _i, _k)\ ((_i) != D_RO(_n)->n && D_RO(_n)->items[_i].key == (_k)) #define NODE_CHILD_CAN_CONTAIN_ITEM(_n, _i, _k)\ ((_i) == D_RO(_n)->n || D_RO(_n)->items[_i].key > (_k)) &&\ !TOID_IS_NULL(D_RO(_n)->slots[_i]) /* * btree_map_remove_item -- (internal) removes item from node */ static PMEMoid btree_map_remove_item(TOID(struct btree_map) map, TOID(struct tree_map_node) node, TOID(struct tree_map_node) parent, uint64_t key, int p) { PMEMoid ret = OID_NULL; for (int i = 0; i <= D_RO(node)->n; ++i) { if (NODE_CONTAINS_ITEM(node, i, key)) { ret = D_RO(node)->items[i].value; btree_map_remove_from_node(map, node, parent, i); break; } else if (NODE_CHILD_CAN_CONTAIN_ITEM(node, i, key)) { ret = btree_map_remove_item(map, D_RO(node)->slots[i], node, key, i); break; } } /* check for deficient nodes walking up */ if (!TOID_IS_NULL(parent) && D_RO(node)->n < BTREE_MIN) btree_map_rebalance(map, node, parent, p); return ret; } /* * btree_map_remove -- removes key-value pair from the map */ PMEMoid btree_map_remove(PMEMobjpool *pop, TOID(struct btree_map) map, uint64_t key) { PMEMoid ret = OID_NULL; #ifdef GET_NDP_BREAKDOWN ulogCycles = 0; waitCycles = 0; #endif #ifdef GET_NDP_PERFORMENCE uint64_t btreetxCycles = 0; uint64_t endCycles, startCycles; for(int i=0;i<RUN_COUNT;i++){ startCycles = getCycle(); #endif TX_BEGIN(pop) { ret = btree_map_remove_item(map, D_RW(map)->root, TOID_NULL(struct tree_map_node), key, 0); } TX_END #ifdef GET_NDP_PERFORMENCE endCycles = getCycle(); btreetxCycles += endCycles - startCycles; } double totTime = ((double)btreetxCycles)/2000000000; printf("btree TX/s = %f\nbtree 1 tx total time = %f\n",RUN_COUNT/totTime,totTime); #endif #ifdef GET_NDP_BREAKDOWN printf("btree 1 tx ulog time = %f\n", (((double)ulogCycles)/2000000000)); printf("btree 1 tx wait time = %f\n", (((double)waitCycles)/2000000000)); #endif return ret; } /* * btree_map_get_in_node -- (internal) searches for a value in the node */ static PMEMoid btree_map_get_in_node(TOID(struct tree_map_node) node, uint64_t key) { for (int i = 0; i <= D_RO(node)->n; ++i) { if (NODE_CONTAINS_ITEM(node, i, key)) return D_RO(node)->items[i].value; else if (NODE_CHILD_CAN_CONTAIN_ITEM(node, i, key)) return btree_map_get_in_node(D_RO(node)->slots[i], key); } return OID_NULL; } /* * btree_map_get -- searches for a value of the key */ PMEMoid btree_map_get(PMEMobjpool *pop, TOID(struct btree_map) map, uint64_t key) { if (TOID_IS_NULL(D_RO(map)->root)) return OID_NULL; return btree_map_get_in_node(D_RO(map)->root, key); } /* * btree_map_lookup_in_node -- (internal) searches for key if exists */ static int btree_map_lookup_in_node(TOID(struct tree_map_node) node, uint64_t key) { for (int i = 0; i <= D_RO(node)->n; ++i) { if (NODE_CONTAINS_ITEM(node, i, key)) return 1; else if (NODE_CHILD_CAN_CONTAIN_ITEM(node, i, key)) return btree_map_lookup_in_node( D_RO(node)->slots[i], key); } return 0; } /* * btree_map_lookup -- searches if key exists */ int btree_map_lookup(PMEMobjpool *pop, TOID(struct btree_map) map, uint64_t key) { if (TOID_IS_NULL(D_RO(map)->root)) return 0; return btree_map_lookup_in_node(D_RO(map)->root, key); } /* * btree_map_foreach_node -- (internal) recursively traverses tree */ static int btree_map_foreach_node(const TOID(struct tree_map_node) p, int (*cb)(uint64_t key, PMEMoid, void *arg), void *arg) { if (TOID_IS_NULL(p)) return 0; for (int i = 0; i <= D_RO(p)->n; ++i) { if (btree_map_foreach_node(D_RO(p)->slots[i], cb, arg) != 0) return 1; if (i != D_RO(p)->n && 
D_RO(p)->items[i].key != 0) { if (cb(D_RO(p)->items[i].key, D_RO(p)->items[i].value, arg) != 0) return 1; } } return 0; } /* * btree_map_foreach -- initiates recursive traversal */ int btree_map_foreach(PMEMobjpool *pop, TOID(struct btree_map) map, int (*cb)(uint64_t key, PMEMoid value, void *arg), void *arg) { return btree_map_foreach_node(D_RO(map)->root, cb, arg); } /* * ctree_map_check -- check if given persistent object is a tree map */ int btree_map_check(PMEMobjpool *pop, TOID(struct btree_map) map) { return TOID_IS_NULL(map) || !TOID_VALID(map); } /* * btree_map_insert_new -- allocates a new object and inserts it into the tree */ int btree_map_insert_new(PMEMobjpool *pop, TOID(struct btree_map) map, uint64_t key, size_t size, unsigned type_num, void (*constructor)(PMEMobjpool *pop, void *ptr, void *arg), void *arg) { int ret = 0; TX_BEGIN(pop) { PMEMoid n = pmemobj_tx_alloc(size, type_num); constructor(pop, pmemobj_direct(n), arg); btree_map_insert(pop, map, key, n); } TX_ONABORT { ret = 1; } TX_END return ret; } /* * btree_map_remove_free -- removes and frees an object from the tree */ int btree_map_remove_free(PMEMobjpool *pop, TOID(struct btree_map) map, uint64_t key) { int ret = 0; TX_BEGIN(pop) { PMEMoid val = btree_map_remove(pop, map, key); pmemobj_tx_free(val); } TX_ONABORT { ret = 1; } TX_END return ret; }
19,016
25.158184
83
c
null
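btree_map_insert_new() above allocates the value object, runs the caller-supplied constructor on it and inserts it into the tree inside a single transaction, so either all three steps commit or none do. The following is a hedged sketch of a matching constructor; struct item and item_construct are hypothetical names used only for illustration.

#include <libpmemobj.h>
#include "btree_map.h"

/* hypothetical value type stored in the tree (assumption) */
struct item {
	uint64_t payload;
};

static void
item_construct(PMEMobjpool *pop, void *ptr, void *arg)
{
	struct item *it = (struct item *)ptr;
	it->payload = *(uint64_t *)arg;
	/* make the freshly allocated object durable before commit */
	pmemobj_persist(pop, it, sizeof(*it));
}

static int
insert_item(PMEMobjpool *pop, TOID(struct btree_map) map,
	uint64_t key, uint64_t payload)
{
	/* allocation, construction and tree insert commit or abort together */
	return btree_map_insert_new(pop, map, key, sizeof(struct item),
		0 /* type_num, an arbitrary choice for this sketch */,
		item_construct, &payload);
}
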
NearPMSW-main/nearpm/shadow/pmdk-sd/src/examples/libpmemobj/tree_map/rtree_map.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2016, Intel Corporation */ /* * rtree_map.h -- Radix TreeMap collection implementation */ #ifndef RTREE_MAP_H #define RTREE_MAP_H #include <libpmemobj.h> #ifndef RTREE_MAP_TYPE_OFFSET #define RTREE_MAP_TYPE_OFFSET 1020 #endif struct rtree_map; TOID_DECLARE(struct rtree_map, RTREE_MAP_TYPE_OFFSET + 0); int rtree_map_check(PMEMobjpool *pop, TOID(struct rtree_map) map); int rtree_map_create(PMEMobjpool *pop, TOID(struct rtree_map) *map, void *arg); int rtree_map_destroy(PMEMobjpool *pop, TOID(struct rtree_map) *map); int rtree_map_insert(PMEMobjpool *pop, TOID(struct rtree_map) map, const unsigned char *key, uint64_t key_size, PMEMoid value); int rtree_map_insert_new(PMEMobjpool *pop, TOID(struct rtree_map) map, const unsigned char *key, uint64_t key_size, size_t size, unsigned type_num, void (*constructor)(PMEMobjpool *pop, void *ptr, void *arg), void *arg); PMEMoid rtree_map_remove(PMEMobjpool *pop, TOID(struct rtree_map) map, const unsigned char *key, uint64_t key_size); int rtree_map_remove_free(PMEMobjpool *pop, TOID(struct rtree_map) map, const unsigned char *key, uint64_t key_size); int rtree_map_clear(PMEMobjpool *pop, TOID(struct rtree_map) map); PMEMoid rtree_map_get(PMEMobjpool *pop, TOID(struct rtree_map) map, const unsigned char *key, uint64_t key_size); int rtree_map_lookup(PMEMobjpool *pop, TOID(struct rtree_map) map, const unsigned char *key, uint64_t key_size); int rtree_map_foreach(PMEMobjpool *pop, TOID(struct rtree_map) map, int (*cb)(const unsigned char *key, uint64_t key_size, PMEMoid value, void *arg), void *arg); int rtree_map_is_empty(PMEMobjpool *pop, TOID(struct rtree_map) map); #endif /* RTREE_MAP_H */
1,739
36.826087
79
h
null
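rtree_map_foreach() declared above takes a callback with the (key, key_size, value, arg) signature; the implementation invokes it once per visited node with that node's stored key bytes, and a non-zero return value stops the walk. A minimal hypothetical callback (print_entry is an illustrative name):

#include <stdio.h>
#include <libpmemobj.h>
#include "rtree_map.h"

static int
print_entry(const unsigned char *key, uint64_t key_size, PMEMoid value,
	void *arg)
{
	(void) value;
	(void) arg;
	/* key bytes are not necessarily NUL-terminated, so bound the print */
	printf("key fragment: %.*s\n", (int)key_size, key);
	return 0; /* returning non-zero stops the traversal */
}

/* usage: rtree_map_foreach(pop, map, print_entry, NULL); */
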
NearPMSW-main/nearpm/shadow/pmdk-sd/src/examples/libpmemobj/tree_map/rbtree_map.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2017, Intel Corporation */ /* * rbtree_map.h -- TreeMap sorted collection implementation */ #ifndef RBTREE_MAP_H #define RBTREE_MAP_H #include <libpmemobj.h> #ifndef RBTREE_MAP_TYPE_OFFSET #define RBTREE_MAP_TYPE_OFFSET 1016 #endif struct rbtree_map; TOID_DECLARE(struct rbtree_map, RBTREE_MAP_TYPE_OFFSET + 0); int rbtree_map_check(PMEMobjpool *pop, TOID(struct rbtree_map) map); int rbtree_map_create(PMEMobjpool *pop, TOID(struct rbtree_map) *map, void *arg); int rbtree_map_destroy(PMEMobjpool *pop, TOID(struct rbtree_map) *map); int rbtree_map_insert(PMEMobjpool *pop, TOID(struct rbtree_map) map, uint64_t key, PMEMoid value); int rbtree_map_insert_new(PMEMobjpool *pop, TOID(struct rbtree_map) map, uint64_t key, size_t size, unsigned type_num, void (*constructor)(PMEMobjpool *pop, void *ptr, void *arg), void *arg); PMEMoid rbtree_map_remove(PMEMobjpool *pop, TOID(struct rbtree_map) map, uint64_t key); int rbtree_map_remove_free(PMEMobjpool *pop, TOID(struct rbtree_map) map, uint64_t key); int rbtree_map_clear(PMEMobjpool *pop, TOID(struct rbtree_map) map); PMEMoid rbtree_map_get(PMEMobjpool *pop, TOID(struct rbtree_map) map, uint64_t key); int rbtree_map_lookup(PMEMobjpool *pop, TOID(struct rbtree_map) map, uint64_t key); int rbtree_map_foreach(PMEMobjpool *pop, TOID(struct rbtree_map) map, int (*cb)(uint64_t key, PMEMoid value, void *arg), void *arg); int rbtree_map_is_empty(PMEMobjpool *pop, TOID(struct rbtree_map) map); #endif /* RBTREE_MAP_H */
1,557
34.409091
73
h
null
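The header above offers two removal paths: rbtree_map_remove() hands the stored PMEMoid back to the caller, while rbtree_map_remove_free() additionally frees that object within the same transaction as the node removal. A small hedged sketch (drop_key is a hypothetical helper name):

#include <libpmemobj.h>
#include "rbtree_map.h"

static void
drop_key(PMEMobjpool *pop, TOID(struct rbtree_map) map, uint64_t key,
	int keep_value)
{
	if (keep_value) {
		/* caller takes ownership of the removed value object */
		PMEMoid val = rbtree_map_remove(pop, map, key);
		(void) val;
	} else {
		/* value object is freed along with its tree node */
		(void) rbtree_map_remove_free(pop, map, key);
	}
}
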
NearPMSW-main/nearpm/shadow/pmdk-sd/src/examples/libpmemobj/tree_map/btree_map.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2017, Intel Corporation */ /* * btree_map.h -- TreeMap sorted collection implementation */ #ifndef BTREE_MAP_H #define BTREE_MAP_H #include <libpmemobj.h> #ifndef BTREE_MAP_TYPE_OFFSET #define BTREE_MAP_TYPE_OFFSET 1012 #endif struct btree_map; TOID_DECLARE(struct btree_map, BTREE_MAP_TYPE_OFFSET + 0); int btree_map_check(PMEMobjpool *pop, TOID(struct btree_map) map); int btree_map_create(PMEMobjpool *pop, TOID(struct btree_map) *map, void *arg); int btree_map_destroy(PMEMobjpool *pop, TOID(struct btree_map) *map); int btree_map_insert(PMEMobjpool *pop, TOID(struct btree_map) map, uint64_t key, PMEMoid value); int btree_map_insert_new(PMEMobjpool *pop, TOID(struct btree_map) map, uint64_t key, size_t size, unsigned type_num, void (*constructor)(PMEMobjpool *pop, void *ptr, void *arg), void *arg); PMEMoid btree_map_remove(PMEMobjpool *pop, TOID(struct btree_map) map, uint64_t key); int btree_map_remove_free(PMEMobjpool *pop, TOID(struct btree_map) map, uint64_t key); int btree_map_clear(PMEMobjpool *pop, TOID(struct btree_map) map); PMEMoid btree_map_get(PMEMobjpool *pop, TOID(struct btree_map) map, uint64_t key); int btree_map_lookup(PMEMobjpool *pop, TOID(struct btree_map) map, uint64_t key); int btree_map_foreach(PMEMobjpool *pop, TOID(struct btree_map) map, int (*cb)(uint64_t key, PMEMoid value, void *arg), void *arg); int btree_map_is_empty(PMEMobjpool *pop, TOID(struct btree_map) map); #endif /* BTREE_MAP_H */
1,523
34.44186
79
h
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/examples/libpmemobj/tree_map/rbtree_map.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2019, Intel Corporation */ /* * rbtree.c -- red-black tree implementation /w sentinel nodes */ #include <assert.h> #include <errno.h> #include "rbtree_map.h" #include <stdio.h> #include <x86intrin.h> static inline uint64_t getCycle(){ uint32_t cycles_high, cycles_low, pid; asm volatile ("RDTSCP\n\t" // rdtscp into eax and edx "mov %%edx, %0\n\t" "mov %%eax, %1\n\t" "mov %%ecx, %2\n\t" :"=r" (cycles_high), "=r" (cycles_low), "=r" (pid) //store in vars :// no input :"%eax", "%edx", "%ecx" // clobbered by rdtscp ); return((uint64_t)cycles_high << 32) | cycles_low; } TOID_DECLARE(struct tree_map_node, RBTREE_MAP_TYPE_OFFSET + 1); #define NODE_P(_n)\ D_RW(_n)->parent #define NODE_GRANDP(_n)\ NODE_P(NODE_P(_n)) #define NODE_PARENT_AT(_n, _rbc)\ D_RW(NODE_P(_n))->slots[_rbc] #define NODE_PARENT_RIGHT(_n)\ NODE_PARENT_AT(_n, RB_RIGHT) #define NODE_IS(_n, _rbc)\ TOID_EQUALS(_n, NODE_PARENT_AT(_n, _rbc)) #define NODE_IS_RIGHT(_n)\ TOID_EQUALS(_n, NODE_PARENT_RIGHT(_n)) #define NODE_LOCATION(_n)\ NODE_IS_RIGHT(_n) #define RB_FIRST(_m)\ D_RW(D_RW(_m)->root)->slots[RB_LEFT] #define NODE_IS_NULL(_n)\ TOID_EQUALS(_n, s) enum rb_color { COLOR_BLACK, COLOR_RED, MAX_COLOR }; enum rb_children { RB_LEFT, RB_RIGHT, MAX_RB }; struct tree_map_node { uint64_t key; PMEMoid value; enum rb_color color; TOID(struct tree_map_node) parent; TOID(struct tree_map_node) slots[MAX_RB]; }; struct rbtree_map { TOID(struct tree_map_node) sentinel; TOID(struct tree_map_node) root; }; /* * rbtree_map_create -- allocates a new red-black tree instance */ int rbtree_map_create(PMEMobjpool *pop, TOID(struct rbtree_map) *map, void *arg) { int ret = 0; TX_BEGIN(pop) { pmemobj_tx_add_range_direct(map, sizeof(*map)); *map = TX_ZNEW(struct rbtree_map); TOID(struct tree_map_node) s = TX_ZNEW(struct tree_map_node); D_RW(s)->color = COLOR_BLACK; D_RW(s)->parent = s; D_RW(s)->slots[RB_LEFT] = s; D_RW(s)->slots[RB_RIGHT] = s; TOID(struct tree_map_node) r = TX_ZNEW(struct tree_map_node); D_RW(r)->color = COLOR_BLACK; D_RW(r)->parent = s; D_RW(r)->slots[RB_LEFT] = s; D_RW(r)->slots[RB_RIGHT] = s; D_RW(*map)->sentinel = s; D_RW(*map)->root = r; } TX_ONABORT { ret = 1; } TX_END return ret; } /* * rbtree_map_clear_node -- (internal) clears this node and its children */ static void rbtree_map_clear_node(TOID(struct rbtree_map) map, TOID(struct tree_map_node) p) { TOID(struct tree_map_node) s = D_RO(map)->sentinel; if (!NODE_IS_NULL(D_RO(p)->slots[RB_LEFT])) rbtree_map_clear_node(map, D_RO(p)->slots[RB_LEFT]); if (!NODE_IS_NULL(D_RO(p)->slots[RB_RIGHT])) rbtree_map_clear_node(map, D_RO(p)->slots[RB_RIGHT]); TX_FREE(p); } /* * rbtree_map_clear -- removes all elements from the map */ int rbtree_map_clear(PMEMobjpool *pop, TOID(struct rbtree_map) map) { TX_BEGIN(pop) { rbtree_map_clear_node(map, D_RW(map)->root); TX_ADD_FIELD(map, root); TX_ADD_FIELD(map, sentinel); TX_FREE(D_RW(map)->sentinel); D_RW(map)->root = TOID_NULL(struct tree_map_node); D_RW(map)->sentinel = TOID_NULL(struct tree_map_node); } TX_END return 0; } /* * rbtree_map_destroy -- cleanups and frees red-black tree instance */ int rbtree_map_destroy(PMEMobjpool *pop, TOID(struct rbtree_map) *map) { int ret = 0; TX_BEGIN(pop) { rbtree_map_clear(pop, *map); pmemobj_tx_add_range_direct(map, sizeof(*map)); TX_FREE(*map); *map = TOID_NULL(struct rbtree_map); } TX_ONABORT { ret = 1; } TX_END return ret; } /* * rbtree_map_rotate -- (internal) performs a left/right rotation around a node */ static void rbtree_map_rotate(TOID(struct 
rbtree_map) map, TOID(struct tree_map_node) node, enum rb_children c) { TOID(struct tree_map_node) child = D_RO(node)->slots[!c]; TOID(struct tree_map_node) s = D_RO(map)->sentinel; TX_ADD(node); TX_ADD(child); D_RW(node)->slots[!c] = D_RO(child)->slots[c]; if (!TOID_EQUALS(D_RO(child)->slots[c], s)) TX_SET(D_RW(child)->slots[c], parent, node); NODE_P(child) = NODE_P(node); TX_SET(NODE_P(node), slots[NODE_LOCATION(node)], child); D_RW(child)->slots[c] = node; D_RW(node)->parent = child; } /* * rbtree_map_insert_bst -- (internal) inserts a node in regular BST fashion */ static void rbtree_map_insert_bst(TOID(struct rbtree_map) map, TOID(struct tree_map_node) n) { TOID(struct tree_map_node) parent = D_RO(map)->root; TOID(struct tree_map_node) *dst = &RB_FIRST(map); TOID(struct tree_map_node) s = D_RO(map)->sentinel; D_RW(n)->slots[RB_LEFT] = s; D_RW(n)->slots[RB_RIGHT] = s; while (!NODE_IS_NULL(*dst)) { parent = *dst; dst = &D_RW(*dst)->slots[D_RO(n)->key > D_RO(*dst)->key]; } TX_SET(n, parent, parent); pmemobj_tx_add_range_direct(dst, sizeof(*dst)); *dst = n; } /* * rbtree_map_recolor -- (internal) restores red-black tree properties */ static TOID(struct tree_map_node) rbtree_map_recolor(TOID(struct rbtree_map) map, TOID(struct tree_map_node) n, enum rb_children c) { TOID(struct tree_map_node) uncle = D_RO(NODE_GRANDP(n))->slots[!c]; if (D_RO(uncle)->color == COLOR_RED) { TX_SET(uncle, color, COLOR_BLACK); TX_SET(NODE_P(n), color, COLOR_BLACK); TX_SET(NODE_GRANDP(n), color, COLOR_RED); return NODE_GRANDP(n); } else { if (NODE_IS(n, !c)) { n = NODE_P(n); rbtree_map_rotate(map, n, c); } TX_SET(NODE_P(n), color, COLOR_BLACK); TX_SET(NODE_GRANDP(n), color, COLOR_RED); rbtree_map_rotate(map, NODE_GRANDP(n), (enum rb_children)!c); } return n; } /* * rbtree_map_insert -- inserts a new key-value pair into the map */ #ifdef GET_NDP_BREAKDOWN uint64_t ulogCycles; uint64_t waitCycles; uint64_t resetCycles; #endif int rbtree_map_insert(PMEMobjpool *pop, TOID(struct rbtree_map) map, uint64_t key, PMEMoid value) { int ret = 0; #ifdef GET_NDP_BREAKDOWN ulogCycles = 0; waitCycles = 0; #endif #ifdef GET_NDP_PERFORMENCE uint64_t btreetxCycles = 0; uint64_t endCycles, startCycles; for(int i=0;i<RUN_COUNT;i++){ startCycles = getCycle(); #endif TX_BEGIN(pop) { TOID(struct tree_map_node) n = TX_ZNEW(struct tree_map_node); D_RW(n)->key = key; D_RW(n)->value = value; rbtree_map_insert_bst(map, n); D_RW(n)->color = COLOR_RED; while (D_RO(NODE_P(n))->color == COLOR_RED) n = rbtree_map_recolor(map, n, (enum rb_children) NODE_LOCATION(NODE_P(n))); TX_SET(RB_FIRST(map), color, COLOR_BLACK); } TX_END #ifdef GET_NDP_PERFORMENCE endCycles = getCycle(); btreetxCycles += endCycles - startCycles; } double totTime = ((double)btreetxCycles)/2000000000; printf("btree TX/s = %f\nbtree 1 tx total time = %f\n",RUN_COUNT/totTime,totTime); #endif #ifdef GET_NDP_BREAKDOWN printf("btree 1 tx ulog time = %f\n", (((double)ulogCycles)/2000000000)); printf("btree 1 tx wait time = %f\n", (((double)waitCycles)/2000000000)); #endif return ret; } /* * rbtree_map_successor -- (internal) returns the successor of a node */ static TOID(struct tree_map_node) rbtree_map_successor(TOID(struct rbtree_map) map, TOID(struct tree_map_node) n) { TOID(struct tree_map_node) dst = D_RO(n)->slots[RB_RIGHT]; TOID(struct tree_map_node) s = D_RO(map)->sentinel; if (!TOID_EQUALS(s, dst)) { while (!NODE_IS_NULL(D_RO(dst)->slots[RB_LEFT])) dst = D_RO(dst)->slots[RB_LEFT]; } else { dst = D_RO(n)->parent; while (TOID_EQUALS(n, D_RO(dst)->slots[RB_RIGHT])) { n = dst; 
dst = NODE_P(dst); } if (TOID_EQUALS(dst, D_RO(map)->root)) return s; } return dst; } /* * rbtree_map_find_node -- (internal) returns the node that contains the key */ static TOID(struct tree_map_node) rbtree_map_find_node(TOID(struct rbtree_map) map, uint64_t key) { TOID(struct tree_map_node) dst = RB_FIRST(map); TOID(struct tree_map_node) s = D_RO(map)->sentinel; while (!NODE_IS_NULL(dst)) { if (D_RO(dst)->key == key) return dst; dst = D_RO(dst)->slots[key > D_RO(dst)->key]; } return TOID_NULL(struct tree_map_node); } /* * rbtree_map_repair_branch -- (internal) restores red-black tree in one branch */ static TOID(struct tree_map_node) rbtree_map_repair_branch(TOID(struct rbtree_map) map, TOID(struct tree_map_node) n, enum rb_children c) { TOID(struct tree_map_node) sb = NODE_PARENT_AT(n, !c); /* sibling */ if (D_RO(sb)->color == COLOR_RED) { TX_SET(sb, color, COLOR_BLACK); TX_SET(NODE_P(n), color, COLOR_RED); rbtree_map_rotate(map, NODE_P(n), c); sb = NODE_PARENT_AT(n, !c); } if (D_RO(D_RO(sb)->slots[RB_RIGHT])->color == COLOR_BLACK && D_RO(D_RO(sb)->slots[RB_LEFT])->color == COLOR_BLACK) { TX_SET(sb, color, COLOR_RED); return D_RO(n)->parent; } else { if (D_RO(D_RO(sb)->slots[!c])->color == COLOR_BLACK) { TX_SET(D_RW(sb)->slots[c], color, COLOR_BLACK); TX_SET(sb, color, COLOR_RED); rbtree_map_rotate(map, sb, (enum rb_children)!c); sb = NODE_PARENT_AT(n, !c); } TX_SET(sb, color, D_RO(NODE_P(n))->color); TX_SET(NODE_P(n), color, COLOR_BLACK); TX_SET(D_RW(sb)->slots[!c], color, COLOR_BLACK); rbtree_map_rotate(map, NODE_P(n), c); return RB_FIRST(map); } return n; } /* * rbtree_map_repair -- (internal) restores red-black tree properties * after remove */ static void rbtree_map_repair(TOID(struct rbtree_map) map, TOID(struct tree_map_node) n) { /* if left, repair right sibling, otherwise repair left sibling. */ while (!TOID_EQUALS(n, RB_FIRST(map)) && D_RO(n)->color == COLOR_BLACK) n = rbtree_map_repair_branch(map, n, (enum rb_children) NODE_LOCATION(n)); TX_SET(n, color, COLOR_BLACK); } /* * rbtree_map_remove -- removes key-value pair from the map */ PMEMoid rbtree_map_remove(PMEMobjpool *pop, TOID(struct rbtree_map) map, uint64_t key) { PMEMoid ret = OID_NULL; #ifdef GET_NDP_BREAKDOWN ulogCycles = 0; waitCycles = 0; #endif #ifdef GET_NDP_PERFORMENCE uint64_t btreetxCycles = 0; uint64_t endCycles, startCycles; for(int i=0;i<RUN_COUNT;i++){ #endif TOID(struct tree_map_node) n = rbtree_map_find_node(map, key); if (TOID_IS_NULL(n)) return ret; ret = D_RO(n)->value; TOID(struct tree_map_node) s = D_RO(map)->sentinel; TOID(struct tree_map_node) r = D_RO(map)->root; TOID(struct tree_map_node) y = (NODE_IS_NULL(D_RO(n)->slots[RB_LEFT]) || NODE_IS_NULL(D_RO(n)->slots[RB_RIGHT])) ? n : rbtree_map_successor(map, n); TOID(struct tree_map_node) x = NODE_IS_NULL(D_RO(y)->slots[RB_LEFT]) ? 
D_RO(y)->slots[RB_RIGHT] : D_RO(y)->slots[RB_LEFT]; #ifdef GET_NDP_PERFORMENCE startCycles = getCycle(); #endif TX_BEGIN(pop) { TX_SET(x, parent, NODE_P(y)); if (TOID_EQUALS(NODE_P(x), r)) { TX_SET(r, slots[RB_LEFT], x); } else { TX_SET(NODE_P(y), slots[NODE_LOCATION(y)], x); } if (D_RO(y)->color == COLOR_BLACK) rbtree_map_repair(map, x); if (!TOID_EQUALS(y, n)) { TX_ADD(y); D_RW(y)->slots[RB_LEFT] = D_RO(n)->slots[RB_LEFT]; D_RW(y)->slots[RB_RIGHT] = D_RO(n)->slots[RB_RIGHT]; D_RW(y)->parent = D_RO(n)->parent; D_RW(y)->color = D_RO(n)->color; TX_SET(D_RW(n)->slots[RB_LEFT], parent, y); TX_SET(D_RW(n)->slots[RB_RIGHT], parent, y); TX_SET(NODE_P(n), slots[NODE_LOCATION(n)], y); } TX_FREE(n); } TX_END #ifdef GET_NDP_PERFORMENCE endCycles = getCycle(); btreetxCycles += endCycles - startCycles; } double totTime = ((double)btreetxCycles)/2000000000; printf("btree TX/s = %f\nbtree 1 tx total time = %f\n",RUN_COUNT/totTime,totTime); #endif #ifdef GET_NDP_BREAKDOWN printf("btree 1 tx ulog time = %f\n", (((double)ulogCycles)/2000000000)); printf("btree 1 tx wait time = %f\n", (((double)waitCycles)/2000000000)); #endif return ret; } /* * rbtree_map_get -- searches for a value of the key */ PMEMoid rbtree_map_get(PMEMobjpool *pop, TOID(struct rbtree_map) map, uint64_t key) { TOID(struct tree_map_node) node = rbtree_map_find_node(map, key); if (TOID_IS_NULL(node)) return OID_NULL; return D_RO(node)->value; } /* * rbtree_map_lookup -- searches if key exists */ int rbtree_map_lookup(PMEMobjpool *pop, TOID(struct rbtree_map) map, uint64_t key) { TOID(struct tree_map_node) node = rbtree_map_find_node(map, key); if (TOID_IS_NULL(node)) return 0; return 1; } /* * rbtree_map_foreach_node -- (internal) recursively traverses tree */ static int rbtree_map_foreach_node(TOID(struct rbtree_map) map, TOID(struct tree_map_node) p, int (*cb)(uint64_t key, PMEMoid value, void *arg), void *arg) { int ret = 0; if (TOID_EQUALS(p, D_RO(map)->sentinel)) return 0; if ((ret = rbtree_map_foreach_node(map, D_RO(p)->slots[RB_LEFT], cb, arg)) == 0) { if ((ret = cb(D_RO(p)->key, D_RO(p)->value, arg)) == 0) rbtree_map_foreach_node(map, D_RO(p)->slots[RB_RIGHT], cb, arg); } return ret; } /* * rbtree_map_foreach -- initiates recursive traversal */ int rbtree_map_foreach(PMEMobjpool *pop, TOID(struct rbtree_map) map, int (*cb)(uint64_t key, PMEMoid value, void *arg), void *arg) { return rbtree_map_foreach_node(map, RB_FIRST(map), cb, arg); } /* * rbtree_map_is_empty -- checks whether the tree map is empty */ int rbtree_map_is_empty(PMEMobjpool *pop, TOID(struct rbtree_map) map) { return TOID_IS_NULL(RB_FIRST(map)); } /* * rbtree_map_check -- check if given persistent object is a tree map */ int rbtree_map_check(PMEMobjpool *pop, TOID(struct rbtree_map) map) { return TOID_IS_NULL(map) || !TOID_VALID(map); } /* * rbtree_map_insert_new -- allocates a new object and inserts it into the tree */ int rbtree_map_insert_new(PMEMobjpool *pop, TOID(struct rbtree_map) map, uint64_t key, size_t size, unsigned type_num, void (*constructor)(PMEMobjpool *pop, void *ptr, void *arg), void *arg) { int ret = 0; TX_BEGIN(pop) { PMEMoid n = pmemobj_tx_alloc(size, type_num); constructor(pop, pmemobj_direct(n), arg); rbtree_map_insert(pop, map, key, n); } TX_ONABORT { ret = 1; } TX_END return ret; } /* * rbtree_map_remove_free -- removes and frees an object from the tree */ int rbtree_map_remove_free(PMEMobjpool *pop, TOID(struct rbtree_map) map, uint64_t key) { int ret = 0; TX_BEGIN(pop) { PMEMoid val = rbtree_map_remove(pop, map, key); 
pmemobj_tx_free(val); } TX_ONABORT { ret = 1; } TX_END return ret; }
14,171
23.102041
83
c
null
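The GET_NDP_PERFORMENCE and GET_NDP_BREAKDOWN paths in the tree sources above convert raw RDTSCP cycle deltas to seconds by dividing by 2000000000, i.e. they assume a 2 GHz invariant TSC. The helper below only makes that assumption explicit; TSC_HZ is a hypothetical constant that must match the machine being measured.

#include <stdint.h>

#define TSC_HZ 2000000000ULL	/* assumed TSC frequency (machine dependent) */

static inline double
cycles_to_seconds(uint64_t cycles)
{
	return (double)cycles / (double)TSC_HZ;
}

static inline double
ops_per_second(uint64_t ops, uint64_t cycles)
{
	return (double)ops / cycles_to_seconds(cycles);
}
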
NearPMSW-main/nearpm/shadow/pmdk-sd/src/examples/libpmempool/manpage.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2016, Intel Corporation */ /* * manpage.c -- simple example for the libpmempool man page */ #include <stddef.h> #include <unistd.h> #include <stdlib.h> #include <stdio.h> #include <libpmempool.h> #define PATH "./pmem-fs/myfile" #define CHECK_FLAGS (PMEMPOOL_CHECK_FORMAT_STR|PMEMPOOL_CHECK_REPAIR|\ PMEMPOOL_CHECK_VERBOSE) int main(int argc, char *argv[]) { PMEMpoolcheck *ppc; struct pmempool_check_status *status; enum pmempool_check_result ret; /* arguments for check */ struct pmempool_check_args args = { .path = PATH, .backup_path = NULL, .pool_type = PMEMPOOL_POOL_TYPE_DETECT, .flags = CHECK_FLAGS }; /* initialize check context */ if ((ppc = pmempool_check_init(&args, sizeof(args))) == NULL) { perror("pmempool_check_init"); exit(EXIT_FAILURE); } /* perform check and repair, answer 'yes' for each question */ while ((status = pmempool_check(ppc)) != NULL) { switch (status->type) { case PMEMPOOL_CHECK_MSG_TYPE_ERROR: printf("%s\n", status->str.msg); break; case PMEMPOOL_CHECK_MSG_TYPE_INFO: printf("%s\n", status->str.msg); break; case PMEMPOOL_CHECK_MSG_TYPE_QUESTION: printf("%s\n", status->str.msg); status->str.answer = "yes"; break; default: pmempool_check_end(ppc); exit(EXIT_FAILURE); } } /* finalize the check and get the result */ ret = pmempool_check_end(ppc); switch (ret) { case PMEMPOOL_CHECK_RESULT_CONSISTENT: case PMEMPOOL_CHECK_RESULT_REPAIRED: return 0; default: return 1; } }
1,555
21.882353
70
c
null
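The manpage example above answers every repair question with "yes" from the status loop. As a hedged variant (an assumption for illustration, not part of the original sources), the same check can be driven as a dry run with automatic answers, so the pool is not modified and no questions reach the caller:

#include <libpmempool.h>

static int
check_dry_run(const char *path)
{
	struct pmempool_check_args args = {
		.path = path,
		.backup_path = NULL,
		.pool_type = PMEMPOOL_POOL_TYPE_DETECT,
		.flags = PMEMPOOL_CHECK_REPAIR | PMEMPOOL_CHECK_DRY_RUN |
			PMEMPOOL_CHECK_ALWAYS_YES | PMEMPOOL_CHECK_VERBOSE |
			PMEMPOOL_CHECK_FORMAT_STR,
	};

	PMEMpoolcheck *ppc = pmempool_check_init(&args, sizeof(args));
	if (ppc == NULL)
		return -1;

	/* statuses can be ignored here; questions are answered automatically */
	while (pmempool_check(ppc) != NULL)
		;

	enum pmempool_check_result res = pmempool_check_end(ppc);
	return (res == PMEMPOOL_CHECK_RESULT_CONSISTENT ||
		res == PMEMPOOL_CHECK_RESULT_REPAIRED) ? 0 : 1;
}
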
NearPMSW-main/nearpm/shadow/pmdk-sd/src/librpmem/rpmem_ssh.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2016-2018, Intel Corporation */ /* * rpmem_ssh.h -- rpmem ssh transport layer header file */ #ifndef RPMEM_SSH_H #define RPMEM_SSH_H 1 #include <stddef.h> #ifdef __cplusplus extern "C" { #endif struct rpmem_ssh; struct rpmem_ssh *rpmem_ssh_open(const struct rpmem_target_info *info); struct rpmem_ssh *rpmem_ssh_exec(const struct rpmem_target_info *info, ...); struct rpmem_ssh *rpmem_ssh_execv(const struct rpmem_target_info *info, const char **argv); int rpmem_ssh_close(struct rpmem_ssh *rps); int rpmem_ssh_send(struct rpmem_ssh *rps, const void *buff, size_t len); int rpmem_ssh_recv(struct rpmem_ssh *rps, void *buff, size_t len); int rpmem_ssh_monitor(struct rpmem_ssh *rps, int nonblock); const char *rpmem_ssh_strerror(struct rpmem_ssh *rps, int oerrno); #ifdef __cplusplus } #endif #endif
866
23.771429
76
h
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/librpmem/rpmem_fip.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2016-2019, Intel Corporation */ /* * rpmem_fip.h -- rpmem libfabric provider module header file */ #ifndef RPMEM_FIP_H #define RPMEM_FIP_H #include <stdint.h> #include <netinet/in.h> #include <sys/types.h> #include <sys/socket.h> #ifdef __cplusplus extern "C" { #endif struct rpmem_fip; struct rpmem_fip_attr { enum rpmem_provider provider; size_t max_wq_size; enum rpmem_persist_method persist_method; void *laddr; size_t size; size_t buff_size; unsigned nlanes; void *raddr; uint64_t rkey; }; struct rpmem_fip *rpmem_fip_init(const char *node, const char *service, struct rpmem_fip_attr *attr, unsigned *nlanes); void rpmem_fip_fini(struct rpmem_fip *fip); int rpmem_fip_connect(struct rpmem_fip *fip); int rpmem_fip_close(struct rpmem_fip *fip); int rpmem_fip_process_start(struct rpmem_fip *fip); int rpmem_fip_process_stop(struct rpmem_fip *fip); int rpmem_fip_flush(struct rpmem_fip *fip, size_t offset, size_t len, unsigned lane, unsigned flags); int rpmem_fip_drain(struct rpmem_fip *fip, unsigned lane); int rpmem_fip_persist(struct rpmem_fip *fip, size_t offset, size_t len, unsigned lane, unsigned flags); int rpmem_fip_read(struct rpmem_fip *fip, void *buff, size_t len, size_t off, unsigned lane); void rpmem_fip_probe_fork_safety(void); size_t rpmem_fip_get_wq_size(struct rpmem_fip *fip); #ifdef __cplusplus } #endif #endif
1,427
22.032258
71
h
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/librpmem/rpmem.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2016-2020, Intel Corporation */ /* * rpmem.c -- main source file for librpmem */ #include <stdlib.h> #include <netdb.h> #include <stdio.h> #include <errno.h> #include <limits.h> #include <inttypes.h> #include "librpmem.h" #include "out.h" #include "os.h" #include "os_thread.h" #include "util.h" #include "rpmem.h" #include "rpmem_common.h" #include "rpmem_util.h" #include "rpmem_obc.h" #include "rpmem_fip.h" #include "rpmem_fip_common.h" #include "rpmem_ssh.h" #include "rpmem_proto.h" #define RPMEM_REMOVE_FLAGS_ALL (\ RPMEM_REMOVE_FORCE | \ RPMEM_REMOVE_POOL_SET \ ) #define RPMEM_CHECK_FORK() do {\ if (Rpmem_fork_unsafe) {\ ERR("libfabric is initialized without fork() support");\ return NULL;\ }\ } while (0) static os_once_t Rpmem_fork_unsafe_key_once = OS_ONCE_INIT; /* * rpmem_pool -- remote pool context */ struct rpmem_pool { struct rpmem_obc *obc; /* out-of-band connection handle */ struct rpmem_fip *fip; /* fabric provider handle */ struct rpmem_target_info *info; char fip_service[NI_MAXSERV]; enum rpmem_provider provider; size_t max_wq_size; /* max WQ size supported by provider */ os_thread_t monitor; int closing; int no_headers; /* * Last error code, need to be volatile because it can * be accessed by multiple threads. */ volatile int error; }; /* * env_get_bool -- parse value of specified environment variable as a bool * * Return values: * 0 - defined, valp has value * 1 - not defined * -1 - parsing error */ static int env_get_bool(const char *name, int *valp) { LOG(3, "name %s, valp %p", name, valp); const char *env = os_getenv(name); if (!env) return 1; char *endptr; errno = 0; long val = strtol(env, &endptr, 10); if (*endptr != '\0' || errno) goto err; if (val < INT_MIN || val > INT_MAX) goto err; *valp = (int)val; return 0; err: RPMEM_LOG(ERR, "!parsing '%s' environment variable failed", name); return -1; } /* * rpmem_get_provider -- set provider based on node address and environment */ static int rpmem_set_provider(RPMEMpool *rpp, const char *node) { LOG(3, "rpp %p, node %s", rpp, node); struct rpmem_fip_probe probe; enum rpmem_provider prov = RPMEM_PROV_UNKNOWN; int ret = rpmem_fip_probe_get(node, &probe); if (ret) return -1; /* * The sockets provider can be used only if specified environment * variable is set to 1. */ if (rpmem_fip_probe(probe, RPMEM_PROV_LIBFABRIC_SOCKETS)) { int enable; ret = env_get_bool(RPMEM_PROV_SOCKET_ENV, &enable); if (!ret && enable) { prov = RPMEM_PROV_LIBFABRIC_SOCKETS; } } /* * The verbs provider is enabled by default. If appropriate * environment variable is set to 0, the verbs provider is disabled. * * The verbs provider has higher priority than sockets provider. 
*/ if (rpmem_fip_probe(probe, RPMEM_PROV_LIBFABRIC_VERBS)) { int enable; ret = env_get_bool(RPMEM_PROV_VERBS_ENV, &enable); if (ret == 1 || (!ret && enable)) prov = RPMEM_PROV_LIBFABRIC_VERBS; } if (prov == RPMEM_PROV_UNKNOWN) return -1; RPMEM_ASSERT(prov < MAX_RPMEM_PROV); rpp->max_wq_size = probe.max_wq_size[prov]; rpp->provider = prov; return 0; } /* * rpmem_monitor_thread -- connection monitor background thread */ static void * rpmem_monitor_thread(void *arg) { LOG(3, "arg %p", arg); RPMEMpool *rpp = arg; int ret = rpmem_obc_monitor(rpp->obc, 0); if (ret && !rpp->closing) { RPMEM_LOG(ERR, "unexpected data received"); rpp->error = errno; } return NULL; } /* * rpmem_common_init -- common routine for initialization */ static RPMEMpool * rpmem_common_init(const char *target) { LOG(3, "target %s", target); int ret; RPMEMpool *rpp = calloc(1, sizeof(*rpp)); if (!rpp) { ERR("!calloc"); goto err_malloc_rpmem; } rpp->info = rpmem_target_parse(target); if (!rpp->info) { ERR("!parsing target node address failed"); goto err_target_split; } ret = rpmem_set_provider(rpp, rpp->info->node); if (ret) { errno = ENOMEDIUM; ERR("cannot find provider"); goto err_provider; } RPMEM_LOG(NOTICE, "provider: %s", rpmem_provider_to_str(rpp->provider)); if (rpp->provider == RPMEM_PROV_LIBFABRIC_SOCKETS) { /* libfabric's sockets provider does not support IPv6 */ RPMEM_LOG(NOTICE, "forcing using IPv4"); rpp->info->flags |= RPMEM_FLAGS_USE_IPV4; } rpp->obc = rpmem_obc_init(); if (!rpp->obc) { ERR("!out-of-band connection initialization failed"); goto err_obc_init; } RPMEM_LOG(INFO, "establishing out-of-band connection"); ret = rpmem_obc_connect(rpp->obc, rpp->info); if (ret) { ERR("!out-of-band connection failed"); goto err_obc_connect; } RPMEM_LOG(NOTICE, "out-of-band connection established"); return rpp; err_obc_connect: rpmem_obc_fini(rpp->obc); err_obc_init: err_provider: rpmem_target_free(rpp->info); err_target_split: free(rpp); err_malloc_rpmem: return NULL; } /* * rpmem_common_fini -- common routing for deinitialization */ static void rpmem_common_fini(RPMEMpool *rpp, int join) { LOG(3, "rpp %p, join %d", rpp, join); rpmem_obc_disconnect(rpp->obc); if (join) { int ret = os_thread_join(&rpp->monitor, NULL); if (ret) { errno = ret; ERR("joining monitor thread failed"); } } rpmem_obc_fini(rpp->obc); rpmem_target_free(rpp->info); free(rpp); } /* * rpmem_common_fip_init -- common routine for initializing fabric provider */ static int rpmem_common_fip_init(RPMEMpool *rpp, struct rpmem_req_attr *req, struct rpmem_resp_attr *resp, void *pool_addr, size_t pool_size, unsigned *nlanes, size_t buff_size) { LOG(3, "rpp %p, req %p, resp %p, pool_addr %p, pool_size %zu, nlanes " "%p", rpp, req, resp, pool_addr, pool_size, nlanes); int ret; struct rpmem_fip_attr fip_attr = { .provider = req->provider, .max_wq_size = rpp->max_wq_size, .persist_method = resp->persist_method, .laddr = pool_addr, .size = pool_size, .buff_size = buff_size, .nlanes = min(*nlanes, resp->nlanes), .raddr = (void *)resp->raddr, .rkey = resp->rkey, }; ret = util_snprintf(rpp->fip_service, sizeof(rpp->fip_service), "%u", resp->port); if (ret < 0) { ERR("!snprintf"); goto err_port; } rpp->fip = rpmem_fip_init(rpp->info->node, rpp->fip_service, &fip_attr, nlanes); if (!rpp->fip) { ERR("!in-band connection initialization failed"); ret = -1; goto err_fip_init; } RPMEM_LOG(NOTICE, "final nlanes: %u", *nlanes); RPMEM_LOG(INFO, "establishing in-band connection"); ret = rpmem_fip_connect(rpp->fip); if (ret) { ERR("!establishing in-band connection failed"); goto 
err_fip_connect; } RPMEM_LOG(NOTICE, "in-band connection established"); return 0; err_fip_connect: rpmem_fip_fini(rpp->fip); err_fip_init: err_port: return ret; } /* * rpmem_common_fip_fini -- common routine for deinitializing fabric provider */ static void rpmem_common_fip_fini(RPMEMpool *rpp) { LOG(3, "rpp %p", rpp); RPMEM_LOG(INFO, "closing in-band connection"); rpmem_fip_fini(rpp->fip); RPMEM_LOG(NOTICE, "in-band connection closed"); } /* * rpmem_log_args -- log input arguments for rpmem_create and rpmem_open */ static void rpmem_log_args(const char *req, const char *target, const char *pool_set_name, void *pool_addr, size_t pool_size, unsigned nlanes) { LOG(3, "req %s, target %s, pool_set_name %s, pool_addr %p, pool_size " "%zu, nlanes %d", req, target, pool_set_name, pool_addr, pool_size, nlanes); RPMEM_LOG(NOTICE, "%s request:", req); RPMEM_LOG(NOTICE, "\ttarget: %s", target); RPMEM_LOG(NOTICE, "\tpool set: %s", pool_set_name); RPMEM_LOG(INFO, "\tpool addr: %p", pool_addr); RPMEM_LOG(INFO, "\tpool size: %lu", pool_size); RPMEM_LOG(NOTICE, "\tnlanes: %u", nlanes); } /* * rpmem_log_resp -- log response attributes */ static void rpmem_log_resp(const char *req, const struct rpmem_resp_attr *resp) { LOG(3, "req %s, resp %p", req, resp); RPMEM_LOG(NOTICE, "%s request response:", req); RPMEM_LOG(NOTICE, "\tnlanes: %u", resp->nlanes); RPMEM_LOG(NOTICE, "\tport: %u", resp->port); RPMEM_LOG(NOTICE, "\tpersist method: %s", rpmem_persist_method_to_str(resp->persist_method)); RPMEM_LOG(NOTICE, "\tremote addr: 0x%" PRIx64, resp->raddr); } /* * rpmem_check_args -- validate user's arguments */ static int rpmem_check_args(void *pool_addr, size_t pool_size, unsigned *nlanes) { LOG(3, "pool_addr %p, pool_size %zu, nlanes %p", pool_addr, pool_size, nlanes); if (!pool_addr) { errno = EINVAL; ERR("invalid pool address"); return -1; } if (!IS_PAGE_ALIGNED((uintptr_t)pool_addr)) { errno = EINVAL; ERR("Pool address must be aligned to page size (%llu)", Pagesize); return -1; } if (!IS_PAGE_ALIGNED(pool_size)) { errno = EINVAL; ERR("Pool size must be aligned to page size (%llu)", Pagesize); return -1; } if (!pool_size) { errno = EINVAL; ERR("invalid pool size"); return -1; } if (!nlanes) { errno = EINVAL; ERR("lanes pointer cannot be NULL"); return -1; } if (!(*nlanes)) { errno = EINVAL; ERR("number of lanes must be positive"); return -1; } return 0; } /* * rpmem_create -- create remote pool on target node * * target -- target node in format [<user>@]<target_name>[:<port>] * pool_set_name -- remote pool set name * pool_addr -- local pool memory address which will be replicated * pool_size -- required pool size * nlanes -- number of lanes * create_attr -- pool attributes used for creating the pool on remote node */ RPMEMpool * rpmem_create(const char *target, const char *pool_set_name, void *pool_addr, size_t pool_size, unsigned *nlanes, const struct rpmem_pool_attr *create_attr) { LOG(3, "target %s, pool_set_name %s, pool_addr %p, pool_size %zu, " "nlanes %p, create_attr %p", target, pool_set_name, pool_addr, pool_size, nlanes, create_attr); os_once(&Rpmem_fork_unsafe_key_once, &rpmem_fip_probe_fork_safety); RPMEM_CHECK_FORK(); rpmem_log_args("create", target, pool_set_name, pool_addr, pool_size, *nlanes); if (rpmem_check_args(pool_addr, pool_size, nlanes)) return NULL; RPMEMpool *rpp = rpmem_common_init(target); if (!rpp) goto err_common_init; size_t buff_size = RPMEM_DEF_BUFF_SIZE; struct rpmem_req_attr req = { .pool_size = pool_size, .nlanes = min(*nlanes, Rpmem_max_nlanes), .provider = rpp->provider, 
.pool_desc = pool_set_name, .buff_size = buff_size, }; struct rpmem_resp_attr resp; int ret = rpmem_obc_create(rpp->obc, &req, &resp, create_attr); if (ret) { RPMEM_LOG(ERR, "!create request failed"); goto err_obc_create; } if (create_attr == NULL || util_is_zeroed(create_attr, sizeof(*create_attr))) rpp->no_headers = 1; rpmem_log_resp("create", &resp); ret = rpmem_common_fip_init(rpp, &req, &resp, pool_addr, pool_size, nlanes, buff_size); if (ret) goto err_fip_init; ret = os_thread_create(&rpp->monitor, NULL, rpmem_monitor_thread, rpp); if (ret) { errno = ret; ERR("!starting monitor thread"); goto err_monitor; } return rpp; err_monitor: rpmem_common_fip_fini(rpp); err_fip_init: rpmem_obc_close(rpp->obc, RPMEM_CLOSE_FLAGS_REMOVE); err_obc_create: rpmem_common_fini(rpp, 0); err_common_init: return NULL; } /* * rpmem_open -- open remote pool on target node * * target -- target node in format [<user>@]<target_name>[:<port>] * pool_set_name -- remote pool set name * pool_addr -- local pool memory address which will be replicated * pool_size -- required pool size * nlanes -- number of lanes * open_attr -- pool attributes, received from remote host */ RPMEMpool * rpmem_open(const char *target, const char *pool_set_name, void *pool_addr, size_t pool_size, unsigned *nlanes, struct rpmem_pool_attr *open_attr) { LOG(3, "target %s, pool_set_name %s, pool_addr %p, pool_size %zu, " "nlanes %p, create_attr %p", target, pool_set_name, pool_addr, pool_size, nlanes, open_attr); os_once(&Rpmem_fork_unsafe_key_once, &rpmem_fip_probe_fork_safety); RPMEM_CHECK_FORK(); rpmem_log_args("open", target, pool_set_name, pool_addr, pool_size, *nlanes); if (rpmem_check_args(pool_addr, pool_size, nlanes)) return NULL; RPMEMpool *rpp = rpmem_common_init(target); if (!rpp) goto err_common_init; size_t buff_size = RPMEM_DEF_BUFF_SIZE; struct rpmem_req_attr req = { .pool_size = pool_size, .nlanes = min(*nlanes, Rpmem_max_nlanes), .provider = rpp->provider, .pool_desc = pool_set_name, .buff_size = buff_size, }; struct rpmem_resp_attr resp; int ret = rpmem_obc_open(rpp->obc, &req, &resp, open_attr); if (ret) { RPMEM_LOG(ERR, "!open request failed"); goto err_obc_create; } if (open_attr == NULL || util_is_zeroed(open_attr, sizeof(*open_attr))) rpp->no_headers = 1; rpmem_log_resp("open", &resp); ret = rpmem_common_fip_init(rpp, &req, &resp, pool_addr, pool_size, nlanes, buff_size); if (ret) goto err_fip_init; ret = os_thread_create(&rpp->monitor, NULL, rpmem_monitor_thread, rpp); if (ret) { errno = ret; ERR("!starting monitor thread"); goto err_monitor; } return rpp; err_monitor: rpmem_common_fip_fini(rpp); err_fip_init: rpmem_obc_close(rpp->obc, 0); err_obc_create: rpmem_common_fini(rpp, 0); err_common_init: return NULL; } /* * rpmem_close -- close remote pool on target node */ int rpmem_close(RPMEMpool *rpp) { LOG(3, "rpp %p", rpp); RPMEM_LOG(INFO, "closing out-of-band connection"); util_fetch_and_or32(&rpp->closing, 1); rpmem_fip_close(rpp->fip); int ret = rpmem_obc_close(rpp->obc, 0); if (ret) ERR("!close request failed"); RPMEM_LOG(NOTICE, "out-of-band connection closed"); rpmem_common_fip_fini(rpp); rpmem_common_fini(rpp, 1); return ret; } /* * rpmem_flush -- flush to target node operation * * rpp -- remote pool handle * offset -- offset in pool * length -- length of flush operation * lane -- lane number * flags -- additional flags */ int rpmem_flush(RPMEMpool *rpp, size_t offset, size_t length, unsigned lane, unsigned flags) { LOG(3, "rpp %p, offset %zu, length %zu, lane %d, flags 0x%x", rpp, offset, length, lane, 
flags); if (unlikely(rpp->error)) { errno = rpp->error; return -1; } if (flags & RPMEM_FLUSH_FLAGS_MASK) { ERR("invalid flags (0x%x)", flags); errno = EINVAL; return -1; } if (rpp->no_headers == 0 && offset < RPMEM_HDR_SIZE) { ERR("offset (%zu) in pool is less than %d bytes", offset, RPMEM_HDR_SIZE); errno = EINVAL; return -1; } /* * By default use RDMA SEND flush mode which has atomicity * guarantees. For relaxed flush use RDMA WRITE. */ unsigned mode = RPMEM_PERSIST_SEND; if (flags & RPMEM_FLUSH_RELAXED) mode = RPMEM_FLUSH_WRITE; int ret = rpmem_fip_flush(rpp->fip, offset, length, lane, mode); if (unlikely(ret)) { LOG(2, "flush operation failed"); rpp->error = ret; errno = rpp->error; return -1; } return 0; } /* * rpmem_drain -- drain on target node operation * * rpp -- remote pool handle * lane -- lane number * flags -- additional flags */ int rpmem_drain(RPMEMpool *rpp, unsigned lane, unsigned flags) { LOG(3, "rpp %p, lane %d, flags 0x%x", rpp, lane, flags); if (unlikely(rpp->error)) { errno = rpp->error; return -1; } if (flags != 0) { ERR("invalid flags (0x%x)", flags); errno = EINVAL; return -1; } int ret = rpmem_fip_drain(rpp->fip, lane); if (unlikely(ret)) { LOG(2, "drain operation failed"); rpp->error = ret; errno = rpp->error; return -1; } return 0; } /* * rpmem_persist -- persist operation on target node * * rpp -- remote pool handle * offset -- offset in pool * length -- length of persist operation * lane -- lane number */ int rpmem_persist(RPMEMpool *rpp, size_t offset, size_t length, unsigned lane, unsigned flags) { LOG(3, "rpp %p, offset %zu, length %zu, lane %d, flags 0x%x", rpp, offset, length, lane, flags); if (unlikely(rpp->error)) { errno = rpp->error; return -1; } if (flags & RPMEM_PERSIST_FLAGS_MASK) { ERR("invalid flags (0x%x)", flags); errno = EINVAL; return -1; } if (rpp->no_headers == 0 && offset < RPMEM_HDR_SIZE) { ERR("offset (%zu) in pool is less than %d bytes", offset, RPMEM_HDR_SIZE); errno = EINVAL; return -1; } /* * By default use RDMA SEND persist mode which has atomicity * guarantees. For relaxed persist use RDMA WRITE. 
*/ unsigned mode = RPMEM_PERSIST_SEND; if (flags & RPMEM_PERSIST_RELAXED) mode = RPMEM_FLUSH_WRITE; int ret = rpmem_fip_persist(rpp->fip, offset, length, lane, mode); if (unlikely(ret)) { LOG(2, "persist operation failed"); rpp->error = ret; errno = rpp->error; return -1; } return 0; } /* * rpmem_deep_persist -- deep flush operation on target node * * rpp -- remote pool handle * offset -- offset in pool * length -- length of deep flush operation * lane -- lane number */ int rpmem_deep_persist(RPMEMpool *rpp, size_t offset, size_t length, unsigned lane) { LOG(3, "rpp %p, offset %zu, length %zu, lane %d", rpp, offset, length, lane); if (unlikely(rpp->error)) { errno = rpp->error; return -1; } if (offset < RPMEM_HDR_SIZE) { ERR("offset (%zu) in pool is less than %d bytes", offset, RPMEM_HDR_SIZE); errno = EINVAL; return -1; } int ret = rpmem_fip_persist(rpp->fip, offset, length, lane, RPMEM_DEEP_PERSIST); if (unlikely(ret)) { ERR("persist operation failed"); rpp->error = ret; errno = rpp->error; return -1; } return 0; } /* * rpmem_read -- read data from remote pool: * * rpp -- remote pool handle * buff -- output buffer * offset -- offset in pool * length -- length of read operation */ int rpmem_read(RPMEMpool *rpp, void *buff, size_t offset, size_t length, unsigned lane) { LOG(3, "rpp %p, buff %p, offset %zu, length %zu, lane %d", rpp, buff, offset, length, lane); if (unlikely(rpp->error)) { errno = rpp->error; return -1; } if (rpp->no_headers == 0 && offset < RPMEM_HDR_SIZE) LOG(1, "reading from pool at offset (%zu) less than %d bytes", offset, RPMEM_HDR_SIZE); int ret = rpmem_fip_read(rpp->fip, buff, length, offset, lane); if (unlikely(ret)) { errno = ret; ERR("!read operation failed"); rpp->error = ret; return -1; } return 0; } /* * rpmem_set_attr -- overwrite pool attributes on the remote node * * rpp -- remote pool handle * attr -- new pool attributes for the pool on remote node */ int rpmem_set_attr(RPMEMpool *rpp, const struct rpmem_pool_attr *attr) { LOG(3, "rpp %p, attr %p", rpp, attr); if (unlikely(rpp->error)) { errno = rpp->error; return -1; } int ret = rpmem_obc_set_attr(rpp->obc, attr); if (ret) { RPMEM_LOG(ERR, "!set attributes request failed"); } return ret; } /* * rpmem_remove -- remove pool from remote node * * target -- target node in format [<user>@]<target_name>[:<port>] * pool_set_name -- remote pool set name * flags -- bitwise OR of one or more of the following flags: * - RPMEM_REMOVE_FORCE * - RPMEM_REMOVE_POOL_SET */ int rpmem_remove(const char *target, const char *pool_set, int flags) { LOG(3, "target %s, pool_set %s, flags %d", target, pool_set, flags); if (flags & ~(RPMEM_REMOVE_FLAGS_ALL)) { ERR("invalid flags specified"); errno = EINVAL; return -1; } struct rpmem_target_info *info = rpmem_target_parse(target); if (!info) { ERR("!parsing target node address failed"); goto err_target; } const char *argv[5]; argv[0] = "--remove"; argv[1] = pool_set; const char **cur = &argv[2]; if (flags & RPMEM_REMOVE_FORCE) *cur++ = "--force"; if (flags & RPMEM_REMOVE_POOL_SET) *cur++ = "--pool-set"; *cur = NULL; struct rpmem_ssh *ssh = rpmem_ssh_execv(info, argv); if (!ssh) { ERR("!executing ssh command failed"); goto err_ssh_exec; } int ret; ret = rpmem_ssh_monitor(ssh, 0); if (ret) { ERR("!waiting for remote command failed"); goto err_ssh_monitor; } ret = rpmem_ssh_close(ssh); if (ret) { errno = ret; ERR("remote command failed"); goto err_ssh_close; } rpmem_target_free(info); return 0; err_ssh_monitor: rpmem_ssh_close(ssh); err_ssh_close: err_ssh_exec: rpmem_target_free(info); 
err_target: return -1; } #if FAULT_INJECTION void rpmem_inject_fault_at(enum pmem_allocation_type type, int nth, const char *at) { return core_inject_fault_at(type, nth, at); } int rpmem_fault_injection_enabled(void) { return core_fault_injection_enabled(); } #endif
20,542
21.451366
79
c
null
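A minimal usage sketch of the public entry points collected in the rpmem.c record above (rpmem_create, rpmem_persist, rpmem_close). This is illustrative only and not part of the source tree; the host name, pool set name, lane count and the 4 KiB offset/length are placeholder values, and pool_addr/pool_size must be page aligned as rpmem_check_args() enforces.

/*
 * Illustrative round trip (not from the source tree): create a remote
 * replica of a page-aligned local region, make one range durable through
 * lane 0, then close. "example-host" and "pool.set" are placeholders.
 */
#include <stddef.h>
#include <librpmem.h>

int
replicate_range(void *pool_addr, size_t pool_size)
{
	unsigned nlanes = 4;	/* request up to 4 lanes; the library may lower it */

	RPMEMpool *rpp = rpmem_create("example-host", "pool.set",
			pool_addr, pool_size, &nlanes, NULL);
	if (rpp == NULL)
		return -1;

	/* persist one 4 KiB range at offset 4096 (arbitrary example values) */
	if (rpmem_persist(rpp, 4096, 4096, 0 /* lane */, 0 /* flags */)) {
		(void) rpmem_close(rpp);
		return -1;
	}

	return rpmem_close(rpp);
}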
NearPMSW-main/nearpm/shadow/pmdk-sd/src/librpmem/rpmem_util.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2016-2019, Intel Corporation */ /* * rpmem_util.h -- util functions for librpmem header file */ #ifndef RPMEM_UTIL_H #define RPMEM_UTIL_H 1 #ifdef __cplusplus extern "C" { #endif enum { LERR = 1, LWARN = 2, LNOTICE = 3, LINFO = 4, _LDBG = 10, }; #define RPMEM_LOG(level, fmt, args...) LOG(L##level, fmt, ## args) #define RPMEM_DBG(fmt, args...) LOG(_LDBG, fmt, ## args) #define RPMEM_FATAL(fmt, args...) FATAL(fmt, ## args) #define RPMEM_ASSERT(cond) ASSERT(cond) #define RPMEM_PERSIST_FLAGS_ALL RPMEM_PERSIST_RELAXED #define RPMEM_PERSIST_FLAGS_MASK ((unsigned)(~RPMEM_PERSIST_FLAGS_ALL)) #define RPMEM_FLUSH_FLAGS_ALL RPMEM_FLUSH_RELAXED #define RPMEM_FLUSH_FLAGS_MASK ((unsigned)(~RPMEM_FLUSH_FLAGS_ALL)) const char *rpmem_util_proto_errstr(enum rpmem_err err); int rpmem_util_proto_errno(enum rpmem_err err); void rpmem_util_cmds_init(void); void rpmem_util_cmds_fini(void); const char *rpmem_util_cmd_get(void); void rpmem_util_get_env_max_nlanes(unsigned *max_nlanes); void rpmem_util_get_env_wq_size(unsigned *wq_size); #ifdef __cplusplus } #endif #endif
1,137
22.708333
71
h
null
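A small illustration of how the *_FLAGS_MASK definitions in rpmem_util.h above are meant to be used; the same test appears in rpmem_flush() and rpmem_persist() earlier in this section. The helper name is made up for the example.

/*
 * Hypothetical helper showing the intended use of the masks above: any
 * bit outside the accepted flag set makes the call invalid. Requires
 * librpmem.h for RPMEM_FLUSH_RELAXED and rpmem_util.h for the mask.
 */
static inline int
rpmem_flush_flags_valid(unsigned flags)
{
	/* only bits within RPMEM_FLUSH_FLAGS_ALL (i.e. RPMEM_FLUSH_RELAXED) pass */
	return (flags & RPMEM_FLUSH_FLAGS_MASK) == 0;
}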
NearPMSW-main/nearpm/shadow/pmdk-sd/src/librpmem/rpmem_obc.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2016-2018, Intel Corporation */ /* * rpmem_obc.h -- rpmem out-of-band connection client header file */ #ifndef RPMEM_OBC_H #define RPMEM_OBC_H 1 #include <sys/types.h> #include <sys/socket.h> #include "librpmem.h" #ifdef __cplusplus extern "C" { #endif struct rpmem_obc; struct rpmem_obc *rpmem_obc_init(void); void rpmem_obc_fini(struct rpmem_obc *rpc); int rpmem_obc_connect(struct rpmem_obc *rpc, const struct rpmem_target_info *info); int rpmem_obc_disconnect(struct rpmem_obc *rpc); int rpmem_obc_monitor(struct rpmem_obc *rpc, int nonblock); int rpmem_obc_create(struct rpmem_obc *rpc, const struct rpmem_req_attr *req, struct rpmem_resp_attr *res, const struct rpmem_pool_attr *pool_attr); int rpmem_obc_open(struct rpmem_obc *rpc, const struct rpmem_req_attr *req, struct rpmem_resp_attr *res, struct rpmem_pool_attr *pool_attr); int rpmem_obc_set_attr(struct rpmem_obc *rpc, const struct rpmem_pool_attr *pool_attr); int rpmem_obc_close(struct rpmem_obc *rpc, int flags); #ifdef __cplusplus } #endif #endif
1,100
21.9375
65
h
null
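The declarations in rpmem_obc.h above imply a fixed call ordering on the out-of-band channel; the sketch below (illustrative only, not from the source, internal librpmem API) shows one open round trip with error handling reduced to a single path.

/*
 * Illustrative round trip over the out-of-band channel using the API
 * declared above: init, connect, send an open request, then disconnect
 * and free the handle.
 */
static int
obc_open_roundtrip(const struct rpmem_target_info *info,
	const struct rpmem_req_attr *req, struct rpmem_resp_attr *res)
{
	struct rpmem_obc *rpc = rpmem_obc_init();
	if (rpc == NULL)
		return -1;

	int ret = rpmem_obc_connect(rpc, info);
	if (ret == 0) {
		/* NULL pool_attr: the caller does not need the remote attributes */
		ret = rpmem_obc_open(rpc, req, res, NULL);
		(void) rpmem_obc_disconnect(rpc);
	}

	rpmem_obc_fini(rpc);
	return ret;
}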
NearPMSW-main/nearpm/shadow/pmdk-sd/src/librpmem/rpmem_obc.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2016-2019, Intel Corporation */ /* * rpmem_obc.c -- rpmem out-of-band connection client source file */ #include <stdlib.h> #include <netdb.h> #include <errno.h> #include <string.h> #include <unistd.h> #include <sys/types.h> #include <sys/socket.h> #include "librpmem.h" #include "rpmem.h" #include "rpmem_common.h" #include "rpmem_obc.h" #include "rpmem_proto.h" #include "rpmem_util.h" #include "rpmem_ssh.h" #include "out.h" #include "sys_util.h" #include "util.h" /* * rpmem_obc -- rpmem out-of-band client connection handle */ struct rpmem_obc { struct rpmem_ssh *ssh; }; /* * rpmem_obc_is_connected -- (internal) return non-zero value if client is * connected */ static inline int rpmem_obc_is_connected(struct rpmem_obc *rpc) { return rpc->ssh != NULL; } /* * rpmem_obc_check_ibc_attr -- (internal) check in-band connection * attributes */ static int rpmem_obc_check_ibc_attr(struct rpmem_msg_ibc_attr *ibc) { if (ibc->port == 0 || ibc->port > UINT16_MAX) { ERR("invalid port number received -- %u", ibc->port); errno = EPROTO; return -1; } if (ibc->persist_method != RPMEM_PM_GPSPM && ibc->persist_method != RPMEM_PM_APM) { ERR("invalid persistency method received -- %u", ibc->persist_method); errno = EPROTO; return -1; } return 0; } /* * rpmem_obc_check_port -- (internal) verify target node port number */ static int rpmem_obc_check_port(const struct rpmem_target_info *info) { if (!(info->flags & RPMEM_HAS_SERVICE)) return 0; if (*info->service == '\0') { ERR("invalid port number -- '%s'", info->service); goto err; } errno = 0; char *endptr; long port = strtol(info->service, &endptr, 10); if (errno || *endptr != '\0') { ERR("invalid port number -- '%s'", info->service); goto err; } if (port < 1) { ERR("port number must be positive -- '%s'", info->service); goto err; } if (port > UINT16_MAX) { ERR("port number too large -- '%s'", info->service); goto err; } return 0; err: errno = EINVAL; return -1; } /* * rpmem_obc_close_conn -- (internal) close connection */ static void rpmem_obc_close_conn(struct rpmem_obc *rpc) { rpmem_ssh_close(rpc->ssh); (void) util_fetch_and_and64(&rpc->ssh, 0); } /* * rpmem_obc_init_msg_hdr -- (internal) initialize message header */ static void rpmem_obc_set_msg_hdr(struct rpmem_msg_hdr *hdrp, enum rpmem_msg_type type, size_t size) { hdrp->type = type; hdrp->size = size; } /* * rpmem_obc_set_pool_desc -- (internal) fill the pool descriptor field */ static void rpmem_obc_set_pool_desc(struct rpmem_msg_pool_desc *pool_desc, const char *desc, size_t size) { RPMEM_ASSERT(size <= UINT32_MAX); RPMEM_ASSERT(size > 0); pool_desc->size = (uint32_t)size; memcpy(pool_desc->desc, desc, size); pool_desc->desc[size - 1] = '\0'; } /* * rpmem_obc_alloc_create_msg -- (internal) allocate and fill create request * message */ static struct rpmem_msg_create * rpmem_obc_alloc_create_msg(const struct rpmem_req_attr *req, const struct rpmem_pool_attr *pool_attr, size_t *msg_sizep) { size_t pool_desc_size = strlen(req->pool_desc) + 1; size_t msg_size = sizeof(struct rpmem_msg_create) + pool_desc_size; struct rpmem_msg_create *msg = malloc(msg_size); if (!msg) { ERR("!cannot allocate create request message"); return NULL; } rpmem_obc_set_msg_hdr(&msg->hdr, RPMEM_MSG_TYPE_CREATE, msg_size); msg->c.major = RPMEM_PROTO_MAJOR; msg->c.minor = RPMEM_PROTO_MINOR; msg->c.pool_size = req->pool_size; msg->c.nlanes = req->nlanes; msg->c.provider = req->provider; msg->c.buff_size = req->buff_size; rpmem_obc_set_pool_desc(&msg->pool_desc, req->pool_desc, 
pool_desc_size); if (pool_attr) { pack_rpmem_pool_attr(pool_attr, &msg->pool_attr); } else { RPMEM_LOG(INFO, "using zeroed pool attributes"); memset(&msg->pool_attr, 0, sizeof(msg->pool_attr)); } *msg_sizep = msg_size; return msg; } /* * rpmem_obc_check_req -- (internal) check request attributes */ static int rpmem_obc_check_req(const struct rpmem_req_attr *req) { if (req->provider >= MAX_RPMEM_PROV) { ERR("invalid provider specified -- %u", req->provider); errno = EINVAL; return -1; } return 0; } /* * rpmem_obj_check_hdr_resp -- (internal) check response message header */ static int rpmem_obc_check_hdr_resp(struct rpmem_msg_hdr_resp *resp, enum rpmem_msg_type type, size_t size) { if (resp->type != type) { ERR("invalid message type received -- %u", resp->type); errno = EPROTO; return -1; } if (resp->size != size) { ERR("invalid message size received -- %lu", resp->size); errno = EPROTO; return -1; } if (resp->status >= MAX_RPMEM_ERR) { ERR("invalid status received -- %u", resp->status); errno = EPROTO; return -1; } if (resp->status) { enum rpmem_err status = (enum rpmem_err)resp->status; ERR("%s", rpmem_util_proto_errstr(status)); errno = rpmem_util_proto_errno(status); return -1; } return 0; } /* * rpmem_obc_check_create_resp -- (internal) check create response message */ static int rpmem_obc_check_create_resp(struct rpmem_msg_create_resp *resp) { if (rpmem_obc_check_hdr_resp(&resp->hdr, RPMEM_MSG_TYPE_CREATE_RESP, sizeof(struct rpmem_msg_create_resp))) return -1; if (rpmem_obc_check_ibc_attr(&resp->ibc)) return -1; return 0; } /* * rpmem_obc_get_res -- (internal) read response attributes */ static void rpmem_obc_get_res(struct rpmem_resp_attr *res, struct rpmem_msg_ibc_attr *ibc) { res->port = (unsigned short)ibc->port; res->rkey = ibc->rkey; res->raddr = ibc->raddr; res->persist_method = (enum rpmem_persist_method)ibc->persist_method; res->nlanes = ibc->nlanes; } /* * rpmem_obc_alloc_open_msg -- (internal) allocate and fill open request message */ static struct rpmem_msg_open * rpmem_obc_alloc_open_msg(const struct rpmem_req_attr *req, const struct rpmem_pool_attr *pool_attr, size_t *msg_sizep) { size_t pool_desc_size = strlen(req->pool_desc) + 1; size_t msg_size = sizeof(struct rpmem_msg_open) + pool_desc_size; struct rpmem_msg_open *msg = malloc(msg_size); if (!msg) { ERR("!cannot allocate open request message"); return NULL; } rpmem_obc_set_msg_hdr(&msg->hdr, RPMEM_MSG_TYPE_OPEN, msg_size); msg->c.major = RPMEM_PROTO_MAJOR; msg->c.minor = RPMEM_PROTO_MINOR; msg->c.pool_size = req->pool_size; msg->c.nlanes = req->nlanes; msg->c.provider = req->provider; msg->c.buff_size = req->buff_size; rpmem_obc_set_pool_desc(&msg->pool_desc, req->pool_desc, pool_desc_size); *msg_sizep = msg_size; return msg; } /* * rpmem_obc_check_open_resp -- (internal) check open response message */ static int rpmem_obc_check_open_resp(struct rpmem_msg_open_resp *resp) { if (rpmem_obc_check_hdr_resp(&resp->hdr, RPMEM_MSG_TYPE_OPEN_RESP, sizeof(struct rpmem_msg_open_resp))) return -1; if (rpmem_obc_check_ibc_attr(&resp->ibc)) return -1; return 0; } /* * rpmem_obc_check_close_resp -- (internal) check close response message */ static int rpmem_obc_check_close_resp(struct rpmem_msg_close_resp *resp) { if (rpmem_obc_check_hdr_resp(&resp->hdr, RPMEM_MSG_TYPE_CLOSE_RESP, sizeof(struct rpmem_msg_close_resp))) return -1; return 0; } /* * rpmem_obc_check_set_attr_resp -- (internal) check set attributes response * message */ static int rpmem_obc_check_set_attr_resp(struct rpmem_msg_set_attr_resp *resp) { if 
(rpmem_obc_check_hdr_resp(&resp->hdr, RPMEM_MSG_TYPE_SET_ATTR_RESP, sizeof(struct rpmem_msg_set_attr_resp))) return -1; return 0; } /* * rpmem_obc_init -- initialize rpmem obc handle */ struct rpmem_obc * rpmem_obc_init(void) { struct rpmem_obc *rpc = calloc(1, sizeof(*rpc)); if (!rpc) { RPMEM_LOG(ERR, "!allocation of rpmem obc failed"); return NULL; } return rpc; } /* * rpmem_obc_fini -- destroy rpmem obc handle * * This function must be called with connection already closed - after calling * the rpmem_obc_disconnect or after receiving relevant value from * rpmem_obc_monitor. */ void rpmem_obc_fini(struct rpmem_obc *rpc) { free(rpc); } /* * rpmem_obc_connect -- connect to target node * * Connects to target node, the target must be in the following format: * <addr>[:<port>]. If the port number is not specified the default * ssh port will be used. The <addr> is translated into IP address. * * Returns an error if connection is already established. */ int rpmem_obc_connect(struct rpmem_obc *rpc, const struct rpmem_target_info *info) { if (rpmem_obc_is_connected(rpc)) { errno = EALREADY; goto err_notconnected; } if (rpmem_obc_check_port(info)) goto err_port; rpc->ssh = rpmem_ssh_open(info); if (!rpc->ssh) goto err_ssh_open; return 0; err_ssh_open: err_port: err_notconnected: return -1; } /* * rpmem_obc_disconnect -- close the connection to target node * * Returns error if socket is not connected. */ int rpmem_obc_disconnect(struct rpmem_obc *rpc) { if (rpmem_obc_is_connected(rpc)) { rpmem_obc_close_conn(rpc); return 0; } errno = ENOTCONN; return -1; } /* * rpmem_obc_monitor -- monitor connection with target node * * The nonblock variable indicates whether this function should return * immediately (= 1) or may block (= 0). * * If the function detects that socket was closed by remote peer it is * closed on local side and set to -1, so there is no need to call * rpmem_obc_disconnect function. Please take a look at functions' * descriptions to see which functions cannot be used if the connection * has been already closed. * * This function expects there is no data pending on socket, if any data * is pending this function returns an error and sets errno to EPROTO. * * Return values: * 0 - not connected * 1 - connected * < 0 - error */ int rpmem_obc_monitor(struct rpmem_obc *rpc, int nonblock) { if (!rpmem_obc_is_connected(rpc)) return 0; return rpmem_ssh_monitor(rpc->ssh, nonblock); } /* * rpmem_obc_create -- perform create request operation * * Returns error if connection has not been established yet. 
*/ int rpmem_obc_create(struct rpmem_obc *rpc, const struct rpmem_req_attr *req, struct rpmem_resp_attr *res, const struct rpmem_pool_attr *pool_attr) { if (!rpmem_obc_is_connected(rpc)) { ERR("out-of-band connection not established"); errno = ENOTCONN; goto err_notconnected; } if (rpmem_obc_check_req(req)) goto err_req; size_t msg_size; struct rpmem_msg_create *msg = rpmem_obc_alloc_create_msg(req, pool_attr, &msg_size); if (!msg) goto err_alloc_msg; RPMEM_LOG(INFO, "sending create request message"); rpmem_hton_msg_create(msg); if (rpmem_ssh_send(rpc->ssh, msg, msg_size)) { ERR("!sending create request message failed"); goto err_msg_send; } RPMEM_LOG(NOTICE, "create request message sent"); RPMEM_LOG(INFO, "receiving create request response"); struct rpmem_msg_create_resp resp; if (rpmem_ssh_recv(rpc->ssh, &resp, sizeof(resp))) { ERR("!receiving create request response failed"); goto err_msg_recv; } RPMEM_LOG(NOTICE, "create request response received"); rpmem_ntoh_msg_create_resp(&resp); if (rpmem_obc_check_create_resp(&resp)) goto err_msg_resp; rpmem_obc_get_res(res, &resp.ibc); free(msg); return 0; err_msg_resp: err_msg_recv: err_msg_send: free(msg); err_alloc_msg: err_req: err_notconnected: return -1; } /* * rpmem_obc_open -- perform open request operation * * Returns error if connection is not already established. */ int rpmem_obc_open(struct rpmem_obc *rpc, const struct rpmem_req_attr *req, struct rpmem_resp_attr *res, struct rpmem_pool_attr *pool_attr) { if (!rpmem_obc_is_connected(rpc)) { ERR("out-of-band connection not established"); errno = ENOTCONN; goto err_notconnected; } if (rpmem_obc_check_req(req)) goto err_req; size_t msg_size; struct rpmem_msg_open *msg = rpmem_obc_alloc_open_msg(req, pool_attr, &msg_size); if (!msg) goto err_alloc_msg; RPMEM_LOG(INFO, "sending open request message"); rpmem_hton_msg_open(msg); if (rpmem_ssh_send(rpc->ssh, msg, msg_size)) { ERR("!sending open request message failed"); goto err_msg_send; } RPMEM_LOG(NOTICE, "open request message sent"); RPMEM_LOG(INFO, "receiving open request response"); struct rpmem_msg_open_resp resp; if (rpmem_ssh_recv(rpc->ssh, &resp, sizeof(resp))) { ERR("!receiving open request response failed"); goto err_msg_recv; } RPMEM_LOG(NOTICE, "open request response received"); rpmem_ntoh_msg_open_resp(&resp); if (rpmem_obc_check_open_resp(&resp)) goto err_msg_resp; rpmem_obc_get_res(res, &resp.ibc); if (pool_attr) unpack_rpmem_pool_attr(&resp.pool_attr, pool_attr); free(msg); return 0; err_msg_resp: err_msg_recv: err_msg_send: free(msg); err_alloc_msg: err_req: err_notconnected: return -1; } /* * rpmem_obc_set_attr -- perform set attributes request operation * * Returns error if connection is not already established. 
*/ int rpmem_obc_set_attr(struct rpmem_obc *rpc, const struct rpmem_pool_attr *pool_attr) { if (!rpmem_obc_is_connected(rpc)) { ERR("out-of-band connection not established"); errno = ENOTCONN; goto err_notconnected; } struct rpmem_msg_set_attr msg; rpmem_obc_set_msg_hdr(&msg.hdr, RPMEM_MSG_TYPE_SET_ATTR, sizeof(msg)); if (pool_attr) { memcpy(&msg.pool_attr, pool_attr, sizeof(msg.pool_attr)); } else { RPMEM_LOG(INFO, "using zeroed pool attributes"); memset(&msg.pool_attr, 0, sizeof(msg.pool_attr)); } RPMEM_LOG(INFO, "sending set attributes request message"); rpmem_hton_msg_set_attr(&msg); if (rpmem_ssh_send(rpc->ssh, &msg, sizeof(msg))) { ERR("!sending set attributes request message failed"); goto err_msg_send; } RPMEM_LOG(NOTICE, "set attributes request message sent"); RPMEM_LOG(INFO, "receiving set attributes request response"); struct rpmem_msg_set_attr_resp resp; if (rpmem_ssh_recv(rpc->ssh, &resp, sizeof(resp))) { ERR("!receiving set attributes request response failed"); goto err_msg_recv; } RPMEM_LOG(NOTICE, "set attributes request response received"); rpmem_ntoh_msg_set_attr_resp(&resp); if (rpmem_obc_check_set_attr_resp(&resp)) goto err_msg_resp; return 0; err_msg_resp: err_msg_recv: err_msg_send: err_notconnected: return -1; } /* * rpmem_obc_close -- perform close request operation * * Returns error if connection is not already established. * * NOTE: this function does not close the connection, but sends close request * message to remote node and receives a response. The connection must be * closed using rpmem_obc_disconnect function. */ int rpmem_obc_close(struct rpmem_obc *rpc, int flags) { if (!rpmem_obc_is_connected(rpc)) { errno = ENOTCONN; return -1; } struct rpmem_msg_close msg; rpmem_obc_set_msg_hdr(&msg.hdr, RPMEM_MSG_TYPE_CLOSE, sizeof(msg)); msg.flags = (uint32_t)flags; RPMEM_LOG(INFO, "sending close request message"); rpmem_hton_msg_close(&msg); if (rpmem_ssh_send(rpc->ssh, &msg, sizeof(msg))) { RPMEM_LOG(ERR, "!sending close request failed"); return -1; } RPMEM_LOG(NOTICE, "close request message sent"); RPMEM_LOG(INFO, "receiving close request response"); struct rpmem_msg_close_resp resp; if (rpmem_ssh_recv(rpc->ssh, &resp, sizeof(resp))) { RPMEM_LOG(ERR, "!receiving close request response failed"); return -1; } RPMEM_LOG(NOTICE, "close request response received"); rpmem_ntoh_msg_close_resp(&resp); if (rpmem_obc_check_close_resp(&resp)) return -1; return 0; }
15,410
21.730088
80
c
null
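rpmem_obc_check_port() in the record above relies on the usual strtol idiom: reset errno, verify the end pointer, then range-check the result. A standalone version of that pattern (a sketch, not taken from the source) looks like this:

/*
 * Standalone version of the strtol validation pattern used by
 * rpmem_obc_check_port(): returns the port number on success, -1 for
 * empty input, trailing garbage or an out-of-range value.
 */
#include <errno.h>
#include <stdint.h>
#include <stdlib.h>

static long
parse_port(const char *s)
{
	char *endptr;

	errno = 0;
	long port = strtol(s, &endptr, 10);
	if (errno != 0 || endptr == s || *endptr != '\0')
		return -1;	/* overflow, no digits, or trailing characters */
	if (port < 1 || port > UINT16_MAX)
		return -1;	/* outside the valid TCP port range */

	return port;
}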
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmemblk/blk.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2014-2019, Intel Corporation */ /* * blk.h -- internal definitions for libpmem blk module */ #ifndef BLK_H #define BLK_H 1 #include <stddef.h> #include "ctl.h" #include "os_thread.h" #include "pool_hdr.h" #include "page_size.h" #ifdef __cplusplus extern "C" { #endif #include "alloc.h" #include "fault_injection.h" #define PMEMBLK_LOG_PREFIX "libpmemblk" #define PMEMBLK_LOG_LEVEL_VAR "PMEMBLK_LOG_LEVEL" #define PMEMBLK_LOG_FILE_VAR "PMEMBLK_LOG_FILE" /* attributes of the blk memory pool format for the pool header */ #define BLK_HDR_SIG "PMEMBLK" /* must be 8 bytes including '\0' */ #define BLK_FORMAT_MAJOR 1 #define BLK_FORMAT_FEAT_DEFAULT \ {POOL_FEAT_COMPAT_DEFAULT, POOL_FEAT_INCOMPAT_DEFAULT, 0x0000} #define BLK_FORMAT_FEAT_CHECK \ {POOL_FEAT_COMPAT_VALID, POOL_FEAT_INCOMPAT_VALID, 0x0000} static const features_t blk_format_feat_default = BLK_FORMAT_FEAT_DEFAULT; struct pmemblk { struct pool_hdr hdr; /* memory pool header */ /* root info for on-media format... */ uint32_t bsize; /* block size */ /* flag indicating if the pool was zero-initialized */ int is_zeroed; /* some run-time state, allocated out of memory pool... */ void *addr; /* mapped region */ size_t size; /* size of mapped region */ int is_pmem; /* true if pool is PMEM */ int rdonly; /* true if pool is opened read-only */ void *data; /* post-header data area */ size_t datasize; /* size of data area */ size_t nlba; /* number of LBAs in pool */ struct btt *bttp; /* btt handle */ unsigned nlane; /* number of lanes */ unsigned next_lane; /* used to rotate through lanes */ os_mutex_t *locks; /* one per lane */ int is_dev_dax; /* true if mapped on device dax */ struct ctl *ctl; /* top level node of the ctl tree structure */ struct pool_set *set; /* pool set info */ #ifdef DEBUG /* held during read/write mprotected sections */ os_mutex_t write_lock; #endif }; /* data area starts at this alignment after the struct pmemblk above */ #define BLK_FORMAT_DATA_ALIGN ((uintptr_t)PMEM_PAGESIZE) #if FAULT_INJECTION void pmemblk_inject_fault_at(enum pmem_allocation_type type, int nth, const char *at); int pmemblk_fault_injection_enabled(void); #else static inline void pmemblk_inject_fault_at(enum pmem_allocation_type type, int nth, const char *at) { abort(); } static inline int pmemblk_fault_injection_enabled(void) { return 0; } #endif #ifdef __cplusplus } #endif #endif
2,483
23.116505
74
h
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmemblk/libpmemblk.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2014-2018, Intel Corporation */ /* * libpmemblk.c -- pmem entry points for libpmemblk */ #include <stdio.h> #include <stdint.h> #include "libpmemblk.h" #include "ctl_global.h" #include "pmemcommon.h" #include "blk.h" /* * The variable from which the config is directly loaded. The string * cannot contain any comments or extraneous white characters. */ #define BLK_CONFIG_ENV_VARIABLE "PMEMBLK_CONF" /* * The variable that points to a config file from which the config is loaded. */ #define BLK_CONFIG_FILE_ENV_VARIABLE "PMEMBLK_CONF_FILE" /* * blk_ctl_init_and_load -- (static) initializes CTL and loads configuration * from env variable and file */ static int blk_ctl_init_and_load(PMEMblkpool *pbp) { LOG(3, "pbp %p", pbp); if (pbp != NULL && (pbp->ctl = ctl_new()) == NULL) { LOG(2, "!ctl_new"); return -1; } char *env_config = os_getenv(BLK_CONFIG_ENV_VARIABLE); if (env_config != NULL) { if (ctl_load_config_from_string(pbp ? pbp->ctl : NULL, pbp, env_config) != 0) { LOG(2, "unable to parse config stored in %s " "environment variable", BLK_CONFIG_ENV_VARIABLE); goto err; } } char *env_config_file = os_getenv(BLK_CONFIG_FILE_ENV_VARIABLE); if (env_config_file != NULL && env_config_file[0] != '\0') { if (ctl_load_config_from_file(pbp ? pbp->ctl : NULL, pbp, env_config_file) != 0) { LOG(2, "unable to parse config stored in %s " "file (from %s environment variable)", env_config_file, BLK_CONFIG_FILE_ENV_VARIABLE); goto err; } } return 0; err: if (pbp) ctl_delete(pbp->ctl); return -1; } /* * libpmemblk_init -- (internal) load-time initialization for blk * * Called automatically by the run-time loader. */ ATTR_CONSTRUCTOR void libpmemblk_init(void) { ctl_global_register(); if (blk_ctl_init_and_load(NULL)) FATAL("error: %s", pmemblk_errormsg()); common_init(PMEMBLK_LOG_PREFIX, PMEMBLK_LOG_LEVEL_VAR, PMEMBLK_LOG_FILE_VAR, PMEMBLK_MAJOR_VERSION, PMEMBLK_MINOR_VERSION); LOG(3, NULL); } /* * libpmemblk_fini -- libpmemblk cleanup routine * * Called automatically when the process terminates. */ ATTR_DESTRUCTOR void libpmemblk_fini(void) { LOG(3, NULL); common_fini(); } /* * pmemblk_check_versionU -- see if lib meets application version requirements */ #ifndef _WIN32 static inline #endif const char * pmemblk_check_versionU(unsigned major_required, unsigned minor_required) { LOG(3, "major_required %u minor_required %u", major_required, minor_required); if (major_required != PMEMBLK_MAJOR_VERSION) { ERR("libpmemblk major version mismatch (need %u, found %u)", major_required, PMEMBLK_MAJOR_VERSION); return out_get_errormsg(); } if (minor_required > PMEMBLK_MINOR_VERSION) { ERR("libpmemblk minor version mismatch (need %u, found %u)", minor_required, PMEMBLK_MINOR_VERSION); return out_get_errormsg(); } return NULL; } #ifndef _WIN32 /* * pmemblk_check_version -- see if lib meets application version requirements */ const char * pmemblk_check_version(unsigned major_required, unsigned minor_required) { return pmemblk_check_versionU(major_required, minor_required); } #else /* * pmemblk_check_versionW -- see if lib meets application version requirements */ const wchar_t * pmemblk_check_versionW(unsigned major_required, unsigned minor_required) { if (pmemblk_check_versionU(major_required, minor_required) != NULL) return out_get_errormsgW(); else return NULL; } #endif /* * pmemblk_set_funcs -- allow overriding libpmemblk's call to malloc, etc. 
*/ void pmemblk_set_funcs( void *(*malloc_func)(size_t size), void (*free_func)(void *ptr), void *(*realloc_func)(void *ptr, size_t size), char *(*strdup_func)(const char *s)) { LOG(3, NULL); util_set_alloc_funcs(malloc_func, free_func, realloc_func, strdup_func); } /* * pmemblk_errormsgU -- return last error message */ #ifndef _WIN32 static inline #endif const char * pmemblk_errormsgU(void) { return out_get_errormsg(); } #ifndef _WIN32 /* * pmemblk_errormsg -- return last error message */ const char * pmemblk_errormsg(void) { return pmemblk_errormsgU(); } #else /* * pmemblk_errormsgW -- return last error message as wchar_t */ const wchar_t * pmemblk_errormsgW(void) { return out_get_errormsgW(); } #endif
4,318
20.487562
78
c
null
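pmemblk_check_version(), defined in the libpmemblk.c record above, is typically called once at startup; a minimal guard built on it (illustrative, not part of the source) looks like this:

/*
 * Minimal startup guard built on pmemblk_check_version(): refuse to run
 * against a libpmemblk that cannot satisfy the compiled-in version.
 */
#include <stdio.h>
#include <libpmemblk.h>

static int
check_libpmemblk_version(void)
{
	const char *msg = pmemblk_check_version(PMEMBLK_MAJOR_VERSION,
			PMEMBLK_MINOR_VERSION);
	if (msg != NULL) {
		fprintf(stderr, "libpmemblk: %s\n", msg);
		return -1;
	}

	return 0;
}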
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmemblk/btt.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2014-2018, Intel Corporation */ /* * btt.h -- btt module definitions */ #ifndef BTT_H #define BTT_H 1 #ifdef __cplusplus extern "C" { #endif /* callback functions passed to btt_init() */ struct ns_callback { int (*nsread)(void *ns, unsigned lane, void *buf, size_t count, uint64_t off); int (*nswrite)(void *ns, unsigned lane, const void *buf, size_t count, uint64_t off); int (*nszero)(void *ns, unsigned lane, size_t count, uint64_t off); ssize_t (*nsmap)(void *ns, unsigned lane, void **addrp, size_t len, uint64_t off); void (*nssync)(void *ns, unsigned lane, void *addr, size_t len); int ns_is_zeroed; }; struct btt_info; struct btt *btt_init(uint64_t rawsize, uint32_t lbasize, uint8_t parent_uuid[], unsigned maxlane, void *ns, const struct ns_callback *ns_cbp); unsigned btt_nlane(struct btt *bttp); size_t btt_nlba(struct btt *bttp); int btt_read(struct btt *bttp, unsigned lane, uint64_t lba, void *buf); int btt_write(struct btt *bttp, unsigned lane, uint64_t lba, const void *buf); int btt_set_zero(struct btt *bttp, unsigned lane, uint64_t lba); int btt_set_error(struct btt *bttp, unsigned lane, uint64_t lba); int btt_check(struct btt *bttp); void btt_fini(struct btt *bttp); uint64_t btt_flog_size(uint32_t nfree); uint64_t btt_map_size(uint32_t external_nlba); uint64_t btt_arena_datasize(uint64_t arena_size, uint32_t nfree); int btt_info_set(struct btt_info *info, uint32_t external_lbasize, uint32_t nfree, uint64_t arena_size, uint64_t space_left); struct btt_flog *btt_flog_get_valid(struct btt_flog *flog_pair, int *next); int map_entry_is_initial(uint32_t map_entry); void btt_info_convert2h(struct btt_info *infop); void btt_info_convert2le(struct btt_info *infop); void btt_flog_convert2h(struct btt_flog *flogp); void btt_flog_convert2le(struct btt_flog *flogp); #ifdef __cplusplus } #endif #endif
1,908
30.816667
79
h
null
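The btt.h API above expects every I/O call to carry a lane index below btt_nlane(); a small wrapper (a sketch only, modeled on how blk.c uses the module later in this section) makes that contract explicit:

/*
 * Sketch of the caller-side contract of the API above: each read supplies
 * a lane index below btt_nlane() so concurrent callers never share a lane.
 */
static int
btt_read_checked(struct btt *bttp, unsigned lane, uint64_t lba, void *buf)
{
	if (lane >= btt_nlane(bttp))
		return -1;	/* caller exceeded the lane count negotiated at init */

	/* returns 0 on success, -1 with errno set on failure */
	return btt_read(bttp, lane, lba, buf);
}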
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmemblk/btt_layout.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2014-2018, Intel Corporation */ /* * btt_layout.h -- block translation table on-media layout definitions */ /* * Layout of BTT info block. All integers are stored little-endian. */ #ifndef BTT_LAYOUT_H #define BTT_LAYOUT_H 1 #ifdef __cplusplus extern "C" { #endif #define BTT_ALIGNMENT ((uintptr_t)4096) /* alignment of all BTT structures */ #define BTTINFO_SIG_LEN 16 #define BTTINFO_UUID_LEN 16 #define BTTINFO_UNUSED_LEN 3968 #define BTTINFO_SIG "BTT_ARENA_INFO\0" struct btt_info { char sig[BTTINFO_SIG_LEN]; /* must be "BTT_ARENA_INFO\0\0" */ uint8_t uuid[BTTINFO_UUID_LEN]; /* BTT UUID */ uint8_t parent_uuid[BTTINFO_UUID_LEN]; /* UUID of container */ uint32_t flags; /* see flag bits below */ uint16_t major; /* major version */ uint16_t minor; /* minor version */ uint32_t external_lbasize; /* advertised LBA size (bytes) */ uint32_t external_nlba; /* advertised LBAs in this arena */ uint32_t internal_lbasize; /* size of data area blocks (bytes) */ uint32_t internal_nlba; /* number of blocks in data area */ uint32_t nfree; /* number of free blocks */ uint32_t infosize; /* size of this info block */ /* * The following offsets are relative to the beginning of * the btt_info block. */ uint64_t nextoff; /* offset to next arena (or zero) */ uint64_t dataoff; /* offset to arena data area */ uint64_t mapoff; /* offset to area map */ uint64_t flogoff; /* offset to area flog */ uint64_t infooff; /* offset to backup info block */ char unused[BTTINFO_UNUSED_LEN]; /* must be zero */ uint64_t checksum; /* Fletcher64 of all fields */ }; /* * Definitions for flags mask for btt_info structure above. */ #define BTTINFO_FLAG_ERROR 0x00000001 /* error state (read-only) */ #define BTTINFO_FLAG_ERROR_MASK 0x00000001 /* all error bits */ /* * Current on-media format versions. */ #define BTTINFO_MAJOR_VERSION 1 #define BTTINFO_MINOR_VERSION 1 /* * Layout of a BTT "flog" entry. All integers are stored little-endian. * * The "nfree" field in the BTT info block determines how many of these * flog entries there are, and each entry consists of two of the following * structs (entry updates alternate between the two structs), padded up * to a cache line boundary to isolate adjacent updates. */ #define BTT_FLOG_PAIR_ALIGN ((uintptr_t)64) struct btt_flog { uint32_t lba; /* last pre-map LBA using this entry */ uint32_t old_map; /* old post-map LBA (the freed block) */ uint32_t new_map; /* new post-map LBA */ uint32_t seq; /* sequence number (01, 10, 11) */ }; /* * Layout of a BTT "map" entry. 4-byte internal LBA offset, little-endian. */ #define BTT_MAP_ENTRY_SIZE 4 #define BTT_MAP_ENTRY_ERROR 0x40000000U #define BTT_MAP_ENTRY_ZERO 0x80000000U #define BTT_MAP_ENTRY_NORMAL 0xC0000000U #define BTT_MAP_ENTRY_LBA_MASK 0x3fffffffU #define BTT_MAP_LOCK_ALIGN ((uintptr_t)64) /* * BTT layout properties... */ #define BTT_MIN_SIZE ((1u << 20) * 16) #define BTT_MAX_ARENA (1ull << 39) /* 512GB per arena */ #define BTT_MIN_LBA_SIZE (size_t)512 #define BTT_INTERNAL_LBA_ALIGNMENT 256U #define BTT_DEFAULT_NFREE 256 #ifdef __cplusplus } #endif #endif
3,197
28.611111
77
h
null
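In the btt_layout.h record above, the two high bits of a map entry select its state and the low 30 bits carry the post-map LBA; an entry with neither high bit set is still in its initial (never written) state. A small decoder following those constants (illustrative, not from the source):

/*
 * Illustrative decoder for one 32-bit BTT map entry: isolate the two
 * state bits, report error/zero status, and return the post-map LBA.
 */
#include <stdint.h>

static uint32_t
btt_map_entry_decode(uint32_t entry, int *is_error, int *is_zero)
{
	uint32_t state = entry & BTT_MAP_ENTRY_NORMAL;	/* isolate the two high bits */

	*is_error = (state == BTT_MAP_ENTRY_ERROR);
	*is_zero = (state == BTT_MAP_ENTRY_ZERO);

	return entry & BTT_MAP_ENTRY_LBA_MASK;		/* post-map LBA */
}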
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmemblk/blk.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2014-2020, Intel Corporation */ /* * blk.c -- block memory pool entry points for libpmem */ #include <inttypes.h> #include <stdio.h> #include <string.h> #include <sys/types.h> #include <sys/param.h> #include <unistd.h> #include <errno.h> #include <time.h> #include <stdint.h> #include <endian.h> #include <stdbool.h> #include "libpmem.h" #include "libpmemblk.h" #include "mmap.h" #include "set.h" #include "out.h" #include "btt.h" #include "blk.h" #include "util.h" #include "sys_util.h" #include "util_pmem.h" #include "valgrind_internal.h" static const struct pool_attr Blk_create_attr = { BLK_HDR_SIG, BLK_FORMAT_MAJOR, BLK_FORMAT_FEAT_DEFAULT, {0}, {0}, {0}, {0}, {0} }; static const struct pool_attr Blk_open_attr = { BLK_HDR_SIG, BLK_FORMAT_MAJOR, BLK_FORMAT_FEAT_CHECK, {0}, {0}, {0}, {0}, {0} }; /* * lane_enter -- (internal) acquire a unique lane number */ static void lane_enter(PMEMblkpool *pbp, unsigned *lane) { unsigned mylane; mylane = util_fetch_and_add32(&pbp->next_lane, 1) % pbp->nlane; /* lane selected, grab the per-lane lock */ util_mutex_lock(&pbp->locks[mylane]); *lane = mylane; } /* * lane_exit -- (internal) drop lane lock */ static void lane_exit(PMEMblkpool *pbp, unsigned mylane) { util_mutex_unlock(&pbp->locks[mylane]); } /* * nsread -- (internal) read data from the namespace encapsulating the BTT * * This routine is provided to btt_init() to allow the btt module to * do I/O on the memory pool containing the BTT layout. */ static int nsread(void *ns, unsigned lane, void *buf, size_t count, uint64_t off) { struct pmemblk *pbp = (struct pmemblk *)ns; LOG(13, "pbp %p lane %u count %zu off %" PRIu64, pbp, lane, count, off); if (off + count > pbp->datasize) { ERR("offset + count (%zu) past end of data area (%zu)", (size_t)off + count, pbp->datasize); errno = EINVAL; return -1; } memcpy(buf, (char *)pbp->data + off, count); return 0; } /* * nswrite -- (internal) write data to the namespace encapsulating the BTT * * This routine is provided to btt_init() to allow the btt module to * do I/O on the memory pool containing the BTT layout. */ static int nswrite(void *ns, unsigned lane, const void *buf, size_t count, uint64_t off) { struct pmemblk *pbp = (struct pmemblk *)ns; LOG(13, "pbp %p lane %u count %zu off %" PRIu64, pbp, lane, count, off); if (off + count > pbp->datasize) { ERR("offset + count (%zu) past end of data area (%zu)", (size_t)off + count, pbp->datasize); errno = EINVAL; return -1; } void *dest = (char *)pbp->data + off; #ifdef DEBUG /* grab debug write lock */ util_mutex_lock(&pbp->write_lock); #endif /* unprotect the memory (debug version only) */ RANGE_RW(dest, count, pbp->is_dev_dax); if (pbp->is_pmem) pmem_memcpy_nodrain(dest, buf, count); else memcpy(dest, buf, count); /* protect the memory again (debug version only) */ RANGE_RO(dest, count, pbp->is_dev_dax); #ifdef DEBUG /* release debug write lock */ util_mutex_unlock(&pbp->write_lock); #endif if (pbp->is_pmem) pmem_drain(); else pmem_msync(dest, count); return 0; } /* * nsmap -- (internal) allow direct access to a range of a namespace * * The caller requests a range to be "mapped" but the return value * may indicate a smaller amount (in which case the caller is expected * to call back later for another mapping). * * This routine is provided to btt_init() to allow the btt module to * do I/O on the memory pool containing the BTT layout. 
*/ static ssize_t nsmap(void *ns, unsigned lane, void **addrp, size_t len, uint64_t off) { struct pmemblk *pbp = (struct pmemblk *)ns; LOG(12, "pbp %p lane %u len %zu off %" PRIu64, pbp, lane, len, off); ASSERT(((ssize_t)len) >= 0); if (off + len >= pbp->datasize) { ERR("offset + len (%zu) past end of data area (%zu)", (size_t)off + len, pbp->datasize - 1); errno = EINVAL; return -1; } /* * Since the entire file is memory-mapped, this callback * can always provide the entire length requested. */ *addrp = (char *)pbp->data + off; LOG(12, "returning addr %p", *addrp); return (ssize_t)len; } /* * nssync -- (internal) flush changes made to a namespace range * * This is used in conjunction with the addresses handed out by * nsmap() above. There's no need to sync things written via * nswrite() since those changes are flushed each time nswrite() * is called. * * This routine is provided to btt_init() to allow the btt module to * do I/O on the memory pool containing the BTT layout. */ static void nssync(void *ns, unsigned lane, void *addr, size_t len) { struct pmemblk *pbp = (struct pmemblk *)ns; LOG(12, "pbp %p lane %u addr %p len %zu", pbp, lane, addr, len); if (pbp->is_pmem) pmem_persist(addr, len); else pmem_msync(addr, len); } /* * nszero -- (internal) zero data in the namespace encapsulating the BTT * * This routine is provided to btt_init() to allow the btt module to * zero the memory pool containing the BTT layout. */ static int nszero(void *ns, unsigned lane, size_t count, uint64_t off) { struct pmemblk *pbp = (struct pmemblk *)ns; LOG(13, "pbp %p lane %u count %zu off %" PRIu64, pbp, lane, count, off); if (off + count > pbp->datasize) { ERR("offset + count (%zu) past end of data area (%zu)", (size_t)off + count, pbp->datasize); errno = EINVAL; return -1; } void *dest = (char *)pbp->data + off; /* unprotect the memory (debug version only) */ RANGE_RW(dest, count, pbp->is_dev_dax); pmem_memset_persist(dest, 0, count); /* protect the memory again (debug version only) */ RANGE_RO(dest, count, pbp->is_dev_dax); return 0; } /* callbacks for btt_init() */ static struct ns_callback ns_cb = { .nsread = nsread, .nswrite = nswrite, .nszero = nszero, .nsmap = nsmap, .nssync = nssync, .ns_is_zeroed = 0 }; /* * blk_descr_create -- (internal) create block memory pool descriptor */ static void blk_descr_create(PMEMblkpool *pbp, uint32_t bsize, int zeroed) { LOG(3, "pbp %p bsize %u zeroed %d", pbp, bsize, zeroed); /* create the required metadata */ pbp->bsize = htole32(bsize); util_persist(pbp->is_pmem, &pbp->bsize, sizeof(bsize)); pbp->is_zeroed = zeroed; util_persist(pbp->is_pmem, &pbp->is_zeroed, sizeof(pbp->is_zeroed)); } /* * blk_descr_check -- (internal) validate block memory pool descriptor */ static int blk_descr_check(PMEMblkpool *pbp, size_t *bsize) { LOG(3, "pbp %p bsize %zu", pbp, *bsize); size_t hdr_bsize = le32toh(pbp->bsize); if (*bsize && *bsize != hdr_bsize) { ERR("wrong bsize (%zu), pool created with bsize %zu", *bsize, hdr_bsize); errno = EINVAL; return -1; } *bsize = hdr_bsize; LOG(3, "using block size from header: %zu", *bsize); return 0; } /* * blk_runtime_init -- (internal) initialize block memory pool runtime data */ static int blk_runtime_init(PMEMblkpool *pbp, size_t bsize, int rdonly) { LOG(3, "pbp %p bsize %zu rdonly %d", pbp, bsize, rdonly); /* remove volatile part of header */ VALGRIND_REMOVE_PMEM_MAPPING(&pbp->addr, sizeof(struct pmemblk) - sizeof(struct pool_hdr) - sizeof(pbp->bsize) - sizeof(pbp->is_zeroed)); /* * Use some of the memory pool area for run-time info. 
This * run-time state is never loaded from the file, it is always * created here, so no need to worry about byte-order. */ pbp->rdonly = rdonly; pbp->data = (char *)pbp->addr + roundup(sizeof(*pbp), BLK_FORMAT_DATA_ALIGN); ASSERT(((char *)pbp->addr + pbp->size) >= (char *)pbp->data); pbp->datasize = (size_t) (((char *)pbp->addr + pbp->size) - (char *)pbp->data); LOG(4, "data area %p data size %zu bsize %zu", pbp->data, pbp->datasize, bsize); long ncpus = sysconf(_SC_NPROCESSORS_ONLN); if (ncpus < 1) ncpus = 1; ns_cb.ns_is_zeroed = pbp->is_zeroed; /* things free by "goto err" if not NULL */ struct btt *bttp = NULL; os_mutex_t *locks = NULL; bttp = btt_init(pbp->datasize, (uint32_t)bsize, pbp->hdr.poolset_uuid, (unsigned)ncpus * 2, pbp, &ns_cb); if (bttp == NULL) goto err; /* btt_init set errno, called LOG */ pbp->bttp = bttp; pbp->nlane = btt_nlane(pbp->bttp); pbp->next_lane = 0; if ((locks = Malloc(pbp->nlane * sizeof(*locks))) == NULL) { ERR("!Malloc for lane locks"); goto err; } for (unsigned i = 0; i < pbp->nlane; i++) util_mutex_init(&locks[i]); pbp->locks = locks; #ifdef DEBUG /* initialize debug lock */ util_mutex_init(&pbp->write_lock); #endif /* * If possible, turn off all permissions on the pool header page. * * The prototype PMFS doesn't allow this when large pages are in * use. It is not considered an error if this fails. */ RANGE_NONE(pbp->addr, sizeof(struct pool_hdr), pbp->is_dev_dax); /* the data area should be kept read-only for debug version */ RANGE_RO(pbp->data, pbp->datasize, pbp->is_dev_dax); return 0; err: LOG(4, "error clean up"); int oerrno = errno; if (bttp) btt_fini(bttp); errno = oerrno; return -1; } /* * pmemblk_createU -- create a block memory pool */ #ifndef _WIN32 static inline #endif PMEMblkpool * pmemblk_createU(const char *path, size_t bsize, size_t poolsize, mode_t mode) { LOG(3, "path %s bsize %zu poolsize %zu mode %o", path, bsize, poolsize, mode); /* check if bsize is valid */ if (bsize == 0) { ERR("Invalid block size %zu", bsize); errno = EINVAL; return NULL; } if (bsize > UINT32_MAX) { ERR("Invalid block size %zu", bsize); errno = EINVAL; return NULL; } struct pool_set *set; struct pool_attr adj_pool_attr = Blk_create_attr; /* force set SDS feature */ if (SDS_at_create) adj_pool_attr.features.incompat |= POOL_FEAT_SDS; else adj_pool_attr.features.incompat &= ~POOL_FEAT_SDS; if (util_pool_create(&set, path, poolsize, PMEMBLK_MIN_POOL, PMEMBLK_MIN_PART, &adj_pool_attr, NULL, REPLICAS_DISABLED) != 0) { LOG(2, "cannot create pool or pool set"); return NULL; } ASSERT(set->nreplicas > 0); struct pool_replica *rep = set->replica[0]; PMEMblkpool *pbp = rep->part[0].addr; VALGRIND_REMOVE_PMEM_MAPPING(&pbp->addr, sizeof(struct pmemblk) - ((uintptr_t)&pbp->addr - (uintptr_t)&pbp->hdr)); pbp->addr = pbp; pbp->size = rep->repsize; pbp->set = set; pbp->is_pmem = rep->is_pmem; pbp->is_dev_dax = rep->part[0].is_dev_dax; /* is_dev_dax implies is_pmem */ ASSERT(!pbp->is_dev_dax || pbp->is_pmem); /* create pool descriptor */ blk_descr_create(pbp, (uint32_t)bsize, set->zeroed); /* initialize runtime parts */ if (blk_runtime_init(pbp, bsize, 0) != 0) { ERR("pool initialization failed"); goto err; } if (util_poolset_chmod(set, mode)) goto err; util_poolset_fdclose(set); LOG(3, "pbp %p", pbp); return pbp; err: LOG(4, "error clean up"); int oerrno = errno; util_poolset_close(set, DELETE_CREATED_PARTS); errno = oerrno; return NULL; } #ifndef _WIN32 /* * pmemblk_create -- create a block memory pool */ PMEMblkpool * pmemblk_create(const char *path, size_t bsize, size_t 
poolsize, mode_t mode) { return pmemblk_createU(path, bsize, poolsize, mode); } #else /* * pmemblk_createW -- create a block memory pool */ PMEMblkpool * pmemblk_createW(const wchar_t *path, size_t bsize, size_t poolsize, mode_t mode) { char *upath = util_toUTF8(path); if (upath == NULL) return NULL; PMEMblkpool *ret = pmemblk_createU(upath, bsize, poolsize, mode); util_free_UTF8(upath); return ret; } #endif /* * blk_open_common -- (internal) open a block memory pool * * This routine does all the work, but takes a cow flag so internal * calls can map a read-only pool if required. * * Passing in bsize == 0 means a valid pool header must exist (which * will supply the block size). */ static PMEMblkpool * blk_open_common(const char *path, size_t bsize, unsigned flags) { LOG(3, "path %s bsize %zu flags 0x%x", path, bsize, flags); struct pool_set *set; if (util_pool_open(&set, path, PMEMBLK_MIN_PART, &Blk_open_attr, NULL, NULL, flags) != 0) { LOG(2, "cannot open pool or pool set"); return NULL; } ASSERT(set->nreplicas > 0); struct pool_replica *rep = set->replica[0]; PMEMblkpool *pbp = rep->part[0].addr; VALGRIND_REMOVE_PMEM_MAPPING(&pbp->addr, sizeof(struct pmemblk) - ((uintptr_t)&pbp->addr - (uintptr_t)&pbp->hdr)); pbp->addr = pbp; pbp->size = rep->repsize; pbp->set = set; pbp->is_pmem = rep->is_pmem; pbp->is_dev_dax = rep->part[0].is_dev_dax; /* is_dev_dax implies is_pmem */ ASSERT(!pbp->is_dev_dax || pbp->is_pmem); if (set->nreplicas > 1) { errno = ENOTSUP; ERR("!replicas not supported"); goto err; } /* validate pool descriptor */ if (blk_descr_check(pbp, &bsize) != 0) { LOG(2, "descriptor check failed"); goto err; } /* initialize runtime parts */ if (blk_runtime_init(pbp, bsize, set->rdonly) != 0) { ERR("pool initialization failed"); goto err; } util_poolset_fdclose(set); LOG(3, "pbp %p", pbp); return pbp; err: LOG(4, "error clean up"); int oerrno = errno; util_poolset_close(set, DO_NOT_DELETE_PARTS); errno = oerrno; return NULL; } /* * pmemblk_openU -- open a block memory pool */ #ifndef _WIN32 static inline #endif PMEMblkpool * pmemblk_openU(const char *path, size_t bsize) { LOG(3, "path %s bsize %zu", path, bsize); return blk_open_common(path, bsize, COW_at_open ? 
POOL_OPEN_COW : 0); } #ifndef _WIN32 /* * pmemblk_open -- open a block memory pool */ PMEMblkpool * pmemblk_open(const char *path, size_t bsize) { return pmemblk_openU(path, bsize); } #else /* * pmemblk_openW -- open a block memory pool */ PMEMblkpool * pmemblk_openW(const wchar_t *path, size_t bsize) { char *upath = util_toUTF8(path); if (upath == NULL) return NULL; PMEMblkpool *ret = pmemblk_openU(upath, bsize); util_free_UTF8(upath); return ret; } #endif /* * pmemblk_close -- close a block memory pool */ void pmemblk_close(PMEMblkpool *pbp) { LOG(3, "pbp %p", pbp); btt_fini(pbp->bttp); if (pbp->locks) { for (unsigned i = 0; i < pbp->nlane; i++) util_mutex_destroy(&pbp->locks[i]); Free((void *)pbp->locks); } #ifdef DEBUG /* destroy debug lock */ util_mutex_destroy(&pbp->write_lock); #endif util_poolset_close(pbp->set, DO_NOT_DELETE_PARTS); } /* * pmemblk_bsize -- return size of block for specified pool */ size_t pmemblk_bsize(PMEMblkpool *pbp) { LOG(3, "pbp %p", pbp); return le32toh(pbp->bsize); } /* * pmemblk_nblock -- return number of usable blocks in a block memory pool */ size_t pmemblk_nblock(PMEMblkpool *pbp) { LOG(3, "pbp %p", pbp); return btt_nlba(pbp->bttp); } /* * pmemblk_read -- read a block in a block memory pool */ int pmemblk_read(PMEMblkpool *pbp, void *buf, long long blockno) { LOG(3, "pbp %p buf %p blockno %lld", pbp, buf, blockno); if (blockno < 0) { ERR("negative block number"); errno = EINVAL; return -1; } unsigned lane; lane_enter(pbp, &lane); int err = btt_read(pbp->bttp, lane, (uint64_t)blockno, buf); lane_exit(pbp, lane); return err; } /* * pmemblk_write -- write a block (atomically) in a block memory pool */ int pmemblk_write(PMEMblkpool *pbp, const void *buf, long long blockno) { LOG(3, "pbp %p buf %p blockno %lld", pbp, buf, blockno); if (pbp->rdonly) { ERR("EROFS (pool is read-only)"); errno = EROFS; return -1; } if (blockno < 0) { ERR("negative block number"); errno = EINVAL; return -1; } unsigned lane; lane_enter(pbp, &lane); int err = btt_write(pbp->bttp, lane, (uint64_t)blockno, buf); lane_exit(pbp, lane); return err; } /* * pmemblk_set_zero -- zero a block in a block memory pool */ int pmemblk_set_zero(PMEMblkpool *pbp, long long blockno) { LOG(3, "pbp %p blockno %lld", pbp, blockno); if (pbp->rdonly) { ERR("EROFS (pool is read-only)"); errno = EROFS; return -1; } if (blockno < 0) { ERR("negative block number"); errno = EINVAL; return -1; } unsigned lane; lane_enter(pbp, &lane); int err = btt_set_zero(pbp->bttp, lane, (uint64_t)blockno); lane_exit(pbp, lane); return err; } /* * pmemblk_set_error -- set the error state on a block in a block memory pool */ int pmemblk_set_error(PMEMblkpool *pbp, long long blockno) { LOG(3, "pbp %p blockno %lld", pbp, blockno); if (pbp->rdonly) { ERR("EROFS (pool is read-only)"); errno = EROFS; return -1; } if (blockno < 0) { ERR("negative block number"); errno = EINVAL; return -1; } unsigned lane; lane_enter(pbp, &lane); int err = btt_set_error(pbp->bttp, lane, (uint64_t)blockno); lane_exit(pbp, lane); return err; } /* * pmemblk_checkU -- block memory pool consistency check */ #ifndef _WIN32 static inline #endif int pmemblk_checkU(const char *path, size_t bsize) { LOG(3, "path \"%s\" bsize %zu", path, bsize); /* map the pool read-only */ PMEMblkpool *pbp = blk_open_common(path, bsize, POOL_OPEN_COW); if (pbp == NULL) return -1; /* errno set by blk_open_common() */ int retval = btt_check(pbp->bttp); int oerrno = errno; pmemblk_close(pbp); errno = oerrno; return retval; } #ifndef _WIN32 /* * pmemblk_check -- block memory pool 
consistency check */ int pmemblk_check(const char *path, size_t bsize) { return pmemblk_checkU(path, bsize); } #else /* * pmemblk_checkW -- block memory pool consistency check */ int pmemblk_checkW(const wchar_t *path, size_t bsize) { char *upath = util_toUTF8(path); if (upath == NULL) return -1; int ret = pmemblk_checkU(upath, bsize); util_free_UTF8(upath); return ret; } #endif /* * pmemblk_ctl_getU -- programmatically executes a read ctl query */ #ifndef _WIN32 static inline #endif int pmemblk_ctl_getU(PMEMblkpool *pbp, const char *name, void *arg) { LOG(3, "pbp %p name %s arg %p", pbp, name, arg); return ctl_query(pbp == NULL ? NULL : pbp->ctl, pbp, CTL_QUERY_PROGRAMMATIC, name, CTL_QUERY_READ, arg); } /* * pmemblk_ctl_setU -- programmatically executes a write ctl query */ #ifndef _WIN32 static inline #endif int pmemblk_ctl_setU(PMEMblkpool *pbp, const char *name, void *arg) { LOG(3, "pbp %p name %s arg %p", pbp, name, arg); return ctl_query(pbp == NULL ? NULL : pbp->ctl, pbp, CTL_QUERY_PROGRAMMATIC, name, CTL_QUERY_WRITE, arg); } /* * pmemblk_ctl_execU -- programmatically executes a runnable ctl query */ #ifndef _WIN32 static inline #endif int pmemblk_ctl_execU(PMEMblkpool *pbp, const char *name, void *arg) { LOG(3, "pbp %p name %s arg %p", pbp, name, arg); return ctl_query(pbp == NULL ? NULL : pbp->ctl, pbp, CTL_QUERY_PROGRAMMATIC, name, CTL_QUERY_RUNNABLE, arg); } #ifndef _WIN32 /* * pmemblk_ctl_get -- programmatically executes a read ctl query */ int pmemblk_ctl_get(PMEMblkpool *pbp, const char *name, void *arg) { return pmemblk_ctl_getU(pbp, name, arg); } /* * pmemblk_ctl_set -- programmatically executes a write ctl query */ int pmemblk_ctl_set(PMEMblkpool *pbp, const char *name, void *arg) { return pmemblk_ctl_setU(pbp, name, arg); } /* * pmemblk_ctl_exec -- programmatically executes a runnable ctl query */ int pmemblk_ctl_exec(PMEMblkpool *pbp, const char *name, void *arg) { return pmemblk_ctl_execU(pbp, name, arg); } #else /* * pmemblk_ctl_getW -- programmatically executes a read ctl query */ int pmemblk_ctl_getW(PMEMblkpool *pbp, const wchar_t *name, void *arg) { char *uname = util_toUTF8(name); if (uname == NULL) return -1; int ret = pmemblk_ctl_getU(pbp, uname, arg); util_free_UTF8(uname); return ret; } /* * pmemblk_ctl_setW -- programmatically executes a write ctl query */ int pmemblk_ctl_setW(PMEMblkpool *pbp, const wchar_t *name, void *arg) { char *uname = util_toUTF8(name); if (uname == NULL) return -1; int ret = pmemblk_ctl_setU(pbp, uname, arg); util_free_UTF8(uname); return ret; } /* * pmemblk_ctl_execW -- programmatically executes a runnable ctl query */ int pmemblk_ctl_execW(PMEMblkpool *pbp, const wchar_t *name, void *arg) { char *uname = util_toUTF8(name); if (uname == NULL) return -1; int ret = pmemblk_ctl_execU(pbp, uname, arg); util_free_UTF8(uname); return ret; } #endif #if FAULT_INJECTION void pmemblk_inject_fault_at(enum pmem_allocation_type type, int nth, const char *at) { core_inject_fault_at(type, nth, at); } int pmemblk_fault_injection_enabled(void) { return core_fault_injection_enabled(); } #endif
20,218
20.305585
77
c
null
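A minimal usage sketch for the public libpmemblk entry points shown above (open/create, write, read, query, close). The pool path /pmem/blk.pool, the 512-byte block size, and the 0666 mode are illustrative values only, not taken from this repository.

#include <stdio.h>
#include <string.h>
#include <libpmemblk.h>

#define EXAMPLE_PATH "/pmem/blk.pool"
#define EXAMPLE_BSIZE 512

int
main(void)
{
	/* try to open an existing pool; create one if it does not exist */
	PMEMblkpool *pbp = pmemblk_open(EXAMPLE_PATH, EXAMPLE_BSIZE);
	if (pbp == NULL)
		pbp = pmemblk_create(EXAMPLE_PATH, EXAMPLE_BSIZE,
				PMEMBLK_MIN_POOL, 0666);
	if (pbp == NULL) {
		perror("pmemblk_create/open");
		return 1;
	}

	char buf[EXAMPLE_BSIZE];
	memset(buf, 'x', sizeof(buf));

	/* pmemblk_write stores one block atomically through the BTT layer */
	if (pmemblk_write(pbp, buf, 0) < 0)
		perror("pmemblk_write");

	/* read the same block back */
	if (pmemblk_read(pbp, buf, 0) < 0)
		perror("pmemblk_read");

	printf("usable blocks: %zu, block size: %zu\n",
			pmemblk_nblock(pbp), pmemblk_bsize(pbp));

	pmemblk_close(pbp);
	return 0;
}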
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmemobj/container_ravl.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2018-2019, Intel Corporation */ /* * container_ravl.c -- implementation of ravl-based block container */ #include "container_ravl.h" #include "ravl.h" #include "out.h" #include "sys_util.h" struct block_container_ravl { struct block_container super; struct ravl *tree; }; /* * container_compare_memblocks -- (internal) compares two memory blocks */ static int container_compare_memblocks(const void *lhs, const void *rhs) { const struct memory_block *l = lhs; const struct memory_block *r = rhs; int64_t diff = (int64_t)l->size_idx - (int64_t)r->size_idx; if (diff != 0) return diff > 0 ? 1 : -1; diff = (int64_t)l->zone_id - (int64_t)r->zone_id; if (diff != 0) return diff > 0 ? 1 : -1; diff = (int64_t)l->chunk_id - (int64_t)r->chunk_id; if (diff != 0) return diff > 0 ? 1 : -1; diff = (int64_t)l->block_off - (int64_t)r->block_off; if (diff != 0) return diff > 0 ? 1 : -1; return 0; } /* * container_ravl_insert_block -- (internal) inserts a new memory block * into the container */ static int container_ravl_insert_block(struct block_container *bc, const struct memory_block *m) { struct block_container_ravl *c = (struct block_container_ravl *)bc; struct memory_block *e = m->m_ops->get_user_data(m); VALGRIND_DO_MAKE_MEM_DEFINED(e, sizeof(*e)); VALGRIND_ADD_TO_TX(e, sizeof(*e)); *e = *m; VALGRIND_SET_CLEAN(e, sizeof(*e)); VALGRIND_REMOVE_FROM_TX(e, sizeof(*e)); return ravl_insert(c->tree, e); } /* * container_ravl_get_rm_block_bestfit -- (internal) removes and returns the * best-fit memory block for size */ static int container_ravl_get_rm_block_bestfit(struct block_container *bc, struct memory_block *m) { struct block_container_ravl *c = (struct block_container_ravl *)bc; struct ravl_node *n = ravl_find(c->tree, m, RAVL_PREDICATE_GREATER_EQUAL); if (n == NULL) return ENOMEM; struct memory_block *e = ravl_data(n); *m = *e; ravl_remove(c->tree, n); return 0; } /* * container_ravl_get_rm_block_exact -- * (internal) removes exact match memory block */ static int container_ravl_get_rm_block_exact(struct block_container *bc, const struct memory_block *m) { struct block_container_ravl *c = (struct block_container_ravl *)bc; struct ravl_node *n = ravl_find(c->tree, m, RAVL_PREDICATE_EQUAL); if (n == NULL) return ENOMEM; ravl_remove(c->tree, n); return 0; } /* * container_ravl_is_empty -- (internal) checks whether the container is empty */ static int container_ravl_is_empty(struct block_container *bc) { struct block_container_ravl *c = (struct block_container_ravl *)bc; return ravl_empty(c->tree); } /* * container_ravl_rm_all -- (internal) removes all elements from the tree */ static void container_ravl_rm_all(struct block_container *bc) { struct block_container_ravl *c = (struct block_container_ravl *)bc; ravl_clear(c->tree); } /* * container_ravl_delete -- (internal) deletes the container */ static void container_ravl_destroy(struct block_container *bc) { struct block_container_ravl *c = (struct block_container_ravl *)bc; ravl_delete(c->tree); Free(bc); } /* * Tree-based block container used to provide best-fit functionality to the * bucket. The time complexity for this particular container is O(k) where k is * the length of the key. * * The get methods also guarantee that the block with lowest possible address * that best matches the requirements is provided. 
*/ static const struct block_container_ops container_ravl_ops = { .insert = container_ravl_insert_block, .get_rm_exact = container_ravl_get_rm_block_exact, .get_rm_bestfit = container_ravl_get_rm_block_bestfit, .is_empty = container_ravl_is_empty, .rm_all = container_ravl_rm_all, .destroy = container_ravl_destroy, }; /* * container_new_ravl -- allocates and initializes a ravl container */ struct block_container * container_new_ravl(struct palloc_heap *heap) { struct block_container_ravl *bc = Malloc(sizeof(*bc)); if (bc == NULL) goto error_container_malloc; bc->super.heap = heap; bc->super.c_ops = &container_ravl_ops; bc->tree = ravl_new(container_compare_memblocks); if (bc->tree == NULL) goto error_ravl_new; return (struct block_container *)&bc->super; error_ravl_new: Free(bc); error_container_malloc: return NULL; }
4,333
21.931217
79
c
null
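A standalone sketch of the ordering that container_compare_memblocks() imposes: size_idx first, then zone, chunk, and block offset as tie-breakers, which is what lets a GREATER_EQUAL lookup return the best-fit block with the lowest address. The simplified struct blk below is illustrative and is not the real struct memory_block.

#include <stdio.h>
#include <stdlib.h>

struct blk {
	unsigned size_idx;
	unsigned zone_id;
	unsigned chunk_id;
	unsigned block_off;
};

/* mirrors the size-first, address-second ordering of the ravl container */
static int
blk_cmp(const void *lhs, const void *rhs)
{
	const struct blk *l = lhs;
	const struct blk *r = rhs;

	if (l->size_idx != r->size_idx)
		return l->size_idx > r->size_idx ? 1 : -1;
	if (l->zone_id != r->zone_id)
		return l->zone_id > r->zone_id ? 1 : -1;
	if (l->chunk_id != r->chunk_id)
		return l->chunk_id > r->chunk_id ? 1 : -1;
	if (l->block_off != r->block_off)
		return l->block_off > r->block_off ? 1 : -1;
	return 0;
}

int
main(void)
{
	struct blk free_blocks[] = {
		{8, 0, 3, 0}, {4, 0, 1, 0}, {4, 0, 0, 64}, {16, 1, 2, 0},
	};
	size_t n = sizeof(free_blocks) / sizeof(free_blocks[0]);

	qsort(free_blocks, n, sizeof(free_blocks[0]), blk_cmp);

	/* best fit for a 4-unit request: first element with size_idx >= 4 */
	unsigned want = 4;
	for (size_t i = 0; i < n; ++i) {
		if (free_blocks[i].size_idx >= want) {
			printf("best fit: size_idx %u, chunk %u, off %u\n",
					free_blocks[i].size_idx,
					free_blocks[i].chunk_id,
					free_blocks[i].block_off);
			break;
		}
	}
	return 0;
}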
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmemobj/heap_layout.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2018, Intel Corporation */ /* * heap_layout.h -- internal definitions for heap layout */ #ifndef LIBPMEMOBJ_HEAP_LAYOUT_H #define LIBPMEMOBJ_HEAP_LAYOUT_H 1 #include <stddef.h> #include <stdint.h> #ifdef __cplusplus extern "C" { #endif #define HEAP_MAJOR 1 #define HEAP_MINOR 0 #define MAX_CHUNK (UINT16_MAX - 7) /* has to be multiple of 8 */ #define CHUNK_BASE_ALIGNMENT 1024 #define CHUNKSIZE ((size_t)1024 * 256) /* 256 kilobytes */ #define MAX_MEMORY_BLOCK_SIZE (MAX_CHUNK * CHUNKSIZE) #define HEAP_SIGNATURE_LEN 16 #define HEAP_SIGNATURE "MEMORY_HEAP_HDR\0" #define ZONE_HEADER_MAGIC 0xC3F0A2D2 #define ZONE_MIN_SIZE (sizeof(struct zone) + sizeof(struct chunk)) #define ZONE_MAX_SIZE (sizeof(struct zone) + sizeof(struct chunk) * MAX_CHUNK) #define HEAP_MIN_SIZE (sizeof(struct heap_layout) + ZONE_MIN_SIZE) /* Base bitmap values, relevant for both normal and flexible bitmaps */ #define RUN_BITS_PER_VALUE 64U #define RUN_BASE_METADATA_VALUES\ ((unsigned)(sizeof(struct chunk_run_header) / sizeof(uint64_t))) #define RUN_BASE_METADATA_SIZE (sizeof(struct chunk_run_header)) #define RUN_CONTENT_SIZE (CHUNKSIZE - RUN_BASE_METADATA_SIZE) /* * Calculates the size in bytes of a single run instance, including bitmap */ #define RUN_CONTENT_SIZE_BYTES(size_idx)\ (RUN_CONTENT_SIZE + (((size_idx) - 1) * CHUNKSIZE)) /* Default bitmap values, specific for old, non-flexible, bitmaps */ #define RUN_DEFAULT_METADATA_VALUES 40 /* in 8 byte words, 320 bytes total */ #define RUN_DEFAULT_BITMAP_VALUES \ (RUN_DEFAULT_METADATA_VALUES - RUN_BASE_METADATA_VALUES) #define RUN_DEFAULT_BITMAP_SIZE (sizeof(uint64_t) * RUN_DEFAULT_BITMAP_VALUES) #define RUN_DEFAULT_BITMAP_NBITS\ (RUN_BITS_PER_VALUE * RUN_DEFAULT_BITMAP_VALUES) #define RUN_DEFAULT_SIZE \ (CHUNKSIZE - RUN_BASE_METADATA_SIZE - RUN_DEFAULT_BITMAP_SIZE) /* * Calculates the size in bytes of a single run instance, without bitmap, * but only for the default fixed-bitmap algorithm */ #define RUN_DEFAULT_SIZE_BYTES(size_idx)\ (RUN_DEFAULT_SIZE + (((size_idx) - 1) * CHUNKSIZE)) #define CHUNK_MASK ((CHUNKSIZE) - 1) #define CHUNK_ALIGN_UP(value) ((((value) + CHUNK_MASK) & ~CHUNK_MASK)) enum chunk_flags { CHUNK_FLAG_COMPACT_HEADER = 0x0001, CHUNK_FLAG_HEADER_NONE = 0x0002, CHUNK_FLAG_ALIGNED = 0x0004, CHUNK_FLAG_FLEX_BITMAP = 0x0008, }; #define CHUNK_FLAGS_ALL_VALID (\ CHUNK_FLAG_COMPACT_HEADER |\ CHUNK_FLAG_HEADER_NONE |\ CHUNK_FLAG_ALIGNED |\ CHUNK_FLAG_FLEX_BITMAP\ ) enum chunk_type { CHUNK_TYPE_UNKNOWN, CHUNK_TYPE_FOOTER, /* not actual chunk type */ CHUNK_TYPE_FREE, CHUNK_TYPE_USED, CHUNK_TYPE_RUN, CHUNK_TYPE_RUN_DATA, MAX_CHUNK_TYPE }; struct chunk { uint8_t data[CHUNKSIZE]; }; struct chunk_run_header { uint64_t block_size; uint64_t alignment; /* valid only /w CHUNK_FLAG_ALIGNED */ }; struct chunk_run { struct chunk_run_header hdr; uint8_t content[RUN_CONTENT_SIZE]; /* bitmap + data */ }; struct chunk_header { uint16_t type; uint16_t flags; uint32_t size_idx; }; struct zone_header { uint32_t magic; uint32_t size_idx; uint8_t reserved[56]; }; struct zone { struct zone_header header; struct chunk_header chunk_headers[MAX_CHUNK]; struct chunk chunks[]; }; struct heap_header { char signature[HEAP_SIGNATURE_LEN]; uint64_t major; uint64_t minor; uint64_t unused; /* might be garbage */ uint64_t chunksize; uint64_t chunks_per_zone; uint8_t reserved[960]; uint64_t checksum; }; struct heap_layout { struct heap_header header; struct zone zone0; /* first element of zones array */ }; #define ALLOC_HDR_SIZE_SHIFT (48ULL) 
#define ALLOC_HDR_FLAGS_MASK (((1ULL) << ALLOC_HDR_SIZE_SHIFT) - 1) struct allocation_header_legacy { uint8_t unused[8]; uint64_t size; uint8_t unused2[32]; uint64_t root_size; uint64_t type_num; }; #define ALLOC_HDR_COMPACT_SIZE sizeof(struct allocation_header_compact) struct allocation_header_compact { uint64_t size; uint64_t extra; }; enum header_type { HEADER_LEGACY, HEADER_COMPACT, HEADER_NONE, MAX_HEADER_TYPES }; static const size_t header_type_to_size[MAX_HEADER_TYPES] = { sizeof(struct allocation_header_legacy), sizeof(struct allocation_header_compact), 0 }; static const enum chunk_flags header_type_to_flag[MAX_HEADER_TYPES] = { (enum chunk_flags)0, CHUNK_FLAG_COMPACT_HEADER, CHUNK_FLAG_HEADER_NONE }; static inline struct zone * ZID_TO_ZONE(struct heap_layout *layout, size_t zone_id) { return (struct zone *) ((uintptr_t)&layout->zone0 + ZONE_MAX_SIZE * zone_id); } static inline struct chunk_header * GET_CHUNK_HDR(struct heap_layout *layout, size_t zone_id, unsigned chunk_id) { return &ZID_TO_ZONE(layout, zone_id)->chunk_headers[chunk_id]; } static inline struct chunk * GET_CHUNK(struct heap_layout *layout, size_t zone_id, unsigned chunk_id) { return &ZID_TO_ZONE(layout, zone_id)->chunks[chunk_id]; } static inline struct chunk_run * GET_CHUNK_RUN(struct heap_layout *layout, size_t zone_id, unsigned chunk_id) { return (struct chunk_run *)GET_CHUNK(layout, zone_id, chunk_id); } #ifdef __cplusplus } #endif #endif
5,105
23.666667
78
h
null
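A small arithmetic sketch of the chunk and run-bitmap macros defined in heap_layout.h. The constants are copied from the header above (the default bitmap has 40 metadata values minus the 2 run-header values); the 300 KiB request is an arbitrary example and nothing here touches a real heap.

#include <stdio.h>
#include <stddef.h>

#define CHUNKSIZE ((size_t)1024 * 256)
#define CHUNK_MASK ((CHUNKSIZE) - 1)
#define CHUNK_ALIGN_UP(value) ((((value) + CHUNK_MASK) & ~CHUNK_MASK))

#define RUN_BITS_PER_VALUE 64U
#define RUN_DEFAULT_BITMAP_VALUES 38 /* 40 metadata values - 2 header values */
#define RUN_DEFAULT_BITMAP_NBITS (RUN_BITS_PER_VALUE * RUN_DEFAULT_BITMAP_VALUES)

int
main(void)
{
	/* any huge allocation request is rounded up to whole chunks */
	size_t req = 300 * 1024;
	printf("request %zu B -> %zu B after CHUNK_ALIGN_UP\n",
			req, CHUNK_ALIGN_UP(req));

	/* a default (non-flexible) run bitmap can track this many units */
	printf("default run bitmap bits: %u\n", RUN_DEFAULT_BITMAP_NBITS);
	return 0;
}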
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmemobj/alloc_class.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2016-2020, Intel Corporation */ /* * alloc_class.h -- internal definitions for allocation classes */ #ifndef LIBPMEMOBJ_ALLOC_CLASS_H #define LIBPMEMOBJ_ALLOC_CLASS_H 1 #include <stddef.h> #include <stdint.h> #include <sys/types.h> #include "heap_layout.h" #include "memblock.h" #ifdef __cplusplus extern "C" { #endif #define MAX_ALLOCATION_CLASSES (UINT8_MAX) #define DEFAULT_ALLOC_CLASS_ID (0) #define RUN_UNIT_MAX RUN_BITS_PER_VALUE struct alloc_class_collection; enum alloc_class_type { CLASS_UNKNOWN, CLASS_HUGE, CLASS_RUN, MAX_ALLOC_CLASS_TYPES }; struct alloc_class { uint8_t id; uint16_t flags; size_t unit_size; enum header_type header_type; enum alloc_class_type type; /* run-specific data */ struct run_descriptor rdsc; }; struct alloc_class_collection *alloc_class_collection_new(void); void alloc_class_collection_delete(struct alloc_class_collection *ac); struct alloc_class *alloc_class_by_run( struct alloc_class_collection *ac, size_t unit_size, uint16_t flags, uint32_t size_idx); struct alloc_class *alloc_class_by_alloc_size( struct alloc_class_collection *ac, size_t size); struct alloc_class *alloc_class_by_id( struct alloc_class_collection *ac, uint8_t id); int alloc_class_reserve(struct alloc_class_collection *ac, uint8_t id); int alloc_class_find_first_free_slot(struct alloc_class_collection *ac, uint8_t *slot); ssize_t alloc_class_calc_size_idx(struct alloc_class *c, size_t size); struct alloc_class * alloc_class_new(int id, struct alloc_class_collection *ac, enum alloc_class_type type, enum header_type htype, size_t unit_size, size_t alignment, uint32_t size_idx); void alloc_class_delete(struct alloc_class_collection *ac, struct alloc_class *c); #ifdef __cplusplus } #endif #endif
1,815
21.7
71
h
null
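A sketch of the call sequence this header's API is designed for, assuming it is compiled inside libpmemobj (the header depends on heap_layout.h and memblock.h); the 200-byte request size is an arbitrary example.

#include <stdio.h>
#include "alloc_class.h"

void
classify_example(void)
{
	struct alloc_class_collection *ac = alloc_class_collection_new();
	if (ac == NULL)
		return;

	/* pick the class that handles a 200-byte user allocation */
	struct alloc_class *c = alloc_class_by_alloc_size(ac, 200);
	if (c != NULL) {
		/* how many unit blocks that allocation would consume */
		ssize_t units = alloc_class_calc_size_idx(c, 200);
		printf("class %u, unit size %zu, units needed %zd\n",
				(unsigned)c->id, c->unit_size, units);
	}

	alloc_class_collection_delete(ac);
}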
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmemobj/recycler.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2016-2019, Intel Corporation */ /* * recycler.c -- implementation of run recycler */ #include "heap.h" #include "recycler.h" #include "vec.h" #include "out.h" #include "util.h" #include "sys_util.h" #include "ravl.h" #include "valgrind_internal.h" #define THRESHOLD_MUL 4 /* * recycler_element_cmp -- compares two recycler elements */ static int recycler_element_cmp(const void *lhs, const void *rhs) { const struct recycler_element *l = lhs; const struct recycler_element *r = rhs; int64_t diff = (int64_t)l->max_free_block - (int64_t)r->max_free_block; if (diff != 0) return diff > 0 ? 1 : -1; diff = (int64_t)l->free_space - (int64_t)r->free_space; if (diff != 0) return diff > 0 ? 1 : -1; diff = (int64_t)l->zone_id - (int64_t)r->zone_id; if (diff != 0) return diff > 0 ? 1 : -1; diff = (int64_t)l->chunk_id - (int64_t)r->chunk_id; if (diff != 0) return diff > 0 ? 1 : -1; return 0; } struct recycler { struct ravl *runs; struct palloc_heap *heap; /* * How many unaccounted units there *might* be inside of the memory * blocks stored in the recycler. * The value is not meant to be accurate, but rather a rough measure on * how often should the memory block scores be recalculated. * * Per-chunk unaccounted units are shared for all zones, which might * lead to some unnecessary recalculations. */ size_t unaccounted_units[MAX_CHUNK]; size_t unaccounted_total; size_t nallocs; size_t *peak_arenas; VEC(, struct recycler_element) recalc; os_mutex_t lock; }; /* * recycler_new -- creates new recycler instance */ struct recycler * recycler_new(struct palloc_heap *heap, size_t nallocs, size_t *peak_arenas) { struct recycler *r = Malloc(sizeof(struct recycler)); if (r == NULL) goto error_alloc_recycler; r->runs = ravl_new_sized(recycler_element_cmp, sizeof(struct recycler_element)); if (r->runs == NULL) goto error_alloc_tree; r->heap = heap; r->nallocs = nallocs; r->peak_arenas = peak_arenas; r->unaccounted_total = 0; memset(&r->unaccounted_units, 0, sizeof(r->unaccounted_units)); VEC_INIT(&r->recalc); util_mutex_init(&r->lock); return r; error_alloc_tree: Free(r); error_alloc_recycler: return NULL; } /* * recycler_delete -- deletes recycler instance */ void recycler_delete(struct recycler *r) { VEC_DELETE(&r->recalc); util_mutex_destroy(&r->lock); ravl_delete(r->runs); Free(r); } /* * recycler_element_new -- calculates how many free bytes does a run have and * what's the largest request that the run can handle, returns that as * recycler element struct */ struct recycler_element recycler_element_new(struct palloc_heap *heap, const struct memory_block *m) { /* * Counting of the clear bits can race with a concurrent deallocation * that operates on the same run. This race is benign and has absolutely * no effect on the correctness of this algorithm. Ideally, we would * avoid grabbing the lock, but helgrind gets very confused if we * try to disable reporting for this function. 
*/ os_mutex_t *lock = m->m_ops->get_lock(m); util_mutex_lock(lock); struct recycler_element e = { .free_space = 0, .max_free_block = 0, .chunk_id = m->chunk_id, .zone_id = m->zone_id, }; m->m_ops->calc_free(m, &e.free_space, &e.max_free_block); util_mutex_unlock(lock); return e; } /* * recycler_put -- inserts new run into the recycler */ int recycler_put(struct recycler *r, const struct memory_block *m, struct recycler_element element) { int ret = 0; util_mutex_lock(&r->lock); ret = ravl_emplace_copy(r->runs, &element); util_mutex_unlock(&r->lock); return ret; } /* * recycler_get -- retrieves a chunk from the recycler */ int recycler_get(struct recycler *r, struct memory_block *m) { int ret = 0; util_mutex_lock(&r->lock); struct recycler_element e = { .max_free_block = m->size_idx, 0, 0, 0}; struct ravl_node *n = ravl_find(r->runs, &e, RAVL_PREDICATE_GREATER_EQUAL); if (n == NULL) { ret = ENOMEM; goto out; } struct recycler_element *ne = ravl_data(n); m->chunk_id = ne->chunk_id; m->zone_id = ne->zone_id; ravl_remove(r->runs, n); struct chunk_header *hdr = heap_get_chunk_hdr(r->heap, m); m->size_idx = hdr->size_idx; memblock_rebuild_state(r->heap, m); out: util_mutex_unlock(&r->lock); return ret; } /* * recycler_recalc -- recalculates the scores of runs in the recycler to match * the updated persistent state */ struct empty_runs recycler_recalc(struct recycler *r, int force) { struct empty_runs runs; VEC_INIT(&runs); uint64_t units = r->unaccounted_total; size_t peak_arenas; util_atomic_load64(r->peak_arenas, &peak_arenas); uint64_t recalc_threshold = THRESHOLD_MUL * peak_arenas * r->nallocs; if (!force && units < recalc_threshold) return runs; if (util_mutex_trylock(&r->lock) != 0) return runs; /* If the search is forced, recalculate everything */ uint64_t search_limit = force ? UINT64_MAX : units; uint64_t found_units = 0; struct memory_block nm = MEMORY_BLOCK_NONE; struct ravl_node *n; struct recycler_element next = {0, 0, 0, 0}; enum ravl_predicate p = RAVL_PREDICATE_GREATER_EQUAL; do { if ((n = ravl_find(r->runs, &next, p)) == NULL) break; p = RAVL_PREDICATE_GREATER; struct recycler_element *ne = ravl_data(n); next = *ne; uint64_t chunk_units = r->unaccounted_units[ne->chunk_id]; if (!force && chunk_units == 0) continue; uint32_t existing_free_space = ne->free_space; nm.chunk_id = ne->chunk_id; nm.zone_id = ne->zone_id; memblock_rebuild_state(r->heap, &nm); struct recycler_element e = recycler_element_new(r->heap, &nm); ASSERT(e.free_space >= existing_free_space); uint64_t free_space_diff = e.free_space - existing_free_space; found_units += free_space_diff; if (free_space_diff == 0) continue; /* * Decrease the per chunk_id counter by the number of nallocs * found, increased by the blocks potentially freed in the * active memory block. Cap the sub value to prevent overflow. 
*/ util_fetch_and_sub64(&r->unaccounted_units[nm.chunk_id], MIN(chunk_units, free_space_diff + r->nallocs)); ravl_remove(r->runs, n); if (e.free_space == r->nallocs) { memblock_rebuild_state(r->heap, &nm); if (VEC_PUSH_BACK(&runs, nm) != 0) ASSERT(0); /* XXX: fix after refactoring */ } else { VEC_PUSH_BACK(&r->recalc, e); } } while (found_units < search_limit); struct recycler_element *e; VEC_FOREACH_BY_PTR(e, &r->recalc) { ravl_emplace_copy(r->runs, e); } VEC_CLEAR(&r->recalc); util_mutex_unlock(&r->lock); util_fetch_and_sub64(&r->unaccounted_total, units); return runs; } /* * recycler_inc_unaccounted -- increases the number of unaccounted units in the * recycler */ void recycler_inc_unaccounted(struct recycler *r, const struct memory_block *m) { util_fetch_and_add64(&r->unaccounted_total, m->size_idx); util_fetch_and_add64(&r->unaccounted_units[m->chunk_id], m->size_idx); }
6,997
22.019737
79
c
null
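An arithmetic sketch of the trigger condition used by recycler_recalc(): a non-forced recalculation only runs once the unaccounted units reach THRESHOLD_MUL * peak_arenas * nallocs. THRESHOLD_MUL is copied from recycler.c; the arena, nallocs, and unaccounted counts are made-up example values.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#define THRESHOLD_MUL 4

int
main(void)
{
	size_t peak_arenas = 8;            /* example: 8 arenas were in use */
	size_t nallocs = 252;              /* example: units per run */
	uint64_t unaccounted_total = 6000; /* example: frees since last recalc */

	uint64_t recalc_threshold =
		THRESHOLD_MUL * peak_arenas * nallocs;

	/* a non-forced recalc only runs once enough frees have accumulated */
	printf("threshold %llu, pending %llu -> %s\n",
			(unsigned long long)recalc_threshold,
			(unsigned long long)unaccounted_total,
			unaccounted_total >= recalc_threshold ?
				"recalculate" : "skip");
	return 0;
}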
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmemobj/alloc_class.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2016-2020, Intel Corporation */ /* * alloc_class.c -- implementation of allocation classes */ #include <float.h> #include <string.h> #include "alloc_class.h" #include "heap_layout.h" #include "util.h" #include "out.h" #include "bucket.h" #include "critnib.h" #define RUN_CLASS_KEY_PACK(map_idx_s, flags_s, size_idx_s)\ ((uint64_t)(map_idx_s) << 32 |\ (uint64_t)(flags_s) << 16 |\ (uint64_t)(size_idx_s)) /* * Value used to mark a reserved spot in the bucket array. */ #define ACLASS_RESERVED ((void *)0xFFFFFFFFULL) /* * The last size that is handled by runs. */ #define MAX_RUN_SIZE (CHUNKSIZE * 10) /* * Maximum number of bytes the allocation class generation algorithm can decide * to waste in a single run chunk. */ #define MAX_RUN_WASTED_BYTES 1024 /* * Allocation categories are used for allocation classes generation. Each one * defines the biggest handled size (in bytes) and step pct of the generation * process. The step percentage defines maximum allowed external fragmentation * for the category. */ #define MAX_ALLOC_CATEGORIES 9 /* * The first size (in byes) which is actually used in the allocation * class generation algorithm. All smaller sizes use the first predefined bucket * with the smallest run unit size. */ #define FIRST_GENERATED_CLASS_SIZE 128 /* * The granularity of the allocation class generation algorithm. */ #define ALLOC_BLOCK_SIZE_GEN 64 /* * The first predefined allocation class size */ #define MIN_UNIT_SIZE 128 static const struct { size_t size; float step; } categories[MAX_ALLOC_CATEGORIES] = { /* dummy category - the first allocation class is predefined */ {FIRST_GENERATED_CLASS_SIZE, 0.05f}, {1024, 0.05f}, {2048, 0.05f}, {4096, 0.05f}, {8192, 0.05f}, {16384, 0.05f}, {32768, 0.05f}, {131072, 0.05f}, {393216, 0.05f}, }; #define RUN_UNIT_MAX_ALLOC 8U /* * Every allocation has to be a multiple of at least 8 because we need to * ensure proper alignment of every pmem structure. */ #define ALLOC_BLOCK_SIZE 16 /* * Converts size (in bytes) to number of allocation blocks. */ #define SIZE_TO_CLASS_MAP_INDEX(_s, _g) (1 + (((_s) - 1) / (_g))) /* * Target number of allocations per run instance. */ #define RUN_MIN_NALLOCS 200 /* * Hard limit of chunks per single run. */ #define RUN_SIZE_IDX_CAP (16) #define ALLOC_CLASS_DEFAULT_FLAGS CHUNK_FLAG_FLEX_BITMAP struct alloc_class_collection { size_t granularity; struct alloc_class *aclasses[MAX_ALLOCATION_CLASSES]; /* * The last size (in bytes) that is handled by runs, everything bigger * uses the default class. */ size_t last_run_max_size; /* maps allocation classes to allocation sizes, excluding the header! */ uint8_t *class_map_by_alloc_size; /* maps allocation classes to run unit sizes */ struct critnib *class_map_by_unit_size; int fail_on_missing_class; int autogenerate_on_missing_class; }; /* * alloc_class_find_first_free_slot -- searches for the * first available allocation class slot * * This function must be thread-safe because allocation classes can be created * at runtime. */ int alloc_class_find_first_free_slot(struct alloc_class_collection *ac, uint8_t *slot) { LOG(10, NULL); for (int n = 0; n < MAX_ALLOCATION_CLASSES; ++n) { if (util_bool_compare_and_swap64(&ac->aclasses[n], NULL, ACLASS_RESERVED)) { *slot = (uint8_t)n; return 0; } } return -1; } /* * alloc_class_reserve -- reserve the specified class id */ int alloc_class_reserve(struct alloc_class_collection *ac, uint8_t id) { LOG(10, NULL); return util_bool_compare_and_swap64(&ac->aclasses[id], NULL, ACLASS_RESERVED) ? 
0 : -1; } /* * alloc_class_reservation_clear -- removes the reservation on class id */ static void alloc_class_reservation_clear(struct alloc_class_collection *ac, int id) { LOG(10, NULL); int ret = util_bool_compare_and_swap64(&ac->aclasses[id], ACLASS_RESERVED, NULL); ASSERT(ret); } /* * alloc_class_new -- creates a new allocation class */ struct alloc_class * alloc_class_new(int id, struct alloc_class_collection *ac, enum alloc_class_type type, enum header_type htype, size_t unit_size, size_t alignment, uint32_t size_idx) { LOG(10, NULL); struct alloc_class *c = Malloc(sizeof(*c)); if (c == NULL) goto error_class_alloc; c->unit_size = unit_size; c->header_type = htype; c->type = type; c->flags = (uint16_t) (header_type_to_flag[c->header_type] | (alignment ? CHUNK_FLAG_ALIGNED : 0)) | ALLOC_CLASS_DEFAULT_FLAGS; switch (type) { case CLASS_HUGE: id = DEFAULT_ALLOC_CLASS_ID; break; case CLASS_RUN: c->rdsc.alignment = alignment; memblock_run_bitmap(&size_idx, c->flags, unit_size, alignment, NULL, &c->rdsc.bitmap); c->rdsc.nallocs = c->rdsc.bitmap.nbits; c->rdsc.size_idx = size_idx; /* these two fields are duplicated from class */ c->rdsc.unit_size = c->unit_size; c->rdsc.flags = c->flags; uint8_t slot = (uint8_t)id; if (id < 0 && alloc_class_find_first_free_slot(ac, &slot) != 0) goto error_class_alloc; id = slot; size_t map_idx = SIZE_TO_CLASS_MAP_INDEX(c->unit_size, ac->granularity); ASSERT(map_idx <= UINT32_MAX); uint32_t map_idx_s = (uint32_t)map_idx; uint16_t size_idx_s = (uint16_t)size_idx; uint16_t flags_s = (uint16_t)c->flags; uint64_t k = RUN_CLASS_KEY_PACK(map_idx_s, flags_s, size_idx_s); if (critnib_insert(ac->class_map_by_unit_size, k, c) != 0) { ERR("unable to register allocation class"); goto error_map_insert; } break; default: ASSERT(0); } c->id = (uint8_t)id; ac->aclasses[c->id] = c; return c; error_map_insert: Free(c); error_class_alloc: if (id >= 0) alloc_class_reservation_clear(ac, id); return NULL; } /* * alloc_class_delete -- (internal) deletes an allocation class */ void alloc_class_delete(struct alloc_class_collection *ac, struct alloc_class *c) { LOG(10, NULL); ac->aclasses[c->id] = NULL; Free(c); } /* * alloc_class_find_or_create -- (internal) searches for the * biggest allocation class for which unit_size is evenly divisible by n. * If no such class exists, create one. */ static struct alloc_class * alloc_class_find_or_create(struct alloc_class_collection *ac, size_t n) { LOG(10, NULL); COMPILE_ERROR_ON(MAX_ALLOCATION_CLASSES > UINT8_MAX); uint64_t required_size_bytes = n * RUN_MIN_NALLOCS; uint32_t required_size_idx = 1; if (required_size_bytes > RUN_DEFAULT_SIZE) { required_size_bytes -= RUN_DEFAULT_SIZE; required_size_idx += CALC_SIZE_IDX(CHUNKSIZE, required_size_bytes); if (required_size_idx > RUN_SIZE_IDX_CAP) required_size_idx = RUN_SIZE_IDX_CAP; } for (int i = MAX_ALLOCATION_CLASSES - 1; i >= 0; --i) { struct alloc_class *c = ac->aclasses[i]; if (c == NULL || c->type == CLASS_HUGE || c->rdsc.size_idx < required_size_idx) continue; if (n % c->unit_size == 0 && n / c->unit_size <= RUN_UNIT_MAX_ALLOC) return c; } /* * In order to minimize the wasted space at the end of the run the * run data size must be divisible by the allocation class unit size * with the smallest possible remainder, preferably 0. 
*/ struct run_bitmap b; size_t runsize_bytes = 0; do { if (runsize_bytes != 0) /* don't increase on first iteration */ n += ALLOC_BLOCK_SIZE_GEN; uint32_t size_idx = required_size_idx; memblock_run_bitmap(&size_idx, ALLOC_CLASS_DEFAULT_FLAGS, n, 0, NULL, &b); runsize_bytes = RUN_CONTENT_SIZE_BYTES(size_idx) - b.size; } while ((runsize_bytes % n) > MAX_RUN_WASTED_BYTES); /* * Now that the desired unit size is found the existing classes need * to be searched for possible duplicates. If a class that can handle * the calculated size already exists, simply return that. */ for (int i = 1; i < MAX_ALLOCATION_CLASSES; ++i) { struct alloc_class *c = ac->aclasses[i]; if (c == NULL || c->type == CLASS_HUGE) continue; if (n / c->unit_size <= RUN_UNIT_MAX_ALLOC && n % c->unit_size == 0) return c; if (c->unit_size == n) return c; } return alloc_class_new(-1, ac, CLASS_RUN, HEADER_COMPACT, n, 0, required_size_idx); } /* * alloc_class_find_min_frag -- searches for an existing allocation * class that will provide the smallest internal fragmentation for the given * size. */ static struct alloc_class * alloc_class_find_min_frag(struct alloc_class_collection *ac, size_t n) { LOG(10, NULL); struct alloc_class *best_c = NULL; size_t lowest_waste = SIZE_MAX; ASSERTne(n, 0); /* * Start from the largest buckets in order to minimize unit size of * allocated memory blocks. */ for (int i = MAX_ALLOCATION_CLASSES - 1; i >= 0; --i) { struct alloc_class *c = ac->aclasses[i]; /* can't use alloc classes /w no headers by default */ if (c == NULL || c->header_type == HEADER_NONE) continue; size_t real_size = n + header_type_to_size[c->header_type]; size_t units = CALC_SIZE_IDX(c->unit_size, real_size); /* can't exceed the maximum allowed run unit max */ if (c->type == CLASS_RUN && units > RUN_UNIT_MAX_ALLOC) continue; if (c->unit_size * units == real_size) return c; size_t waste = (c->unit_size * units) - real_size; /* * If we assume that the allocation class is only ever going to * be used with exactly one size, the effective internal * fragmentation would be increased by the leftover * memory at the end of the run. 
*/ if (c->type == CLASS_RUN) { size_t wasted_units = c->rdsc.nallocs % units; size_t wasted_bytes = wasted_units * c->unit_size; size_t waste_avg_per_unit = wasted_bytes / c->rdsc.nallocs; waste += waste_avg_per_unit; } if (best_c == NULL || lowest_waste > waste) { best_c = c; lowest_waste = waste; } } ASSERTne(best_c, NULL); return best_c; } /* * alloc_class_collection_new -- creates a new collection of allocation classes */ struct alloc_class_collection * alloc_class_collection_new() { LOG(10, NULL); struct alloc_class_collection *ac = Zalloc(sizeof(*ac)); if (ac == NULL) return NULL; ac->granularity = ALLOC_BLOCK_SIZE; ac->last_run_max_size = MAX_RUN_SIZE; ac->fail_on_missing_class = 0; ac->autogenerate_on_missing_class = 1; size_t maps_size = (MAX_RUN_SIZE / ac->granularity) + 1; if ((ac->class_map_by_alloc_size = Malloc(maps_size)) == NULL) goto error; if ((ac->class_map_by_unit_size = critnib_new()) == NULL) goto error; memset(ac->class_map_by_alloc_size, 0xFF, maps_size); if (alloc_class_new(-1, ac, CLASS_HUGE, HEADER_COMPACT, CHUNKSIZE, 0, 1) == NULL) goto error; struct alloc_class *predefined_class = alloc_class_new(-1, ac, CLASS_RUN, HEADER_COMPACT, MIN_UNIT_SIZE, 0, 1); if (predefined_class == NULL) goto error; for (size_t i = 0; i < FIRST_GENERATED_CLASS_SIZE / ac->granularity; ++i) { ac->class_map_by_alloc_size[i] = predefined_class->id; } /* * Based on the defined categories, a set of allocation classes is * created. The unit size of those classes is depended on the category * initial size and step. */ size_t granularity_mask = ALLOC_BLOCK_SIZE_GEN - 1; for (int c = 1; c < MAX_ALLOC_CATEGORIES; ++c) { size_t n = categories[c - 1].size + ALLOC_BLOCK_SIZE_GEN; do { if (alloc_class_find_or_create(ac, n) == NULL) goto error; float stepf = (float)n * categories[c].step; size_t stepi = (size_t)stepf; stepi = (stepf - (float)stepi < FLT_EPSILON) ? stepi : stepi + 1; n += (stepi + (granularity_mask)) & ~granularity_mask; } while (n <= categories[c].size); } /* * Find the largest alloc class and use it's unit size as run allocation * threshold. */ uint8_t largest_aclass_slot; for (largest_aclass_slot = MAX_ALLOCATION_CLASSES - 1; largest_aclass_slot > 0 && ac->aclasses[largest_aclass_slot] == NULL; --largest_aclass_slot) { /* intentional NOP */ } struct alloc_class *c = ac->aclasses[largest_aclass_slot]; /* * The actual run might contain less unit blocks than the theoretical * unit max variable. This may be the case for very large unit sizes. */ size_t real_unit_max = c->rdsc.nallocs < RUN_UNIT_MAX_ALLOC ? c->rdsc.nallocs : RUN_UNIT_MAX_ALLOC; size_t theoretical_run_max_size = c->unit_size * real_unit_max; ac->last_run_max_size = MAX_RUN_SIZE > theoretical_run_max_size ? theoretical_run_max_size : MAX_RUN_SIZE; #ifdef DEBUG /* * Verify that each bucket's unit size points back to the bucket by the * bucket map. This must be true for the default allocation classes, * otherwise duplicate buckets will be created. 
*/ for (size_t i = 0; i < MAX_ALLOCATION_CLASSES; ++i) { struct alloc_class *c = ac->aclasses[i]; if (c != NULL && c->type == CLASS_RUN) { ASSERTeq(i, c->id); ASSERTeq(alloc_class_by_run(ac, c->unit_size, c->flags, c->rdsc.size_idx), c); } } #endif return ac; error: alloc_class_collection_delete(ac); return NULL; } /* * alloc_class_collection_delete -- deletes the allocation class collection and * all of the classes within it */ void alloc_class_collection_delete(struct alloc_class_collection *ac) { LOG(10, NULL); for (size_t i = 0; i < MAX_ALLOCATION_CLASSES; ++i) { struct alloc_class *c = ac->aclasses[i]; if (c != NULL) { alloc_class_delete(ac, c); } } if (ac->class_map_by_unit_size) critnib_delete(ac->class_map_by_unit_size); Free(ac->class_map_by_alloc_size); Free(ac); } /* * alloc_class_assign_by_size -- (internal) chooses the allocation class that * best approximates the provided size */ static struct alloc_class * alloc_class_assign_by_size(struct alloc_class_collection *ac, size_t size) { LOG(10, NULL); size_t class_map_index = SIZE_TO_CLASS_MAP_INDEX(size, ac->granularity); struct alloc_class *c = alloc_class_find_min_frag(ac, class_map_index * ac->granularity); ASSERTne(c, NULL); /* * We don't lock this array because locking this section here and then * bailing out if someone else was faster would be still slower than * just calculating the class and failing to assign the variable. * We are using a compare and swap so that helgrind/drd don't complain. */ util_bool_compare_and_swap64( &ac->class_map_by_alloc_size[class_map_index], MAX_ALLOCATION_CLASSES, c->id); return c; } /* * alloc_class_by_alloc_size -- returns allocation class that is assigned * to handle an allocation of the provided size */ struct alloc_class * alloc_class_by_alloc_size(struct alloc_class_collection *ac, size_t size) { if (size < ac->last_run_max_size) { uint8_t class_id = ac->class_map_by_alloc_size[ SIZE_TO_CLASS_MAP_INDEX(size, ac->granularity)]; if (class_id == MAX_ALLOCATION_CLASSES) { if (ac->fail_on_missing_class) return NULL; else if (ac->autogenerate_on_missing_class) return alloc_class_assign_by_size(ac, size); else return ac->aclasses[DEFAULT_ALLOC_CLASS_ID]; } return ac->aclasses[class_id]; } else { return ac->aclasses[DEFAULT_ALLOC_CLASS_ID]; } } /* * alloc_class_by_run -- returns the allocation class that has the given * unit size */ struct alloc_class * alloc_class_by_run(struct alloc_class_collection *ac, size_t unit_size, uint16_t flags, uint32_t size_idx) { size_t map_idx = SIZE_TO_CLASS_MAP_INDEX(unit_size, ac->granularity); ASSERT(map_idx <= UINT32_MAX); uint32_t map_idx_s = (uint32_t)map_idx; ASSERT(size_idx <= UINT16_MAX); uint16_t size_idx_s = (uint16_t)size_idx; uint16_t flags_s = (uint16_t)flags; return critnib_get(ac->class_map_by_unit_size, RUN_CLASS_KEY_PACK(map_idx_s, flags_s, size_idx_s)); } /* * alloc_class_by_id -- returns the allocation class with an id */ struct alloc_class * alloc_class_by_id(struct alloc_class_collection *ac, uint8_t id) { return ac->aclasses[id]; } /* * alloc_class_calc_size_idx -- calculates how many units does the size require */ ssize_t alloc_class_calc_size_idx(struct alloc_class *c, size_t size) { uint32_t size_idx = CALC_SIZE_IDX(c->unit_size, size + header_type_to_size[c->header_type]); if (c->type == CLASS_RUN) { if (c->header_type == HEADER_NONE && size_idx != 1) return -1; else if (size_idx > RUN_UNIT_MAX) return -1; else if (size_idx > c->rdsc.nallocs) return -1; } return size_idx; }
16,240
24.496075
80
c
null
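A sketch of the request-size-to-class-map bucketing used above: every ALLOC_BLOCK_SIZE-byte band of request sizes shares one slot in class_map_by_alloc_size. The macro and the 16-byte granularity are copied from alloc_class.c; the request sizes are arbitrary examples.

#include <stdio.h>
#include <stddef.h>

#define ALLOC_BLOCK_SIZE 16
#define SIZE_TO_CLASS_MAP_INDEX(_s, _g) (1 + (((_s) - 1) / (_g)))

int
main(void)
{
	size_t sizes[] = {1, 16, 17, 200, 1024};

	/* sizes 1..16 map to slot 1, 17..32 to slot 2, and so on */
	for (size_t i = 0; i < sizeof(sizes) / sizeof(sizes[0]); ++i)
		printf("size %4zu -> class map index %zu\n", sizes[i],
				SIZE_TO_CLASS_MAP_INDEX(sizes[i],
					(size_t)ALLOC_BLOCK_SIZE));
	return 0;
}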
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmemobj/obj.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2014-2020, Intel Corporation */ /* * obj.h -- internal definitions for obj module */ #ifndef LIBPMEMOBJ_OBJ_H #define LIBPMEMOBJ_OBJ_H 1 #include <stddef.h> #include <stdint.h> #include "lane.h" #include "pool_hdr.h" #include "pmalloc.h" #include "ctl.h" #include "sync.h" #include "stats.h" #include "ctl_debug.h" #include "page_size.h" #ifdef __cplusplus extern "C" { #endif #include "alloc.h" #include "fault_injection.h" #define PMEMOBJ_LOG_PREFIX "libpmemobj" #define PMEMOBJ_LOG_LEVEL_VAR "PMEMOBJ_LOG_LEVEL" #define PMEMOBJ_LOG_FILE_VAR "PMEMOBJ_LOG_FILE" /* attributes of the obj memory pool format for the pool header */ #define OBJ_HDR_SIG "PMEMOBJ" /* must be 8 bytes including '\0' */ #define OBJ_FORMAT_MAJOR 6 #define OBJ_FORMAT_FEAT_DEFAULT \ {POOL_FEAT_COMPAT_DEFAULT, POOL_FEAT_INCOMPAT_DEFAULT, 0x0000} #define OBJ_FORMAT_FEAT_CHECK \ {POOL_FEAT_COMPAT_VALID, POOL_FEAT_INCOMPAT_VALID, 0x0000} static const features_t obj_format_feat_default = OBJ_FORMAT_FEAT_CHECK; /* size of the persistent part of PMEMOBJ pool descriptor */ #define OBJ_DSC_P_SIZE 2048 /* size of unused part of the persistent part of PMEMOBJ pool descriptor */ #define OBJ_DSC_P_UNUSED (OBJ_DSC_P_SIZE - PMEMOBJ_MAX_LAYOUT - 40) #define OBJ_LANES_OFFSET (sizeof(struct pmemobjpool)) /* lanes offset */ #define OBJ_NLANES 1024 /* number of lanes */ #define OBJ_OFF_TO_PTR(pop, off) ((void *)((uintptr_t)(pop) + (off))) #define OBJ_PTR_TO_OFF(pop, ptr) ((uintptr_t)(ptr) - (uintptr_t)(pop)) #define OBJ_OID_IS_NULL(oid) ((oid).off == 0) #define OBJ_LIST_EMPTY(head) OBJ_OID_IS_NULL((head)->pe_first) #define OBJ_OFF_FROM_HEAP(pop, off)\ ((off) >= (pop)->heap_offset &&\ (off) < (pop)->heap_offset + (pop)->heap_size) #define OBJ_OFF_FROM_LANES(pop, off)\ ((off) >= (pop)->lanes_offset &&\ (off) < (pop)->lanes_offset +\ (pop)->nlanes * sizeof(struct lane_layout)) #define OBJ_PTR_FROM_POOL(pop, ptr)\ ((uintptr_t)(ptr) >= (uintptr_t)(pop) &&\ (uintptr_t)(ptr) < (uintptr_t)(pop) +\ (pop)->heap_offset + (pop)->heap_size) #define OBJ_OFF_IS_VALID(pop, off)\ (OBJ_OFF_FROM_HEAP(pop, off) ||\ (OBJ_PTR_TO_OFF(pop, &(pop)->root_offset) == (off)) ||\ (OBJ_PTR_TO_OFF(pop, &(pop)->root_size) == (off)) ||\ (OBJ_OFF_FROM_LANES(pop, off))) #define OBJ_PTR_IS_VALID(pop, ptr)\ OBJ_OFF_IS_VALID(pop, OBJ_PTR_TO_OFF(pop, ptr)) typedef void (*persist_local_fn)(const void *, size_t); typedef void (*flush_local_fn)(const void *, size_t); typedef void (*drain_local_fn)(void); typedef void *(*memcpy_local_fn)(void *dest, const void *src, size_t len, unsigned flags); typedef void *(*memmove_local_fn)(void *dest, const void *src, size_t len, unsigned flags); typedef void *(*memset_local_fn)(void *dest, int c, size_t len, unsigned flags); typedef int (*persist_remote_fn)(PMEMobjpool *pop, const void *addr, size_t len, unsigned lane, unsigned flags); typedef uint64_t type_num_t; #define CONVERSION_FLAG_OLD_SET_CACHE ((1ULL) << 0) /* PMEM_OBJ_POOL_HEAD_SIZE Without the unused and unused2 arrays */ #define PMEM_OBJ_POOL_HEAD_SIZE 2196 #define PMEM_OBJ_POOL_UNUSED2_SIZE (PMEM_PAGESIZE \ - OBJ_DSC_P_UNUSED\ - PMEM_OBJ_POOL_HEAD_SIZE) /* //NEW //#define _GNU_SOURCE //#include <sys/types.h> //#include <sys/stat.h> #include <fcntl.h> #include <sys/mman.h> //int __real_open(const char *__path, int __oflag); //int __wrap_open(const char *__path, int __oflag); void* open_device(void); //END NEW */ struct pmemobjpool { struct pool_hdr hdr; /* memory pool header */ /* persistent part of PMEMOBJ pool descriptor (2kB) */ 
char layout[PMEMOBJ_MAX_LAYOUT]; uint64_t lanes_offset; uint64_t nlanes; uint64_t heap_offset; uint64_t unused3; unsigned char unused[OBJ_DSC_P_UNUSED]; /* must be zero */ uint64_t checksum; /* checksum of above fields */ uint64_t root_offset; /* unique runID for this program run - persistent but not checksummed */ uint64_t run_id; uint64_t root_size; /* * These flags can be set from a conversion tool and are set only for * the first recovery of the pool. */ uint64_t conversion_flags; uint64_t heap_size; struct stats_persistent stats_persistent; char pmem_reserved[496]; /* must be zeroed */ /* some run-time state, allocated out of memory pool... */ void *addr; /* mapped region */ int is_pmem; /* true if pool is PMEM */ int rdonly; /* true if pool is opened read-only */ struct palloc_heap heap; struct lane_descriptor lanes_desc; uint64_t uuid_lo; int is_dev_dax; /* true if mapped on device dax */ struct ctl *ctl; /* top level node of the ctl tree structure */ struct stats *stats; struct pool_set *set; /* pool set info */ struct pmemobjpool *replica; /* next replica */ /* per-replica functions: pmem or non-pmem */ persist_local_fn persist_local; /* persist function */ flush_local_fn flush_local; /* flush function */ drain_local_fn drain_local; /* drain function */ memcpy_local_fn memcpy_local; /* persistent memcpy function */ memmove_local_fn memmove_local; /* persistent memmove function */ memset_local_fn memset_local; /* persistent memset function */ /* for 'master' replica: with or without data replication */ struct pmem_ops p_ops; PMEMmutex rootlock; /* root object lock */ int is_master_replica; int has_remote_replicas; /* remote replica section */ void *rpp; /* RPMEMpool opaque handle if it is a remote replica */ uintptr_t remote_base; /* beginning of the remote pool */ char *node_addr; /* address of a remote node */ char *pool_desc; /* descriptor of a poolset */ persist_remote_fn persist_remote; /* remote persist function */ int vg_boot; int tx_debug_skip_expensive_checks; struct tx_parameters *tx_params; /* * Locks are dynamically allocated on FreeBSD. Keep track so * we can free them on pmemobj_close. */ PMEMmutex_internal *mutex_head; PMEMrwlock_internal *rwlock_head; PMEMcond_internal *cond_head; struct { struct ravl *map; os_mutex_t lock; int verify; } ulog_user_buffers; void *user_data; //New //void *device; /* padding to align size of this structure to page boundary */ /* sizeof(unused2) == 8192 - offsetof(struct pmemobjpool, unused2) */ char unused2[PMEM_OBJ_POOL_UNUSED2_SIZE -28 ]; }; /* * Stored in the 'size' field of oobh header, determines whether the object * is internal or not. Internal objects are skipped in pmemobj iteration * functions. */ #define OBJ_INTERNAL_OBJECT_MASK ((1ULL) << 15) #define CLASS_ID_FROM_FLAG(flag)\ ((uint16_t)((flag) >> 48)) #define ARENA_ID_FROM_FLAG(flag)\ ((uint16_t)((flag) >> 32)) /* * pmemobj_get_uuid_lo -- (internal) evaluates XOR sum of least significant * 8 bytes with most significant 8 bytes. 
*/ static inline uint64_t pmemobj_get_uuid_lo(PMEMobjpool *pop) { uint64_t uuid_lo = 0; for (int i = 0; i < 8; i++) { uuid_lo = (uuid_lo << 8) | (pop->hdr.poolset_uuid[i] ^ pop->hdr.poolset_uuid[8 + i]); } return uuid_lo; } /* * OBJ_OID_IS_VALID -- (internal) checks if 'oid' is valid */ static inline int OBJ_OID_IS_VALID(PMEMobjpool *pop, PMEMoid oid) { return OBJ_OID_IS_NULL(oid) || (oid.pool_uuid_lo == pop->uuid_lo && oid.off >= pop->heap_offset && oid.off < pop->heap_offset + pop->heap_size); } static inline int OBJ_OFF_IS_VALID_FROM_CTX(void *ctx, uint64_t offset) { PMEMobjpool *pop = (PMEMobjpool *)ctx; return OBJ_OFF_IS_VALID(pop, offset); } void obj_init(void); void obj_fini(void); int obj_read_remote(void *ctx, uintptr_t base, void *dest, void *addr, size_t length); /* * (debug helper macro) logs notice message if used inside a transaction */ #ifdef DEBUG #define _POBJ_DEBUG_NOTICE_IN_TX()\ _pobj_debug_notice(__func__, NULL, 0) #else #define _POBJ_DEBUG_NOTICE_IN_TX() do {} while (0) #endif #if FAULT_INJECTION void pmemobj_inject_fault_at(enum pmem_allocation_type type, int nth, const char *at); int pmemobj_fault_injection_enabled(void); #else static inline void pmemobj_inject_fault_at(enum pmem_allocation_type type, int nth, const char *at) { abort(); } static inline int pmemobj_fault_injection_enabled(void) { return 0; } #endif #ifdef __cplusplus } #endif #endif
8,196
25.441935
80
h
null
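A standalone sketch of the uuid folding performed by pmemobj_get_uuid_lo() in obj.h: the 16-byte poolset uuid is XOR-folded into one 64-bit value, byte i against byte i + 8. The uuid bytes below are made up for illustration.

#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	unsigned char poolset_uuid[16] = {
		0x12, 0x34, 0x56, 0x78, 0x9a, 0xbc, 0xde, 0xf0,
		0x0f, 0xed, 0xcb, 0xa9, 0x87, 0x65, 0x43, 0x21,
	};

	uint64_t uuid_lo = 0;
	for (int i = 0; i < 8; i++) {
		/* same fold as obj.h: byte i XOR byte i + 8, shifted in */
		uuid_lo = (uuid_lo << 8) |
			(uint64_t)(poolset_uuid[i] ^ poolset_uuid[8 + i]);
	}

	printf("uuid_lo = 0x%016llx\n", (unsigned long long)uuid_lo);
	return 0;
}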
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmemobj/list.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2018, Intel Corporation */ /* * list.h -- internal definitions for persistent atomic lists module */ #ifndef LIBPMEMOBJ_LIST_H #define LIBPMEMOBJ_LIST_H 1 #include <stddef.h> #include <stdint.h> #include <sys/types.h> #include "libpmemobj.h" #include "lane.h" #include "pmalloc.h" #include "ulog.h" #ifdef __cplusplus extern "C" { #endif struct list_entry { PMEMoid pe_next; PMEMoid pe_prev; }; struct list_head { PMEMoid pe_first; PMEMmutex lock; }; int list_insert_new_user(PMEMobjpool *pop, size_t pe_offset, struct list_head *user_head, PMEMoid dest, int before, size_t size, uint64_t type_num, palloc_constr constructor, void *arg, PMEMoid *oidp); int list_insert(PMEMobjpool *pop, ssize_t pe_offset, struct list_head *head, PMEMoid dest, int before, PMEMoid oid); int list_remove_free_user(PMEMobjpool *pop, size_t pe_offset, struct list_head *user_head, PMEMoid *oidp); int list_remove(PMEMobjpool *pop, ssize_t pe_offset, struct list_head *head, PMEMoid oid); int list_move(PMEMobjpool *pop, size_t pe_offset_old, struct list_head *head_old, size_t pe_offset_new, struct list_head *head_new, PMEMoid dest, int before, PMEMoid oid); void list_move_oob(PMEMobjpool *pop, struct list_head *head_old, struct list_head *head_new, PMEMoid oid); #ifdef __cplusplus } #endif #endif
1,376
20.184615
73
h
null
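A sketch of how the pe_offset argument taken by the list functions above is typically derived: it is the byte offset of the list_entry embedded in the caller's persistent object. The struct my_item below is illustrative only; its inner struct merely stands in for the two PMEMoid links of a real struct list_entry.

#include <stdio.h>
#include <stddef.h>

struct my_item {
	long payload;
	struct {
		/* stand-ins for the pe_next / pe_prev PMEMoid links */
		unsigned long long next_pool, next_off;
		unsigned long long prev_pool, prev_off;
	} entry;
};

int
main(void)
{
	/* this is the value a caller would pass as pe_offset */
	printf("pe_offset = %zu\n", offsetof(struct my_item, entry));
	return 0;
}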
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmemobj/memops.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2016-2020, Intel Corporation */ /* * memops.c -- aggregated memory operations helper implementation * * The operation collects all of the required memory modifications that * need to happen in an atomic way (all of them or none), and abstracts * away the storage type (transient/persistent) and the underlying * implementation of how it's actually performed - in some cases using * the redo log is unnecessary and the allocation process can be sped up * a bit by completely omitting that whole machinery. * * The modifications are not visible until the context is processed. */ #include "memops.h" #include "obj.h" #include "out.h" #include "ravl.h" #include "valgrind_internal.h" #include "vecq.h" #include "sys_util.h" #include <x86intrin.h> #define ULOG_BASE_SIZE 1024 #define OP_MERGE_SEARCH 64 static inline uint64_t getCycle(){ uint32_t cycles_high, cycles_low, pid; asm volatile ("RDTSCP\n\t" // rdtscp into eax and edx "mov %%edx, %0\n\t" "mov %%eax, %1\n\t" "mov %%ecx, %2\n\t" :"=r" (cycles_high), "=r" (cycles_low), "=r" (pid) //store in vars :// no input :"%eax", "%edx", "%ecx" // clobbered by rdtscp ); return((uint64_t)cycles_high << 32) | cycles_low; } enum operation_state { OPERATION_IDLE, OPERATION_IN_PROGRESS, OPERATION_CLEANUP, }; struct operation_log { size_t capacity; /* capacity of the ulog log */ size_t offset; /* data offset inside of the log */ struct ulog *ulog; /* DRAM allocated log of modifications */ }; /* * operation_context -- context of an ongoing palloc operation */ struct operation_context { enum log_type type; ulog_extend_fn extend; /* function to allocate next ulog */ ulog_free_fn ulog_free; /* function to free next ulogs */ const struct pmem_ops *p_ops; struct pmem_ops t_ops; /* used for transient data processing */ struct pmem_ops s_ops; /* used for shadow copy data processing */ size_t ulog_curr_offset; /* offset in the log for buffer stores */ size_t ulog_curr_capacity; /* capacity of the current log */ size_t ulog_curr_gen_num; /* transaction counter in the current log */ struct ulog *ulog_curr; /* current persistent log */ size_t total_logged; /* total amount of buffer stores in the logs */ struct ulog *ulog; /* pointer to the persistent ulog log */ size_t ulog_base_nbytes; /* available bytes in initial ulog log */ size_t ulog_capacity; /* sum of capacity, incl all next ulog logs */ int ulog_auto_reserve; /* allow or do not to auto ulog reservation */ int ulog_any_user_buffer; /* set if any user buffer is added */ struct ulog_next next; /* vector of 'next' fields of persistent ulog */ enum operation_state state; /* operation sanity check */ struct operation_log pshadow_ops; /* shadow copy of persistent ulog */ struct operation_log transient_ops; /* log of transient changes */ /* collection used to look for potential merge candidates */ VECQ(, struct ulog_entry_val *) merge_entries; }; /* * operation_log_transient_init -- (internal) initialize operation log * containing transient memory resident changes */ static int operation_log_transient_init(struct operation_log *log) { log->capacity = ULOG_BASE_SIZE; log->offset = 0; struct ulog *src = Zalloc(sizeof(struct ulog) + ULOG_BASE_SIZE); if (src == NULL) { ERR("!Zalloc"); return -1; } /* initialize underlying redo log structure */ src->capacity = ULOG_BASE_SIZE; log->ulog = src; return 0; } /* * operation_log_persistent_init -- (internal) initialize operation log * containing persistent memory resident changes */ static int operation_log_persistent_init(struct 
operation_log *log, size_t ulog_base_nbytes) { log->capacity = ULOG_BASE_SIZE; log->offset = 0; struct ulog *src = Zalloc(sizeof(struct ulog) + ULOG_BASE_SIZE); if (src == NULL) { ERR("!Zalloc"); return -1; } /* initialize underlying redo log structure */ src->capacity = ulog_base_nbytes; memset(src->unused, 0, sizeof(src->unused)); log->ulog = src; return 0; } /* * operation_transient_clean -- cleans pmemcheck address state */ static int operation_transient_clean(void *base, const void *addr, size_t len, unsigned flags) { VALGRIND_SET_CLEAN(addr, len); return 0; } /* * operation_transient_drain -- noop */ static void operation_transient_drain(void *base) { } /* * operation_transient_memcpy -- transient memcpy wrapper */ static void * operation_transient_memcpy(void *base, void *dest, const void *src, size_t len, unsigned flags) { return memcpy(dest, src, len); } /* * operation_new -- creates new operation context */ struct operation_context * operation_new(struct ulog *ulog, size_t ulog_base_nbytes, ulog_extend_fn extend, ulog_free_fn ulog_free, const struct pmem_ops *p_ops, enum log_type type) { struct operation_context *ctx = Zalloc(sizeof(*ctx)); if (ctx == NULL) { ERR("!Zalloc"); goto error_ctx_alloc; } ctx->ulog = ulog; ctx->ulog_base_nbytes = ulog_base_nbytes; ctx->ulog_capacity = ulog_capacity(ulog, ulog_base_nbytes, p_ops); ctx->extend = extend; ctx->ulog_free = ulog_free; ctx->state = OPERATION_IDLE; VEC_INIT(&ctx->next); ulog_rebuild_next_vec(ulog, &ctx->next, p_ops); ctx->p_ops = p_ops; ctx->type = type; ctx->ulog_any_user_buffer = 0; ctx->ulog_curr_offset = 0; ctx->ulog_curr_capacity = 0; ctx->ulog_curr = NULL; ctx->t_ops.base = NULL; ctx->t_ops.flush = operation_transient_clean; ctx->t_ops.memcpy = operation_transient_memcpy; ctx->t_ops.drain = operation_transient_drain; ctx->s_ops.base = p_ops->base; ctx->s_ops.flush = operation_transient_clean; ctx->s_ops.memcpy = operation_transient_memcpy; ctx->s_ops.drain = operation_transient_drain; VECQ_INIT(&ctx->merge_entries); if (operation_log_transient_init(&ctx->transient_ops) != 0) goto error_ulog_alloc; if (operation_log_persistent_init(&ctx->pshadow_ops, ulog_base_nbytes) != 0) goto error_ulog_alloc; return ctx; error_ulog_alloc: operation_delete(ctx); error_ctx_alloc: return NULL; } /* * operation_delete -- deletes operation context */ void operation_delete(struct operation_context *ctx) { VECQ_DELETE(&ctx->merge_entries); VEC_DELETE(&ctx->next); Free(ctx->pshadow_ops.ulog); Free(ctx->transient_ops.ulog); Free(ctx); } /* * operation_user_buffer_remove -- removes range from the tree and returns 0 */ static int operation_user_buffer_remove(void *base, void *addr) { PMEMobjpool *pop = base; if (!pop->ulog_user_buffers.verify) return 0; util_mutex_lock(&pop->ulog_user_buffers.lock); struct ravl *ravl = pop->ulog_user_buffers.map; enum ravl_predicate predict = RAVL_PREDICATE_EQUAL; struct user_buffer_def range; range.addr = addr; range.size = 0; struct ravl_node *n = ravl_find(ravl, &range, predict); ASSERTne(n, NULL); ravl_remove(ravl, n); util_mutex_unlock(&pop->ulog_user_buffers.lock); return 0; } /* * operation_free_logs -- free all logs except first */ void operation_free_logs(struct operation_context *ctx, uint64_t flags) { int freed = ulog_free_next(ctx->ulog, ctx->p_ops, ctx->ulog_free, operation_user_buffer_remove, flags); if (freed) { ctx->ulog_capacity = ulog_capacity(ctx->ulog, ctx->ulog_base_nbytes, ctx->p_ops); VEC_CLEAR(&ctx->next); ulog_rebuild_next_vec(ctx->ulog, &ctx->next, ctx->p_ops); } 
ASSERTeq(VEC_SIZE(&ctx->next), 0); } /* * operation_merge -- (internal) performs operation on a field */ static inline void operation_merge(struct ulog_entry_base *entry, uint64_t value, ulog_operation_type type) { struct ulog_entry_val *e = (struct ulog_entry_val *)entry; switch (type) { case ULOG_OPERATION_AND: e->value &= value; break; case ULOG_OPERATION_OR: e->value |= value; break; case ULOG_OPERATION_SET: e->value = value; break; default: ASSERT(0); /* unreachable */ } } /* * operation_try_merge_entry -- tries to merge the incoming log entry with * existing entries * * Because this requires a reverse foreach, it cannot be implemented using * the on-media ulog log structure since there's no way to find what's * the previous entry in the log. Instead, the last N entries are stored * in a collection and traversed backwards. */ static int operation_try_merge_entry(struct operation_context *ctx, void *ptr, uint64_t value, ulog_operation_type type) { int ret = 0; uint64_t offset = OBJ_PTR_TO_OFF(ctx->p_ops->base, ptr); struct ulog_entry_val *e; VECQ_FOREACH_REVERSE(e, &ctx->merge_entries) { if (ulog_entry_offset(&e->base) == offset) { if (ulog_entry_type(&e->base) == type) { operation_merge(&e->base, value, type); return 1; } else { break; } } } return ret; } /* * operation_merge_entry_add -- adds a new entry to the merge collection, * keeps capacity at OP_MERGE_SEARCH. Removes old entries in FIFO fashion. */ static void operation_merge_entry_add(struct operation_context *ctx, struct ulog_entry_val *entry) { if (VECQ_SIZE(&ctx->merge_entries) == OP_MERGE_SEARCH) (void) VECQ_DEQUEUE(&ctx->merge_entries); if (VECQ_ENQUEUE(&ctx->merge_entries, entry) != 0) { /* this is fine, only runtime perf will get slower */ LOG(2, "out of memory - unable to track entries"); } } /* * operation_add_typed_value -- adds new entry to the current operation, if the * same ptr address already exists and the operation type is set, * the new value is not added and the function has no effect. */ int operation_add_typed_entry(struct operation_context *ctx, void *ptr, uint64_t value, ulog_operation_type type, enum operation_log_type log_type) { struct operation_log *oplog = log_type == LOG_PERSISTENT ? &ctx->pshadow_ops : &ctx->transient_ops; /* * Always make sure to have one extra spare cacheline so that the * ulog log entry creation has enough room for zeroing. */ if (oplog->offset + CACHELINE_SIZE == oplog->capacity) { size_t ncapacity = oplog->capacity + ULOG_BASE_SIZE; struct ulog *ulog = Realloc(oplog->ulog, SIZEOF_ULOG(ncapacity)); if (ulog == NULL) return -1; oplog->capacity += ULOG_BASE_SIZE; oplog->ulog = ulog; oplog->ulog->capacity = oplog->capacity; /* * Realloc invalidated the ulog entries that are inside of this * vector, need to clear it to avoid use after free. */ VECQ_CLEAR(&ctx->merge_entries); } if (log_type == LOG_PERSISTENT && operation_try_merge_entry(ctx, ptr, value, type) != 0) return 0; struct ulog_entry_val *entry = ulog_entry_val_create( oplog->ulog, oplog->offset, ptr, value, type, log_type == LOG_TRANSIENT ? 
&ctx->t_ops : &ctx->s_ops); if (log_type == LOG_PERSISTENT) operation_merge_entry_add(ctx, entry); oplog->offset += ulog_entry_size(&entry->base); return 0; } /* * operation_add_value -- adds new entry to the current operation with * entry type autodetected based on the memory location */ int operation_add_entry(struct operation_context *ctx, void *ptr, uint64_t value, ulog_operation_type type) { const struct pmem_ops *p_ops = ctx->p_ops; PMEMobjpool *pop = (PMEMobjpool *)p_ops->base; int from_pool = OBJ_OFF_IS_VALID(pop, (uintptr_t)ptr - (uintptr_t)p_ops->base); return operation_add_typed_entry(ctx, ptr, value, type, from_pool ? LOG_PERSISTENT : LOG_TRANSIENT); } /* * operation_add_buffer -- adds a buffer operation to the log */ int operation_add_buffer(struct operation_context *ctx, void *dest, void *src, size_t size, ulog_operation_type type) { size_t real_size = size + sizeof(struct ulog_entry_buf); /* if there's no space left in the log, reserve some more */ if (ctx->ulog_curr_capacity == 0) { ctx->ulog_curr_gen_num = ctx->ulog->gen_num; if (operation_reserve(ctx, ctx->total_logged + real_size) != 0) return -1; ctx->ulog_curr = ctx->ulog_curr == NULL ? ctx->ulog : ulog_next(ctx->ulog_curr, ctx->p_ops); ASSERTne(ctx->ulog_curr, NULL); ctx->ulog_curr_offset = 0; ctx->ulog_curr_capacity = ctx->ulog_curr->capacity; } size_t curr_size = MIN(real_size, ctx->ulog_curr_capacity); size_t data_size = curr_size - sizeof(struct ulog_entry_buf); size_t entry_size = ALIGN_UP(curr_size, CACHELINE_SIZE); /* * To make sure that the log is consistent and contiguous, we need * make sure that the header of the entry that would be located * immediately after this one is zeroed. */ struct ulog_entry_base *next_entry = NULL; if (entry_size == ctx->ulog_curr_capacity) { struct ulog *u = ulog_next(ctx->ulog_curr, ctx->p_ops); if (u != NULL) next_entry = (struct ulog_entry_base *)u->data; } else { size_t next_entry_offset = ctx->ulog_curr_offset + entry_size; next_entry = (struct ulog_entry_base *)(ctx->ulog_curr->data + next_entry_offset); } #ifdef USE_NDP_CLOBBER int clear_next_header = 0; if (next_entry != NULL){ clear_next_header = 1; } #else if (next_entry != NULL){ ulog_clobber_entry(next_entry, ctx->p_ops); } #endif #ifdef GET_NDP_BREAKDOWN uint64_t startCycles = getCycle(); #endif //ulogcount++; #ifdef USE_NDP_CLOBBER ulog_entry_buf_create(ctx->ulog_curr, ctx->ulog_curr_offset, ctx->ulog_curr_gen_num, dest, src, data_size, type, ctx->p_ops, clear_next_header); #else ulog_entry_buf_create(ctx->ulog_curr, ctx->ulog_curr_offset, ctx->ulog_curr_gen_num, dest, src, data_size, type, ctx->p_ops); #endif #ifdef GET_NDP_BREAKDOWN uint64_t endCycles = getCycle(); ulogCycles += endCycles - startCycles; #endif /* create a persistent log entry */ /* struct ulog_entry_buf *e = ulog_entry_buf_create(ctx->ulog_curr, ctx->ulog_curr_offset, ctx->ulog_curr_gen_num, dest, src, data_size, type, ctx->p_ops); */ // ASSERT(entry_size == ulog_entry_size(&e->base)); // ASSERT(entry_size <= ctx->ulog_curr_capacity); ctx->total_logged += entry_size; ctx->ulog_curr_offset += entry_size; ctx->ulog_curr_capacity -= entry_size; /* * Recursively add the data to the log until the entire buffer is * processed. */ return size - data_size == 0 ? 
0 : operation_add_buffer(ctx, (char *)dest + data_size, (char *)src + data_size, size - data_size, type); } /* * operation_user_buffer_range_cmp -- compares addresses of * user buffers */ int operation_user_buffer_range_cmp(const void *lhs, const void *rhs) { const struct user_buffer_def *l = lhs; const struct user_buffer_def *r = rhs; if (l->addr > r->addr) return 1; else if (l->addr < r->addr) return -1; return 0; } /* * operation_user_buffer_try_insert -- adds a user buffer range to the tree, * if the buffer already exists in the tree function returns -1, otherwise * it returns 0 */ static int operation_user_buffer_try_insert(PMEMobjpool *pop, struct user_buffer_def *userbuf) { int ret = 0; if (!pop->ulog_user_buffers.verify) return ret; util_mutex_lock(&pop->ulog_user_buffers.lock); void *addr_end = (char *)userbuf->addr + userbuf->size; struct user_buffer_def search; search.addr = addr_end; struct ravl_node *n = ravl_find(pop->ulog_user_buffers.map, &search, RAVL_PREDICATE_LESS_EQUAL); if (n != NULL) { struct user_buffer_def *r = ravl_data(n); void *r_end = (char *)r->addr + r->size; if (r_end > userbuf->addr && r->addr < addr_end) { /* what was found overlaps with what is being added */ ret = -1; goto out; } } if (ravl_emplace_copy(pop->ulog_user_buffers.map, userbuf) == -1) { ASSERTne(errno, EEXIST); ret = -1; } out: util_mutex_unlock(&pop->ulog_user_buffers.lock); return ret; } /* * operation_user_buffer_verify_align -- verify if the provided buffer can be * used as a transaction log, and if so - perform necessary alignments */ int operation_user_buffer_verify_align(struct operation_context *ctx, struct user_buffer_def *userbuf) { /* * Address of the buffer has to be aligned up, and the size * has to be aligned down, taking into account the number of bytes * the address was incremented by. The remaining size has to be large * enough to contain the header and at least one ulog entry. 
*/ uint64_t buffer_offset = OBJ_PTR_TO_OFF(ctx->p_ops->base, userbuf->addr); ptrdiff_t size_diff = (intptr_t)ulog_by_offset(buffer_offset, ctx->p_ops) - (intptr_t)userbuf->addr; ssize_t capacity_unaligned = (ssize_t)userbuf->size - size_diff - (ssize_t)sizeof(struct ulog); if (capacity_unaligned < (ssize_t)CACHELINE_SIZE) { ERR("Capacity insufficient"); return -1; } size_t capacity_aligned = ALIGN_DOWN((size_t)capacity_unaligned, CACHELINE_SIZE); userbuf->addr = ulog_by_offset(buffer_offset, ctx->p_ops); userbuf->size = capacity_aligned + sizeof(struct ulog); if (operation_user_buffer_try_insert(ctx->p_ops->base, userbuf)) { ERR("Buffer currently used"); return -1; } return 0; } /* * operation_add_user_buffer -- add user buffer to the ulog */ void operation_add_user_buffer(struct operation_context *ctx, struct user_buffer_def *userbuf) { uint64_t buffer_offset = OBJ_PTR_TO_OFF(ctx->p_ops->base, userbuf->addr); size_t capacity = userbuf->size - sizeof(struct ulog); ulog_construct(buffer_offset, capacity, ctx->ulog->gen_num, 1, ULOG_USER_OWNED, ctx->p_ops); struct ulog *last_log; /* if there is only one log */ if (!VEC_SIZE(&ctx->next)) last_log = ctx->ulog; else /* get last element from vector */ last_log = ulog_by_offset(VEC_BACK(&ctx->next), ctx->p_ops); ASSERTne(last_log, NULL); size_t next_size = sizeof(last_log->next); VALGRIND_ADD_TO_TX(&last_log->next, next_size); last_log->next = buffer_offset; pmemops_persist(ctx->p_ops, &last_log->next, next_size); VEC_PUSH_BACK(&ctx->next, buffer_offset); ctx->ulog_capacity += capacity; operation_set_any_user_buffer(ctx, 1); } /* * operation_set_auto_reserve -- set auto reserve value for context */ void operation_set_auto_reserve(struct operation_context *ctx, int auto_reserve) { ctx->ulog_auto_reserve = auto_reserve; } /* * operation_set_any_user_buffer -- set ulog_any_user_buffer value for context */ void operation_set_any_user_buffer(struct operation_context *ctx, int any_user_buffer) { ctx->ulog_any_user_buffer = any_user_buffer; } /* * operation_get_any_user_buffer -- get ulog_any_user_buffer value from context */ int operation_get_any_user_buffer(struct operation_context *ctx) { return ctx->ulog_any_user_buffer; } /* * operation_process_persistent_redo -- (internal) process using ulog */ static void operation_process_persistent_redo(struct operation_context *ctx) { ASSERTeq(ctx->pshadow_ops.capacity % CACHELINE_SIZE, 0); ulog_store(ctx->ulog, ctx->pshadow_ops.ulog, ctx->pshadow_ops.offset, ctx->ulog_base_nbytes, ctx->ulog_capacity, &ctx->next, ctx->p_ops); #ifdef USE_NDP_REDO if(!use_ndp_redo){ #endif ulog_process(ctx->pshadow_ops.ulog, OBJ_OFF_IS_VALID_FROM_CTX, ctx->p_ops); //ulog_process(ctx->ulog, OBJ_OFF_IS_VALID_FROM_CTX, // ctx->p_ops); #ifdef USE_NDP_REDO } else { //ulog_process(ctx->pshadow_ops.ulog, OBJ_OFF_IS_VALID_FROM_CTX, // ctx->p_ops); //while(1){} ulog_process_ndp(ctx->ulog, ctx->pshadow_ops.ulog, OBJ_OFF_IS_VALID_FROM_CTX, ctx->p_ops); //while(1){} } #endif // while(((*((uint32_t*)(ctx->p_ops->device)+254)) & 2) != 2){ //asm volatile ("clflush (%0)" :: "r"((uint32_t*)(tx->pop->p_ops.device)+254)); //printf("waiting %x %x\n",*((uint32_t*)(tx->pop->p_ops.device)+11),*((uint32_t*)(tx->pop->p_ops.device)+254)); //printf("waiting!!\n"); // } ulog_clobber(ctx->ulog, &ctx->next, ctx->p_ops); } /* * operation_process_persistent_undo -- (internal) process using ulog */ static void operation_process_persistent_undo(struct operation_context *ctx) { ASSERTeq(ctx->pshadow_ops.capacity % CACHELINE_SIZE, 0); ulog_process(ctx->ulog, 
OBJ_OFF_IS_VALID_FROM_CTX, ctx->p_ops); } /* * operation_reserve -- (internal) reserves new capacity in persistent ulog log */ int operation_reserve(struct operation_context *ctx, size_t new_capacity) { if (new_capacity > ctx->ulog_capacity) { if (ctx->extend == NULL) { ERR("no extend function present"); return -1; } if (ulog_reserve(ctx->ulog, ctx->ulog_base_nbytes, ctx->ulog_curr_gen_num, ctx->ulog_auto_reserve, &new_capacity, ctx->extend, &ctx->next, ctx->p_ops) != 0) return -1; ctx->ulog_capacity = new_capacity; } return 0; } /* * operation_init -- initializes runtime state of an operation */ void operation_init(struct operation_context *ctx) { struct operation_log *plog = &ctx->pshadow_ops; struct operation_log *tlog = &ctx->transient_ops; VALGRIND_ANNOTATE_NEW_MEMORY(ctx, sizeof(*ctx)); VALGRIND_ANNOTATE_NEW_MEMORY(tlog->ulog, sizeof(struct ulog) + tlog->capacity); VALGRIND_ANNOTATE_NEW_MEMORY(plog->ulog, sizeof(struct ulog) + plog->capacity); tlog->offset = 0; plog->offset = 0; VECQ_REINIT(&ctx->merge_entries); ctx->ulog_curr_offset = 0; ctx->ulog_curr_capacity = 0; ctx->ulog_curr_gen_num = 0; ctx->ulog_curr = NULL; ctx->total_logged = 0; ctx->ulog_auto_reserve = 1; ctx->ulog_any_user_buffer = 0; } /* * operation_start -- initializes and starts a new operation */ void operation_start(struct operation_context *ctx) { operation_init(ctx); ASSERTeq(ctx->state, OPERATION_IDLE); ctx->state = OPERATION_IN_PROGRESS; } void operation_resume(struct operation_context *ctx) { operation_start(ctx); ctx->total_logged = ulog_base_nbytes(ctx->ulog); } /* * operation_cancel -- cancels a running operation */ void operation_cancel(struct operation_context *ctx) { ASSERTeq(ctx->state, OPERATION_IN_PROGRESS); ctx->state = OPERATION_IDLE; } /* * operation_process -- processes registered operations * * The order of processing is important: persistent, transient. * This is because the transient entries that reside on persistent memory might * require a write to a location that is currently occupied by a valid persistent * state but becomes a transient state after the operation is processed. */ void operation_process(struct operation_context *ctx) { /* * If there's exactly one persistent entry there's no need to involve * the redo log. We can simply assign the value, the operation will be * atomic. */ int redo_process = ctx->type == LOG_TYPE_REDO && ctx->pshadow_ops.offset != 0; if (redo_process && ctx->pshadow_ops.offset == sizeof(struct ulog_entry_val)) { struct ulog_entry_base *e = (struct ulog_entry_base *) ctx->pshadow_ops.ulog->data; ulog_operation_type t = ulog_entry_type(e); if (t == ULOG_OPERATION_SET || t == ULOG_OPERATION_AND || t == ULOG_OPERATION_OR) { ulog_entry_apply(e, 1, ctx->p_ops); // could not be effective in ndp redo_process = 0; } } if (redo_process) { operation_process_persistent_redo(ctx); //ndp ctx->state = OPERATION_CLEANUP; } else if (ctx->type == LOG_TYPE_UNDO && ctx->total_logged != 0) { operation_process_persistent_undo(ctx); ctx->state = OPERATION_CLEANUP; } /* process transient entries with transient memory ops */ if (ctx->transient_ops.offset != 0) ulog_process(ctx->transient_ops.ulog, NULL, &ctx->t_ops); //where is this used?
} /* * operation_finish -- finalizes the operation */ void operation_finish(struct operation_context *ctx, unsigned flags) { ASSERTne(ctx->state, OPERATION_IDLE); if (ctx->type == LOG_TYPE_UNDO && ctx->total_logged != 0) ctx->state = OPERATION_CLEANUP; if (ctx->ulog_any_user_buffer) { flags |= ULOG_ANY_USER_BUFFER; ctx->state = OPERATION_CLEANUP; } if (ctx->state != OPERATION_CLEANUP) goto out; if (ctx->type == LOG_TYPE_UNDO) { int ret = ulog_clobber_data(ctx->ulog, ctx->total_logged, ctx->ulog_base_nbytes, &ctx->next, ctx->ulog_free, operation_user_buffer_remove, ctx->p_ops, flags); if (ret == 0) goto out; } else if (ctx->type == LOG_TYPE_REDO) { int ret = ulog_free_next(ctx->ulog, ctx->p_ops, ctx->ulog_free, operation_user_buffer_remove, flags); if (ret == 0) goto out; } /* clobbering shrunk the ulog */ ctx->ulog_capacity = ulog_capacity(ctx->ulog, ctx->ulog_base_nbytes, ctx->p_ops); VEC_CLEAR(&ctx->next); ulog_rebuild_next_vec(ctx->ulog, &ctx->next, ctx->p_ops); out: ctx->state = OPERATION_IDLE; }
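Editor's note (illustrative, not part of the repository file above): operation_add_buffer splits a large snapshot into chunks, each bounded by the remaining capacity of the current ulog, and tail-recurses until the whole buffer is logged. The standalone sketch below models only that splitting arithmetic; the constants and function names are hypothetical stand-ins, and it assumes every chunk lands in a log with the same usable capacity.

/* Hypothetical, self-contained model of the chunking in operation_add_buffer;
 * HDR_SIZE stands in for sizeof(struct ulog_entry_buf). */
#include <assert.h>
#include <stddef.h>
#include <stdio.h>

#define HDR_SIZE	64	/* stand-in for the buf-entry header size */
#define CACHELINE	64
#define MIN(a, b) ((a) < (b) ? (a) : (b))
#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((size_t)(a) - 1))

/* number of log entries a copy of 'size' bytes would need, assuming each
 * ulog in the chain offers 'capacity' usable bytes */
static size_t
count_entries(size_t size, size_t capacity)
{
	size_t entries = 0;

	while (size != 0) {
		size_t real_size = size + HDR_SIZE;
		size_t curr_size = MIN(real_size, capacity);
		size_t data_size = curr_size - HDR_SIZE;
		size_t entry_size = ALIGN_UP(curr_size, CACHELINE);

		assert(entry_size <= capacity);
		size -= data_size;	/* the "recursive" step */
		entries++;
	}
	return entries;
}

int
main(void)
{
	/* a 1000-byte snapshot into 256-byte logs needs several entries */
	printf("%zu entries\n", count_entries(1000, 256));
	return 0;
}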
24,116
25.589857
113
c
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmemobj/stats.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2017-2019, Intel Corporation */ /* * stats.c -- implementation of statistics */ #include "obj.h" #include "stats.h" STATS_CTL_HANDLER(persistent, curr_allocated, heap_curr_allocated); STATS_CTL_HANDLER(transient, run_allocated, heap_run_allocated); STATS_CTL_HANDLER(transient, run_active, heap_run_active); static const struct ctl_node CTL_NODE(heap)[] = { STATS_CTL_LEAF(persistent, curr_allocated), STATS_CTL_LEAF(transient, run_allocated), STATS_CTL_LEAF(transient, run_active), CTL_NODE_END }; /* * CTL_READ_HANDLER(enabled) -- returns whether or not statistics are enabled */ static int CTL_READ_HANDLER(enabled)(void *ctx, enum ctl_query_source source, void *arg, struct ctl_indexes *indexes) { PMEMobjpool *pop = ctx; enum pobj_stats_enabled *arg_out = arg; *arg_out = pop->stats->enabled; return 0; } /* * stats_enabled_parser -- parses the stats enabled type */ static int stats_enabled_parser(const void *arg, void *dest, size_t dest_size) { const char *vstr = arg; enum pobj_stats_enabled *enabled = dest; ASSERTeq(dest_size, sizeof(enum pobj_stats_enabled)); int bool_out; if (ctl_arg_boolean(arg, &bool_out, sizeof(bool_out)) == 0) { *enabled = bool_out ? POBJ_STATS_ENABLED_BOTH : POBJ_STATS_DISABLED; return 0; } if (strcmp(vstr, "disabled") == 0) { *enabled = POBJ_STATS_DISABLED; } else if (strcmp(vstr, "both") == 0) { *enabled = POBJ_STATS_ENABLED_BOTH; } else if (strcmp(vstr, "persistent") == 0) { *enabled = POBJ_STATS_ENABLED_PERSISTENT; } else if (strcmp(vstr, "transient") == 0) { *enabled = POBJ_STATS_ENABLED_TRANSIENT; } else { ERR("invalid enable type"); errno = EINVAL; return -1; } return 0; } /* * CTL_WRITE_HANDLER(enabled) -- enables or disables statistics counting */ static int CTL_WRITE_HANDLER(enabled)(void *ctx, enum ctl_query_source source, void *arg, struct ctl_indexes *indexes) { PMEMobjpool *pop = ctx; pop->stats->enabled = *(enum pobj_stats_enabled *)arg; return 0; } static const struct ctl_argument CTL_ARG(enabled) = { .dest_size = sizeof(enum pobj_stats_enabled), .parsers = { CTL_ARG_PARSER(sizeof(enum pobj_stats_enabled), stats_enabled_parser), CTL_ARG_PARSER_END } }; static const struct ctl_node CTL_NODE(stats)[] = { CTL_CHILD(heap), CTL_LEAF_RW(enabled), CTL_NODE_END }; /* * stats_new -- allocates and initializes statistics instance */ struct stats * stats_new(PMEMobjpool *pop) { struct stats *s = Malloc(sizeof(*s)); if (s == NULL) { ERR("!Malloc"); return NULL; } s->enabled = POBJ_STATS_ENABLED_TRANSIENT; s->persistent = &pop->stats_persistent; VALGRIND_ADD_TO_GLOBAL_TX_IGNORE(s->persistent, sizeof(*s->persistent)); s->transient = Zalloc(sizeof(struct stats_transient)); if (s->transient == NULL) goto error_transient_alloc; return s; error_transient_alloc: Free(s); return NULL; } /* * stats_delete -- deletes statistics instance */ void stats_delete(PMEMobjpool *pop, struct stats *s) { pmemops_persist(&pop->p_ops, s->persistent, sizeof(struct stats_persistent)); Free(s->transient); Free(s); } /* * stats_ctl_register -- registers ctl nodes for statistics */ void stats_ctl_register(PMEMobjpool *pop) { CTL_REGISTER_MODULE(pop->ctl, stats); }
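Editor's note (illustrative, not part of the repository file above): the handlers registered in stats.c are reachable through the public pmemobj CTL interface. The sketch below shows how a consumer might toggle statistics and read one counter; the exact CTL path strings are inferred from the node names above ("stats.enabled", "stats.heap.curr_allocated") and should be treated as an assumption to verify against the libpmemobj documentation.

/* Sketch only: assumes the CTL paths exposed by the nodes registered above. */
#include <libpmemobj.h>
#include <stdint.h>
#include <stdio.h>

void
show_heap_usage(PMEMobjpool *pop)
{
	enum pobj_stats_enabled on = POBJ_STATS_ENABLED_BOTH;

	/* enable both transient and persistent counters */
	if (pmemobj_ctl_set(pop, "stats.enabled", &on) != 0)
		perror("pmemobj_ctl_set");

	uint64_t allocated = 0;

	/* read the persistent "currently allocated" counter */
	if (pmemobj_ctl_get(pop, "stats.heap.curr_allocated", &allocated) != 0)
		perror("pmemobj_ctl_get");
	else
		printf("heap.curr_allocated = %lu bytes\n",
		    (unsigned long)allocated);
}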
3,293
20.671053
77
c
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmemobj/heap.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2019, Intel Corporation */ /* * heap.h -- internal definitions for heap */ #ifndef LIBPMEMOBJ_HEAP_H #define LIBPMEMOBJ_HEAP_H 1 #include <stddef.h> #include <stdint.h> #include "bucket.h" #include "memblock.h" #include "memops.h" #include "palloc.h" #include "os_thread.h" #ifdef __cplusplus extern "C" { #endif #define HEAP_OFF_TO_PTR(heap, off) ((void *)((char *)((heap)->base) + (off))) #define HEAP_PTR_TO_OFF(heap, ptr)\ ((uintptr_t)(ptr) - (uintptr_t)((heap)->base)) #define BIT_IS_CLR(a, i) (!((a) & (1ULL << (i)))) #define HEAP_ARENA_PER_THREAD (0) int heap_boot(struct palloc_heap *heap, void *heap_start, uint64_t heap_size, uint64_t *sizep, void *base, struct pmem_ops *p_ops, struct stats *stats, struct pool_set *set); int heap_init(void *heap_start, uint64_t heap_size, uint64_t *sizep, struct pmem_ops *p_ops); void heap_cleanup(struct palloc_heap *heap); int heap_check(void *heap_start, uint64_t heap_size); int heap_check_remote(void *heap_start, uint64_t heap_size, struct remote_ops *ops); int heap_buckets_init(struct palloc_heap *heap); int heap_create_alloc_class_buckets(struct palloc_heap *heap, struct alloc_class *c); int heap_extend(struct palloc_heap *heap, struct bucket *defb, size_t size); struct alloc_class * heap_get_best_class(struct palloc_heap *heap, size_t size); struct bucket * heap_bucket_acquire(struct palloc_heap *heap, uint8_t class_id, uint16_t arena_id); void heap_bucket_release(struct palloc_heap *heap, struct bucket *b); int heap_get_bestfit_block(struct palloc_heap *heap, struct bucket *b, struct memory_block *m); struct memory_block heap_coalesce_huge(struct palloc_heap *heap, struct bucket *b, const struct memory_block *m); os_mutex_t *heap_get_run_lock(struct palloc_heap *heap, uint32_t chunk_id); void heap_force_recycle(struct palloc_heap *heap); void heap_discard_run(struct palloc_heap *heap, struct memory_block *m); void heap_memblock_on_free(struct palloc_heap *heap, const struct memory_block *m); int heap_free_chunk_reuse(struct palloc_heap *heap, struct bucket *bucket, struct memory_block *m); void heap_foreach_object(struct palloc_heap *heap, object_callback cb, void *arg, struct memory_block start); struct alloc_class_collection *heap_alloc_classes(struct palloc_heap *heap); void *heap_end(struct palloc_heap *heap); unsigned heap_get_narenas_total(struct palloc_heap *heap); unsigned heap_get_narenas_max(struct palloc_heap *heap); int heap_set_narenas_max(struct palloc_heap *heap, unsigned size); unsigned heap_get_narenas_auto(struct palloc_heap *heap); unsigned heap_get_thread_arena_id(struct palloc_heap *heap); int heap_arena_create(struct palloc_heap *heap); struct bucket ** heap_get_arena_buckets(struct palloc_heap *heap, unsigned arena_id); int heap_get_arena_auto(struct palloc_heap *heap, unsigned arena_id); int heap_set_arena_auto(struct palloc_heap *heap, unsigned arena_id, int automatic); void heap_set_arena_thread(struct palloc_heap *heap, unsigned arena_id); void heap_vg_open(struct palloc_heap *heap, object_callback cb, void *arg, int objects); static inline struct chunk_header * heap_get_chunk_hdr(struct palloc_heap *heap, const struct memory_block *m) { return GET_CHUNK_HDR(heap->layout, m->zone_id, m->chunk_id); } static inline struct chunk * heap_get_chunk(struct palloc_heap *heap, const struct memory_block *m) { return GET_CHUNK(heap->layout, m->zone_id, m->chunk_id); } static inline struct chunk_run * heap_get_chunk_run(struct palloc_heap *heap, const struct memory_block *m) 
{ return GET_CHUNK_RUN(heap->layout, m->zone_id, m->chunk_id); } #ifdef __cplusplus } #endif #endif
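Editor's note (illustrative, not part of the repository file above): HEAP_OFF_TO_PTR and HEAP_PTR_TO_OFF at the top of heap.h convert between pool-relative offsets and pointers. A minimal standalone round-trip check, using a mock heap type (an assumption for the example) that carries only the base field the macros touch:

/* Standalone sketch: a mock heap with just a 'base' pointer is enough to
 * exercise the offset/pointer conversion macros from heap.h. */
#include <assert.h>
#include <stdint.h>

#define HEAP_OFF_TO_PTR(heap, off) ((void *)((char *)((heap)->base) + (off)))
#define HEAP_PTR_TO_OFF(heap, ptr)\
	((uintptr_t)(ptr) - (uintptr_t)((heap)->base))

struct mock_heap { void *base; };

int
main(void)
{
	static char pool[4096];
	struct mock_heap h = { .base = pool };

	void *p = HEAP_OFF_TO_PTR(&h, 128);
	assert(HEAP_PTR_TO_OFF(&h, p) == 128);
	return 0;
}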
3,719
26.969925
78
h
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmemobj/list.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2019, Intel Corporation */ /* * list.c -- implementation of persistent atomic lists module */ #include <inttypes.h> #include "list.h" #include "obj.h" #include "os_thread.h" #include "out.h" #include "sync.h" #include "valgrind_internal.h" #include "memops.h" #define PREV_OFF (offsetof(struct list_entry, pe_prev) + offsetof(PMEMoid, off)) #define NEXT_OFF (offsetof(struct list_entry, pe_next) + offsetof(PMEMoid, off)) /* * list_args_common -- common arguments for operations on list * * pe_offset - offset to list entry relative to user data * obj_doffset - offset to element's data relative to pmemobj pool * entry_ptr - list entry structure of element */ struct list_args_common { ssize_t pe_offset; uint64_t obj_doffset; struct list_entry *entry_ptr; }; /* * list_args_insert -- arguments for inserting element to list * * head - list head * dest - destination element OID * dest_entry_ptr - list entry of destination element * before - insert before or after destination element */ struct list_args_insert { struct list_head *head; PMEMoid dest; struct list_entry *dest_entry_ptr; int before; }; /* * list_args_reinsert -- arguments for reinserting element on list * * head - list head * entry_ptr - list entry of old element * obj_doffset - offset to element's data relative to pmemobj pool */ struct list_args_reinsert { struct list_head *head; struct list_entry *entry_ptr; uint64_t obj_doffset; }; /* * list_args_remove -- arguments for removing element from list * * pe_offset - offset to list entry relative to user data * obj_doffset - offset to element's data relative to pmemobj pool * head - list head * entry_ptr - list entry structure of element */ struct list_args_remove { ssize_t pe_offset; uint64_t obj_doffset; struct list_head *head; struct list_entry *entry_ptr; }; /* * list_mutexes_lock -- (internal) grab one or two locks in ascending * address order */ static inline int list_mutexes_lock(PMEMobjpool *pop, struct list_head *head1, struct list_head *head2) { ASSERTne(head1, NULL); if (!head2 || head1 == head2) return pmemobj_mutex_lock(pop, &head1->lock); PMEMmutex *lock1; PMEMmutex *lock2; if ((uintptr_t)&head1->lock < (uintptr_t)&head2->lock) { lock1 = &head1->lock; lock2 = &head2->lock; } else { lock1 = &head2->lock; lock2 = &head1->lock; } int ret; if ((ret = pmemobj_mutex_lock(pop, lock1))) goto err; if ((ret = pmemobj_mutex_lock(pop, lock2))) goto err_unlock; return 0; err_unlock: pmemobj_mutex_unlock(pop, lock1); err: return ret; } /* * list_mutexes_unlock -- (internal) release one or two locks */ static inline void list_mutexes_unlock(PMEMobjpool *pop, struct list_head *head1, struct list_head *head2) { ASSERTne(head1, NULL); if (!head2 || head1 == head2) { pmemobj_mutex_unlock_nofail(pop, &head1->lock); return; } pmemobj_mutex_unlock_nofail(pop, &head1->lock); pmemobj_mutex_unlock_nofail(pop, &head2->lock); } /* * list_get_dest -- (internal) return destination object ID * * If the input dest is not OID_NULL returns dest. * If the input dest is OID_NULL and before is set returns first element. * If the input dest is OID_NULL and before is no set returns last element. 
*/ static inline PMEMoid list_get_dest(PMEMobjpool *pop, struct list_head *head, PMEMoid dest, ssize_t pe_offset, int before) { if (dest.off) return dest; if (head->pe_first.off == 0 || !!before == POBJ_LIST_DEST_HEAD) return head->pe_first; struct list_entry *first_ptr = (struct list_entry *)OBJ_OFF_TO_PTR(pop, (uintptr_t)((ssize_t)head->pe_first.off + pe_offset)); return first_ptr->pe_prev; } /* * list_set_oid_redo_log -- (internal) set PMEMoid value using redo log */ static size_t list_set_oid_redo_log(PMEMobjpool *pop, struct operation_context *ctx, PMEMoid *oidp, uint64_t obj_doffset, int oidp_inited) { ASSERT(OBJ_PTR_IS_VALID(pop, oidp)); if (!oidp_inited || oidp->pool_uuid_lo != pop->uuid_lo) { if (oidp_inited) ASSERTeq(oidp->pool_uuid_lo, 0); operation_add_entry(ctx, &oidp->pool_uuid_lo, pop->uuid_lo, ULOG_OPERATION_SET); } operation_add_entry(ctx, &oidp->off, obj_doffset, ULOG_OPERATION_SET); return 0; } /* * list_update_head -- (internal) update pe_first entry in list head */ static size_t list_update_head(PMEMobjpool *pop, struct operation_context *ctx, struct list_head *head, uint64_t first_offset) { LOG(15, NULL); operation_add_entry(ctx, &head->pe_first.off, first_offset, ULOG_OPERATION_SET); if (head->pe_first.pool_uuid_lo == 0) { operation_add_entry(ctx, &head->pe_first.pool_uuid_lo, pop->uuid_lo, ULOG_OPERATION_SET); } return 0; } /* * u64_add_offset -- (internal) add signed offset to unsigned integer and check * for overflows */ static void u64_add_offset(uint64_t *value, ssize_t off) { uint64_t prev = *value; if (off >= 0) { *value += (size_t)off; ASSERT(*value >= prev); /* detect overflow */ } else { *value -= (size_t)-off; ASSERT(*value < prev); } } /* * list_fill_entry_persist -- (internal) fill new entry using persist function * * Used for newly allocated objects. */ static void list_fill_entry_persist(PMEMobjpool *pop, struct list_entry *entry_ptr, uint64_t next_offset, uint64_t prev_offset) { LOG(15, NULL); VALGRIND_ADD_TO_TX(entry_ptr, sizeof(*entry_ptr)); entry_ptr->pe_next.pool_uuid_lo = pop->uuid_lo; entry_ptr->pe_next.off = next_offset; entry_ptr->pe_prev.pool_uuid_lo = pop->uuid_lo; entry_ptr->pe_prev.off = prev_offset; VALGRIND_REMOVE_FROM_TX(entry_ptr, sizeof(*entry_ptr)); pmemops_persist(&pop->p_ops, entry_ptr, sizeof(*entry_ptr)); } /* * list_fill_entry_redo_log -- (internal) fill new entry using redo log * * Used to update entry in existing object. 
*/ static size_t list_fill_entry_redo_log(PMEMobjpool *pop, struct operation_context *ctx, struct list_args_common *args, uint64_t next_offset, uint64_t prev_offset, int set_uuid) { LOG(15, NULL); struct pmem_ops *ops = &pop->p_ops; ASSERTne(args->entry_ptr, NULL); ASSERTne(args->obj_doffset, 0); if (set_uuid) { VALGRIND_ADD_TO_TX(&(args->entry_ptr->pe_next.pool_uuid_lo), sizeof(args->entry_ptr->pe_next.pool_uuid_lo)); VALGRIND_ADD_TO_TX(&(args->entry_ptr->pe_prev.pool_uuid_lo), sizeof(args->entry_ptr->pe_prev.pool_uuid_lo)); /* don't need to fill pool uuid using redo log */ args->entry_ptr->pe_next.pool_uuid_lo = pop->uuid_lo; args->entry_ptr->pe_prev.pool_uuid_lo = pop->uuid_lo; VALGRIND_REMOVE_FROM_TX( &(args->entry_ptr->pe_next.pool_uuid_lo), sizeof(args->entry_ptr->pe_next.pool_uuid_lo)); VALGRIND_REMOVE_FROM_TX( &(args->entry_ptr->pe_prev.pool_uuid_lo), sizeof(args->entry_ptr->pe_prev.pool_uuid_lo)); pmemops_persist(ops, args->entry_ptr, sizeof(*args->entry_ptr)); } else { ASSERTeq(args->entry_ptr->pe_next.pool_uuid_lo, pop->uuid_lo); ASSERTeq(args->entry_ptr->pe_prev.pool_uuid_lo, pop->uuid_lo); } /* set current->next and current->prev using redo log */ uint64_t next_off_off = args->obj_doffset + NEXT_OFF; uint64_t prev_off_off = args->obj_doffset + PREV_OFF; u64_add_offset(&next_off_off, args->pe_offset); u64_add_offset(&prev_off_off, args->pe_offset); void *next_ptr = (char *)pop + next_off_off; void *prev_ptr = (char *)pop + prev_off_off; operation_add_entry(ctx, next_ptr, next_offset, ULOG_OPERATION_SET); operation_add_entry(ctx, prev_ptr, prev_offset, ULOG_OPERATION_SET); return 0; } /* * list_remove_single -- (internal) remove element from single list */ static size_t list_remove_single(PMEMobjpool *pop, struct operation_context *ctx, struct list_args_remove *args) { LOG(15, NULL); if (args->entry_ptr->pe_next.off == args->obj_doffset) { /* only one element on list */ ASSERTeq(args->head->pe_first.off, args->obj_doffset); ASSERTeq(args->entry_ptr->pe_prev.off, args->obj_doffset); return list_update_head(pop, ctx, args->head, 0); } else { /* set next->prev = prev and prev->next = next */ uint64_t next_off = args->entry_ptr->pe_next.off; uint64_t next_prev_off = next_off + PREV_OFF; u64_add_offset(&next_prev_off, args->pe_offset); uint64_t prev_off = args->entry_ptr->pe_prev.off; uint64_t prev_next_off = prev_off + NEXT_OFF; u64_add_offset(&prev_next_off, args->pe_offset); void *prev_ptr = (char *)pop + next_prev_off; void *next_ptr = (char *)pop + prev_next_off; operation_add_entry(ctx, prev_ptr, prev_off, ULOG_OPERATION_SET); operation_add_entry(ctx, next_ptr, next_off, ULOG_OPERATION_SET); if (args->head->pe_first.off == args->obj_doffset) { /* removing element is the first one */ return list_update_head(pop, ctx, args->head, next_off); } else { return 0; } } } /* * list_insert_before -- (internal) insert element at offset before an element */ static size_t list_insert_before(PMEMobjpool *pop, struct operation_context *ctx, struct list_args_insert *args, struct list_args_common *args_common, uint64_t *next_offset, uint64_t *prev_offset) { LOG(15, NULL); /* current->next = dest and current->prev = dest->prev */ *next_offset = args->dest.off; *prev_offset = args->dest_entry_ptr->pe_prev.off; /* dest->prev = current and dest->prev->next = current */ uint64_t dest_prev_off = args->dest.off + PREV_OFF; u64_add_offset(&dest_prev_off, args_common->pe_offset); uint64_t dest_prev_next_off = args->dest_entry_ptr->pe_prev.off + NEXT_OFF; u64_add_offset(&dest_prev_next_off, 
args_common->pe_offset); void *dest_prev_ptr = (char *)pop + dest_prev_off; void *dest_prev_next_ptr = (char *)pop + dest_prev_next_off; operation_add_entry(ctx, dest_prev_ptr, args_common->obj_doffset, ULOG_OPERATION_SET); operation_add_entry(ctx, dest_prev_next_ptr, args_common->obj_doffset, ULOG_OPERATION_SET); return 0; } /* * list_insert_after -- (internal) insert element at offset after an element */ static size_t list_insert_after(PMEMobjpool *pop, struct operation_context *ctx, struct list_args_insert *args, struct list_args_common *args_common, uint64_t *next_offset, uint64_t *prev_offset) { LOG(15, NULL); /* current->next = dest->next and current->prev = dest */ *next_offset = args->dest_entry_ptr->pe_next.off; *prev_offset = args->dest.off; /* dest->next = current and dest->next->prev = current */ uint64_t dest_next_off = args->dest.off + NEXT_OFF; u64_add_offset(&dest_next_off, args_common->pe_offset); uint64_t dest_next_prev_off = args->dest_entry_ptr->pe_next.off + PREV_OFF; u64_add_offset(&dest_next_prev_off, args_common->pe_offset); void *dest_next_ptr = (char *)pop + dest_next_off; void *dest_next_prev_ptr = (char *)pop + dest_next_prev_off; operation_add_entry(ctx, dest_next_ptr, args_common->obj_doffset, ULOG_OPERATION_SET); operation_add_entry(ctx, dest_next_prev_ptr, args_common->obj_doffset, ULOG_OPERATION_SET); return 0; } /* * list_insert_user -- (internal) insert element at offset to a user list */ static size_t list_insert_user(PMEMobjpool *pop, struct operation_context *ctx, struct list_args_insert *args, struct list_args_common *args_common, uint64_t *next_offset, uint64_t *prev_offset) { LOG(15, NULL); if (args->dest.off == 0) { /* inserting the first element on list */ ASSERTeq(args->head->pe_first.off, 0); /* set loop on current element */ *next_offset = args_common->obj_doffset; *prev_offset = args_common->obj_doffset; /* update head */ list_update_head(pop, ctx, args->head, args_common->obj_doffset); } else { if (args->before) { /* inserting before dest */ list_insert_before(pop, ctx, args, args_common, next_offset, prev_offset); if (args->dest.off == args->head->pe_first.off) { /* current element at first position */ list_update_head(pop, ctx, args->head, args_common->obj_doffset); } } else { /* inserting after dest */ list_insert_after(pop, ctx, args, args_common, next_offset, prev_offset); } } return 0; } /* * list_insert_new -- allocate and insert element to oob and user lists * * pop - pmemobj pool handle * pe_offset - offset to list entry on user list relative to user data * user_head - user list head, must be locked if not NULL * dest - destination on user list * before - insert before/after destination on user list * size - size of allocation, will be increased by OBJ_OOB_SIZE * constructor - object's constructor * arg - argument for object's constructor * oidp - pointer to target object ID */ static int list_insert_new(PMEMobjpool *pop, size_t pe_offset, struct list_head *user_head, PMEMoid dest, int before, size_t size, uint64_t type_num, int (*constructor)(void *ctx, void *ptr, size_t usable_size, void *arg), void *arg, PMEMoid *oidp) { LOG(3, NULL); ASSERT(user_head != NULL); int ret; #ifdef DEBUG int r = pmemobj_mutex_assert_locked(pop, &user_head->lock); ASSERTeq(r, 0); #endif struct lane *lane; lane_hold(pop, &lane); struct pobj_action reserved; if (palloc_reserve(&pop->heap, size, constructor, arg, type_num, 0, 0, 0, &reserved) != 0) { ERR("!palloc_reserve"); ret = -1; goto err_pmalloc; } uint64_t obj_doffset = reserved.heap.offset; struct 
operation_context *ctx = lane->external; operation_start(ctx); ASSERT((ssize_t)pe_offset >= 0); dest = list_get_dest(pop, user_head, dest, (ssize_t)pe_offset, before); struct list_entry *entry_ptr = (struct list_entry *)OBJ_OFF_TO_PTR(pop, obj_doffset + pe_offset); struct list_entry *dest_entry_ptr = (struct list_entry *)OBJ_OFF_TO_PTR(pop, dest.off + pe_offset); struct list_args_insert args = { .dest = dest, .dest_entry_ptr = dest_entry_ptr, .head = user_head, .before = before, }; struct list_args_common args_common = { .obj_doffset = obj_doffset, .entry_ptr = entry_ptr, .pe_offset = (ssize_t)pe_offset, }; uint64_t next_offset; uint64_t prev_offset; /* insert element to user list */ list_insert_user(pop, ctx, &args, &args_common, &next_offset, &prev_offset); /* don't need to use redo log for filling new element */ list_fill_entry_persist(pop, entry_ptr, next_offset, prev_offset); if (oidp != NULL) { if (OBJ_PTR_IS_VALID(pop, oidp)) { list_set_oid_redo_log(pop, ctx, oidp, obj_doffset, 0); } else { oidp->off = obj_doffset; oidp->pool_uuid_lo = pop->uuid_lo; } } palloc_publish(&pop->heap, &reserved, 1, ctx); ret = 0; err_pmalloc: lane_release(pop); ASSERT(ret == 0 || ret == -1); return ret; } /* * list_insert_new_user -- allocate and insert element to oob and user lists * * pop - pmemobj pool handle * oob_head - oob list head * pe_offset - offset to list entry on user list relative to user data * user_head - user list head * dest - destination on user list * before - insert before/after destination on user list * size - size of allocation, will be increased by OBJ_OOB_SIZE * constructor - object's constructor * arg - argument for object's constructor * oidp - pointer to target object ID */ int list_insert_new_user(PMEMobjpool *pop, size_t pe_offset, struct list_head *user_head, PMEMoid dest, int before, size_t size, uint64_t type_num, int (*constructor)(void *ctx, void *ptr, size_t usable_size, void *arg), void *arg, PMEMoid *oidp) { int ret; if ((ret = pmemobj_mutex_lock(pop, &user_head->lock))) { errno = ret; LOG(2, "pmemobj_mutex_lock failed"); return -1; } ret = list_insert_new(pop, pe_offset, user_head, dest, before, size, type_num, constructor, arg, oidp); pmemobj_mutex_unlock_nofail(pop, &user_head->lock); ASSERT(ret == 0 || ret == -1); return ret; } /* * list_insert -- insert object to a single list * * pop - pmemobj handle * pe_offset - offset to list entry on user list relative to user data * head - list head * dest - destination object ID * before - before/after destination * oid - target object ID */ int list_insert(PMEMobjpool *pop, ssize_t pe_offset, struct list_head *head, PMEMoid dest, int before, PMEMoid oid) { LOG(3, NULL); ASSERTne(head, NULL); struct lane *lane; lane_hold(pop, &lane); int ret; if ((ret = pmemobj_mutex_lock(pop, &head->lock))) { errno = ret; LOG(2, "pmemobj_mutex_lock failed"); ret = -1; goto err; } struct operation_context *ctx = lane->external; operation_start(ctx); dest = list_get_dest(pop, head, dest, pe_offset, before); struct list_entry *entry_ptr = (struct list_entry *)OBJ_OFF_TO_PTR(pop, (uintptr_t)((ssize_t)oid.off + pe_offset)); struct list_entry *dest_entry_ptr = (struct list_entry *)OBJ_OFF_TO_PTR(pop, (uintptr_t)((ssize_t)dest.off + pe_offset)); struct list_args_insert args = { .dest = dest, .dest_entry_ptr = dest_entry_ptr, .head = head, .before = before, }; struct list_args_common args_common = { .obj_doffset = oid.off, .entry_ptr = entry_ptr, .pe_offset = (ssize_t)pe_offset, }; uint64_t next_offset; uint64_t prev_offset; /* insert element 
to user list */ list_insert_user(pop, ctx, &args, &args_common, &next_offset, &prev_offset); /* fill entry of existing element using redo log */ list_fill_entry_redo_log(pop, ctx, &args_common, next_offset, prev_offset, 1); operation_process(ctx); operation_finish(ctx, 0); pmemobj_mutex_unlock_nofail(pop, &head->lock); err: lane_release(pop); ASSERT(ret == 0 || ret == -1); return ret; } /* * list_remove_free -- remove from two lists and free an object * * pop - pmemobj pool handle * oob_head - oob list head * pe_offset - offset to list entry on user list relative to user data * user_head - user list head, *must* be locked if not NULL * oidp - pointer to target object ID */ static void list_remove_free(PMEMobjpool *pop, size_t pe_offset, struct list_head *user_head, PMEMoid *oidp) { LOG(3, NULL); ASSERT(user_head != NULL); #ifdef DEBUG int r = pmemobj_mutex_assert_locked(pop, &user_head->lock); ASSERTeq(r, 0); #endif struct lane *lane; lane_hold(pop, &lane); struct operation_context *ctx = lane->external; operation_start(ctx); struct pobj_action deferred; palloc_defer_free(&pop->heap, oidp->off, &deferred); uint64_t obj_doffset = oidp->off; ASSERT((ssize_t)pe_offset >= 0); struct list_entry *entry_ptr = (struct list_entry *)OBJ_OFF_TO_PTR(pop, obj_doffset + pe_offset); struct list_args_remove args = { .pe_offset = (ssize_t)pe_offset, .head = user_head, .entry_ptr = entry_ptr, .obj_doffset = obj_doffset }; /* remove from user list */ list_remove_single(pop, ctx, &args); /* clear the oid */ if (OBJ_PTR_IS_VALID(pop, oidp)) list_set_oid_redo_log(pop, ctx, oidp, 0, 1); else oidp->off = 0; palloc_publish(&pop->heap, &deferred, 1, ctx); lane_release(pop); } /* * list_remove_free_user -- remove from two lists and free an object * * pop - pmemobj pool handle * oob_head - oob list head * pe_offset - offset to list entry on user list relative to user data * user_head - user list head * oidp - pointer to target object ID */ int list_remove_free_user(PMEMobjpool *pop, size_t pe_offset, struct list_head *user_head, PMEMoid *oidp) { LOG(3, NULL); int ret; if ((ret = pmemobj_mutex_lock(pop, &user_head->lock))) { errno = ret; LOG(2, "pmemobj_mutex_lock failed"); return -1; } list_remove_free(pop, pe_offset, user_head, oidp); pmemobj_mutex_unlock_nofail(pop, &user_head->lock); return 0; } /* * list_remove -- remove object from list * * pop - pmemobj handle * pe_offset - offset to list entry on user list relative to user data * head - list head * oid - target object ID */ int list_remove(PMEMobjpool *pop, ssize_t pe_offset, struct list_head *head, PMEMoid oid) { LOG(3, NULL); ASSERTne(head, NULL); int ret; struct lane *lane; lane_hold(pop, &lane); if ((ret = pmemobj_mutex_lock(pop, &head->lock))) { errno = ret; LOG(2, "pmemobj_mutex_lock failed"); ret = -1; goto err; } struct operation_context *ctx = lane->external; operation_start(ctx); struct list_entry *entry_ptr = (struct list_entry *)OBJ_OFF_TO_PTR(pop, oid.off + (size_t)pe_offset); struct list_args_remove args = { .pe_offset = (ssize_t)pe_offset, .head = head, .entry_ptr = entry_ptr, .obj_doffset = oid.off, }; struct list_args_common args_common = { .obj_doffset = oid.off, .entry_ptr = entry_ptr, .pe_offset = (ssize_t)pe_offset, }; /* remove element from user list */ list_remove_single(pop, ctx, &args); /* clear next and prev offsets in removing element using redo log */ list_fill_entry_redo_log(pop, ctx, &args_common, 0, 0, 0); operation_process(ctx); operation_finish(ctx, 0); pmemobj_mutex_unlock_nofail(pop, &head->lock); err: lane_release(pop); 
ASSERT(ret == 0 || ret == -1); return ret; } /* * list_move -- move object between two lists * * pop - pmemobj handle * pe_offset_old - offset to old list entry relative to user data * head_old - old list head * pe_offset_new - offset to new list entry relative to user data * head_new - new list head * dest - destination object ID * before - before/after destination * oid - target object ID */ int list_move(PMEMobjpool *pop, size_t pe_offset_old, struct list_head *head_old, size_t pe_offset_new, struct list_head *head_new, PMEMoid dest, int before, PMEMoid oid) { LOG(3, NULL); ASSERTne(head_old, NULL); ASSERTne(head_new, NULL); int ret; struct lane *lane; lane_hold(pop, &lane); /* * Grab locks in specified order to avoid dead-locks. * * XXX performance improvement: initialize oob locks at pool opening */ if ((ret = list_mutexes_lock(pop, head_new, head_old))) { errno = ret; LOG(2, "list_mutexes_lock failed"); ret = -1; goto err; } struct operation_context *ctx = lane->external; operation_start(ctx); dest = list_get_dest(pop, head_new, dest, (ssize_t)pe_offset_new, before); struct list_entry *entry_ptr_old = (struct list_entry *)OBJ_OFF_TO_PTR(pop, oid.off + pe_offset_old); struct list_entry *entry_ptr_new = (struct list_entry *)OBJ_OFF_TO_PTR(pop, oid.off + pe_offset_new); struct list_entry *dest_entry_ptr = (struct list_entry *)OBJ_OFF_TO_PTR(pop, dest.off + pe_offset_new); if (head_old == head_new) { /* moving within the same list */ if (dest.off == oid.off) goto unlock; if (before && dest_entry_ptr->pe_prev.off == oid.off) { if (head_old->pe_first.off != dest.off) goto unlock; list_update_head(pop, ctx, head_old, oid.off); goto redo_last; } if (!before && dest_entry_ptr->pe_next.off == oid.off) { if (head_old->pe_first.off != oid.off) goto unlock; list_update_head(pop, ctx, head_old, entry_ptr_old->pe_next.off); goto redo_last; } } ASSERT((ssize_t)pe_offset_old >= 0); struct list_args_remove args_remove = { .pe_offset = (ssize_t)pe_offset_old, .head = head_old, .entry_ptr = entry_ptr_old, .obj_doffset = oid.off, }; struct list_args_insert args_insert = { .head = head_new, .dest = dest, .dest_entry_ptr = dest_entry_ptr, .before = before, }; ASSERT((ssize_t)pe_offset_new >= 0); struct list_args_common args_common = { .obj_doffset = oid.off, .entry_ptr = entry_ptr_new, .pe_offset = (ssize_t)pe_offset_new, }; uint64_t next_offset; uint64_t prev_offset; /* remove element from user list */ list_remove_single(pop, ctx, &args_remove); /* insert element to user list */ list_insert_user(pop, ctx, &args_insert, &args_common, &next_offset, &prev_offset); /* offsets differ, move is between different list entries - set uuid */ int set_uuid = pe_offset_new != pe_offset_old ? 1 : 0; /* fill next and prev offsets of moving element using redo log */ list_fill_entry_redo_log(pop, ctx, &args_common, next_offset, prev_offset, set_uuid); redo_last: unlock: operation_process(ctx); operation_finish(ctx, 0); list_mutexes_unlock(pop, head_new, head_old); err: lane_release(pop); ASSERT(ret == 0 || ret == -1); return ret; }
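Editor's note (illustrative, not part of the repository file above): list.c keeps each list circular -- the first inserted element is looped onto itself, and later inserts patch four links (the new element's next/prev plus the neighbours' pointers) through the redo log so the update appears atomic. The volatile sketch below mirrors only that linking logic with ordinary pointers; all names are hypothetical and there is no persistence or logging.

/* Volatile model of the circular linking done by list_insert_user /
 * list_insert_before; just the pointer manipulation, nothing durable. */
#include <assert.h>
#include <stddef.h>

struct elem {
	struct elem *next;
	struct elem *prev;
};

struct head { struct elem *first; };

static void
insert_head(struct head *h, struct elem *e)
{
	if (h->first == NULL) {
		/* first element on the list: loop it onto itself */
		e->next = e->prev = e;
	} else {
		/* insert before the current first element */
		e->next = h->first;
		e->prev = h->first->prev;
		h->first->prev->next = e;
		h->first->prev = e;
	}
	h->first = e;
}

int
main(void)
{
	struct head h = { NULL };
	struct elem a, b;

	insert_head(&h, &a);
	assert(a.next == &a && a.prev == &a);

	insert_head(&h, &b);
	assert(h.first == &b && b.next == &a && a.prev == &b);
	assert(a.next == &b && b.prev == &a);	/* still circular */
	return 0;
}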
24,297
24.848936
80
c
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmemobj/memops.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2016-2019, Intel Corporation */ /* * memops.h -- aggregated memory operations helper definitions */ #ifndef LIBPMEMOBJ_MEMOPS_H #define LIBPMEMOBJ_MEMOPS_H 1 #include <stddef.h> #include <stdint.h> #include "vec.h" #include "pmemops.h" #include "ulog.h" #include "lane.h" #ifdef __cplusplus extern "C" { #endif enum operation_log_type { LOG_PERSISTENT, /* log of persistent modifications */ LOG_TRANSIENT, /* log of transient memory modifications */ MAX_OPERATION_LOG_TYPE }; enum log_type { LOG_TYPE_UNDO, LOG_TYPE_REDO, MAX_LOG_TYPE, }; struct user_buffer_def { void *addr; size_t size; }; #ifdef GET_NDP_BREAKDOWN extern uint64_t ulogCycles; #endif #ifdef USE_NDP_REDO extern int use_ndp_redo; #endif struct operation_context; struct operation_context * operation_new(struct ulog *redo, size_t ulog_base_nbytes, ulog_extend_fn extend, ulog_free_fn ulog_free, const struct pmem_ops *p_ops, enum log_type type); void operation_init(struct operation_context *ctx); void operation_start(struct operation_context *ctx); void operation_resume(struct operation_context *ctx); void operation_delete(struct operation_context *ctx); void operation_free_logs(struct operation_context *ctx, uint64_t flags); int operation_add_buffer(struct operation_context *ctx, void *dest, void *src, size_t size, ulog_operation_type type); int operation_add_entry(struct operation_context *ctx, void *ptr, uint64_t value, ulog_operation_type type); int operation_add_typed_entry(struct operation_context *ctx, void *ptr, uint64_t value, ulog_operation_type type, enum operation_log_type log_type); int operation_user_buffer_verify_align(struct operation_context *ctx, struct user_buffer_def *userbuf); void operation_add_user_buffer(struct operation_context *ctx, struct user_buffer_def *userbuf); void operation_set_auto_reserve(struct operation_context *ctx, int auto_reserve); void operation_set_any_user_buffer(struct operation_context *ctx, int any_user_buffer); int operation_get_any_user_buffer(struct operation_context *ctx); int operation_user_buffer_range_cmp(const void *lhs, const void *rhs); int operation_reserve(struct operation_context *ctx, size_t new_capacity); void operation_process(struct operation_context *ctx); void operation_finish(struct operation_context *ctx, unsigned flags); void operation_cancel(struct operation_context *ctx); #ifdef __cplusplus } #endif #endif
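Editor's note (hedged sketch, not part of the header above): list.c earlier in this dump shows the typical life cycle of an operation context obtained from a lane; the comment below summarizes that observed call sequence for readers of memops.h, not a contract of the API.

/*
 * Typical usage, as seen in list.c (sketch, not a contract):
 *
 *	struct operation_context *ctx = lane->external;
 *	operation_start(ctx);                  // begin a new operation
 *	operation_add_entry(ctx, ptr, val,
 *	    ULOG_OPERATION_SET);               // queue logged updates
 *	operation_process(ctx);                // apply them atomically
 *	operation_finish(ctx, 0);              // clean up / free logs
 */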
2,467
26.422222
74
h
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmemobj/pmalloc.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2018, Intel Corporation */ /* * pmalloc.h -- internal definitions for persistent malloc */ #ifndef LIBPMEMOBJ_PMALLOC_H #define LIBPMEMOBJ_PMALLOC_H 1 #include <stddef.h> #include <stdint.h> #include "libpmemobj.h" #include "memops.h" #include "palloc.h" #ifdef __cplusplus extern "C" { #endif /* single operations done in the internal context of the lane */ int pmalloc(PMEMobjpool *pop, uint64_t *off, size_t size, uint64_t extra_field, uint16_t object_flags); int pmalloc_construct(PMEMobjpool *pop, uint64_t *off, size_t size, palloc_constr constructor, void *arg, uint64_t extra_field, uint16_t object_flags, uint16_t class_id); int prealloc(PMEMobjpool *pop, uint64_t *off, size_t size, uint64_t extra_field, uint16_t object_flags); void pfree(PMEMobjpool *pop, uint64_t *off); /* external operation to be used together with context-aware palloc funcs */ struct operation_context *pmalloc_operation_hold(PMEMobjpool *pop); struct operation_context *pmalloc_operation_hold_no_start(PMEMobjpool *pop); void pmalloc_operation_release(PMEMobjpool *pop); void pmalloc_ctl_register(PMEMobjpool *pop); int pmalloc_cleanup(PMEMobjpool *pop); int pmalloc_boot(PMEMobjpool *pop); #ifdef __cplusplus } #endif #endif
1,291
24.333333
76
h
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmemobj/recycler.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2016-2019, Intel Corporation */ /* * recycler.h -- internal definitions of run recycler * * This is a container that stores runs that are currently not used by any of * the buckets. */ #ifndef LIBPMEMOBJ_RECYCLER_H #define LIBPMEMOBJ_RECYCLER_H 1 #include "memblock.h" #include "vec.h" #ifdef __cplusplus extern "C" { #endif struct recycler; VEC(empty_runs, struct memory_block); struct recycler_element { uint32_t max_free_block; uint32_t free_space; uint32_t chunk_id; uint32_t zone_id; }; struct recycler *recycler_new(struct palloc_heap *layout, size_t nallocs, size_t *peak_arenas); void recycler_delete(struct recycler *r); struct recycler_element recycler_element_new(struct palloc_heap *heap, const struct memory_block *m); int recycler_put(struct recycler *r, const struct memory_block *m, struct recycler_element element); int recycler_get(struct recycler *r, struct memory_block *m); struct empty_runs recycler_recalc(struct recycler *r, int force); void recycler_inc_unaccounted(struct recycler *r, const struct memory_block *m); #ifdef __cplusplus } #endif #endif
1,158
20.867925
77
h
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmemobj/palloc.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2019, Intel Corporation */ /* * palloc.h -- internal definitions for persistent allocator */ #ifndef LIBPMEMOBJ_PALLOC_H #define LIBPMEMOBJ_PALLOC_H 1 #include <stddef.h> #include <stdint.h> #include "libpmemobj.h" #include "memops.h" #include "ulog.h" #include "valgrind_internal.h" #include "stats.h" #ifdef __cplusplus extern "C" { #endif #define PALLOC_CTL_DEBUG_NO_PATTERN (-1) struct palloc_heap { struct pmem_ops p_ops; struct heap_layout *layout; struct heap_rt *rt; uint64_t *sizep; uint64_t growsize; struct stats *stats; struct pool_set *set; void *base; int alloc_pattern; }; struct memory_block; typedef int (*palloc_constr)(void *base, void *ptr, size_t usable_size, void *arg); int palloc_operation(struct palloc_heap *heap, uint64_t off, uint64_t *dest_off, size_t size, palloc_constr constructor, void *arg, uint64_t extra_field, uint16_t object_flags, uint16_t class_id, uint16_t arena_id, struct operation_context *ctx); int palloc_reserve(struct palloc_heap *heap, size_t size, palloc_constr constructor, void *arg, uint64_t extra_field, uint16_t object_flags, uint16_t class_id, uint16_t arena_id, struct pobj_action *act); void palloc_defer_free(struct palloc_heap *heap, uint64_t off, struct pobj_action *act); void palloc_cancel(struct palloc_heap *heap, struct pobj_action *actv, size_t actvcnt); void palloc_publish(struct palloc_heap *heap, struct pobj_action *actv, size_t actvcnt, struct operation_context *ctx); void palloc_set_value(struct palloc_heap *heap, struct pobj_action *act, uint64_t *ptr, uint64_t value); uint64_t palloc_first(struct palloc_heap *heap); uint64_t palloc_next(struct palloc_heap *heap, uint64_t off); size_t palloc_usable_size(struct palloc_heap *heap, uint64_t off); uint64_t palloc_extra(struct palloc_heap *heap, uint64_t off); uint16_t palloc_flags(struct palloc_heap *heap, uint64_t off); int palloc_boot(struct palloc_heap *heap, void *heap_start, uint64_t heap_size, uint64_t *sizep, void *base, struct pmem_ops *p_ops, struct stats *stats, struct pool_set *set); int palloc_buckets_init(struct palloc_heap *heap); int palloc_init(void *heap_start, uint64_t heap_size, uint64_t *sizep, struct pmem_ops *p_ops); void *palloc_heap_end(struct palloc_heap *h); int palloc_heap_check(void *heap_start, uint64_t heap_size); int palloc_heap_check_remote(void *heap_start, uint64_t heap_size, struct remote_ops *ops); void palloc_heap_cleanup(struct palloc_heap *heap); size_t palloc_heap(void *heap_start); int palloc_defrag(struct palloc_heap *heap, uint64_t **objv, size_t objcnt, struct operation_context *ctx, struct pobj_defrag_result *result); /* foreach callback, terminates iteration if return value is non-zero */ typedef int (*object_callback)(const struct memory_block *m, void *arg); #if VG_MEMCHECK_ENABLED void palloc_heap_vg_open(struct palloc_heap *heap, int objects); #endif #ifdef __cplusplus } #endif #endif
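Editor's note (hedged sketch, not part of the header above): list_insert_new() in list.c earlier in this dump illustrates the reserve/publish idiom these declarations support; it is summarized below as a comment for readers of palloc.h.

/*
 * Reserve/publish idiom, as used by list_insert_new() in list.c (sketch):
 *
 *	struct pobj_action act;
 *	palloc_reserve(&pop->heap, size, constructor, arg,
 *	    extra, flags, class_id, arena_id, &act);  // volatile reservation
 *	...record offsets in the operation context...
 *	palloc_publish(&pop->heap, &act, 1, ctx);     // made durable here
 *	// or: palloc_cancel(&pop->heap, &act, 1) to back out
 */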
3,006
25.377193
80
h
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmemobj/container.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2019, Intel Corporation */ /* * container.h -- internal definitions for block containers */ #ifndef LIBPMEMOBJ_CONTAINER_H #define LIBPMEMOBJ_CONTAINER_H 1 #include "memblock.h" #ifdef __cplusplus extern "C" { #endif struct block_container { const struct block_container_ops *c_ops; struct palloc_heap *heap; }; struct block_container_ops { /* inserts a new memory block into the container */ int (*insert)(struct block_container *c, const struct memory_block *m); /* removes exact match memory block */ int (*get_rm_exact)(struct block_container *c, const struct memory_block *m); /* removes and returns the best-fit memory block for size */ int (*get_rm_bestfit)(struct block_container *c, struct memory_block *m); /* checks whether the container is empty */ int (*is_empty)(struct block_container *c); /* removes all elements from the container */ void (*rm_all)(struct block_container *c); /* deletes the container */ void (*destroy)(struct block_container *c); }; #ifdef __cplusplus } #endif #endif /* LIBPMEMOBJ_CONTAINER_H */
1,125
21.979592
72
h
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmemobj/stats.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2017-2019, Intel Corporation */ /* * stats.h -- definitions of statistics */ #ifndef LIBPMEMOBJ_STATS_H #define LIBPMEMOBJ_STATS_H 1 #include "ctl.h" #include "libpmemobj/ctl.h" #ifdef __cplusplus extern "C" { #endif struct stats_transient { uint64_t heap_run_allocated; uint64_t heap_run_active; }; struct stats_persistent { uint64_t heap_curr_allocated; }; struct stats { enum pobj_stats_enabled enabled; struct stats_transient *transient; struct stats_persistent *persistent; }; #define STATS_INC(stats, type, name, value) do {\ STATS_INC_##type(stats, name, value);\ } while (0) #define STATS_INC_transient(stats, name, value) do {\ if ((stats)->enabled == POBJ_STATS_ENABLED_TRANSIENT ||\ (stats)->enabled == POBJ_STATS_ENABLED_BOTH)\ util_fetch_and_add64((&(stats)->transient->name), (value));\ } while (0) #define STATS_INC_persistent(stats, name, value) do {\ if ((stats)->enabled == POBJ_STATS_ENABLED_PERSISTENT ||\ (stats)->enabled == POBJ_STATS_ENABLED_BOTH)\ util_fetch_and_add64((&(stats)->persistent->name), (value));\ } while (0) #define STATS_SUB(stats, type, name, value) do {\ STATS_SUB_##type(stats, name, value);\ } while (0) #define STATS_SUB_transient(stats, name, value) do {\ if ((stats)->enabled == POBJ_STATS_ENABLED_TRANSIENT ||\ (stats)->enabled == POBJ_STATS_ENABLED_BOTH)\ util_fetch_and_sub64((&(stats)->transient->name), (value));\ } while (0) #define STATS_SUB_persistent(stats, name, value) do {\ if ((stats)->enabled == POBJ_STATS_ENABLED_PERSISTENT ||\ (stats)->enabled == POBJ_STATS_ENABLED_BOTH)\ util_fetch_and_sub64((&(stats)->persistent->name), (value));\ } while (0) #define STATS_SET(stats, type, name, value) do {\ STATS_SET_##type(stats, name, value);\ } while (0) #define STATS_SET_transient(stats, name, value) do {\ if ((stats)->enabled == POBJ_STATS_ENABLED_TRANSIENT ||\ (stats)->enabled == POBJ_STATS_ENABLED_BOTH)\ util_atomic_store_explicit64((&(stats)->transient->name),\ (value), memory_order_release);\ } while (0) #define STATS_SET_persistent(stats, name, value) do {\ if ((stats)->enabled == POBJ_STATS_ENABLED_PERSISTENT ||\ (stats)->enabled == POBJ_STATS_ENABLED_BOTH)\ util_atomic_store_explicit64((&(stats)->persistent->name),\ (value), memory_order_release);\ } while (0) #define STATS_CTL_LEAF(type, name)\ {CTL_STR(name), CTL_NODE_LEAF,\ {CTL_READ_HANDLER(type##_##name), NULL, NULL},\ NULL, NULL} #define STATS_CTL_HANDLER(type, name, varname)\ static int CTL_READ_HANDLER(type##_##name)(void *ctx,\ enum ctl_query_source source, void *arg, struct ctl_indexes *indexes)\ {\ PMEMobjpool *pop = ctx;\ uint64_t *argv = arg;\ util_atomic_load_explicit64(&pop->stats->type->varname,\ argv, memory_order_acquire);\ return 0;\ } void stats_ctl_register(PMEMobjpool *pop); struct stats *stats_new(PMEMobjpool *pop); void stats_delete(PMEMobjpool *pop, struct stats *stats); #ifdef __cplusplus } #endif #endif
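Editor's note (illustrative, not part of the header above): the STATS_INC/STATS_SUB/STATS_SET macros only touch a counter when the pool's enabled mode covers that counter class. A trimmed standalone model of that gating is below; it uses plain additions instead of the util_* atomics, and every name in it is a hypothetical stand-in.

/* Standalone model of the gating in STATS_INC_transient / STATS_INC_persistent;
 * the real macros use util_fetch_and_add64 on pop->stats. */
#include <assert.h>
#include <stdint.h>

enum mode { DISABLED, TRANSIENT_ONLY, PERSISTENT_ONLY, BOTH };

struct mock_stats {
	enum mode enabled;
	uint64_t run_allocated;		/* transient counter */
	uint64_t curr_allocated;	/* persistent counter */
};

#define MOCK_INC_TRANSIENT(s, name, v) do {\
	if ((s)->enabled == TRANSIENT_ONLY || (s)->enabled == BOTH)\
		(s)->name += (v);\
} while (0)

#define MOCK_INC_PERSISTENT(s, name, v) do {\
	if ((s)->enabled == PERSISTENT_ONLY || (s)->enabled == BOTH)\
		(s)->name += (v);\
} while (0)

int
main(void)
{
	struct mock_stats s = { TRANSIENT_ONLY, 0, 0 };

	MOCK_INC_TRANSIENT(&s, run_allocated, 64);
	MOCK_INC_PERSISTENT(&s, curr_allocated, 64);	/* gated off */

	assert(s.run_allocated == 64 && s.curr_allocated == 0);
	return 0;
}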
2,990
26.440367
71
h
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmemobj/bucket.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2020, Intel Corporation */ /* * bucket.c -- bucket implementation * * Buckets manage volatile state of the heap. They are the abstraction layer * between the heap-managed chunks/runs and memory allocations. * * Each bucket instance can have a different underlying container that is * responsible for selecting blocks - which means that whether the allocator * serves memory blocks in best/first/next -fit manner is decided during bucket * creation. */ #include "alloc_class.h" #include "bucket.h" #include "heap.h" #include "out.h" #include "sys_util.h" #include "valgrind_internal.h" /* * bucket_new -- creates a new bucket instance */ struct bucket * bucket_new(struct block_container *c, struct alloc_class *aclass) { if (c == NULL) return NULL; struct bucket *b = Malloc(sizeof(*b)); if (b == NULL) return NULL; b->container = c; b->c_ops = c->c_ops; util_mutex_init(&b->lock); b->is_active = 0; b->active_memory_block = NULL; if (aclass && aclass->type == CLASS_RUN) { b->active_memory_block = Zalloc(sizeof(struct memory_block_reserved)); if (b->active_memory_block == NULL) goto error_active_alloc; } b->aclass = aclass; return b; error_active_alloc: util_mutex_destroy(&b->lock); Free(b); return NULL; } /* * bucket_insert_block -- inserts a block into the bucket */ int bucket_insert_block(struct bucket *b, const struct memory_block *m) { #if VG_MEMCHECK_ENABLED || VG_HELGRIND_ENABLED || VG_DRD_ENABLED if (On_memcheck || On_drd_or_hg) { size_t size = m->m_ops->get_real_size(m); void *data = m->m_ops->get_real_data(m); VALGRIND_DO_MAKE_MEM_NOACCESS(data, size); VALGRIND_ANNOTATE_NEW_MEMORY(data, size); } #endif return b->c_ops->insert(b->container, m); } /* * bucket_delete -- cleanups and deallocates bucket instance */ void bucket_delete(struct bucket *b) { if (b->active_memory_block) Free(b->active_memory_block); util_mutex_destroy(&b->lock); b->c_ops->destroy(b->container); Free(b); } /* * bucket_current_resvp -- returns the pointer to the current reservation count */ int * bucket_current_resvp(struct bucket *b) { return b->active_memory_block ? &b->active_memory_block->nresv : NULL; }
2,251
21.52
79
c
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmemobj/container_seglists.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2019, Intel Corporation */ /* * container_seglists.c -- implementation of segregated lists block container * * This container is constructed from N (up to 64) intrusive lists and a * single 8 byte bitmap that stores the information whether a given list is * empty or not. */ #include "container_seglists.h" #include "out.h" #include "sys_util.h" #include "util.h" #include "valgrind_internal.h" #include "vecq.h" #define SEGLIST_BLOCK_LISTS 64U struct block_container_seglists { struct block_container super; struct memory_block m; VECQ(, uint32_t) blocks[SEGLIST_BLOCK_LISTS]; uint64_t nonempty_lists; }; /* * container_seglists_insert_block -- (internal) inserts a new memory block * into the container */ static int container_seglists_insert_block(struct block_container *bc, const struct memory_block *m) { ASSERT(m->chunk_id < MAX_CHUNK); ASSERT(m->zone_id < UINT16_MAX); ASSERTne(m->size_idx, 0); struct block_container_seglists *c = (struct block_container_seglists *)bc; if (c->nonempty_lists == 0) c->m = *m; ASSERT(m->size_idx <= SEGLIST_BLOCK_LISTS); ASSERT(m->chunk_id == c->m.chunk_id); ASSERT(m->zone_id == c->m.zone_id); if (VECQ_ENQUEUE(&c->blocks[m->size_idx - 1], m->block_off) != 0) return -1; /* marks the list as nonempty */ c->nonempty_lists |= 1ULL << (m->size_idx - 1); return 0; } /* * container_seglists_get_rm_block_bestfit -- (internal) removes and returns the * best-fit memory block for size */ static int container_seglists_get_rm_block_bestfit(struct block_container *bc, struct memory_block *m) { struct block_container_seglists *c = (struct block_container_seglists *)bc; ASSERT(m->size_idx <= SEGLIST_BLOCK_LISTS); uint32_t i = 0; /* applicable lists */ uint64_t size_mask = (1ULL << (m->size_idx - 1)) - 1; uint64_t v = c->nonempty_lists & ~size_mask; if (v == 0) return ENOMEM; /* finds the list that serves the smallest applicable size */ i = util_lssb_index64(v); uint32_t block_offset = VECQ_DEQUEUE(&c->blocks[i]); if (VECQ_SIZE(&c->blocks[i]) == 0) /* marks the list as empty */ c->nonempty_lists &= ~(1ULL << (i)); *m = c->m; m->block_off = block_offset; m->size_idx = i + 1; return 0; } /* * container_seglists_is_empty -- (internal) checks whether the container is * empty */ static int container_seglists_is_empty(struct block_container *bc) { struct block_container_seglists *c = (struct block_container_seglists *)bc; return c->nonempty_lists == 0; } /* * container_seglists_rm_all -- (internal) removes all elements from the container */ static void container_seglists_rm_all(struct block_container *bc) { struct block_container_seglists *c = (struct block_container_seglists *)bc; for (unsigned i = 0; i < SEGLIST_BLOCK_LISTS; ++i) VECQ_CLEAR(&c->blocks[i]); c->nonempty_lists = 0; } /* * container_seglists_destroy -- (internal) deletes the container */ static void container_seglists_destroy(struct block_container *bc) { struct block_container_seglists *c = (struct block_container_seglists *)bc; for (unsigned i = 0; i < SEGLIST_BLOCK_LISTS; ++i) VECQ_DELETE(&c->blocks[i]); Free(c); } /* * This container does not support retrieval of exact memory blocks, but otherwise * provides best-fit in O(1) time for unit sizes that do not exceed 64.
*/ static const struct block_container_ops container_seglists_ops = { .insert = container_seglists_insert_block, .get_rm_exact = NULL, .get_rm_bestfit = container_seglists_get_rm_block_bestfit, .is_empty = container_seglists_is_empty, .rm_all = container_seglists_rm_all, .destroy = container_seglists_destroy, }; /* * container_new_seglists -- allocates and initializes a seglists container */ struct block_container * container_new_seglists(struct palloc_heap *heap) { struct block_container_seglists *bc = Malloc(sizeof(*bc)); if (bc == NULL) goto error_container_malloc; bc->super.heap = heap; bc->super.c_ops = &container_seglists_ops; for (unsigned i = 0; i < SEGLIST_BLOCK_LISTS; ++i) VECQ_INIT(&bc->blocks[i]); bc->nonempty_lists = 0; return (struct block_container *)&bc->super; error_container_malloc: return NULL; }
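Editor's note (illustrative, not part of the repository file above): the best-fit lookup in container_seglists_get_rm_block_bestfit masks off the bitmap bits for lists that are too small and then takes the least-significant set bit of what remains. A standalone re-statement of that bit trick follows; the GCC/Clang builtin is used in place of util_lssb_index64, which is an assumed-equivalent substitution.

/* Standalone model of the nonempty_lists bitmap trick: bit i set means
 * "the list holding blocks of size_idx == i + 1 is non-empty". */
#include <assert.h>
#include <stdint.h>

/* smallest list index that can satisfy a request of 'size_idx' units,
 * or -1 if no non-empty list is large enough */
static int
best_fit_list(uint64_t nonempty_lists, unsigned size_idx)
{
	uint64_t size_mask = (1ULL << (size_idx - 1)) - 1;
	uint64_t v = nonempty_lists & ~size_mask;	/* applicable lists */

	if (v == 0)
		return -1;

	return __builtin_ctzll(v);	/* stand-in for util_lssb_index64() */
}

int
main(void)
{
	/* lists for size_idx 1, 3 and 8 are non-empty */
	uint64_t nonempty = (1ULL << 0) | (1ULL << 2) | (1ULL << 7);

	assert(best_fit_list(nonempty, 2) == 2);	/* size_idx 3 serves it */
	assert(best_fit_list(nonempty, 4) == 7);	/* next fit is size_idx 8 */
	assert(best_fit_list(nonempty, 9) == -1);	/* nothing big enough */
	return 0;
}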
4,215
23.511628
80
c
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmemobj/tx.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2016-2019, Intel Corporation */ /* * tx.h -- internal definitions for transactions */ #ifndef LIBPMEMOBJ_INTERNAL_TX_H #define LIBPMEMOBJ_INTERNAL_TX_H 1 #include <stdint.h> #include "obj.h" #include "ulog.h" #ifdef __cplusplus extern "C" { #endif #define TX_DEFAULT_RANGE_CACHE_SIZE (1 << 15) #define TX_DEFAULT_RANGE_CACHE_THRESHOLD (1 << 12) #define TX_RANGE_MASK (8ULL - 1) #define TX_RANGE_MASK_LEGACY (32ULL - 1) #define TX_ALIGN_SIZE(s, amask) (((s) + (amask)) & ~(amask)) #define TX_SNAPSHOT_LOG_ENTRY_ALIGNMENT CACHELINE_SIZE #define TX_SNAPSHOT_LOG_BUFFER_OVERHEAD sizeof(struct ulog) #define TX_SNAPSHOT_LOG_ENTRY_OVERHEAD sizeof(struct ulog_entry_buf) #define TX_INTENT_LOG_BUFFER_ALIGNMENT CACHELINE_SIZE #define TX_INTENT_LOG_BUFFER_OVERHEAD sizeof(struct ulog) #define TX_INTENT_LOG_ENTRY_OVERHEAD sizeof(struct ulog_entry_val) struct tx_parameters { size_t cache_size; }; /* * Returns the current transaction's pool handle, NULL if not within * a transaction. */ PMEMobjpool *tx_get_pop(void); void tx_ctl_register(PMEMobjpool *pop); struct tx_parameters *tx_params_new(void); void tx_params_delete(struct tx_parameters *tx_params); #ifdef __cplusplus } #endif #endif
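Editor's note (illustrative, not part of the header above): TX_ALIGN_SIZE rounds a snapshot size up to the range granularity encoded in TX_RANGE_MASK (8 bytes, or 32 in the legacy layout). A quick standalone check of that arithmetic, repeating the macros locally so it compiles on its own:

/* Standalone check of the TX_ALIGN_SIZE rounding used for snapshot ranges. */
#include <assert.h>

#define TX_RANGE_MASK (8ULL - 1)
#define TX_RANGE_MASK_LEGACY (32ULL - 1)
#define TX_ALIGN_SIZE(s, amask) (((s) + (amask)) & ~(amask))

int
main(void)
{
	assert(TX_ALIGN_SIZE(1, TX_RANGE_MASK) == 8);	/* rounds up */
	assert(TX_ALIGN_SIZE(8, TX_RANGE_MASK) == 8);	/* already aligned */
	assert(TX_ALIGN_SIZE(13, TX_RANGE_MASK) == 16);
	assert(TX_ALIGN_SIZE(13, TX_RANGE_MASK_LEGACY) == 32);
	return 0;
}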
1,258
22.314815
68
h
null
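TX_ALIGN_SIZE and the two range masks above determine how many bytes a transactional snapshot actually occupies in the log. A small hedged check of that arithmetic, standalone; only the three macro definitions are repeated from the header.

#include <assert.h>
#include <stdint.h>

#define TX_RANGE_MASK (8ULL - 1)
#define TX_RANGE_MASK_LEGACY (32ULL - 1)
#define TX_ALIGN_SIZE(s, amask) (((s) + (amask)) & ~(amask))

int
main(void)
{
	/* a 13-byte snapshot is logged as 16 bytes at 8-byte granularity */
	assert(TX_ALIGN_SIZE(13ULL, TX_RANGE_MASK) == 16);

	/* the legacy layout rounds the same request up to 32 bytes */
	assert(TX_ALIGN_SIZE(13ULL, TX_RANGE_MASK_LEGACY) == 32);

	/* already-aligned sizes are unchanged */
	assert(TX_ALIGN_SIZE(64ULL, TX_RANGE_MASK) == 64);

	return 0;
}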
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmemobj/critnib.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2018-2019, Intel Corporation */ /* * critnib.c -- implementation of critnib tree * * It offers identity lookup (like a hashmap) and <= lookup (like a search * tree). Unlike some hashing algorithms (cuckoo hash, perfect hashing) the * complexity isn't constant, but for data sizes we expect it's several * times as fast as cuckoo, and has no "stop the world" cases that would * cause latency (ie, better worst case behaviour). */ /* * STRUCTURE DESCRIPTION * * Critnib is a hybrid between a radix tree and DJ Bernstein's critbit: * it skips nodes for uninteresting radix nodes (ie, ones that would have * exactly one child), this requires adding to every node a field that * describes the slice (4-bit in our case) that this radix level is for. * * This implementation also stores each node's path (ie, bits that are * common to every key in that subtree) -- this doesn't help with lookups * at all (unused in == match, could be reconstructed at no cost in <= * after first dive) but simplifies inserts and removes. If we ever want * that piece of memory it's easy to trim it down. */ /* * CONCURRENCY ISSUES * * Reads are completely lock-free sync-free, but only almost wait-free: * if for some reason a read thread gets pathologically stalled, it will * notice the data being stale and restart the work. In usual cases, * the structure having been modified does _not_ cause a restart. * * Writes could be easily made lock-free as well (with only a cmpxchg * sync), but this leads to problems with removes. A possible solution * would be doing removes by overwriting by NULL w/o freeing -- yet this * would lead to the structure growing without bounds. Complex per-node * locks would increase concurrency but they slow down individual writes * enough that in practice a simple global write lock works faster. * * Removes are the only operation that can break reads. The structure * can do local RCU well -- the problem being knowing when it's safe to * free. Any synchronization with reads would kill their speed, thus * instead we have a remove count. The grace period is DELETED_LIFE, * after which any read will notice staleness and restart its work. */ #include <errno.h> #include <stdbool.h> #include "alloc.h" #include "critnib.h" #include "out.h" #include "sys_util.h" #include "valgrind_internal.h" /* * A node that has been deleted is left untouched for this many delete * cycles. Reads have guaranteed correctness if they took no longer than * DELETED_LIFE concurrent deletes, otherwise they notice something is * wrong and restart. The memory of deleted nodes is never freed to * malloc nor their pointers lead anywhere wrong, thus a stale read will * (temporarily) get a wrong answer but won't crash. * * There's no need to count writes as they never interfere with reads. * * Allowing stale reads (of arbitrarily old writes or of deletes less than * DELETED_LIFE old) might sound counterintuitive, but it doesn't affect * semantics in any way: the thread could have been stalled just after * returning from our code. Thus, the guarantee is: the result of get() or * find_le() is a value that was current at any point between the call * start and end. */ #define DELETED_LIFE 16 #define SLICE 4 #define NIB ((1ULL << SLICE) - 1) #define SLNODES (1 << SLICE) typedef unsigned char sh_t; struct critnib_node { /* * path is the part of a tree that's already traversed (be it through * explicit nodes or collapsed links) -- ie, any subtree below has all * those bits set to this value. 
* * nib is a 4-bit slice that's an index into the node's children. * * shift is the length (in bits) of the part of the key below this node. * * nib * |XXXXXXXXXX|?|*****| * path ^ * +-----+ * shift */ struct critnib_node *child[SLNODES]; uint64_t path; sh_t shift; }; struct critnib_leaf { uint64_t key; void *value; }; struct critnib { struct critnib_node *root; /* pool of freed nodes: singly linked list, next at child[0] */ struct critnib_node *deleted_node; struct critnib_leaf *deleted_leaf; /* nodes removed but not yet eligible for reuse */ struct critnib_node *pending_del_nodes[DELETED_LIFE]; struct critnib_leaf *pending_del_leaves[DELETED_LIFE]; uint64_t remove_count; os_mutex_t mutex; /* writes/removes */ }; /* * atomic load */ static void load(void *src, void *dst) { util_atomic_load_explicit64((uint64_t *)src, (uint64_t *)dst, memory_order_acquire); } /* * atomic store */ static void store(void *dst, void *src) { util_atomic_store_explicit64((uint64_t *)dst, (uint64_t)src, memory_order_release); } /* * internal: is_leaf -- check tagged pointer for leafness */ static inline bool is_leaf(struct critnib_node *n) { return (uint64_t)n & 1; } /* * internal: to_leaf -- untag a leaf pointer */ static inline struct critnib_leaf * to_leaf(struct critnib_node *n) { return (void *)((uint64_t)n & ~1ULL); } /* * internal: path_mask -- return bit mask of a path above a subtree [shift] * bits tall */ static inline uint64_t path_mask(sh_t shift) { return ~NIB << shift; } /* * internal: slice_index -- return index of child at the given nib */ static inline unsigned slice_index(uint64_t key, sh_t shift) { return (unsigned)((key >> shift) & NIB); } /* * critnib_new -- allocates a new critnib structure */ struct critnib * critnib_new(void) { struct critnib *c = Zalloc(sizeof(struct critnib)); if (!c) return NULL; util_mutex_init(&c->mutex); VALGRIND_HG_DRD_DISABLE_CHECKING(&c->root, sizeof(c->root)); VALGRIND_HG_DRD_DISABLE_CHECKING(&c->remove_count, sizeof(c->remove_count)); return c; } /* * internal: delete_node -- recursively free (to malloc) a subtree */ static void delete_node(struct critnib_node *__restrict n) { if (!is_leaf(n)) { for (int i = 0; i < SLNODES; i++) { if (n->child[i]) delete_node(n->child[i]); } Free(n); } else { Free(to_leaf(n)); } } /* * critnib_delete -- destroy and free a critnib struct */ void critnib_delete(struct critnib *c) { if (c->root) delete_node(c->root); util_mutex_destroy(&c->mutex); for (struct critnib_node *m = c->deleted_node; m; ) { struct critnib_node *mm = m->child[0]; Free(m); m = mm; } for (struct critnib_leaf *k = c->deleted_leaf; k; ) { struct critnib_leaf *kk = k->value; Free(k); k = kk; } for (int i = 0; i < DELETED_LIFE; i++) { Free(c->pending_del_nodes[i]); Free(c->pending_del_leaves[i]); } Free(c); } /* * internal: free_node -- free (to internal pool, not malloc) a node. * * We cannot free them to malloc as a stalled reader thread may still walk * through such nodes; it will notice the result being bogus but only after * completing the walk, thus we need to ensure any freed nodes still point * to within the critnib structure. 
*/ static void free_node(struct critnib *__restrict c, struct critnib_node *__restrict n) { if (!n) return; ASSERT(!is_leaf(n)); n->child[0] = c->deleted_node; c->deleted_node = n; } /* * internal: alloc_node -- allocate a node from our pool or from malloc */ static struct critnib_node * alloc_node(struct critnib *__restrict c) { if (!c->deleted_node) { struct critnib_node *n = Malloc(sizeof(struct critnib_node)); if (n == NULL) ERR("!Malloc"); return n; } struct critnib_node *n = c->deleted_node; c->deleted_node = n->child[0]; VALGRIND_ANNOTATE_NEW_MEMORY(n, sizeof(*n)); return n; } /* * internal: free_leaf -- free (to internal pool, not malloc) a leaf. * * See free_node(). */ static void free_leaf(struct critnib *__restrict c, struct critnib_leaf *__restrict k) { if (!k) return; k->value = c->deleted_leaf; c->deleted_leaf = k; } /* * internal: alloc_leaf -- allocate a leaf from our pool or from malloc */ static struct critnib_leaf * alloc_leaf(struct critnib *__restrict c) { if (!c->deleted_leaf) { struct critnib_leaf *k = Malloc(sizeof(struct critnib_leaf)); if (k == NULL) ERR("!Malloc"); return k; } struct critnib_leaf *k = c->deleted_leaf; c->deleted_leaf = k->value; VALGRIND_ANNOTATE_NEW_MEMORY(k, sizeof(*k)); return k; } /* * crinib_insert -- write a key:value pair to the critnib structure * * Returns: * • 0 on success * • EEXIST if such a key already exists * • ENOMEM if we're out of memory * * Takes a global write lock but doesn't stall any readers. */ int critnib_insert(struct critnib *c, uint64_t key, void *value) { util_mutex_lock(&c->mutex); struct critnib_leaf *k = alloc_leaf(c); if (!k) { util_mutex_unlock(&c->mutex); return ENOMEM; } VALGRIND_HG_DRD_DISABLE_CHECKING(k, sizeof(struct critnib_leaf)); k->key = key; k->value = value; struct critnib_node *kn = (void *)((uint64_t)k | 1); struct critnib_node *n = c->root; if (!n) { c->root = kn; util_mutex_unlock(&c->mutex); return 0; } struct critnib_node **parent = &c->root; struct critnib_node *prev = c->root; while (n && !is_leaf(n) && (key & path_mask(n->shift)) == n->path) { prev = n; parent = &n->child[slice_index(key, n->shift)]; n = *parent; } if (!n) { n = prev; store(&n->child[slice_index(key, n->shift)], kn); util_mutex_unlock(&c->mutex); return 0; } uint64_t path = is_leaf(n) ? to_leaf(n)->key : n->path; /* Find where the path differs from our key. */ uint64_t at = path ^ key; if (!at) { ASSERT(is_leaf(n)); free_leaf(c, to_leaf(kn)); /* fail instead of replacing */ util_mutex_unlock(&c->mutex); return EEXIST; } /* and convert that to an index. 
*/ sh_t sh = util_mssb_index64(at) & (sh_t)~(SLICE - 1); struct critnib_node *m = alloc_node(c); if (!m) { free_leaf(c, to_leaf(kn)); util_mutex_unlock(&c->mutex); return ENOMEM; } VALGRIND_HG_DRD_DISABLE_CHECKING(m, sizeof(struct critnib_node)); for (int i = 0; i < SLNODES; i++) m->child[i] = NULL; m->child[slice_index(key, sh)] = kn; m->child[slice_index(path, sh)] = n; m->shift = sh; m->path = key & path_mask(sh); store(parent, m); util_mutex_unlock(&c->mutex); return 0; } /* * critnib_remove -- delete a key from the critnib structure, return its value */ void * critnib_remove(struct critnib *c, uint64_t key) { struct critnib_leaf *k; void *value = NULL; util_mutex_lock(&c->mutex); struct critnib_node *n = c->root; if (!n) goto not_found; uint64_t del = util_fetch_and_add64(&c->remove_count, 1) % DELETED_LIFE; free_node(c, c->pending_del_nodes[del]); free_leaf(c, c->pending_del_leaves[del]); c->pending_del_nodes[del] = NULL; c->pending_del_leaves[del] = NULL; if (is_leaf(n)) { k = to_leaf(n); if (k->key == key) { store(&c->root, NULL); goto del_leaf; } goto not_found; } /* * n and k are a parent:child pair (after the first iteration); k is the * leaf that holds the key we're deleting. */ struct critnib_node **k_parent = &c->root; struct critnib_node **n_parent = &c->root; struct critnib_node *kn = n; while (!is_leaf(kn)) { n_parent = k_parent; n = kn; k_parent = &kn->child[slice_index(key, kn->shift)]; kn = *k_parent; if (!kn) goto not_found; } k = to_leaf(kn); if (k->key != key) goto not_found; store(&n->child[slice_index(key, n->shift)], NULL); /* Remove the node if there's only one remaining child. */ int ochild = -1; for (int i = 0; i < SLNODES; i++) { if (n->child[i]) { if (ochild != -1) goto del_leaf; ochild = i; } } ASSERTne(ochild, -1); store(n_parent, n->child[ochild]); c->pending_del_nodes[del] = n; del_leaf: value = k->value; c->pending_del_leaves[del] = k; not_found: util_mutex_unlock(&c->mutex); return value; } /* * critnib_get -- query for a key ("==" match), returns value or NULL * * Doesn't need a lock but if many deletes happened while our thread was * somehow stalled the query is restarted (as freed nodes remain unused only * for a grace period). * * Counterintuitively, it's pointless to return the most current answer, * we need only one that was valid at any point after the call started. */ void * critnib_get(struct critnib *c, uint64_t key) { uint64_t wrs1, wrs2; void *res; do { struct critnib_node *n; load(&c->remove_count, &wrs1); load(&c->root, &n); /* * critbit algorithm: dive into the tree, looking at nothing but * each node's critical bit^H^H^Hnibble. This means we risk * going wrong way if our path is missing, but that's ok... */ while (n && !is_leaf(n)) load(&n->child[slice_index(key, n->shift)], &n); /* ... as we check it at the end. */ struct critnib_leaf *k = to_leaf(n); res = (n && k->key == key) ? k->value : NULL; load(&c->remove_count, &wrs2); } while (wrs1 + DELETED_LIFE <= wrs2); return res; } /* * internal: find_successor -- return the rightmost non-null node in a subtree */ static void * find_successor(struct critnib_node *__restrict n) { while (1) { int nib; for (nib = NIB; nib >= 0; nib--) if (n->child[nib]) break; if (nib < 0) return NULL; n = n->child[nib]; if (is_leaf(n)) return to_leaf(n)->value; } } /* * internal: find_le -- recursively search <= in a subtree */ static void * find_le(struct critnib_node *__restrict n, uint64_t key) { if (!n) return NULL; if (is_leaf(n)) { struct critnib_leaf *k = to_leaf(n); return (k->key <= key) ? 
k->value : NULL; } /* * is our key outside the subtree we're in? * * If we're inside, all bits above the nib will be identical; note * that shift points at the nib's lower rather than upper edge, so it * needs to be masked away as well. */ if ((key ^ n->path) >> (n->shift) & ~NIB) { /* * subtree is too far to the left? * -> its rightmost value is good */ if (n->path < key) return find_successor(n); /* * subtree is too far to the right? * -> it has nothing of interest to us */ return NULL; } unsigned nib = slice_index(key, n->shift); /* recursive call: follow the path */ { struct critnib_node *m; load(&n->child[nib], &m); void *value = find_le(m, key); if (value) return value; } /* * nothing in that subtree? We strayed from the path at this point, * thus need to search every subtree to our left in this node. No * need to dive into any but the first non-null, though. */ for (; nib > 0; nib--) { struct critnib_node *m; load(&n->child[nib - 1], &m); if (m) { n = m; if (is_leaf(n)) return to_leaf(n)->value; return find_successor(n); } } return NULL; } /* * critnib_find_le -- query for a key ("<=" match), returns value or NULL * * Same guarantees as critnib_get(). */ void * critnib_find_le(struct critnib *c, uint64_t key) { uint64_t wrs1, wrs2; void *res; do { load(&c->remove_count, &wrs1); struct critnib_node *n; /* avoid a subtle TOCTOU */ load(&c->root, &n); res = n ? find_le(n, key) : NULL; load(&c->remove_count, &wrs2); } while (wrs1 + DELETED_LIFE <= wrs2); return res; }
15,052
22.087423
78
c
null
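A hedged caller-side sketch of the critnib API implemented above. The function names and signatures (critnib_new, critnib_insert, critnib_get, critnib_find_le, critnib_remove, critnib_delete) come from this file; the surrounding driver code is illustrative only.

#include "critnib.h"

static void
critnib_usage_sketch(void)
{
	struct critnib *c = critnib_new();
	if (c == NULL)
		return;

	int a = 1, b = 2;

	/* identity ("==") lookups, like a hashmap */
	critnib_insert(c, 100, &a);
	critnib_insert(c, 200, &b);
	void *v = critnib_get(c, 100);		/* &a */

	/* "<=" lookup, like a search tree: largest key not above 150 */
	void *le = critnib_find_le(c, 150);	/* &a (key 100) */

	/* removal returns the stored value */
	void *removed = critnib_remove(c, 200);	/* &b */

	(void) v; (void) le; (void) removed;

	critnib_delete(c);
}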
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmemobj/memblock.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2016-2020, Intel Corporation */ /* * memblock.h -- internal definitions for memory block */ #ifndef LIBPMEMOBJ_MEMBLOCK_H #define LIBPMEMOBJ_MEMBLOCK_H 1 #include <stddef.h> #include <stdint.h> #include "os_thread.h" #include "heap_layout.h" #include "memops.h" #include "palloc.h" #ifdef __cplusplus extern "C" { #endif #define MEMORY_BLOCK_NONE \ (struct memory_block)\ {0, 0, 0, 0, NULL, NULL, MAX_HEADER_TYPES, MAX_MEMORY_BLOCK, NULL} #define MEMORY_BLOCK_IS_NONE(_m)\ ((_m).heap == NULL) #define MEMORY_BLOCK_EQUALS(lhs, rhs)\ ((lhs).zone_id == (rhs).zone_id && (lhs).chunk_id == (rhs).chunk_id &&\ (lhs).block_off == (rhs).block_off && (lhs).heap == (rhs).heap) enum memory_block_type { /* * Huge memory blocks are directly backed by memory chunks. A single * huge block can consist of several chunks. * The persistent representation of huge memory blocks can be thought * of as a doubly linked list with variable length elements. * That list is stored in the chunk headers array where one element * directly corresponds to one chunk. * * U - used, F - free, R - footer, . - empty * |U| represents a used chunk with a size index of 1, with type * information (CHUNK_TYPE_USED) stored in the corresponding header * array element - chunk_headers[chunk_id]. * * |F...R| represents a free chunk with size index of 5. The empty * chunk headers have undefined values and shouldn't be used. All * chunks with size larger than 1 must have a footer in the last * corresponding header array - chunk_headers[chunk_id - size_idx - 1]. * * The above representation of chunks will be used to describe the * way fail-safety is achieved during heap operations. * * Allocation of huge memory block with size index 5: * Initial heap state: |U| <> |F..R| <> |U| <> |F......R| * * The only block that matches that size is at very end of the chunks * list: |F......R| * * As the request was for memory block of size 5, and this ones size is * 7 there's a need to first split the chunk in two. * 1) The last chunk header of the new allocation is marked as footer * and the block after that one is marked as free: |F...RF.R| * This is allowed and has no impact on the heap because this * modification is into chunk header that is otherwise unused, in * other words the linked list didn't change. * * 2) The size index of the first header is changed from previous value * of 7 to 5: |F...R||F.R| * This is a single fail-safe atomic operation and this is the * first change that is noticeable by the heap operations. * A single linked list element is split into two new ones. * * 3) The allocation process either uses redo log or changes directly * the chunk header type from free to used: |U...R| <> |F.R| * * In a similar fashion the reverse operation, free, is performed: * Initial heap state: |U| <> |F..R| <> |F| <> |U...R| <> |F.R| * * This is the heap after the previous example with the single chunk * in between changed from used to free. * * 1) Determine the neighbors of the memory block which is being * freed. * * 2) Update the footer (if needed) information of the last chunk which * is the memory block being freed or it's neighbor to the right. * |F| <> |U...R| <> |F.R << this one| * * 3) Update the size index and type of the left-most chunk header. * And so this: |F << this one| <> |U...R| <> |F.R| * becomes this: |F.......R| * The entire chunk header can be updated in a single fail-safe * atomic operation because it's size is only 64 bytes. 
*/ MEMORY_BLOCK_HUGE, /* * Run memory blocks are chunks with CHUNK_TYPE_RUN and size index of 1. * The entire chunk is subdivided into smaller blocks and has an * additional metadata attached in the form of a bitmap - each bit * corresponds to a single block. * In this case there's no need to perform any coalescing or splitting * on the persistent metadata. * The bitmap is stored on a variable number of 64 bit values and * because of the requirement of allocation fail-safe atomicity the * maximum size index of a memory block from a run is 64 - since that's * the limit of atomic write guarantee. * * The allocation/deallocation process is a single 8 byte write that * sets/clears the corresponding bits. Depending on the user choice * it can either be made atomically or using redo-log when grouped with * other operations. * It's also important to note that in a case of realloc it might so * happen that a single 8 byte bitmap value has its bits both set and * cleared - that's why the run memory block metadata changes operate * on AND'ing or OR'ing a bitmask instead of directly setting the value. */ MEMORY_BLOCK_RUN, MAX_MEMORY_BLOCK }; enum memblock_state { MEMBLOCK_STATE_UNKNOWN, MEMBLOCK_ALLOCATED, MEMBLOCK_FREE, MAX_MEMBLOCK_STATE, }; /* runtime bitmap information for a run */ struct run_bitmap { unsigned nvalues; /* number of 8 byte values - size of values array */ unsigned nbits; /* number of valid bits */ size_t size; /* total size of the bitmap in bytes */ uint64_t *values; /* pointer to the bitmap's values array */ }; /* runtime information necessary to create a run */ struct run_descriptor { uint16_t flags; /* chunk flags for the run */ size_t unit_size; /* the size of a single unit in a run */ uint32_t size_idx; /* size index of a single run instance */ size_t alignment; /* required alignment of objects */ unsigned nallocs; /* number of allocs per run */ struct run_bitmap bitmap; }; struct memory_block_ops { /* returns memory block size */ size_t (*block_size)(const struct memory_block *m); /* prepares header modification operation */ void (*prep_hdr)(const struct memory_block *m, enum memblock_state dest_state, struct operation_context *ctx); /* returns lock associated with memory block */ os_mutex_t *(*get_lock)(const struct memory_block *m); /* returns whether a block is allocated or not */ enum memblock_state (*get_state)(const struct memory_block *m); /* returns pointer to the data of a block */ void *(*get_user_data)(const struct memory_block *m); /* * Returns the size of a memory block without overhead. * This is the size of a data block that can be used. */ size_t (*get_user_size)(const struct memory_block *m); /* returns pointer to the beginning of data of a run block */ void *(*get_real_data)(const struct memory_block *m); /* returns the size of a memory block, including headers */ size_t (*get_real_size)(const struct memory_block *m); /* writes a header of an allocation */ void (*write_header)(const struct memory_block *m, uint64_t extra_field, uint16_t flags); void (*invalidate)(const struct memory_block *m); /* * Checks the header type of a chunk matches the expected type and * modifies it if necessary. This is fail-safe atomic. */ void (*ensure_header_type)(const struct memory_block *m, enum header_type t); /* * Reinitializes a block after a heap restart. * This is called for EVERY allocation, but *only* under Valgrind. 
*/ void (*reinit_header)(const struct memory_block *m); /* returns the extra field of an allocation */ uint64_t (*get_extra)(const struct memory_block *m); /* returns the flags of an allocation */ uint16_t (*get_flags)(const struct memory_block *m); /* initializes memblock in valgrind */ void (*vg_init)(const struct memory_block *m, int objects, object_callback cb, void *arg); /* iterates over every free block */ int (*iterate_free)(const struct memory_block *m, object_callback cb, void *arg); /* iterates over every used block */ int (*iterate_used)(const struct memory_block *m, object_callback cb, void *arg); /* calculates number of free units, valid only for runs */ void (*calc_free)(const struct memory_block *m, uint32_t *free_space, uint32_t *max_free_block); /* this is called exactly once for every existing chunk */ void (*reinit_chunk)(const struct memory_block *m); /* * Initializes bitmap data for a run. * Do *not* use this function unless absolutely necessary, it breaks * the abstraction layer by exposing implementation details. */ void (*get_bitmap)(const struct memory_block *m, struct run_bitmap *b); /* calculates the ratio between occupied and unoccupied space */ unsigned (*fill_pct)(const struct memory_block *m); }; struct memory_block { uint32_t chunk_id; /* index of the memory block in its zone */ uint32_t zone_id; /* index of this block zone in the heap */ /* * Size index of the memory block represented in either multiple of * CHUNKSIZE in the case of a huge chunk or in multiple of a run * block size. */ uint32_t size_idx; /* * Used only for run chunks, must be zeroed for huge. * Number of preceding blocks in the chunk. In other words, the * position of this memory block in run bitmap. */ uint32_t block_off; /* * The variables below are associated with the memory block and are * stored here for convenience. Those fields are filled by either the * memblock_from_offset or memblock_rebuild_state, and they should not * be modified manually. */ const struct memory_block_ops *m_ops; struct palloc_heap *heap; enum header_type header_type; enum memory_block_type type; struct run_bitmap *cached_bitmap; }; /* * This is a representation of a run memory block that is active in a bucket or * is on a pending list in the recycler. * This structure should never be passed around by value because the address of * the nresv variable can be in reservations made through palloc_reserve(). Only * if the number of reservations equals 0 the structure can be moved/freed. */ struct memory_block_reserved { struct memory_block m; struct bucket *bucket; /* * Number of reservations made from this run, the pointer to this value * is stored in a user facing pobj_action structure. Decremented once * the reservation is published or canceled. */ int nresv; }; struct memory_block memblock_from_offset(struct palloc_heap *heap, uint64_t off); struct memory_block memblock_from_offset_opt(struct palloc_heap *heap, uint64_t off, int size); void memblock_rebuild_state(struct palloc_heap *heap, struct memory_block *m); struct memory_block memblock_huge_init(struct palloc_heap *heap, uint32_t chunk_id, uint32_t zone_id, uint32_t size_idx); struct memory_block memblock_run_init(struct palloc_heap *heap, uint32_t chunk_id, uint32_t zone_id, struct run_descriptor *rdsc); void memblock_run_bitmap(uint32_t *size_idx, uint16_t flags, uint64_t unit_size, uint64_t alignment, void *content, struct run_bitmap *b); #ifdef __cplusplus } #endif #endif
10,750
34.019544
80
h
null
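The run description above relies on an allocation touching at most 64 units, so the state change can be recorded by OR'ing (alloc) or AND'ing (free) a mask into a single 8-byte bitmap word instead of writing a whole value. A standalone sketch of that masking, under the simplifying assumption that the block's bits fit in one word; the helper names are illustrative and not part of the header.

#include <stdint.h>

/*
 * run_bits_mask -- mask covering size_idx consecutive units starting at
 * block_off within one 64-bit bitmap word
 */
static inline uint64_t
run_bits_mask(uint32_t block_off, uint32_t size_idx)
{
	uint64_t m = (size_idx == 64) ? UINT64_MAX : ((1ULL << size_idx) - 1);
	return m << block_off;
}

/* allocation sets the bits ... */
static inline void
run_bits_set(uint64_t *bitmap_word, uint32_t block_off, uint32_t size_idx)
{
	*bitmap_word |= run_bits_mask(block_off, size_idx);
}

/* ... and free clears them; either store can go directly or via redo log */
static inline void
run_bits_clear(uint64_t *bitmap_word, uint32_t block_off, uint32_t size_idx)
{
	*bitmap_word &= ~run_bits_mask(block_off, size_idx);
}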
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmemobj/pmalloc.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2020, Intel Corporation */ /* * pmalloc.c -- implementation of pmalloc POSIX-like API * * This is the front-end part of the persistent memory allocator. It uses both * transient and persistent representation of the heap to provide memory blocks * in a reasonable time and with an acceptable common-case fragmentation. */ #include <inttypes.h> #include "valgrind_internal.h" #include "heap.h" #include "lane.h" #include "memblock.h" #include "memops.h" #include "obj.h" #include "out.h" #include "palloc.h" #include "pmalloc.h" #include "alloc_class.h" #include "set.h" #include "mmap.h" enum pmalloc_operation_type { OPERATION_INTERNAL, /* used only for single, one-off operations */ OPERATION_EXTERNAL, /* used for everything else, incl. large redos */ MAX_OPERATION_TYPE, }; struct lane_alloc_runtime { struct operation_context *ctx[MAX_OPERATION_TYPE]; }; /* * pmalloc_operation_hold_type -- acquires allocator lane section and returns a * pointer to its operation context */ static struct operation_context * pmalloc_operation_hold_type(PMEMobjpool *pop, enum pmalloc_operation_type type, int start) { struct lane *lane; lane_hold(pop, &lane); struct operation_context *ctx = type == OPERATION_INTERNAL ? lane->internal : lane->external; if (start) operation_start(ctx); return ctx; } /* * pmalloc_operation_hold_type -- acquires allocator lane section and returns a * pointer to its operation context without starting */ struct operation_context * pmalloc_operation_hold_no_start(PMEMobjpool *pop) { return pmalloc_operation_hold_type(pop, OPERATION_EXTERNAL, 0); } /* * pmalloc_operation_hold -- acquires allocator lane section and returns a * pointer to its redo log */ struct operation_context * pmalloc_operation_hold(PMEMobjpool *pop) { return pmalloc_operation_hold_type(pop, OPERATION_EXTERNAL, 1); } /* * pmalloc_operation_release -- releases allocator lane section */ void pmalloc_operation_release(PMEMobjpool *pop) { lane_release(pop); } /* * pmalloc -- allocates a new block of memory * * The pool offset is written persistently into the off variable. * * If successful function returns zero. Otherwise an error number is returned. */ int pmalloc(PMEMobjpool *pop, uint64_t *off, size_t size, uint64_t extra_field, uint16_t object_flags) { struct operation_context *ctx = pmalloc_operation_hold_type(pop, OPERATION_INTERNAL, 1); int ret = palloc_operation(&pop->heap, 0, off, size, NULL, NULL, extra_field, object_flags, 0, 0, ctx); pmalloc_operation_release(pop); return ret; } /* * pmalloc_construct -- allocates a new block of memory with a constructor * * The block offset is written persistently into the off variable, but only * after the constructor function has been called. * * If successful function returns zero. Otherwise an error number is returned. */ int pmalloc_construct(PMEMobjpool *pop, uint64_t *off, size_t size, palloc_constr constructor, void *arg, uint64_t extra_field, uint16_t object_flags, uint16_t class_id) { struct operation_context *ctx = pmalloc_operation_hold_type(pop, OPERATION_INTERNAL, 1); int ret = palloc_operation(&pop->heap, 0, off, size, constructor, arg, extra_field, object_flags, class_id, 0, ctx); pmalloc_operation_release(pop); return ret; } /* * prealloc -- resizes in-place a previously allocated memory block * * The block offset is written persistently into the off variable. * * If successful function returns zero. Otherwise an error number is returned. 
*/ int prealloc(PMEMobjpool *pop, uint64_t *off, size_t size, uint64_t extra_field, uint16_t object_flags) { struct operation_context *ctx = pmalloc_operation_hold_type(pop, OPERATION_INTERNAL, 1); int ret = palloc_operation(&pop->heap, *off, off, size, NULL, NULL, extra_field, object_flags, 0, 0, ctx); pmalloc_operation_release(pop); return ret; } /* * pfree -- deallocates a memory block previously allocated by pmalloc * * A zero value is written persistently into the off variable. * * If successful function returns zero. Otherwise an error number is returned. */ void pfree(PMEMobjpool *pop, uint64_t *off) { struct operation_context *ctx = pmalloc_operation_hold_type(pop, OPERATION_INTERNAL, 1); int ret = palloc_operation(&pop->heap, *off, off, 0, NULL, NULL, 0, 0, 0, 0, ctx); ASSERTeq(ret, 0); pmalloc_operation_release(pop); } /* * pmalloc_boot -- global runtime init routine of allocator section */ int pmalloc_boot(PMEMobjpool *pop) { int ret = palloc_boot(&pop->heap, (char *)pop + pop->heap_offset, pop->set->poolsize - pop->heap_offset, &pop->heap_size, pop, &pop->p_ops, pop->stats, pop->set); if (ret) return ret; #if VG_MEMCHECK_ENABLED if (On_memcheck) palloc_heap_vg_open(&pop->heap, pop->vg_boot); #endif ret = palloc_buckets_init(&pop->heap); if (ret) palloc_heap_cleanup(&pop->heap); return ret; } /* * pmalloc_cleanup -- global cleanup routine of allocator section */ int pmalloc_cleanup(PMEMobjpool *pop) { palloc_heap_cleanup(&pop->heap); return 0; } /* * CTL_WRITE_HANDLER(desc) -- creates a new allocation class */ static int CTL_WRITE_HANDLER(desc)(void *ctx, enum ctl_query_source source, void *arg, struct ctl_indexes *indexes) { PMEMobjpool *pop = ctx; uint8_t id; struct alloc_class_collection *ac = heap_alloc_classes(&pop->heap); struct pobj_alloc_class_desc *p = arg; if (p->unit_size <= 0 || p->unit_size > PMEMOBJ_MAX_ALLOC_SIZE || p->units_per_block <= 0) { errno = EINVAL; return -1; } if (p->alignment != 0 && p->unit_size % p->alignment != 0) { ERR("unit size must be evenly divisible by alignment"); errno = EINVAL; return -1; } if (p->alignment > (MEGABYTE * 2)) { ERR("alignment cannot be larger than 2 megabytes"); errno = EINVAL; return -1; } enum header_type lib_htype = MAX_HEADER_TYPES; switch (p->header_type) { case POBJ_HEADER_LEGACY: lib_htype = HEADER_LEGACY; break; case POBJ_HEADER_COMPACT: lib_htype = HEADER_COMPACT; break; case POBJ_HEADER_NONE: lib_htype = HEADER_NONE; break; case MAX_POBJ_HEADER_TYPES: default: ERR("invalid header type"); errno = EINVAL; return -1; } if (PMDK_SLIST_EMPTY(indexes)) { if (alloc_class_find_first_free_slot(ac, &id) != 0) { ERR("no available free allocation class identifier"); errno = EINVAL; return -1; } } else { struct ctl_index *idx = PMDK_SLIST_FIRST(indexes); ASSERTeq(strcmp(idx->name, "class_id"), 0); if (idx->value < 0 || idx->value >= MAX_ALLOCATION_CLASSES) { ERR("class id outside of the allowed range"); errno = ERANGE; return -1; } id = (uint8_t)idx->value; if (alloc_class_reserve(ac, id) != 0) { ERR("attempted to overwrite an allocation class"); errno = EEXIST; return -1; } } size_t runsize_bytes = CHUNK_ALIGN_UP((p->units_per_block * p->unit_size) + RUN_BASE_METADATA_SIZE); /* aligning the buffer might require up-to to 'alignment' bytes */ if (p->alignment != 0) runsize_bytes += p->alignment; uint32_t size_idx = (uint32_t)(runsize_bytes / CHUNKSIZE); if (size_idx > UINT16_MAX) size_idx = UINT16_MAX; struct alloc_class *c = alloc_class_new(id, heap_alloc_classes(&pop->heap), CLASS_RUN, lib_htype, p->unit_size, p->alignment, 
size_idx); if (c == NULL) { errno = EINVAL; return -1; } if (heap_create_alloc_class_buckets(&pop->heap, c) != 0) { alloc_class_delete(ac, c); return -1; } p->class_id = c->id; p->units_per_block = c->rdsc.nallocs; return 0; } /* * pmalloc_header_type_parser -- parses the alloc header type argument */ static int pmalloc_header_type_parser(const void *arg, void *dest, size_t dest_size) { const char *vstr = arg; enum pobj_header_type *htype = dest; ASSERTeq(dest_size, sizeof(enum pobj_header_type)); if (strcmp(vstr, "none") == 0) { *htype = POBJ_HEADER_NONE; } else if (strcmp(vstr, "compact") == 0) { *htype = POBJ_HEADER_COMPACT; } else if (strcmp(vstr, "legacy") == 0) { *htype = POBJ_HEADER_LEGACY; } else { ERR("invalid header type"); errno = EINVAL; return -1; } return 0; } /* * CTL_READ_HANDLER(desc) -- reads the information about allocation class */ static int CTL_READ_HANDLER(desc)(void *ctx, enum ctl_query_source source, void *arg, struct ctl_indexes *indexes) { PMEMobjpool *pop = ctx; uint8_t id; struct ctl_index *idx = PMDK_SLIST_FIRST(indexes); ASSERTeq(strcmp(idx->name, "class_id"), 0); if (idx->value < 0 || idx->value >= MAX_ALLOCATION_CLASSES) { ERR("class id outside of the allowed range"); errno = ERANGE; return -1; } id = (uint8_t)idx->value; struct alloc_class *c = alloc_class_by_id( heap_alloc_classes(&pop->heap), id); if (c == NULL) { ERR("class with the given id does not exist"); errno = ENOENT; return -1; } enum pobj_header_type user_htype = MAX_POBJ_HEADER_TYPES; switch (c->header_type) { case HEADER_LEGACY: user_htype = POBJ_HEADER_LEGACY; break; case HEADER_COMPACT: user_htype = POBJ_HEADER_COMPACT; break; case HEADER_NONE: user_htype = POBJ_HEADER_NONE; break; default: ASSERT(0); /* unreachable */ break; } struct pobj_alloc_class_desc *p = arg; p->units_per_block = c->type == CLASS_HUGE ? 0 : c->rdsc.nallocs; p->header_type = user_htype; p->unit_size = c->unit_size; p->class_id = c->id; p->alignment = c->flags & CHUNK_FLAG_ALIGNED ? c->rdsc.alignment : 0; return 0; } static const struct ctl_argument CTL_ARG(desc) = { .dest_size = sizeof(struct pobj_alloc_class_desc), .parsers = { CTL_ARG_PARSER_STRUCT(struct pobj_alloc_class_desc, unit_size, ctl_arg_integer), CTL_ARG_PARSER_STRUCT(struct pobj_alloc_class_desc, alignment, ctl_arg_integer), CTL_ARG_PARSER_STRUCT(struct pobj_alloc_class_desc, units_per_block, ctl_arg_integer), CTL_ARG_PARSER_STRUCT(struct pobj_alloc_class_desc, header_type, pmalloc_header_type_parser), CTL_ARG_PARSER_END } }; static const struct ctl_node CTL_NODE(class_id)[] = { CTL_LEAF_RW(desc), CTL_NODE_END }; static const struct ctl_node CTL_NODE(new)[] = { CTL_LEAF_WO(desc), CTL_NODE_END }; static const struct ctl_node CTL_NODE(alloc_class)[] = { CTL_INDEXED(class_id), CTL_INDEXED(new), CTL_NODE_END }; /* * CTL_RUNNABLE_HANDLER(extend) -- extends the pool by the given size */ static int CTL_RUNNABLE_HANDLER(extend)(void *ctx, enum ctl_query_source source, void *arg, struct ctl_indexes *indexes) { PMEMobjpool *pop = ctx; ssize_t arg_in = *(ssize_t *)arg; if (arg_in < (ssize_t)PMEMOBJ_MIN_PART) { ERR("incorrect size for extend, must be larger than %" PRIu64, PMEMOBJ_MIN_PART); return -1; } struct palloc_heap *heap = &pop->heap; struct bucket *defb = heap_bucket_acquire(heap, DEFAULT_ALLOC_CLASS_ID, HEAP_ARENA_PER_THREAD); int ret = heap_extend(heap, defb, (size_t)arg_in) < 0 ? 
-1 : 0; heap_bucket_release(heap, defb); return ret; } /* * CTL_READ_HANDLER(granularity) -- reads the current heap grow size */ static int CTL_READ_HANDLER(granularity)(void *ctx, enum ctl_query_source source, void *arg, struct ctl_indexes *indexes) { PMEMobjpool *pop = ctx; ssize_t *arg_out = arg; *arg_out = (ssize_t)pop->heap.growsize; return 0; } /* * CTL_WRITE_HANDLER(granularity) -- changes the heap grow size */ static int CTL_WRITE_HANDLER(granularity)(void *ctx, enum ctl_query_source source, void *arg, struct ctl_indexes *indexes) { PMEMobjpool *pop = ctx; ssize_t arg_in = *(int *)arg; if (arg_in != 0 && arg_in < (ssize_t)PMEMOBJ_MIN_PART) { ERR("incorrect grow size, must be 0 or larger than %" PRIu64, PMEMOBJ_MIN_PART); return -1; } pop->heap.growsize = (size_t)arg_in; return 0; } static const struct ctl_argument CTL_ARG(granularity) = CTL_ARG_LONG_LONG; /* * CTL_READ_HANDLER(total) -- reads a number of the arenas */ static int CTL_READ_HANDLER(total)(void *ctx, enum ctl_query_source source, void *arg, struct ctl_indexes *indexes) { PMEMobjpool *pop = ctx; unsigned *narenas = arg; *narenas = heap_get_narenas_total(&pop->heap); return 0; } /* * CTL_READ_HANDLER(max) -- reads a max number of the arenas */ static int CTL_READ_HANDLER(max)(void *ctx, enum ctl_query_source source, void *arg, struct ctl_indexes *indexes) { PMEMobjpool *pop = ctx; unsigned *max = arg; *max = heap_get_narenas_max(&pop->heap); return 0; } /* * CTL_WRITE_HANDLER(max) -- write a max number of the arenas */ static int CTL_WRITE_HANDLER(max)(void *ctx, enum ctl_query_source source, void *arg, struct ctl_indexes *indexes) { PMEMobjpool *pop = ctx; unsigned size = *(unsigned *)arg; int ret = heap_set_narenas_max(&pop->heap, size); if (ret) { LOG(1, "cannot change max arena number"); return -1; } return 0; } static const struct ctl_argument CTL_ARG(max) = CTL_ARG_LONG_LONG; /* * CTL_READ_HANDLER(automatic) -- reads a number of the automatic arenas */ static int CTL_READ_HANDLER(automatic, narenas)(void *ctx, enum ctl_query_source source, void *arg, struct ctl_indexes *indexes) { PMEMobjpool *pop = ctx; unsigned *narenas = arg; *narenas = heap_get_narenas_auto(&pop->heap); return 0; } /* * CTL_READ_HANDLER(arena_id) -- reads the id of the arena * assigned to the calling thread */ static int CTL_READ_HANDLER(arena_id)(void *ctx, enum ctl_query_source source, void *arg, struct ctl_indexes *indexes) { PMEMobjpool *pop = ctx; unsigned *arena_id = arg; *arena_id = heap_get_thread_arena_id(&pop->heap); return 0; } /* * CTL_WRITE_HANDLER(arena_id) -- assigns the arena to the calling thread */ static int CTL_WRITE_HANDLER(arena_id)(void *ctx, enum ctl_query_source source, void *arg, struct ctl_indexes *indexes) { PMEMobjpool *pop = ctx; unsigned arena_id = *(unsigned *)arg; unsigned narenas = heap_get_narenas_total(&pop->heap); /* * check if index is not bigger than number of arenas * or if it is not equal zero */ if (arena_id < 1 || arena_id > narenas) { LOG(1, "arena id outside of the allowed range: <1,%u>", narenas); errno = ERANGE; return -1; } heap_set_arena_thread(&pop->heap, arena_id); return 0; } static const struct ctl_argument CTL_ARG(arena_id) = CTL_ARG_LONG_LONG; /* * CTL_WRITE_HANDLER(automatic) -- updates automatic status of the arena */ static int CTL_WRITE_HANDLER(automatic)(void *ctx, enum ctl_query_source source, void *arg, struct ctl_indexes *indexes) { PMEMobjpool *pop = ctx; int arg_in = *(int *)arg; unsigned arena_id; struct ctl_index *idx = PMDK_SLIST_FIRST(indexes); ASSERTeq(strcmp(idx->name, 
"arena_id"), 0); arena_id = (unsigned)idx->value; unsigned narenas = heap_get_narenas_total(&pop->heap); /* * check if index is not bigger than number of arenas * or if it is not equal zero */ if (arena_id < 1 || arena_id > narenas) { LOG(1, "arena id outside of the allowed range: <1,%u>", narenas); errno = ERANGE; return -1; } if (arg_in != 0 && arg_in != 1) { LOG(1, "incorrect arena state, must be 0 or 1"); return -1; } return heap_set_arena_auto(&pop->heap, arena_id, arg_in); } /* * CTL_READ_HANDLER(automatic) -- reads automatic status of the arena */ static int CTL_READ_HANDLER(automatic)(void *ctx, enum ctl_query_source source, void *arg, struct ctl_indexes *indexes) { PMEMobjpool *pop = ctx; int *arg_out = arg; unsigned arena_id; struct ctl_index *idx = PMDK_SLIST_FIRST(indexes); ASSERTeq(strcmp(idx->name, "arena_id"), 0); arena_id = (unsigned)idx->value; unsigned narenas = heap_get_narenas_total(&pop->heap); /* * check if index is not bigger than number of arenas * or if it is not equal zero */ if (arena_id < 1 || arena_id > narenas) { LOG(1, "arena id outside of the allowed range: <1,%u>", narenas); errno = ERANGE; return -1; } *arg_out = heap_get_arena_auto(&pop->heap, arena_id); return 0; } static struct ctl_argument CTL_ARG(automatic) = CTL_ARG_BOOLEAN; static const struct ctl_node CTL_NODE(size)[] = { CTL_LEAF_RW(granularity), CTL_LEAF_RUNNABLE(extend), CTL_NODE_END }; /* * CTL_READ_HANDLER(size) -- reads usable size of specified arena */ static int CTL_READ_HANDLER(size)(void *ctx, enum ctl_query_source source, void *arg, struct ctl_indexes *indexes) { PMEMobjpool *pop = ctx; unsigned arena_id; unsigned narenas; size_t *arena_size = arg; struct ctl_index *idx = PMDK_SLIST_FIRST(indexes); ASSERTeq(strcmp(idx->name, "arena_id"), 0); /* take index of arena */ arena_id = (unsigned)idx->value; /* take number of arenas */ narenas = heap_get_narenas_total(&pop->heap); /* * check if index is not bigger than number of arenas * or if it is not equal zero */ if (arena_id < 1 || arena_id > narenas) { LOG(1, "arena id outside of the allowed range: <1,%u>", narenas); errno = ERANGE; return -1; } /* take buckets for arena */ struct bucket **buckets; buckets = heap_get_arena_buckets(&pop->heap, arena_id); /* calculate number of reservation for arena using buckets */ unsigned size = 0; for (int i = 0; i < MAX_ALLOCATION_CLASSES; ++i) { if (buckets[i] != NULL && buckets[i]->is_active) size += buckets[i]->active_memory_block->m.size_idx; } *arena_size = size * CHUNKSIZE; return 0; } /* * CTL_RUNNABLE_HANDLER(create) -- create new arena in the heap */ static int CTL_RUNNABLE_HANDLER(create)(void *ctx, enum ctl_query_source source, void *arg, struct ctl_indexes *indexes) { PMEMobjpool *pop = ctx; unsigned *arena_id = arg; struct palloc_heap *heap = &pop->heap; int ret = heap_arena_create(heap); if (ret < 0) return -1; *arena_id = (unsigned)ret; return 0; } static const struct ctl_node CTL_NODE(arena_id)[] = { CTL_LEAF_RO(size), CTL_LEAF_RW(automatic), CTL_NODE_END }; static const struct ctl_node CTL_NODE(arena)[] = { CTL_INDEXED(arena_id), CTL_LEAF_RUNNABLE(create), CTL_NODE_END }; static const struct ctl_node CTL_NODE(narenas)[] = { CTL_LEAF_RO(automatic, narenas), CTL_LEAF_RO(total), CTL_LEAF_RW(max), CTL_NODE_END }; static const struct ctl_node CTL_NODE(thread)[] = { CTL_LEAF_RW(arena_id), CTL_NODE_END }; static const struct ctl_node CTL_NODE(heap)[] = { CTL_CHILD(alloc_class), CTL_CHILD(arena), CTL_CHILD(size), CTL_CHILD(thread), CTL_CHILD(narenas), CTL_NODE_END }; /* * pmalloc_ctl_register -- 
registers ctl nodes for "heap" module */ void pmalloc_ctl_register(PMEMobjpool *pop) { CTL_REGISTER_MODULE(pop->ctl, heap); }
18,444
22.114035
79
c
null
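CTL_WRITE_HANDLER(desc) above is reached from application code through the public ctl interface. A hedged sketch of registering a custom allocation class via pmemobj_ctl_set() and the "heap.alloc_class.new.desc" node; the field values and the register_alloc_class wrapper are illustrative, not prescribed by this file.

#include <libpmemobj.h>
#include <stdio.h>

static int
register_alloc_class(PMEMobjpool *pop)
{
	struct pobj_alloc_class_desc desc = {
		.unit_size = 128,		/* block size served by the class */
		.alignment = 0,			/* no extra alignment requirement */
		.units_per_block = 1000,	/* rounded up to full chunks */
		.header_type = POBJ_HEADER_COMPACT,
		.class_id = 0,			/* filled in by the handler */
	};

	/* routed to CTL_WRITE_HANDLER(desc) with an auto-assigned class id */
	if (pmemobj_ctl_set(pop, "heap.alloc_class.new.desc", &desc) != 0) {
		perror("pmemobj_ctl_set");
		return -1;
	}

	printf("registered class %u, %u units per block\n",
	    desc.class_id, desc.units_per_block);

	return 0;
}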
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmemobj/pmemops.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2016-2018, Intel Corporation */ #ifndef LIBPMEMOBJ_PMEMOPS_H #define LIBPMEMOBJ_PMEMOPS_H 1 #include <stddef.h> #include <stdint.h> #include "util.h" #ifdef __cplusplus extern "C" { #endif typedef int (*persist_fn)(void *base, const void *, size_t, unsigned); typedef int (*flush_fn)(void *base, const void *, size_t, unsigned); typedef void (*drain_fn)(void *base); typedef void *(*memcpy_fn)(void *base, void *dest, const void *src, size_t len, unsigned flags); typedef void *(*memmove_fn)(void *base, void *dest, const void *src, size_t len, unsigned flags); typedef void *(*memset_fn)(void *base, void *dest, int c, size_t len, unsigned flags); typedef int (*remote_read_fn)(void *ctx, uintptr_t base, void *dest, void *addr, size_t length); struct pmem_ops { /* for 'master' replica: with or without data replication */ persist_fn persist; /* persist function */ flush_fn flush; /* flush function */ drain_fn drain; /* drain function */ memcpy_fn memcpy; /* persistent memcpy function */ memmove_fn memmove; /* persistent memmove function */ memset_fn memset; /* persistent memset function */ void *base; //char a; //temp var end struct remote_ops { remote_read_fn read; void *ctx; uintptr_t base; } remote; void *device; uint16_t objid; }; static force_inline int pmemops_xpersist(const struct pmem_ops *p_ops, const void *d, size_t s, unsigned flags) { return p_ops->persist(p_ops->base, d, s, flags); } static force_inline void pmemops_persist(const struct pmem_ops *p_ops, const void *d, size_t s) { (void) pmemops_xpersist(p_ops, d, s, 0); } static force_inline int pmemops_xflush(const struct pmem_ops *p_ops, const void *d, size_t s, unsigned flags) { return p_ops->flush(p_ops->base, d, s, flags); } static force_inline void pmemops_flush(const struct pmem_ops *p_ops, const void *d, size_t s) { (void) pmemops_xflush(p_ops, d, s, 0); } static force_inline void pmemops_drain(const struct pmem_ops *p_ops) { p_ops->drain(p_ops->base); } static force_inline void * pmemops_memcpy(const struct pmem_ops *p_ops, void *dest, const void *src, size_t len, unsigned flags) { return p_ops->memcpy(p_ops->base, dest, src, len, flags); } static force_inline void * pmemops_memmove(const struct pmem_ops *p_ops, void *dest, const void *src, size_t len, unsigned flags) { return p_ops->memmove(p_ops->base, dest, src, len, flags); } static force_inline void * pmemops_memset(const struct pmem_ops *p_ops, void *dest, int c, size_t len, unsigned flags) { return p_ops->memset(p_ops->base, dest, c, len, flags); } #ifdef __cplusplus } #endif #endif
2,672
22.866071
80
h
null
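The force_inline wrappers above are how the rest of libpmemobj reaches the persist/flush/memcpy callbacks installed in struct pmem_ops. A hedged sketch of a caller making a field durable through them; struct counter and the helper names are illustrative, only the pmemops_* calls come from this header.

#include <stdint.h>
#include "pmemops.h"

struct counter {
	uint64_t value;
};

/* counter_bump -- store a new value, then make it durable via p_ops */
static void
counter_bump(const struct pmem_ops *p_ops, struct counter *c)
{
	c->value++;

	/* persist == flush + drain on the master replica */
	pmemops_persist(p_ops, &c->value, sizeof(c->value));
}

/* counter_set -- write and persist in one call through the memcpy callback */
static void
counter_set(const struct pmem_ops *p_ops, struct counter *c, uint64_t v)
{
	pmemops_memcpy(p_ops, &c->value, &v, sizeof(v), 0);
}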
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmemobj/sync.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2016-2018, Intel Corporation */ /* * sync.h -- internal to obj synchronization API */ #ifndef LIBPMEMOBJ_SYNC_H #define LIBPMEMOBJ_SYNC_H 1 #include <errno.h> #include <stdint.h> #include "libpmemobj.h" #include "out.h" #include "os_thread.h" #ifdef __cplusplus extern "C" { #endif /* * internal definitions of PMEM-locks */ typedef union padded_pmemmutex { char padding[_POBJ_CL_SIZE]; struct { uint64_t runid; union { os_mutex_t mutex; struct { void *bsd_mutex_p; union padded_pmemmutex *next; } bsd_u; } mutex_u; } pmemmutex; } PMEMmutex_internal; #define PMEMmutex_lock pmemmutex.mutex_u.mutex #define PMEMmutex_bsd_mutex_p pmemmutex.mutex_u.bsd_u.bsd_mutex_p #define PMEMmutex_next pmemmutex.mutex_u.bsd_u.next typedef union padded_pmemrwlock { char padding[_POBJ_CL_SIZE]; struct { uint64_t runid; union { os_rwlock_t rwlock; struct { void *bsd_rwlock_p; union padded_pmemrwlock *next; } bsd_u; } rwlock_u; } pmemrwlock; } PMEMrwlock_internal; #define PMEMrwlock_lock pmemrwlock.rwlock_u.rwlock #define PMEMrwlock_bsd_rwlock_p pmemrwlock.rwlock_u.bsd_u.bsd_rwlock_p #define PMEMrwlock_next pmemrwlock.rwlock_u.bsd_u.next typedef union padded_pmemcond { char padding[_POBJ_CL_SIZE]; struct { uint64_t runid; union { os_cond_t cond; struct { void *bsd_cond_p; union padded_pmemcond *next; } bsd_u; } cond_u; } pmemcond; } PMEMcond_internal; #define PMEMcond_cond pmemcond.cond_u.cond #define PMEMcond_bsd_cond_p pmemcond.cond_u.bsd_u.bsd_cond_p #define PMEMcond_next pmemcond.cond_u.bsd_u.next /* * pmemobj_mutex_lock_nofail -- pmemobj_mutex_lock variant that never * fails from caller perspective. If pmemobj_mutex_lock failed, this function * aborts the program. */ static inline void pmemobj_mutex_lock_nofail(PMEMobjpool *pop, PMEMmutex *mutexp) { int ret = pmemobj_mutex_lock(pop, mutexp); if (ret) { errno = ret; FATAL("!pmemobj_mutex_lock"); } } /* * pmemobj_mutex_unlock_nofail -- pmemobj_mutex_unlock variant that never * fails from caller perspective. If pmemobj_mutex_unlock failed, this function * aborts the program. */ static inline void pmemobj_mutex_unlock_nofail(PMEMobjpool *pop, PMEMmutex *mutexp) { int ret = pmemobj_mutex_unlock(pop, mutexp); if (ret) { errno = ret; FATAL("!pmemobj_mutex_unlock"); } } int pmemobj_mutex_assert_locked(PMEMobjpool *pop, PMEMmutex *mutexp); #ifdef __cplusplus } #endif #endif
2,504
21.168142
79
h
null
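A hedged sketch of the _nofail wrappers declared above guarding a pmem-resident field. struct my_root and counter_bump_locked are illustrative names; pmemobj_persist is the public libpmemobj persist call, and the wrappers abort the program on lock errors, so the critical section needs no error paths of its own.

#include <stdint.h>
#include "sync.h"

struct my_root {
	PMEMmutex lock;
	uint64_t counter;
};

static void
counter_bump_locked(PMEMobjpool *pop, struct my_root *r)
{
	pmemobj_mutex_lock_nofail(pop, &r->lock);

	r->counter++;
	pmemobj_persist(pop, &r->counter, sizeof(r->counter));

	pmemobj_mutex_unlock_nofail(pop, &r->lock);
}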
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmemobj/sync.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2018, Intel Corporation */ /* * sync.c -- persistent memory resident synchronization primitives */ #include <inttypes.h> #include "obj.h" #include "out.h" #include "util.h" #include "sync.h" #include "sys_util.h" #include "util.h" #include "valgrind_internal.h" #ifdef __FreeBSD__ #define RECORD_LOCK(init, type, p) \ if (init) {\ PMEM##type##_internal *head = pop->type##_head;\ while (!util_bool_compare_and_swap64(&pop->type##_head, head,\ p)) {\ head = pop->type##_head;\ }\ p->PMEM##type##_next = head;\ } #else #define RECORD_LOCK(init, type, p) #endif /* * _get_value -- (internal) atomically initialize and return a value. * Returns -1 on error, 0 if the caller is not the value * initializer, 1 if the caller is the value initializer. */ static int _get_value(uint64_t pop_runid, volatile uint64_t *runid, void *value, void *arg, int (*init_value)(void *value, void *arg)) { uint64_t tmp_runid; int initializer = 0; while ((tmp_runid = *runid) != pop_runid) { if (tmp_runid == pop_runid - 1) continue; if (!util_bool_compare_and_swap64(runid, tmp_runid, pop_runid - 1)) continue; initializer = 1; if (init_value(value, arg)) { ERR("error initializing lock"); util_fetch_and_and64(runid, 0); return -1; } if (util_bool_compare_and_swap64(runid, pop_runid - 1, pop_runid) == 0) { ERR("error setting lock runid"); return -1; } } return initializer; } /* * get_mutex -- (internal) atomically initialize, record and return a mutex */ static inline os_mutex_t * get_mutex(PMEMobjpool *pop, PMEMmutex_internal *imp) { if (likely(imp->pmemmutex.runid == pop->run_id)) return &imp->PMEMmutex_lock; volatile uint64_t *runid = &imp->pmemmutex.runid; LOG(5, "PMEMmutex %p pop->run_id %" PRIu64 " pmemmutex.runid %" PRIu64, imp, pop->run_id, *runid); ASSERTeq((uintptr_t)runid % util_alignof(uint64_t), 0); COMPILE_ERROR_ON(sizeof(PMEMmutex) != sizeof(PMEMmutex_internal)); COMPILE_ERROR_ON(util_alignof(PMEMmutex) != util_alignof(os_mutex_t)); VALGRIND_REMOVE_PMEM_MAPPING(imp, _POBJ_CL_SIZE); int initializer = _get_value(pop->run_id, runid, &imp->PMEMmutex_lock, NULL, (void *)os_mutex_init); if (initializer == -1) { return NULL; } RECORD_LOCK(initializer, mutex, imp); return &imp->PMEMmutex_lock; } /* * get_rwlock -- (internal) atomically initialize, record and return a rwlock */ static inline os_rwlock_t * get_rwlock(PMEMobjpool *pop, PMEMrwlock_internal *irp) { if (likely(irp->pmemrwlock.runid == pop->run_id)) return &irp->PMEMrwlock_lock; volatile uint64_t *runid = &irp->pmemrwlock.runid; LOG(5, "PMEMrwlock %p pop->run_id %"\ PRIu64 " pmemrwlock.runid %" PRIu64, irp, pop->run_id, *runid); ASSERTeq((uintptr_t)runid % util_alignof(uint64_t), 0); COMPILE_ERROR_ON(sizeof(PMEMrwlock) != sizeof(PMEMrwlock_internal)); COMPILE_ERROR_ON(util_alignof(PMEMrwlock) != util_alignof(os_rwlock_t)); VALGRIND_REMOVE_PMEM_MAPPING(irp, _POBJ_CL_SIZE); int initializer = _get_value(pop->run_id, runid, &irp->PMEMrwlock_lock, NULL, (void *)os_rwlock_init); if (initializer == -1) { return NULL; } RECORD_LOCK(initializer, rwlock, irp); return &irp->PMEMrwlock_lock; } /* * get_cond -- (internal) atomically initialize, record and return a * condition variable */ static inline os_cond_t * get_cond(PMEMobjpool *pop, PMEMcond_internal *icp) { if (likely(icp->pmemcond.runid == pop->run_id)) return &icp->PMEMcond_cond; volatile uint64_t *runid = &icp->pmemcond.runid; LOG(5, "PMEMcond %p pop->run_id %" PRIu64 " pmemcond.runid %" PRIu64, icp, pop->run_id, *runid); ASSERTeq((uintptr_t)runid % 
util_alignof(uint64_t), 0); COMPILE_ERROR_ON(sizeof(PMEMcond) != sizeof(PMEMcond_internal)); COMPILE_ERROR_ON(util_alignof(PMEMcond) != util_alignof(os_cond_t)); VALGRIND_REMOVE_PMEM_MAPPING(icp, _POBJ_CL_SIZE); int initializer = _get_value(pop->run_id, runid, &icp->PMEMcond_cond, NULL, (void *)os_cond_init); if (initializer == -1) { return NULL; } RECORD_LOCK(initializer, cond, icp); return &icp->PMEMcond_cond; } /* * pmemobj_mutex_zero -- zero-initialize a pmem resident mutex * * This function is not MT safe. */ void pmemobj_mutex_zero(PMEMobjpool *pop, PMEMmutex *mutexp) { LOG(3, "pop %p mutex %p", pop, mutexp); ASSERTeq(pop, pmemobj_pool_by_ptr(mutexp)); PMEMmutex_internal *mutexip = (PMEMmutex_internal *)mutexp; mutexip->pmemmutex.runid = 0; pmemops_persist(&pop->p_ops, &mutexip->pmemmutex.runid, sizeof(mutexip->pmemmutex.runid)); } /* * pmemobj_mutex_lock -- lock a pmem resident mutex * * Atomically initializes and locks a PMEMmutex, otherwise behaves as its * POSIX counterpart. */ int pmemobj_mutex_lock(PMEMobjpool *pop, PMEMmutex *mutexp) { LOG(3, "pop %p mutex %p", pop, mutexp); ASSERTeq(pop, pmemobj_pool_by_ptr(mutexp)); PMEMmutex_internal *mutexip = (PMEMmutex_internal *)mutexp; os_mutex_t *mutex = get_mutex(pop, mutexip); if (mutex == NULL) return EINVAL; ASSERTeq((uintptr_t)mutex % util_alignof(os_mutex_t), 0); return os_mutex_lock(mutex); } /* * pmemobj_mutex_assert_locked -- checks whether mutex is locked. * * Returns 0 when mutex is locked. */ int pmemobj_mutex_assert_locked(PMEMobjpool *pop, PMEMmutex *mutexp) { LOG(3, "pop %p mutex %p", pop, mutexp); ASSERTeq(pop, pmemobj_pool_by_ptr(mutexp)); PMEMmutex_internal *mutexip = (PMEMmutex_internal *)mutexp; os_mutex_t *mutex = get_mutex(pop, mutexip); if (mutex == NULL) return EINVAL; ASSERTeq((uintptr_t)mutex % util_alignof(os_mutex_t), 0); int ret = os_mutex_trylock(mutex); if (ret == EBUSY) return 0; if (ret == 0) { util_mutex_unlock(mutex); /* * There's no good error code for this case. EINVAL is used for * something else here. */ return ENODEV; } return ret; } /* * pmemobj_mutex_timedlock -- lock a pmem resident mutex * * Atomically initializes and locks a PMEMmutex, otherwise behaves as its * POSIX counterpart. */ int pmemobj_mutex_timedlock(PMEMobjpool *pop, PMEMmutex *__restrict mutexp, const struct timespec *__restrict abs_timeout) { LOG(3, "pop %p mutex %p", pop, mutexp); ASSERTeq(pop, pmemobj_pool_by_ptr(mutexp)); PMEMmutex_internal *mutexip = (PMEMmutex_internal *)mutexp; os_mutex_t *mutex = get_mutex(pop, mutexip); if (mutex == NULL) return EINVAL; ASSERTeq((uintptr_t)mutex % util_alignof(os_mutex_t), 0); return os_mutex_timedlock(mutex, abs_timeout); } /* * pmemobj_mutex_trylock -- trylock a pmem resident mutex * * Atomically initializes and trylocks a PMEMmutex, otherwise behaves as its * POSIX counterpart. 
*/ int pmemobj_mutex_trylock(PMEMobjpool *pop, PMEMmutex *mutexp) { LOG(3, "pop %p mutex %p", pop, mutexp); ASSERTeq(pop, pmemobj_pool_by_ptr(mutexp)); PMEMmutex_internal *mutexip = (PMEMmutex_internal *)mutexp; os_mutex_t *mutex = get_mutex(pop, mutexip); if (mutex == NULL) return EINVAL; ASSERTeq((uintptr_t)mutex % util_alignof(os_mutex_t), 0); return os_mutex_trylock(mutex); } /* * pmemobj_mutex_unlock -- unlock a pmem resident mutex */ int pmemobj_mutex_unlock(PMEMobjpool *pop, PMEMmutex *mutexp) { LOG(3, "pop %p mutex %p", pop, mutexp); ASSERTeq(pop, pmemobj_pool_by_ptr(mutexp)); /* XXX potential performance improvement - move GET to debug version */ PMEMmutex_internal *mutexip = (PMEMmutex_internal *)mutexp; os_mutex_t *mutex = get_mutex(pop, mutexip); if (mutex == NULL) return EINVAL; ASSERTeq((uintptr_t)mutex % util_alignof(os_mutex_t), 0); return os_mutex_unlock(mutex); } /* * pmemobj_rwlock_zero -- zero-initialize a pmem resident rwlock * * This function is not MT safe. */ void pmemobj_rwlock_zero(PMEMobjpool *pop, PMEMrwlock *rwlockp) { LOG(3, "pop %p rwlock %p", pop, rwlockp); ASSERTeq(pop, pmemobj_pool_by_ptr(rwlockp)); PMEMrwlock_internal *rwlockip = (PMEMrwlock_internal *)rwlockp; rwlockip->pmemrwlock.runid = 0; pmemops_persist(&pop->p_ops, &rwlockip->pmemrwlock.runid, sizeof(rwlockip->pmemrwlock.runid)); } /* * pmemobj_rwlock_rdlock -- rdlock a pmem resident mutex * * Atomically initializes and rdlocks a PMEMrwlock, otherwise behaves as its * POSIX counterpart. */ int pmemobj_rwlock_rdlock(PMEMobjpool *pop, PMEMrwlock *rwlockp) { LOG(3, "pop %p rwlock %p", pop, rwlockp); ASSERTeq(pop, pmemobj_pool_by_ptr(rwlockp)); PMEMrwlock_internal *rwlockip = (PMEMrwlock_internal *)rwlockp; os_rwlock_t *rwlock = get_rwlock(pop, rwlockip); if (rwlock == NULL) return EINVAL; ASSERTeq((uintptr_t)rwlock % util_alignof(os_rwlock_t), 0); return os_rwlock_rdlock(rwlock); } /* * pmemobj_rwlock_wrlock -- wrlock a pmem resident mutex * * Atomically initializes and wrlocks a PMEMrwlock, otherwise behaves as its * POSIX counterpart. */ int pmemobj_rwlock_wrlock(PMEMobjpool *pop, PMEMrwlock *rwlockp) { LOG(3, "pop %p rwlock %p", pop, rwlockp); ASSERTeq(pop, pmemobj_pool_by_ptr(rwlockp)); PMEMrwlock_internal *rwlockip = (PMEMrwlock_internal *)rwlockp; os_rwlock_t *rwlock = get_rwlock(pop, rwlockip); if (rwlock == NULL) return EINVAL; ASSERTeq((uintptr_t)rwlock % util_alignof(os_rwlock_t), 0); return os_rwlock_wrlock(rwlock); } /* * pmemobj_rwlock_timedrdlock -- timedrdlock a pmem resident mutex * * Atomically initializes and timedrdlocks a PMEMrwlock, otherwise behaves as * its POSIX counterpart. */ int pmemobj_rwlock_timedrdlock(PMEMobjpool *pop, PMEMrwlock *__restrict rwlockp, const struct timespec *__restrict abs_timeout) { LOG(3, "pop %p rwlock %p timeout sec %ld nsec %ld", pop, rwlockp, abs_timeout->tv_sec, abs_timeout->tv_nsec); ASSERTeq(pop, pmemobj_pool_by_ptr(rwlockp)); PMEMrwlock_internal *rwlockip = (PMEMrwlock_internal *)rwlockp; os_rwlock_t *rwlock = get_rwlock(pop, rwlockip); if (rwlock == NULL) return EINVAL; ASSERTeq((uintptr_t)rwlock % util_alignof(os_rwlock_t), 0); return os_rwlock_timedrdlock(rwlock, abs_timeout); } /* * pmemobj_rwlock_timedwrlock -- timedwrlock a pmem resident mutex * * Atomically initializes and timedwrlocks a PMEMrwlock, otherwise behaves as * its POSIX counterpart. 
*/ int pmemobj_rwlock_timedwrlock(PMEMobjpool *pop, PMEMrwlock *__restrict rwlockp, const struct timespec *__restrict abs_timeout) { LOG(3, "pop %p rwlock %p timeout sec %ld nsec %ld", pop, rwlockp, abs_timeout->tv_sec, abs_timeout->tv_nsec); ASSERTeq(pop, pmemobj_pool_by_ptr(rwlockp)); PMEMrwlock_internal *rwlockip = (PMEMrwlock_internal *)rwlockp; os_rwlock_t *rwlock = get_rwlock(pop, rwlockip); if (rwlock == NULL) return EINVAL; ASSERTeq((uintptr_t)rwlock % util_alignof(os_rwlock_t), 0); return os_rwlock_timedwrlock(rwlock, abs_timeout); } /* * pmemobj_rwlock_tryrdlock -- tryrdlock a pmem resident mutex * * Atomically initializes and tryrdlocks a PMEMrwlock, otherwise behaves as its * POSIX counterpart. */ int pmemobj_rwlock_tryrdlock(PMEMobjpool *pop, PMEMrwlock *rwlockp) { LOG(3, "pop %p rwlock %p", pop, rwlockp); ASSERTeq(pop, pmemobj_pool_by_ptr(rwlockp)); PMEMrwlock_internal *rwlockip = (PMEMrwlock_internal *)rwlockp; os_rwlock_t *rwlock = get_rwlock(pop, rwlockip); if (rwlock == NULL) return EINVAL; ASSERTeq((uintptr_t)rwlock % util_alignof(os_rwlock_t), 0); return os_rwlock_tryrdlock(rwlock); } /* * pmemobj_rwlock_trywrlock -- trywrlock a pmem resident mutex * * Atomically initializes and trywrlocks a PMEMrwlock, otherwise behaves as its * POSIX counterpart. */ int pmemobj_rwlock_trywrlock(PMEMobjpool *pop, PMEMrwlock *rwlockp) { LOG(3, "pop %p rwlock %p", pop, rwlockp); ASSERTeq(pop, pmemobj_pool_by_ptr(rwlockp)); PMEMrwlock_internal *rwlockip = (PMEMrwlock_internal *)rwlockp; os_rwlock_t *rwlock = get_rwlock(pop, rwlockip); if (rwlock == NULL) return EINVAL; ASSERTeq((uintptr_t)rwlock % util_alignof(os_rwlock_t), 0); return os_rwlock_trywrlock(rwlock); } /* * pmemobj_rwlock_unlock -- unlock a pmem resident rwlock */ int pmemobj_rwlock_unlock(PMEMobjpool *pop, PMEMrwlock *rwlockp) { LOG(3, "pop %p rwlock %p", pop, rwlockp); ASSERTeq(pop, pmemobj_pool_by_ptr(rwlockp)); /* XXX potential performance improvement - move GET to debug version */ PMEMrwlock_internal *rwlockip = (PMEMrwlock_internal *)rwlockp; os_rwlock_t *rwlock = get_rwlock(pop, rwlockip); if (rwlock == NULL) return EINVAL; ASSERTeq((uintptr_t)rwlock % util_alignof(os_rwlock_t), 0); return os_rwlock_unlock(rwlock); } /* * pmemobj_cond_zero -- zero-initialize a pmem resident condition variable * * This function is not MT safe. */ void pmemobj_cond_zero(PMEMobjpool *pop, PMEMcond *condp) { LOG(3, "pop %p cond %p", pop, condp); ASSERTeq(pop, pmemobj_pool_by_ptr(condp)); PMEMcond_internal *condip = (PMEMcond_internal *)condp; condip->pmemcond.runid = 0; pmemops_persist(&pop->p_ops, &condip->pmemcond.runid, sizeof(condip->pmemcond.runid)); } /* * pmemobj_cond_broadcast -- broadcast a pmem resident condition variable * * Atomically initializes and broadcast a PMEMcond, otherwise behaves as its * POSIX counterpart. */ int pmemobj_cond_broadcast(PMEMobjpool *pop, PMEMcond *condp) { LOG(3, "pop %p cond %p", pop, condp); ASSERTeq(pop, pmemobj_pool_by_ptr(condp)); PMEMcond_internal *condip = (PMEMcond_internal *)condp; os_cond_t *cond = get_cond(pop, condip); if (cond == NULL) return EINVAL; ASSERTeq((uintptr_t)cond % util_alignof(os_cond_t), 0); return os_cond_broadcast(cond); } /* * pmemobj_cond_signal -- signal a pmem resident condition variable * * Atomically initializes and signal a PMEMcond, otherwise behaves as its * POSIX counterpart. 
*/ int pmemobj_cond_signal(PMEMobjpool *pop, PMEMcond *condp) { LOG(3, "pop %p cond %p", pop, condp); ASSERTeq(pop, pmemobj_pool_by_ptr(condp)); PMEMcond_internal *condip = (PMEMcond_internal *)condp; os_cond_t *cond = get_cond(pop, condip); if (cond == NULL) return EINVAL; ASSERTeq((uintptr_t)cond % util_alignof(os_cond_t), 0); return os_cond_signal(cond); } /* * pmemobj_cond_timedwait -- timedwait on a pmem resident condition variable * * Atomically initializes and timedwait on a PMEMcond, otherwise behaves as its * POSIX counterpart. */ int pmemobj_cond_timedwait(PMEMobjpool *pop, PMEMcond *__restrict condp, PMEMmutex *__restrict mutexp, const struct timespec *__restrict abs_timeout) { LOG(3, "pop %p cond %p mutex %p abstime sec %ld nsec %ld", pop, condp, mutexp, abs_timeout->tv_sec, abs_timeout->tv_nsec); ASSERTeq(pop, pmemobj_pool_by_ptr(mutexp)); ASSERTeq(pop, pmemobj_pool_by_ptr(condp)); PMEMcond_internal *condip = (PMEMcond_internal *)condp; PMEMmutex_internal *mutexip = (PMEMmutex_internal *)mutexp; os_cond_t *cond = get_cond(pop, condip); os_mutex_t *mutex = get_mutex(pop, mutexip); if ((cond == NULL) || (mutex == NULL)) return EINVAL; ASSERTeq((uintptr_t)mutex % util_alignof(os_mutex_t), 0); ASSERTeq((uintptr_t)cond % util_alignof(os_cond_t), 0); return os_cond_timedwait(cond, mutex, abs_timeout); } /* * pmemobj_cond_wait -- wait on a pmem resident condition variable * * Atomically initializes and wait on a PMEMcond, otherwise behaves as its * POSIX counterpart. */ int pmemobj_cond_wait(PMEMobjpool *pop, PMEMcond *condp, PMEMmutex *__restrict mutexp) { LOG(3, "pop %p cond %p mutex %p", pop, condp, mutexp); ASSERTeq(pop, pmemobj_pool_by_ptr(mutexp)); ASSERTeq(pop, pmemobj_pool_by_ptr(condp)); PMEMcond_internal *condip = (PMEMcond_internal *)condp; PMEMmutex_internal *mutexip = (PMEMmutex_internal *)mutexp; os_cond_t *cond = get_cond(pop, condip); os_mutex_t *mutex = get_mutex(pop, mutexip); if ((cond == NULL) || (mutex == NULL)) return EINVAL; ASSERTeq((uintptr_t)mutex % util_alignof(os_mutex_t), 0); ASSERTeq((uintptr_t)cond % util_alignof(os_cond_t), 0); return os_cond_wait(cond, mutex); } /* * pmemobj_volatile -- atomically initialize, record and return a * generic value */ void * pmemobj_volatile(PMEMobjpool *pop, struct pmemvlt *vlt, void *ptr, size_t size, int (*constr)(void *ptr, void *arg), void *arg) { LOG(3, "pop %p vlt %p ptr %p constr %p arg %p", pop, vlt, ptr, constr, arg); if (likely(vlt->runid == pop->run_id)) return ptr; VALGRIND_REMOVE_PMEM_MAPPING(ptr, size); VALGRIND_ADD_TO_TX(vlt, sizeof(*vlt)); if (_get_value(pop->run_id, &vlt->runid, ptr, arg, constr) < 0) { VALGRIND_REMOVE_FROM_TX(vlt, sizeof(*vlt)); return NULL; } VALGRIND_REMOVE_FROM_TX(vlt, sizeof(*vlt)); VALGRIND_SET_CLEAN(vlt, sizeof(*vlt)); return ptr; }
16,501
24.664075
80
c
null
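The locking wrappers above lazily (re)initialize pmem-resident locks keyed on the pool's run_id, so an application only needs to embed the lock in its persistent layout and call the pmemobj_mutex_* wrappers. A minimal usage sketch, assuming a hypothetical layout name, root struct and counter field (none of these appear in the sources above):

/*
 * Minimal sketch (not part of the library): a pmem-resident PMEMmutex
 * guarding a persistent counter. The layout name "lock_example" and
 * struct my_root are hypothetical. A freshly zeroed PMEMmutex is enough;
 * the first pmemobj_mutex_lock() of a run initializes it.
 */
#include <stdint.h>
#include <libpmemobj.h>

POBJ_LAYOUT_BEGIN(lock_example);
POBJ_LAYOUT_ROOT(lock_example, struct my_root);
POBJ_LAYOUT_END(lock_example);

struct my_root {
	PMEMmutex lock;		/* lazily initialized per run_id */
	uint64_t counter;
};

static int
bump_counter(PMEMobjpool *pop)
{
	TOID(struct my_root) root = POBJ_ROOT(pop, struct my_root);
	struct my_root *rp = D_RW(root);

	/* initializes the mutex for this run if needed, then locks it */
	if (pmemobj_mutex_lock(pop, &rp->lock) != 0)
		return -1;

	rp->counter++;
	pmemobj_persist(pop, &rp->counter, sizeof(rp->counter));

	return pmemobj_mutex_unlock(pop, &rp->lock);
}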
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmemobj/lane.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2019, Intel Corporation */ /* * lane.h -- internal definitions for lanes */ #ifndef LIBPMEMOBJ_LANE_H #define LIBPMEMOBJ_LANE_H 1 #include <stdint.h> #include "ulog.h" #include "libpmemobj.h" #ifdef __cplusplus extern "C" { #endif /* * Distance between lanes used by threads required to prevent threads from * false sharing part of lanes array. Used if properly spread lanes are * available. Otherwise less spread out lanes would be used. */ #define LANE_JUMP (64 / sizeof(uint64_t)) /* * Number of times the algorithm will try to reacquire the primary lane for the * thread. If this threshold is exceeded, a new primary lane is selected for the * thread. */ #define LANE_PRIMARY_ATTEMPTS 128 #define RLANE_DEFAULT 0 #define LANE_TOTAL_SIZE 3072 /* 3 * 1024 (sum of 3 old lane sections) */ /* * We have 3 kilobytes to distribute. * The smallest capacity is needed for the internal redo log for which we can * accurately calculate the maximum number of occupied space: 48 bytes, * 3 times sizeof(struct ulog_entry_val). One for bitmap OR, second for bitmap * AND, third for modification of the destination pointer. For future needs, * this has been bumped up to 12 ulog entries. * * The remaining part has to be split between transactional redo and undo logs, * and since by far the most space consuming operations are transactional * snapshots, most of the space, 2 kilobytes, is assigned to the undo log. * After that, the remainder, 640 bytes, or 40 ulog entries, is left for the * transactional redo logs. * Thanks to this distribution, all small and medium transactions should be * entirely performed without allocating any additional metadata. * * These values must be cacheline size aligned to be used for ulogs. Therefore * they are parametrized for the size of the struct ulog changes between * platforms. */ #define LANE_UNDO_SIZE (LANE_TOTAL_SIZE \ - LANE_REDO_EXTERNAL_SIZE \ - LANE_REDO_INTERNAL_SIZE \ - 3 * sizeof(struct ulog)) /* 2048 for 64B ulog */ #define LANE_REDO_EXTERNAL_SIZE ALIGN_UP(704 - sizeof(struct ulog), \ CACHELINE_SIZE) /* 640 for 64B ulog */ #define LANE_REDO_INTERNAL_SIZE ALIGN_UP(256 - sizeof(struct ulog), \ CACHELINE_SIZE) /* 192 for 64B ulog */ struct lane_layout { /* * Redo log for self-contained and 'one-shot' allocator operations. * Cannot be extended. */ struct ULOG(LANE_REDO_INTERNAL_SIZE) internal; /* * Redo log for large operations/transactions. * Can be extended by the use of internal ulog. */ struct ULOG(LANE_REDO_EXTERNAL_SIZE) external; /* * Undo log for snapshots done in a transaction. * Can be extended/shrunk by the use of internal ulog. */ struct ULOG(LANE_UNDO_SIZE) undo; }; struct lane { struct lane_layout *layout; /* pointer to persistent layout */ struct operation_context *internal; /* context for internal ulog */ struct operation_context *external; /* context for external ulog */ struct operation_context *undo; /* context for undo ulog */ }; struct lane_descriptor { /* * Number of lanes available at runtime must be <= total number of lanes * available in the pool. Number of lanes can be limited by shortage of * other resources e.g. available RNIC's submission queue sizes. 
*/ unsigned runtime_nlanes; unsigned next_lane_idx; uint64_t *lane_locks; struct lane *lane; }; typedef int (*section_layout_op)(PMEMobjpool *pop, void *data, unsigned length); typedef void *(*section_constr)(PMEMobjpool *pop, void *data); typedef void (*section_destr)(PMEMobjpool *pop, void *rt); typedef int (*section_global_op)(PMEMobjpool *pop); struct section_operations { section_constr construct_rt; section_destr destroy_rt; section_layout_op check; section_layout_op recover; section_global_op boot; section_global_op cleanup; }; struct lane_info { uint64_t pop_uuid_lo; uint64_t lane_idx; unsigned long nest_count; /* * The index of the primary lane for the thread. A thread will always * try to acquire the primary lane first, and only if that fails it will * look for a different available lane. */ uint64_t primary; int primary_attempts; struct lane_info *prev, *next; }; void lane_info_boot(void); void lane_info_destroy(void); void lane_init_data(PMEMobjpool *pop); int lane_boot(PMEMobjpool *pop); void lane_cleanup(PMEMobjpool *pop); int lane_recover_and_section_boot(PMEMobjpool *pop); int lane_section_cleanup(PMEMobjpool *pop); int lane_check(PMEMobjpool *pop); unsigned lane_hold(PMEMobjpool *pop, struct lane **lane); void lane_release(PMEMobjpool *pop); #ifdef __cplusplus } #endif #endif
4,652
30.02
80
h
null
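The size macros in lane.h above split a 3 KiB lane into three ulogs; the numbers quoted in its comments come out of a short calculation. A small self-checking sketch of that arithmetic, assuming sizeof(struct ulog) == 64 and a 64-byte cacheline (the values the comments themselves quote):

/*
 * Worked example of the lane size split for a 64-byte ulog header.
 * The EX_* names are local to this sketch.
 */
#include <assert.h>

#define EX_CACHELINE		64
#define EX_ULOG_HDR		64	/* assumed sizeof(struct ulog) */
#define EX_ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

#define EX_TOTAL		3072	/* LANE_TOTAL_SIZE */
#define EX_REDO_EXTERNAL	EX_ALIGN_UP(704 - EX_ULOG_HDR, EX_CACHELINE)	/* 640 */
#define EX_REDO_INTERNAL	EX_ALIGN_UP(256 - EX_ULOG_HDR, EX_CACHELINE)	/* 192 */
#define EX_UNDO			(EX_TOTAL - EX_REDO_EXTERNAL - \
				EX_REDO_INTERNAL - 3 * EX_ULOG_HDR)	/* 2048 */

int
main(void)
{
	assert(EX_REDO_EXTERNAL == 640);
	assert(EX_REDO_INTERNAL == 192);
	assert(EX_UNDO == 2048);

	/* the three data areas plus three headers fill the lane exactly */
	assert(EX_UNDO + EX_REDO_EXTERNAL + EX_REDO_INTERNAL +
	    3 * EX_ULOG_HDR == EX_TOTAL);
	return 0;
}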
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmemobj/ulog.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2020, Intel Corporation */ /* * ulog.h -- unified log public interface */ #ifndef LIBPMEMOBJ_ULOG_H #define LIBPMEMOBJ_ULOG_H 1 #include <stddef.h> #include <stdint.h> #include <time.h> #include "vec.h" #include "pmemops.h" #include<x86intrin.h> ////cmd write optimization /* struct ulog_cmd_packet{ uint32_t ulog_offset : 32; uint32_t base_offset : 32; uint32_t src : 32; uint32_t size : 32; }; */ struct ulog_entry_base { uint64_t offset; /* offset with operation type flag */ }; /* * ulog_entry_val -- log entry */ struct ulog_entry_val { struct ulog_entry_base base; uint64_t value; /* value to be applied */ }; /* * ulog_entry_buf - ulog buffer entry */ struct ulog_entry_buf { struct ulog_entry_base base; /* offset with operation type flag */ uint64_t checksum; /* checksum of the entire log entry */ uint64_t size; /* size of the buffer to be modified */ uint8_t data[]; /* content to fill in */ }; #define ULOG_UNUSED ((CACHELINE_SIZE - 40) / 8) /* * This structure *must* be located at a cacheline boundary. To achieve this, * the next field is always allocated with extra padding, and then the offset * is additionally aligned. */ #define ULOG(capacity_bytes) {\ /* 64 bytes of metadata */\ uint64_t checksum; /* checksum of ulog header and its entries */\ uint64_t next; /* offset of ulog extension */\ uint64_t capacity; /* capacity of this ulog in bytes */\ uint64_t gen_num; /* generation counter */\ uint64_t flags; /* ulog flags */\ uint64_t unused[ULOG_UNUSED]; /* must be 0 */\ uint8_t data[capacity_bytes]; /* N bytes of data */\ }\ #define SIZEOF_ULOG(base_capacity)\ (sizeof(struct ulog) + base_capacity) /* * Ulog buffer allocated by the user must be marked by this flag. * It is important to not free it at the end: * what user has allocated - user should free himself. 
*/ #define ULOG_USER_OWNED (1U << 0) /* use this for allocations of aligned ulog extensions */ #define SIZEOF_ALIGNED_ULOG(base_capacity)\ ALIGN_UP(SIZEOF_ULOG(base_capacity + (2 * CACHELINE_SIZE)), CACHELINE_SIZE) struct ulog ULOG(0); VEC(ulog_next, uint64_t); typedef uint64_t ulog_operation_type; #define ULOG_OPERATION_SET (0b000ULL << 61ULL) #define ULOG_OPERATION_AND (0b001ULL << 61ULL) #define ULOG_OPERATION_OR (0b010ULL << 61ULL) #define ULOG_OPERATION_BUF_SET (0b101ULL << 61ULL) #define ULOG_OPERATION_BUF_CPY (0b110ULL << 61ULL) #define ULOG_BIT_OPERATIONS (ULOG_OPERATION_AND | ULOG_OPERATION_OR) /* immediately frees all associated ulog structures */ #define ULOG_FREE_AFTER_FIRST (1U << 0) /* increments gen_num of the first, preallocated, ulog */ #define ULOG_INC_FIRST_GEN_NUM (1U << 1) /* informs if there was any buffer allocated by user in the tx */ #define ULOG_ANY_USER_BUFFER (1U << 2) typedef int (*ulog_check_offset_fn)(void *ctx, uint64_t offset); typedef int (*ulog_extend_fn)(void *, uint64_t *, uint64_t); typedef int (*ulog_entry_cb)(struct ulog_entry_base *e, void *arg, const struct pmem_ops *p_ops); typedef int (*ulog_entry_cb_ndp)(struct ulog_entry_base *e, struct ulog_entry_base *f, void *arg, const struct pmem_ops *p_ops); typedef void (*ulog_free_fn)(void *base, uint64_t *next); typedef int (*ulog_rm_user_buffer_fn)(void *, void *addr); struct ulog *ulog_next(struct ulog *ulog, const struct pmem_ops *p_ops); void ulog_construct(uint64_t offset, size_t capacity, uint64_t gen_num, int flush, uint64_t flags, const struct pmem_ops *p_ops); size_t ulog_capacity(struct ulog *ulog, size_t ulog_base_bytes, const struct pmem_ops *p_ops); void ulog_rebuild_next_vec(struct ulog *ulog, struct ulog_next *next, const struct pmem_ops *p_ops); int ulog_foreach_entry(struct ulog *ulog, ulog_entry_cb cb, void *arg, const struct pmem_ops *ops, struct ulog *ulognvm); int ulog_foreach_entry_ndp(struct ulog *ulogdram, struct ulog *ulognvm, ulog_entry_cb_ndp cb, void *arg, const struct pmem_ops *ops); int ulog_reserve(struct ulog *ulog, size_t ulog_base_nbytes, size_t gen_num, int auto_reserve, size_t *new_capacity_bytes, ulog_extend_fn extend, struct ulog_next *next, const struct pmem_ops *p_ops); void ulog_store(struct ulog *dest, struct ulog *src, size_t nbytes, size_t ulog_base_nbytes, size_t ulog_total_capacity, struct ulog_next *next, const struct pmem_ops *p_ops); int ulog_free_next(struct ulog *u, const struct pmem_ops *p_ops, ulog_free_fn ulog_free, ulog_rm_user_buffer_fn user_buff_remove, uint64_t flags); void ulog_clobber(struct ulog *dest, struct ulog_next *next, const struct pmem_ops *p_ops); int ulog_clobber_data(struct ulog *dest, size_t nbytes, size_t ulog_base_nbytes, struct ulog_next *next, ulog_free_fn ulog_free, ulog_rm_user_buffer_fn user_buff_remove, const struct pmem_ops *p_ops, unsigned flags); void ulog_clobber_entry(const struct ulog_entry_base *e, const struct pmem_ops *p_ops); void ulog_process(struct ulog *ulog, ulog_check_offset_fn check, const struct pmem_ops *p_ops); void ulog_process_ndp(struct ulog *ulognvm, struct ulog *ulogdeam, ulog_check_offset_fn check, const struct pmem_ops *p_ops); size_t ulog_base_nbytes(struct ulog *ulog); int ulog_recovery_needed(struct ulog *ulog, int verify_checksum); struct ulog *ulog_by_offset(size_t offset, const struct pmem_ops *p_ops); uint64_t ulog_entry_offset(const struct ulog_entry_base *entry); ulog_operation_type ulog_entry_type( const struct ulog_entry_base *entry); struct ulog_entry_val *ulog_entry_val_create(struct 
ulog *ulog, size_t offset, uint64_t *dest, uint64_t value, ulog_operation_type type, const struct pmem_ops *p_ops); #ifdef USE_NDP_CLOBBER struct ulog_entry_buf * ulog_entry_buf_create(struct ulog *ulog, size_t offset, uint64_t gen_num, uint64_t *dest, const void *src, uint64_t size, ulog_operation_type type, const struct pmem_ops *p_ops, int clear_next_header); #else struct ulog_entry_buf * ulog_entry_buf_create(struct ulog *ulog, size_t offset, uint64_t gen_num, uint64_t *dest, const void *src, uint64_t size, ulog_operation_type type, const struct pmem_ops *p_ops); #endif void ulog_entry_apply(const struct ulog_entry_base *e, int persist, const struct pmem_ops *p_ops); void ulog_entry_apply_ndp(const struct ulog_entry_base *e, const struct ulog_entry_base *f, int persist, const struct pmem_ops *p_ops); size_t ulog_entry_size(const struct ulog_entry_base *entry); void ulog_recover(struct ulog *ulog, ulog_check_offset_fn check, const struct pmem_ops *p_ops); int ulog_check(struct ulog *ulog, ulog_check_offset_fn check, const struct pmem_ops *p_ops); #endif
6,600
32.170854
104
h
null
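ulog.h above packs the operation type into the top bits of each entry's offset (the ULOG_OPERATION_* constants are 3-bit values shifted by 61). A sketch of that encoding with a locally defined mask; the helper names and the mask are assumptions for illustration, the library's own equivalents live in ulog.c:

/*
 * Sketch of how the operation type rides in the high bits of
 * ulog_entry_base.offset. EX_* names are local to this example.
 */
#include <assert.h>
#include <stdint.h>

#define EX_OP_SET	(0b000ULL << 61ULL)
#define EX_OP_AND	(0b001ULL << 61ULL)
#define EX_OP_OR	(0b010ULL << 61ULL)
#define EX_OP_MASK	(0b111ULL << 61ULL)	/* assumed: top 3 bits */

static inline uint64_t
ex_entry_pack(uint64_t offset, uint64_t op)
{
	return offset | op;	/* offset must not use the top 3 bits */
}

static inline uint64_t
ex_entry_offset(uint64_t packed)
{
	return packed & ~EX_OP_MASK;
}

static inline uint64_t
ex_entry_type(uint64_t packed)
{
	return packed & EX_OP_MASK;
}

int
main(void)
{
	uint64_t e = ex_entry_pack(0x1000, EX_OP_OR);

	assert(ex_entry_offset(e) == 0x1000);
	assert(ex_entry_type(e) == EX_OP_OR);
	return 0;
}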
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmemobj/lane.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2019, Intel Corporation */ /* * lane.c -- lane implementation */ #ifndef _GNU_SOURCE #define _GNU_SOURCE #endif #include <inttypes.h> #include <errno.h> #include <limits.h> #include <sched.h> #include "libpmemobj.h" #include "critnib.h" #include "lane.h" #include "out.h" #include "util.h" #include "obj.h" #include "os_thread.h" #include "valgrind_internal.h" #include "memops.h" #include "palloc.h" #include "tx.h" static os_tls_key_t Lane_info_key; static __thread struct critnib *Lane_info_ht; static __thread struct lane_info *Lane_info_records; static __thread struct lane_info *Lane_info_cache; /* * lane_info_create -- (internal) constructor for thread shared data */ static inline void lane_info_create(void) { Lane_info_ht = critnib_new(); if (Lane_info_ht == NULL) FATAL("critnib_new"); } /* * lane_info_delete -- (internal) deletes lane info hash table */ static inline void lane_info_delete(void) { if (unlikely(Lane_info_ht == NULL)) return; critnib_delete(Lane_info_ht); struct lane_info *record; struct lane_info *head = Lane_info_records; while (head != NULL) { record = head; head = head->next; Free(record); } Lane_info_ht = NULL; Lane_info_records = NULL; Lane_info_cache = NULL; } /* * lane_info_ht_boot -- (internal) boot lane info and add it to thread shared * data */ static inline void lane_info_ht_boot(void) { lane_info_create(); int result = os_tls_set(Lane_info_key, Lane_info_ht); if (result != 0) { errno = result; FATAL("!os_tls_set"); } } /* * lane_info_ht_destroy -- (internal) destructor for thread shared data */ static inline void lane_info_ht_destroy(void *ht) { lane_info_delete(); } /* * lane_info_boot -- initialize lane info hash table and lane info key */ void lane_info_boot(void) { int result = os_tls_key_create(&Lane_info_key, lane_info_ht_destroy); if (result != 0) { errno = result; FATAL("!os_tls_key_create"); } } /* * lane_info_destroy -- destroy lane info hash table */ void lane_info_destroy(void) { lane_info_delete(); (void) os_tls_key_delete(Lane_info_key); } /* * lane_info_cleanup -- remove lane info record regarding pool being deleted */ static inline void lane_info_cleanup(PMEMobjpool *pop) { if (unlikely(Lane_info_ht == NULL)) return; struct lane_info *info = critnib_remove(Lane_info_ht, pop->uuid_lo); if (likely(info != NULL)) { if (info->prev) info->prev->next = info->next; if (info->next) info->next->prev = info->prev; if (Lane_info_cache == info) Lane_info_cache = NULL; if (Lane_info_records == info) Lane_info_records = info->next; Free(info); } } /* * lane_get_layout -- (internal) calculates the real pointer of the lane layout */ static struct lane_layout * lane_get_layout(PMEMobjpool *pop, uint64_t lane_idx) { return (void *)((char *)pop + pop->lanes_offset + sizeof(struct lane_layout) * lane_idx); } /* * lane_ulog_constructor -- (internal) constructor of a ulog extension */ static int lane_ulog_constructor(void *base, void *ptr, size_t usable_size, void *arg) { PMEMobjpool *pop = base; const struct pmem_ops *p_ops = &pop->p_ops; size_t capacity = ALIGN_DOWN(usable_size - sizeof(struct ulog), CACHELINE_SIZE); uint64_t gen_num = *(uint64_t *)arg; ulog_construct(OBJ_PTR_TO_OFF(base, ptr), capacity, gen_num, 1, 0, p_ops); return 0; } /* * lane_undo_extend -- allocates a new undo log */ static int lane_undo_extend(void *base, uint64_t *redo, uint64_t gen_num) { PMEMobjpool *pop = base; struct tx_parameters *params = pop->tx_params; size_t s = SIZEOF_ALIGNED_ULOG(params->cache_size); return 
pmalloc_construct(base, redo, s, lane_ulog_constructor, &gen_num, 0, OBJ_INTERNAL_OBJECT_MASK, 0); } /* * lane_redo_extend -- allocates a new redo log */ static int lane_redo_extend(void *base, uint64_t *redo, uint64_t gen_num) { size_t s = SIZEOF_ALIGNED_ULOG(LANE_REDO_EXTERNAL_SIZE); return pmalloc_construct(base, redo, s, lane_ulog_constructor, &gen_num, 0, OBJ_INTERNAL_OBJECT_MASK, 0); } /* * lane_init -- (internal) initializes a single lane runtime variables */ static int lane_init(PMEMobjpool *pop, struct lane *lane, struct lane_layout *layout) { ASSERTne(lane, NULL); lane->layout = layout; lane->internal = operation_new((struct ulog *)&layout->internal, LANE_REDO_INTERNAL_SIZE, NULL, NULL, &pop->p_ops, LOG_TYPE_REDO); if (lane->internal == NULL) goto error_internal_new; lane->external = operation_new((struct ulog *)&layout->external, LANE_REDO_EXTERNAL_SIZE, lane_redo_extend, (ulog_free_fn)pfree, &pop->p_ops, LOG_TYPE_REDO); if (lane->external == NULL) goto error_external_new; lane->undo = operation_new((struct ulog *)&layout->undo, LANE_UNDO_SIZE, lane_undo_extend, (ulog_free_fn)pfree, &pop->p_ops, LOG_TYPE_UNDO); if (lane->undo == NULL) goto error_undo_new; return 0; error_undo_new: operation_delete(lane->external); error_external_new: operation_delete(lane->internal); error_internal_new: return -1; } /* * lane_destroy -- cleanups a single lane runtime variables */ static void lane_destroy(PMEMobjpool *pop, struct lane *lane) { operation_delete(lane->undo); operation_delete(lane->internal); operation_delete(lane->external); } /* * lane_boot -- initializes all lanes */ int lane_boot(PMEMobjpool *pop) { int err = 0; pop->lanes_desc.lane = Malloc(sizeof(struct lane) * pop->nlanes); if (pop->lanes_desc.lane == NULL) { err = ENOMEM; ERR("!Malloc of volatile lanes"); goto error_lanes_malloc; } pop->lanes_desc.next_lane_idx = 0; pop->lanes_desc.lane_locks = Zalloc(sizeof(*pop->lanes_desc.lane_locks) * pop->nlanes); if (pop->lanes_desc.lane_locks == NULL) { ERR("!Malloc for lane locks"); goto error_locks_malloc; } /* add lanes to pmemcheck ignored list */ VALGRIND_ADD_TO_GLOBAL_TX_IGNORE((char *)pop + pop->lanes_offset, (sizeof(struct lane_layout) * pop->nlanes)); uint64_t i; for (i = 0; i < pop->nlanes; ++i) { struct lane_layout *layout = lane_get_layout(pop, i); if ((err = lane_init(pop, &pop->lanes_desc.lane[i], layout))) { ERR("!lane_init"); goto error_lane_init; } } return 0; error_lane_init: for (; i >= 1; --i) lane_destroy(pop, &pop->lanes_desc.lane[i - 1]); Free(pop->lanes_desc.lane_locks); pop->lanes_desc.lane_locks = NULL; error_locks_malloc: Free(pop->lanes_desc.lane); pop->lanes_desc.lane = NULL; error_lanes_malloc: return err; } /* * lane_init_data -- initializes ulogs for all the lanes */ void lane_init_data(PMEMobjpool *pop) { struct lane_layout *layout; for (uint64_t i = 0; i < pop->nlanes; ++i) { layout = lane_get_layout(pop, i); ulog_construct(OBJ_PTR_TO_OFF(pop, &layout->internal), LANE_REDO_INTERNAL_SIZE, 0, 0, 0, &pop->p_ops); ulog_construct(OBJ_PTR_TO_OFF(pop, &layout->external), LANE_REDO_EXTERNAL_SIZE, 0, 0, 0, &pop->p_ops); ulog_construct(OBJ_PTR_TO_OFF(pop, &layout->undo), LANE_UNDO_SIZE, 0, 0, 0, &pop->p_ops); } layout = lane_get_layout(pop, 0); pmemops_xpersist(&pop->p_ops, layout, pop->nlanes * sizeof(struct lane_layout), PMEMOBJ_F_RELAXED); } /* * lane_cleanup -- destroys all lanes */ void lane_cleanup(PMEMobjpool *pop) { for (uint64_t i = 0; i < pop->nlanes; ++i) lane_destroy(pop, &pop->lanes_desc.lane[i]); Free(pop->lanes_desc.lane); pop->lanes_desc.lane = 
NULL; Free(pop->lanes_desc.lane_locks); pop->lanes_desc.lane_locks = NULL; lane_info_cleanup(pop); } /* * lane_recover_and_section_boot -- performs initialization and recovery of all * lanes */ int lane_recover_and_section_boot(PMEMobjpool *pop) { COMPILE_ERROR_ON(SIZEOF_ULOG(LANE_UNDO_SIZE) + SIZEOF_ULOG(LANE_REDO_EXTERNAL_SIZE) + SIZEOF_ULOG(LANE_REDO_INTERNAL_SIZE) != LANE_TOTAL_SIZE); int err = 0; uint64_t i; /* lane index */ struct lane_layout *layout; /* * First we need to recover the internal/external redo logs so that the * allocator state is consistent before we boot it. */ for (i = 0; i < pop->nlanes; ++i) { layout = lane_get_layout(pop, i); ulog_recover((struct ulog *)&layout->internal, OBJ_OFF_IS_VALID_FROM_CTX, &pop->p_ops); ulog_recover((struct ulog *)&layout->external, OBJ_OFF_IS_VALID_FROM_CTX, &pop->p_ops); } if ((err = pmalloc_boot(pop)) != 0) return err; /* * Undo logs must be processed after the heap is initialized since * a undo recovery might require deallocation of the next ulogs. */ for (i = 0; i < pop->nlanes; ++i) { struct operation_context *ctx = pop->lanes_desc.lane[i].undo; operation_resume(ctx); operation_process(ctx); operation_finish(ctx, ULOG_INC_FIRST_GEN_NUM | ULOG_FREE_AFTER_FIRST); } return 0; } /* * lane_section_cleanup -- performs runtime cleanup of all lanes */ int lane_section_cleanup(PMEMobjpool *pop) { return pmalloc_cleanup(pop); } /* * lane_check -- performs check of all lanes */ int lane_check(PMEMobjpool *pop) { int err = 0; uint64_t j; /* lane index */ struct lane_layout *layout; for (j = 0; j < pop->nlanes; ++j) { layout = lane_get_layout(pop, j); if (ulog_check((struct ulog *)&layout->internal, OBJ_OFF_IS_VALID_FROM_CTX, &pop->p_ops) != 0) { LOG(2, "lane %" PRIu64 " internal redo failed: %d", j, err); return err; } } return 0; } /* * get_lane -- (internal) get free lane index */ static inline void get_lane(uint64_t *locks, struct lane_info *info, uint64_t nlocks) { info->lane_idx = info->primary; while (1) { do { info->lane_idx %= nlocks; if (likely(util_bool_compare_and_swap64( &locks[info->lane_idx], 0, 1))) { if (info->lane_idx == info->primary) { info->primary_attempts = LANE_PRIMARY_ATTEMPTS; } else if (info->primary_attempts == 0) { info->primary = info->lane_idx; info->primary_attempts = LANE_PRIMARY_ATTEMPTS; } return; } if (info->lane_idx == info->primary && info->primary_attempts > 0) { info->primary_attempts--; } ++info->lane_idx; } while (info->lane_idx < nlocks); sched_yield(); } } /* * get_lane_info_record -- (internal) get lane record attached to memory pool * or first free */ static inline struct lane_info * get_lane_info_record(PMEMobjpool *pop) { if (likely(Lane_info_cache != NULL && Lane_info_cache->pop_uuid_lo == pop->uuid_lo)) { return Lane_info_cache; } if (unlikely(Lane_info_ht == NULL)) { lane_info_ht_boot(); } struct lane_info *info = critnib_get(Lane_info_ht, pop->uuid_lo); if (unlikely(info == NULL)) { info = Malloc(sizeof(struct lane_info)); if (unlikely(info == NULL)) { FATAL("Malloc"); } info->pop_uuid_lo = pop->uuid_lo; info->lane_idx = UINT64_MAX; info->nest_count = 0; info->next = Lane_info_records; info->prev = NULL; info->primary = 0; info->primary_attempts = LANE_PRIMARY_ATTEMPTS; if (Lane_info_records) { Lane_info_records->prev = info; } Lane_info_records = info; if (unlikely(critnib_insert( Lane_info_ht, pop->uuid_lo, info) != 0)) { FATAL("critnib_insert"); } } Lane_info_cache = info; return info; } /* * lane_hold -- grabs a per-thread lane in a round-robin fashion */ unsigned lane_hold(PMEMobjpool 
*pop, struct lane **lanep) { /* * Before runtime lane initialization all remote operations are * executed using RLANE_DEFAULT. */ if (unlikely(!pop->lanes_desc.runtime_nlanes)) { ASSERT(pop->has_remote_replicas); if (lanep != NULL) FATAL("cannot obtain section before lane's init"); return RLANE_DEFAULT; } struct lane_info *lane = get_lane_info_record(pop); while (unlikely(lane->lane_idx == UINT64_MAX)) { /* initial wrap to next CL */ lane->primary = lane->lane_idx = util_fetch_and_add32( &pop->lanes_desc.next_lane_idx, LANE_JUMP); } /* handles wraparound */ uint64_t *llocks = pop->lanes_desc.lane_locks; /* grab next free lane from lanes available at runtime */ if (!lane->nest_count++) { get_lane(llocks, lane, pop->lanes_desc.runtime_nlanes); } struct lane *l = &pop->lanes_desc.lane[lane->lane_idx]; /* reinitialize lane's content only if in outermost hold */ if (lanep && lane->nest_count == 1) { VALGRIND_ANNOTATE_NEW_MEMORY(l, sizeof(*l)); VALGRIND_ANNOTATE_NEW_MEMORY(l->layout, sizeof(*l->layout)); operation_init(l->external); operation_init(l->internal); operation_init(l->undo); } if (lanep) *lanep = l; return (unsigned)lane->lane_idx; } /* * lane_release -- drops the per-thread lane */ void lane_release(PMEMobjpool *pop) { if (unlikely(!pop->lanes_desc.runtime_nlanes)) { ASSERT(pop->has_remote_replicas); return; } struct lane_info *lane = get_lane_info_record(pop); ASSERTne(lane, NULL); ASSERTne(lane->lane_idx, UINT64_MAX); if (unlikely(lane->nest_count == 0)) { FATAL("lane_release"); } else if (--(lane->nest_count) == 0) { if (unlikely(!util_bool_compare_and_swap64( &pop->lanes_desc.lane_locks[lane->lane_idx], 1, 0))) { FATAL("util_bool_compare_and_swap64"); } } }
12,994
21.678883
79
c
null
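lane.c above hands lanes out round-robin with a per-thread primary lane and a nesting count. A sketch of how internal callers are expected to bracket work between lane_hold() and lane_release(), based only on the declarations in lane.h; the caller function is hypothetical and this is internal, not public, API:

/*
 * Hypothetical internal caller: acquire a lane, use its operation
 * contexts, release it. Include paths as used inside libpmemobj.
 */
#include "obj.h"
#include "lane.h"

static void
do_something_with_a_lane(PMEMobjpool *pop)
{
	struct lane *lane;

	/* grabs (and, in the outermost hold, reinitializes) a per-thread lane */
	unsigned idx = lane_hold(pop, &lane);
	(void) idx;

	/*
	 * ... use lane->internal / lane->external / lane->undo
	 * operation contexts here ...
	 */

	lane_release(pop);	/* drops the lane once the nest count reaches 0 */
}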
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/rpmem_obc/rpmem_obc_test_common.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2016-2018, Intel Corporation */ /* * rpmem_obc_test_common.h -- common declarations for rpmem_obc test */ #include "unittest.h" #include "out.h" #include "librpmem.h" #include "rpmem.h" #include "rpmem_proto.h" #include "rpmem_common.h" #include "rpmem_util.h" #include "rpmem_obc.h" #define POOL_SIZE 1024 #define NLANES 32 #define NLANES_RESP 16 #define PROVIDER RPMEM_PROV_LIBFABRIC_SOCKETS #define POOL_DESC "pool_desc" #define RKEY 0xabababababababab #define RADDR 0x0101010101010101 #define PORT 1234 #define BUFF_SIZE 8192 #define POOL_ATTR_INIT {\ .signature = "<RPMEM>",\ .major = 1,\ .compat_features = 2,\ .incompat_features = 3,\ .ro_compat_features = 4,\ .poolset_uuid = "POOLSET_UUID0123",\ .uuid = "UUID0123456789AB",\ .next_uuid = "NEXT_UUID0123456",\ .prev_uuid = "PREV_UUID0123456",\ .user_flags = "USER_FLAGS012345",\ } #define POOL_ATTR_ALT {\ .signature = "<ALT>",\ .major = 5,\ .compat_features = 6,\ .incompat_features = 7,\ .ro_compat_features = 8,\ .poolset_uuid = "UUID_POOLSET_ALT",\ .uuid = "ALT_UUIDCDEFFEDC",\ .next_uuid = "456UUID_NEXT_ALT",\ .prev_uuid = "UUID012_ALT_PREV",\ .user_flags = "012345USER_FLAGS",\ } static const struct rpmem_pool_attr POOL_ATTR = POOL_ATTR_INIT; struct server { int fd_in; int fd_out; }; void set_rpmem_cmd(const char *fmt, ...); struct server *srv_init(void); void srv_fini(struct server *s); void srv_recv(struct server *s, void *buff, size_t len); void srv_send(struct server *s, const void *buff, size_t len); void srv_wait_disconnect(struct server *s); void client_connect_wait(struct rpmem_obc *rpc, char *target); /* * Since the server may disconnect the connection at any moment * from the client's perspective, execute the test in a loop so * the moment when the connection is closed will be possibly different. */ #define ECONNRESET_LOOP 10 void server_econnreset(struct server *s, const void *msg, size_t len); TEST_CASE_DECLARE(client_enotconn); TEST_CASE_DECLARE(client_connect); TEST_CASE_DECLARE(client_monitor); TEST_CASE_DECLARE(server_monitor); TEST_CASE_DECLARE(server_wait); TEST_CASE_DECLARE(client_create); TEST_CASE_DECLARE(server_create); TEST_CASE_DECLARE(server_create_econnreset); TEST_CASE_DECLARE(server_create_eproto); TEST_CASE_DECLARE(server_create_error); TEST_CASE_DECLARE(client_open); TEST_CASE_DECLARE(server_open); TEST_CASE_DECLARE(server_open_econnreset); TEST_CASE_DECLARE(server_open_eproto); TEST_CASE_DECLARE(server_open_error); TEST_CASE_DECLARE(client_close); TEST_CASE_DECLARE(server_close); TEST_CASE_DECLARE(server_close_econnreset); TEST_CASE_DECLARE(server_close_eproto); TEST_CASE_DECLARE(server_close_error); TEST_CASE_DECLARE(client_set_attr); TEST_CASE_DECLARE(server_set_attr); TEST_CASE_DECLARE(server_set_attr_econnreset); TEST_CASE_DECLARE(server_set_attr_eproto); TEST_CASE_DECLARE(server_set_attr_error);
2,951
26.082569
71
h
null
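The harness above splits every test between a client process and a scripted server: the client names the server-side case to launch through set_rpmem_cmd() and then talks to it over the out-of-band connection. A condensed sketch of that pairing using only the helpers declared above; the case body is hypothetical, complete cases are in the rpmem_obc_test_*.c files that follow:

/*
 * Hypothetical client-side case showing the usual shape: pick the server
 * behaviour with set_rpmem_cmd(), connect, issue a request, assert on errno.
 */
static void
client_example_case(char *target)
{
	/* ask the harness to run the matching server-side case */
	set_rpmem_cmd("server_create_econnreset %d", 0);

	struct rpmem_obc *rpc = rpmem_obc_init();
	UT_ASSERTne(rpc, NULL);

	client_connect_wait(rpc, target);

	/* ... call rpmem_obc_create()/open()/close() and check errno ... */

	rpmem_obc_disconnect(rpc);
	rpmem_obc_fini(rpc);
}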
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/rpmem_obc/rpmem_obc_test_create.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2016-2018, Intel Corporation */ /* * rpmem_obc_test_create.c -- test cases for rpmem_obc_create function */ #include "rpmem_obc_test_common.h" static const struct rpmem_msg_create_resp CREATE_RESP = { .hdr = { .type = RPMEM_MSG_TYPE_CREATE_RESP, .size = sizeof(struct rpmem_msg_create_resp), .status = 0, }, .ibc = { .port = PORT, .rkey = RKEY, .raddr = RADDR, .persist_method = RPMEM_PM_GPSPM, .nlanes = NLANES_RESP, }, }; /* * check_create_msg -- check create message */ static void check_create_msg(struct rpmem_msg_create *msg) { size_t pool_desc_size = strlen(POOL_DESC) + 1; size_t msg_size = sizeof(struct rpmem_msg_create) + pool_desc_size; struct rpmem_pool_attr pool_attr = POOL_ATTR_INIT; UT_ASSERTeq(msg->hdr.type, RPMEM_MSG_TYPE_CREATE); UT_ASSERTeq(msg->hdr.size, msg_size); UT_ASSERTeq(msg->c.major, RPMEM_PROTO_MAJOR); UT_ASSERTeq(msg->c.minor, RPMEM_PROTO_MINOR); UT_ASSERTeq(msg->c.pool_size, POOL_SIZE); UT_ASSERTeq(msg->c.provider, PROVIDER); UT_ASSERTeq(msg->c.nlanes, NLANES); UT_ASSERTeq(msg->c.buff_size, BUFF_SIZE); UT_ASSERTeq(msg->pool_desc.size, pool_desc_size); UT_ASSERTeq(strcmp((char *)msg->pool_desc.desc, POOL_DESC), 0); UT_ASSERTeq(memcmp(&msg->pool_attr, &pool_attr, sizeof(pool_attr)), 0); } /* * server_create_handle -- handle a create request message */ static void server_create_handle(struct server *s, const struct rpmem_msg_create_resp *resp) { size_t msg_size = sizeof(struct rpmem_msg_create) + strlen(POOL_DESC) + 1; struct rpmem_msg_create *msg = MALLOC(msg_size); srv_recv(s, msg, msg_size); rpmem_ntoh_msg_create(msg); check_create_msg(msg); srv_send(s, resp, sizeof(*resp)); FREE(msg); } /* * Number of cases for EPROTO test. Must be kept in sync with the * server_create_eproto function. 
*/ #define CREATE_EPROTO_COUNT 8 /* * server_create_eproto -- send invalid create request responses to a client */ int server_create_eproto(const struct test_case *tc, int argc, char *argv[]) { if (argc < 1) UT_FATAL("usage: %s 0-%d", tc->name, CREATE_EPROTO_COUNT - 1); int i = atoi(argv[0]); struct server *s = srv_init(); struct rpmem_msg_create_resp resp = CREATE_RESP; switch (i) { case 0: resp.hdr.type = MAX_RPMEM_MSG_TYPE; break; case 1: resp.hdr.type = RPMEM_MSG_TYPE_OPEN_RESP; break; case 2: resp.hdr.size -= 1; break; case 3: resp.hdr.size += 1; break; case 4: resp.hdr.status = MAX_RPMEM_ERR; break; case 5: resp.ibc.port = 0; break; case 6: resp.ibc.port = UINT16_MAX + 1; break; case 7: resp.ibc.persist_method = MAX_RPMEM_PM; break; default: UT_ASSERT(0); break; } rpmem_hton_msg_create_resp(&resp); server_create_handle(s, &resp); srv_fini(s); return 1; } /* * server_create_error -- return an error status in create response message */ int server_create_error(const struct test_case *tc, int argc, char *argv[]) { if (argc < 1) UT_FATAL("usage: %s 0-%d", tc->name, MAX_RPMEM_ERR); enum rpmem_err e = (enum rpmem_err)atoi(argv[0]); struct server *s = srv_init(); struct rpmem_msg_create_resp resp = CREATE_RESP; resp.hdr.status = e; rpmem_hton_msg_create_resp(&resp); server_create_handle(s, &resp); srv_fini(s); return 1; } /* * server_create_econnreset -- test case for closing connection - server side */ int server_create_econnreset(const struct test_case *tc, int argc, char *argv[]) { if (argc < 1) UT_FATAL("usage: %s 0|1", tc->name); int do_send = atoi(argv[0]); struct server *s = srv_init(); struct rpmem_msg_create_resp resp = CREATE_RESP; rpmem_hton_msg_create_resp(&resp); if (do_send) srv_send(s, &resp, sizeof(resp) / 2); srv_fini(s); return 1; } /* * server_create -- test case for rpmem_obc_create function - server side */ int server_create(const struct test_case *tc, int argc, char *argv[]) { if (argc < 0) UT_FATAL("usage: %s", tc->name); struct server *s = srv_init(); struct rpmem_msg_create_resp resp = CREATE_RESP; rpmem_hton_msg_create_resp(&resp); server_create_handle(s, &resp); srv_fini(s); return 0; } /* * client_create_errno -- perform create request operation and expect * specified errno. If ex_errno is zero expect certain values in res struct. 
*/ static void client_create_errno(char *target, int ex_errno) { struct rpmem_req_attr req = { .pool_size = POOL_SIZE, .nlanes = NLANES, .provider = PROVIDER, .pool_desc = POOL_DESC, .buff_size = BUFF_SIZE, }; struct rpmem_pool_attr pool_attr = POOL_ATTR_INIT; struct rpmem_resp_attr res; int ret; struct rpmem_obc *rpc = rpmem_obc_init(); UT_ASSERTne(rpc, NULL); client_connect_wait(rpc, target); ret = rpmem_obc_create(rpc, &req, &res, &pool_attr); if (ex_errno) { UT_ASSERTne(ret, 0); UT_ASSERTeq(errno, ex_errno); } else { UT_ASSERTeq(ret, 0); UT_ASSERTeq(res.port, CREATE_RESP.ibc.port); UT_ASSERTeq(res.rkey, CREATE_RESP.ibc.rkey); UT_ASSERTeq(res.raddr, CREATE_RESP.ibc.raddr); UT_ASSERTeq(res.persist_method, CREATE_RESP.ibc.persist_method); UT_ASSERTeq(res.nlanes, CREATE_RESP.ibc.nlanes); } rpmem_obc_disconnect(rpc); rpmem_obc_fini(rpc); } /* * client_create_error -- check if valid errno is set if error status returned */ static void client_create_error(char *target) { struct rpmem_req_attr req = { .pool_size = POOL_SIZE, .nlanes = NLANES, .provider = PROVIDER, .pool_desc = POOL_DESC, .buff_size = BUFF_SIZE, }; struct rpmem_pool_attr pool_attr = POOL_ATTR_INIT; struct rpmem_resp_attr res; int ret; for (enum rpmem_err e = 1; e < MAX_RPMEM_ERR; e++) { set_rpmem_cmd("server_create_error %d", e); int ex_errno = rpmem_util_proto_errno(e); struct rpmem_obc *rpc = rpmem_obc_init(); UT_ASSERTne(rpc, NULL); client_connect_wait(rpc, target); ret = rpmem_obc_create(rpc, &req, &res, &pool_attr); UT_ASSERTne(ret, 0); UT_ASSERTeq(errno, ex_errno); rpmem_obc_disconnect(rpc); rpmem_obc_fini(rpc); } } /* * client_create -- test case for create request operation - client side */ int client_create(const struct test_case *tc, int argc, char *argv[]) { if (argc < 1) UT_FATAL("usage: %s <addr>[:<port>]", tc->name); char *target = argv[0]; for (int i = 0; i < ECONNRESET_LOOP; i++) { set_rpmem_cmd("server_create_econnreset %d", i % 2); client_create_errno(target, ECONNRESET); } for (int i = 0; i < CREATE_EPROTO_COUNT; i++) { set_rpmem_cmd("server_create_eproto %d", i); client_create_errno(target, EPROTO); } client_create_error(target); set_rpmem_cmd("server_create"); client_create_errno(target, 0); return 1; }
6,642
20.498382
80
c
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/rpmem_obc/rpmem_obc_test.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2016-2017, Intel Corporation */ /* * rpmem_obc_test.c -- unit test for rpmem_obc module */ #include "rpmem_obc_test_common.h" #include "pmemcommon.h" /* * test_cases -- available test cases */ static struct test_case test_cases[] = { TEST_CASE(client_enotconn), TEST_CASE(client_connect), TEST_CASE(client_create), TEST_CASE(server_create), TEST_CASE(server_create_econnreset), TEST_CASE(server_create_eproto), TEST_CASE(server_create_error), TEST_CASE(client_open), TEST_CASE(server_open), TEST_CASE(server_open_econnreset), TEST_CASE(server_open_eproto), TEST_CASE(server_open_error), TEST_CASE(client_close), TEST_CASE(server_close), TEST_CASE(server_close_econnreset), TEST_CASE(server_close_eproto), TEST_CASE(server_close_error), TEST_CASE(client_monitor), TEST_CASE(server_monitor), TEST_CASE(client_set_attr), TEST_CASE(server_set_attr), TEST_CASE(server_set_attr_econnreset), TEST_CASE(server_set_attr_eproto), TEST_CASE(server_set_attr_error), }; #define NTESTS (sizeof(test_cases) / sizeof(test_cases[0])) int main(int argc, char *argv[]) { START(argc, argv, "rpmem_obc"); common_init("rpmem_obc", "RPMEM_LOG_LEVEL", "RPMEM_LOG_FILE", 0, 0); rpmem_util_cmds_init(); TEST_CASE_PROCESS(argc, argv, test_cases, NTESTS); rpmem_util_cmds_fini(); common_fini(); DONE(NULL); }
1,388
20.369231
59
c
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/rpmem_obc/rpmem_obc_test_open.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2016-2018, Intel Corporation */ /* * rpmem_obc_test_open.c -- test cases for rpmem_obj_open function */ #include "rpmem_obc_test_common.h" static const struct rpmem_msg_open_resp OPEN_RESP = { .hdr = { .type = RPMEM_MSG_TYPE_OPEN_RESP, .size = sizeof(struct rpmem_msg_open_resp), .status = 0, }, .ibc = { .port = PORT, .rkey = RKEY, .raddr = RADDR, .persist_method = RPMEM_PM_GPSPM, .nlanes = NLANES_RESP, }, .pool_attr = POOL_ATTR_INIT, }; /* * check_open_msg -- check open message */ static void check_open_msg(struct rpmem_msg_open *msg) { size_t pool_desc_size = strlen(POOL_DESC) + 1; size_t msg_size = sizeof(struct rpmem_msg_open) + pool_desc_size; UT_ASSERTeq(msg->hdr.type, RPMEM_MSG_TYPE_OPEN); UT_ASSERTeq(msg->hdr.size, msg_size); UT_ASSERTeq(msg->c.major, RPMEM_PROTO_MAJOR); UT_ASSERTeq(msg->c.minor, RPMEM_PROTO_MINOR); UT_ASSERTeq(msg->c.pool_size, POOL_SIZE); UT_ASSERTeq(msg->c.provider, PROVIDER); UT_ASSERTeq(msg->c.nlanes, NLANES); UT_ASSERTeq(msg->c.buff_size, BUFF_SIZE); UT_ASSERTeq(msg->pool_desc.size, pool_desc_size); UT_ASSERTeq(strcmp((char *)msg->pool_desc.desc, POOL_DESC), 0); } /* * server_open_handle -- handle an open request message */ static void server_open_handle(struct server *s, const struct rpmem_msg_open_resp *resp) { size_t msg_size = sizeof(struct rpmem_msg_open) + strlen(POOL_DESC) + 1; struct rpmem_msg_open *msg = MALLOC(msg_size); srv_recv(s, msg, msg_size); rpmem_ntoh_msg_open(msg); check_open_msg(msg); srv_send(s, resp, sizeof(*resp)); FREE(msg); } /* * Number of cases for EPROTO test. Must be kept in sync with the * server_open_eproto function. */ #define OPEN_EPROTO_COUNT 8 /* * server_open_eproto -- send invalid open request responses to a client */ int server_open_eproto(const struct test_case *tc, int argc, char *argv[]) { if (argc < 1) UT_FATAL("usage: %s 0-%d", tc->name, OPEN_EPROTO_COUNT - 1); int i = atoi(argv[0]); struct server *s = srv_init(); struct rpmem_msg_open_resp resp = OPEN_RESP; switch (i) { case 0: resp.hdr.type = MAX_RPMEM_MSG_TYPE; break; case 1: resp.hdr.type = RPMEM_MSG_TYPE_CREATE_RESP; break; case 2: resp.hdr.size -= 1; break; case 3: resp.hdr.size += 1; break; case 4: resp.hdr.status = MAX_RPMEM_ERR; break; case 5: resp.ibc.port = 0; break; case 6: resp.ibc.port = UINT16_MAX + 1; break; case 7: resp.ibc.persist_method = MAX_RPMEM_PM; break; default: UT_ASSERT(0); break; } rpmem_hton_msg_open_resp(&resp); server_open_handle(s, &resp); srv_fini(s); return 1; } /* * server_open_error -- return error status in open response message */ int server_open_error(const struct test_case *tc, int argc, char *argv[]) { if (argc < 1) UT_FATAL("usage: %s 0-%d", tc->name, MAX_RPMEM_ERR); enum rpmem_err e = (enum rpmem_err)atoi(argv[0]); struct server *s = srv_init(); struct rpmem_msg_open_resp resp = OPEN_RESP; resp.hdr.status = e; rpmem_hton_msg_open_resp(&resp); server_open_handle(s, &resp); srv_fini(s); return 1; } /* * server_open -- test case for rpmem_obc_create function - server side */ int server_open_econnreset(const struct test_case *tc, int argc, char *argv[]) { if (argc < 1) UT_FATAL("usage: %s 0|1", tc->name); int do_send = atoi(argv[0]); struct server *s = srv_init(); struct rpmem_msg_open_resp resp = OPEN_RESP; rpmem_hton_msg_open_resp(&resp); if (do_send) srv_send(s, &resp, sizeof(resp) / 2); srv_fini(s); return 1; } /* * server_open -- test case for open request message - server side */ int server_open(const struct test_case *tc, int argc, char *argv[]) { struct server *s = 
srv_init(); struct rpmem_msg_open_resp resp = OPEN_RESP; rpmem_hton_msg_open_resp(&resp); server_open_handle(s, &resp); srv_fini(s); return 0; } /* * client_open_errno -- perform open request operation and expect * specified errno, repeat the operation specified number of times. * If ex_errno is zero expect certain values in res struct. */ static void client_open_errno(char *target, int ex_errno) { struct rpmem_req_attr req = { .pool_size = POOL_SIZE, .nlanes = NLANES, .provider = PROVIDER, .pool_desc = POOL_DESC, .buff_size = BUFF_SIZE, }; struct rpmem_pool_attr pool_attr; memset(&pool_attr, 0, sizeof(pool_attr)); struct rpmem_resp_attr res; int ret; struct rpmem_obc *rpc = rpmem_obc_init(); UT_ASSERTne(rpc, NULL); client_connect_wait(rpc, target); ret = rpmem_obc_open(rpc, &req, &res, &pool_attr); if (ex_errno) { UT_ASSERTne(ret, 0); UT_ASSERTeq(errno, ex_errno); } else { UT_ASSERTeq(ret, 0); UT_ASSERTeq(res.port, OPEN_RESP.ibc.port); UT_ASSERTeq(res.rkey, OPEN_RESP.ibc.rkey); UT_ASSERTeq(res.raddr, OPEN_RESP.ibc.raddr); UT_ASSERTeq(res.persist_method, OPEN_RESP.ibc.persist_method); UT_ASSERTeq(res.nlanes, OPEN_RESP.ibc.nlanes); UT_ASSERTeq(memcmp(pool_attr.signature, OPEN_RESP.pool_attr.signature, RPMEM_POOL_HDR_SIG_LEN), 0); UT_ASSERTeq(pool_attr.major, OPEN_RESP.pool_attr.major); UT_ASSERTeq(pool_attr.compat_features, OPEN_RESP.pool_attr.compat_features); UT_ASSERTeq(pool_attr.incompat_features, OPEN_RESP.pool_attr.incompat_features); UT_ASSERTeq(pool_attr.ro_compat_features, OPEN_RESP.pool_attr.ro_compat_features); UT_ASSERTeq(memcmp(pool_attr.poolset_uuid, OPEN_RESP.pool_attr.poolset_uuid, RPMEM_POOL_HDR_UUID_LEN), 0); UT_ASSERTeq(memcmp(pool_attr.uuid, OPEN_RESP.pool_attr.uuid, RPMEM_POOL_HDR_UUID_LEN), 0); UT_ASSERTeq(memcmp(pool_attr.next_uuid, OPEN_RESP.pool_attr.next_uuid, RPMEM_POOL_HDR_UUID_LEN), 0); UT_ASSERTeq(memcmp(pool_attr.prev_uuid, OPEN_RESP.pool_attr.prev_uuid, RPMEM_POOL_HDR_UUID_LEN), 0); UT_ASSERTeq(memcmp(pool_attr.user_flags, OPEN_RESP.pool_attr.user_flags, RPMEM_POOL_USER_FLAGS_LEN), 0); } rpmem_obc_disconnect(rpc); rpmem_obc_fini(rpc); } /* * client_open_error -- check if valid errno is set if error status returned */ static void client_open_error(char *target) { struct rpmem_req_attr req = { .pool_size = POOL_SIZE, .nlanes = NLANES, .provider = PROVIDER, .pool_desc = POOL_DESC, .buff_size = BUFF_SIZE, }; struct rpmem_pool_attr pool_attr; memset(&pool_attr, 0, sizeof(pool_attr)); struct rpmem_resp_attr res; int ret; for (enum rpmem_err e = 1; e < MAX_RPMEM_ERR; e++) { set_rpmem_cmd("server_open_error %d", e); int ex_errno = rpmem_util_proto_errno(e); struct rpmem_obc *rpc = rpmem_obc_init(); UT_ASSERTne(rpc, NULL); client_connect_wait(rpc, target); ret = rpmem_obc_open(rpc, &req, &res, &pool_attr); UT_ASSERTne(ret, 0); UT_ASSERTeq(errno, ex_errno); rpmem_obc_disconnect(rpc); rpmem_obc_fini(rpc); } } /* * client_open -- test case for open request message - client side */ int client_open(const struct test_case *tc, int argc, char *argv[]) { if (argc < 1) UT_FATAL("usage: %s <addr>[:<port>]", tc->name); char *target = argv[0]; for (int i = 0; i < ECONNRESET_LOOP; i++) { set_rpmem_cmd("server_open_econnreset %d", i % 2); client_open_errno(target, ECONNRESET); } for (int i = 0; i < OPEN_EPROTO_COUNT; i++) { set_rpmem_cmd("server_open_eproto %d", i); client_open_errno(target, EPROTO); } client_open_error(target); set_rpmem_cmd("server_open"); client_open_errno(target, 0); return 1; }
7,427
21.306306
76
c
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/rpmemd_db/rpmemd_db_test.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2016-2020, Intel Corporation */ /* * rpmemd_db_test.c -- unit test for pool set database * * usage: rpmemd_db <log-file> <root_dir> <pool_desc_1> <pool_desc_2> */ #include "file.h" #include "unittest.h" #include "librpmem.h" #include "rpmemd_db.h" #include "rpmemd_log.h" #include "util_pmem.h" #include "set.h" #include "out.h" #include <limits.h> #include <stdlib.h> #include <unistd.h> #include <time.h> #define POOL_MODE 0644 #define FAILED_FUNC(func_name) \ UT_ERR("!%s(): %s() failed", __func__, func_name); #define FAILED_FUNC_PARAM(func_name, param) \ UT_ERR("!%s(): %s(%s) failed", __func__, func_name, param); #define NPOOLS_DUAL 2 #define POOL_ATTR_CREATE 0 #define POOL_ATTR_OPEN 1 #define POOL_ATTR_SET_ATTR 2 #define POOL_STATE_INITIAL 0 #define POOL_STATE_CREATED 1 #define POOL_STATE_OPENED 2 #define POOL_STATE_CLOSED POOL_STATE_CREATED #define POOL_STATE_REMOVED POOL_STATE_INITIAL /* * fill_rand -- fill a buffer with random values */ static void fill_rand(void *addr, size_t len) { unsigned char *buff = addr; srand(time(NULL)); for (unsigned i = 0; i < len; i++) buff[i] = (rand() % ('z' - 'a')) + 'a'; } /* * test_init -- test rpmemd_db_init() and rpmemd_db_fini() */ static int test_init(const char *root_dir) { struct rpmemd_db *db; db = rpmemd_db_init(root_dir, POOL_MODE); if (db == NULL) { FAILED_FUNC("rpmemd_db_init"); return -1; } rpmemd_db_fini(db); return 0; } /* * test_check_dir -- test rpmemd_db_check_dir() */ static int test_check_dir(const char *root_dir) { struct rpmemd_db *db; int ret; db = rpmemd_db_init(root_dir, POOL_MODE); if (db == NULL) { FAILED_FUNC("rpmemd_db_init"); return -1; } ret = rpmemd_db_check_dir(db); if (ret) { FAILED_FUNC("rpmemd_db_check_dir"); } rpmemd_db_fini(db); return ret; } /* * test_create -- test rpmemd_db_pool_create() */ static int test_create(const char *root_dir, const char *pool_desc) { struct rpmem_pool_attr attr; memset(&attr, 0, sizeof(attr)); attr.incompat_features = 2; struct rpmemd_db_pool *prp; struct rpmemd_db *db; int ret = -1; db = rpmemd_db_init(root_dir, POOL_MODE); if (db == NULL) { FAILED_FUNC("rpmemd_db_init"); return -1; } prp = rpmemd_db_pool_create(db, pool_desc, 0, &attr); if (prp == NULL) { FAILED_FUNC("rpmemd_db_pool_create"); goto fini; } rpmemd_db_pool_close(db, prp); ret = rpmemd_db_pool_remove(db, pool_desc, 0, 0); if (ret) { FAILED_FUNC("rpmemd_db_pool_remove"); } fini: rpmemd_db_fini(db); return ret; } /* * test_create_dual -- dual test for rpmemd_db_pool_create() */ static int test_create_dual(const char *root_dir, const char *pool_desc_1, const char *pool_desc_2) { struct rpmem_pool_attr attr1; memset(&attr1, 0, sizeof(attr1)); attr1.incompat_features = 2; struct rpmemd_db_pool *prp1, *prp2; struct rpmemd_db *db; int ret = -1; db = rpmemd_db_init(root_dir, POOL_MODE); if (db == NULL) { FAILED_FUNC("rpmemd_db_init"); return -1; } /* test dual create */ prp1 = rpmemd_db_pool_create(db, pool_desc_1, 0, &attr1); if (prp1 == NULL) { FAILED_FUNC_PARAM("rpmemd_db_pool_create", pool_desc_1); goto err_create_1; } prp2 = rpmemd_db_pool_create(db, pool_desc_2, 0, &attr1); if (prp2 == NULL) { FAILED_FUNC_PARAM("rpmemd_db_pool_create", pool_desc_2); goto err_create_2; } rpmemd_db_pool_close(db, prp2); rpmemd_db_pool_close(db, prp1); ret = rpmemd_db_pool_remove(db, pool_desc_2, 0, 0); if (ret) { FAILED_FUNC_PARAM("rpmemd_db_pool_remove", pool_desc_2); goto err_remove_2; } ret = rpmemd_db_pool_remove(db, pool_desc_1, 0, 0); if (ret) { 
FAILED_FUNC_PARAM("rpmemd_db_pool_remove", pool_desc_1); } goto fini; err_create_2: rpmemd_db_pool_close(db, prp1); err_remove_2: rpmemd_db_pool_remove(db, pool_desc_1, 0, 0); err_create_1: fini: rpmemd_db_fini(db); return ret; } /* * compare_attr -- compare pool's attributes */ static void compare_attr(struct rpmem_pool_attr *a1, struct rpmem_pool_attr *a2) { char *msg; if (a1->major != a2->major) { msg = "major"; goto err_mismatch; } if (a1->compat_features != a2->compat_features) { msg = "compat_features"; goto err_mismatch; } if (a1->incompat_features != a2->incompat_features) { msg = "incompat_features"; goto err_mismatch; } if (a1->ro_compat_features != a2->ro_compat_features) { msg = "ro_compat_features"; goto err_mismatch; } if (memcmp(a1->signature, a2->signature, RPMEM_POOL_HDR_SIG_LEN)) { msg = "signature"; goto err_mismatch; } if (memcmp(a1->poolset_uuid, a2->poolset_uuid, RPMEM_POOL_HDR_UUID_LEN)) { msg = "poolset_uuid"; goto err_mismatch; } if (memcmp(a1->uuid, a2->uuid, RPMEM_POOL_HDR_UUID_LEN)) { msg = "uuid"; goto err_mismatch; } if (memcmp(a1->next_uuid, a2->next_uuid, RPMEM_POOL_HDR_UUID_LEN)) { msg = "next_uuid"; goto err_mismatch; } if (memcmp(a1->prev_uuid, a2->prev_uuid, RPMEM_POOL_HDR_UUID_LEN)) { msg = "prev_uuid"; goto err_mismatch; } return; err_mismatch: errno = EINVAL; UT_FATAL("%s(): pool attributes mismatch (%s)", __func__, msg); } /* * test_open -- test rpmemd_db_pool_open() */ static int test_open(const char *root_dir, const char *pool_desc) { struct rpmem_pool_attr attr1, attr2; struct rpmemd_db_pool *prp; struct rpmemd_db *db; int ret = -1; fill_rand(&attr1, sizeof(attr1)); attr1.major = 1; attr1.incompat_features = 2; attr1.compat_features = 0; db = rpmemd_db_init(root_dir, POOL_MODE); if (db == NULL) { FAILED_FUNC("rpmemd_db_init"); return -1; } prp = rpmemd_db_pool_create(db, pool_desc, 0, &attr1); if (prp == NULL) { FAILED_FUNC("rpmemd_db_pool_create"); goto fini; } rpmemd_db_pool_close(db, prp); prp = rpmemd_db_pool_open(db, pool_desc, 0, &attr2); if (prp == NULL) { FAILED_FUNC("rpmemd_db_pool_open"); goto fini; } rpmemd_db_pool_close(db, prp); compare_attr(&attr1, &attr2); ret = rpmemd_db_pool_remove(db, pool_desc, 0, 0); if (ret) { FAILED_FUNC("rpmemd_db_pool_remove"); } fini: rpmemd_db_fini(db); return ret; } /* * test_open_dual -- dual test for rpmemd_db_pool_open() */ static int test_open_dual(const char *root_dir, const char *pool_desc_1, const char *pool_desc_2) { struct rpmem_pool_attr attr1a, attr2a, attr1b, attr2b; struct rpmemd_db_pool *prp1, *prp2; struct rpmemd_db *db; int ret = -1; fill_rand(&attr1a, sizeof(attr1a)); fill_rand(&attr1b, sizeof(attr1b)); attr1a.major = 1; attr1a.incompat_features = 2; attr1a.compat_features = 0; attr1b.major = 1; attr1b.incompat_features = 2; attr1b.compat_features = 0; db = rpmemd_db_init(root_dir, POOL_MODE); if (db == NULL) { FAILED_FUNC("rpmemd_db_init"); return -1; } prp1 = rpmemd_db_pool_create(db, pool_desc_1, 0, &attr1a); if (prp1 == NULL) { FAILED_FUNC_PARAM("rpmemd_db_pool_create", pool_desc_1); goto err_create_1; } rpmemd_db_pool_close(db, prp1); prp2 = rpmemd_db_pool_create(db, pool_desc_2, 0, &attr1b); if (prp2 == NULL) { FAILED_FUNC_PARAM("rpmemd_db_pool_create", pool_desc_2); goto err_create_2; } rpmemd_db_pool_close(db, prp2); /* test dual open */ prp1 = rpmemd_db_pool_open(db, pool_desc_1, 0, &attr2a); if (prp1 == NULL) { FAILED_FUNC_PARAM("rpmemd_db_pool_open", pool_desc_1); goto err_open_1; } prp2 = rpmemd_db_pool_open(db, pool_desc_2, 0, &attr2b); if (prp2 == NULL) { 
FAILED_FUNC_PARAM("rpmemd_db_pool_open", pool_desc_2); goto err_open_2; } rpmemd_db_pool_close(db, prp1); rpmemd_db_pool_close(db, prp2); compare_attr(&attr1a, &attr2a); compare_attr(&attr1b, &attr2b); ret = rpmemd_db_pool_remove(db, pool_desc_2, 0, 0); if (ret) { FAILED_FUNC_PARAM("rpmemd_db_pool_remove", pool_desc_2); goto err_remove_2; } ret = rpmemd_db_pool_remove(db, pool_desc_1, 0, 0); if (ret) { FAILED_FUNC_PARAM("rpmemd_db_pool_remove", pool_desc_1); } goto fini; err_open_2: rpmemd_db_pool_close(db, prp1); err_open_1: rpmemd_db_pool_remove(db, pool_desc_2, 0, 0); err_create_2: err_remove_2: rpmemd_db_pool_remove(db, pool_desc_1, 0, 0); err_create_1: fini: rpmemd_db_fini(db); return ret; } /* * test_set_attr -- test rpmemd_db_pool_set_attr() */ static int test_set_attr(const char *root_dir, const char *pool_desc) { struct rpmem_pool_attr attr[3]; struct rpmemd_db_pool *prp; struct rpmemd_db *db; int ret = -1; fill_rand(&attr[POOL_ATTR_CREATE], sizeof(attr[POOL_ATTR_CREATE])); fill_rand(&attr[POOL_ATTR_SET_ATTR], sizeof(attr[POOL_ATTR_SET_ATTR])); attr[POOL_ATTR_CREATE].major = 1; attr[POOL_ATTR_CREATE].incompat_features = 2; attr[POOL_ATTR_CREATE].compat_features = 0; attr[POOL_ATTR_SET_ATTR].major = 1; attr[POOL_ATTR_SET_ATTR].incompat_features = 2; attr[POOL_ATTR_SET_ATTR].compat_features = 0; db = rpmemd_db_init(root_dir, POOL_MODE); if (db == NULL) { FAILED_FUNC("rpmemd_db_init"); return -1; } prp = rpmemd_db_pool_create(db, pool_desc, 0, &attr[POOL_ATTR_CREATE]); if (prp == NULL) { FAILED_FUNC("rpmemd_db_pool_create"); goto err_create; } rpmemd_db_pool_close(db, prp); prp = rpmemd_db_pool_open(db, pool_desc, 0, &attr[POOL_ATTR_OPEN]); if (prp == NULL) { FAILED_FUNC("rpmemd_db_pool_open"); goto err_open; } compare_attr(&attr[POOL_ATTR_CREATE], &attr[POOL_ATTR_OPEN]); ret = rpmemd_db_pool_set_attr(prp, &attr[POOL_ATTR_SET_ATTR]); if (ret) { FAILED_FUNC("rpmemd_db_pool_set_attr"); goto err_set_attr; } rpmemd_db_pool_close(db, prp); prp = rpmemd_db_pool_open(db, pool_desc, 0, &attr[POOL_ATTR_OPEN]); if (prp == NULL) { FAILED_FUNC("rpmemd_db_pool_open"); goto err_open; } compare_attr(&attr[POOL_ATTR_SET_ATTR], &attr[POOL_ATTR_OPEN]); rpmemd_db_pool_close(db, prp); ret = rpmemd_db_pool_remove(db, pool_desc, 0, 0); if (ret) { FAILED_FUNC("rpmemd_db_pool_remove"); } goto fini; err_set_attr: rpmemd_db_pool_close(db, prp); err_open: rpmemd_db_pool_remove(db, pool_desc, 0, 0); err_create: fini: rpmemd_db_fini(db); return ret; } /* * test_set_attr_dual -- dual test for rpmemd_db_pool_set_attr() */ static int test_set_attr_dual(const char *root_dir, const char *pool_desc_1, const char *pool_desc_2) { struct rpmem_pool_attr attr[NPOOLS_DUAL][3]; struct rpmemd_db_pool *prp[NPOOLS_DUAL]; const char *pool_desc[NPOOLS_DUAL] = {pool_desc_1, pool_desc_2}; unsigned pool_state[NPOOLS_DUAL] = {POOL_STATE_INITIAL}; struct rpmemd_db *db; int ret = -1; /* initialize rpmem database */ db = rpmemd_db_init(root_dir, POOL_MODE); if (db == NULL) { FAILED_FUNC("rpmemd_db_init"); return -1; } for (unsigned p = 0; p < NPOOLS_DUAL; ++p) { /* * generate random pool attributes for create and set * attributes operations */ fill_rand(&attr[p][POOL_ATTR_CREATE], sizeof(attr[p][POOL_ATTR_CREATE])); fill_rand(&attr[p][POOL_ATTR_SET_ATTR], sizeof(attr[p][POOL_ATTR_SET_ATTR])); attr[p][POOL_ATTR_CREATE].major = 1; attr[p][POOL_ATTR_CREATE].incompat_features = 2; attr[p][POOL_ATTR_CREATE].compat_features = 0; attr[p][POOL_ATTR_SET_ATTR].major = 1; attr[p][POOL_ATTR_SET_ATTR].incompat_features = 2; 
attr[p][POOL_ATTR_SET_ATTR].compat_features = 0; /* create pool */ prp[p] = rpmemd_db_pool_create(db, pool_desc[p], 0, &attr[p][POOL_ATTR_CREATE]); if (prp[p] == NULL) { FAILED_FUNC_PARAM("rpmemd_db_pool_create", pool_desc[p]); goto err; } rpmemd_db_pool_close(db, prp[p]); pool_state[p] = POOL_STATE_CREATED; } /* open pools and check pool attributes */ for (unsigned p = 0; p < NPOOLS_DUAL; ++p) { prp[p] = rpmemd_db_pool_open(db, pool_desc[p], 0, &attr[p][POOL_ATTR_OPEN]); if (prp[p] == NULL) { FAILED_FUNC_PARAM("rpmemd_db_pool_open", pool_desc[p]); goto err; } pool_state[p] = POOL_STATE_OPENED; compare_attr(&attr[p][POOL_ATTR_CREATE], &attr[p][POOL_ATTR_OPEN]); } /* set attributes and close pools */ for (unsigned p = 0; p < NPOOLS_DUAL; ++p) { ret = rpmemd_db_pool_set_attr(prp[p], &attr[p][POOL_ATTR_SET_ATTR]); if (ret) { FAILED_FUNC_PARAM("rpmemd_db_pool_set_attr", pool_desc[p]); goto err; } rpmemd_db_pool_close(db, prp[p]); pool_state[p] = POOL_STATE_CLOSED; } /* open pools and check attributes */ for (unsigned p = 0; p < NPOOLS_DUAL; ++p) { prp[p] = rpmemd_db_pool_open(db, pool_desc[p], 0, &attr[p][POOL_ATTR_OPEN]); if (prp[p] == NULL) { FAILED_FUNC_PARAM("rpmemd_db_pool_open", pool_desc[p]); goto err; } pool_state[p] = POOL_STATE_OPENED; compare_attr(&attr[p][POOL_ATTR_SET_ATTR], &attr[p][POOL_ATTR_OPEN]); } err: for (unsigned p = 0; p < NPOOLS_DUAL; ++p) { if (pool_state[p] == POOL_STATE_OPENED) { rpmemd_db_pool_close(db, prp[p]); pool_state[p] = POOL_STATE_CLOSED; } if (pool_state[p] == POOL_STATE_CREATED) { ret = rpmemd_db_pool_remove(db, pool_desc[p], 0, 0); if (ret) { FAILED_FUNC_PARAM("rpmemd_db_pool_remove", pool_desc[p]); } pool_state[p] = POOL_STATE_REMOVED; } } rpmemd_db_fini(db); return ret; } static int exists_cb(struct part_file *pf, void *arg) { return util_file_exists(pf->part->path); } static int noexists_cb(struct part_file *pf, void *arg) { int exists = util_file_exists(pf->part->path); if (exists < 0) return -1; else return !exists; } /* * test_remove -- test for rpmemd_db_pool_remove() */ static void test_remove(const char *root_dir, const char *pool_desc) { struct rpmem_pool_attr attr; struct rpmemd_db_pool *prp; struct rpmemd_db *db; int ret; char path[PATH_MAX]; SNPRINTF(path, PATH_MAX, "%s/%s", root_dir, pool_desc); fill_rand(&attr, sizeof(attr)); strncpy((char *)attr.poolset_uuid, "TEST", sizeof(attr.poolset_uuid)); attr.incompat_features = 2; attr.compat_features = 0; db = rpmemd_db_init(root_dir, POOL_MODE); UT_ASSERTne(db, NULL); prp = rpmemd_db_pool_create(db, pool_desc, 0, &attr); UT_ASSERTne(prp, NULL); rpmemd_db_pool_close(db, prp); ret = util_poolset_foreach_part(path, exists_cb, NULL); UT_ASSERTeq(ret, 1); ret = rpmemd_db_pool_remove(db, pool_desc, 0, 0); UT_ASSERTeq(ret, 0); ret = util_poolset_foreach_part(path, noexists_cb, NULL); UT_ASSERTeq(ret, 1); prp = rpmemd_db_pool_create(db, pool_desc, 0, &attr); UT_ASSERTne(prp, NULL); rpmemd_db_pool_close(db, prp); ret = rpmemd_db_pool_remove(db, pool_desc, 0, 1); UT_ASSERTeq(ret, 0); ret = util_file_exists(path); UT_ASSERTne(ret, 1); rpmemd_db_fini(db); } int main(int argc, char *argv[]) { char *pool_desc[2], *log_file; char root_dir[PATH_MAX]; START(argc, argv, "rpmemd_db"); util_init(); out_init("rpmemd_db", "RPMEM_LOG_LEVEL", "RPMEM_LOG_FILE", 0, 0); if (argc != 5) UT_FATAL("usage: %s <log-file> <root_dir> <pool_desc_1>" " <pool_desc_2>", argv[0]); log_file = argv[1]; if (realpath(argv[2], root_dir) == NULL) UT_FATAL("!realpath(%s)", argv[1]); pool_desc[0] = argv[3]; pool_desc[1] = argv[4]; if 
(rpmemd_log_init("rpmemd error: ", log_file, 0)) FAILED_FUNC("rpmemd_log_init"); test_init(root_dir); test_check_dir(root_dir); test_create(root_dir, pool_desc[0]); test_create_dual(root_dir, pool_desc[0], pool_desc[1]); test_open(root_dir, pool_desc[0]); test_open_dual(root_dir, pool_desc[0], pool_desc[1]); test_set_attr(root_dir, pool_desc[0]); test_set_attr_dual(root_dir, pool_desc[0], pool_desc[1]); test_remove(root_dir, pool_desc[0]); rpmemd_log_close(); out_fini(); DONE(NULL); }
15,339
22.636364
72
c
null
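The rpmemd_db tests above unwind errors with stacked goto labels, so each failure path releases exactly the resources acquired so far and the success path falls through the same cleanup. A minimal stand-alone sketch of that idiom, using ordinary stdio files instead of pool handles (file paths and function names are illustrative only, not part of the PMDK tree):

/*
 * Illustrative sketch only: the stacked-goto cleanup pattern used by
 * test_create_dual()/test_open_dual() above.  Each acquired resource
 * adds one label; error paths jump to the label that undoes everything
 * acquired so far, in reverse order.
 */
#include <stdio.h>
#include <stdlib.h>

static int
create_two_files(const char *path1, const char *path2)
{
    int ret = -1;

    FILE *f1 = fopen(path1, "w");
    if (f1 == NULL)
        goto err_open_1;

    FILE *f2 = fopen(path2, "w");
    if (f2 == NULL)
        goto err_open_2;

    /* both resources acquired -- do the real work here */
    ret = 0;

    fclose(f2);
err_open_2:
    fclose(f1);
err_open_1:
    return ret;
}

int
main(void)
{
    /* paths are arbitrary placeholders for the demo */
    return create_two_files("/tmp/a.tmp", "/tmp/b.tmp") ?
            EXIT_FAILURE : EXIT_SUCCESS;
}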
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/pmem2_granularity/pmem2_granularity.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2019-2020, Intel Corporation */ /* * pmem2_granularity.c -- test for graunlarity functionality */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include "config.h" #include "source.h" #include "pmem2_granularity.h" #include "unittest.h" #include "ut_pmem2_config.h" #include "ut_pmem2_utils.h" #include "out.h" size_t Is_nfit = 1; size_t Pc_type = 7; size_t Pc_capabilities; /* * parse_args -- parse args from the input */ static int parse_args(const struct test_case *tc, int argc, char *argv[], char **file) { if (argc < 1) UT_FATAL("usage: %s <file>", tc->name); *file = argv[0]; return 1; } /* * set_eadr -- set variable required for mocked functions */ static void set_eadr() { int is_eadr = atoi(os_getenv("IS_EADR")); if (is_eadr) Pc_capabilities = 3; else Pc_capabilities = 2; } /* * test_ctx -- essential parameters used by test */ struct test_ctx { int fd; enum pmem2_granularity requested_granularity; enum pmem2_granularity expected_granularity; }; /* * init_test -- initialize basic parameters for test */ static void init_test(char *file, struct test_ctx *ctx, enum pmem2_granularity granularity) { set_eadr(); ctx->fd = OPEN(file, O_RDWR); ctx->requested_granularity = granularity; int is_eadr = atoi(os_getenv("IS_EADR")); int is_pmem = atoi(os_getenv("IS_PMEM")); if (is_eadr) { if (is_pmem) ctx->expected_granularity = PMEM2_GRANULARITY_BYTE; else UT_FATAL("invalid configuration IS_EADR && !IS_PMEM"); } else if (is_pmem) { ctx->expected_granularity = PMEM2_GRANULARITY_CACHE_LINE; } else { ctx->expected_granularity = PMEM2_GRANULARITY_PAGE; } } /* * init_cfg -- initialize basic pmem2 config */ static void init_cfg(struct pmem2_config *cfg, struct pmem2_source **src, struct test_ctx *ctx) { pmem2_config_init(cfg); int ret = pmem2_source_from_fd(src, ctx->fd); UT_PMEM2_EXPECT_RETURN(ret, 0); } /* * cleanup -- cleanup the environment after test */ static void cleanup(struct pmem2_source *src, struct test_ctx *ctx) { #ifdef _WIN32 CloseHandle(src->value.handle); #else CLOSE(ctx->fd); #endif } /* * map_with_available_granularity -- map the range with valid granularity, * includes cleanup */ static void map_with_available_granularity(struct pmem2_config *cfg, struct pmem2_source *src, struct test_ctx *ctx) { cfg->requested_max_granularity = ctx->requested_granularity; struct pmem2_map *map; int ret = pmem2_map(cfg, src, &map); UT_PMEM2_EXPECT_RETURN(ret, 0); UT_ASSERTne(map, NULL); UT_ASSERTeq(ctx->expected_granularity, pmem2_map_get_store_granularity(map)); /* cleanup after the test */ pmem2_unmap(&map); } /* * map_with_unavailable_granularity -- map the range with invalid * granularity (unsuccessful) */ static void map_with_unavailable_granularity(struct pmem2_config *cfg, struct pmem2_source *src, struct test_ctx *ctx) { cfg->requested_max_granularity = ctx->requested_granularity; struct pmem2_map *map; int ret = pmem2_map(cfg, src, &map); UT_PMEM2_EXPECT_RETURN(ret, PMEM2_E_GRANULARITY_NOT_SUPPORTED); UT_ERR("%s", pmem2_errormsg()); UT_ASSERTeq(map, NULL); } typedef void(*map_func)(struct pmem2_config *cfg, struct pmem2_source *src, struct test_ctx *ctx); /* * granularity_template -- template for testing granularity in pmem2 */ static int granularity_template(const struct test_case *tc, int argc, char *argv[], map_func map_do, enum pmem2_granularity granularity) { char *file = NULL; int ret = parse_args(tc, argc, argv, &file); struct test_ctx ctx = { 0 }; init_test(file, &ctx, granularity); struct pmem2_config cfg; struct 
pmem2_source *src; init_cfg(&cfg, &src, &ctx); map_do(&cfg, src, &ctx); cleanup(src, &ctx); pmem2_source_delete(&src); return ret; } /* * test_granularity_req_byte_avail_byte -- require byte granularity, * when byte granularity is available */ static int test_granularity_req_byte_avail_byte(const struct test_case *tc, int argc, char *argv[]) { return granularity_template(tc, argc, argv, map_with_available_granularity, PMEM2_GRANULARITY_BYTE); } /* * test_granularity_req_byte_avail_cl -- require byte granularity, * when cache line granularity is available */ static int test_granularity_req_byte_avail_cl(const struct test_case *tc, int argc, char *argv[]) { return granularity_template(tc, argc, argv, map_with_unavailable_granularity, PMEM2_GRANULARITY_BYTE); } /* * test_granularity_req_byte_avail_page -- require byte granularity, * when page granularity is available */ static int test_granularity_req_byte_avail_page(const struct test_case *tc, int argc, char *argv[]) { return granularity_template(tc, argc, argv, map_with_unavailable_granularity, PMEM2_GRANULARITY_BYTE); } /* * test_granularity_req_cl_avail_byte -- require cache line granularity, * when byte granularity is available */ static int test_granularity_req_cl_avail_byte(const struct test_case *tc, int argc, char *argv[]) { return granularity_template(tc, argc, argv, map_with_available_granularity, PMEM2_GRANULARITY_CACHE_LINE); } /* * test_granularity_req_cl_avail_cl -- require cache line granularity, * when cache line granularity is available */ static int test_granularity_req_cl_avail_cl(const struct test_case *tc, int argc, char *argv[]) { return granularity_template(tc, argc, argv, map_with_available_granularity, PMEM2_GRANULARITY_CACHE_LINE); } /* * test_granularity_req_cl_avail_page -- require cache line granularity, * when page granularity is available */ static int test_granularity_req_cl_avail_page(const struct test_case *tc, int argc, char *argv[]) { return granularity_template(tc, argc, argv, map_with_unavailable_granularity, PMEM2_GRANULARITY_CACHE_LINE); } /* * test_granularity_req_page_avail_byte -- require page granularity, * when byte granularity is available */ static int test_granularity_req_page_avail_byte(const struct test_case *tc, int argc, char *argv[]) { return granularity_template(tc, argc, argv, map_with_available_granularity, PMEM2_GRANULARITY_PAGE); } /* * test_granularity_req_byte_avail_cl -- require page granularity, * when byte cache line is available */ static int test_granularity_req_page_avail_cl(const struct test_case *tc, int argc, char *argv[]) { return granularity_template(tc, argc, argv, map_with_available_granularity, PMEM2_GRANULARITY_PAGE); } /* * test_granularity_req_page_avail_page -- require page granularity, * when page granularity is available */ static int test_granularity_req_page_avail_page(const struct test_case *tc, int argc, char *argv[]) { return granularity_template(tc, argc, argv, map_with_available_granularity, PMEM2_GRANULARITY_PAGE); } /* * test_cases -- available test cases */ static struct test_case test_cases[] = { TEST_CASE(test_granularity_req_byte_avail_byte), TEST_CASE(test_granularity_req_byte_avail_cl), TEST_CASE(test_granularity_req_byte_avail_page), TEST_CASE(test_granularity_req_cl_avail_byte), TEST_CASE(test_granularity_req_cl_avail_cl), TEST_CASE(test_granularity_req_cl_avail_page), TEST_CASE(test_granularity_req_page_avail_byte), TEST_CASE(test_granularity_req_page_avail_cl), TEST_CASE(test_granularity_req_page_avail_page), }; #define NTESTS 
ARRAY_SIZE(test_cases) int main(int argc, char *argv[]) { START(argc, argv, "pmem2_granularity"); out_init("pmem2_granularity", "TEST_LOG_LEVEL", "TEST_LOG_FILE", 0, 0); TEST_CASE_PROCESS(argc, argv, test_cases, NTESTS); out_fini(); DONE(NULL); } #ifdef _MSC_VER MSVC_CONSTR(libpmem2_init) MSVC_DESTR(libpmem2_fini) #endif
7,665
23.106918
74
c
null
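init_test() above maps the IS_EADR/IS_PMEM environment onto the granularity it expects libpmem2 to report: byte for eADR platforms, cache line for pmem mapped with MAP_SYNC, page otherwise. A minimal, self-contained sketch of the underlying Linux-level probe, assuming a kernel and libc that expose MAP_SYNC and MAP_SHARED_VALIDATE (independent of libpmem2; shown only to illustrate the distinction the test exercises):

/*
 * Illustrative sketch only: try a MAP_SYNC mapping and fall back to a
 * plain shared mapping.  MAP_SYNC succeeding on a DAX filesystem is
 * roughly what separates cache-line/byte granularity from page
 * granularity in the test above.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

#ifndef MAP_SYNC            /* older headers may not define these */
#define MAP_SYNC 0x80000
#endif
#ifndef MAP_SHARED_VALIDATE
#define MAP_SHARED_VALIDATE 0x03
#endif

int
main(int argc, char *argv[])
{
    if (argc < 2) {
        fprintf(stderr, "usage: %s <file>\n", argv[0]);
        return 1;
    }

    int fd = open(argv[1], O_RDWR);
    if (fd < 0)
        return 1;

    size_t len = 4096;    /* map only the first page for the demo */
    void *addr = mmap(NULL, len, PROT_READ | PROT_WRITE,
            MAP_SHARED_VALIDATE | MAP_SYNC, fd, 0);
    if (addr != MAP_FAILED) {
        printf("MAP_SYNC mapping established\n");
    } else {
        /* no MAP_SYNC support here -- page granularity territory */
        addr = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
                fd, 0);
        if (addr != MAP_FAILED)
            printf("fell back to a plain MAP_SHARED mapping\n");
    }

    if (addr != MAP_FAILED)
        munmap(addr, len);
    close(fd);
    return 0;
}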
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/pmem2_granularity/mocks_posix.c
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2019, Intel Corporation */

/*
 * mocks_posix.c -- mocked functions used in auto_flush_linux.c
 */

#include <fts.h>
#include "map.h"
#include "../common/mmap.h"
#include "fs.h"
#include "unittest.h"

#define BUS_DEVICE_PATH "/sys/bus/nd/devices"

/*
 * mmap - mock mmap
 */
FUNC_MOCK(mmap, void *, void *addr, size_t len, int prot, int flags,
        int fd, __off_t offset)
FUNC_MOCK_RUN_DEFAULT {
    char *str_map_sync = os_getenv("IS_PMEM");

    const int ms = MAP_SYNC | MAP_SHARED_VALIDATE;
    int map_sync_try = ((flags & ms) == ms) ? 1 : 0;

    if (str_map_sync && atoi(str_map_sync) == 1) {
        if (map_sync_try) {
            flags &= ~ms;
            flags |= MAP_SHARED;
            return _FUNC_REAL(mmap)(addr, len, prot, flags, fd, offset);
        }
    } else if (map_sync_try) {
        errno = EINVAL;
        return MAP_FAILED;
    }

    return _FUNC_REAL(mmap)(addr, len, prot, flags, fd, offset);
}
FUNC_MOCK_END

/*
 * open -- open mock
 */
FUNC_MOCK(open, int, const char *path, int flags, ...)
FUNC_MOCK_RUN_DEFAULT {
    va_list ap;
    va_start(ap, flags);
    int mode = va_arg(ap, int);
    va_end(ap);

    char *is_bus_device_path = strstr(path, BUS_DEVICE_PATH);
    if (!is_bus_device_path ||
            (is_bus_device_path && strstr(path, "region")))
        return _FUNC_REAL(open)(path, flags, mode);

    const char *mock_path = os_getenv("BUS_DEVICE_PATH");
    return _FUNC_REAL(open)(mock_path, flags, mode);
}
FUNC_MOCK_END

struct fs {
    FTS *ft;
    struct fs_entry entry;
};

/*
 * fs_new -- creates fs traversal instance
 */
FUNC_MOCK(fs_new, struct fs *, const char *path)
FUNC_MOCK_RUN_DEFAULT {
    char *is_bus_device_path = strstr(path, BUS_DEVICE_PATH);
    if (!is_bus_device_path ||
            (is_bus_device_path && strstr(path, "region")))
        return _FUNC_REAL(fs_new)(path);

    const char *mock_path = os_getenv("BUS_DEVICE_PATH");
    return _FUNC_REAL(fs_new)(mock_path);
}
FUNC_MOCK_END

/*
 * os_stat -- os_stat mock to handle sysfs path
 */
FUNC_MOCK(os_stat, int, const char *path, os_stat_t *buf)
FUNC_MOCK_RUN_DEFAULT {
    char *is_bus_device_path = strstr(path, BUS_DEVICE_PATH);
    if (!is_bus_device_path ||
            (is_bus_device_path && strstr(path, "region")))
        return _FUNC_REAL(os_stat)(path, buf);

    const char *mock_path = os_getenv("BUS_DEVICE_PATH");
    return _FUNC_REAL(os_stat)(mock_path, buf);
}
FUNC_MOCK_END
2,302
23.5
63
c
null
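FUNC_MOCK, FUNC_MOCK_RUN_DEFAULT and _FUNC_REAL above are PMDK's internal unittest helpers for intercepting calls such as mmap and open. A generic sketch of the same interposition technique using the GNU linker's --wrap option is shown below; it is a simplified stand-in for illustration, not the PMDK macro implementation.

/*
 * Illustrative sketch only: linker-level symbol wrapping.
 * Build with:  cc demo.c -Wl,--wrap=open
 * Every call to open() is then routed to __wrap_open(), which may
 * forward to the real implementation via __real_open().
 */
#include <fcntl.h>
#include <stdarg.h>
#include <stdio.h>
#include <unistd.h>

int __real_open(const char *path, int flags, ...);

int
__wrap_open(const char *path, int flags, ...)
{
    /* the mode argument is only present (and meaningful) with O_CREAT */
    int mode = 0;
    if (flags & O_CREAT) {
        va_list ap;
        va_start(ap, flags);
        mode = va_arg(ap, int);
        va_end(ap);
    }

    fprintf(stderr, "intercepted open(\"%s\")\n", path);
    return __real_open(path, flags, mode);
}

int
main(void)
{
    int fd = open("/dev/null", O_RDONLY);
    if (fd >= 0) {
        fprintf(stderr, "got fd %d\n", fd);
        close(fd);
    }
    return 0;
}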
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/pmem2_granularity/mocks_dax_windows.c
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2019, Intel Corporation */

/*
 * mocks_dax_windows.c -- mocked function required to control
 * FILE_DAX_VOLUME value reported by the OS APIs
 */

#include "unittest.h"

FUNC_MOCK_DLLIMPORT(GetVolumeInformationByHandleW, BOOL,
    HANDLE hFile,
    LPWSTR lpVolumeNameBuffer,
    DWORD nVolumeNameSize,
    LPDWORD lpVolumeSerialNumber,
    LPDWORD lpMaximumComponentLength,
    LPDWORD lpFileSystemFlags,
    LPWSTR lpFileSystemNameBuffer,
    DWORD nFileSystemNameSize)
FUNC_MOCK_RUN_DEFAULT {
    size_t is_pmem = atoi(os_getenv("IS_PMEM"));
    if (is_pmem)
        *lpFileSystemFlags = FILE_DAX_VOLUME;
    else
        *lpFileSystemFlags = 0;
    return TRUE;
}
FUNC_MOCK_END
688
22.758621
61
c
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/pmem2_granularity/mocks_dax_windows.h
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2019, Intel Corporation */

/*
 * mocks_dax_windows.h -- redefinitions of GetVolumeInformationByHandleW
 *
 * This file is Windows-specific.
 *
 * This file should be included (i.e. using Forced Include) by libpmem2
 * files when compiled for the purpose of the pmem2_granularity test.
 * It replaces the default implementation with the mocked function defined
 * in mocks_dax_windows.c.
 *
 * The WRAP_REAL define could also be passed as a preprocessor definition.
 */

#ifndef MOCKS_WINDOWS_H
#define MOCKS_WINDOWS_H 1

#include <windows.h>

#ifndef WRAP_REAL
#define GetVolumeInformationByHandleW __wrap_GetVolumeInformationByHandleW
BOOL
__wrap_GetVolumeInformationByHandleW(HANDLE hFile,
    LPWSTR lpVolumeNameBuffer,
    DWORD nVolumeNameSize,
    LPDWORD lpVolumeSerialNumber,
    LPDWORD lpMaximumComponentLength,
    LPDWORD lpFileSystemFlags,
    LPWSTR lpFileSystemNameBuffer,
    DWORD nFileSystemNameSize);
#endif

#endif
956
28.90625
77
h
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/rpmem_fip/rpmem_fip_test.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2016-2020, Intel Corporation */ /* * rpmem_fip_test.c -- tests for rpmem_fip and rpmemd_fip modules */ #include <netdb.h> #include <sys/socket.h> #include <netinet/in.h> #include <arpa/inet.h> #include "unittest.h" #include "pmemcommon.h" #include "librpmem.h" #include "rpmem.h" #include "rpmem_proto.h" #include "rpmem_common.h" #include "rpmem_util.h" #include "rpmem_fip_common.h" #include "rpmem_fip_oob.h" #include "rpmemd_fip.h" #include "rpmemd_log.h" #include "rpmemd_util.h" #include "rpmem_fip.h" #include "os.h" #define SIZE_PER_LANE 64 #define COUNT_PER_LANE 32 #define NLANES 1024 #define SOCK_NLANES 32 #define NTHREADS 32 #define TOTAL_PER_LANE (SIZE_PER_LANE * COUNT_PER_LANE) #define POOL_SIZE (NLANES * TOTAL_PER_LANE) static uint8_t lpool[POOL_SIZE]; static uint8_t rpool[POOL_SIZE]; TEST_CASE_DECLARE(client_init); TEST_CASE_DECLARE(server_init); TEST_CASE_DECLARE(client_connect); TEST_CASE_DECLARE(server_connect); TEST_CASE_DECLARE(server_process); TEST_CASE_DECLARE(client_flush); TEST_CASE_DECLARE(client_flush_mt); TEST_CASE_DECLARE(client_persist); TEST_CASE_DECLARE(client_persist_mt); TEST_CASE_DECLARE(client_read); TEST_CASE_DECLARE(client_wq_size); struct fip_client { enum rpmem_provider provider; unsigned max_wq_size; unsigned nlanes; }; #define FIP_CLIENT_DEFAULT {RPMEM_PROV_UNKNOWN, 0, NLANES} /* * get_persist_method -- parse persist method */ static enum rpmem_persist_method get_persist_method(const char *pm) { if (strcmp(pm, "GPSPM") == 0) return RPMEM_PM_GPSPM; else if (strcmp(pm, "APM") == 0) return RPMEM_PM_APM; else UT_FATAL("unknown method"); } /* * get_provider -- get provider for given target */ static void get_provider(const char *target, const char *prov_name, struct fip_client *client) { struct rpmem_fip_probe probe; int ret; int any = 0; if (strcmp(prov_name, "any") == 0) any = 1; ret = rpmem_fip_probe_get(target, &probe); UT_ASSERTeq(ret, 0); UT_ASSERT(rpmem_fip_probe_any(probe)); if (any) { /* return verbs in first place */ if (rpmem_fip_probe(probe, RPMEM_PROV_LIBFABRIC_VERBS)) client->provider = RPMEM_PROV_LIBFABRIC_VERBS; else if (rpmem_fip_probe(probe, RPMEM_PROV_LIBFABRIC_SOCKETS)) client->provider = RPMEM_PROV_LIBFABRIC_SOCKETS; else UT_ASSERT(0); } else { client->provider = rpmem_provider_from_str(prov_name); UT_ASSERTne(client->provider, RPMEM_PROV_UNKNOWN); UT_ASSERT(rpmem_fip_probe(probe, client->provider)); } /* * Decrease number of lanes for socket provider because * the test may be too long. 
*/ if (client->provider == RPMEM_PROV_LIBFABRIC_SOCKETS) client->nlanes = min(client->nlanes, SOCK_NLANES); client->max_wq_size = probe.max_wq_size[client->provider]; } /* * set_pool_data -- set pools data to well known values */ static void set_pool_data(uint8_t *pool, int inverse) { for (unsigned l = 0; l < NLANES; l++) { for (unsigned i = 0; i < COUNT_PER_LANE; i++) { size_t offset = l * TOTAL_PER_LANE + i * SIZE_PER_LANE; unsigned val = i + l; if (inverse) val = ~val; memset(&pool[offset], (int)val, SIZE_PER_LANE); } } } /* * flush_arg -- arguments for client persist and flush / drain threads */ struct flush_arg { struct rpmem_fip *fip; unsigned lane; }; typedef void *(*flush_fn)(void *arg); /* * client_flush_thread -- thread callback for flush / drain operation */ static void * client_flush_thread(void *arg) { struct flush_arg *args = arg; int ret; /* persist with len == 0 should always succeed */ ret = rpmem_fip_flush(args->fip, args->lane * TOTAL_PER_LANE, 0, args->lane, RPMEM_FLUSH_WRITE); UT_ASSERTeq(ret, 0); for (unsigned i = 0; i < COUNT_PER_LANE; i++) { size_t offset = args->lane * TOTAL_PER_LANE + i * SIZE_PER_LANE; unsigned val = args->lane + i; memset(&lpool[offset], (int)val, SIZE_PER_LANE); ret = rpmem_fip_flush(args->fip, offset, SIZE_PER_LANE, args->lane, RPMEM_FLUSH_WRITE); UT_ASSERTeq(ret, 0); } ret = rpmem_fip_drain(args->fip, args->lane); UT_ASSERTeq(ret, 0); return NULL; } /* * client_persist_thread -- thread callback for persist operation */ static void * client_persist_thread(void *arg) { struct flush_arg *args = arg; int ret; /* persist with len == 0 should always succeed */ ret = rpmem_fip_persist(args->fip, args->lane * TOTAL_PER_LANE, 0, args->lane, RPMEM_FLUSH_WRITE); UT_ASSERTeq(ret, 0); for (unsigned i = 0; i < COUNT_PER_LANE; i++) { size_t offset = args->lane * TOTAL_PER_LANE + i * SIZE_PER_LANE; unsigned val = args->lane + i; memset(&lpool[offset], (int)val, SIZE_PER_LANE); ret = rpmem_fip_persist(args->fip, offset, SIZE_PER_LANE, args->lane, RPMEM_FLUSH_WRITE); UT_ASSERTeq(ret, 0); } return NULL; } /* * client_init -- test case for client initialization */ int client_init(const struct test_case *tc, int argc, char *argv[]) { if (argc < 3) UT_FATAL("usage: %s <target> <provider> <persist method>", tc->name); char *target = argv[0]; char *prov_name = argv[1]; char *persist_method = argv[2]; set_rpmem_cmd("server_init %s", persist_method); char fip_service[NI_MAXSERV]; struct rpmem_target_info *info; info = rpmem_target_parse(target); UT_ASSERTne(info, NULL); struct fip_client fip_client = FIP_CLIENT_DEFAULT; get_provider(info->node, prov_name, &fip_client); client_t *client; struct rpmem_resp_attr resp; client = client_exchange(info, fip_client.nlanes, fip_client.provider, &resp); struct rpmem_fip_attr attr = { .provider = fip_client.provider, .max_wq_size = fip_client.max_wq_size, .persist_method = resp.persist_method, .laddr = lpool, .size = POOL_SIZE, .nlanes = resp.nlanes, .raddr = (void *)resp.raddr, .rkey = resp.rkey, }; ssize_t sret = SNPRINTF(fip_service, NI_MAXSERV, "%u", resp.port); UT_ASSERT(sret > 0); /* * Tune the maximum number of lanes according to environment. 
*/ rpmem_util_get_env_max_nlanes(&Rpmem_max_nlanes); struct rpmem_fip *fip; fip = rpmem_fip_init(info->node, fip_service, &attr, &fip_client.nlanes); UT_ASSERTne(fip, NULL); client_close_begin(client); client_close_end(client); rpmem_fip_fini(fip); rpmem_target_free(info); return 3; } /* * server_init -- test case for server initialization */ int server_init(const struct test_case *tc, int argc, char *argv[]) { if (argc < 1) UT_FATAL("usage: %s <persist method>", tc->name); enum rpmem_persist_method persist_method = get_persist_method(argv[0]); unsigned nlanes; enum rpmem_provider provider; char *addr = NULL; int ret; server_exchange_begin(&nlanes, &provider, &addr); UT_ASSERTne(addr, NULL); struct rpmemd_fip_attr attr = { .addr = rpool, .size = POOL_SIZE, .nlanes = nlanes, .provider = provider, .persist_method = persist_method, .nthreads = NTHREADS, }; ret = rpmemd_apply_pm_policy(&attr.persist_method, &attr.persist, &attr.memcpy_persist, 1 /* is pmem */); UT_ASSERTeq(ret, 0); struct rpmem_resp_attr resp; struct rpmemd_fip *fip; enum rpmem_err err; fip = rpmemd_fip_init(addr, NULL, &attr, &resp, &err); UT_ASSERTne(fip, NULL); server_exchange_end(resp); server_close_begin(); server_close_end(); rpmemd_fip_fini(fip); FREE(addr); return 1; } /* * client_connect -- test case for establishing connection - client side */ int client_connect(const struct test_case *tc, int argc, char *argv[]) { if (argc < 3) UT_FATAL("usage: %s <target> <provider> <persist method>", tc->name); char *target = argv[0]; char *prov_name = argv[1]; char *persist_method = argv[2]; set_rpmem_cmd("server_connect %s", persist_method); char fip_service[NI_MAXSERV]; struct rpmem_target_info *info; int ret; info = rpmem_target_parse(target); UT_ASSERTne(info, NULL); struct fip_client fip_client = FIP_CLIENT_DEFAULT; get_provider(info->node, prov_name, &fip_client); client_t *client; struct rpmem_resp_attr resp; client = client_exchange(info, fip_client.nlanes, fip_client.provider, &resp); struct rpmem_fip_attr attr = { .provider = fip_client.provider, .max_wq_size = fip_client.max_wq_size, .persist_method = resp.persist_method, .laddr = lpool, .size = POOL_SIZE, .nlanes = resp.nlanes, .raddr = (void *)resp.raddr, .rkey = resp.rkey, }; ssize_t sret = SNPRINTF(fip_service, NI_MAXSERV, "%u", resp.port); UT_ASSERT(sret > 0); struct rpmem_fip *fip; fip = rpmem_fip_init(info->node, fip_service, &attr, &fip_client.nlanes); UT_ASSERTne(fip, NULL); ret = rpmem_fip_connect(fip); UT_ASSERTeq(ret, 0); client_close_begin(client); ret = rpmem_fip_close(fip); UT_ASSERTeq(ret, 0); client_close_end(client); rpmem_fip_fini(fip); rpmem_target_free(info); return 3; } /* * server_connect -- test case for establishing connection - server side */ int server_connect(const struct test_case *tc, int argc, char *argv[]) { if (argc < 1) UT_FATAL("usage: %s <persist method>", tc->name); enum rpmem_persist_method persist_method = get_persist_method(argv[0]); unsigned nlanes; enum rpmem_provider provider; char *addr = NULL; server_exchange_begin(&nlanes, &provider, &addr); UT_ASSERTne(addr, NULL); struct rpmemd_fip_attr attr = { .addr = rpool, .size = POOL_SIZE, .nlanes = nlanes, .provider = provider, .persist_method = persist_method, .nthreads = NTHREADS, }; int ret; struct rpmem_resp_attr resp; struct rpmemd_fip *fip; enum rpmem_err err; ret = rpmemd_apply_pm_policy(&attr.persist_method, &attr.persist, &attr.memcpy_persist, 1 /* is pmem */); UT_ASSERTeq(ret, 0); fip = rpmemd_fip_init(addr, NULL, &attr, &resp, &err); UT_ASSERTne(fip, NULL); 
server_exchange_end(resp); ret = rpmemd_fip_accept(fip, -1); UT_ASSERTeq(ret, 0); server_close_begin(); server_close_end(); ret = rpmemd_fip_wait_close(fip, -1); UT_ASSERTeq(ret, 0); ret = rpmemd_fip_close(fip); UT_ASSERTeq(ret, 0); rpmemd_fip_fini(fip); FREE(addr); return 1; } /* * server_process -- test case for processing data on server side */ int server_process(const struct test_case *tc, int argc, char *argv[]) { if (argc < 1) UT_FATAL("usage: %s <persist method>", tc->name); enum rpmem_persist_method persist_method = get_persist_method(argv[0]); set_pool_data(rpool, 1); unsigned nlanes; enum rpmem_provider provider; char *addr = NULL; server_exchange_begin(&nlanes, &provider, &addr); UT_ASSERTne(addr, NULL); struct rpmemd_fip_attr attr = { .addr = rpool, .size = POOL_SIZE, .nlanes = nlanes, .provider = provider, .persist_method = persist_method, .nthreads = NTHREADS, }; int ret; struct rpmem_resp_attr resp; struct rpmemd_fip *fip; enum rpmem_err err; ret = rpmemd_apply_pm_policy(&attr.persist_method, &attr.persist, &attr.memcpy_persist, 1 /* is pmem */); UT_ASSERTeq(ret, 0); fip = rpmemd_fip_init(addr, NULL, &attr, &resp, &err); UT_ASSERTne(fip, NULL); server_exchange_end(resp); ret = rpmemd_fip_accept(fip, -1); UT_ASSERTeq(ret, 0); ret = rpmemd_fip_process_start(fip); server_close_begin(); ret = rpmemd_fip_process_stop(fip); UT_ASSERTeq(ret, 0); server_close_end(); ret = rpmemd_fip_wait_close(fip, -1); UT_ASSERTeq(ret, 0); ret = rpmemd_fip_close(fip); UT_ASSERTeq(ret, 0); rpmemd_fip_fini(fip); FREE(addr); return 1; } /* * flush_common -- common part for single-threaded persist and flush / drain * test cases */ static void flush_common(char *target, char *prov_name, char *persist_method, flush_fn flush_func) { set_rpmem_cmd("server_process %s", persist_method); char fip_service[NI_MAXSERV]; struct rpmem_target_info *info; info = rpmem_target_parse(target); UT_ASSERTne(info, NULL); int ret; set_pool_data(lpool, 1); set_pool_data(rpool, 1); struct fip_client fip_client = FIP_CLIENT_DEFAULT; get_provider(info->node, prov_name, &fip_client); client_t *client; struct rpmem_resp_attr resp; client = client_exchange(info, fip_client.nlanes, fip_client.provider, &resp); struct rpmem_fip_attr attr = { .provider = fip_client.provider, .max_wq_size = fip_client.max_wq_size, .persist_method = resp.persist_method, .laddr = lpool, .size = POOL_SIZE, .nlanes = resp.nlanes, .raddr = (void *)resp.raddr, .rkey = resp.rkey, }; ssize_t sret = SNPRINTF(fip_service, NI_MAXSERV, "%u", resp.port); UT_ASSERT(sret > 0); struct rpmem_fip *fip; fip = rpmem_fip_init(info->node, fip_service, &attr, &fip_client.nlanes); UT_ASSERTne(fip, NULL); ret = rpmem_fip_connect(fip); UT_ASSERTeq(ret, 0); struct flush_arg arg = { .fip = fip, .lane = 0, }; flush_func(&arg); ret = rpmem_fip_read(fip, rpool, POOL_SIZE, 0, 0); UT_ASSERTeq(ret, 0); client_close_begin(client); ret = rpmem_fip_close(fip); UT_ASSERTeq(ret, 0); client_close_end(client); rpmem_fip_fini(fip); ret = memcmp(rpool, lpool, POOL_SIZE); UT_ASSERTeq(ret, 0); rpmem_target_free(info); } /* * flush_common_mt -- common part for multi-threaded persist and flush / drain * test cases */ static int flush_common_mt(char *target, char *prov_name, char *persist_method, flush_fn flush_thread_func) { set_rpmem_cmd("server_process %s", persist_method); char fip_service[NI_MAXSERV]; struct rpmem_target_info *info; int ret; info = rpmem_target_parse(target); UT_ASSERTne(info, NULL); set_pool_data(lpool, 1); set_pool_data(rpool, 1); struct fip_client fip_client = 
FIP_CLIENT_DEFAULT; get_provider(info->node, prov_name, &fip_client); client_t *client; struct rpmem_resp_attr resp; client = client_exchange(info, fip_client.nlanes, fip_client.provider, &resp); struct rpmem_fip_attr attr = { .provider = fip_client.provider, .max_wq_size = fip_client.max_wq_size, .persist_method = resp.persist_method, .laddr = lpool, .size = POOL_SIZE, .nlanes = resp.nlanes, .raddr = (void *)resp.raddr, .rkey = resp.rkey, }; ssize_t sret = SNPRINTF(fip_service, NI_MAXSERV, "%u", resp.port); UT_ASSERT(sret > 0); struct rpmem_fip *fip; fip = rpmem_fip_init(info->node, fip_service, &attr, &fip_client.nlanes); UT_ASSERTne(fip, NULL); ret = rpmem_fip_connect(fip); UT_ASSERTeq(ret, 0); os_thread_t *flush_thread = MALLOC(resp.nlanes * sizeof(os_thread_t)); struct flush_arg *args = MALLOC(resp.nlanes * sizeof(struct flush_arg)); for (unsigned i = 0; i < fip_client.nlanes; i++) { args[i].fip = fip; args[i].lane = i; THREAD_CREATE(&flush_thread[i], NULL, flush_thread_func, &args[i]); } for (unsigned i = 0; i < fip_client.nlanes; i++) THREAD_JOIN(&flush_thread[i], NULL); ret = rpmem_fip_read(fip, rpool, POOL_SIZE, 0, 0); UT_ASSERTeq(ret, 0); client_close_begin(client); ret = rpmem_fip_close(fip); UT_ASSERTeq(ret, 0); client_close_end(client); rpmem_fip_fini(fip); FREE(flush_thread); FREE(args); ret = memcmp(rpool, lpool, POOL_SIZE); UT_ASSERTeq(ret, 0); rpmem_target_free(info); return 3; } /* * client_flush -- test case for single-threaded flush / drain operation */ int client_flush(const struct test_case *tc, int argc, char *argv[]) { if (argc < 3) UT_FATAL("usage: %s <target> <provider> <persist method>", tc->name); char *target = argv[0]; char *prov_name = argv[1]; char *persist_method = argv[2]; flush_common(target, prov_name, persist_method, client_flush_thread); return 3; } /* * client_flush_mt -- test case for multi-threaded flush / drain operation */ int client_flush_mt(const struct test_case *tc, int argc, char *argv[]) { if (argc < 3) UT_FATAL("usage: %s <target> <provider> <persist method>", tc->name); char *target = argv[0]; char *prov_name = argv[1]; char *persist_method = argv[2]; flush_common_mt(target, prov_name, persist_method, client_flush_thread); return 3; } /* * client_persist -- test case for single-threaded persist operation */ int client_persist(const struct test_case *tc, int argc, char *argv[]) { if (argc < 3) UT_FATAL("usage: %s <target> <provider> <persist method>", tc->name); char *target = argv[0]; char *prov_name = argv[1]; char *persist_method = argv[2]; flush_common(target, prov_name, persist_method, client_persist_thread); return 3; } /* * client_persist_mt -- test case for multi-threaded persist operation */ int client_persist_mt(const struct test_case *tc, int argc, char *argv[]) { if (argc < 3) UT_FATAL("usage: %s <target> <provider> <persist method>", tc->name); char *target = argv[0]; char *prov_name = argv[1]; char *persist_method = argv[2]; flush_common_mt(target, prov_name, persist_method, client_persist_thread); return 3; } /* * client_read -- test case for read operation */ int client_read(const struct test_case *tc, int argc, char *argv[]) { if (argc < 3) UT_FATAL("usage: %s <target> <provider> <persist method>", tc->name); char *target = argv[0]; char *prov_name = argv[1]; char *persist_method = argv[2]; set_rpmem_cmd("server_process %s", persist_method); char fip_service[NI_MAXSERV]; struct rpmem_target_info *info; int ret; info = rpmem_target_parse(target); UT_ASSERTne(info, NULL); set_pool_data(lpool, 0); set_pool_data(rpool, 1); 
struct fip_client fip_client = FIP_CLIENT_DEFAULT; get_provider(info->node, prov_name, &fip_client); client_t *client; struct rpmem_resp_attr resp; client = client_exchange(info, fip_client.nlanes, fip_client.provider, &resp); struct rpmem_fip_attr attr = { .provider = fip_client.provider, .max_wq_size = fip_client.max_wq_size, .persist_method = resp.persist_method, .laddr = lpool, .size = POOL_SIZE, .nlanes = resp.nlanes, .raddr = (void *)resp.raddr, .rkey = resp.rkey, }; ssize_t sret = SNPRINTF(fip_service, NI_MAXSERV, "%u", resp.port); UT_ASSERT(sret > 0); struct rpmem_fip *fip; fip = rpmem_fip_init(info->node, fip_service, &attr, &fip_client.nlanes); UT_ASSERTne(fip, NULL); ret = rpmem_fip_connect(fip); UT_ASSERTeq(ret, 0); /* read with len == 0 should always succeed */ ret = rpmem_fip_read(fip, lpool, 0, 0, 0); UT_ASSERTeq(ret, 0); ret = rpmem_fip_read(fip, lpool, POOL_SIZE, 0, 0); UT_ASSERTeq(ret, 0); client_close_begin(client); ret = rpmem_fip_close(fip); UT_ASSERTeq(ret, 0); client_close_end(client); rpmem_fip_fini(fip); ret = memcmp(rpool, lpool, POOL_SIZE); UT_ASSERTeq(ret, 0); rpmem_target_free(info); return 3; } #define LT_MAX_WQ_SIZE "LT_MAX_WQ_SIZE" /* < max_wq_size */ #define EQ_MAX_WQ_SIZE "EQ_MAX_WQ_SIZE" /* == max_wq_size */ #define GT_MAX_WQ_SIZE "GT_MAX_WQ_SIZE" /* > max_wq_size */ /* * client_wq_size -- test case for WQ size adjustment */ int client_wq_size(const struct test_case *tc, int argc, char *argv[]) { if (argc < 3) UT_FATAL("usage: %s <target> <provider> <persist method>" "<wq_size>", tc->name); char *target = argv[0]; char *prov_name = argv[1]; char *persist_method = argv[2]; char *wq_size_env_str = argv[3]; set_rpmem_cmd("server_process %s", persist_method); char fip_service[NI_MAXSERV]; struct rpmem_target_info *info; int ret; info = rpmem_target_parse(target); UT_ASSERTne(info, NULL); struct fip_client fip_client = FIP_CLIENT_DEFAULT; get_provider(info->node, prov_name, &fip_client); rpmem_util_get_env_max_nlanes(&fip_client.nlanes); client_t *client; struct rpmem_resp_attr resp; client = client_exchange(info, fip_client.nlanes, fip_client.provider, &resp); struct rpmem_fip_attr attr = { .provider = fip_client.provider, .max_wq_size = fip_client.max_wq_size, .persist_method = resp.persist_method, .laddr = lpool, .size = POOL_SIZE, .nlanes = resp.nlanes, .raddr = (void *)resp.raddr, .rkey = resp.rkey, }; ssize_t sret = SNPRINTF(fip_service, NI_MAXSERV, "%u", resp.port); UT_ASSERT(sret > 0); /* check RPMEM_WORK_QUEUE_SIZE env processing */ unsigned wq_size_default = Rpmem_wq_size; if (strcmp(wq_size_env_str, LT_MAX_WQ_SIZE) == 0) { Rpmem_wq_size = fip_client.max_wq_size - 1; } else if (strcmp(wq_size_env_str, EQ_MAX_WQ_SIZE) == 0) { Rpmem_wq_size = fip_client.max_wq_size; } else if (strcmp(wq_size_env_str, GT_MAX_WQ_SIZE) == 0) { Rpmem_wq_size = fip_client.max_wq_size + 1; } else { long wq_size_env = STRTOL(wq_size_env_str, NULL, 10); rpmem_util_get_env_wq_size(&Rpmem_wq_size); if (wq_size_env > 0) { if (wq_size_env < UINT_MAX) UT_ASSERT(Rpmem_wq_size == wq_size_env); else UT_ASSERT(Rpmem_wq_size == UINT_MAX); } else UT_ASSERT(Rpmem_wq_size == wq_size_default); } struct rpmem_fip *fip; fip = rpmem_fip_init(info->node, fip_service, &attr, &fip_client.nlanes); UT_ASSERTne(fip, NULL); size_t req_wq_size = rpmem_fip_wq_size( resp.persist_method, RPMEM_FIP_NODE_CLIENT); size_t eff_wq_size = rpmem_fip_get_wq_size(fip); /* max supported meets minimal requirements */ UT_ASSERT(fip_client.max_wq_size >= req_wq_size); /* calculated meets minimal requirements */ 
UT_ASSERT(eff_wq_size >= req_wq_size); /* calculated is supported */ UT_ASSERT(eff_wq_size <= fip_client.max_wq_size); /* if forced by env meets minimal requirements */ if (Rpmem_wq_size > req_wq_size) { /* and it is supported */ if (Rpmem_wq_size <= fip_client.max_wq_size) { /* calculated is >= to forced */ UT_ASSERT(eff_wq_size >= Rpmem_wq_size); } else { /* calculated is clipped to max supported */ UT_ASSERT(eff_wq_size == fip_client.max_wq_size); } } ret = rpmem_fip_connect(fip); UT_ASSERTeq(ret, 0); client_close_begin(client); ret = rpmem_fip_close(fip); UT_ASSERTeq(ret, 0); client_close_end(client); rpmem_fip_fini(fip); rpmem_target_free(info); return 4; } /* * test_cases -- available test cases */ static struct test_case test_cases[] = { TEST_CASE(client_init), TEST_CASE(server_init), TEST_CASE(client_connect), TEST_CASE(server_connect), TEST_CASE(client_flush), TEST_CASE(client_flush_mt), TEST_CASE(client_persist), TEST_CASE(client_persist_mt), TEST_CASE(server_process), TEST_CASE(client_read), TEST_CASE(client_wq_size) }; #define NTESTS (sizeof(test_cases) / sizeof(test_cases[0])) int main(int argc, char *argv[]) { /* workaround for left-opened files by libfabric */ rpmem_fip_probe_get("localhost", NULL); START(argc, argv, "rpmem_obc"); common_init("rpmem_fip", "RPMEM_LOG_LEVEL", "RPMEM_LOG_FILE", 0, 0); rpmem_util_cmds_init(); rpmemd_log_init("rpmemd", os_getenv("RPMEMD_LOG_FILE"), 0); rpmemd_log_level = rpmemd_log_level_from_str( os_getenv("RPMEMD_LOG_LEVEL")); TEST_CASE_PROCESS(argc, argv, test_cases, NTESTS); common_fini(); rpmemd_log_close(); rpmem_util_cmds_fini(); DONE(NULL); }
22,586
21.97762
78
c
null
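The multi-threaded flush and persist cases above give every lane a private, equally sized slice of the pool, so worker threads never touch each other's data and need no locking. A scaled-down, local-memory-only sketch of that partitioning (plain pthreads, no libfabric or rpmem calls; the constants are shrunk for the demo):

/*
 * Illustrative sketch only: per-lane partitioning as used by
 * client_flush_mt()/client_persist_mt().  Each thread fills only the
 * offsets belonging to its own lane.
 */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define SIZE_PER_LANE   64
#define COUNT_PER_LANE  32
#define NLANES          8
#define TOTAL_PER_LANE  (SIZE_PER_LANE * COUNT_PER_LANE)
#define POOL_SIZE       (NLANES * TOTAL_PER_LANE)

static uint8_t pool[POOL_SIZE];

static void *
lane_worker(void *arg)
{
    unsigned lane = (unsigned)(uintptr_t)arg;

    for (unsigned i = 0; i < COUNT_PER_LANE; i++) {
        size_t off = lane * TOTAL_PER_LANE + i * SIZE_PER_LANE;
        memset(&pool[off], (int)(lane + i), SIZE_PER_LANE);
    }

    return NULL;
}

int
main(void)
{
    pthread_t t[NLANES];

    for (unsigned i = 0; i < NLANES; i++)
        pthread_create(&t[i], NULL, lane_worker, (void *)(uintptr_t)i);
    for (unsigned i = 0; i < NLANES; i++)
        pthread_join(t[i], NULL);

    printf("filled %d lanes of %d bytes each\n", NLANES, TOTAL_PER_LANE);
    return 0;
}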
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/rpmem_fip/rpmem_fip_oob.h
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2016, Intel Corporation */

/*
 * rpmem_fip_oob.h -- simple oob connection implementation for exchanging
 * required RDMA related data
 */

#include <stdint.h>
#include <netinet/in.h>

typedef struct rpmem_ssh client_t;

client_t *client_exchange(struct rpmem_target_info *info,
        unsigned nlanes,
        enum rpmem_provider provider,
        struct rpmem_resp_attr *resp);

void client_close_begin(client_t *c);
void client_close_end(client_t *c);

void server_exchange_begin(unsigned *lanes, enum rpmem_provider *provider,
        char **addr);
void server_exchange_end(struct rpmem_resp_attr resp);

void server_close_begin(void);
void server_close_end(void);

void set_rpmem_cmd(const char *fmt, ...);
743
24.655172
74
h
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/pmem2_source/pmem2_source.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2019-2020, Intel Corporation */ /* * pmem2_source.c -- pmem2_source unittests */ #include "fault_injection.h" #include "libpmem2.h" #include "unittest.h" #include "ut_pmem2_utils.h" #include "ut_pmem2_config.h" #include "source.h" #include "out.h" /* * verify_fd -- verify value fd or handle in source */ static void verify_fd(struct pmem2_source *src, int fd) { #ifdef WIN32 UT_ASSERTeq(src->type, PMEM2_SOURCE_HANDLE); UT_ASSERTeq(src->value.handle, fd != INVALID_FD ? (HANDLE)_get_osfhandle(fd) : INVALID_HANDLE_VALUE); #else UT_ASSERTeq(src->type, PMEM2_SOURCE_FD); UT_ASSERTeq(src->value.fd, fd); #endif } /* * test_set_rw_fd - test setting O_RDWR fd */ static int test_set_rw_fd(const struct test_case *tc, int argc, char *argv[]) { if (argc < 1) UT_FATAL("usage: test_set_rw_fd <file>"); char *file = argv[0]; int fd = OPEN(file, O_RDWR); struct pmem2_source *src; int ret = pmem2_source_from_fd(&src, fd); UT_PMEM2_EXPECT_RETURN(ret, 0); UT_ASSERTne(src, NULL); verify_fd(src, fd); ret = pmem2_source_delete(&src); UT_PMEM2_EXPECT_RETURN(ret, 0); UT_ASSERTeq(src, NULL); CLOSE(fd); return 1; } /* * test_set_ro_fd - test setting O_RDONLY fd */ static int test_set_ro_fd(const struct test_case *tc, int argc, char *argv[]) { if (argc < 1) UT_FATAL("usage: test_set_ro_fd <file>"); char *file = argv[0]; int fd = OPEN(file, O_RDONLY); struct pmem2_source *src; int ret = pmem2_source_from_fd(&src, fd); UT_PMEM2_EXPECT_RETURN(ret, 0); UT_ASSERTne(src, NULL); verify_fd(src, fd); ret = pmem2_source_delete(&src); UT_PMEM2_EXPECT_RETURN(ret, 0); UT_ASSERTeq(src, NULL); CLOSE(fd); return 1; } /* * test_set_invalid_fd - test setting invalid fd */ static int test_set_invalid_fd(const struct test_case *tc, int argc, char *argv[]) { if (argc < 1) UT_FATAL("usage: test_set_invalid_fd <file>"); char *file = argv[0]; /* open and close the file to get invalid fd */ int fd = OPEN(file, O_WRONLY); CLOSE(fd); ut_suppress_crt_assert(); struct pmem2_source *src; int ret = pmem2_source_from_fd(&src, fd); ut_unsuppress_crt_assert(); UT_PMEM2_EXPECT_RETURN(ret, PMEM2_E_INVALID_FILE_HANDLE); UT_ASSERTeq(src, NULL); return 1; } /* * test_set_wronly_fd - test setting wronly fd */ static int test_set_wronly_fd(const struct test_case *tc, int argc, char *argv[]) { if (argc < 1) UT_FATAL("usage: test_set_wronly_fd <file>"); char *file = argv[0]; int fd = OPEN(file, O_WRONLY); struct pmem2_source *src; int ret = pmem2_source_from_fd(&src, fd); #ifdef _WIN32 /* windows doesn't validate open flags */ UT_PMEM2_EXPECT_RETURN(ret, 0); verify_fd(src, fd); ret = pmem2_source_delete(&src); UT_PMEM2_EXPECT_RETURN(ret, 0); UT_ASSERTeq(src, NULL); #else UT_ASSERTeq(src, NULL); UT_PMEM2_EXPECT_RETURN(ret, PMEM2_E_INVALID_FILE_HANDLE); #endif CLOSE(fd); return 1; } /* * test_alloc_src_enomem - test pmem2_source allocation with error injection */ static int test_alloc_src_enomem(const struct test_case *tc, int argc, char *argv[]) { if (argc < 1) UT_FATAL("usage: test_alloc_src_enomem <file>"); char *file = argv[0]; struct pmem2_source *src; if (!core_fault_injection_enabled()) { return 1; } int fd = OPEN(file, O_RDWR); core_inject_fault_at(PMEM_MALLOC, 1, "pmem2_malloc"); int ret = pmem2_source_from_fd(&src, fd); UT_PMEM2_EXPECT_RETURN(ret, -ENOMEM); UT_ASSERTeq(src, NULL); CLOSE(fd); return 1; } /* * test_delete_null_config - test pmem2_source_delete on NULL config */ static int test_delete_null_config(const struct test_case *tc, int argc, char *argv[]) { struct pmem2_source *src = NULL; /* 
should not crash */ int ret = pmem2_source_delete(&src); UT_PMEM2_EXPECT_RETURN(ret, 0); UT_ASSERTeq(src, NULL); return 0; } #ifdef WIN32 /* * test_set_handle - test setting valid handle */ static int test_set_handle(const struct test_case *tc, int argc, char *argv[]) { if (argc < 1) UT_FATAL("usage: test_set_handle <file>"); char *file = argv[0]; HANDLE h = CreateFile(file, GENERIC_READ | GENERIC_WRITE, 0, NULL, OPEN_ALWAYS, 0, NULL); UT_ASSERTne(h, INVALID_HANDLE_VALUE); struct pmem2_source *src; int ret = pmem2_source_from_handle(&src, h); UT_PMEM2_EXPECT_RETURN(ret, 0); UT_ASSERTeq(src->value.handle, h); CloseHandle(h); pmem2_source_delete(&src); UT_PMEM2_EXPECT_RETURN(ret, 0); UT_ASSERTeq(src, NULL); return 1; } /* * test_set_null_handle - test resetting handle */ static int test_set_null_handle(const struct test_case *tc, int argc, char *argv[]) { struct pmem2_source *src; int ret = pmem2_source_from_handle(&src, INVALID_HANDLE_VALUE); UT_PMEM2_EXPECT_RETURN(ret, PMEM2_E_INVALID_FILE_HANDLE); UT_ASSERTeq(src, NULL); return 0; } /* * test_set_invalid_handle - test setting invalid handle */ static int test_set_invalid_handle(const struct test_case *tc, int argc, char *argv[]) { if (argc < 1) UT_FATAL("usage: test_set_invalid_handle <file>"); char *file = argv[0]; struct pmem2_source *src; HANDLE h = CreateFile(file, GENERIC_READ | GENERIC_WRITE, 0, NULL, OPEN_ALWAYS, 0, NULL); UT_ASSERTne(h, INVALID_HANDLE_VALUE); CloseHandle(h); int ret = pmem2_source_from_handle(&src, h); UT_PMEM2_EXPECT_RETURN(ret, PMEM2_E_INVALID_FILE_HANDLE); return 1; } /* * test_set_directory_handle - test setting a directory handle */ static int test_set_directory_handle(const struct test_case *tc, int argc, char *argv[]) { if (argc < 1) UT_FATAL("usage: test_set_directory_handle <file>"); char *file = argv[0]; struct pmem2_source *src; HANDLE h = CreateFile(file, GENERIC_READ | GENERIC_WRITE, 0, NULL, OPEN_ALWAYS, FILE_FLAG_BACKUP_SEMANTICS, NULL); UT_ASSERTne(h, INVALID_HANDLE_VALUE); int ret = pmem2_source_from_handle(&src, h); UT_PMEM2_EXPECT_RETURN(ret, PMEM2_E_INVALID_FILE_TYPE); UT_ASSERTeq(src, NULL); CloseHandle(h); return 1; } /* * test_set_directory_handle - test setting a mutex handle */ static int test_set_mutex_handle(const struct test_case *tc, int argc, char *argv[]) { struct pmem2_source *src; HANDLE h = CreateMutex(NULL, FALSE, NULL); UT_ASSERTne(h, INVALID_HANDLE_VALUE); int ret = pmem2_source_from_handle(&src, h); UT_PMEM2_EXPECT_RETURN(ret, PMEM2_E_INVALID_FILE_HANDLE); UT_ASSERTeq(src, NULL); CloseHandle(h); return 0; } #else /* * test_set_directory_handle - test setting directory's fd */ static int test_set_directory_fd(const struct test_case *tc, int argc, char *argv[]) { if (argc < 1) UT_FATAL("usage: test_set_directory_fd <file>"); char *file = argv[0]; struct pmem2_source *src; int fd = OPEN(file, O_RDONLY); int ret = pmem2_source_from_fd(&src, fd); UT_PMEM2_EXPECT_RETURN(ret, PMEM2_E_INVALID_FILE_TYPE); CLOSE(fd); return 1; } #endif /* * test_cases -- available test cases */ static struct test_case test_cases[] = { TEST_CASE(test_set_rw_fd), TEST_CASE(test_set_ro_fd), TEST_CASE(test_set_invalid_fd), TEST_CASE(test_set_wronly_fd), TEST_CASE(test_alloc_src_enomem), TEST_CASE(test_delete_null_config), #ifdef _WIN32 TEST_CASE(test_set_handle), TEST_CASE(test_set_null_handle), TEST_CASE(test_set_invalid_handle), TEST_CASE(test_set_directory_handle), TEST_CASE(test_set_mutex_handle), #else TEST_CASE(test_set_directory_fd), #endif }; #define NTESTS (sizeof(test_cases) / 
sizeof(test_cases[0])) int main(int argc, char **argv) { START(argc, argv, "pmem2_source"); util_init(); out_init("pmem2_source", "TEST_LOG_LEVEL", "TEST_LOG_FILE", 0, 0); TEST_CASE_PROCESS(argc, argv, test_cases, NTESTS); out_fini(); DONE(NULL); }
7,608
20.433803
77
c
null
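For context, the API exercised above can be driven by a very small stand-alone program. The sketch below assumes libpmem2 is installed and linked with -lpmem2; it only creates and deletes a source the way the positive test cases do, reporting failures through pmem2_errormsg() as the tests expect:

/*
 * Illustrative sketch only: create a pmem2 source from a file
 * descriptor and clean it up.
 */
#include <fcntl.h>
#include <libpmem2.h>
#include <stdio.h>
#include <unistd.h>

int
main(int argc, char *argv[])
{
    if (argc < 2) {
        fprintf(stderr, "usage: %s <file>\n", argv[0]);
        return 1;
    }

    int fd = open(argv[1], O_RDWR);
    if (fd < 0) {
        perror("open");
        return 1;
    }

    struct pmem2_source *src;
    int ret = pmem2_source_from_fd(&src, fd);
    if (ret) {
        fprintf(stderr, "pmem2_source_from_fd: %s\n", pmem2_errormsg());
        close(fd);
        return 1;
    }

    /* the source would normally feed the mapping call next */

    pmem2_source_delete(&src);
    close(fd);
    return 0;
}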
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/tools/ddmap/ddmap.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2014-2018, Intel Corporation */ /* * ddmap.c -- simple app for reading and writing data from/to a regular file or * dax device using mmap instead of file io API */ #include <stdio.h> #include <unistd.h> #include <getopt.h> #include <stdlib.h> #include <sys/mman.h> #include <errno.h> #include <fcntl.h> #include <inttypes.h> #include "common.h" #include "output.h" #include "mmap.h" #include "file.h" #include "util.h" #include "os.h" /* * ddmap_context -- context and arguments */ struct ddmap_context { char *file_in; /* input file name */ char *file_out; /* output file name */ char *str; /* string data to write */ size_t offset_in; /* offset from beginning of input file for */ /* read/write operations expressed in blocks */ size_t offset_out; /* offset from beginning of output file for */ /* read/write operations expressed in blocks */ size_t bytes; /* size of blocks to write at the time */ size_t count; /* number of blocks to read/write */ int checksum; /* compute checksum */ int runlen; /* print bytes as runlen/char sequence */ }; /* * the default context, with all fields initialized to zero or NULL */ static struct ddmap_context ddmap_default; /* * print_usage -- print short description of usage */ static void print_usage(void) { printf("Usage: ddmap [option] ...\n"); printf("Valid options:\n"); printf("-i FILE - read from FILE\n"); printf("-o FILE - write to FILE\n"); printf("-d STRING - STRING to be written\n"); printf("-s N - skip N blocks at start of input\n"); printf("-q N - skip N blocks at start of output\n"); printf("-b N - read/write N bytes at a time\n"); printf("-n N - copy N input blocks\n"); printf("-c - compute checksum\n"); printf("-r - print file content as runlen/char pairs\n"); printf("-h - print this usage info\n"); } /* * long_options -- command line options */ static const struct option long_options[] = { {"input-file", required_argument, NULL, 'i'}, {"output-file", required_argument, NULL, 'o'}, {"string", required_argument, NULL, 'd'}, {"offset-in", required_argument, NULL, 's'}, {"offset-out", required_argument, NULL, 'q'}, {"block-size", required_argument, NULL, 'b'}, {"count", required_argument, NULL, 'n'}, {"checksum", no_argument, NULL, 'c'}, {"runlen", no_argument, NULL, 'r'}, {"help", no_argument, NULL, 'h'}, {NULL, 0, NULL, 0 }, }; /* * ddmap_print_char -- (internal) print single char * * Printable ASCII characters are printed normally, * NUL character is printed as a little circle (the degree symbol), * non-printable ASCII characters are printed as centered dots. */ static void ddmap_print_char(char c) { if (c == '\0') /* print the degree symbol for NUL */ printf("\u00B0"); else if (c >= ' ' && c <= '~') /* print printable ASCII character */ printf("%c", c); else /* print centered dot for non-printable character */ printf("\u00B7"); } /* * ddmap_print_runlen -- (internal) print file content as length/char pairs * * For each sequence of chars of the same value (could be just 1 byte) * print length of the sequence and the char value. 
*/ static void ddmap_print_runlen(char *addr, size_t len) { char c = '\0'; ssize_t cnt = 0; for (size_t i = 0; i < len; i++) { if (i > 0 && c != addr[i] && cnt != 0) { printf("%zd ", cnt); ddmap_print_char(c); printf("\n"); cnt = 0; } c = addr[i]; cnt++; } if (cnt) { printf("%zd ", cnt); ddmap_print_char(c); printf("\n"); } } /* * ddmap_print_bytes -- (internal) print array of bytes */ static void ddmap_print_bytes(const char *data, size_t len) { for (size_t i = 0; i < len; ++i) { ddmap_print_char(data[i]); } printf("\n"); } /* * ddmap_read -- (internal) read a string from the file at the offset and * print it to stdout */ static int ddmap_read(const char *path, size_t offset_in, size_t bytes, size_t count, int runlen) { size_t len = bytes * count; os_off_t offset = (os_off_t)(bytes * offset_in); char *read_buff = Zalloc(len + 1); if (read_buff == NULL) { outv_err("Zalloc(%zu) failed\n", len + 1); return -1; } ssize_t read_len = util_file_pread(path, read_buff, len, offset); if (read_len < 0) { outv_err("pread failed"); Free(read_buff); return -1; } else if ((size_t)read_len < len) { outv(1, "read less bytes than requested: %zd vs. %zu\n", read_len, len); } if (runlen) ddmap_print_runlen(read_buff, (size_t)read_len); else ddmap_print_bytes(read_buff, (size_t)read_len); Free(read_buff); return 0; } /* * ddmap_zero -- (internal) zero a range of data in the file */ static int ddmap_zero(const char *path, size_t offset, size_t len) { void *addr; ssize_t filesize = util_file_get_size(path); if (filesize < 0) { outv_err("invalid file size"); return -1; } if (offset + len > (size_t)filesize) len = (size_t)filesize - offset; addr = util_file_map_whole(path); if (addr == NULL) { outv_err("map failed"); return -1; } memset((char *)addr + offset, 0, len); util_unmap(addr, (size_t)filesize); return 0; } /* * ddmap_write_data -- (internal) write data to a file */ static int ddmap_write_data(const char *path, const char *data, os_off_t offset, size_t len) { if (util_file_pwrite(path, data, len, offset) < 0) { outv_err("pwrite for dax device failed: path %s," " len %zu, offset %zd", path, len, offset); return -1; } return 0; } /* * ddmap_write_from_file -- (internal) write data from file to dax device or * file */ static int ddmap_write_from_file(const char *path_in, const char *path_out, size_t offset_in, size_t offset_out, size_t bytes, size_t count) { char *src, *tmp_src; os_off_t offset; ssize_t file_in_size = util_file_get_size(path_in); size_t data_left, len; util_init(); src = util_file_map_whole(path_in); src += (os_off_t)(offset_in * bytes); offset = (os_off_t)(offset_out * bytes); data_left = (size_t)file_in_size; tmp_src = src; do { len = MIN(data_left, bytes); ddmap_write_data(path_out, tmp_src, offset, len); tmp_src += len; data_left -= len; if (data_left == 0) { data_left = (size_t)file_in_size; tmp_src = src; } offset += (os_off_t)len; count--; } while (count > 0); util_unmap(src, (size_t)file_in_size); return 0; } /* * ddmap_write -- (internal) write the string to the file */ static int ddmap_write(const char *path, const char *str, size_t offset_in, size_t bytes, size_t count) { /* calculate how many characters from the string are to be written */ size_t length; size_t str_len = (str != NULL) ? 
strlen(str) + 1 : 0; os_off_t offset = (os_off_t)(bytes * offset_in); size_t len = bytes * count; if (len == 0) length = str_len; else length = min(len, str_len); /* write the string */ if (length > 0) { if (ddmap_write_data(path, str, offset, length)) return -1; } /* zero the rest of requested range */ if (length < len) { if (ddmap_zero(path, (size_t)offset + length, len - length)) return -1; } return 0; } /* * ddmap_checksum -- (internal) compute checksum of a slice of an input file */ static int ddmap_checksum(const char *path, size_t bytes, size_t count, size_t offset_in) { char *src; uint64_t checksum; ssize_t filesize = util_file_get_size(path); os_off_t offset = (os_off_t)(bytes * offset_in); size_t len = bytes * count; if ((size_t)filesize < len + (size_t)offset) { outv_err("offset with length exceed file size"); return -1; } util_init(); src = util_file_map_whole(path); util_checksum(src + offset, len, &checksum, 1, 0); util_unmap(src, (size_t)filesize); printf("%" PRIu64 "\n", checksum); return 0; } /* * parse_args -- (internal) parse command line arguments */ static int parse_args(struct ddmap_context *ctx, int argc, char *argv[]) { int opt; char *endptr; size_t offset; size_t count; size_t bytes; while ((opt = getopt_long(argc, argv, "i:o:d:s:q:b:n:crhv", long_options, NULL)) != -1) { switch (opt) { case 'i': ctx->file_in = optarg; break; case 'o': ctx->file_out = optarg; break; case 'd': ctx->str = optarg; if (ctx->count == 0) ctx->count = strlen(ctx->str); if (ctx->bytes == 0) ctx->bytes = 1; break; case 's': errno = 0; offset = strtoul(optarg, &endptr, 0); if ((endptr && *endptr != '\0') || errno) { outv_err("'%s' -- invalid input offset", optarg); return -1; } ctx->offset_in = offset; break; case 'q': errno = 0; offset = strtoul(optarg, &endptr, 0); if ((endptr && *endptr != '\0') || errno) { outv_err("'%s' -- invalid output offset", optarg); return -1; } ctx->offset_out = offset; break; case 'b': errno = 0; bytes = strtoull(optarg, &endptr, 0); if ((endptr && *endptr != '\0') || errno) { outv_err("'%s' -- invalid block size", optarg); return -1; } ctx->bytes = bytes; break; case 'n': errno = 0; count = strtoull(optarg, &endptr, 0); if ((endptr && *endptr != '\0') || errno) { outv_err("'%s' -- invalid count", optarg); return -1; } ctx->count = count; break; case 'c': ctx->checksum = 1; break; case 'r': ctx->runlen = 1; break; case 'h': print_usage(); exit(EXIT_SUCCESS); case 'v': out_set_vlevel(1); break; default: print_usage(); exit(EXIT_FAILURE); } } return 0; } /* * validate_args -- (internal) validate arguments */ static int validate_args(struct ddmap_context *ctx) { if ((ctx->file_in == NULL) && (ctx->file_out == NULL)) { outv_err("an input file and/or an output file must be " "provided"); return -1; } else if (ctx->file_out == NULL) { if (ctx->bytes == 0) { outv_err("number of bytes to read has to be provided"); return -1; } } else if (ctx->file_in == NULL) { /* ddmap_write requirements */ if (ctx->str == NULL && (ctx->count * ctx->bytes) == 0) { outv_err("when writing, 'data' or 'count' and 'bytes' " "have to be provided"); return -1; } } else { /* scenarios other than ddmap_write requirement */ if ((ctx->bytes * ctx->count) == 0) { outv_err("number of bytes and count must be provided"); return -1; } } return 0; } /* * do_ddmap -- (internal) perform ddmap */ static int do_ddmap(struct ddmap_context *ctx) { if ((ctx->file_in != NULL) && (ctx->file_out != NULL)) { if (ddmap_write_from_file(ctx->file_in, ctx->file_out, ctx->offset_in, ctx->offset_out, ctx->bytes, 
ctx->count)) return -1; return 0; } if ((ctx->checksum == 1) && (ctx->file_in != NULL)) { if (ddmap_checksum(ctx->file_in, ctx->bytes, ctx->count, ctx->offset_in)) return -1; return 0; } if (ctx->file_in != NULL) { if (ddmap_read(ctx->file_in, ctx->offset_in, ctx->bytes, ctx->count, ctx->runlen)) return -1; } else { /* ctx->file_out != NULL */ if (ddmap_write(ctx->file_out, ctx->str, ctx->offset_in, ctx->bytes, ctx->count)) return -1; } return 0; } int main(int argc, char *argv[]) { #ifdef _WIN32 util_suppress_errmsg(); wchar_t **wargv = CommandLineToArgvW(GetCommandLineW(), &argc); for (int i = 0; i < argc; i++) { argv[i] = util_toUTF8(wargv[i]); if (argv[i] == NULL) { for (i--; i >= 0; i--) free(argv[i]); outv_err("Error during arguments conversion\n"); return 1; } } #endif int ret = 0; struct ddmap_context ctx = ddmap_default; if ((ret = parse_args(&ctx, argc, argv))) goto out; if ((ret = validate_args(&ctx))) goto out; if ((ret = do_ddmap(&ctx))) { outv_err("failed to perform ddmap\n"); if (errno) outv_err("errno: %s\n", strerror(errno)); ret = -1; goto out; } out: #ifdef _WIN32 for (int i = argc; i > 0; i--) free(argv[i - 1]); #endif return ret; }
11,872
22.280392
79
c
null
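The numeric options in parse_args() above are validated with the usual strtoul/endptr/errno pattern. A tiny self-contained helper showing the same kind of checks (function and message names are illustrative only):

/*
 * Illustrative sketch only: errno is cleared before the conversion, and
 * both a non-empty remainder in *endptr and a set errno (e.g. ERANGE)
 * reject the input.
 */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static int
parse_size(const char *s, size_t *out)
{
    char *endptr;

    errno = 0;
    unsigned long val = strtoul(s, &endptr, 0);
    if (endptr == s || *endptr != '\0' || errno != 0)
        return -1;    /* not a number, trailing junk, or overflow */

    *out = (size_t)val;
    return 0;
}

int
main(int argc, char *argv[])
{
    size_t n;

    if (argc < 2 || parse_size(argv[1], &n)) {
        fprintf(stderr, "usage: %s <number>\n", argv[0]);
        return 1;
    }

    printf("parsed %zu\n", n);
    return 0;
}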
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/tools/dllview/dllview.c
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2014-2018, Intel Corporation */

/*
 * dllview.c -- a simple utility displaying the list of symbols exported by DLL
 *
 * usage: dllview filename
 */

#include <windows.h>
#include <stdio.h>
#include <winnt.h>
#include <imagehlp.h>
#include "util.h"

int
main(int argc, char *argv[])
{
    util_suppress_errmsg();

    if (argc < 2) {
        fprintf(stderr, "usage: %s dllname\n", argv[0]);
        exit(1);
    }

    const char *dllname = argv[1];

    LOADED_IMAGE img;
    if (MapAndLoad(dllname, NULL, &img, 1, 1) == FALSE) {
        fprintf(stderr, "cannot load DLL image\n");
        exit(2);
    }

    IMAGE_EXPORT_DIRECTORY *dir;
    ULONG dirsize;
    dir = (IMAGE_EXPORT_DIRECTORY *)ImageDirectoryEntryToData(
            img.MappedAddress, 0 /* mapped as image */,
            IMAGE_DIRECTORY_ENTRY_EXPORT, &dirsize);
    if (dir == NULL) {
        fprintf(stderr, "cannot read image directory\n");
        UnMapAndLoad(&img);
        exit(3);
    }

    DWORD *rva;
    rva = (DWORD *)ImageRvaToVa(img.FileHeader, img.MappedAddress,
            dir->AddressOfNames, NULL);

    for (DWORD i = 0; i < dir->NumberOfNames; i++) {
        char *name = (char *)ImageRvaToVa(img.FileHeader,
                img.MappedAddress, rva[i], NULL);
        printf("%s\n", name);
    }

    UnMapAndLoad(&img);
    return 0;
}
1,233
20.649123
79
c
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/tools/cmpmap/cmpmap.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2017-2019, Intel Corporation */ /* * cmpmap -- a tool for comparing files using mmap */ #include <stdlib.h> #include <stdio.h> #include <getopt.h> #include <sys/mman.h> #include <assert.h> #include <string.h> #include <errno.h> #include <unistd.h> #include "file.h" #include "fcntl.h" #include "mmap.h" #include "os.h" #include "util.h" #define CMPMAP_ZERO (1<<0) #define ADDR_SUM(vp, lp) ((void *)((char *)(vp) + (lp))) /* arguments */ static char *File1 = NULL; /* file1 name */ static char *File2 = NULL; /* file2 name */ static size_t Length = 0; /* number of bytes to read */ static os_off_t Offset = 0; /* offset from beginning of file */ static int Opts = 0; /* options flag */ /* * print_usage -- print short description of usage */ static void print_usage(void) { printf("Usage: cmpmap [options] file1 [file2]\n"); printf("Valid options:\n"); printf("-l, --length=N - compare up to N bytes\n"); printf("-o, --offset=N - skip N bytes at start of the files\n"); printf("-z, --zero - compare bytes of the file1 to NUL\n"); printf("-h, --help - print this usage info\n"); } /* * long_options -- command line options */ static const struct option long_options[] = { {"length", required_argument, NULL, 'l'}, {"offset", required_argument, NULL, 'o'}, {"zero", no_argument, NULL, 'z'}, {"help", no_argument, NULL, 'h'}, {NULL, 0, NULL, 0 }, }; /* * parse_args -- (internal) parse command line arguments */ static int parse_args(int argc, char *argv[]) { int opt; char *endptr; os_off_t off; ssize_t len; while ((opt = getopt_long(argc, argv, "l:o:zh", long_options, NULL)) != -1) { switch (opt) { case 'l': errno = 0; len = strtoll(optarg, &endptr, 0); if ((endptr && *endptr != '\0') || errno || len < 0) { fprintf(stderr, "'%s' -- invalid length", optarg); return -1; } Length = (size_t)len; break; case 'o': errno = 0; off = strtol(optarg, &endptr, 0); if ((endptr && *endptr != '\0') || errno || off < 0) { fprintf(stderr, "'%s' -- invalid offset", optarg); return -1; } Offset = off; break; case 'z': Opts |= CMPMAP_ZERO; break; case 'h': print_usage(); return 0; default: print_usage(); return -1; } } if (optind < argc) { File1 = argv[optind]; if (optind + 1 < argc) File2 = argv[optind + 1]; } else { print_usage(); return -1; } return 0; } /* * validate_args -- (internal) validate arguments */ static int validate_args(void) { if (File1 == NULL) { fprintf(stderr, "no file provided"); return -1; } else if (File2 == NULL && Length == 0) { fprintf(stderr, "length of the file has to be provided"); return -1; } return 0; } /* * do_cmpmap -- (internal) perform cmpmap */ static int do_cmpmap(void) { int ret = 0; int fd1; int fd2; size_t size1; size_t size2; /* open the first file */ if ((fd1 = os_open(File1, O_RDONLY)) < 0) { fprintf(stderr, "opening %s failed, errno %d\n", File1, errno); return -1; } ssize_t size_tmp = util_fd_get_size(fd1); if (size_tmp < 0) { fprintf(stderr, "getting size of %s failed, errno %d\n", File1, errno); ret = -1; goto out_close1; } size1 = (size_t)size_tmp; int flag = MAP_SHARED; if (Opts & CMPMAP_ZERO) { /* when checking if bytes are zeroed */ fd2 = -1; size2 = (size_t)Offset + Length; flag |= MAP_ANONYMOUS; } else if (File2 != NULL) { /* when comparing two files */ /* open the second file */ if ((fd2 = os_open(File2, O_RDONLY)) < 0) { fprintf(stderr, "opening %s failed, errno %d\n", File2, errno); ret = -1; goto out_close1; } size_tmp = util_fd_get_size(fd2); if (size_tmp < 0) { fprintf(stderr, "getting size of %s failed, errno %d\n", File2, 
errno); ret = -1; goto out_close2; } size2 = (size_t)size_tmp; /* basic check */ size_t min_size = (size1 < size2) ? size1 : size2; if ((size_t)Offset + Length > min_size) { if (size1 != size2) { fprintf(stdout, "%s %s differ in size: %zu" " %zu\n", File1, File2, size1, size2); ret = -1; goto out_close2; } else { Length = min_size - (size_t)Offset; } } } else { assert(0); } /* initialize utils */ util_init(); /* map the first file */ void *addr1; if ((addr1 = util_map(fd1, 0, size1, MAP_SHARED, 1, 0, NULL)) == MAP_FAILED) { fprintf(stderr, "mmap failed, file %s, length %zu, offset 0," " errno %d\n", File1, size1, errno); ret = -1; goto out_close2; } /* map the second file, or do anonymous mapping to get zeroed bytes */ void *addr2; if ((addr2 = util_map(fd2, 0, size2, flag, 1, 0, NULL)) == MAP_FAILED) { fprintf(stderr, "mmap failed, file %s, length %zu, errno %d\n", File2 ? File2 : "(anonymous)", size2, errno); ret = -1; goto out_unmap1; } /* compare bytes of memory */ if ((ret = memcmp(ADDR_SUM(addr1, Offset), ADDR_SUM(addr2, Offset), Length))) { if (Opts & CMPMAP_ZERO) fprintf(stderr, "%s is not zeroed\n", File1); else fprintf(stderr, "%s %s differ\n", File1, File2); ret = -1; } munmap(addr2, size2); out_unmap1: munmap(addr1, size1); out_close2: if (File2 != NULL) (void) os_close(fd2); out_close1: (void) os_close(fd1); return ret; } int main(int argc, char *argv[]) { #ifdef _WIN32 util_suppress_errmsg(); wchar_t **wargv = CommandLineToArgvW(GetCommandLineW(), &argc); for (int i = 0; i < argc; i++) { argv[i] = util_toUTF8(wargv[i]); if (argv[i] == NULL) { for (i--; i >= 0; i--) free(argv[i]); fprintf(stderr, "Error during arguments conversion\n"); return 1; } } #endif int ret = EXIT_FAILURE; if (parse_args(argc, argv)) goto end; if (validate_args()) goto end; if (do_cmpmap()) goto end; ret = EXIT_SUCCESS; end: #ifdef _WIN32 for (int i = argc; i > 0; i--) free(argv[i - 1]); #endif exit(ret); }
5,918
20.291367
73
c
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/tools/ctrld/signals_linux.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2017, Intel Corporation */ /* * signals_linux.h - Signal definitions for Linux */ #ifndef _SIGNALS_LINUX_H #define _SIGNALS_LINUX_H 1 #define SIGNAL_2_STR(sig) [sig] = #sig static const char *signal2str[] = { SIGNAL_2_STR(SIGHUP), /* 1 */ SIGNAL_2_STR(SIGINT), /* 2 */ SIGNAL_2_STR(SIGQUIT), /* 3 */ SIGNAL_2_STR(SIGILL), /* 4 */ SIGNAL_2_STR(SIGTRAP), /* 5 */ SIGNAL_2_STR(SIGABRT), /* 6 */ SIGNAL_2_STR(SIGBUS), /* 7 */ SIGNAL_2_STR(SIGFPE), /* 8 */ SIGNAL_2_STR(SIGKILL), /* 9 */ SIGNAL_2_STR(SIGUSR1), /* 10 */ SIGNAL_2_STR(SIGSEGV), /* 11 */ SIGNAL_2_STR(SIGUSR2), /* 12 */ SIGNAL_2_STR(SIGPIPE), /* 13 */ SIGNAL_2_STR(SIGALRM), /* 14 */ SIGNAL_2_STR(SIGTERM), /* 15 */ SIGNAL_2_STR(SIGSTKFLT), /* 16 */ SIGNAL_2_STR(SIGCHLD), /* 17 */ SIGNAL_2_STR(SIGCONT), /* 18 */ SIGNAL_2_STR(SIGSTOP), /* 19 */ SIGNAL_2_STR(SIGTSTP), /* 20 */ SIGNAL_2_STR(SIGTTIN), /* 21 */ SIGNAL_2_STR(SIGTTOU), /* 22 */ SIGNAL_2_STR(SIGURG), /* 23 */ SIGNAL_2_STR(SIGXCPU), /* 24 */ SIGNAL_2_STR(SIGXFSZ), /* 25 */ SIGNAL_2_STR(SIGVTALRM), /* 26 */ SIGNAL_2_STR(SIGPROF), /* 27 */ SIGNAL_2_STR(SIGWINCH), /* 28 */ SIGNAL_2_STR(SIGPOLL), /* 29 */ SIGNAL_2_STR(SIGPWR), /* 30 */ SIGNAL_2_STR(SIGSYS) /* 31 */ }; #define SIGNALMAX SIGSYS #endif
1,322
27.148936
49
h
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/tools/ctrld/signals_freebsd.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2017, Intel Corporation */ /* * signals_fbsd.h - Signal definitions for FreeBSD */ #ifndef _SIGNALS_FBSD_H #define _SIGNALS_FBSD_H 1 #define SIGNAL_2_STR(sig) [sig] = #sig static const char *signal2str[] = { SIGNAL_2_STR(SIGHUP), /* 1 */ SIGNAL_2_STR(SIGINT), /* 2 */ SIGNAL_2_STR(SIGQUIT), /* 3 */ SIGNAL_2_STR(SIGILL), /* 4 */ SIGNAL_2_STR(SIGTRAP), /* 5 */ SIGNAL_2_STR(SIGABRT), /* 6 */ SIGNAL_2_STR(SIGEMT), /* 7 */ SIGNAL_2_STR(SIGFPE), /* 8 */ SIGNAL_2_STR(SIGKILL), /* 9 */ SIGNAL_2_STR(SIGBUS), /* 10 */ SIGNAL_2_STR(SIGSEGV), /* 11 */ SIGNAL_2_STR(SIGSYS), /* 12 */ SIGNAL_2_STR(SIGPIPE), /* 13 */ SIGNAL_2_STR(SIGALRM), /* 14 */ SIGNAL_2_STR(SIGTERM), /* 15 */ SIGNAL_2_STR(SIGURG), /* 16 */ SIGNAL_2_STR(SIGSTOP), /* 17 */ SIGNAL_2_STR(SIGTSTP), /* 18 */ SIGNAL_2_STR(SIGCONT), /* 19 */ SIGNAL_2_STR(SIGCHLD), /* 20 */ SIGNAL_2_STR(SIGTTIN), /* 21 */ SIGNAL_2_STR(SIGTTOU), /* 22 */ SIGNAL_2_STR(SIGIO), /* 23 */ SIGNAL_2_STR(SIGXCPU), /* 24 */ SIGNAL_2_STR(SIGXFSZ), /* 25 */ SIGNAL_2_STR(SIGVTALRM), /* 26 */ SIGNAL_2_STR(SIGPROF), /* 27 */ SIGNAL_2_STR(SIGWINCH), /* 28 */ SIGNAL_2_STR(SIGINFO), /* 29 */ SIGNAL_2_STR(SIGUSR1), /* 30 */ SIGNAL_2_STR(SIGUSR2), /* 31 */ SIGNAL_2_STR(SIGTHR), /* 32 */ SIGNAL_2_STR(SIGLIBRT) /* 33 */ }; #define SIGNALMAX SIGLIBRT #endif
1,386
26.74
50
h
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_locks/obj_locks.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2016-2020, Intel Corporation */ /* * obj_locks.c -- unit test for PMEMmutex, PMEMrwlock and PMEMcond */ #include <sys/param.h> #include <string.h> #include "unittest.h" #include "libpmemobj.h" #define LAYOUT_NAME "obj_locks" #define NUM_THREADS 16 #define MAX_FUNC 5 TOID_DECLARE(struct locks, 0); struct locks { PMEMobjpool *pop; PMEMmutex mtx; PMEMrwlock rwlk; PMEMcond cond; int data; }; struct thread_args { os_thread_t t; TOID(struct locks) lock; int t_id; }; typedef void *(*fn_lock)(void *arg); static struct thread_args threads[NUM_THREADS]; /* * do_mutex_lock -- lock and unlock the mutex */ static void * do_mutex_lock(void *arg) { struct thread_args *t = (struct thread_args *)arg; struct locks *lock = D_RW(t->lock); pmemobj_mutex_lock(lock->pop, &lock->mtx); lock->data++; pmemobj_persist(lock->pop, &lock->data, sizeof(lock->data)); pmemobj_mutex_unlock(lock->pop, &lock->mtx); return NULL; } /* * do_rwlock_wrlock -- lock and unlock the write rwlock */ static void * do_rwlock_wrlock(void *arg) { struct thread_args *t = (struct thread_args *)arg; struct locks *lock = D_RW(t->lock); pmemobj_rwlock_wrlock(lock->pop, &lock->rwlk); lock->data++; pmemobj_persist(lock->pop, &lock->data, sizeof(lock->data)); pmemobj_rwlock_unlock(lock->pop, &lock->rwlk); return NULL; } /* * do_rwlock_rdlock -- lock and unlock the read rwlock */ static void * do_rwlock_rdlock(void *arg) { struct thread_args *t = (struct thread_args *)arg; struct locks *lock = D_RW(t->lock); pmemobj_rwlock_rdlock(lock->pop, &lock->rwlk); pmemobj_rwlock_unlock(lock->pop, &lock->rwlk); return NULL; } /* * do_cond_signal -- lock block on a condition variables, * and unlock them by signal */ static void * do_cond_signal(void *arg) { struct thread_args *t = (struct thread_args *)arg; struct locks *lock = D_RW(t->lock); if (t->t_id == 0) { pmemobj_mutex_lock(lock->pop, &lock->mtx); while (lock->data < (NUM_THREADS - 1)) pmemobj_cond_wait(lock->pop, &lock->cond, &lock->mtx); lock->data++; pmemobj_persist(lock->pop, &lock->data, sizeof(lock->data)); pmemobj_mutex_unlock(lock->pop, &lock->mtx); } else { pmemobj_mutex_lock(lock->pop, &lock->mtx); lock->data++; pmemobj_persist(lock->pop, &lock->data, sizeof(lock->data)); pmemobj_cond_signal(lock->pop, &lock->cond); pmemobj_mutex_unlock(lock->pop, &lock->mtx); } return NULL; } /* * do_cond_broadcast -- lock block on a condition variables and unlock * by broadcasting */ static void * do_cond_broadcast(void *arg) { struct thread_args *t = (struct thread_args *)arg; struct locks *lock = D_RW(t->lock); if (t->t_id < (NUM_THREADS / 2)) { pmemobj_mutex_lock(lock->pop, &lock->mtx); while (lock->data < (NUM_THREADS / 2)) pmemobj_cond_wait(lock->pop, &lock->cond, &lock->mtx); lock->data++; pmemobj_persist(lock->pop, &lock->data, sizeof(lock->data)); pmemobj_mutex_unlock(lock->pop, &lock->mtx); } else { pmemobj_mutex_lock(lock->pop, &lock->mtx); lock->data++; pmemobj_persist(lock->pop, &lock->data, sizeof(lock->data)); pmemobj_cond_broadcast(lock->pop, &lock->cond); pmemobj_mutex_unlock(lock->pop, &lock->mtx); } return NULL; } static fn_lock do_lock[MAX_FUNC] = {do_mutex_lock, do_rwlock_wrlock, do_rwlock_rdlock, do_cond_signal, do_cond_broadcast}; /* * do_lock_init -- initialize all types of locks */ static void do_lock_init(struct locks *lock) { pmemobj_mutex_zero(lock->pop, &lock->mtx); pmemobj_rwlock_zero(lock->pop, &lock->rwlk); pmemobj_cond_zero(lock->pop, &lock->cond); } /* * do_lock_mt -- perform multithread lock operations */ static void 
do_lock_mt(TOID(struct locks) lock, unsigned f_num) { D_RW(lock)->data = 0; for (int i = 0; i < NUM_THREADS; ++i) { threads[i].lock = lock; threads[i].t_id = i; THREAD_CREATE(&threads[i].t, NULL, do_lock[f_num], &threads[i]); } for (int i = 0; i < NUM_THREADS; ++i) THREAD_JOIN(&threads[i].t, NULL); /* * If all threads passed function properly and used every lock, there * should be every element in data array incremented exactly one time * by every thread. */ UT_ASSERT((D_RO(lock)->data == NUM_THREADS) || (D_RO(lock)->data == 0)); } int main(int argc, char *argv[]) { START(argc, argv, "obj_locks"); if (argc != 2) UT_FATAL("usage: %s [file]", argv[0]); PMEMobjpool *pop; if ((pop = pmemobj_create(argv[1], LAYOUT_NAME, PMEMOBJ_MIN_POOL, S_IWUSR | S_IRUSR)) == NULL) UT_FATAL("!pmemobj_create"); TOID(struct locks) lock; POBJ_ALLOC(pop, &lock, struct locks, sizeof(struct locks), NULL, NULL); D_RW(lock)->pop = pop; do_lock_init(D_RW(lock)); for (unsigned i = 0; i < MAX_FUNC; i++) do_lock_mt(lock, i); POBJ_FREE(&lock); pmemobj_close(pop); DONE(NULL); }
4,821
22.99005
72
c
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/libpmempool_feature/libpmempool_feature.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2016, Intel Corporation */ /* * libpmempool_feature -- pmempool_feature_(enable|disable|query) test * */ #include <stddef.h> #include <stdlib.h> #include <stdio.h> #include "libpmempool.h" #include "pool_hdr.h" #include "unittest.h" #define EMPTY_FLAGS 0 /* * print_usage -- print usage of program */ static void print_usage(const char *name) { UT_OUT("usage: %s <pool_path> (e|d|q) <feature-name>", name); UT_OUT("feature-name: SINGLEHDR, CKSUM_2K, SHUTDOWN_STATE"); } /* * str2pmempool_feature -- convert feature name to pmempool_feature enum */ static enum pmempool_feature str2pmempool_feature(const char *app, const char *str) { uint32_t fval = util_str2pmempool_feature(str); if (fval == UINT32_MAX) { print_usage(app); UT_FATAL("unknown feature: %s", str); } return (enum pmempool_feature)fval; } int main(int argc, char *argv[]) { START(argc, argv, "libpmempool_feature"); if (argc < 4) { print_usage(argv[0]); UT_FATAL("insufficient number of arguments: %d", argc - 1); } const char *path = argv[1]; char cmd = argv[2][0]; enum pmempool_feature feature = str2pmempool_feature(argv[0], argv[3]); int ret; switch (cmd) { case 'e': return pmempool_feature_enable(path, feature, EMPTY_FLAGS); case 'd': return pmempool_feature_disable(path, feature, EMPTY_FLAGS); case 'q': ret = pmempool_feature_query(path, feature, EMPTY_FLAGS); if (ret < 0) return 1; UT_OUT("query %s result is %d", argv[3], ret); return 0; default: print_usage(argv[0]); UT_FATAL("unknown command: %c", cmd); } DONE(NULL); }
1,622
20.077922
72
c
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_tx_flow/obj_tx_flow.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2020, Intel Corporation */ /* * obj_tx_flow.c -- unit test for transaction flow */ #include "unittest.h" #include "obj.h" #define LAYOUT_NAME "direct" #define TEST_VALUE_A 5 #define TEST_VALUE_B 10 #define TEST_VALUE_C 15 #define OPS_NUM 9 TOID_DECLARE(struct test_obj, 1); struct test_obj { int a; int b; int c; }; static void do_tx_macro_commit(PMEMobjpool *pop, TOID(struct test_obj) *obj) { TX_BEGIN(pop) { D_RW(*obj)->a = TEST_VALUE_A; } TX_ONCOMMIT { UT_ASSERT(D_RW(*obj)->a == TEST_VALUE_A); D_RW(*obj)->b = TEST_VALUE_B; } TX_ONABORT { /* not called */ D_RW(*obj)->a = TEST_VALUE_B; } TX_FINALLY { UT_ASSERT(D_RW(*obj)->b == TEST_VALUE_B); D_RW(*obj)->c = TEST_VALUE_C; } TX_END } static void do_tx_macro_abort(PMEMobjpool *pop, TOID(struct test_obj) *obj) { D_RW(*obj)->a = TEST_VALUE_A; D_RW(*obj)->b = TEST_VALUE_B; TX_BEGIN(pop) { TX_ADD(*obj); D_RW(*obj)->a = TEST_VALUE_B; pmemobj_tx_abort(EINVAL); D_RW(*obj)->b = TEST_VALUE_A; } TX_ONCOMMIT { /* not called */ D_RW(*obj)->a = TEST_VALUE_B; } TX_ONABORT { UT_ASSERT(D_RW(*obj)->a == TEST_VALUE_A); UT_ASSERT(D_RW(*obj)->b == TEST_VALUE_B); D_RW(*obj)->b = TEST_VALUE_B; } TX_FINALLY { UT_ASSERT(D_RW(*obj)->b == TEST_VALUE_B); D_RW(*obj)->c = TEST_VALUE_C; } TX_END } static void do_tx_macro_commit_nested(PMEMobjpool *pop, TOID(struct test_obj) *obj) { TX_BEGIN(pop) { TX_BEGIN(pop) { D_RW(*obj)->a = TEST_VALUE_A; } TX_ONCOMMIT { UT_ASSERT(D_RW(*obj)->a == TEST_VALUE_A); D_RW(*obj)->b = TEST_VALUE_B; } TX_END } TX_ONCOMMIT { D_RW(*obj)->c = TEST_VALUE_C; } TX_END } static void do_tx_macro_abort_nested(PMEMobjpool *pop, TOID(struct test_obj) *obj) { volatile int a = 0; volatile int b = 0; volatile int c = 0; D_RW(*obj)->a = TEST_VALUE_A; D_RW(*obj)->b = TEST_VALUE_B; TX_BEGIN(pop) { TX_ADD(*obj); D_RW(*obj)->a = TEST_VALUE_B; a = TEST_VALUE_C; TX_BEGIN(pop) { D_RW(*obj)->b = TEST_VALUE_C; a = TEST_VALUE_A; pmemobj_tx_abort(EINVAL); a = TEST_VALUE_B; } TX_ONCOMMIT { /* not called */ a = TEST_VALUE_C; } TX_ONABORT { UT_ASSERT(a == TEST_VALUE_A); b = TEST_VALUE_B; } TX_FINALLY { UT_ASSERT(b == TEST_VALUE_B); c = TEST_VALUE_C; } TX_END a = TEST_VALUE_B; } TX_ONCOMMIT { /* not called */ UT_ASSERT(a == TEST_VALUE_A); c = TEST_VALUE_C; } TX_ONABORT { UT_ASSERT(a == TEST_VALUE_A); UT_ASSERT(b == TEST_VALUE_B); UT_ASSERT(c == TEST_VALUE_C); b = TEST_VALUE_A; } TX_FINALLY { UT_ASSERT(b == TEST_VALUE_A); D_RW(*obj)->c = TEST_VALUE_C; a = TEST_VALUE_B; } TX_END UT_ASSERT(a == TEST_VALUE_B); } static void do_tx_macro_abort_nested_begin(PMEMobjpool *pop, TOID(struct test_obj) *obj) { errno = 0; TX_BEGIN(pop) { D_RW(*obj)->a = TEST_VALUE_A; D_RW(*obj)->b = TEST_VALUE_B; pmemobj_tx_set_failure_behavior(POBJ_TX_FAILURE_RETURN); TX_BEGIN((PMEMobjpool *)(uintptr_t)7) { } TX_ONABORT { UT_ASSERT(0); } TX_END UT_ASSERT(errno == EINVAL); } TX_ONABORT { D_RW(*obj)->c = TEST_VALUE_C; } TX_ONCOMMIT { /* not called */ D_RW(*obj)->a = TEST_VALUE_B; } TX_END } static void do_tx_commit(PMEMobjpool *pop, TOID(struct test_obj) *obj) { pmemobj_tx_begin(pop, NULL, TX_PARAM_NONE); D_RW(*obj)->a = TEST_VALUE_A; TX_ADD(*obj); D_RW(*obj)->b = TEST_VALUE_B; pmemobj_tx_commit(); UT_ASSERT(pmemobj_tx_stage() == TX_STAGE_ONCOMMIT); D_RW(*obj)->c = TEST_VALUE_C; pmemobj_tx_end(); } static void do_tx_commit_nested(PMEMobjpool *pop, TOID(struct test_obj) *obj) { pmemobj_tx_begin(pop, NULL, TX_PARAM_NONE); TX_ADD(*obj); D_RW(*obj)->a = TEST_VALUE_A; pmemobj_tx_begin(pop, NULL, TX_PARAM_NONE); TX_ADD(*obj); D_RW(*obj)->b = 
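/* still inside the nested (inner) transaction started just above */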
TEST_VALUE_B; pmemobj_tx_commit(); UT_ASSERT(pmemobj_tx_stage() == TX_STAGE_ONCOMMIT); pmemobj_tx_end(); UT_ASSERT(pmemobj_tx_stage() == TX_STAGE_WORK); pmemobj_tx_commit(); UT_ASSERT(pmemobj_tx_stage() == TX_STAGE_ONCOMMIT); D_RW(*obj)->c = TEST_VALUE_C; pmemobj_tx_end(); } static void do_tx_abort(PMEMobjpool *pop, TOID(struct test_obj) *obj) { D_RW(*obj)->a = TEST_VALUE_A; pmemobj_tx_begin(pop, NULL, TX_PARAM_NONE); D_RW(*obj)->b = TEST_VALUE_B; TX_ADD(*obj); D_RW(*obj)->a = 0; pmemobj_tx_abort(EINVAL); UT_ASSERT(pmemobj_tx_stage() == TX_STAGE_ONABORT); D_RW(*obj)->c = TEST_VALUE_C; pmemobj_tx_end(); } static void do_tx_abort_nested(PMEMobjpool *pop, TOID(struct test_obj) *obj) { D_RW(*obj)->a = TEST_VALUE_A; D_RW(*obj)->b = TEST_VALUE_B; pmemobj_tx_begin(pop, NULL, TX_PARAM_NONE); TX_ADD(*obj); D_RW(*obj)->a = 0; pmemobj_tx_begin(pop, NULL, TX_PARAM_NONE); TX_ADD(*obj); D_RW(*obj)->b = 0; pmemobj_tx_abort(EINVAL); UT_ASSERT(pmemobj_tx_stage() == TX_STAGE_ONABORT); pmemobj_tx_end(); UT_ASSERT(pmemobj_tx_stage() == TX_STAGE_ONABORT); D_RW(*obj)->c = TEST_VALUE_C; pmemobj_tx_end(); } typedef void (*fn_op)(PMEMobjpool *pop, TOID(struct test_obj) *obj); static fn_op tx_op[OPS_NUM] = {do_tx_macro_commit, do_tx_macro_abort, do_tx_macro_commit_nested, do_tx_macro_abort_nested, do_tx_macro_abort_nested_begin, do_tx_commit, do_tx_commit_nested, do_tx_abort, do_tx_abort_nested}; static void do_tx_process(PMEMobjpool *pop) { pmemobj_tx_begin(pop, NULL, TX_PARAM_NONE); UT_ASSERT(pmemobj_tx_stage() == TX_STAGE_WORK); pmemobj_tx_process(); UT_ASSERT(pmemobj_tx_stage() == TX_STAGE_ONCOMMIT); pmemobj_tx_process(); UT_ASSERT(pmemobj_tx_stage() == TX_STAGE_FINALLY); pmemobj_tx_process(); UT_ASSERT(pmemobj_tx_stage() == TX_STAGE_NONE); pmemobj_tx_end(); UT_ASSERT(pmemobj_tx_stage() == TX_STAGE_NONE); } static void do_tx_process_nested(PMEMobjpool *pop) { pmemobj_tx_begin(pop, NULL, TX_PARAM_NONE); UT_ASSERT(pmemobj_tx_stage() == TX_STAGE_WORK); pmemobj_tx_begin(pop, NULL, TX_PARAM_NONE); pmemobj_tx_process(); UT_ASSERT(pmemobj_tx_stage() == TX_STAGE_ONCOMMIT); pmemobj_tx_process(); UT_ASSERT(pmemobj_tx_stage() == TX_STAGE_FINALLY); pmemobj_tx_end(); UT_ASSERT(pmemobj_tx_stage() == TX_STAGE_WORK); pmemobj_tx_abort(EINVAL); UT_ASSERT(pmemobj_tx_stage() == TX_STAGE_ONABORT); pmemobj_tx_process(); UT_ASSERT(pmemobj_tx_stage() == TX_STAGE_FINALLY); pmemobj_tx_process(); UT_ASSERT(pmemobj_tx_stage() == TX_STAGE_NONE); pmemobj_tx_end(); UT_ASSERT(pmemobj_tx_stage() == TX_STAGE_NONE); } static void do_fault_injection(PMEMobjpool *pop) { if (!pmemobj_fault_injection_enabled()) return; pmemobj_inject_fault_at(PMEM_MALLOC, 1, "pmemobj_tx_begin"); int ret = pmemobj_tx_begin(pop, NULL, TX_PARAM_NONE); UT_ASSERTne(ret, 0); UT_ASSERTeq(errno, ENOMEM); } int main(int argc, char *argv[]) { START(argc, argv, "obj_tx_flow"); if (argc != 3) UT_FATAL("usage: %s [file]", argv[0]); PMEMobjpool *pop; if ((pop = pmemobj_create(argv[2], LAYOUT_NAME, PMEMOBJ_MIN_POOL, S_IWUSR | S_IRUSR)) == NULL) UT_FATAL("!pmemobj_create"); TOID(struct test_obj) obj; POBJ_ZNEW(pop, &obj, struct test_obj); for (int i = 0; i < OPS_NUM; i++) { D_RW(obj)->a = 0; D_RW(obj)->b = 0; D_RW(obj)->c = 0; tx_op[i](pop, &obj); UT_ASSERT(D_RO(obj)->a == TEST_VALUE_A); UT_ASSERT(D_RO(obj)->b == TEST_VALUE_B); UT_ASSERT(D_RO(obj)->c == TEST_VALUE_C); } switch (argv[1][0]) { case 't': do_tx_process(pop); do_tx_process_nested(pop); break; case 'f': do_fault_injection(pop); break; default: UT_FATAL("usage: %s [t|f]", argv[0]); } pmemobj_close(pop); DONE(NULL); }
7,445
23.574257
76
c
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/util_pool_hdr/util_pool_hdr.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2018-2020, Intel Corporation */ /* * util_pool_hdr.c -- unit test for pool_hdr layout and default values * * This test should be modified after every layout change. It's here to prevent * any accidental layout changes. */ #include "util.h" #include "unittest.h" #include "set.h" #include "pool_hdr.h" #define POOL_HDR_SIG_LEN_V1 (8) #define POOL_HDR_UNUSED_LEN_V1 (1904) #define POOL_HDR_UNUSED2_LEN_V1 (1976) #define POOL_HDR_2K_CHECKPOINT (2048UL) #define FEATURES_T_SIZE_V1 (12) #define ARCH_FLAGS_SIZE_V1 (16) #define ARCH_FLAGS_RESERVED_LEN_V1 (4) #define SHUTDOWN_STATE_SIZE_V1 (64) #define SHUTDOWN_STATE_RESERVED_LEN_V1 (39) /* * test_layout -- test pool_hdr layout */ static void test_layout() { ASSERT_ALIGNED_BEGIN(struct pool_hdr); ASSERT_ALIGNED_FIELD(struct pool_hdr, signature); ASSERT_FIELD_SIZE(signature, POOL_HDR_SIG_LEN_V1); ASSERT_ALIGNED_FIELD(struct pool_hdr, major); ASSERT_ALIGNED_FIELD(struct pool_hdr, features); ASSERT_ALIGNED_FIELD(struct pool_hdr, poolset_uuid); ASSERT_ALIGNED_FIELD(struct pool_hdr, uuid); ASSERT_ALIGNED_FIELD(struct pool_hdr, prev_part_uuid); ASSERT_ALIGNED_FIELD(struct pool_hdr, next_part_uuid); ASSERT_ALIGNED_FIELD(struct pool_hdr, prev_repl_uuid); ASSERT_ALIGNED_FIELD(struct pool_hdr, next_repl_uuid); ASSERT_ALIGNED_FIELD(struct pool_hdr, crtime); ASSERT_ALIGNED_FIELD(struct pool_hdr, arch_flags); ASSERT_ALIGNED_FIELD(struct pool_hdr, unused); ASSERT_FIELD_SIZE(unused, POOL_HDR_UNUSED_LEN_V1); ASSERT_OFFSET_CHECKPOINT(struct pool_hdr, POOL_HDR_2K_CHECKPOINT); ASSERT_ALIGNED_FIELD(struct pool_hdr, unused2); ASSERT_FIELD_SIZE(unused2, POOL_HDR_UNUSED2_LEN_V1); ASSERT_ALIGNED_FIELD(struct pool_hdr, sds); ASSERT_ALIGNED_FIELD(struct pool_hdr, checksum); #if PMEM_PAGESIZE > 4096 ASSERT_ALIGNED_FIELD(struct pool_hdr, align_pad); #endif ASSERT_ALIGNED_CHECK(struct pool_hdr); ASSERT_ALIGNED_BEGIN(features_t); ASSERT_ALIGNED_FIELD(features_t, compat); ASSERT_ALIGNED_FIELD(features_t, incompat); ASSERT_ALIGNED_FIELD(features_t, ro_compat); ASSERT_ALIGNED_CHECK(features_t); UT_COMPILE_ERROR_ON(sizeof(features_t) != FEATURES_T_SIZE_V1); ASSERT_ALIGNED_BEGIN(struct arch_flags); ASSERT_ALIGNED_FIELD(struct arch_flags, alignment_desc); ASSERT_ALIGNED_FIELD(struct arch_flags, machine_class); ASSERT_ALIGNED_FIELD(struct arch_flags, data); ASSERT_ALIGNED_FIELD(struct arch_flags, reserved); ASSERT_FIELD_SIZE(reserved, ARCH_FLAGS_RESERVED_LEN_V1); ASSERT_ALIGNED_FIELD(struct arch_flags, machine); ASSERT_ALIGNED_CHECK(struct arch_flags); UT_COMPILE_ERROR_ON(sizeof(struct arch_flags) != ARCH_FLAGS_SIZE_V1); ASSERT_ALIGNED_BEGIN(struct shutdown_state); ASSERT_ALIGNED_FIELD(struct shutdown_state, usc); ASSERT_ALIGNED_FIELD(struct shutdown_state, uuid); ASSERT_ALIGNED_FIELD(struct shutdown_state, dirty); ASSERT_ALIGNED_FIELD(struct shutdown_state, reserved); ASSERT_FIELD_SIZE(reserved, SHUTDOWN_STATE_RESERVED_LEN_V1); ASSERT_ALIGNED_FIELD(struct shutdown_state, checksum); ASSERT_ALIGNED_CHECK(struct shutdown_state); UT_COMPILE_ERROR_ON(sizeof(struct shutdown_state) != SHUTDOWN_STATE_SIZE_V1); } /* incompat features - final values */ #define POOL_FEAT_SINGLEHDR_FINAL 0x0001U #define POOL_FEAT_CKSUM_2K_FINAL 0x0002U #define POOL_FEAT_SDS_FINAL 0x0004U /* incompat features effective values */ #if defined(_WIN32) || NDCTL_ENABLED #ifdef SDS_ENABLED #define POOL_E_FEAT_SDS_FINAL POOL_FEAT_SDS_FINAL #else #define POOL_E_FEAT_SDS_FINAL 0x0000U /* empty */ #endif #else /* * shutdown state support on Linux requires root access on 
kernel < 4.20 with * ndctl < 63 so it is disabled by default */ #define POOL_E_FEAT_SDS_FINAL 0x0000U /* empty */ #endif #define POOL_FEAT_INCOMPAT_DEFAULT_V1 \ (POOL_FEAT_CKSUM_2K_FINAL | POOL_E_FEAT_SDS_FINAL) #ifdef _WIN32 #define SDS_AT_CREATE_EXPECTED 1 #else #define SDS_AT_CREATE_EXPECTED 0 #endif /* * test_default_values -- test default values */ static void test_default_values() { UT_COMPILE_ERROR_ON(POOL_FEAT_SINGLEHDR != POOL_FEAT_SINGLEHDR_FINAL); UT_COMPILE_ERROR_ON(POOL_FEAT_CKSUM_2K != POOL_FEAT_CKSUM_2K_FINAL); UT_COMPILE_ERROR_ON(POOL_FEAT_SDS != POOL_FEAT_SDS_FINAL); UT_COMPILE_ERROR_ON(SDS_at_create != SDS_AT_CREATE_EXPECTED); UT_COMPILE_ERROR_ON(POOL_FEAT_INCOMPAT_DEFAULT != POOL_FEAT_INCOMPAT_DEFAULT_V1); } int main(int argc, char *argv[]) { START(argc, argv, "util_pool_hdr"); test_layout(); test_default_values(); DONE(NULL); }
4,508
30.531469
79
c
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/util_map_proc/util_map_proc.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2014-2018, Intel Corporation */ /* * util_map_proc.c -- unit test for util_map() /proc parsing * * usage: util_map_proc maps_file len [len]... */ #define _GNU_SOURCE #include <dlfcn.h> #include "unittest.h" #include "util.h" #include "mmap.h" #define GIGABYTE ((uintptr_t)1 << 30) #define TERABYTE ((uintptr_t)1 << 40) int main(int argc, char *argv[]) { START(argc, argv, "util_map_proc"); util_init(); util_mmap_init(); if (argc < 3) UT_FATAL("usage: %s maps_file len [len]...", argv[0]); Mmap_mapfile = argv[1]; UT_OUT("redirecting " OS_MAPFILE " to %s", Mmap_mapfile); for (int arg = 2; arg < argc; arg++) { size_t len = (size_t)strtoull(argv[arg], NULL, 0); size_t align = 2 * MEGABYTE; if (len >= 2 * GIGABYTE) align = GIGABYTE; void *h1 = util_map_hint_unused((void *)TERABYTE, len, GIGABYTE); void *h2 = util_map_hint(len, 0); if (h1 != MAP_FAILED && h1 != NULL) UT_ASSERTeq((uintptr_t)h1 & (GIGABYTE - 1), 0); if (h2 != MAP_FAILED && h2 != NULL) UT_ASSERTeq((uintptr_t)h2 & (align - 1), 0); if (h1 == NULL) /* XXX portability */ UT_OUT("len %zu: (nil) %p", len, h2); else if (h2 == NULL) UT_OUT("len %zu: %p (nil)", len, h1); else UT_OUT("len %zu: %p %p", len, h1, h2); } util_mmap_fini(); DONE(NULL); }
1,335
21.644068
60
c
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/win_lists/win_lists.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2016, Intel Corporation */ /* * Copyright (c) 2016, Microsoft Corporation. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * win_lists.c -- test list routines used in windows implementation */ #include "unittest.h" #include "queue.h" typedef struct TEST_LIST_NODE { PMDK_LIST_ENTRY(TEST_LIST_NODE) ListEntry; int dummy; } *PTEST_LIST_NODE; PMDK_LIST_HEAD(TestList, TEST_LIST_NODE); static void dump_list(struct TestList *head) { PTEST_LIST_NODE pNode = NULL; pNode = (PTEST_LIST_NODE)PMDK_LIST_FIRST(head); while (pNode != NULL) { UT_OUT("Node value: %d", pNode->dummy); pNode = (PTEST_LIST_NODE)PMDK_LIST_NEXT(pNode, ListEntry); } } static int get_list_count(struct TestList *head) { PTEST_LIST_NODE pNode = NULL; int listCount = 0; pNode = (PTEST_LIST_NODE)PMDK_LIST_FIRST(head); while (pNode != NULL) { listCount++; pNode = (PTEST_LIST_NODE)PMDK_LIST_NEXT(pNode, ListEntry); } return listCount; } /* * test_list - Do some basic list manipulations and output to log for * script comparison. Only testing the macros we use. 
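 * Covers PMDK_LIST_INIT, PMDK_LIST_INSERT_HEAD, PMDK_LIST_REMOVE,
 * PMDK_LIST_FIRST/NEXT and PMDK_LIST_EMPTY.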
*/ static void test_list(void) { PTEST_LIST_NODE pNode = NULL; struct TestList head = PMDK_LIST_HEAD_INITIALIZER(head); PMDK_LIST_INIT(&head); UT_ASSERT_rt(PMDK_LIST_EMPTY(&head)); pNode = MALLOC(sizeof(struct TEST_LIST_NODE)); pNode->dummy = 0; PMDK_LIST_INSERT_HEAD(&head, pNode, ListEntry); UT_ASSERTeq_rt(1, get_list_count(&head)); dump_list(&head); /* Remove one node */ PMDK_LIST_REMOVE(pNode, ListEntry); UT_ASSERTeq_rt(0, get_list_count(&head)); dump_list(&head); free(pNode); /* Add a bunch of nodes */ for (int i = 1; i < 10; i++) { pNode = MALLOC(sizeof(struct TEST_LIST_NODE)); pNode->dummy = i; PMDK_LIST_INSERT_HEAD(&head, pNode, ListEntry); } UT_ASSERTeq_rt(9, get_list_count(&head)); dump_list(&head); /* Remove all of them */ while (!PMDK_LIST_EMPTY(&head)) { pNode = (PTEST_LIST_NODE)PMDK_LIST_FIRST(&head); PMDK_LIST_REMOVE(pNode, ListEntry); free(pNode); } UT_ASSERTeq_rt(0, get_list_count(&head)); dump_list(&head); } typedef struct TEST_SORTEDQ_NODE { PMDK_SORTEDQ_ENTRY(TEST_SORTEDQ_NODE) queue_link; int dummy; } TEST_SORTEDQ_NODE, *PTEST_SORTEDQ_NODE; PMDK_SORTEDQ_HEAD(TEST_SORTEDQ, TEST_SORTEDQ_NODE); static int sortedq_node_comparer(TEST_SORTEDQ_NODE *a, TEST_SORTEDQ_NODE *b) { return a->dummy - b->dummy; } struct TEST_DATA_SORTEDQ { int count; int data[10]; }; /* * test_sortedq - Do some basic operations on SORTEDQ and make sure that the * queue is sorted for different input sequences. */ void test_sortedq(void) { PTEST_SORTEDQ_NODE node = NULL; struct TEST_SORTEDQ head = PMDK_SORTEDQ_HEAD_INITIALIZER(head); struct TEST_DATA_SORTEDQ test_data[] = { {5, {5, 7, 9, 100, 101}}, {7, {1, 2, 3, 4, 5, 6, 7}}, {5, {100, 90, 80, 70, 40}}, {6, {10, 9, 8, 7, 6, 5}}, {5, {23, 13, 27, 4, 15}}, {5, {2, 2, 2, 2, 2}} }; PMDK_SORTEDQ_INIT(&head); UT_ASSERT_rt(PMDK_SORTEDQ_EMPTY(&head)); for (int i = 0; i < _countof(test_data); i++) { for (int j = 0; j < test_data[i].count; j++) { node = MALLOC(sizeof(TEST_SORTEDQ_NODE)); node->dummy = test_data[i].data[j]; PMDK_SORTEDQ_INSERT(&head, node, queue_link, TEST_SORTEDQ_NODE, sortedq_node_comparer); } int prev = MININT; int num_entries = 0; PMDK_SORTEDQ_FOREACH(node, &head, queue_link) { UT_ASSERT(prev <= node->dummy); num_entries++; } UT_ASSERT(num_entries == test_data[i].count); while (!PMDK_SORTEDQ_EMPTY(&head)) { node = PMDK_SORTEDQ_FIRST(&head); PMDK_SORTEDQ_REMOVE(&head, node, queue_link); FREE(node); } } } int main(int argc, char *argv[]) { START(argc, argv, "win_lists - testing %s", (argc > 1) ? argv[1] : "list"); if (argc == 1 || (stricmp(argv[1], "list") == 0)) test_list(); if (argc > 1 && (stricmp(argv[1], "sortedq") == 0)) test_sortedq(); DONE(NULL); }
5,431
27
76
c
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_pool/obj_pool.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2020, Intel Corporation */ /* * obj_pool.c -- unit test for pmemobj_create() and pmemobj_open() * Also tests pmemobj_(set/get)_user_data(). * * usage: obj_pool op path layout [poolsize mode] * * op can be: * c - create * o - open * * "poolsize" and "mode" arguments are ignored for "open" */ #include "unittest.h" #include "../libpmemobj/obj.h" #define MB ((size_t)1 << 20) #define USER_DATA_V (void *) 123456789ULL static void pool_create(const char *path, const char *layout, size_t poolsize, unsigned mode) { PMEMobjpool *pop = pmemobj_create(path, layout, poolsize, mode); if (pop == NULL) UT_OUT("!%s: pmemobj_create: %s", path, pmemobj_errormsg()); else { /* Test pmemobj_(get/set)_user data */ UT_ASSERTeq(NULL, pmemobj_get_user_data(pop)); pmemobj_set_user_data(pop, USER_DATA_V); UT_ASSERTeq(USER_DATA_V, pmemobj_get_user_data(pop)); os_stat_t stbuf; STAT(path, &stbuf); UT_OUT("%s: file size %zu mode 0%o", path, stbuf.st_size, stbuf.st_mode & 0777); pmemobj_close(pop); int result = pmemobj_check(path, layout); if (result < 0) UT_OUT("!%s: pmemobj_check", path); else if (result == 0) UT_OUT("%s: pmemobj_check: not consistent", path); } } static void pool_open(const char *path, const char *layout) { PMEMobjpool *pop = pmemobj_open(path, layout); if (pop == NULL) UT_OUT("!%s: pmemobj_open: %s", path, pmemobj_errormsg()); else { UT_OUT("%s: pmemobj_open: Success", path); UT_ASSERTeq(NULL, pmemobj_get_user_data(pop)); pmemobj_close(pop); } } static void test_fault_injection(const char *path, const char *layout, size_t poolsize, unsigned mode) { if (!pmemobj_fault_injection_enabled()) return; pmemobj_inject_fault_at(PMEM_MALLOC, 1, "tx_params_new"); PMEMobjpool *pop = pmemobj_create(path, layout, poolsize, mode); UT_ASSERTeq(pop, NULL); UT_ASSERTeq(errno, ENOMEM); } int main(int argc, char *argv[]) { START(argc, argv, "obj_pool"); if (argc < 4) UT_FATAL("usage: %s op path layout [poolsize mode]", argv[0]); char *layout = NULL; size_t poolsize; unsigned mode; if (strcmp(argv[3], "EMPTY") == 0) layout = ""; else if (strcmp(argv[3], "NULL") != 0) layout = argv[3]; switch (argv[1][0]) { case 'c': poolsize = strtoull(argv[4], NULL, 0) * MB; /* in megabytes */ mode = strtoul(argv[5], NULL, 8); pool_create(argv[2], layout, poolsize, mode); break; case 'o': pool_open(argv[2], layout); break; case 'f': os_setenv("PMEMOBJ_CONF", "invalid-query", 1); pool_open(argv[2], layout); os_unsetenv("PMEMOBJ_CONF"); pool_open(argv[2], layout); break; case 't': poolsize = strtoull(argv[4], NULL, 0) * MB; /* in megabytes */ mode = strtoul(argv[5], NULL, 8); test_fault_injection(argv[2], layout, poolsize, mode); break; default: UT_FATAL("unknown operation"); } DONE(NULL); }
2,905
21.527132
75
c
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/pmem2_memset/pmem2_memset.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020, Intel Corporation */ /* * pmem_memset.c -- unit test for doing a memset * * usage: pmem_memset file offset length */ #include "unittest.h" #include "file.h" #include "ut_pmem2.h" #include "memset_common.h" static void do_memset_variants(int fd, char *dest, const char *file_name, size_t dest_off, size_t bytes, persist_fn p, memset_fn fn) { for (int i = 0; i < ARRAY_SIZE(Flags); ++i) { do_memset(fd, dest, file_name, dest_off, bytes, fn, Flags[i], p); if (Flags[i] & PMEMOBJ_F_MEM_NOFLUSH) p(dest, bytes); } } int main(int argc, char *argv[]) { int fd; char *dest; struct pmem2_config *cfg; struct pmem2_source *src; struct pmem2_map *map; if (argc != 4) UT_FATAL("usage: %s file offset length", argv[0]); const char *thr = os_getenv("PMEM_MOVNT_THRESHOLD"); const char *avx = os_getenv("PMEM_AVX"); const char *avx512f = os_getenv("PMEM_AVX512F"); START(argc, argv, "pmem2_memset %s %s %s %savx %savx512f", argv[2], argv[3], thr ? thr : "default", avx ? "" : "!", avx512f ? "" : "!"); fd = OPEN(argv[1], O_RDWR); PMEM2_CONFIG_NEW(&cfg); PMEM2_SOURCE_FROM_FD(&src, fd); PMEM2_CONFIG_SET_GRANULARITY(cfg, PMEM2_GRANULARITY_PAGE); int ret = pmem2_map(cfg, src, &map); UT_PMEM2_EXPECT_RETURN(ret, 0); PMEM2_CONFIG_DELETE(&cfg); dest = pmem2_map_get_address(map); if (dest == NULL) UT_FATAL("!could not map file: %s", argv[1]); size_t dest_off = strtoul(argv[2], NULL, 0); size_t bytes = strtoul(argv[3], NULL, 0); pmem2_persist_fn persist = pmem2_get_persist_fn(map); pmem2_memset_fn memset_fn = pmem2_get_memset_fn(map); do_memset_variants(fd, dest, argv[1], dest_off, bytes, persist, memset_fn); ret = pmem2_unmap(&map); UT_ASSERTeq(ret, 0); CLOSE(fd); DONE(NULL); }
1,810
21.6375
78
c
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/pmem2_memset/memset_common.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2020, Intel Corporation */ /* * memset_common.c -- common part for tests doing a persistent memset */ #include "unittest.h" #include "memset_common.h" /* * do_memset - worker function for memset */ void do_memset(int fd, char *dest, const char *file_name, size_t dest_off, size_t bytes, memset_fn fn, unsigned flags, persist_fn persist) { char *buf = MALLOC(bytes); char *dest1; char *ret; memset(dest, 0, bytes); persist(dest, bytes); dest1 = MALLOC(bytes); memset(dest1, 0, bytes); /* * This is used to verify that the value of what a non persistent * memset matches the outcome of the persistent memset. The * persistent memset will match the file but may not be the * correct or expected value. */ memset(dest1 + dest_off, 0x5A, bytes / 4); memset(dest1 + dest_off + (bytes / 4), 0x46, bytes / 4); /* Test the corner cases */ ret = fn(dest + dest_off, 0x5A, 0, flags); UT_ASSERTeq(ret, dest + dest_off); UT_ASSERTeq(*(char *)(dest + dest_off), 0); /* * Do the actual memset with persistence. */ ret = fn(dest + dest_off, 0x5A, bytes / 4, flags); UT_ASSERTeq(ret, dest + dest_off); ret = fn(dest + dest_off + (bytes / 4), 0x46, bytes / 4, flags); UT_ASSERTeq(ret, dest + dest_off + (bytes / 4)); if (memcmp(dest, dest1, bytes / 2)) UT_FATAL("%s: first %zu bytes do not match", file_name, bytes / 2); LSEEK(fd, 0, SEEK_SET); if (READ(fd, buf, bytes / 2) == bytes / 2) { if (memcmp(buf, dest, bytes / 2)) UT_FATAL("%s: first %zu bytes do not match", file_name, bytes / 2); } FREE(dest1); FREE(buf); } unsigned Flags[] = { 0, PMEM_F_MEM_NODRAIN, PMEM_F_MEM_NONTEMPORAL, PMEM_F_MEM_TEMPORAL, PMEM_F_MEM_NONTEMPORAL | PMEM_F_MEM_TEMPORAL, PMEM_F_MEM_NONTEMPORAL | PMEM_F_MEM_NODRAIN, PMEM_F_MEM_WC, PMEM_F_MEM_WB, PMEM_F_MEM_NOFLUSH, /* all possible flags */ PMEM_F_MEM_NODRAIN | PMEM_F_MEM_NOFLUSH | PMEM_F_MEM_NONTEMPORAL | PMEM_F_MEM_TEMPORAL | PMEM_F_MEM_WC | PMEM_F_MEM_WB, };
2,043
24.55
69
c
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/pmem2_memset/memset_common.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2020, Intel Corporation */ /* * memset_common.h -- header file for common memset utilities */ #ifndef MEMSET_COMMON_H #define MEMSET_COMMON_H 1 #include "unittest.h" #include "file.h" extern unsigned Flags[10]; typedef void *(*memset_fn)(void *pmemdest, int c, size_t len, unsigned flags); typedef void (*persist_fn)(const void *ptr, size_t len); void do_memset(int fd, char *dest, const char *file_name, size_t dest_off, size_t bytes, memset_fn fn, unsigned flags, persist_fn p); #endif
552
22.041667
78
h
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_pmalloc_basic/obj_pmalloc_basic.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2019, Intel Corporation */ /* * obj_pmalloc_basic.c -- unit test for pmalloc interface */ #include <stdint.h> #include "heap.h" #include "obj.h" #include "pmalloc.h" #include "unittest.h" #include "valgrind_internal.h" #include "set.h" #define MOCK_POOL_SIZE (PMEMOBJ_MIN_POOL * 3) #define TEST_MEGA_ALLOC_SIZE (10 * 1024 * 1024) #define TEST_HUGE_ALLOC_SIZE (4 * 255 * 1024) #define TEST_SMALL_ALLOC_SIZE (1000) #define TEST_MEDIUM_ALLOC_SIZE (1024 * 200) #define TEST_TINY_ALLOC_SIZE (64) #define TEST_RUNS 2 #define MAX_MALLOC_FREE_LOOP 1000 #define MALLOC_FREE_SIZE 8000 #define PAD_SIZE (PMEM_PAGESIZE - LANE_TOTAL_SIZE) struct mock_pop { PMEMobjpool p; char lanes[LANE_TOTAL_SIZE]; char padding[PAD_SIZE]; /* to page boundary */ uint64_t ptr; }; static struct mock_pop *addr; static PMEMobjpool *mock_pop; /* * drain_empty -- (internal) empty function for drain on non-pmem memory */ static void drain_empty(void) { /* do nothing */ } /* * obj_persist -- pmemobj version of pmem_persist w/o replication */ static int obj_persist(void *ctx, const void *addr, size_t len, unsigned flags) { PMEMobjpool *pop = ctx; pop->persist_local(addr, len); return 0; } /* * obj_flush -- pmemobj version of pmem_flush w/o replication */ static int obj_flush(void *ctx, const void *addr, size_t len, unsigned flags) { PMEMobjpool *pop = ctx; pop->flush_local(addr, len); return 0; } /* * obj_drain -- pmemobj version of pmem_drain w/o replication */ static void obj_drain(void *ctx) { PMEMobjpool *pop = ctx; pop->drain_local(); } static void obj_msync_nofail(const void *addr, size_t size) { if (pmem_msync(addr, size)) UT_FATAL("!pmem_msync"); } /* * obj_memcpy -- pmemobj version of memcpy w/o replication */ static void * obj_memcpy(void *ctx, void *dest, const void *src, size_t len, unsigned flags) { pmem_memcpy(dest, src, len, flags); return dest; } static void * obj_memset(void *ctx, void *ptr, int c, size_t sz, unsigned flags) { pmem_memset(ptr, c, sz, flags); return ptr; } static size_t test_oom_allocs(size_t size) { uint64_t max_allocs = MOCK_POOL_SIZE / size; uint64_t *allocs = CALLOC(max_allocs, sizeof(*allocs)); size_t count = 0; for (;;) { if (pmalloc(mock_pop, &addr->ptr, size, 0, 0)) { break; } UT_ASSERT(addr->ptr != 0); allocs[count++] = addr->ptr; } for (int i = 0; i < count; ++i) { addr->ptr = allocs[i]; pfree(mock_pop, &addr->ptr); UT_ASSERT(addr->ptr == 0); } UT_ASSERT(count != 0); FREE(allocs); return count; } static size_t test_oom_resrv(size_t size) { uint64_t max_allocs = MOCK_POOL_SIZE / size; uint64_t *allocs = CALLOC(max_allocs, sizeof(*allocs)); struct pobj_action *resvs = CALLOC(max_allocs, sizeof(*resvs)); size_t count = 0; for (;;) { if (palloc_reserve(&mock_pop->heap, size, NULL, NULL, 0, 0, 0, 0, &resvs[count]) != 0) break; allocs[count] = resvs[count].heap.offset; UT_ASSERT(allocs[count] != 0); count++; } for (size_t i = 0; i < count; ) { size_t nresv = MIN(count - i, 10); struct operation_context *ctx = pmalloc_operation_hold(mock_pop); palloc_publish(&mock_pop->heap, &resvs[i], nresv, ctx); pmalloc_operation_release(mock_pop); i += nresv; } for (int i = 0; i < count; ++i) { addr->ptr = allocs[i]; pfree(mock_pop, &addr->ptr); UT_ASSERT(addr->ptr == 0); } UT_ASSERT(count != 0); FREE(allocs); FREE(resvs); return count; } static void test_malloc_free_loop(size_t size) { int err; for (int i = 0; i < MAX_MALLOC_FREE_LOOP; ++i) { err = pmalloc(mock_pop, &addr->ptr, size, 0, 0); UT_ASSERTeq(err, 0); pfree(mock_pop, &addr->ptr); } } static 
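/*
 * test_realloc -- grow or shrink an existing allocation with prealloc
 * and verify the reported usable size at every step
 */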
void test_realloc(size_t org, size_t dest) { int err; struct palloc_heap *heap = &mock_pop->heap; err = pmalloc(mock_pop, &addr->ptr, org, 0, 0); UT_ASSERTeq(err, 0); UT_ASSERT(palloc_usable_size(heap, addr->ptr) >= org); err = prealloc(mock_pop, &addr->ptr, dest, 0, 0); UT_ASSERTeq(err, 0); UT_ASSERT(palloc_usable_size(heap, addr->ptr) >= dest); pfree(mock_pop, &addr->ptr); } #define PMALLOC_EXTRA 20 #define PALLOC_FLAG (1 << 15) #define FIRST_SIZE 1 /* use the first allocation class */ #define FIRST_USIZE 112 /* the usable size is 128 - 16 */ static void test_pmalloc_extras(PMEMobjpool *pop) { uint64_t val; int ret = pmalloc(pop, &val, FIRST_SIZE, PMALLOC_EXTRA, PALLOC_FLAG); UT_ASSERTeq(ret, 0); UT_ASSERTeq(palloc_extra(&pop->heap, val), PMALLOC_EXTRA); UT_ASSERT((palloc_flags(&pop->heap, val) & PALLOC_FLAG) == PALLOC_FLAG); UT_ASSERT(palloc_usable_size(&pop->heap, val) == FIRST_USIZE); pfree(pop, &val); } #define PMALLOC_ELEMENTS 20 static void test_pmalloc_first_next(PMEMobjpool *pop) { uint64_t vals[PMALLOC_ELEMENTS]; for (unsigned i = 0; i < PMALLOC_ELEMENTS; ++i) { int ret = pmalloc(pop, &vals[i], FIRST_SIZE, i, i); UT_ASSERTeq(ret, 0); } uint64_t off = palloc_first(&pop->heap); UT_ASSERTne(off, 0); int nvalues = 0; do { UT_ASSERTeq(vals[nvalues], off); UT_ASSERTeq(palloc_extra(&pop->heap, off), nvalues); UT_ASSERTeq(palloc_flags(&pop->heap, off), nvalues); UT_ASSERT(palloc_usable_size(&pop->heap, off) == FIRST_USIZE); nvalues ++; } while ((off = palloc_next(&pop->heap, off)) != 0); UT_ASSERTeq(nvalues, PMALLOC_ELEMENTS); for (int i = 0; i < PMALLOC_ELEMENTS; ++i) pfree(pop, &vals[i]); } static void test_mock_pool_allocs(void) { addr = MMAP_ANON_ALIGNED(MOCK_POOL_SIZE, Ut_mmap_align); mock_pop = &addr->p; mock_pop->addr = addr; mock_pop->rdonly = 0; mock_pop->is_pmem = 0; mock_pop->heap_offset = offsetof(struct mock_pop, ptr); UT_ASSERTeq(mock_pop->heap_offset % Ut_pagesize, 0); mock_pop->nlanes = 1; mock_pop->lanes_offset = sizeof(PMEMobjpool); mock_pop->is_master_replica = 1; mock_pop->persist_local = obj_msync_nofail; mock_pop->flush_local = obj_msync_nofail; mock_pop->drain_local = drain_empty; mock_pop->p_ops.persist = obj_persist; mock_pop->p_ops.flush = obj_flush; mock_pop->p_ops.drain = obj_drain; mock_pop->p_ops.memcpy = obj_memcpy; mock_pop->p_ops.memset = obj_memset; mock_pop->p_ops.base = mock_pop; mock_pop->set = MALLOC(sizeof(*(mock_pop->set))); mock_pop->set->options = 0; mock_pop->set->directory_based = 0; void *heap_start = (char *)mock_pop + mock_pop->heap_offset; uint64_t heap_size = MOCK_POOL_SIZE - mock_pop->heap_offset; struct stats *s = stats_new(mock_pop); UT_ASSERTne(s, NULL); heap_init(heap_start, heap_size, &mock_pop->heap_size, &mock_pop->p_ops); heap_boot(&mock_pop->heap, heap_start, heap_size, &mock_pop->heap_size, mock_pop, &mock_pop->p_ops, s, mock_pop->set); heap_buckets_init(&mock_pop->heap); /* initialize runtime lanes structure */ mock_pop->lanes_desc.runtime_nlanes = (unsigned)mock_pop->nlanes; lane_boot(mock_pop); UT_ASSERTne(mock_pop->heap.rt, NULL); test_pmalloc_extras(mock_pop); test_pmalloc_first_next(mock_pop); test_malloc_free_loop(MALLOC_FREE_SIZE); size_t medium_resv = test_oom_resrv(TEST_MEDIUM_ALLOC_SIZE); /* * Allocating till OOM and freeing the objects in a loop for different * buckets covers basically all code paths except error cases. 
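 * Each size class is exhausted twice; matching counts confirm that the
 * freed memory is fully reusable.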
*/ size_t medium0 = test_oom_allocs(TEST_MEDIUM_ALLOC_SIZE); size_t mega0 = test_oom_allocs(TEST_MEGA_ALLOC_SIZE); size_t huge0 = test_oom_allocs(TEST_HUGE_ALLOC_SIZE); size_t small0 = test_oom_allocs(TEST_SMALL_ALLOC_SIZE); size_t tiny0 = test_oom_allocs(TEST_TINY_ALLOC_SIZE); size_t huge1 = test_oom_allocs(TEST_HUGE_ALLOC_SIZE); size_t small1 = test_oom_allocs(TEST_SMALL_ALLOC_SIZE); size_t mega1 = test_oom_allocs(TEST_MEGA_ALLOC_SIZE); size_t tiny1 = test_oom_allocs(TEST_TINY_ALLOC_SIZE); size_t medium1 = test_oom_allocs(TEST_MEDIUM_ALLOC_SIZE); UT_ASSERTeq(mega0, mega1); UT_ASSERTeq(huge0, huge1); UT_ASSERTeq(small0, small1); UT_ASSERTeq(tiny0, tiny1); UT_ASSERTeq(medium0, medium1); UT_ASSERTeq(medium0, medium_resv); /* realloc to the same size shouldn't affect anything */ for (size_t i = 0; i < tiny1; ++i) test_realloc(TEST_TINY_ALLOC_SIZE, TEST_TINY_ALLOC_SIZE); size_t tiny2 = test_oom_allocs(TEST_TINY_ALLOC_SIZE); UT_ASSERTeq(tiny1, tiny2); test_realloc(TEST_SMALL_ALLOC_SIZE, TEST_MEDIUM_ALLOC_SIZE); test_realloc(TEST_HUGE_ALLOC_SIZE, TEST_MEGA_ALLOC_SIZE); stats_delete(mock_pop, s); lane_cleanup(mock_pop); heap_cleanup(&mock_pop->heap); FREE(mock_pop->set); MUNMAP_ANON_ALIGNED(addr, MOCK_POOL_SIZE); } static void test_spec_compliance(void) { uint64_t max_alloc = MAX_MEMORY_BLOCK_SIZE - sizeof(struct allocation_header_legacy); UT_ASSERTeq(max_alloc, PMEMOBJ_MAX_ALLOC_SIZE); } int main(int argc, char *argv[]) { START(argc, argv, "obj_pmalloc_basic"); for (int i = 0; i < TEST_RUNS; ++i) test_mock_pool_allocs(); test_spec_compliance(); DONE(NULL); } #ifdef _MSC_VER /* * Since libpmemobj is linked statically, we need to invoke its ctor/dtor. */ MSVC_CONSTR(libpmemobj_init) MSVC_DESTR(libpmemobj_fini) #endif
8,962
23.15903
78
c
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/win_common/win_common.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2017, Intel Corporation */ /* * Copyright (c) 2016, Microsoft Corporation. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * win_common.c -- test common POSIX or Linux API that were implemented * for Windows by our library. */ #include "unittest.h" /* * test_setunsetenv - test the setenv and unsetenv APIs */ static void test_setunsetenv(void) { os_unsetenv("TEST_SETUNSETENV_ONE"); /* set a new variable without overwriting - expect the new value */ UT_ASSERT(os_setenv("TEST_SETUNSETENV_ONE", "test_setunsetenv_one", 0) == 0); UT_ASSERT(strcmp(os_getenv("TEST_SETUNSETENV_ONE"), "test_setunsetenv_one") == 0); /* set an existing variable without overwriting - expect old value */ UT_ASSERT(os_setenv("TEST_SETUNSETENV_ONE", "test_setunsetenv_two", 0) == 0); UT_ASSERT(strcmp(os_getenv("TEST_SETUNSETENV_ONE"), "test_setunsetenv_one") == 0); /* set an existing variable with overwriting - expect the new value */ UT_ASSERT(os_setenv("TEST_SETUNSETENV_ONE", "test_setunsetenv_two", 1) == 0); UT_ASSERT(strcmp(os_getenv("TEST_SETUNSETENV_ONE"), "test_setunsetenv_two") == 0); /* unset our test value - expect it to be empty */ UT_ASSERT(os_unsetenv("TEST_SETUNSETENV_ONE") == 0); UT_ASSERT(os_getenv("TEST_SETUNSETENV_ONE") == NULL); } int main(int argc, char *argv[]) { START(argc, argv, "win_common - testing %s", (argc > 1) ? argv[1] : "setunsetenv"); if (argc == 1 || (stricmp(argv[1], "setunsetenv") == 0)) test_setunsetenv(); DONE(NULL); }
3,080
35.678571
74
c
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_realloc/obj_realloc.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2019, Intel Corporation */ /* * obj_realloc.c -- unit test for pmemobj_realloc and pmemobj_zrealloc */ #include <sys/param.h> #include <string.h> #include "unittest.h" #include "heap.h" #include "alloc_class.h" #include "obj.h" #include "util.h" #define MAX_ALLOC_MUL 8 #define MAX_ALLOC_CLASS 5 POBJ_LAYOUT_BEGIN(realloc); POBJ_LAYOUT_ROOT(realloc, struct root); POBJ_LAYOUT_TOID(realloc, struct object); POBJ_LAYOUT_END(realloc); struct object { size_t value; char data[]; }; struct root { TOID(struct object) obj; char data[CHUNKSIZE - sizeof(TOID(struct object))]; }; static struct alloc_class_collection *alloc_classes; /* * test_alloc -- test allocation using realloc */ static void test_alloc(PMEMobjpool *pop, size_t size) { TOID(struct root) root = POBJ_ROOT(pop, struct root); UT_ASSERT(TOID_IS_NULL(D_RO(root)->obj)); int ret = pmemobj_realloc(pop, &D_RW(root)->obj.oid, size, TOID_TYPE_NUM(struct object)); UT_ASSERTeq(ret, 0); UT_ASSERT(!TOID_IS_NULL(D_RO(root)->obj)); UT_ASSERT(pmemobj_alloc_usable_size(D_RO(root)->obj.oid) >= size); } /* * test_free -- test free using realloc */ static void test_free(PMEMobjpool *pop) { TOID(struct root) root = POBJ_ROOT(pop, struct root); UT_ASSERT(!TOID_IS_NULL(D_RO(root)->obj)); int ret = pmemobj_realloc(pop, &D_RW(root)->obj.oid, 0, TOID_TYPE_NUM(struct object)); UT_ASSERTeq(ret, 0); UT_ASSERT(TOID_IS_NULL(D_RO(root)->obj)); } /* * test_huge_size -- test zrealloc with size greater than pool size */ static void test_huge_size(PMEMobjpool *pop) { TOID(struct root) root = POBJ_ROOT(pop, struct root); UT_ASSERT(TOID_IS_NULL(D_RO(root)->obj)); int ret; ret = pmemobj_zrealloc(pop, &D_RW(root)->obj.oid, PMEMOBJ_MAX_ALLOC_SIZE, TOID_TYPE_NUM(struct object)); UT_ASSERTne(ret, 0); UT_ASSERTeq(errno, ENOMEM); UT_ASSERT(TOID_IS_NULL(D_RO(root)->obj)); ret = pmemobj_zrealloc(pop, &D_RW(root)->obj.oid, UINTMAX_MAX, TOID_TYPE_NUM(struct object)); UT_ASSERTne(ret, 0); UT_ASSERTeq(errno, ENOMEM); UT_ASSERT(TOID_IS_NULL(D_RO(root)->obj)); ret = pmemobj_zrealloc(pop, &D_RW(root)->obj.oid, UINTMAX_MAX - 1, TOID_TYPE_NUM(struct object)); UT_ASSERTne(ret, 0); UT_ASSERTeq(errno, ENOMEM); UT_ASSERT(TOID_IS_NULL(D_RO(root)->obj)); } /* test zrealloc passing PMEMoid that points to OID_NULL value */ static void test_null_oid(PMEMobjpool *pop) { TOID(struct root) root = POBJ_ROOT(pop, struct root); UT_ASSERT(TOID_IS_NULL(D_RO(root)->obj)); int ret = pmemobj_zrealloc(pop, &D_RW(root)->obj.oid, 1024, TOID_TYPE_NUM(struct object)); UT_ASSERTeq(ret, 0); UT_ASSERT(!TOID_IS_NULL(D_RO(root)->obj)); pmemobj_free(&D_RW(root)->obj.oid); UT_ASSERT(TOID_IS_NULL(D_RO(root)->obj)); } static int check_integrity = 1; /* * fill_buffer -- fill buffer with random data and return its checksum */ static uint16_t fill_buffer(unsigned char *buf, size_t size) { for (size_t i = 0; i < size; ++i) buf[i] = rand() % 255; pmem_persist(buf, size); return ut_checksum(buf, size); } /* * test_realloc -- test single reallocation */ static void test_realloc(PMEMobjpool *pop, size_t size_from, size_t size_to, uint64_t type_from, uint64_t type_to, int zrealloc) { TOID(struct root) root = POBJ_ROOT(pop, struct root); UT_ASSERT(TOID_IS_NULL(D_RO(root)->obj)); int ret; if (zrealloc) ret = pmemobj_zalloc(pop, &D_RW(root)->obj.oid, size_from, type_from); else ret = pmemobj_alloc(pop, &D_RW(root)->obj.oid, size_from, type_from, NULL, NULL); UT_ASSERTeq(ret, 0); UT_ASSERT(!TOID_IS_NULL(D_RO(root)->obj)); size_t usable_size_from = 
pmemobj_alloc_usable_size(D_RO(root)->obj.oid); UT_ASSERT(usable_size_from >= size_from); size_t check_size; uint16_t checksum; if (zrealloc) { UT_ASSERT(util_is_zeroed(D_RO(D_RO(root)->obj), size_from)); } else if (check_integrity) { check_size = size_to >= usable_size_from ? usable_size_from : size_to; checksum = fill_buffer((unsigned char *)D_RW(D_RW(root)->obj), check_size); } if (zrealloc) { ret = pmemobj_zrealloc(pop, &D_RW(root)->obj.oid, size_to, type_to); } else { ret = pmemobj_realloc(pop, &D_RW(root)->obj.oid, size_to, type_to); } UT_ASSERTeq(ret, 0); UT_ASSERT(!TOID_IS_NULL(D_RO(root)->obj)); size_t usable_size_to = pmemobj_alloc_usable_size(D_RO(root)->obj.oid); UT_ASSERT(usable_size_to >= size_to); if (size_to < size_from) { UT_ASSERT(usable_size_to <= usable_size_from); } if (zrealloc) { UT_ASSERT(util_is_zeroed(D_RO(D_RO(root)->obj), size_to)); } else if (check_integrity) { uint16_t checksum2 = ut_checksum( (uint8_t *)D_RW(D_RW(root)->obj), check_size); if (checksum2 != checksum) UT_ASSERTinfo(0, "memory corruption"); } pmemobj_free(&D_RW(root)->obj.oid); UT_ASSERT(TOID_IS_NULL(D_RO(root)->obj)); } /* * test_realloc_sizes -- test reallocations from/to specified sizes */ static void test_realloc_sizes(PMEMobjpool *pop, uint64_t type_from, uint64_t type_to, int zrealloc, unsigned size_diff) { for (uint8_t i = 0; i < MAX_ALLOCATION_CLASSES; ++i) { struct alloc_class *c = alloc_class_by_id(alloc_classes, i); if (c == NULL) continue; size_t header_size = header_type_to_size[c->header_type]; size_t size_from = c->unit_size - header_size - size_diff; for (unsigned j = 2; j <= MAX_ALLOC_MUL; j++) { size_t inc_size_to = c->unit_size * j - header_size; test_realloc(pop, size_from, inc_size_to, type_from, type_to, zrealloc); size_t dec_size_to = c->unit_size / j; if (dec_size_to <= header_size) dec_size_to = header_size; else dec_size_to -= header_size; test_realloc(pop, size_from, dec_size_to, type_from, type_to, zrealloc); for (int k = 0; k < MAX_ALLOC_CLASS; k++) { struct alloc_class *ck = alloc_class_by_id( alloc_classes, k); if (c == NULL) continue; size_t header_sizek = header_type_to_size[c->header_type]; size_t prev_size = ck->unit_size - header_sizek; test_realloc(pop, size_from, prev_size, type_from, type_to, zrealloc); } } } } int main(int argc, char *argv[]) { START(argc, argv, "obj_realloc"); /* root doesn't count */ UT_COMPILE_ERROR_ON(POBJ_LAYOUT_TYPES_NUM(realloc) != 1); if (argc < 2) UT_FATAL("usage: %s file [check_integrity]", argv[0]); PMEMobjpool *pop = pmemobj_open(argv[1], POBJ_LAYOUT_NAME(realloc)); if (!pop) UT_FATAL("!pmemobj_open"); if (argc >= 3) check_integrity = atoi(argv[2]); alloc_classes = alloc_class_collection_new(); /* test huge size alloc */ test_huge_size(pop); /* test alloc and free */ test_alloc(pop, 16); test_free(pop); /* test zrealloc passing PMEMoid that points to OID_NULL value */ test_null_oid(pop); /* test realloc without changing type number */ test_realloc_sizes(pop, 0, 0, 0, 0); /* test realloc with changing type number */ test_realloc_sizes(pop, 0, 1, 0, 0); /* test zrealloc without changing type number... */ test_realloc_sizes(pop, 0, 0, 1, 8); test_realloc_sizes(pop, 0, 0, 1, 0); /* test zrealloc with changing type number... 
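 * (size_diff of 8 and 0 starts from a slightly undersized and an exact-fit
 * allocation size, respectively)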
*/ test_realloc_sizes(pop, 0, 1, 1, 8); test_realloc_sizes(pop, 0, 1, 1, 0); /* test realloc with type number equal to the maximum of uint64_t */ test_realloc_sizes(pop, 0, UINT64_MAX, 0, 0); test_realloc_sizes(pop, 0, UINT64_MAX - 1, 0, 0); /* test zrealloc with type number equal to the maximum of uint64_t */ test_realloc_sizes(pop, 0, UINT64_MAX, 1, 0); test_realloc_sizes(pop, 0, (UINT64_MAX - 1), 1, 0); alloc_class_collection_delete(alloc_classes); pmemobj_close(pop); DONE(NULL); } #ifdef _MSC_VER extern "C" { /* * Since libpmemobj is linked statically, * we need to invoke its ctor/dtor. */ MSVC_CONSTR(libpmemobj_init) MSVC_DESTR(libpmemobj_fini) } #endif
7,788
24.371336
70
c
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/pmem_deep_persist/mocks_posix.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2018-2020, Intel Corporation */ /* * mocks_posix.c -- redefinitions of open/write functions (Posix implementation) */ #include "util.h" #include "os.h" #include "unittest.h" /* * open -- open mock because of Dev DAX without deep_flush * sysfs file, eg. DAX on emulated pmem */ FUNC_MOCK(os_open, int, const char *path, int flags, ...) FUNC_MOCK_RUN_DEFAULT { if (strstr(path, "/sys/bus/nd/devices/region") && strstr(path, "/deep_flush")) { UT_OUT("mocked open, path %s", path); if (os_access(path, R_OK)) return 999; } va_list ap; va_start(ap, flags); int mode = va_arg(ap, int); va_end(ap); return _FUNC_REAL(os_open)(path, flags, mode); } FUNC_MOCK_END /* * write -- write mock */ FUNC_MOCK(write, int, int fd, const void *buffer, size_t count) FUNC_MOCK_RUN_DEFAULT { if (fd == 999) { UT_OUT("mocked write, path %d", fd); return 1; } return _FUNC_REAL(write)(fd, buffer, count); } FUNC_MOCK_END /* * read -- read mock */ FUNC_MOCK(read, size_t, int fd, void *buffer, size_t nbyte) FUNC_MOCK_RUN_DEFAULT { if (fd == 999) { char pattern[2] = {'1', '\n'}; memcpy(buffer, pattern, sizeof(pattern)); UT_OUT("mocked read, fd %d", fd); return sizeof(pattern); } return _FUNC_REAL(read)(fd, buffer, nbyte); } FUNC_MOCK_END
1,326
20.754098
80
c
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_tx_free/obj_tx_free.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2020, Intel Corporation */ /* * obj_tx_free.c -- unit test for pmemobj_tx_free */ #include <sys/param.h> #include <string.h> #include "unittest.h" #include "util.h" #include "valgrind_internal.h" #define LAYOUT_NAME "tx_free" #define OBJ_SIZE (200 * 1024) enum type_number { TYPE_FREE_NO_TX, TYPE_FREE_WRONG_UUID, TYPE_FREE_COMMIT, TYPE_FREE_ABORT, TYPE_FREE_COMMIT_NESTED1, TYPE_FREE_COMMIT_NESTED2, TYPE_FREE_ABORT_NESTED1, TYPE_FREE_ABORT_NESTED2, TYPE_FREE_ABORT_AFTER_NESTED1, TYPE_FREE_ABORT_AFTER_NESTED2, TYPE_FREE_OOM, TYPE_FREE_ALLOC, TYPE_FREE_AFTER_ABORT, TYPE_FREE_MANY_TIMES, }; TOID_DECLARE(struct object, 0); struct object { size_t value; char data[OBJ_SIZE - sizeof(size_t)]; }; /* * do_tx_alloc -- do tx allocation with specified type number */ static PMEMoid do_tx_alloc(PMEMobjpool *pop, unsigned type_num) { PMEMoid ret = OID_NULL; TX_BEGIN(pop) { ret = pmemobj_tx_alloc(sizeof(struct object), type_num); } TX_END return ret; } /* * do_tx_free_wrong_uuid -- try to free object with invalid uuid */ static void do_tx_free_wrong_uuid(PMEMobjpool *pop) { volatile int ret = 0; PMEMoid oid = do_tx_alloc(pop, TYPE_FREE_WRONG_UUID); oid.pool_uuid_lo = ~oid.pool_uuid_lo; TX_BEGIN(pop) { ret = pmemobj_tx_free(oid); UT_ASSERTeq(ret, 0); } TX_ONABORT { ret = -1; } TX_END UT_ASSERTeq(ret, -1); /* POBJ_XFREE_NO_ABORT flag is set */ TX_BEGIN(pop) { ret = pmemobj_tx_xfree(oid, POBJ_XFREE_NO_ABORT); } TX_ONCOMMIT { UT_ASSERTeq(ret, EINVAL); } TX_ONABORT { UT_ASSERT(0); /* should not get to this point */ } TX_END TOID(struct object) obj; TOID_ASSIGN(obj, POBJ_FIRST_TYPE_NUM(pop, TYPE_FREE_WRONG_UUID)); UT_ASSERT(!TOID_IS_NULL(obj)); } /* * do_tx_free_wrong_uuid_abort_on_failure -- try to free object with * invalid uuid in a transaction where pmemobj_tx_set_failure_behavior * was called. 
*/ static void do_tx_free_wrong_uuid_abort_on_failure(PMEMobjpool *pop) { volatile int ret = 0; PMEMoid oid = do_tx_alloc(pop, TYPE_FREE_WRONG_UUID); oid.pool_uuid_lo = ~oid.pool_uuid_lo; /* pmemobj_tx_set_failure_behavior is called */ TX_BEGIN(pop) { pmemobj_tx_set_failure_behavior(POBJ_TX_FAILURE_RETURN); UT_ASSERTeq(pmemobj_tx_get_failure_behavior(), POBJ_TX_FAILURE_RETURN); ret = pmemobj_tx_free(oid); } TX_ONCOMMIT { UT_ASSERTeq(ret, EINVAL); } TX_ONABORT { UT_ASSERT(0); /* should not get to this point */ } TX_END /* pmemobj_tx_set_failure_behavior is called */ TX_BEGIN(pop) { pmemobj_tx_set_failure_behavior(POBJ_TX_FAILURE_RETURN); UT_ASSERTeq(pmemobj_tx_get_failure_behavior(), POBJ_TX_FAILURE_RETURN); ret = pmemobj_tx_xfree(oid, 0); } TX_ONCOMMIT { UT_ASSERTeq(ret, EINVAL); } TX_ONABORT { UT_ASSERT(0); /* should not get to this point */ } TX_END /* pmemobj_tx_set_failure_behavior is called in outer tx */ TX_BEGIN(pop) { pmemobj_tx_set_failure_behavior(POBJ_TX_FAILURE_RETURN); TX_BEGIN(pop) { UT_ASSERTeq(pmemobj_tx_get_failure_behavior(), POBJ_TX_FAILURE_RETURN); ret = pmemobj_tx_free(oid); } TX_ONCOMMIT { UT_ASSERTeq(ret, EINVAL); } TX_ONABORT { UT_ASSERT(0); /* should not get to this point */ } TX_END ret = pmemobj_tx_free(oid); } TX_ONCOMMIT { UT_ASSERTeq(ret, EINVAL); } TX_ONABORT { UT_ASSERT(0); /* should not get to this point */ } TX_END /* pmemobj_tx_set_failure_behavior is called in neighbour tx */ TX_BEGIN(pop) { TX_BEGIN(pop) { pmemobj_tx_set_failure_behavior(POBJ_TX_FAILURE_RETURN); ret = pmemobj_tx_free(oid); } TX_ONCOMMIT { UT_ASSERTeq(ret, EINVAL); } TX_ONABORT { UT_ASSERT(0); /* should not get to this point */ } TX_END TX_BEGIN(pop) { UT_ASSERTeq(pmemobj_tx_get_failure_behavior(), POBJ_TX_FAILURE_ABORT); } TX_ONCOMMIT { UT_ASSERTeq(ret, EINVAL); } TX_ONABORT { UT_ASSERT(0); /* should not get to this point */ } TX_END } TX_ONCOMMIT { UT_ASSERTeq(ret, EINVAL); } TX_ONABORT { UT_ASSERT(0); /* should not get to this point */ } TX_END /* pmemobj_tx_set_failure_behavior is called in neighbour tx */ TX_BEGIN(pop) { pmemobj_tx_set_failure_behavior(POBJ_TX_FAILURE_RETURN); TX_BEGIN(pop) { pmemobj_tx_set_failure_behavior(POBJ_TX_FAILURE_ABORT); UT_ASSERTeq(pmemobj_tx_get_failure_behavior(), POBJ_TX_FAILURE_ABORT); } TX_ONCOMMIT { UT_ASSERTeq(ret, EINVAL); } TX_ONABORT { UT_ASSERT(0); /* should not get to this point */ } TX_END TX_BEGIN(pop) { UT_ASSERTeq(pmemobj_tx_get_failure_behavior(), POBJ_TX_FAILURE_RETURN); ret = pmemobj_tx_free(oid); } TX_ONCOMMIT { UT_ASSERTeq(ret, EINVAL); } TX_ONABORT { UT_ASSERT(0); /* should not get to this point */ } TX_END } TX_ONCOMMIT { UT_ASSERTeq(ret, EINVAL); } TX_ONABORT { UT_ASSERT(0); /* should not get to this point */ } TX_END TOID(struct object) obj; TOID_ASSIGN(obj, POBJ_FIRST_TYPE_NUM(pop, TYPE_FREE_WRONG_UUID)); UT_ASSERT(!TOID_IS_NULL(obj)); } /* * do_tx_free_null_oid -- call pmemobj_tx_free with OID_NULL */ static void do_tx_free_null_oid(PMEMobjpool *pop) { volatile int ret = 0; TX_BEGIN(pop) { ret = pmemobj_tx_free(OID_NULL); } TX_ONABORT { ret = -1; } TX_END UT_ASSERTeq(ret, 0); } /* * do_tx_free_commit -- do the basic transactional deallocation of object */ static void do_tx_free_commit(PMEMobjpool *pop) { int ret; PMEMoid oid = do_tx_alloc(pop, TYPE_FREE_COMMIT); TX_BEGIN(pop) { ret = pmemobj_tx_free(oid); UT_ASSERTeq(ret, 0); } TX_ONABORT { UT_ASSERT(0); } TX_END TOID(struct object) obj; TOID_ASSIGN(obj, POBJ_FIRST_TYPE_NUM(pop, TYPE_FREE_COMMIT)); UT_ASSERT(TOID_IS_NULL(obj)); } /* * do_tx_free_abort -- abort deallocation of 
object */ static void do_tx_free_abort(PMEMobjpool *pop) { int ret; PMEMoid oid = do_tx_alloc(pop, TYPE_FREE_ABORT); TX_BEGIN(pop) { ret = pmemobj_tx_free(oid); UT_ASSERTeq(ret, 0); pmemobj_tx_abort(-1); } TX_ONCOMMIT { UT_ASSERT(0); } TX_END TOID(struct object) obj; TOID_ASSIGN(obj, POBJ_FIRST_TYPE_NUM(pop, TYPE_FREE_ABORT)); UT_ASSERT(!TOID_IS_NULL(obj)); } /* * do_tx_free_commit_nested -- do deallocation in nested transaction */ static void do_tx_free_commit_nested(PMEMobjpool *pop) { int ret; PMEMoid oid1 = do_tx_alloc(pop, TYPE_FREE_COMMIT_NESTED1); PMEMoid oid2 = do_tx_alloc(pop, TYPE_FREE_COMMIT_NESTED2); TX_BEGIN(pop) { ret = pmemobj_tx_free(oid1); UT_ASSERTeq(ret, 0); TX_BEGIN(pop) { ret = pmemobj_tx_free(oid2); UT_ASSERTeq(ret, 0); } TX_ONABORT { UT_ASSERT(0); } TX_END } TX_ONABORT { UT_ASSERT(0); } TX_END TOID(struct object) obj; TOID_ASSIGN(obj, POBJ_FIRST_TYPE_NUM(pop, TYPE_FREE_COMMIT_NESTED1)); UT_ASSERT(TOID_IS_NULL(obj)); TOID_ASSIGN(obj, POBJ_FIRST_TYPE_NUM(pop, TYPE_FREE_COMMIT_NESTED2)); UT_ASSERT(TOID_IS_NULL(obj)); } /* * do_tx_free_abort_nested -- abort deallocation in nested transaction */ static void do_tx_free_abort_nested(PMEMobjpool *pop) { int ret; PMEMoid oid1 = do_tx_alloc(pop, TYPE_FREE_ABORT_NESTED1); PMEMoid oid2 = do_tx_alloc(pop, TYPE_FREE_ABORT_NESTED2); TX_BEGIN(pop) { ret = pmemobj_tx_free(oid1); UT_ASSERTeq(ret, 0); TX_BEGIN(pop) { ret = pmemobj_tx_free(oid2); UT_ASSERTeq(ret, 0); pmemobj_tx_abort(-1); } TX_ONCOMMIT { UT_ASSERT(0); } TX_END } TX_ONCOMMIT { UT_ASSERT(0); } TX_END TOID(struct object) obj; TOID_ASSIGN(obj, POBJ_FIRST_TYPE_NUM(pop, TYPE_FREE_ABORT_NESTED1)); UT_ASSERT(!TOID_IS_NULL(obj)); TOID_ASSIGN(obj, POBJ_FIRST_TYPE_NUM(pop, TYPE_FREE_ABORT_NESTED2)); UT_ASSERT(!TOID_IS_NULL(obj)); } /* * do_tx_free_abort_after_nested -- abort transaction after nested * pmemobj_tx_free */ static void do_tx_free_abort_after_nested(PMEMobjpool *pop) { int ret; PMEMoid oid1 = do_tx_alloc(pop, TYPE_FREE_ABORT_AFTER_NESTED1); PMEMoid oid2 = do_tx_alloc(pop, TYPE_FREE_ABORT_AFTER_NESTED2); TX_BEGIN(pop) { ret = pmemobj_tx_free(oid1); UT_ASSERTeq(ret, 0); TX_BEGIN(pop) { ret = pmemobj_tx_free(oid2); UT_ASSERTeq(ret, 0); } TX_END pmemobj_tx_abort(-1); } TX_ONCOMMIT { UT_ASSERT(0); } TX_END TOID(struct object) obj; TOID_ASSIGN(obj, POBJ_FIRST_TYPE_NUM(pop, TYPE_FREE_ABORT_AFTER_NESTED1)); UT_ASSERT(!TOID_IS_NULL(obj)); TOID_ASSIGN(obj, POBJ_FIRST_TYPE_NUM(pop, TYPE_FREE_ABORT_AFTER_NESTED2)); UT_ASSERT(!TOID_IS_NULL(obj)); } /* * do_tx_free_alloc_abort -- free object allocated in the same transaction * and abort transaction */ static void do_tx_free_alloc_abort(PMEMobjpool *pop) { int ret; TOID(struct object) obj; TX_BEGIN(pop) { TOID_ASSIGN(obj, pmemobj_tx_alloc( sizeof(struct object), TYPE_FREE_ALLOC)); UT_ASSERT(!TOID_IS_NULL(obj)); ret = pmemobj_tx_free(obj.oid); UT_ASSERTeq(ret, 0); pmemobj_tx_abort(-1); } TX_ONCOMMIT { UT_ASSERT(0); } TX_END TOID_ASSIGN(obj, POBJ_FIRST_TYPE_NUM(pop, TYPE_FREE_ALLOC)); UT_ASSERT(TOID_IS_NULL(obj)); } /* * do_tx_free_alloc_commit -- free object allocated in the same transaction * and commit transaction */ static void do_tx_free_alloc_commit(PMEMobjpool *pop) { int ret; TOID(struct object) obj; TX_BEGIN(pop) { TOID_ASSIGN(obj, pmemobj_tx_alloc( sizeof(struct object), TYPE_FREE_ALLOC)); UT_ASSERT(!TOID_IS_NULL(obj)); ret = pmemobj_tx_free(obj.oid); UT_ASSERTeq(ret, 0); } TX_ONABORT { UT_ASSERT(0); } TX_END TOID_ASSIGN(obj, POBJ_FIRST_TYPE_NUM(pop, TYPE_FREE_ALLOC)); UT_ASSERT(TOID_IS_NULL(obj)); } /* * do_tx_free_abort_free - 
allocate a new object, perform a transactional free * in an aborted transaction and then to actually free the object. * * This can expose any issues with not properly handled free undo log. */ static void do_tx_free_abort_free(PMEMobjpool *pop) { PMEMoid oid = do_tx_alloc(pop, TYPE_FREE_AFTER_ABORT); TX_BEGIN(pop) { pmemobj_tx_free(oid); pmemobj_tx_abort(-1); } TX_ONCOMMIT { UT_ASSERT(0); } TX_END TX_BEGIN(pop) { pmemobj_tx_free(oid); } TX_ONABORT { UT_ASSERT(0); } TX_END } /* * do_tx_free_many_times -- free enough objects to trigger vector array alloc */ static void do_tx_free_many_times(PMEMobjpool *pop) { #define TX_FREE_COUNT ((1 << 3) + 1) PMEMoid oids[TX_FREE_COUNT]; for (int i = 0; i < TX_FREE_COUNT; ++i) oids[i] = do_tx_alloc(pop, TYPE_FREE_MANY_TIMES); TX_BEGIN(pop) { for (int i = 0; i < TX_FREE_COUNT; ++i) pmemobj_tx_free(oids[i]); } TX_ONABORT { UT_ASSERT(0); } TX_END #undef TX_FREE_COUNT } int main(int argc, char *argv[]) { START(argc, argv, "obj_tx_free"); util_init(); if (argc != 2) UT_FATAL("usage: %s [file]", argv[0]); PMEMobjpool *pop; if ((pop = pmemobj_create(argv[1], LAYOUT_NAME, PMEMOBJ_MIN_POOL, S_IWUSR | S_IRUSR)) == NULL) UT_FATAL("!pmemobj_create"); do_tx_free_wrong_uuid(pop); VALGRIND_WRITE_STATS; do_tx_free_wrong_uuid_abort_on_failure(pop); VALGRIND_WRITE_STATS; do_tx_free_null_oid(pop); VALGRIND_WRITE_STATS; do_tx_free_commit(pop); VALGRIND_WRITE_STATS; do_tx_free_abort(pop); VALGRIND_WRITE_STATS; do_tx_free_commit_nested(pop); VALGRIND_WRITE_STATS; do_tx_free_abort_nested(pop); VALGRIND_WRITE_STATS; do_tx_free_abort_after_nested(pop); VALGRIND_WRITE_STATS; do_tx_free_alloc_commit(pop); VALGRIND_WRITE_STATS; do_tx_free_alloc_abort(pop); VALGRIND_WRITE_STATS; do_tx_free_abort_free(pop); VALGRIND_WRITE_STATS; do_tx_free_many_times(pop); VALGRIND_WRITE_STATS; pmemobj_close(pop); DONE(NULL); }
11,423
21.356164
78
c
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/util_uuid_generate/util_uuid_generate.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2016, Intel Corporation */ /* * util_uuid_generate.c -- unit test for generating a uuid * * usage: util_uuid_generate [string] [valid|invalid] */ #include "unittest.h" #include "uuid.h" #include <unistd.h> #include <string.h> int main(int argc, char *argv[]) { START(argc, argv, "util_uuid_generate"); uuid_t uuid; uuid_t uuid1; int ret; char conv_uu[POOL_HDR_UUID_STR_LEN]; char uu[POOL_HDR_UUID_STR_LEN]; /* * No string passed in. Generate uuid. */ if (argc == 1) { /* generate a UUID string */ ret = ut_get_uuid_str(uu); UT_ASSERTeq(ret, 0); /* * Convert the string to a uuid, convert generated * uuid back to a string and compare strings. */ ret = util_uuid_from_string(uu, (struct uuid *)&uuid); UT_ASSERTeq(ret, 0); ret = util_uuid_to_string(uuid, conv_uu); UT_ASSERTeq(ret, 0); UT_ASSERT(strncmp(uu, conv_uu, POOL_HDR_UUID_STR_LEN) == 0); /* * Generate uuid from util_uuid_generate and translate to * string then back to uuid to verify they match. */ memset(uuid, 0, sizeof(uuid_t)); memset(uu, 0, POOL_HDR_UUID_STR_LEN); memset(conv_uu, 0, POOL_HDR_UUID_STR_LEN); ret = util_uuid_generate(uuid); UT_ASSERTeq(ret, 0); ret = util_uuid_to_string(uuid, uu); UT_ASSERTeq(ret, 0); ret = util_uuid_from_string(uu, (struct uuid *)&uuid1); UT_ASSERTeq(ret, 0); UT_ASSERT(memcmp(&uuid, &uuid1, sizeof(uuid_t)) == 0); } else { /* * Caller passed in string. */ if (strcmp(argv[2], "valid") == 0) { ret = util_uuid_from_string(argv[1], (struct uuid *)&uuid); UT_ASSERTeq(ret, 0); ret = util_uuid_to_string(uuid, conv_uu); UT_ASSERTeq(ret, 0); } else { ret = util_uuid_from_string(argv[1], (struct uuid *)&uuid); UT_ASSERT(ret < 0); UT_OUT("util_uuid_generate: invalid uuid string"); } } DONE(NULL); }
1,885
21.722892
62
c
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/test/obj_pool_lookup/obj_pool_lookup.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2018, Intel Corporation */ /* * obj_pool_lookup.c -- unit test for pmemobj_pool and pmemobj_pool_of */ #include "unittest.h" #define MAX_PATH_LEN 255 #define LAYOUT_NAME "pool_lookup" #define ALLOC_SIZE 100 static void define_path(char *str, size_t size, const char *dir, unsigned i) { int ret = snprintf(str, size, "%s"OS_DIR_SEP_STR"testfile%d", dir, i); if (ret < 0 || ret >= size) UT_FATAL("snprintf: %d", ret); } int main(int argc, char *argv[]) { START(argc, argv, "obj_pool_lookup"); if (argc != 3) UT_FATAL("usage: %s [directory] [# of pools]", argv[0]); unsigned npools = ATOU(argv[2]); const char *dir = argv[1]; int r; /* check before pool creation */ PMEMoid some_oid = {2, 3}; UT_ASSERTeq(pmemobj_pool_by_ptr(&some_oid), NULL); UT_ASSERTeq(pmemobj_pool_by_oid(some_oid), NULL); PMEMobjpool **pops = MALLOC(npools * sizeof(PMEMobjpool *)); void **guard_after = MALLOC(npools * sizeof(void *)); size_t length = strlen(dir) + MAX_PATH_LEN; char *path = MALLOC(length); for (unsigned i = 0; i < npools; ++i) { define_path(path, length, dir, i); pops[i] = pmemobj_create(path, LAYOUT_NAME, PMEMOBJ_MIN_POOL, S_IWUSR | S_IRUSR); /* * Reserve a page after the pool for address checks, if it * doesn't map precisely at that address - it's OK. */ guard_after[i] = MMAP((char *)pops[i] + PMEMOBJ_MIN_POOL, Ut_pagesize, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0); UT_ASSERTne(guard_after[i], NULL); if (pops[i] == NULL) UT_FATAL("!pmemobj_create"); } PMEMoid *oids = MALLOC(npools * sizeof(PMEMoid)); for (unsigned i = 0; i < npools; ++i) { r = pmemobj_alloc(pops[i], &oids[i], ALLOC_SIZE, 1, NULL, NULL); UT_ASSERTeq(r, 0); } PMEMoid invalid = {123, 321}; UT_ASSERTeq(pmemobj_pool_by_oid(OID_NULL), NULL); UT_ASSERTeq(pmemobj_pool_by_oid(invalid), NULL); for (unsigned i = 0; i < npools; ++i) { UT_ASSERTeq(pmemobj_pool_by_oid(oids[i]), pops[i]); } UT_ASSERTeq(pmemobj_pool_by_ptr(NULL), NULL); UT_ASSERTeq(pmemobj_pool_by_ptr((void *)0xCBA), NULL); void *valid_ptr = MALLOC(ALLOC_SIZE); UT_ASSERTeq(pmemobj_pool_by_ptr(valid_ptr), NULL); FREE(valid_ptr); for (unsigned i = 0; i < npools; ++i) { void *before_pool = (char *)pops[i] - 1; void *after_pool = (char *)pops[i] + PMEMOBJ_MIN_POOL + 1; void *start_pool = (char *)pops[i]; void *end_pool = (char *)pops[i] + PMEMOBJ_MIN_POOL - 1; void *edge = (char *)pops[i] + PMEMOBJ_MIN_POOL; void *middle = (char *)pops[i] + (PMEMOBJ_MIN_POOL / 2); void *in_oid = (char *)pmemobj_direct(oids[i]) + (ALLOC_SIZE / 2); UT_ASSERTeq(pmemobj_pool_by_ptr(before_pool), NULL); UT_ASSERTeq(pmemobj_pool_by_ptr(after_pool), NULL); UT_ASSERTeq(pmemobj_pool_by_ptr(start_pool), pops[i]); UT_ASSERTeq(pmemobj_pool_by_ptr(end_pool), pops[i]); UT_ASSERTeq(pmemobj_pool_by_ptr(edge), NULL); UT_ASSERTeq(pmemobj_pool_by_ptr(middle), pops[i]); UT_ASSERTeq(pmemobj_pool_by_ptr(in_oid), pops[i]); pmemobj_close(pops[i]); UT_ASSERTeq(pmemobj_pool_by_ptr(middle), NULL); UT_ASSERTeq(pmemobj_pool_by_ptr(in_oid), NULL); MUNMAP(guard_after[i], Ut_pagesize); } for (unsigned i = 0; i < npools; ++i) { UT_ASSERTeq(pmemobj_pool_by_oid(oids[i]), NULL); define_path(path, length, dir, i); pops[i] = pmemobj_open(path, LAYOUT_NAME); UT_ASSERTne(pops[i], NULL); UT_ASSERTeq(pmemobj_pool_by_oid(oids[i]), pops[i]); pmemobj_close(pops[i]); } FREE(path); FREE(pops); FREE(guard_after); FREE(oids); DONE(NULL); }
3,576
26.305344
70
c