Columns (name: dtype, observed range):
repo: string (lengths 1 to 152)
file: string (lengths 14 to 221)
code: string (lengths 501 to 25k)
file_length: int64 (501 to 25k)
avg_line_length: float64 (20 to 99.5)
max_line_length: int64 (21 to 134)
extension_type: string (2 classes)
null
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/geohash-int/geohash.h
/* * Copyright (c) 2013-2014, yinqiwen <yinqiwen@gmail.com> * Copyright (c) 2014, Matt Stancliff <matt@genges.com>. * Copyright (c) 2015, Salvatore Sanfilippo <antirez@gmail.com>. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Redis nor the names of its contributors may be used * to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef GEOHASH_H_ #define GEOHASH_H_ #include <stddef.h> #include <stdint.h> #include <stdint.h> #if defined(__cplusplus) extern "C" { #endif #define HASHISZERO(r) (!(r).bits && !(r).step) #define RANGEISZERO(r) (!(r).max && !(r).min) #define RANGEPISZERO(r) (r == NULL || RANGEISZERO(*r)) #define GEO_STEP_MAX 26 /* 26*2 = 52 bits. 
*/ /* Limits from EPSG:900913 / EPSG:3785 / OSGEO:41001 */ #define GEO_LAT_MIN -85.05112878 #define GEO_LAT_MAX 85.05112878 #define GEO_LONG_MIN -180 #define GEO_LONG_MAX 180 typedef enum { GEOHASH_NORTH = 0, GEOHASH_EAST, GEOHASH_WEST, GEOHASH_SOUTH, GEOHASH_SOUTH_WEST, GEOHASH_SOUTH_EAST, GEOHASH_NORT_WEST, GEOHASH_NORT_EAST } GeoDirection; typedef struct { uint64_t bits; uint8_t step; } GeoHashBits; typedef struct { double min; double max; } GeoHashRange; typedef struct { GeoHashBits hash; GeoHashRange longitude; GeoHashRange latitude; } GeoHashArea; typedef struct { GeoHashBits north; GeoHashBits east; GeoHashBits west; GeoHashBits south; GeoHashBits north_east; GeoHashBits south_east; GeoHashBits north_west; GeoHashBits south_west; } GeoHashNeighbors; /* * 0:success * -1:failed */ void geohashGetCoordRange(GeoHashRange *long_range, GeoHashRange *lat_range); int geohashEncode(const GeoHashRange *long_range, const GeoHashRange *lat_range, double longitude, double latitude, uint8_t step, GeoHashBits *hash); int geohashEncodeType(double longitude, double latitude, uint8_t step, GeoHashBits *hash); int geohashEncodeWGS84(double longitude, double latitude, uint8_t step, GeoHashBits *hash); int geohashDecode(const GeoHashRange long_range, const GeoHashRange lat_range, const GeoHashBits hash, GeoHashArea *area); int geohashDecodeType(const GeoHashBits hash, GeoHashArea *area); int geohashDecodeWGS84(const GeoHashBits hash, GeoHashArea *area); int geohashDecodeAreaToLongLat(const GeoHashArea *area, double *xy); int geohashDecodeToLongLatType(const GeoHashBits hash, double *xy); int geohashDecodeToLongLatWGS84(const GeoHashBits hash, double *xy); int geohashDecodeToLongLatMercator(const GeoHashBits hash, double *xy); void geohashNeighbors(const GeoHashBits *hash, GeoHashNeighbors *neighbors); #if defined(__cplusplus) } #endif #endif /* GEOHASH_H_ */
4,124
33.663866
80
h
null
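A quick usage note on the geohash.h API above: despite the "0:success / -1:failed" comment in the header, the implementations in geohash.c (next row) return 1 on success and 0 on failure, so callers should test for a zero return. A minimal round-trip sketch against the declared prototypes (the coordinates are just example data):

#include <cstdio>
#include "geohash.h"

int main() {
    GeoHashBits hash;
    /* Encode a WGS84 lon/lat pair at maximum precision (GEO_STEP_MAX = 26 steps, 52 bits). */
    if (!geohashEncodeWGS84(15.087269, 37.502669, GEO_STEP_MAX, &hash))
        return 1;

    /* Decode back to the center of the bounding cell. */
    double xy[2];
    if (!geohashDecodeToLongLatWGS84(hash, xy))
        return 1;

    std::printf("bits=%llu step=%u center=(%f, %f)\n",
                (unsigned long long)hash.bits, (unsigned)hash.step, xy[0], xy[1]);
    return 0;
}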
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/geohash-int/geohash.c
/* * Copyright (c) 2013-2014, yinqiwen <yinqiwen@gmail.com> * Copyright (c) 2014, Matt Stancliff <matt@genges.com>. * Copyright (c) 2015-2016, Salvatore Sanfilippo <antirez@gmail.com>. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Redis nor the names of its contributors may be used * to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. */ #include "geohash.h" /** * Hashing works like this: * Divide the world into 4 buckets. Label each one as such: * ----------------- * | | | * | | | * | 0,1 | 1,1 | * ----------------- * | | | * | | | * | 0,0 | 1,0 | * ----------------- */ /* Interleave lower bits of x and y, so the bits of x * are in the even positions and bits from y in the odd; * x and y must initially be less than 2**32 (65536). 
* From: https://graphics.stanford.edu/~seander/bithacks.html#InterleaveBMN */ static inline uint64_t interleave64(uint32_t xlo, uint32_t ylo) { static const uint64_t B[] = {0x5555555555555555ULL, 0x3333333333333333ULL, 0x0F0F0F0F0F0F0F0FULL, 0x00FF00FF00FF00FFULL, 0x0000FFFF0000FFFFULL}; static const unsigned int S[] = {1, 2, 4, 8, 16}; uint64_t x = xlo; uint64_t y = ylo; x = (x | (x << S[4])) & B[4]; y = (y | (y << S[4])) & B[4]; x = (x | (x << S[3])) & B[3]; y = (y | (y << S[3])) & B[3]; x = (x | (x << S[2])) & B[2]; y = (y | (y << S[2])) & B[2]; x = (x | (x << S[1])) & B[1]; y = (y | (y << S[1])) & B[1]; x = (x | (x << S[0])) & B[0]; y = (y | (y << S[0])) & B[0]; return x | (y << 1); } /* reverse the interleave process * derived from http://stackoverflow.com/questions/4909263 */ static inline uint64_t deinterleave64(uint64_t interleaved) { static const uint64_t B[] = {0x5555555555555555ULL, 0x3333333333333333ULL, 0x0F0F0F0F0F0F0F0FULL, 0x00FF00FF00FF00FFULL, 0x0000FFFF0000FFFFULL, 0x00000000FFFFFFFFULL}; static const unsigned int S[] = {0, 1, 2, 4, 8, 16}; uint64_t x = interleaved; uint64_t y = interleaved >> 1; x = (x | (x >> S[0])) & B[0]; y = (y | (y >> S[0])) & B[0]; x = (x | (x >> S[1])) & B[1]; y = (y | (y >> S[1])) & B[1]; x = (x | (x >> S[2])) & B[2]; y = (y | (y >> S[2])) & B[2]; x = (x | (x >> S[3])) & B[3]; y = (y | (y >> S[3])) & B[3]; x = (x | (x >> S[4])) & B[4]; y = (y | (y >> S[4])) & B[4]; x = (x | (x >> S[5])) & B[5]; y = (y | (y >> S[5])) & B[5]; return x | (y << 32); } void geohashGetCoordRange(GeoHashRange *long_range, GeoHashRange *lat_range) { /* These are constraints from EPSG:900913 / EPSG:3785 / OSGEO:41001 */ /* We can't geocode at the north/south pole. */ long_range->max = GEO_LONG_MAX; long_range->min = GEO_LONG_MIN; lat_range->max = GEO_LAT_MAX; lat_range->min = GEO_LAT_MIN; } int geohashEncode(const GeoHashRange *long_range, const GeoHashRange *lat_range, double longitude, double latitude, uint8_t step, GeoHashBits *hash) { /* Check basic arguments sanity. */ if (hash == NULL || step > 32 || step == 0 || RANGEPISZERO(lat_range) || RANGEPISZERO(long_range)) return 0; /* Return an error when trying to index outside the supported * constraints. 
*/ if (longitude > 180 || longitude < -180 || latitude > 85.05112878 || latitude < -85.05112878) return 0; hash->bits = 0; hash->step = step; if (latitude < lat_range->min || latitude > lat_range->max || longitude < long_range->min || longitude > long_range->max) { return 0; } double lat_offset = (latitude - lat_range->min) / (lat_range->max - lat_range->min); double long_offset = (longitude - long_range->min) / (long_range->max - long_range->min); /* convert to fixed point based on the step size */ lat_offset *= (1 << step); long_offset *= (1 << step); hash->bits = interleave64(lat_offset, long_offset); return 1; } int geohashEncodeType(double longitude, double latitude, uint8_t step, GeoHashBits *hash) { GeoHashRange r[2] = { { 0 } }; geohashGetCoordRange(&r[0], &r[1]); return geohashEncode(&r[0], &r[1], longitude, latitude, step, hash); } int geohashEncodeWGS84(double longitude, double latitude, uint8_t step, GeoHashBits *hash) { return geohashEncodeType(longitude, latitude, step, hash); } int geohashDecode(const GeoHashRange long_range, const GeoHashRange lat_range, const GeoHashBits hash, GeoHashArea *area) { if (HASHISZERO(hash) || NULL == area || RANGEISZERO(lat_range) || RANGEISZERO(long_range)) { return 0; } area->hash = hash; uint8_t step = hash.step; uint64_t hash_sep = deinterleave64(hash.bits); /* hash = [LAT][LONG] */ double lat_scale = lat_range.max - lat_range.min; double long_scale = long_range.max - long_range.min; uint32_t ilato = hash_sep; /* get lat part of deinterleaved hash */ uint32_t ilono = hash_sep >> 32; /* shift over to get long part of hash */ /* divide by 2**step. * Then, for 0-1 coordinate, multiply times scale and add to the min to get the absolute coordinate. */ area->latitude.min = lat_range.min + (ilato * 1.0 / (1ull << step)) * lat_scale; area->latitude.max = lat_range.min + ((ilato + 1) * 1.0 / (1ull << step)) * lat_scale; area->longitude.min = long_range.min + (ilono * 1.0 / (1ull << step)) * long_scale; area->longitude.max = long_range.min + ((ilono + 1) * 1.0 / (1ull << step)) * long_scale; return 1; } int geohashDecodeType(const GeoHashBits hash, GeoHashArea *area) { GeoHashRange r[2] = { { 0 } }; geohashGetCoordRange(&r[0], &r[1]); return geohashDecode(r[0], r[1], hash, area); } int geohashDecodeWGS84(const GeoHashBits hash, GeoHashArea *area) { return geohashDecodeType(hash, area); } int geohashDecodeAreaToLongLat(const GeoHashArea *area, double *xy) { if (!xy) return 0; xy[0] = (area->longitude.min + area->longitude.max) / 2; xy[1] = (area->latitude.min + area->latitude.max) / 2; return 1; } int geohashDecodeToLongLatType(const GeoHashBits hash, double *xy) { GeoHashArea area = { { 0 } }; if (!xy || !geohashDecodeType(hash, &area)) return 0; return geohashDecodeAreaToLongLat(&area, xy); } int geohashDecodeToLongLatWGS84(const GeoHashBits hash, double *xy) { return geohashDecodeToLongLatType(hash, xy); } static void geohash_move_x(GeoHashBits *hash, int8_t d) { if (d == 0) return; uint64_t x = hash->bits & 0xaaaaaaaaaaaaaaaaULL; uint64_t y = hash->bits & 0x5555555555555555ULL; uint64_t zz = 0x5555555555555555ULL >> (64 - hash->step * 2); if (d > 0) { x = x + (zz + 1); } else { x = x | zz; x = x - (zz + 1); } x &= (0xaaaaaaaaaaaaaaaaULL >> (64 - hash->step * 2)); hash->bits = (x | y); } static void geohash_move_y(GeoHashBits *hash, int8_t d) { if (d == 0) return; uint64_t x = hash->bits & 0xaaaaaaaaaaaaaaaaULL; uint64_t y = hash->bits & 0x5555555555555555ULL; uint64_t zz = 0xaaaaaaaaaaaaaaaaULL >> (64 - hash->step * 2); if (d > 0) { y = y + (zz + 
1); } else { y = y | zz; y = y - (zz + 1); } y &= (0x5555555555555555ULL >> (64 - hash->step * 2)); hash->bits = (x | y); } void geohashNeighbors(const GeoHashBits *hash, GeoHashNeighbors *neighbors) { neighbors->east = *hash; neighbors->west = *hash; neighbors->north = *hash; neighbors->south = *hash; neighbors->south_east = *hash; neighbors->south_west = *hash; neighbors->north_east = *hash; neighbors->north_west = *hash; geohash_move_x(&neighbors->east, 1); geohash_move_y(&neighbors->east, 0); geohash_move_x(&neighbors->west, -1); geohash_move_y(&neighbors->west, 0); geohash_move_x(&neighbors->south, 0); geohash_move_y(&neighbors->south, -1); geohash_move_x(&neighbors->north, 0); geohash_move_y(&neighbors->north, 1); geohash_move_x(&neighbors->north_west, -1); geohash_move_y(&neighbors->north_west, 1); geohash_move_x(&neighbors->north_east, 1); geohash_move_y(&neighbors->north_east, 1); geohash_move_x(&neighbors->south_east, 1); geohash_move_y(&neighbors->south_east, -1); geohash_move_x(&neighbors->south_west, -1); geohash_move_y(&neighbors->south_west, -1); }
10,005
32.804054
91
c
null
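The core of geohash.c above is the interleave64()/deinterleave64() pair, which spreads the two fixed-point offsets into the even and odd bit positions using the "Binary Magic Numbers" technique. A self-contained sketch of the same idea, checked against a naive bit-by-bit reference (illustration only, not the library build):

#include <cassert>
#include <cstdint>
#include <cstdio>

/* Spread the bits of x into even positions and y into odd positions,
 * using the same mask/shift sequence as interleave64() above. */
static uint64_t interleave(uint32_t xlo, uint32_t ylo) {
    static const uint64_t B[] = {0x5555555555555555ULL, 0x3333333333333333ULL,
                                 0x0F0F0F0F0F0F0F0FULL, 0x00FF00FF00FF00FFULL,
                                 0x0000FFFF0000FFFFULL};
    static const unsigned S[] = {1, 2, 4, 8, 16};
    uint64_t x = xlo, y = ylo;
    for (int i = 4; i >= 0; i--) {
        x = (x | (x << S[i])) & B[i];
        y = (y | (y << S[i])) & B[i];
    }
    return x | (y << 1);
}

/* Naive reference: pull bit i of each half back out of positions 2i and 2i+1. */
static void deinterleave_naive(uint64_t z, uint32_t *x, uint32_t *y) {
    *x = *y = 0;
    for (int i = 0; i < 32; i++) {
        *x |= (uint32_t)((z >> (2 * i)) & 1) << i;
        *y |= (uint32_t)((z >> (2 * i + 1)) & 1) << i;
    }
}

int main() {
    uint32_t lat = 0x02345678, lon = 0x01bcdef0, a, b;
    uint64_t z = interleave(lat, lon);
    deinterleave_naive(z, &a, &b);
    assert(a == lat && b == lon);   /* the magic-number version round-trips */
    std::printf("interleaved: %016llx\n", (unsigned long long)z);
    return 0;
}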
NearPMSW-main/nearpm/shadow/redis-NDP-sd/deps/geohash-int/geohash_helper.h
/* * Copyright (c) 2013-2014, yinqiwen <yinqiwen@gmail.com> * Copyright (c) 2014, Matt Stancliff <matt@genges.com>. * Copyright (c) 2015, Salvatore Sanfilippo <antirez@gmail.com>. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Redis nor the names of its contributors may be used * to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef GEOHASH_HELPER_HPP_ #define GEOHASH_HELPER_HPP_ #include <math.h> #include "geohash.h" #define GZERO(s) s.bits = s.step = 0; #define GISZERO(s) (!s.bits && !s.step) #define GISNOTZERO(s) (s.bits || s.step) typedef uint64_t GeoHashFix52Bits; typedef uint64_t GeoHashVarBits; typedef struct { GeoHashBits hash; GeoHashArea area; GeoHashNeighbors neighbors; } GeoHashRadius; int GeoHashBitsComparator(const GeoHashBits *a, const GeoHashBits *b); uint8_t geohashEstimateStepsByRadius(double range_meters, double lat); int geohashBoundingBox(double longitude, double latitude, double radius_meters, double *bounds); GeoHashRadius geohashGetAreasByRadius(double longitude, double latitude, double radius_meters); GeoHashRadius geohashGetAreasByRadiusWGS84(double longitude, double latitude, double radius_meters); GeoHashRadius geohashGetAreasByRadiusMercator(double longitude, double latitude, double radius_meters); GeoHashFix52Bits geohashAlign52Bits(const GeoHashBits hash); double geohashGetDistance(double lon1d, double lat1d, double lon2d, double lat2d); int geohashGetDistanceIfInRadius(double x1, double y1, double x2, double y2, double radius, double *distance); int geohashGetDistanceIfInRadiusWGS84(double x1, double y1, double x2, double y2, double radius, double *distance); #endif /* GEOHASH_HELPER_HPP_ */
3,368
45.791667
80
h
null
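geohash_helper.h declares geohashGetDistance() but its definition is not part of this dump. A common choice for this kind of API is the haversine great-circle formula; the sketch below is that textbook formula with an assumed mean earth radius, not necessarily what geohash_helper.c actually computes:

#include <cmath>
#include <cstdio>

static const double PI = 3.14159265358979323846;
static const double EARTH_RADIUS_M = 6372797.560856;   /* assumed mean radius in meters */

static double deg_rad(double deg) { return deg * (PI / 180.0); }

/* Textbook haversine distance in meters between two lon/lat pairs. */
static double haversine_m(double lon1d, double lat1d, double lon2d, double lat2d) {
    double u = std::sin(deg_rad(lat2d - lat1d) / 2.0);
    double v = std::sin(deg_rad(lon2d - lon1d) / 2.0);
    double a = u * u + std::cos(deg_rad(lat1d)) * std::cos(deg_rad(lat2d)) * v * v;
    return 2.0 * EARTH_RADIUS_M * std::asin(std::sqrt(a));
}

int main() {
    /* Catania to Palermo, roughly 166 km. */
    std::printf("%.1f m\n", haversine_m(15.087269, 37.502669, 13.361389, 38.115556));
    return 0;
}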
NearPMSW-main/nearpm/shadow/redis-NDP-sd/utils/corrupt_rdb.c
/* Trivia program to corrupt an RDB file in order to check the RDB check * program behavior and effectiveness. * * Copyright (C) 2016 Salvatore Sanfilippo. * This software is released in the 3-clause BSD license. */ #include <stdio.h> #include <fcntl.h> #include <sys/stat.h> #include <stdlib.h> #include <unistd.h> #include <time.h> int main(int argc, char **argv) { struct stat stat; int fd, cycles; if (argc != 3) { fprintf(stderr,"Usage: <filename> <cycles>\n"); exit(1); } srand(time(NULL)); cycles = atoi(argv[2]); fd = open("dump.rdb",O_RDWR); if (fd == -1) { perror("open"); exit(1); } fstat(fd,&stat); while(cycles--) { unsigned char buf[32]; unsigned long offset = rand()%stat.st_size; int writelen = 1+rand()%31; int j; for (j = 0; j < writelen; j++) buf[j] = (char)rand(); lseek(fd,offset,SEEK_SET); printf("Writing %d bytes at offset %lu\n", writelen, offset); write(fd,buf,writelen); } return 0; }
1,070
22.8
72
c
null
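One detail worth noticing in corrupt_rdb.c above: the usage string advertises "<filename> <cycles>", but the program ignores argv[1] and always opens dump.rdb. The standalone variant below keeps the same corruption loop while honoring the filename argument (an illustrative sketch, not a patch to the original):

#include <cstdio>
#include <cstdlib>
#include <ctime>
#include <fcntl.h>
#include <sys/stat.h>
#include <unistd.h>

int main(int argc, char **argv) {
    if (argc != 3) {
        std::fprintf(stderr, "Usage: %s <filename> <cycles>\n", argv[0]);
        return 1;
    }
    std::srand((unsigned)std::time(NULL));
    int cycles = std::atoi(argv[2]);
    int fd = open(argv[1], O_RDWR);              /* use the file named on the command line */
    if (fd == -1) { perror("open"); return 1; }
    struct stat st;
    if (fstat(fd, &st) == -1 || st.st_size == 0) { perror("fstat"); return 1; }
    while (cycles--) {
        unsigned char buf[32];
        off_t offset = std::rand() % st.st_size; /* random position inside the file */
        int len = 1 + std::rand() % 31;          /* overwrite 1..31 random bytes */
        for (int j = 0; j < len; j++) buf[j] = (unsigned char)std::rand();
        lseek(fd, offset, SEEK_SET);
        std::printf("Writing %d bytes at offset %lld\n", len, (long long)offset);
        if (write(fd, buf, len) != len) perror("write");
    }
    close(fd);
    return 0;
}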
NearPMSW-main/nearpm/shadow/redis-NDP-sd/utils/hashtable/rehashing.c
#include "redis.h" #include "dict.h" void _redisAssert(char *x, char *y, int l) { printf("ASSERT: %s %s %d\n",x,y,l); exit(1); } unsigned int dictKeyHash(const void *keyp) { unsigned long key = (unsigned long)keyp; key = dictGenHashFunction(&key,sizeof(key)); key += ~(key << 15); key ^= (key >> 10); key += (key << 3); key ^= (key >> 6); key += ~(key << 11); key ^= (key >> 16); return key; } int dictKeyCompare(void *privdata, const void *key1, const void *key2) { unsigned long k1 = (unsigned long)key1; unsigned long k2 = (unsigned long)key2; return k1 == k2; } dictType dictTypeTest = { dictKeyHash, /* hash function */ NULL, /* key dup */ NULL, /* val dup */ dictKeyCompare, /* key compare */ NULL, /* key destructor */ NULL /* val destructor */ }; void showBuckets(dictht ht) { if (ht.table == NULL) { printf("NULL\n"); } else { int j; for (j = 0; j < ht.size; j++) { printf("%c", ht.table[j] ? '1' : '0'); } printf("\n"); } } void show(dict *d) { int j; if (d->rehashidx != -1) { printf("rhidx: "); for (j = 0; j < d->rehashidx; j++) printf("."); printf("|\n"); } printf("ht[0]: "); showBuckets(d->ht[0]); printf("ht[1]: "); showBuckets(d->ht[1]); printf("\n"); } int sortPointers(const void *a, const void *b) { unsigned long la, lb; la = (long) (*((dictEntry**)a)); lb = (long) (*((dictEntry**)b)); return la-lb; } void stressGetKeys(dict *d, int times, int *perfect_run, int *approx_run) { int j; dictEntry **des = zmalloc(sizeof(dictEntry*)*dictSize(d)); for (j = 0; j < times; j++) { int requested = rand() % (dictSize(d)+1); int returned = dictGetSomeKeys(d, des, requested); int dup = 0; qsort(des,returned,sizeof(dictEntry*),sortPointers); if (returned > 1) { int i; for (i = 0; i < returned-1; i++) { if (des[i] == des[i+1]) dup++; } } if (requested == returned && dup == 0) { (*perfect_run)++; } else { (*approx_run)++; printf("Requested, returned, duplicated: %d %d %d\n", requested, returned, dup); } } zfree(des); } #define MAX1 120 #define MAX2 1000 int main(void) { dict *d = dictCreate(&dictTypeTest,NULL); unsigned long i; srand(time(NULL)); for (i = 0; i < MAX1; i++) { dictAdd(d,(void*)i,NULL); show(d); } printf("Size: %d\n", (int)dictSize(d)); for (i = 0; i < MAX1; i++) { dictDelete(d,(void*)i); dictResize(d); show(d); } dictRelease(d); d = dictCreate(&dictTypeTest,NULL); printf("Stress testing dictGetSomeKeys\n"); int perfect_run = 0, approx_run = 0; for (i = 0; i < MAX2; i++) { dictAdd(d,(void*)i,NULL); stressGetKeys(d,100,&perfect_run,&approx_run); } for (i = 0; i < MAX2; i++) { dictDelete(d,(void*)i); dictResize(d); stressGetKeys(d,100,&perfect_run,&approx_run); } printf("dictGetSomeKey, %d perfect runs, %d approximated runs\n", perfect_run, approx_run); dictRelease(d); printf("TEST PASSED!\n"); return 0; }
3,504
23.51049
75
c
null
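rehashing.c feeds integer keys through dictKeyHash(), which hashes the raw bytes with dictGenHashFunction() and then applies an extra shift/xor avalanche mix. The mixing steps in isolation, applied directly to small integers just to show the scattering effect (standalone restatement for illustration; dictGenHashFunction itself lives in dict.c and is not reproduced here):

#include <cstdio>

/* Same avalanche sequence used at the end of dictKeyHash() above. */
static unsigned int avalanche_mix(unsigned long key) {
    key += ~(key << 15);
    key ^= (key >> 10);
    key += (key << 3);
    key ^= (key >> 6);
    key += ~(key << 11);
    key ^= (key >> 16);
    return (unsigned int)key;
}

int main() {
    /* Consecutive keys land on widely scattered 32-bit values. */
    for (unsigned long k = 0; k < 4; k++)
        std::printf("%lu -> %u\n", k, avalanche_mix(k));
    return 0;
}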
NearPMSW-main/nearpm/shadow/pmemkv-bench-sd/bench/util/csv.h
// SPDX-License-Identifier: Apache-2.0 /* Copyright 2020-2021, Intel Corporation */ #pragma once #include <iostream> #include <map> #include <ostream> #include <set> #include <string> template <typename IdType> class CSV { private: /* Hold data in two-dimensional map of strings: data_matrix[row][column] */ std::map<IdType, std::map<std::string, std::string>> data_matrix; /* List of all columns, which is filled during inserts. Needed for * printing header and data in the same order. * */ std::set<std::string> columns; std::string id_name; public: CSV(std::string id_column_name) : id_name(id_column_name){}; void insert(IdType row, std::string column, std::string data) { columns.insert(column); data_matrix[row][column] = data; } void insert(IdType row, std::string column, const char *data) { insert(row, column, std::string(data)); } template <typename T> void insert(IdType row, std::string column, T data) { insert(row, column, std::to_string(data)); } void print() { // Print first column name std::cout << id_name; for (auto &column : columns) { std::cout << "," << column; } std::cout << "\r\n" << std::flush; for (auto &row : data_matrix) { std::cout << row.first; for (auto &column : columns) { std::cout << "," << data_matrix[row.first][column]; } std::cout << "\r\n" << std::flush; } } };
1,381
21.290323
73
h
null
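csv.h above is header-only, so a short usage sketch shows the whole flow: cells are inserted by (row id, column name), and print() emits a header built from every column seen so far, with rows and columns in map/set order. The row ids and column names below are just example data:

#include "csv.h"

int main() {
    CSV<int> csv("run_id");
    csv.insert(0, "threads", 4);                      /* numeric overload -> std::to_string */
    csv.insert(0, "engine", "cmap");                  /* const char* overload */
    csv.insert(1, "threads", 8);
    csv.insert(1, "engine", std::string("csmap"));    /* std::string overload */
    csv.print();
    /* Prints:
     *   run_id,engine,threads
     *   0,cmap,4
     *   1,csmap,8
     */
    return 0;
}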
NearPMSW-main/nearpm/shadow/pmemkv-bench-sd/bench/util/logging.h
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE-BSD file. See the AUTHORS file for names of contributors. // SPDX-License-Identifier: Apache-2.0 // Copyright 2020, Intel Corporation // Must not be included from any .h files to avoid polluting the namespace // with macros. #ifndef STORAGE_LEVELDB_UTIL_LOGGING_H_ #define STORAGE_LEVELDB_UTIL_LOGGING_H_ #include "port/port_posix.h" #include <stdint.h> #include <stdio.h> #include <string> namespace leveldb { class Slice; class WritableFile; // Append a human-readable printout of "num" to *str extern void AppendNumberTo(std::string *str, uint64_t num); // Append a human-readable printout of "value" to *str. // Escapes any non-printable characters found in "value". extern void AppendEscapedStringTo(std::string *str, const Slice &value); // Return a human-readable printout of "num" extern std::string NumberToString(uint64_t num); // Return a human-readable version of "value". // Escapes any non-printable characters found in "value". extern std::string EscapeString(const Slice &value); // Parse a human-readable number from "*in" into *value. On success, // advances "*in" past the consumed number and sets "*val" to the // numeric value. Otherwise, returns false and leaves *in in an // unspecified state. extern bool ConsumeDecimalNumber(Slice *in, uint64_t *val); } // namespace leveldb #endif // STORAGE_LEVELDB_UTIL_LOGGING_H_
1,519
30.666667
81
h
null
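logging.h only declares ConsumeDecimalNumber(); the definition is in the corresponding .cc file, which is not part of this dump. A plausible implementation of the documented contract (consume leading digits, advance *in past them, fail on overflow) might look like the sketch below, written against the Slice API shown later in this dump. This is an assumption, not the library's actual code:

#include <cassert>
#include <cstdint>
#include "leveldb/slice.h"

/* Hypothetical helper: parse a leading run of digits from *in into *val,
 * advancing *in past them; false on no digits or uint64_t overflow. */
static bool ConsumeDecimalNumberSketch(leveldb::Slice *in, uint64_t *val) {
    uint64_t v = 0;
    size_t digits = 0;
    while (digits < in->size()) {
        char c = (*in)[digits];
        if (c < '0' || c > '9') break;
        uint64_t d = (uint64_t)(c - '0');
        if (v > (UINT64_MAX - d) / 10) return false;   /* next step would overflow */
        v = v * 10 + d;
        digits++;
    }
    if (digits == 0) return false;
    *val = v;
    in->remove_prefix(digits);
    return true;
}

int main() {
    leveldb::Slice s("12345abc");
    uint64_t v = 0;
    assert(ConsumeDecimalNumberSketch(&s, &v) && v == 12345 && s == leveldb::Slice("abc"));
    return 0;
}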
NearPMSW-main/nearpm/shadow/pmemkv-bench-sd/bench/util/testutil.h
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE-BSD file. See the AUTHORS file for names of contributors. // SPDX-License-Identifier: Apache-2.0 // Copyright 2020, Intel Corporation #ifndef STORAGE_LEVELDB_UTIL_TESTUTIL_H_ #define STORAGE_LEVELDB_UTIL_TESTUTIL_H_ #include "leveldb/env.h" #include "leveldb/slice.h" #include "util/random.h" namespace leveldb { namespace test { // Store in *dst a random string of length "len" and return a Slice that // references the generated data. Slice RandomString(Random *rnd, int len, std::string *dst); // Return a random key with the specified length that may contain interesting // characters (e.g. \x00, \xff, etc.). std::string RandomKey(Random *rnd, int len); // Store in *dst a string of length "len" that will compress to // "N*compressed_fraction" bytes and return a Slice that references // the generated data. Slice CompressibleString(Random *rnd, double compressed_fraction, size_t len, std::string *dst); // A wrapper that allows injection of errors. class ErrorEnv : public EnvWrapper { public: bool writable_file_error_; int num_writable_file_errors_; ErrorEnv() : EnvWrapper(Env::Default()), writable_file_error_(false), num_writable_file_errors_(0) { } virtual Status NewWritableFile(const std::string &fname, WritableFile **result) { if (writable_file_error_) { ++num_writable_file_errors_; *result = nullptr; return Status::IOError(fname, "fake error"); } return target()->NewWritableFile(fname, result); } virtual Status NewAppendableFile(const std::string &fname, WritableFile **result) { if (writable_file_error_) { ++num_writable_file_errors_; *result = nullptr; return Status::IOError(fname, "fake error"); } return target()->NewAppendableFile(fname, result); } }; } // namespace test } // namespace leveldb #endif // STORAGE_LEVELDB_UTIL_TESTUTIL_H_
1,984
28.191176
99
h
null
NearPMSW-main/nearpm/shadow/pmemkv-bench-sd/bench/util/mutexlock.h
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE-BSD file. See the AUTHORS file for names of contributors. // SPDX-License-Identifier: Apache-2.0 // Copyright 2020, Intel Corporation #ifndef STORAGE_LEVELDB_UTIL_MUTEXLOCK_H_ #define STORAGE_LEVELDB_UTIL_MUTEXLOCK_H_ #include "port/port_posix.h" #include "port/thread_annotations.h" namespace leveldb { // Helper class that locks a mutex on construction and unlocks the mutex when // the destructor of the MutexLock object is invoked. // // Typical usage: // // void MyClass::MyMethod() { // MutexLock l(&mu_); // mu_ is an instance variable // ... some complex code, possibly with multiple return paths ... // } class SCOPED_LOCKABLE MutexLock { public: explicit MutexLock(port::Mutex *mu) EXCLUSIVE_LOCK_FUNCTION(mu) : mu_(mu) { this->mu_->Lock(); } ~MutexLock() UNLOCK_FUNCTION() { this->mu_->Unlock(); } private: port::Mutex *const mu_; // No copying allowed MutexLock(const MutexLock &); void operator=(const MutexLock &); }; } // namespace leveldb #endif // STORAGE_LEVELDB_UTIL_MUTEXLOCK_H_
1,202
24.0625
81
h
null
NearPMSW-main/nearpm/shadow/pmemkv-bench-sd/bench/util/random.h
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE-BSD file. See the AUTHORS file for names of contributors. // SPDX-License-Identifier: Apache-2.0 // Copyright 2020, Intel Corporation #ifndef STORAGE_LEVELDB_UTIL_RANDOM_H_ #define STORAGE_LEVELDB_UTIL_RANDOM_H_ #include <stdint.h> namespace leveldb { // A very simple random number generator. Not especially good at // generating truly random bits, but good enough for our needs in this // package. class Random { private: uint32_t seed_; public: explicit Random(uint32_t s) : seed_(s & 0x7fffffffu) { // Avoid bad seeds. if (seed_ == 0 || seed_ == 2147483647L) { seed_ = 1; } } uint32_t Next() { static const uint32_t M = 2147483647L; // 2^31-1 static const uint64_t A = 16807; // bits 14, 8, 7, 5, 2, 1, 0 // We are computing // seed_ = (seed_ * A) % M, where M = 2^31-1 // // seed_ must not be zero or M, or else all subsequent computed values // will be zero or M respectively. For all other values, seed_ will end // up cycling through every number in [1,M-1] uint64_t product = seed_ * A; // Compute (product % M) using the fact that ((x << 31) % M) == x. seed_ = static_cast<uint32_t>((product >> 31) + (product & M)); // The first reduction may overflow by 1 bit, so we may need to // repeat. mod == M is not possible; using > allows the faster // sign-bit-based test. if (seed_ > M) { seed_ -= M; } return seed_; } // Returns a uniformly distributed value in the range [0..n-1] // REQUIRES: n > 0 uint32_t Uniform(int n) { return Next() % n; } // Randomly returns true ~"1/n" of the time, and false otherwise. // REQUIRES: n > 0 bool OneIn(int n) { return (Next() % n) == 0; } // Skewed: pick "base" uniformly from range [0,max_log] and then // return "base" random bits. The effect is to pick a number in the // range [0,2^max_log-1] with exponential bias towards smaller numbers. uint32_t Skewed(int max_log) { return Uniform(1 << Uniform(max_log + 1)); } }; } // namespace leveldb #endif // STORAGE_LEVELDB_UTIL_RANDOM_H_
2,202
26.886076
81
h
null
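Random::Next() above implements the classic Lehmer generator seed = seed * 16807 mod (2^31 - 1), and avoids a 64-bit modulo via the identity ((x << 31) mod M) == x for M = 2^31 - 1: the 46-bit product is folded into its high bits plus its low 31 bits, and at most one conditional subtraction of M finishes the reduction. A standalone check that this matches a plain % over many steps:

#include <cassert>
#include <cstdint>
#include <cstdio>

int main() {
    const uint32_t M = 2147483647u;       /* 2^31 - 1 */
    const uint64_t A = 16807;
    uint32_t seed = 301;                  /* any value in [1, M-1] works */
    for (int i = 0; i < 1000000; i++) {
        uint64_t product = (uint64_t)seed * A;
        uint32_t fast = (uint32_t)((product >> 31) + (product & M));
        if (fast > M) fast -= M;          /* same conditional subtraction as Next() */
        assert(fast == product % M);      /* agrees with the straightforward modulo */
        seed = fast;
    }
    std::printf("fast reduction matched %% for 1000000 steps\n");
    return 0;
}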
NearPMSW-main/nearpm/shadow/pmemkv-bench-sd/bench/util/posix_logger.h
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE-BSD file. See the AUTHORS file for names of contributors. // SPDX-License-Identifier: Apache-2.0 // Copyright 2020, Intel Corporation // // Logger implementation that can be shared by all environments // where enough posix functionality is available. #ifndef STORAGE_LEVELDB_UTIL_POSIX_LOGGER_H_ #define STORAGE_LEVELDB_UTIL_POSIX_LOGGER_H_ #include "leveldb/env.h" #include <algorithm> #include <stdio.h> #include <sys/time.h> #include <time.h> namespace leveldb { class PosixLogger : public Logger { private: FILE *file_; uint64_t (*gettid_)(); // Return the thread id for the current thread public: PosixLogger(FILE *f, uint64_t (*gettid)()) : file_(f), gettid_(gettid) { } virtual ~PosixLogger() { fclose(file_); } virtual void Logv(const char *format, va_list ap) { const uint64_t thread_id = (*gettid_)(); // We try twice: the first time with a fixed-size stack allocated buffer, // and the second time with a much larger dynamically allocated buffer. char buffer[500]; for (int iter = 0; iter < 2; iter++) { char *base; int bufsize; if (iter == 0) { bufsize = sizeof(buffer); base = buffer; } else { bufsize = 30000; base = new char[bufsize]; } char *p = base; char *limit = base + bufsize; struct timeval now_tv; gettimeofday(&now_tv, NULL); const time_t seconds = now_tv.tv_sec; struct tm t; localtime_r(&seconds, &t); p += snprintf(p, limit - p, "%04d/%02d/%02d-%02d:%02d:%02d.%06d %llx ", t.tm_year + 1900, t.tm_mon + 1, t.tm_mday, t.tm_hour, t.tm_min, t.tm_sec, static_cast<int>(now_tv.tv_usec), static_cast<long long unsigned int>(thread_id)); // Print the message if (p < limit) { va_list backup_ap; va_copy(backup_ap, ap); p += vsnprintf(p, limit - p, format, backup_ap); va_end(backup_ap); } // Truncate to available space if necessary if (p >= limit) { if (iter == 0) { continue; // Try again with larger buffer } else { p = limit - 1; } } // Add newline if necessary if (p == base || p[-1] != '\n') { *p++ = '\n'; } assert(p <= limit); fwrite(base, 1, p - base, file_); fflush(file_); if (base != buffer) { delete[] base; } break; } } }; } // namespace leveldb #endif // STORAGE_LEVELDB_UTIL_POSIX_LOGGER_H_
2,503
23.54902
81
h
null
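PosixLogger::Logv() above formats each record in up to two passes: first into a 500-byte stack buffer, and only if that truncates, again into a large heap buffer. The same pattern in isolation, sized on the second pass from the vsnprintf() return value rather than the logger's fixed 30000 bytes (a generic sketch, not the logger itself):

#include <cstdarg>
#include <cstdio>
#include <string>

/* Format printf-style arguments, retrying with a heap buffer only when the
 * small stack buffer is too small. */
static std::string format_message(const char *fmt, ...) {
    char stackbuf[128];
    va_list ap, backup;
    va_start(ap, fmt);
    va_copy(backup, ap);                                  /* keep a copy for the second pass */
    int needed = std::vsnprintf(stackbuf, sizeof(stackbuf), fmt, ap);
    va_end(ap);
    if (needed < 0) { va_end(backup); return std::string(); }
    if (needed < (int)sizeof(stackbuf)) {                 /* first pass fit */
        va_end(backup);
        return std::string(stackbuf, (size_t)needed);
    }
    std::string big((size_t)needed + 1, '\0');            /* second pass, exact size */
    std::vsnprintf(&big[0], (size_t)needed + 1, fmt, backup);
    va_end(backup);
    big.resize((size_t)needed);
    return big;
}

int main() {
    std::printf("%s\n", format_message("thread %llx: %s", 0x1234ull, "hello").c_str());
    return 0;
}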
NearPMSW-main/nearpm/shadow/pmemkv-bench-sd/bench/util/env_posix_test_helper.h
// Copyright 2017 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE-BSD file. See the AUTHORS file for names of contributors. // SPDX-License-Identifier: Apache-2.0 // Copyright 2020, Intel Corporation #ifndef STORAGE_LEVELDB_UTIL_ENV_POSIX_TEST_HELPER_H_ #define STORAGE_LEVELDB_UTIL_ENV_POSIX_TEST_HELPER_H_ namespace leveldb { class EnvPosixTest; // A helper for the POSIX Env to facilitate testing. class EnvPosixTestHelper { private: friend class EnvPosixTest; // Set the maximum number of read-only files that will be opened. // Must be called before creating an Env. static void SetReadOnlyFDLimit(int limit); // Set the maximum number of read-only files that will be mapped via mmap. // Must be called before creating an Env. static void SetReadOnlyMMapLimit(int limit); }; } // namespace leveldb #endif // STORAGE_LEVELDB_UTIL_ENV_POSIX_TEST_HELPER_H_
967
28.333333
81
h
null
NearPMSW-main/nearpm/shadow/pmemkv-bench-sd/bench/port/port_posix.h
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE-BSD file. See the AUTHORS file for names of contributors. // SPDX-License-Identifier: Apache-2.0 // Copyright 2020, Intel Corporation // See port_example.h for documentation for the following types/functions. #ifndef STORAGE_LEVELDB_PORT_PORT_POSIX_H_ #define STORAGE_LEVELDB_PORT_PORT_POSIX_H_ #undef PLATFORM_IS_LITTLE_ENDIAN #if defined(__APPLE__) #include <machine/endian.h> #if defined(__DARWIN_LITTLE_ENDIAN) && defined(__DARWIN_BYTE_ORDER) #define PLATFORM_IS_LITTLE_ENDIAN (__DARWIN_BYTE_ORDER == __DARWIN_LITTLE_ENDIAN) #endif #elif defined(OS_SOLARIS) #include <sys/isa_defs.h> #ifdef _LITTLE_ENDIAN #define PLATFORM_IS_LITTLE_ENDIAN true #else #define PLATFORM_IS_LITTLE_ENDIAN false #endif #elif defined(OS_FREEBSD) || defined(OS_OPENBSD) || defined(OS_NETBSD) || defined(OS_DRAGONFLYBSD) #include <sys/endian.h> #include <sys/types.h> #define PLATFORM_IS_LITTLE_ENDIAN (_BYTE_ORDER == _LITTLE_ENDIAN) #elif defined(OS_HPUX) #define PLATFORM_IS_LITTLE_ENDIAN false #elif defined(OS_ANDROID) // Due to a bug in the NDK x86 <sys/endian.h> definition, // _BYTE_ORDER must be used instead of __BYTE_ORDER on Android. // See http://code.google.com/p/android/issues/detail?id=39824 #include <endian.h> #define PLATFORM_IS_LITTLE_ENDIAN (_BYTE_ORDER == _LITTLE_ENDIAN) #else #include <endian.h> #endif #include <pthread.h> #if defined(HAVE_CRC32C) #include <crc32c/crc32c.h> #endif // defined(HAVE_CRC32C) #ifdef HAVE_SNAPPY #include <snappy.h> #endif // defined(HAVE_SNAPPY) #include "port/atomic_pointer.h" #include <stdint.h> #include <string> #ifndef PLATFORM_IS_LITTLE_ENDIAN #define PLATFORM_IS_LITTLE_ENDIAN (__BYTE_ORDER == __LITTLE_ENDIAN) #endif #if defined(__APPLE__) || defined(OS_FREEBSD) || defined(OS_OPENBSD) || defined(OS_DRAGONFLYBSD) // Use fsync() on platforms without fdatasync() #define fdatasync fsync #endif #if defined(OS_ANDROID) && __ANDROID_API__ < 9 // fdatasync() was only introduced in API level 9 on Android. Use fsync() // when targetting older platforms. 
#define fdatasync fsync #endif namespace leveldb { namespace port { static const bool kLittleEndian = PLATFORM_IS_LITTLE_ENDIAN; #undef PLATFORM_IS_LITTLE_ENDIAN class CondVar; class Mutex { public: Mutex(); ~Mutex(); void Lock(); void Unlock(); void AssertHeld() { } private: friend class CondVar; pthread_mutex_t mu_; // No copying Mutex(const Mutex &); void operator=(const Mutex &); }; class CondVar { public: explicit CondVar(Mutex *mu); ~CondVar(); void Wait(); void Signal(); void SignalAll(); private: pthread_cond_t cv_; Mutex *mu_; }; typedef pthread_once_t OnceType; #define LEVELDB_ONCE_INIT PTHREAD_ONCE_INIT extern void InitOnce(OnceType *once, void (*initializer)()); inline bool Snappy_Compress(const char *input, size_t length, ::std::string *output) { #ifdef HAVE_SNAPPY output->resize(snappy::MaxCompressedLength(length)); size_t outlen; snappy::RawCompress(input, length, &(*output)[0], &outlen); output->resize(outlen); return true; #endif // defined(HAVE_SNAPPY) return false; } inline bool Snappy_GetUncompressedLength(const char *input, size_t length, size_t *result) { #ifdef HAVE_SNAPPY return snappy::GetUncompressedLength(input, length, result); #else return false; #endif // defined(HAVE_SNAPPY) } inline bool Snappy_Uncompress(const char *input, size_t length, char *output) { #ifdef HAVE_SNAPPY return snappy::RawUncompress(input, length, output); #else return false; #endif // defined(HAVE_SNAPPY) } inline bool GetHeapProfile(void (*func)(void *, const char *, int), void *arg) { return false; } inline uint32_t AcceleratedCRC32C(uint32_t crc, const char *buf, size_t size) { #if defined(HAVE_CRC32C) return ::crc32c::Extend(crc, reinterpret_cast<const uint8_t *>(buf), size); #else return 0; #endif // defined(HAVE_CRC32C) } } // namespace port } // namespace leveldb #endif // STORAGE_LEVELDB_PORT_PORT_POSIX_H_
4,061
23.768293
98
h
null
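port_posix.h above resolves kLittleEndian entirely at preprocessing time from platform headers. For comparison, byte order can also be probed at run time; the tiny check below should agree with port::kLittleEndian on any platform (generic sketch, independent of the macros above):

#include <cstdint>
#include <cstdio>
#include <cstring>

int main() {
    uint32_t word = 1;
    unsigned char first;
    std::memcpy(&first, &word, 1);            /* inspect the lowest-addressed byte */
    bool little_endian = (first == 1);        /* expected to match port::kLittleEndian */
    std::printf("little endian: %s\n", little_endian ? "true" : "false");
    return 0;
}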
NearPMSW-main/nearpm/shadow/pmemkv-bench-sd/bench/port/thread_annotations.h
// Copyright (c) 2012 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE-BSD file. See the AUTHORS file for names of contributors. // SPDX-License-Identifier: Apache-2.0 // Copyright 2020, Intel Corporation #ifndef STORAGE_LEVELDB_PORT_THREAD_ANNOTATIONS_H_ #define STORAGE_LEVELDB_PORT_THREAD_ANNOTATIONS_H_ // Some environments provide custom macros to aid in static thread-safety // analysis. Provide empty definitions of such macros unless they are already // defined. #ifndef EXCLUSIVE_LOCKS_REQUIRED #define EXCLUSIVE_LOCKS_REQUIRED(...) #endif #ifndef SHARED_LOCKS_REQUIRED #define SHARED_LOCKS_REQUIRED(...) #endif #ifndef LOCKS_EXCLUDED #define LOCKS_EXCLUDED(...) #endif #ifndef LOCK_RETURNED #define LOCK_RETURNED(x) #endif #ifndef LOCKABLE #define LOCKABLE #endif #ifndef SCOPED_LOCKABLE #define SCOPED_LOCKABLE #endif #ifndef EXCLUSIVE_LOCK_FUNCTION #define EXCLUSIVE_LOCK_FUNCTION(...) #endif #ifndef SHARED_LOCK_FUNCTION #define SHARED_LOCK_FUNCTION(...) #endif #ifndef EXCLUSIVE_TRYLOCK_FUNCTION #define EXCLUSIVE_TRYLOCK_FUNCTION(...) #endif #ifndef SHARED_TRYLOCK_FUNCTION #define SHARED_TRYLOCK_FUNCTION(...) #endif #ifndef UNLOCK_FUNCTION #define UNLOCK_FUNCTION(...) #endif #ifndef NO_THREAD_SAFETY_ANALYSIS #define NO_THREAD_SAFETY_ANALYSIS #endif #endif // STORAGE_LEVELDB_PORT_THREAD_ANNOTATIONS_H_
1,429
21.34375
81
h
null
NearPMSW-main/nearpm/shadow/pmemkv-bench-sd/bench/port/atomic_pointer.h
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE-BSD file. See the AUTHORS file for names of contributors. // SPDX-License-Identifier: Apache-2.0 // Copyright 2020, Intel Corporation // AtomicPointer provides storage for a lock-free pointer. // Platform-dependent implementation of AtomicPointer: // - If the platform provides a cheap barrier, we use it with raw pointers // - If <atomic> is present (on newer versions of gcc, it is), we use // a <atomic>-based AtomicPointer. However we prefer the memory // barrier based version, because at least on a gcc 4.4 32-bit build // on linux, we have encountered a buggy <atomic> implementation. // Also, some <atomic> implementations are much slower than a memory-barrier // based implementation (~16ns for <atomic> based acquire-load vs. ~1ns for // a barrier based acquire-load). // This code is based on atomicops-internals-* in Google's perftools: // http://code.google.com/p/google-perftools/source/browse/#svn%2Ftrunk%2Fsrc%2Fbase #ifndef PORT_ATOMIC_POINTER_H_ #define PORT_ATOMIC_POINTER_H_ #include <stdint.h> #ifdef LEVELDB_ATOMIC_PRESENT #include <atomic> #endif #ifdef OS_WIN #include <windows.h> #endif #ifdef __APPLE__ #include <libkern/OSAtomic.h> #endif #if defined(_M_X64) || defined(__x86_64__) #define ARCH_CPU_X86_FAMILY 1 #elif defined(_M_IX86) || defined(__i386__) || defined(__i386) #define ARCH_CPU_X86_FAMILY 1 #elif defined(__ARMEL__) #define ARCH_CPU_ARM_FAMILY 1 #elif defined(__aarch64__) #define ARCH_CPU_ARM64_FAMILY 1 #elif defined(__ppc__) || defined(__powerpc__) || defined(__powerpc64__) #define ARCH_CPU_PPC_FAMILY 1 #elif defined(__mips__) #define ARCH_CPU_MIPS_FAMILY 1 #endif namespace leveldb { namespace port { // Define MemoryBarrier() if available // Windows on x86 #if defined(OS_WIN) && defined(COMPILER_MSVC) && defined(ARCH_CPU_X86_FAMILY) // windows.h already provides a MemoryBarrier(void) macro // http://msdn.microsoft.com/en-us/library/ms684208(v=vs.85).aspx #define LEVELDB_HAVE_MEMORY_BARRIER // Mac OS #elif defined(__APPLE__) inline void MemoryBarrier() { OSMemoryBarrier(); } #define LEVELDB_HAVE_MEMORY_BARRIER // Gcc on x86 #elif defined(ARCH_CPU_X86_FAMILY) && defined(__GNUC__) inline void MemoryBarrier() { // See http://gcc.gnu.org/ml/gcc/2003-04/msg01180.html for a discussion on // this idiom. Also see http://en.wikipedia.org/wiki/Memory_ordering. __asm__ __volatile__("" : : : "memory"); } #define LEVELDB_HAVE_MEMORY_BARRIER // Sun Studio #elif defined(ARCH_CPU_X86_FAMILY) && defined(__SUNPRO_CC) inline void MemoryBarrier() { // See http://gcc.gnu.org/ml/gcc/2003-04/msg01180.html for a discussion on // this idiom. Also see http://en.wikipedia.org/wiki/Memory_ordering. asm volatile("" : : : "memory"); } #define LEVELDB_HAVE_MEMORY_BARRIER // ARM Linux #elif defined(ARCH_CPU_ARM_FAMILY) && defined(__linux__) typedef void (*LinuxKernelMemoryBarrierFunc)(void); // The Linux ARM kernel provides a highly optimized device-specific memory // barrier function at a fixed memory address that is mapped in every // user-level process. // // This beats using CPU-specific instructions which are, on single-core // devices, un-necessary and very costly (e.g. ARMv7-A "dmb" takes more // than 180ns on a Cortex-A8 like the one on a Nexus One). Benchmarking // shows that the extra function call cost is completely negligible on // multi-core devices. 
// inline void MemoryBarrier() { (*(LinuxKernelMemoryBarrierFunc)0xffff0fa0)(); } #define LEVELDB_HAVE_MEMORY_BARRIER // ARM64 #elif defined(ARCH_CPU_ARM64_FAMILY) inline void MemoryBarrier() { asm volatile("dmb sy" : : : "memory"); } #define LEVELDB_HAVE_MEMORY_BARRIER // PPC #elif defined(ARCH_CPU_PPC_FAMILY) && defined(__GNUC__) inline void MemoryBarrier() { // TODO for some powerpc expert: is there a cheaper suitable variant? // Perhaps by having separate barriers for acquire and release ops. asm volatile("sync" : : : "memory"); } #define LEVELDB_HAVE_MEMORY_BARRIER // MIPS #elif defined(ARCH_CPU_MIPS_FAMILY) && defined(__GNUC__) inline void MemoryBarrier() { __asm__ __volatile__("sync" : : : "memory"); } #define LEVELDB_HAVE_MEMORY_BARRIER #endif // AtomicPointer built using platform-specific MemoryBarrier() #if defined(LEVELDB_HAVE_MEMORY_BARRIER) class AtomicPointer { private: void *rep_; public: AtomicPointer() { } explicit AtomicPointer(void *p) : rep_(p) { } inline void *NoBarrier_Load() const { return rep_; } inline void NoBarrier_Store(void *v) { rep_ = v; } inline void *Acquire_Load() const { void *result = rep_; MemoryBarrier(); return result; } inline void Release_Store(void *v) { MemoryBarrier(); rep_ = v; } }; // AtomicPointer based on <cstdatomic> #elif defined(LEVELDB_ATOMIC_PRESENT) class AtomicPointer { private: std::atomic<void *> rep_; public: AtomicPointer() { } explicit AtomicPointer(void *v) : rep_(v) { } inline void *Acquire_Load() const { return rep_.load(std::memory_order_acquire); } inline void Release_Store(void *v) { rep_.store(v, std::memory_order_release); } inline void *NoBarrier_Load() const { return rep_.load(std::memory_order_relaxed); } inline void NoBarrier_Store(void *v) { rep_.store(v, std::memory_order_relaxed); } }; // Atomic pointer based on sparc memory barriers #elif defined(__sparcv9) && defined(__GNUC__) class AtomicPointer { private: void *rep_; public: AtomicPointer() { } explicit AtomicPointer(void *v) : rep_(v) { } inline void *Acquire_Load() const { void *val; __asm__ __volatile__("ldx [%[rep_]], %[val] \n\t" "membar #LoadLoad|#LoadStore \n\t" : [val] "=r"(val) : [rep_] "r"(&rep_) : "memory"); return val; } inline void Release_Store(void *v) { __asm__ __volatile__("membar #LoadStore|#StoreStore \n\t" "stx %[v], [%[rep_]] \n\t" : : [rep_] "r"(&rep_), [v] "r"(v) : "memory"); } inline void *NoBarrier_Load() const { return rep_; } inline void NoBarrier_Store(void *v) { rep_ = v; } }; // Atomic pointer based on ia64 acq/rel #elif defined(__ia64) && defined(__GNUC__) class AtomicPointer { private: void *rep_; public: AtomicPointer() { } explicit AtomicPointer(void *v) : rep_(v) { } inline void *Acquire_Load() const { void *val; __asm__ __volatile__("ld8.acq %[val] = [%[rep_]] \n\t" : [val] "=r"(val) : [rep_] "r"(&rep_) : "memory"); return val; } inline void Release_Store(void *v) { __asm__ __volatile__("st8.rel [%[rep_]] = %[v] \n\t" : : [rep_] "r"(&rep_), [v] "r"(v) : "memory"); } inline void *NoBarrier_Load() const { return rep_; } inline void NoBarrier_Store(void *v) { rep_ = v; } }; // We have neither MemoryBarrier(), nor <atomic> #else #error Please implement AtomicPointer for this platform. #endif #undef LEVELDB_HAVE_MEMORY_BARRIER #undef ARCH_CPU_X86_FAMILY #undef ARCH_CPU_ARM_FAMILY #undef ARCH_CPU_ARM64_FAMILY #undef ARCH_CPU_PPC_FAMILY } // namespace port } // namespace leveldb #endif // PORT_ATOMIC_POINTER_H_
7,207
23.26936
84
h
null
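Of the AtomicPointer variants above, the <atomic>-based one is the most portable today; what every variant must provide is the acquire/release pairing: a consumer that Acquire_Load()s a non-NULL pointer is guaranteed to see everything the producer wrote before its Release_Store(). A self-contained sketch of that contract using std::atomic directly (it mirrors the semantics, not the class itself):

#include <atomic>
#include <cstdio>
#include <thread>

struct Payload { int value; };

static Payload slot;                            /* plain data handed between threads */
static std::atomic<Payload *> ptr{nullptr};     /* plays the role of AtomicPointer */

int main() {
    std::thread producer([] {
        slot.value = 42;                               /* write the payload ... */
        ptr.store(&slot, std::memory_order_release);   /* ... then the Release_Store() */
    });
    std::thread consumer([] {
        Payload *p;
        while ((p = ptr.load(std::memory_order_acquire)) == nullptr) {
            /* spin until published; the acquire load pairs with the release store */
        }
        std::printf("saw %d\n", p->value);             /* guaranteed to print 42 */
    });
    producer.join();
    consumer.join();
    return 0;
}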
NearPMSW-main/nearpm/shadow/pmemkv-bench-sd/bench/include/leveldb/status.h
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE-BSD file. See the AUTHORS file for names of contributors. // SPDX-License-Identifier: Apache-2.0 // Copyright 2020, Intel Corporation // A Status encapsulates the result of an operation. It may indicate success, // or it may indicate an error with an associated error message. // // Multiple threads can invoke const methods on a Status without // external synchronization, but if any of the threads may call a // non-const method, all threads accessing the same Status must use // external synchronization. #ifndef STORAGE_LEVELDB_INCLUDE_STATUS_H_ #define STORAGE_LEVELDB_INCLUDE_STATUS_H_ #include "leveldb/slice.h" #include <string> namespace leveldb { class Status { public: // Create a success status. Status() : state_(NULL) { } ~Status() { delete[] state_; } // Copy the specified status. Status(const Status &s); void operator=(const Status &s); // Return a success status. static Status OK() { return Status(); } // Return error status of an appropriate type. static Status NotFound(const Slice &msg, const Slice &msg2 = Slice()) { return Status(kNotFound, msg, msg2); } static Status Corruption(const Slice &msg, const Slice &msg2 = Slice()) { return Status(kCorruption, msg, msg2); } static Status NotSupported(const Slice &msg, const Slice &msg2 = Slice()) { return Status(kNotSupported, msg, msg2); } static Status InvalidArgument(const Slice &msg, const Slice &msg2 = Slice()) { return Status(kInvalidArgument, msg, msg2); } static Status IOError(const Slice &msg, const Slice &msg2 = Slice()) { return Status(kIOError, msg, msg2); } // Returns true iff the status indicates success. bool ok() const { return (state_ == NULL); } // Returns true iff the status indicates a NotFound error. bool IsNotFound() const { return code() == kNotFound; } // Returns true iff the status indicates a Corruption error. bool IsCorruption() const { return code() == kCorruption; } // Returns true iff the status indicates an IOError. bool IsIOError() const { return code() == kIOError; } // Returns true iff the status indicates a NotSupportedError. bool IsNotSupportedError() const { return code() == kNotSupported; } // Returns true iff the status indicates an InvalidArgument. bool IsInvalidArgument() const { return code() == kInvalidArgument; } // Return a string representation of this status suitable for printing. // Returns the string "OK" for success. std::string ToString() const; private: // OK status has a NULL state_. Otherwise, state_ is a new[] array // of the following form: // state_[0..3] == length of message // state_[4] == code // state_[5..] == message const char *state_; enum Code { kOk = 0, kNotFound = 1, kCorruption = 2, kNotSupported = 3, kInvalidArgument = 4, kIOError = 5 }; Code code() const { return (state_ == NULL) ? kOk : static_cast<Code>(state_[4]); } Status(Code code, const Slice &msg, const Slice &msg2); static const char *CopyState(const char *s); }; inline Status::Status(const Status &s) { state_ = (s.state_ == NULL) ? NULL : CopyState(s.state_); } inline void Status::operator=(const Status &s) { // The following condition catches both aliasing (when this == &s), // and the common case where both s and *this are ok. if (state_ != s.state_) { delete[] state_; state_ = (s.state_ == NULL) ? NULL : CopyState(s.state_); } } } // namespace leveldb #endif // STORAGE_LEVELDB_INCLUDE_STATUS_H_
3,658
23.231788
81
h
null
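status.h keeps the success path cheap: an OK Status is just a NULL state_ pointer, and only error statuses allocate the length/code/message array described in the comment. A usage sketch against the API above (ToString() and the private constructor are defined in the library's status.cc, which is not part of this dump, so this links against the full sources; the message strings are example data):

#include <cstdio>
#include "leveldb/status.h"

static leveldb::Status open_or_fail(bool ok) {
    if (!ok)
        return leveldb::Status::IOError("dump.db", "cannot open");  /* error path allocates state_ */
    return leveldb::Status::OK();                                   /* success path keeps state_ NULL */
}

int main() {
    leveldb::Status s = open_or_fail(false);
    if (!s.ok() && s.IsIOError())
        std::printf("%s\n", s.ToString().c_str());   /* e.g. "IO error: dump.db: cannot open" */
    return 0;
}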
NearPMSW-main/nearpm/shadow/pmemkv-bench-sd/bench/include/leveldb/slice.h
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE-BSD file. See the AUTHORS file for names of contributors. // SPDX-License-Identifier: Apache-2.0 // Copyright 2020, Intel Corporation // Slice is a simple structure containing a pointer into some external // storage and a size. The user of a Slice must ensure that the slice // is not used after the corresponding external storage has been // deallocated. // // Multiple threads can invoke const methods on a Slice without // external synchronization, but if any of the threads may call a // non-const method, all threads accessing the same Slice must use // external synchronization. #ifndef STORAGE_LEVELDB_INCLUDE_SLICE_H_ #define STORAGE_LEVELDB_INCLUDE_SLICE_H_ #include <assert.h> #include <stddef.h> #include <string.h> #include <string> namespace leveldb { class Slice { public: // Create an empty slice. Slice() : data_(""), size_(0) { } // Create a slice that refers to d[0,n-1]. Slice(const char *d, size_t n) : data_(d), size_(n) { } // Create a slice that refers to the contents of "s" Slice(const std::string &s) : data_(s.data()), size_(s.size()) { } // Create a slice that refers to s[0,strlen(s)-1] Slice(const char *s) : data_(s), size_(strlen(s)) { } // Return a pointer to the beginning of the referenced data const char *data() const { return data_; } // Return the length (in bytes) of the referenced data size_t size() const { return size_; } // Return true iff the length of the referenced data is zero bool empty() const { return size_ == 0; } // Return the ith byte in the referenced data. // REQUIRES: n < size() char operator[](size_t n) const { assert(n < size()); return data_[n]; } // Change this slice to refer to an empty array void clear() { data_ = ""; size_ = 0; } // Drop the first "n" bytes from this slice. void remove_prefix(size_t n) { assert(n <= size()); data_ += n; size_ -= n; } // Return a string that contains the copy of the referenced data. std::string ToString() const { return std::string(data_, size_); } // Three-way comparison. Returns value: // < 0 iff "*this" < "b", // == 0 iff "*this" == "b", // > 0 iff "*this" > "b" int compare(const Slice &b) const; // Return true iff "x" is a prefix of "*this" bool starts_with(const Slice &x) const { return ((size_ >= x.size_) && (memcmp(data_, x.data_, x.size_) == 0)); } private: const char *data_; size_t size_; // Intentionally copyable }; inline bool operator==(const Slice &x, const Slice &y) { return ((x.size() == y.size()) && (memcmp(x.data(), y.data(), x.size()) == 0)); } inline bool operator!=(const Slice &x, const Slice &y) { return !(x == y); } inline int Slice::compare(const Slice &b) const { const size_t min_len = (size_ < b.size_) ? size_ : b.size_; int r = memcmp(data_, b.data_, min_len); if (r == 0) { if (size_ < b.size_) r = -1; else if (size_ > b.size_) r = +1; } return r; } } // namespace leveldb #endif // STORAGE_LEVELDB_INCLUDE_SLICE_H_
3,163
21.125874
81
h
null
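Everything in slice.h is defined inline, so a usage sketch needs nothing beyond the header. The main caveat, repeated in the header comment, is lifetime: a Slice only points into storage owned by someone else, so ToString() is the way to take a real copy. The key strings below are example data:

#include <cassert>
#include <string>
#include "leveldb/slice.h"

int main() {
    std::string backing = "user:1234:profile";
    leveldb::Slice key(backing);              /* points into `backing`, no copy */

    assert(key.starts_with("user:"));
    key.remove_prefix(5);                     /* now refers to "1234:profile" */
    assert(key.size() == backing.size() - 5);

    leveldb::Slice other("1234:zzz");
    assert(key.compare(other) < 0);           /* "1234:profile" sorts before "1234:zzz" */

    std::string copy = key.ToString();        /* explicit copy when ownership is needed */
    assert(copy == "1234:profile");
    return 0;
}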
NearPMSW-main/nearpm/shadow/pmemkv-bench-sd/bench/include/leveldb/env.h
// Copyright (c) 2011 The LevelDB Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE-BSD file. See the AUTHORS file for names of contributors. // SPDX-License-Identifier: Apache-2.0 // Copyright 2020, Intel Corporation // An Env is an interface used by the leveldb implementation to access // operating system functionality like the filesystem etc. Callers // may wish to provide a custom Env object when opening a database to // get fine gain control; e.g., to rate limit file system operations. // // All Env implementations are safe for concurrent access from // multiple threads without any external synchronization. #ifndef STORAGE_LEVELDB_INCLUDE_ENV_H_ #define STORAGE_LEVELDB_INCLUDE_ENV_H_ #include "leveldb/status.h" #include <stdarg.h> #include <stdint.h> #include <string> #include <vector> namespace leveldb { class FileLock; class Logger; class RandomAccessFile; class SequentialFile; class Slice; class WritableFile; class Env { public: Env() { } virtual ~Env(); // Return a default environment suitable for the current operating // system. Sophisticated users may wish to provide their own Env // implementation instead of relying on this default environment. // // The result of Default() belongs to leveldb and must never be deleted. static Env *Default(); // Create a brand new sequentially-readable file with the specified name. // On success, stores a pointer to the new file in *result and returns OK. // On failure stores NULL in *result and returns non-OK. If the file does // not exist, returns a non-OK status. Implementations should return a // NotFound status when the file does not exist. // // The returned file will only be accessed by one thread at a time. virtual Status NewSequentialFile(const std::string &fname, SequentialFile **result) = 0; // Create a brand new random access read-only file with the // specified name. On success, stores a pointer to the new file in // *result and returns OK. On failure stores NULL in *result and // returns non-OK. If the file does not exist, returns a non-OK // status. Implementations should return a NotFound status when the file does // not exist. // // The returned file may be concurrently accessed by multiple threads. virtual Status NewRandomAccessFile(const std::string &fname, RandomAccessFile **result) = 0; // Create an object that writes to a new file with the specified // name. Deletes any existing file with the same name and creates a // new file. On success, stores a pointer to the new file in // *result and returns OK. On failure stores NULL in *result and // returns non-OK. // // The returned file will only be accessed by one thread at a time. virtual Status NewWritableFile(const std::string &fname, WritableFile **result) = 0; // Create an object that either appends to an existing file, or // writes to a new file (if the file does not exist to begin with). // On success, stores a pointer to the new file in *result and // returns OK. On failure stores NULL in *result and returns // non-OK. // // The returned file will only be accessed by one thread at a time. // // May return an IsNotSupportedError error if this Env does // not allow appending to an existing file. Users of Env (including // the leveldb implementation) must be prepared to deal with // an Env that does not support appending. virtual Status NewAppendableFile(const std::string &fname, WritableFile **result); // Returns true iff the named file exists. 
virtual bool FileExists(const std::string &fname) = 0; // Store in *result the names of the children of the specified directory. // The names are relative to "dir". // Original contents of *results are dropped. virtual Status GetChildren(const std::string &dir, std::vector<std::string> *result) = 0; // Delete the named file. virtual Status DeleteFile(const std::string &fname) = 0; // Create the specified directory. virtual Status CreateDir(const std::string &dirname) = 0; // Delete the specified directory. virtual Status DeleteDir(const std::string &dirname) = 0; // Store the size of fname in *file_size. virtual Status GetFileSize(const std::string &fname, uint64_t *file_size) = 0; // Rename file src to target. virtual Status RenameFile(const std::string &src, const std::string &target) = 0; // Lock the specified file. Used to prevent concurrent access to // the same db by multiple processes. On failure, stores NULL in // *lock and returns non-OK. // // On success, stores a pointer to the object that represents the // acquired lock in *lock and returns OK. The caller should call // UnlockFile(*lock) to release the lock. If the process exits, // the lock will be automatically released. // // If somebody else already holds the lock, finishes immediately // with a failure. I.e., this call does not wait for existing locks // to go away. // // May create the named file if it does not already exist. virtual Status LockFile(const std::string &fname, FileLock **lock) = 0; // Release the lock acquired by a previous successful call to LockFile. // REQUIRES: lock was returned by a successful LockFile() call // REQUIRES: lock has not already been unlocked. virtual Status UnlockFile(FileLock *lock) = 0; // Arrange to run "(*function)(arg)" once in a background thread. // // "function" may run in an unspecified thread. Multiple functions // added to the same Env may run concurrently in different threads. // I.e., the caller may not assume that background work items are // serialized. virtual void Schedule(void (*function)(void *arg), void *arg) = 0; // Start a new thread, invoking "function(arg)" within the new thread. // When "function(arg)" returns, the thread will be destroyed. virtual void StartThread(void (*function)(void *arg), void *arg) = 0; // *path is set to a temporary directory that can be used for testing. It may // or many not have just been created. The directory may or may not differ // between runs of the same process, but subsequent calls will return the // same directory. virtual Status GetTestDirectory(std::string *path) = 0; // Create and return a log file for storing informational messages. virtual Status NewLogger(const std::string &fname, Logger **result) = 0; // Returns the number of micro-seconds since some fixed point in time. Only // useful for computing deltas of time. virtual uint64_t NowMicros() = 0; // Sleep/delay the thread for the prescribed number of micro-seconds. virtual void SleepForMicroseconds(int micros) = 0; private: // No copying allowed Env(const Env &); void operator=(const Env &); }; // A file abstraction for reading sequentially through a file class SequentialFile { public: SequentialFile() { } virtual ~SequentialFile(); // Read up to "n" bytes from the file. "scratch[0..n-1]" may be // written by this routine. Sets "*result" to the data that was // read (including if fewer than "n" bytes were successfully read). // May set "*result" to point at data in "scratch[0..n-1]", so // "scratch[0..n-1]" must be live when "*result" is used. 
// If an error was encountered, returns a non-OK status. // // REQUIRES: External synchronization virtual Status Read(size_t n, Slice *result, char *scratch) = 0; // Skip "n" bytes from the file. This is guaranteed to be no // slower that reading the same data, but may be faster. // // If end of file is reached, skipping will stop at the end of the // file, and Skip will return OK. // // REQUIRES: External synchronization virtual Status Skip(uint64_t n) = 0; private: // No copying allowed SequentialFile(const SequentialFile &); void operator=(const SequentialFile &); }; // A file abstraction for randomly reading the contents of a file. class RandomAccessFile { public: RandomAccessFile() { } virtual ~RandomAccessFile(); // Read up to "n" bytes from the file starting at "offset". // "scratch[0..n-1]" may be written by this routine. Sets "*result" // to the data that was read (including if fewer than "n" bytes were // successfully read). May set "*result" to point at data in // "scratch[0..n-1]", so "scratch[0..n-1]" must be live when // "*result" is used. If an error was encountered, returns a non-OK // status. // // Safe for concurrent use by multiple threads. virtual Status Read(uint64_t offset, size_t n, Slice *result, char *scratch) const = 0; private: // No copying allowed RandomAccessFile(const RandomAccessFile &); void operator=(const RandomAccessFile &); }; // A file abstraction for sequential writing. The implementation // must provide buffering since callers may append small fragments // at a time to the file. class WritableFile { public: WritableFile() { } virtual ~WritableFile(); virtual Status Append(const Slice &data) = 0; virtual Status Close() = 0; virtual Status Flush() = 0; virtual Status Sync() = 0; private: // No copying allowed WritableFile(const WritableFile &); void operator=(const WritableFile &); }; // An interface for writing log messages. class Logger { public: Logger() { } virtual ~Logger(); // Write an entry to the log file with the specified format. virtual void Logv(const char *format, va_list ap) = 0; private: // No copying allowed Logger(const Logger &); void operator=(const Logger &); }; // Identifies a locked file. class FileLock { public: FileLock() { } virtual ~FileLock(); private: // No copying allowed FileLock(const FileLock &); void operator=(const FileLock &); }; // Log the specified data to *info_log if info_log is non-NULL. extern void Log(Logger *info_log, const char *format, ...) #if defined(__GNUC__) || defined(__clang__) __attribute__((__format__(__printf__, 2, 3))) #endif ; // A utility routine: write "data" to the named file. Status WriteStringToFile(Env *env, const Slice &data, const std::string &fname); // A utility routine: read contents of named file into *data Status ReadFileToString(Env *env, const std::string &fname, std::string *data); // An implementation of Env that forwards all calls to another Env. // May be useful to clients who wish to override just part of the // functionality of another Env. 
class EnvWrapper : public Env { public: // Initialize an EnvWrapper that delegates all calls to *t explicit EnvWrapper(Env *t) : target_(t) { } virtual ~EnvWrapper(); // Return the target to which this Env forwards all calls Env *target() const { return target_; } // The following text is boilerplate that forwards all methods to target() Status NewSequentialFile(const std::string &f, SequentialFile **r) { return target_->NewSequentialFile(f, r); } Status NewRandomAccessFile(const std::string &f, RandomAccessFile **r) { return target_->NewRandomAccessFile(f, r); } Status NewWritableFile(const std::string &f, WritableFile **r) { return target_->NewWritableFile(f, r); } Status NewAppendableFile(const std::string &f, WritableFile **r) { return target_->NewAppendableFile(f, r); } bool FileExists(const std::string &f) { return target_->FileExists(f); } Status GetChildren(const std::string &dir, std::vector<std::string> *r) { return target_->GetChildren(dir, r); } Status DeleteFile(const std::string &f) { return target_->DeleteFile(f); } Status CreateDir(const std::string &d) { return target_->CreateDir(d); } Status DeleteDir(const std::string &d) { return target_->DeleteDir(d); } Status GetFileSize(const std::string &f, uint64_t *s) { return target_->GetFileSize(f, s); } Status RenameFile(const std::string &s, const std::string &t) { return target_->RenameFile(s, t); } Status LockFile(const std::string &f, FileLock **l) { return target_->LockFile(f, l); } Status UnlockFile(FileLock *l) { return target_->UnlockFile(l); } void Schedule(void (*f)(void *), void *a) { return target_->Schedule(f, a); } void StartThread(void (*f)(void *), void *a) { return target_->StartThread(f, a); } virtual Status GetTestDirectory(std::string *path) { return target_->GetTestDirectory(path); } virtual Status NewLogger(const std::string &fname, Logger **result) { return target_->NewLogger(fname, result); } uint64_t NowMicros() { return target_->NowMicros(); } void SleepForMicroseconds(int micros) { target_->SleepForMicroseconds(micros); } private: Env *target_; }; } // namespace leveldb #endif // STORAGE_LEVELDB_INCLUDE_ENV_H_
12,539
30.827411
93
h
null
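
The env.h record above documents the leveldb Env abstraction (file creation, size queries, utility read/write helpers) only through interface comments. The sketch below is an illustrative round trip through that interface, not part of the record: it assumes the usual companion headers "leveldb/slice.h" and "leveldb/status.h" with the conventional Status::ok()/ToString() helpers, and the file path is made up for the example.

#include <cstdint>
#include <cstdio>
#include <string>
#include "leveldb/env.h"
#include "leveldb/slice.h"
#include "leveldb/status.h"

int main() {
  leveldb::Env *env = leveldb::Env::Default();    // shared default Env; never delete it
  const std::string fname = "/tmp/env_demo.txt";  // illustrative path only

  leveldb::WritableFile *wf = NULL;
  leveldb::Status s = env->NewWritableFile(fname, &wf);
  if (!s.ok()) {
    std::fprintf(stderr, "open failed: %s\n", s.ToString().c_str());
    return 1;
  }
  wf->Append(leveldb::Slice("hello env\n"));      // buffered write
  wf->Sync();                                     // flush to stable storage
  wf->Close();
  delete wf;

  uint64_t size = 0;
  if (env->FileExists(fname) && env->GetFileSize(fname, &size).ok())
    std::printf("file size: %llu\n", (unsigned long long)size);

  std::string data;
  s = leveldb::ReadFileToString(env, fname, &data);  // utility declared in the header above
  if (s.ok())
    std::printf("read back: %s", data.c_str());
  return s.ok() ? 0 : 1;
}

Note that the default Env is owned by leveldb, while the WritableFile returned by NewWritableFile is owned by the caller, which is why only the latter is deleted here.
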
NearPMSW-main/nearpm/shadow/pmdk-sd/src/tools/rpmemd/rpmemd_config.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2016-2018, Intel Corporation */ /* * rpmemd_config.h -- internal definitions for rpmemd config */ #include <stdint.h> #include <stdbool.h> #ifndef RPMEMD_DEFAULT_LOG_FILE #define RPMEMD_DEFAULT_LOG_FILE ("/var/log/" DAEMON_NAME ".log") #endif #ifndef RPMEMD_GLOBAL_CONFIG_FILE #define RPMEMD_GLOBAL_CONFIG_FILE ("/etc/" DAEMON_NAME "/" DAEMON_NAME\ ".conf") #endif #define RPMEMD_USER_CONFIG_FILE ("." DAEMON_NAME ".conf") #define RPMEM_DEFAULT_MAX_LANES 1024 #define RPMEM_DEFAULT_NTHREADS 0 #define HOME_ENV "HOME" #define HOME_STR_PLACEHOLDER ("$" HOME_ENV) struct rpmemd_config { char *log_file; char *poolset_dir; const char *rm_poolset; bool force; bool pool_set; bool persist_apm; bool persist_general; bool use_syslog; uint64_t max_lanes; enum rpmemd_log_level log_level; size_t nthreads; }; int rpmemd_config_read(struct rpmemd_config *config, int argc, char *argv[]); void rpmemd_config_free(struct rpmemd_config *config);
1,012
21.021739
77
h
null
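
The rpmemd_config.h record above builds its default log and config paths by pasting DAEMON_NAME into string literals. The standalone snippet below only demonstrates how those macros expand; DAEMON_NAME is assumed to be "rpmemd" here, whereas in the real project it is supplied by the build system.

#include <cstdio>

#define DAEMON_NAME "rpmemd"  /* assumed; normally defined by the build */

#define RPMEMD_DEFAULT_LOG_FILE ("/var/log/" DAEMON_NAME ".log")
#define RPMEMD_GLOBAL_CONFIG_FILE ("/etc/" DAEMON_NAME "/" DAEMON_NAME ".conf")
#define RPMEMD_USER_CONFIG_FILE ("." DAEMON_NAME ".conf")

int main() {
  /* adjacent string literals are concatenated by the preprocessor */
  std::printf("default log file  : %s\n", RPMEMD_DEFAULT_LOG_FILE);   /* /var/log/rpmemd.log */
  std::printf("global config file: %s\n", RPMEMD_GLOBAL_CONFIG_FILE); /* /etc/rpmemd/rpmemd.conf */
  std::printf("user config file  : %s\n", RPMEMD_USER_CONFIG_FILE);   /* .rpmemd.conf (relative to $HOME) */
  return 0;
}
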
NearPMSW-main/nearpm/shadow/pmdk-sd/src/tools/rpmemd/rpmemd.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2016-2020, Intel Corporation */ /* * rpmemd.c -- rpmemd main source file */ #include <stdlib.h> #include <unistd.h> #include <errno.h> #include <string.h> #include <sys/socket.h> #include <netinet/in.h> #include "librpmem.h" #include "rpmemd.h" #include "rpmemd_log.h" #include "rpmemd_config.h" #include "rpmem_common.h" #include "rpmemd_fip.h" #include "rpmemd_obc.h" #include "rpmemd_db.h" #include "rpmemd_util.h" #include "pool_hdr.h" #include "os.h" #include "os_thread.h" #include "util.h" #include "uuid.h" #include "set.h" /* * rpmemd -- rpmem handle */ struct rpmemd { struct rpmemd_obc *obc; /* out-of-band connection handle */ struct rpmemd_db *db; /* pool set database handle */ struct rpmemd_db_pool *pool; /* pool handle */ char *pool_desc; /* pool descriptor */ struct rpmemd_fip *fip; /* fabric provider handle */ struct rpmemd_config config; /* configuration */ enum rpmem_persist_method persist_method; int closing; /* set when closing connection */ int created; /* pool created */ os_thread_t fip_thread; int fip_running; }; #ifdef DEBUG /* * bool2str -- convert bool to yes/no string */ static inline const char * bool2str(int v) { return v ? "yes" : "no"; } #endif /* * str_or_null -- return null string instead of NULL pointer */ static inline const char * _str(const char *str) { if (!str) return "(null)"; return str; } /* * uuid2str -- convert uuid to string */ static const char * uuid2str(const uuid_t uuid) { static char uuid_str[64] = {0, }; int ret = util_uuid_to_string(uuid, uuid_str); if (ret != 0) { return "(error)"; } return uuid_str; } /* * rpmemd_get_pm -- returns persist method based on configuration */ static enum rpmem_persist_method rpmemd_get_pm(struct rpmemd_config *config) { enum rpmem_persist_method ret = RPMEM_PM_GPSPM; if (config->persist_apm) ret = RPMEM_PM_APM; return ret; } /* * rpmemd_db_get_status -- convert error number to status for db operation */ static int rpmemd_db_get_status(int err) { switch (err) { case EEXIST: return RPMEM_ERR_EXISTS; case EACCES: return RPMEM_ERR_NOACCESS; case ENOENT: return RPMEM_ERR_NOEXIST; case EWOULDBLOCK: return RPMEM_ERR_BUSY; case EBADF: return RPMEM_ERR_BADNAME; case EINVAL: return RPMEM_ERR_POOL_CFG; default: return RPMEM_ERR_FATAL; } } /* * rpmemd_check_pool -- verify pool parameters */ static int rpmemd_check_pool(struct rpmemd *rpmemd, const struct rpmem_req_attr *req, int *status) { if (rpmemd->pool->pool_size < RPMEM_MIN_POOL) { RPMEMD_LOG(ERR, "invalid pool size -- must be >= %zu", RPMEM_MIN_POOL); *status = RPMEM_ERR_POOL_CFG; return -1; } if (rpmemd->pool->pool_size < req->pool_size) { RPMEMD_LOG(ERR, "requested size is too big"); *status = RPMEM_ERR_BADSIZE; return -1; } return 0; } /* * rpmemd_deep_persist -- perform deep persist operation */ static int rpmemd_deep_persist(const void *addr, size_t size, void *ctx) { struct rpmemd *rpmemd = (struct rpmemd *)ctx; return util_replica_deep_persist(addr, size, rpmemd->pool->set, 0); } /* * rpmemd_common_fip_init -- initialize fabric provider */ static int rpmemd_common_fip_init(struct rpmemd *rpmemd, const struct rpmem_req_attr *req, struct rpmem_resp_attr *resp, int *status) { /* register the whole pool with header in RDMA */ void *addr = (void *)((uintptr_t)rpmemd->pool->pool_addr); struct rpmemd_fip_attr fip_attr = { .addr = addr, .size = req->pool_size, .nlanes = req->nlanes, .nthreads = rpmemd->config.nthreads, .provider = req->provider, .persist_method = rpmemd->persist_method, .deep_persist = 
rpmemd_deep_persist, .ctx = rpmemd, .buff_size = req->buff_size, }; const int is_pmem = rpmemd_db_pool_is_pmem(rpmemd->pool); if (rpmemd_apply_pm_policy(&fip_attr.persist_method, &fip_attr.persist, &fip_attr.memcpy_persist, is_pmem)) { *status = RPMEM_ERR_FATAL; goto err_fip_init; } const char *node = rpmem_get_ssh_conn_addr(); enum rpmem_err err; rpmemd->fip = rpmemd_fip_init(node, NULL, &fip_attr, resp, &err); if (!rpmemd->fip) { *status = (int)err; goto err_fip_init; } return 0; err_fip_init: return -1; } /* * rpmemd_print_req_attr -- print request attributes */ static void rpmemd_print_req_attr(const struct rpmem_req_attr *req) { RPMEMD_LOG(NOTICE, RPMEMD_LOG_INDENT "pool descriptor: '%s'", _str(req->pool_desc)); RPMEMD_LOG(NOTICE, RPMEMD_LOG_INDENT "pool size: %lu", req->pool_size); RPMEMD_LOG(NOTICE, RPMEMD_LOG_INDENT "nlanes: %u", req->nlanes); RPMEMD_LOG(NOTICE, RPMEMD_LOG_INDENT "provider: %s", rpmem_provider_to_str(req->provider)); RPMEMD_LOG(NOTICE, RPMEMD_LOG_INDENT "buff_size: %lu", req->buff_size); } /* * rpmemd_print_pool_attr -- print pool attributes */ static void rpmemd_print_pool_attr(const struct rpmem_pool_attr *attr) { if (attr == NULL) { RPMEMD_LOG(INFO, RPMEMD_LOG_INDENT "NULL"); } else { RPMEMD_LOG(INFO, RPMEMD_LOG_INDENT "signature: '%s'", _str(attr->signature)); RPMEMD_LOG(INFO, RPMEMD_LOG_INDENT "major: %u", attr->major); RPMEMD_LOG(INFO, RPMEMD_LOG_INDENT "compat_features: 0x%x", attr->compat_features); RPMEMD_LOG(INFO, RPMEMD_LOG_INDENT "incompat_features: 0x%x", attr->incompat_features); RPMEMD_LOG(INFO, RPMEMD_LOG_INDENT "ro_compat_features: 0x%x", attr->ro_compat_features); RPMEMD_LOG(INFO, RPMEMD_LOG_INDENT "poolset_uuid: %s", uuid2str(attr->poolset_uuid)); RPMEMD_LOG(INFO, RPMEMD_LOG_INDENT "uuid: %s", uuid2str(attr->uuid)); RPMEMD_LOG(INFO, RPMEMD_LOG_INDENT "next_uuid: %s", uuid2str(attr->next_uuid)); RPMEMD_LOG(INFO, RPMEMD_LOG_INDENT "prev_uuid: %s", uuid2str(attr->prev_uuid)); } } /* * rpmemd_print_resp_attr -- print response attributes */ static void rpmemd_print_resp_attr(const struct rpmem_resp_attr *attr) { RPMEMD_LOG(NOTICE, RPMEMD_LOG_INDENT "port: %u", attr->port); RPMEMD_LOG(NOTICE, RPMEMD_LOG_INDENT "rkey: 0x%lx", attr->rkey); RPMEMD_LOG(NOTICE, RPMEMD_LOG_INDENT "raddr: 0x%lx", attr->raddr); RPMEMD_LOG(NOTICE, RPMEMD_LOG_INDENT "nlanes: %u", attr->nlanes); RPMEMD_LOG(NOTICE, RPMEMD_LOG_INDENT "persist method: %s", rpmem_persist_method_to_str(attr->persist_method)); } /* * rpmemd_fip_thread -- background thread for establishing in-band connection */ static void * rpmemd_fip_thread(void *arg) { struct rpmemd *rpmemd = (struct rpmemd *)arg; int ret; RPMEMD_LOG(INFO, "waiting for in-band connection"); ret = rpmemd_fip_accept(rpmemd->fip, RPMEM_ACCEPT_TIMEOUT); if (ret) goto err_accept; RPMEMD_LOG(NOTICE, "in-band connection established"); ret = rpmemd_fip_process_start(rpmemd->fip); if (ret) goto err_process_start; return NULL; err_process_start: rpmemd_fip_close(rpmemd->fip); err_accept: return (void *)(uintptr_t)ret; } /* * rpmemd_fip_start_thread -- start background thread for establishing * in-band connection */ static int rpmemd_fip_start_thread(struct rpmemd *rpmemd) { errno = os_thread_create(&rpmemd->fip_thread, NULL, rpmemd_fip_thread, rpmemd); if (errno) { RPMEMD_LOG(ERR, "!creating in-band thread"); goto err_os_thread_create; } rpmemd->fip_running = 1; return 0; err_os_thread_create: return -1; } /* * rpmemd_fip_stop_thread -- stop background thread for in-band connection */ static int rpmemd_fip_stop_thread(struct rpmemd *rpmemd) { 
RPMEMD_ASSERT(rpmemd->fip_running); void *tret; errno = os_thread_join(&rpmemd->fip_thread, &tret); if (errno) RPMEMD_LOG(ERR, "!waiting for in-band thread"); int ret = (int)(uintptr_t)tret; if (ret) RPMEMD_LOG(ERR, "in-band thread failed -- '%d'", ret); return ret; } /* * rpmemd_fip-stop -- stop in-band thread and stop processing thread */ static int rpmemd_fip_stop(struct rpmemd *rpmemd) { int ret; int fip_ret = rpmemd_fip_stop_thread(rpmemd); if (fip_ret) { RPMEMD_LOG(ERR, "!in-band thread failed"); } if (!fip_ret) { ret = rpmemd_fip_process_stop(rpmemd->fip); if (ret) { RPMEMD_LOG(ERR, "!stopping fip process failed"); } } rpmemd->fip_running = 0; return fip_ret; } /* * rpmemd_close_pool -- close pool and remove it if required */ static int rpmemd_close_pool(struct rpmemd *rpmemd, int remove) { int ret = 0; RPMEMD_LOG(NOTICE, "closing pool"); rpmemd_db_pool_close(rpmemd->db, rpmemd->pool); RPMEMD_LOG(INFO, "pool closed"); if (remove) { RPMEMD_LOG(NOTICE, "removing '%s'", rpmemd->pool_desc); ret = rpmemd_db_pool_remove(rpmemd->db, rpmemd->pool_desc, 0, 0); if (ret) { RPMEMD_LOG(ERR, "!removing pool '%s' failed", rpmemd->pool_desc); } else { RPMEMD_LOG(INFO, "removed '%s'", rpmemd->pool_desc); } } free(rpmemd->pool_desc); return ret; } /* * rpmemd_req_cleanup -- cleanup in-band connection and all resources allocated * during open/create requests */ static void rpmemd_req_cleanup(struct rpmemd *rpmemd) { if (!rpmemd->fip_running) return; int ret; ret = rpmemd_fip_stop(rpmemd); if (!ret) { rpmemd_fip_close(rpmemd->fip); rpmemd_fip_fini(rpmemd->fip); } int remove = rpmemd->created && ret; rpmemd_close_pool(rpmemd, remove); } /* * rpmemd_req_create -- handle create request */ static int rpmemd_req_create(struct rpmemd_obc *obc, void *arg, const struct rpmem_req_attr *req, const struct rpmem_pool_attr *pool_attr) { RPMEMD_ASSERT(arg != NULL); RPMEMD_LOG(NOTICE, "create request:"); rpmemd_print_req_attr(req); RPMEMD_LOG(NOTICE, "pool attributes:"); rpmemd_print_pool_attr(pool_attr); struct rpmemd *rpmemd = (struct rpmemd *)arg; int ret; int status = 0; int err_send = 1; struct rpmem_resp_attr resp; memset(&resp, 0, sizeof(resp)); if (rpmemd->pool) { RPMEMD_LOG(ERR, "pool already opened"); ret = -1; status = RPMEM_ERR_FATAL; goto err_pool_opened; } rpmemd->pool_desc = strdup(req->pool_desc); if (!rpmemd->pool_desc) { RPMEMD_LOG(ERR, "!allocating pool descriptor"); ret = -1; status = RPMEM_ERR_FATAL; goto err_strdup; } rpmemd->pool = rpmemd_db_pool_create(rpmemd->db, req->pool_desc, 0, pool_attr); if (!rpmemd->pool) { ret = -1; status = rpmemd_db_get_status(errno); goto err_pool_create; } rpmemd->created = 1; ret = rpmemd_check_pool(rpmemd, req, &status); if (ret) goto err_pool_check; ret = rpmemd_common_fip_init(rpmemd, req, &resp, &status); if (ret) goto err_fip_init; RPMEMD_LOG(NOTICE, "create request response: (status = %u)", status); if (!status) rpmemd_print_resp_attr(&resp); ret = rpmemd_obc_create_resp(obc, status, &resp); if (ret) goto err_create_resp; ret = rpmemd_fip_start_thread(rpmemd); if (ret) goto err_fip_start; return 0; err_fip_start: err_create_resp: err_send = 0; rpmemd_fip_fini(rpmemd->fip); err_fip_init: err_pool_check: rpmemd_db_pool_close(rpmemd->db, rpmemd->pool); rpmemd_db_pool_remove(rpmemd->db, req->pool_desc, 0, 0); err_pool_create: free(rpmemd->pool_desc); err_strdup: err_pool_opened: if (err_send) ret = rpmemd_obc_create_resp(obc, status, &resp); rpmemd->closing = 1; return ret; } /* * rpmemd_req_open -- handle open request */ static int rpmemd_req_open(struct 
rpmemd_obc *obc, void *arg, const struct rpmem_req_attr *req) { RPMEMD_ASSERT(arg != NULL); RPMEMD_LOG(NOTICE, "open request:"); rpmemd_print_req_attr(req); struct rpmemd *rpmemd = (struct rpmemd *)arg; int ret; int status = 0; int err_send = 1; struct rpmem_resp_attr resp; memset(&resp, 0, sizeof(resp)); struct rpmem_pool_attr pool_attr; memset(&pool_attr, 0, sizeof(pool_attr)); if (rpmemd->pool) { RPMEMD_LOG(ERR, "pool already opened"); ret = -1; status = RPMEM_ERR_FATAL; goto err_pool_opened; } rpmemd->pool_desc = strdup(req->pool_desc); if (!rpmemd->pool_desc) { RPMEMD_LOG(ERR, "!allocating pool descriptor"); ret = -1; status = RPMEM_ERR_FATAL; goto err_strdup; } rpmemd->pool = rpmemd_db_pool_open(rpmemd->db, req->pool_desc, 0, &pool_attr); if (!rpmemd->pool) { ret = -1; status = rpmemd_db_get_status(errno); goto err_pool_open; } RPMEMD_LOG(NOTICE, "pool attributes:"); rpmemd_print_pool_attr(&pool_attr); ret = rpmemd_check_pool(rpmemd, req, &status); if (ret) goto err_pool_check; ret = rpmemd_common_fip_init(rpmemd, req, &resp, &status); if (ret) goto err_fip_init; RPMEMD_LOG(NOTICE, "open request response: (status = %u)", status); if (!status) rpmemd_print_resp_attr(&resp); ret = rpmemd_obc_open_resp(obc, status, &resp, &pool_attr); if (ret) goto err_open_resp; ret = rpmemd_fip_start_thread(rpmemd); if (ret) goto err_fip_start; return 0; err_fip_start: err_open_resp: err_send = 0; rpmemd_fip_fini(rpmemd->fip); err_fip_init: err_pool_check: rpmemd_db_pool_close(rpmemd->db, rpmemd->pool); err_pool_open: free(rpmemd->pool_desc); err_strdup: err_pool_opened: if (err_send) ret = rpmemd_obc_open_resp(obc, status, &resp, &pool_attr); rpmemd->closing = 1; return ret; } /* * rpmemd_req_close -- handle close request */ static int rpmemd_req_close(struct rpmemd_obc *obc, void *arg, int flags) { RPMEMD_ASSERT(arg != NULL); RPMEMD_LOG(NOTICE, "close request"); struct rpmemd *rpmemd = (struct rpmemd *)arg; rpmemd->closing = 1; int ret; int status = 0; if (!rpmemd->pool) { RPMEMD_LOG(ERR, "pool not opened"); status = RPMEM_ERR_FATAL; return rpmemd_obc_close_resp(obc, status); } ret = rpmemd_fip_stop(rpmemd); if (ret) { status = RPMEM_ERR_FATAL; } else { rpmemd_fip_close(rpmemd->fip); rpmemd_fip_fini(rpmemd->fip); } int remove = rpmemd->created && (status || (flags & RPMEM_CLOSE_FLAGS_REMOVE)); if (rpmemd_close_pool(rpmemd, remove)) RPMEMD_LOG(ERR, "closing pool failed"); RPMEMD_LOG(NOTICE, "close request response (status = %u)", status); ret = rpmemd_obc_close_resp(obc, status); return ret; } /* * rpmemd_req_set_attr -- handle set attributes request */ static int rpmemd_req_set_attr(struct rpmemd_obc *obc, void *arg, const struct rpmem_pool_attr *pool_attr) { RPMEMD_ASSERT(arg != NULL); RPMEMD_LOG(NOTICE, "set attributes request"); struct rpmemd *rpmemd = (struct rpmemd *)arg; RPMEMD_ASSERT(rpmemd->pool != NULL); int ret; int status = 0; int err_send = 1; ret = rpmemd_db_pool_set_attr(rpmemd->pool, pool_attr); if (ret) { ret = -1; status = rpmemd_db_get_status(errno); goto err_set_attr; } RPMEMD_LOG(NOTICE, "new pool attributes:"); rpmemd_print_pool_attr(pool_attr); ret = rpmemd_obc_set_attr_resp(obc, status); if (ret) goto err_set_attr_resp; return ret; err_set_attr_resp: err_send = 0; err_set_attr: if (err_send) ret = rpmemd_obc_set_attr_resp(obc, status); return ret; } static struct rpmemd_obc_requests rpmemd_req = { .create = rpmemd_req_create, .open = rpmemd_req_open, .close = rpmemd_req_close, .set_attr = rpmemd_req_set_attr, }; /* * rpmemd_print_info -- print basic info and configuration */ 
static void rpmemd_print_info(struct rpmemd *rpmemd) { RPMEMD_LOG(NOTICE, "ssh connection: %s", _str(os_getenv("SSH_CONNECTION"))); RPMEMD_LOG(NOTICE, "user: %s", _str(os_getenv("USER"))); RPMEMD_LOG(NOTICE, "configuration"); RPMEMD_LOG(NOTICE, RPMEMD_LOG_INDENT "pool set directory: '%s'", _str(rpmemd->config.poolset_dir)); RPMEMD_LOG(NOTICE, RPMEMD_LOG_INDENT "persist method: %s", rpmem_persist_method_to_str(rpmemd->persist_method)); RPMEMD_LOG(NOTICE, RPMEMD_LOG_INDENT "number of threads: %lu", rpmemd->config.nthreads); RPMEMD_DBG(RPMEMD_LOG_INDENT "persist APM: %s", bool2str(rpmemd->config.persist_apm)); RPMEMD_DBG(RPMEMD_LOG_INDENT "persist GPSPM: %s", bool2str(rpmemd->config.persist_general)); RPMEMD_DBG(RPMEMD_LOG_INDENT "use syslog: %s", bool2str(rpmemd->config.use_syslog)); RPMEMD_DBG(RPMEMD_LOG_INDENT "log file: %s", _str(rpmemd->config.log_file)); RPMEMD_DBG(RPMEMD_LOG_INDENT "log level: %s", rpmemd_log_level_to_str(rpmemd->config.log_level)); } int main(int argc, char *argv[]) { util_init(); int send_status = 1; int ret = 1; struct rpmemd *rpmemd = calloc(1, sizeof(*rpmemd)); if (!rpmemd) { RPMEMD_LOG(ERR, "!calloc"); goto err_rpmemd; } rpmemd->obc = rpmemd_obc_init(STDIN_FILENO, STDOUT_FILENO); if (!rpmemd->obc) { RPMEMD_LOG(ERR, "out-of-band connection initialization"); goto err_obc; } if (rpmemd_log_init(DAEMON_NAME, NULL, 0)) { RPMEMD_LOG(ERR, "logging subsystem initialization failed"); goto err_log_init; } if (rpmemd_config_read(&rpmemd->config, argc, argv) != 0) { RPMEMD_LOG(ERR, "reading configuration failed"); goto err_config; } rpmemd_log_close(); rpmemd_log_level = rpmemd->config.log_level; if (rpmemd_log_init(DAEMON_NAME, rpmemd->config.log_file, rpmemd->config.use_syslog)) { RPMEMD_LOG(ERR, "logging subsystem initialization" " failed (%s, %d)", rpmemd->config.log_file, rpmemd->config.use_syslog); goto err_log_init_config; } RPMEMD_LOG(INFO, "%s version %s", DAEMON_NAME, SRCVERSION); rpmemd->persist_method = rpmemd_get_pm(&rpmemd->config); rpmemd->db = rpmemd_db_init(rpmemd->config.poolset_dir, 0666); if (!rpmemd->db) { RPMEMD_LOG(ERR, "!pool set db initialization"); goto err_db_init; } if (rpmemd->config.rm_poolset) { RPMEMD_LOG(INFO, "removing '%s'", rpmemd->config.rm_poolset); if (rpmemd_db_pool_remove(rpmemd->db, rpmemd->config.rm_poolset, rpmemd->config.force, rpmemd->config.pool_set)) { RPMEMD_LOG(ERR, "removing '%s' failed", rpmemd->config.rm_poolset); ret = errno; } else { RPMEMD_LOG(NOTICE, "removed '%s'", rpmemd->config.rm_poolset); ret = 0; } send_status = 0; goto out_rm; } ret = rpmemd_obc_status(rpmemd->obc, 0); if (ret) { RPMEMD_LOG(ERR, "writing status failed"); goto err_status; } rpmemd_print_info(rpmemd); while (!ret) { ret = rpmemd_obc_process(rpmemd->obc, &rpmemd_req, rpmemd); if (ret) { RPMEMD_LOG(ERR, "out-of-band connection" " process failed"); goto err; } if (rpmemd->closing) break; } rpmemd_db_fini(rpmemd->db); rpmemd_config_free(&rpmemd->config); rpmemd_log_close(); rpmemd_obc_fini(rpmemd->obc); free(rpmemd); return 0; err: rpmemd_req_cleanup(rpmemd); err_status: out_rm: rpmemd_db_fini(rpmemd->db); err_db_init: err_log_init_config: rpmemd_config_free(&rpmemd->config); err_config: rpmemd_log_close(); err_log_init: if (send_status) { if (rpmemd_obc_status(rpmemd->obc, (uint32_t)errno)) RPMEMD_LOG(ERR, "writing status failed"); } rpmemd_obc_fini(rpmemd->obc); err_obc: free(rpmemd); err_rpmemd: return ret; }
18,497
22.007463
79
c
null
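
In the rpmemd.c record above, rpmemd_fip_thread() returns its integer status by casting it through the thread's void * result, and rpmemd_fip_stop_thread() recovers it after the join. The snippet below is a standalone mirror of that idiom using plain pthreads instead of the project's os_thread_* wrappers; the worker logic and the status value 42 are invented for the example.

#include <cstdint>
#include <cstdio>
#include <pthread.h>

/* pretend in-band worker: returns a status the same way rpmemd_fip_thread does */
static void *worker(void *arg) {
  int should_fail = *(int *)arg;
  int status = should_fail ? 42 : 0;
  return (void *)(uintptr_t)status;       /* int smuggled through the void * result */
}

int main() {
  pthread_t t;
  int should_fail = 1;
  if (pthread_create(&t, NULL, worker, &should_fail) != 0)
    return 1;

  void *tret = NULL;
  pthread_join(t, &tret);                 /* as in rpmemd_fip_stop_thread() */
  int status = (int)(uintptr_t)tret;      /* recover the int on the joining side */
  std::printf("worker status: %d\n", status);
  return 0;
}
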
NearPMSW-main/nearpm/shadow/pmdk-sd/src/tools/rpmemd/rpmemd_log.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2016-2018, Intel Corporation */ /* * rpmemd_log.h -- rpmemd logging functions declarations */ #include <string.h> #include "util.h" #define FORMAT_PRINTF(a, b) __attribute__((__format__(__printf__, (a), (b)))) /* * The tab character is not allowed in rpmemd log, * because it is not well handled by syslog. * Please use RPMEMD_LOG_INDENT instead. */ #define RPMEMD_LOG_INDENT " " #ifdef DEBUG #define RPMEMD_LOG(level, fmt, arg...) do {\ COMPILE_ERROR_ON(strchr(fmt, '\t') != 0);\ rpmemd_log(RPD_LOG_##level, __FILE__, __LINE__, fmt, ## arg);\ } while (0) #else #define RPMEMD_LOG(level, fmt, arg...) do {\ COMPILE_ERROR_ON(strchr(fmt, '\t') != 0);\ rpmemd_log(RPD_LOG_##level, NULL, 0, fmt, ## arg);\ } while (0) #endif #ifdef DEBUG #define RPMEMD_DBG(fmt, arg...) do {\ COMPILE_ERROR_ON(strchr(fmt, '\t') != 0);\ rpmemd_log(_RPD_LOG_DBG, __FILE__, __LINE__, fmt, ## arg);\ } while (0) #else #define RPMEMD_DBG(fmt, arg...) do {} while (0) #endif #define RPMEMD_ERR(fmt, arg...) do {\ RPMEMD_LOG(ERR, fmt, ## arg);\ } while (0) #define RPMEMD_FATAL(fmt, arg...) do {\ RPMEMD_LOG(ERR, fmt, ## arg);\ abort();\ } while (0) #define RPMEMD_ASSERT(cond) do {\ if (!(cond)) {\ rpmemd_log(RPD_LOG_ERR, __FILE__, __LINE__,\ "assertion fault: %s", #cond);\ abort();\ }\ } while (0) enum rpmemd_log_level { RPD_LOG_ERR, RPD_LOG_WARN, RPD_LOG_NOTICE, RPD_LOG_INFO, _RPD_LOG_DBG, /* disallow to use this with LOG macro */ MAX_RPD_LOG, }; enum rpmemd_log_level rpmemd_log_level_from_str(const char *str); const char *rpmemd_log_level_to_str(enum rpmemd_log_level level); extern enum rpmemd_log_level rpmemd_log_level; int rpmemd_log_init(const char *ident, const char *fname, int use_syslog); void rpmemd_log_close(void); int rpmemd_prefix(const char *fmt, ...) FORMAT_PRINTF(1, 2); void rpmemd_log(enum rpmemd_log_level level, const char *fname, int lineno, const char *fmt, ...) FORMAT_PRINTF(4, 5);
1,991
25.210526
77
h
null
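
The rpmemd_log.h record above declares its logging functions with a FORMAT_PRINTF attribute so the compiler can type-check variadic arguments against the format string. The standalone sketch below shows that mechanism on a hypothetical demo_log() function; it is GCC/clang-specific, just like the attribute in the header, and is not the project's rpmemd_log() implementation.

#include <cstdarg>
#include <cstdio>

#define FORMAT_PRINTF(a, b) __attribute__((__format__(__printf__, (a), (b))))

static void demo_log(int level, const char *fmt, ...) FORMAT_PRINTF(2, 3);

static void
demo_log(int level, const char *fmt, ...)
{
  va_list ap;
  va_start(ap, fmt);
  std::fprintf(stderr, "[%d] ", level);
  std::vfprintf(stderr, fmt, ap);   /* forward the argument list, as a logger would */
  std::fputc('\n', stderr);
  va_end(ap);
}

int main() {
  demo_log(0, "pool size: %zu", (size_t)1024);  /* arguments type-checked against fmt */
  /* demo_log(0, "pool size: %s", 1024); */     /* would be flagged by -Wformat */
  return 0;
}
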
NearPMSW-main/nearpm/shadow/pmdk-sd/src/tools/rpmemd/rpmemd_util.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2017-2018, Intel Corporation */ /* * rpmemd_util.c -- rpmemd utility functions definitions */ #include <stdlib.h> #include <unistd.h> #include "libpmem.h" #include "rpmem_common.h" #include "rpmemd_log.h" #include "rpmemd_util.h" /* * rpmemd_pmem_persist -- pmem_persist wrapper required to unify function * pointer type with pmem_msync */ int rpmemd_pmem_persist(const void *addr, size_t len) { pmem_persist(addr, len); return 0; } /* * rpmemd_flush_fatal -- APM specific flush function which should never be * called because APM does not require flushes */ int rpmemd_flush_fatal(const void *addr, size_t len) { RPMEMD_FATAL("rpmemd_flush_fatal should never be called"); } /* * rpmemd_persist_to_str -- convert persist function pointer to string */ static const char * rpmemd_persist_to_str(int (*persist)(const void *addr, size_t len)) { if (persist == rpmemd_pmem_persist) { return "pmem_persist"; } else if (persist == pmem_msync) { return "pmem_msync"; } else if (persist == rpmemd_flush_fatal) { return "none"; } else { return NULL; } } /* * rpmem_print_pm_policy -- print persistency method policy */ static void rpmem_print_pm_policy(enum rpmem_persist_method persist_method, int (*persist)(const void *addr, size_t len)) { RPMEMD_LOG(NOTICE, RPMEMD_LOG_INDENT "persist method: %s", rpmem_persist_method_to_str(persist_method)); RPMEMD_LOG(NOTICE, RPMEMD_LOG_INDENT "persist flush: %s", rpmemd_persist_to_str(persist)); } /* * rpmem_memcpy_msync -- memcpy and msync */ static void * rpmem_memcpy_msync(void *pmemdest, const void *src, size_t len) { void *ret = pmem_memcpy(pmemdest, src, len, PMEM_F_MEM_NOFLUSH); pmem_msync(pmemdest, len); return ret; } /* * rpmemd_apply_pm_policy -- choose the persistency method and the flush * function according to the pool type and the persistency method read from the * config */ int rpmemd_apply_pm_policy(enum rpmem_persist_method *persist_method, int (**persist)(const void *addr, size_t len), void *(**memcpy_persist)(void *pmemdest, const void *src, size_t len), const int is_pmem) { switch (*persist_method) { case RPMEM_PM_APM: if (is_pmem) { *persist_method = RPMEM_PM_APM; *persist = rpmemd_flush_fatal; } else { *persist_method = RPMEM_PM_GPSPM; *persist = pmem_msync; } break; case RPMEM_PM_GPSPM: *persist_method = RPMEM_PM_GPSPM; *persist = is_pmem ? rpmemd_pmem_persist : pmem_msync; break; default: RPMEMD_FATAL("invalid persist method: %d", *persist_method); return -1; } /* this is for RPMEM_PERSIST_INLINE */ if (is_pmem) *memcpy_persist = pmem_memcpy_persist; else *memcpy_persist = rpmem_memcpy_msync; RPMEMD_LOG(NOTICE, "persistency policy:"); rpmem_print_pm_policy(*persist_method, *persist); return 0; }
2,839
22.666667
79
c
null
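
The rpmemd_util.c record above implements rpmemd_apply_pm_policy(): the Appliance Persistency Method (APM) is honoured only when the pool is on real pmem, otherwise the daemon falls back to GPSPM, and the flush routine is chosen accordingly. The snippet below is a standalone table-style mirror of that decision, using invented enum and string names rather than the PMDK types, purely to make the policy matrix visible.

#include <cstdio>

enum pm { PM_GPSPM, PM_APM };

struct policy {
  enum pm method;     /* effective persistency method */
  const char *flush;  /* name of the flush routine that would be installed */
};

static struct policy apply_pm_policy(enum pm requested, int is_pmem) {
  struct policy p;
  if (requested == PM_APM && is_pmem) {
    p.method = PM_APM;   p.flush = "none (flush would be fatal)";
  } else if (requested == PM_APM) {
    p.method = PM_GPSPM; p.flush = "pmem_msync";       /* APM demoted: not real pmem */
  } else {
    p.method = PM_GPSPM; p.flush = is_pmem ? "pmem_persist" : "pmem_msync";
  }
  return p;
}

int main() {
  for (int is_pmem = 0; is_pmem <= 1; ++is_pmem) {
    for (int req = PM_GPSPM; req <= PM_APM; ++req) {
      struct policy p = apply_pm_policy((enum pm)req, is_pmem);
      std::printf("requested=%-5s is_pmem=%d -> method=%-5s flush=%s\n",
          req == PM_APM ? "APM" : "GPSPM", is_pmem,
          p.method == PM_APM ? "APM" : "GPSPM", p.flush);
    }
  }
  return 0;
}
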
NearPMSW-main/nearpm/shadow/pmdk-sd/src/tools/rpmemd/rpmemd_db.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2016-2018, Intel Corporation */ /* * rpmemd_db.h -- internal definitions for rpmemd database of pool set files */ struct rpmemd_db; struct rpmem_pool_attr; /* * struct rpmemd_db_pool -- remote pool context */ struct rpmemd_db_pool { void *pool_addr; size_t pool_size; struct pool_set *set; }; struct rpmemd_db *rpmemd_db_init(const char *root_dir, mode_t mode); struct rpmemd_db_pool *rpmemd_db_pool_create(struct rpmemd_db *db, const char *pool_desc, size_t pool_size, const struct rpmem_pool_attr *rattr); struct rpmemd_db_pool *rpmemd_db_pool_open(struct rpmemd_db *db, const char *pool_desc, size_t pool_size, struct rpmem_pool_attr *rattr); int rpmemd_db_pool_remove(struct rpmemd_db *db, const char *pool_desc, int force, int pool_set); int rpmemd_db_pool_set_attr(struct rpmemd_db_pool *prp, const struct rpmem_pool_attr *rattr); void rpmemd_db_pool_close(struct rpmemd_db *db, struct rpmemd_db_pool *prp); void rpmemd_db_fini(struct rpmemd_db *db); int rpmemd_db_check_dir(struct rpmemd_db *db); int rpmemd_db_pool_is_pmem(struct rpmemd_db_pool *pool);
1,132
32.323529
76
h
null
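
The rpmemd_db.h record above exposes a handle-based pool set database: an init that takes a root directory, create/open calls that return a pool handle, and close/fini teardown, all serialized internally. The toy registry below mirrors only that shape (absolute root directory validated at init, path composition under a mutex, explicit fini); it is self-contained illustration code and none of it is PMDK's implementation.

#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <pthread.h>

struct toy_db {
  pthread_mutex_t lock;
  char *root_dir;
};

static struct toy_db *toy_db_init(const char *root_dir) {
  if (root_dir[0] != '/') {               /* same absolute-path requirement */
    std::fprintf(stderr, "root dir must be absolute\n");
    return NULL;
  }
  struct toy_db *db = (struct toy_db *)std::calloc(1, sizeof(*db));
  if (!db)
    return NULL;
  size_t n = std::strlen(root_dir) + 1;
  db->root_dir = (char *)std::malloc(n);
  if (!db->root_dir) {
    std::free(db);
    return NULL;
  }
  std::memcpy(db->root_dir, root_dir, n);
  pthread_mutex_init(&db->lock, NULL);
  return db;
}

static char *toy_db_path(struct toy_db *db, const char *desc) {
  pthread_mutex_lock(&db->lock);          /* every operation serialized, as in the real db */
  size_t len = std::strlen(db->root_dir) + std::strlen(desc) + 2;
  char *path = (char *)std::malloc(len);
  if (path)
    std::snprintf(path, len, "%s/%s", db->root_dir, desc);
  pthread_mutex_unlock(&db->lock);
  return path;                            /* caller frees */
}

static void toy_db_fini(struct toy_db *db) {
  pthread_mutex_destroy(&db->lock);
  std::free(db->root_dir);
  std::free(db);
}

int main() {
  struct toy_db *db = toy_db_init("/dev/shm/pools");   /* illustrative root */
  if (!db)
    return 1;
  char *p = toy_db_path(db, "pool0.set");
  std::printf("pool set path: %s\n", p);
  std::free(p);
  toy_db_fini(db);
  return 0;
}
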
NearPMSW-main/nearpm/shadow/pmdk-sd/src/tools/rpmemd/rpmemd_obc.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2016-2019, Intel Corporation */ /* * rpmemd_obc.c -- rpmemd out-of-band connection definitions */ #include <stdlib.h> #include <errno.h> #include <stdint.h> #include <string.h> #include <netinet/in.h> #include <arpa/inet.h> #include <sys/socket.h> #include <unistd.h> #include <netdb.h> #include "librpmem.h" #include "rpmemd_log.h" #include "rpmem_proto.h" #include "rpmem_common.h" #include "rpmemd_obc.h" struct rpmemd_obc { int fd_in; int fd_out; }; /* * rpmemd_obc_check_proto_ver -- check protocol version */ static int rpmemd_obc_check_proto_ver(unsigned major, unsigned minor) { if (major != RPMEM_PROTO_MAJOR || minor != RPMEM_PROTO_MINOR) { RPMEMD_LOG(ERR, "unsupported protocol version -- %u.%u", major, minor); return -1; } return 0; } /* * rpmemd_obc_check_msg_hdr -- check message header */ static int rpmemd_obc_check_msg_hdr(struct rpmem_msg_hdr *hdrp) { switch (hdrp->type) { case RPMEM_MSG_TYPE_OPEN: case RPMEM_MSG_TYPE_CREATE: case RPMEM_MSG_TYPE_CLOSE: case RPMEM_MSG_TYPE_SET_ATTR: /* all messages from obc to server are fine */ break; default: RPMEMD_LOG(ERR, "invalid message type -- %u", hdrp->type); return -1; } if (hdrp->size < sizeof(struct rpmem_msg_hdr)) { RPMEMD_LOG(ERR, "invalid message size -- %lu", hdrp->size); return -1; } return 0; } /* * rpmemd_obc_check_pool_desc -- check pool descriptor */ static int rpmemd_obc_check_pool_desc(struct rpmem_msg_hdr *hdrp, size_t msg_size, struct rpmem_msg_pool_desc *pool_desc) { size_t body_size = msg_size + pool_desc->size; if (hdrp->size != body_size) { RPMEMD_LOG(ERR, "message and pool descriptor size mismatch " "-- is %lu should be %lu", hdrp->size, body_size); return -1; } if (pool_desc->size < 2) { RPMEMD_LOG(ERR, "invalid pool descriptor size -- %u " "(must be >= 2)", pool_desc->size); return -1; } if (pool_desc->desc[pool_desc->size - 1] != '\0') { RPMEMD_LOG(ERR, "invalid pool descriptor " "(must be null-terminated string)"); return -1; } size_t len = strlen((char *)pool_desc->desc) + 1; if (pool_desc->size != len) { RPMEMD_LOG(ERR, "invalid pool descriptor size -- is %lu " "should be %u", len, pool_desc->size); return -1; } return 0; } /* * rpmemd_obc_check_provider -- check provider value */ static int rpmemd_obc_check_provider(uint32_t provider) { if (provider == 0 || provider >= MAX_RPMEM_PROV) { RPMEMD_LOG(ERR, "invalid provider -- %u", provider); return -1; } return 0; } /* * rpmemd_obc_ntoh_check_msg_create -- convert and check create request message */ static int rpmemd_obc_ntoh_check_msg_create(struct rpmem_msg_hdr *hdrp) { int ret; struct rpmem_msg_create *msg = (struct rpmem_msg_create *)hdrp; rpmem_ntoh_msg_create(msg); ret = rpmemd_obc_check_proto_ver(msg->c.major, msg->c.minor); if (ret) return ret; ret = rpmemd_obc_check_pool_desc(hdrp, sizeof(*msg), &msg->pool_desc); if (ret) return ret; ret = rpmemd_obc_check_provider(msg->c.provider); if (ret) return ret; return 0; } /* * rpmemd_obc_ntoh_check_msg_open -- convert and check open request message */ static int rpmemd_obc_ntoh_check_msg_open(struct rpmem_msg_hdr *hdrp) { int ret; struct rpmem_msg_open *msg = (struct rpmem_msg_open *)hdrp; rpmem_ntoh_msg_open(msg); ret = rpmemd_obc_check_proto_ver(msg->c.major, msg->c.minor); if (ret) return ret; ret = rpmemd_obc_check_pool_desc(hdrp, sizeof(*msg), &msg->pool_desc); if (ret) return ret; ret = rpmemd_obc_check_provider(msg->c.provider); if (ret) return ret; return 0; } /* * rpmemd_obc_ntoh_check_msg_close -- convert and check close request message */ static int 
rpmemd_obc_ntoh_check_msg_close(struct rpmem_msg_hdr *hdrp) { struct rpmem_msg_close *msg = (struct rpmem_msg_close *)hdrp; rpmem_ntoh_msg_close(msg); /* nothing to do */ return 0; } /* * rpmemd_obc_ntoh_check_msg_set_attr -- convert and check set attributes * request message */ static int rpmemd_obc_ntoh_check_msg_set_attr(struct rpmem_msg_hdr *hdrp) { struct rpmem_msg_set_attr *msg = (struct rpmem_msg_set_attr *)hdrp; rpmem_ntoh_msg_set_attr(msg); /* nothing to do */ return 0; } typedef int (*rpmemd_obc_ntoh_check_msg_fn)(struct rpmem_msg_hdr *hdrp); static rpmemd_obc_ntoh_check_msg_fn rpmemd_obc_ntoh_check_msg[] = { [RPMEM_MSG_TYPE_CREATE] = rpmemd_obc_ntoh_check_msg_create, [RPMEM_MSG_TYPE_OPEN] = rpmemd_obc_ntoh_check_msg_open, [RPMEM_MSG_TYPE_CLOSE] = rpmemd_obc_ntoh_check_msg_close, [RPMEM_MSG_TYPE_SET_ATTR] = rpmemd_obc_ntoh_check_msg_set_attr, }; /* * rpmemd_obc_process_create -- process create request */ static int rpmemd_obc_process_create(struct rpmemd_obc *obc, struct rpmemd_obc_requests *req_cb, void *arg, struct rpmem_msg_hdr *hdrp) { struct rpmem_msg_create *msg = (struct rpmem_msg_create *)hdrp; struct rpmem_req_attr req = { .pool_size = msg->c.pool_size, .nlanes = (unsigned)msg->c.nlanes, .pool_desc = (char *)msg->pool_desc.desc, .provider = (enum rpmem_provider)msg->c.provider, .buff_size = msg->c.buff_size, }; struct rpmem_pool_attr *rattr = NULL; struct rpmem_pool_attr rpmem_attr; unpack_rpmem_pool_attr(&msg->pool_attr, &rpmem_attr); if (!util_is_zeroed(&rpmem_attr, sizeof(rpmem_attr))) rattr = &rpmem_attr; return req_cb->create(obc, arg, &req, rattr); } /* * rpmemd_obc_process_open -- process open request */ static int rpmemd_obc_process_open(struct rpmemd_obc *obc, struct rpmemd_obc_requests *req_cb, void *arg, struct rpmem_msg_hdr *hdrp) { struct rpmem_msg_open *msg = (struct rpmem_msg_open *)hdrp; struct rpmem_req_attr req = { .pool_size = msg->c.pool_size, .nlanes = (unsigned)msg->c.nlanes, .pool_desc = (const char *)msg->pool_desc.desc, .provider = (enum rpmem_provider)msg->c.provider, .buff_size = msg->c.buff_size, }; return req_cb->open(obc, arg, &req); } /* * rpmemd_obc_process_close -- process close request */ static int rpmemd_obc_process_close(struct rpmemd_obc *obc, struct rpmemd_obc_requests *req_cb, void *arg, struct rpmem_msg_hdr *hdrp) { struct rpmem_msg_close *msg = (struct rpmem_msg_close *)hdrp; return req_cb->close(obc, arg, (int)msg->flags); } /* * rpmemd_obc_process_set_attr -- process set attributes request */ static int rpmemd_obc_process_set_attr(struct rpmemd_obc *obc, struct rpmemd_obc_requests *req_cb, void *arg, struct rpmem_msg_hdr *hdrp) { struct rpmem_msg_set_attr *msg = (struct rpmem_msg_set_attr *)hdrp; struct rpmem_pool_attr *rattr = NULL; struct rpmem_pool_attr rpmem_attr; unpack_rpmem_pool_attr(&msg->pool_attr, &rpmem_attr); if (!util_is_zeroed(&rpmem_attr, sizeof(rpmem_attr))) rattr = &rpmem_attr; return req_cb->set_attr(obc, arg, rattr); } typedef int (*rpmemd_obc_process_fn)(struct rpmemd_obc *obc, struct rpmemd_obc_requests *req_cb, void *arg, struct rpmem_msg_hdr *hdrp); static rpmemd_obc_process_fn rpmemd_obc_process_cb[] = { [RPMEM_MSG_TYPE_CREATE] = rpmemd_obc_process_create, [RPMEM_MSG_TYPE_OPEN] = rpmemd_obc_process_open, [RPMEM_MSG_TYPE_CLOSE] = rpmemd_obc_process_close, [RPMEM_MSG_TYPE_SET_ATTR] = rpmemd_obc_process_set_attr, }; /* * rpmemd_obc_recv -- wrapper for read and decode data function */ static inline int rpmemd_obc_recv(struct rpmemd_obc *obc, void *buff, size_t len) { return rpmem_xread(obc->fd_in, buff, len, 
0); } /* * rpmemd_obc_send -- wrapper for encode and write data function */ static inline int rpmemd_obc_send(struct rpmemd_obc *obc, const void *buff, size_t len) { return rpmem_xwrite(obc->fd_out, buff, len, 0); } /* * rpmemd_obc_msg_recv -- receive and check request message * * Return values: * 0 - success * < 0 - error * 1 - obc disconnected */ static int rpmemd_obc_msg_recv(struct rpmemd_obc *obc, struct rpmem_msg_hdr **hdrpp) { struct rpmem_msg_hdr hdr; struct rpmem_msg_hdr nhdr; struct rpmem_msg_hdr *hdrp; int ret; ret = rpmemd_obc_recv(obc, &nhdr, sizeof(nhdr)); if (ret == 1) { RPMEMD_LOG(NOTICE, "out-of-band connection disconnected"); return 1; } if (ret < 0) { RPMEMD_LOG(ERR, "!receiving message header failed"); return ret; } memcpy(&hdr, &nhdr, sizeof(hdr)); rpmem_ntoh_msg_hdr(&hdr); ret = rpmemd_obc_check_msg_hdr(&hdr); if (ret) { RPMEMD_LOG(ERR, "parsing message header failed"); return ret; } hdrp = malloc(hdr.size); if (!hdrp) { RPMEMD_LOG(ERR, "!allocating message buffer failed"); return -1; } memcpy(hdrp, &nhdr, sizeof(*hdrp)); size_t body_size = hdr.size - sizeof(hdr); ret = rpmemd_obc_recv(obc, hdrp->body, body_size); if (ret) { RPMEMD_LOG(ERR, "!receiving message body failed"); goto err_recv_body; } ret = rpmemd_obc_ntoh_check_msg[hdr.type](hdrp); if (ret) { RPMEMD_LOG(ERR, "parsing message body failed"); goto err_body; } *hdrpp = hdrp; return 0; err_body: err_recv_body: free(hdrp); return -1; } /* * rpmemd_obc_init -- initialize rpmemd */ struct rpmemd_obc * rpmemd_obc_init(int fd_in, int fd_out) { struct rpmemd_obc *obc = calloc(1, sizeof(*obc)); if (!obc) { RPMEMD_LOG(ERR, "!allocating obc failed"); goto err_calloc; } obc->fd_in = fd_in; obc->fd_out = fd_out; return obc; err_calloc: return NULL; } /* * rpmemd_obc_fini -- destroy obc */ void rpmemd_obc_fini(struct rpmemd_obc *obc) { free(obc); } /* * rpmemd_obc_status -- sends initial status to the client */ int rpmemd_obc_status(struct rpmemd_obc *obc, uint32_t status) { return rpmemd_obc_send(obc, &status, sizeof(status)); } /* * rpmemd_obc_process -- wait for and process a message from client * * Return values: * 0 - success * < 0 - error * 1 - client disconnected */ int rpmemd_obc_process(struct rpmemd_obc *obc, struct rpmemd_obc_requests *req_cb, void *arg) { RPMEMD_ASSERT(req_cb != NULL); RPMEMD_ASSERT(req_cb->create != NULL); RPMEMD_ASSERT(req_cb->open != NULL); RPMEMD_ASSERT(req_cb->close != NULL); RPMEMD_ASSERT(req_cb->set_attr != NULL); struct rpmem_msg_hdr *hdrp = NULL; int ret; ret = rpmemd_obc_msg_recv(obc, &hdrp); if (ret) return ret; RPMEMD_ASSERT(hdrp != NULL); ret = rpmemd_obc_process_cb[hdrp->type](obc, req_cb, arg, hdrp); free(hdrp); return ret; } /* * rpmemd_obc_create_resp -- send create request response message */ int rpmemd_obc_create_resp(struct rpmemd_obc *obc, int status, const struct rpmem_resp_attr *res) { struct rpmem_msg_create_resp resp = { .hdr = { .type = RPMEM_MSG_TYPE_CREATE_RESP, .size = sizeof(struct rpmem_msg_create_resp), .status = (uint32_t)status, }, .ibc = { .port = res->port, .rkey = res->rkey, .raddr = res->raddr, .persist_method = res->persist_method, .nlanes = res->nlanes, }, }; rpmem_hton_msg_create_resp(&resp); return rpmemd_obc_send(obc, &resp, sizeof(resp)); } /* * rpmemd_obc_open_resp -- send open request response message */ int rpmemd_obc_open_resp(struct rpmemd_obc *obc, int status, const struct rpmem_resp_attr *res, const struct rpmem_pool_attr *pool_attr) { struct rpmem_msg_open_resp resp = { .hdr = { .type = RPMEM_MSG_TYPE_OPEN_RESP, .size = sizeof(struct 
rpmem_msg_open_resp), .status = (uint32_t)status, }, .ibc = { .port = res->port, .rkey = res->rkey, .raddr = res->raddr, .persist_method = res->persist_method, .nlanes = res->nlanes, }, }; pack_rpmem_pool_attr(pool_attr, &resp.pool_attr); rpmem_hton_msg_open_resp(&resp); return rpmemd_obc_send(obc, &resp, sizeof(resp)); } /* * rpmemd_obc_close_resp -- send close request response message */ int rpmemd_obc_close_resp(struct rpmemd_obc *obc, int status) { struct rpmem_msg_close_resp resp = { .hdr = { .type = RPMEM_MSG_TYPE_CLOSE_RESP, .size = sizeof(struct rpmem_msg_close_resp), .status = (uint32_t)status, }, }; rpmem_hton_msg_close_resp(&resp); return rpmemd_obc_send(obc, &resp, sizeof(resp)); } /* * rpmemd_obc_set_attr_resp -- send set attributes request response message */ int rpmemd_obc_set_attr_resp(struct rpmemd_obc *obc, int status) { struct rpmem_msg_set_attr_resp resp = { .hdr = { .type = RPMEM_MSG_TYPE_SET_ATTR_RESP, .size = sizeof(struct rpmem_msg_set_attr_resp), .status = (uint32_t)status, }, }; rpmem_hton_msg_set_attr_resp(&resp); return rpmemd_obc_send(obc, &resp, sizeof(resp)); }
12,309
21.422587
79
c
null
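
The rpmemd_obc.c record above validates every incoming message header (type and minimum size) and then dispatches it through an array of handler function pointers indexed by message type. The snippet below is a standalone mirror of that check-then-dispatch pattern; the message layout, type values, and handler bodies are invented for the example and are not the rpmem wire protocol.

#include <cstdint>
#include <cstdio>

enum msg_type { MSG_CREATE = 1, MSG_OPEN = 2, MSG_CLOSE = 3, MSG_MAX };

struct msg_hdr {
  uint32_t type;
  uint64_t size;   /* total message size, header included */
};

static int check_msg_hdr(const struct msg_hdr *h) {
  if (h->type == 0 || h->type >= MSG_MAX) {
    std::fprintf(stderr, "invalid message type -- %u\n", h->type);
    return -1;
  }
  if (h->size < sizeof(struct msg_hdr)) {   /* a message can never be smaller than its header */
    std::fprintf(stderr, "invalid message size -- %llu\n", (unsigned long long)h->size);
    return -1;
  }
  return 0;
}

typedef int (*process_fn)(const struct msg_hdr *hdr);

static int process_create(const struct msg_hdr *) { std::puts("create request"); return 0; }
static int process_open(const struct msg_hdr *)   { std::puts("open request");   return 0; }
static int process_close(const struct msg_hdr *)  { std::puts("close request");  return 0; }

/* indexed by message type, mirroring the process_cb[] idea above */
static const process_fn process_cb[MSG_MAX] = {
  NULL, process_create, process_open, process_close,
};

int main() {
  struct msg_hdr h = { MSG_OPEN, sizeof(struct msg_hdr) };
  if (check_msg_hdr(&h) != 0)
    return 1;
  return process_cb[h.type](&h);
}
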
NearPMSW-main/nearpm/shadow/pmdk-sd/src/tools/rpmemd/rpmemd_config.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2016-2020, Intel Corporation */ /* * rpmemd_config.c -- rpmemd config source file */ #include <pwd.h> #include <stdio.h> #include <stddef.h> #include <stdlib.h> #include <stdint.h> #include <string.h> #include <unistd.h> #include <ctype.h> #include <errno.h> #include <getopt.h> #include <limits.h> #include <inttypes.h> #include "rpmemd.h" #include "rpmemd_log.h" #include "rpmemd_config.h" #include "os.h" #define CONFIG_LINE_SIZE_INIT 50 #define INVALID_CHAR_POS UINT64_MAX struct rpmemd_special_chars_pos { uint64_t equal_char; uint64_t comment_char; uint64_t EOL_char; }; enum rpmemd_option { RPD_OPT_LOG_FILE, RPD_OPT_POOLSET_DIR, RPD_OPT_PERSIST_APM, RPD_OPT_PERSIST_GENERAL, RPD_OPT_USE_SYSLOG, RPD_OPT_LOG_LEVEL, RPD_OPT_RM_POOLSET, RPD_OPT_MAX_VALUE, RPD_OPT_INVALID = UINT64_MAX, }; static const char *optstr = "c:hVr:fst:"; /* * options -- cl and config file options */ static const struct option options[] = { {"config", required_argument, NULL, 'c'}, {"help", no_argument, NULL, 'h'}, {"version", no_argument, NULL, 'V'}, {"log-file", required_argument, NULL, RPD_OPT_LOG_FILE}, {"poolset-dir", required_argument, NULL, RPD_OPT_POOLSET_DIR}, {"persist-apm", no_argument, NULL, RPD_OPT_PERSIST_APM}, {"persist-general", no_argument, NULL, RPD_OPT_PERSIST_GENERAL}, {"use-syslog", no_argument, NULL, RPD_OPT_USE_SYSLOG}, {"log-level", required_argument, NULL, RPD_OPT_LOG_LEVEL}, {"remove", required_argument, NULL, 'r'}, {"force", no_argument, NULL, 'f'}, {"pool-set", no_argument, NULL, 's'}, {"nthreads", required_argument, NULL, 't'}, {NULL, 0, NULL, 0}, }; #define VALUE_INDENT " " static const char * const help_str = "\n" "Options:\n" " -c, --config <path> configuration file location\n" " -r, --remove <poolset> remove pool described by given poolset file\n" " -f, --force ignore errors when removing a pool\n" " -t, --nthreads <num> number of processing threads\n" " -h, --help display help message and exit\n" " -V, --version display target daemon version and exit\n" " --log-file <path> log file location\n" " --poolset-dir <path> pool set files directory\n" " --persist-apm enable Appliance Persistency Method\n" " --persist-general enable General Server Persistency Mechanism\n" " --use-syslog use syslog(3) for logging messages\n" " --log-level <level> set log level value\n" VALUE_INDENT "err error conditions\n" VALUE_INDENT "warn warning conditions\n" VALUE_INDENT "notice normal, but significant, condition\n" VALUE_INDENT "info informational message\n" VALUE_INDENT "debug debug-level message\n" "\n" "For complete documentation see %s(1) manual page."; /* * print_version -- (internal) prints version message */ static void print_version(void) { RPMEMD_LOG(ERR, "%s version %s", DAEMON_NAME, SRCVERSION); } /* * print_usage -- (internal) prints usage message */ static void print_usage(const char *name) { RPMEMD_LOG(ERR, "usage: %s [--version] [--help] [<args>]", name); } /* * print_help -- (internal) prints help message */ static void print_help(const char *name) { print_usage(name); print_version(); RPMEMD_LOG(ERR, help_str, DAEMON_NAME); } /* * parse_config_string -- (internal) parse string value */ static inline char * parse_config_string(const char *value) { if (strlen(value) == 0) { errno = EINVAL; return NULL; } char *output = strdup(value); if (output == NULL) RPMEMD_FATAL("!strdup"); return output; } /* * parse_config_bool -- (internal) parse yes / no flag */ static inline int parse_config_bool(bool *config_value, const char *value) { if (value 
== NULL) *config_value = true; else if (strcmp("yes", value) == 0) *config_value = true; else if (strcmp("no", value) == 0) *config_value = false; else { errno = EINVAL; return -1; } return 0; } /* * set_option -- (internal) set single config option */ static int set_option(enum rpmemd_option option, const char *value, struct rpmemd_config *config) { int ret = 0; switch (option) { case RPD_OPT_LOG_FILE: free(config->log_file); config->log_file = parse_config_string(value); if (config->log_file == NULL) return -1; else config->use_syslog = false; break; case RPD_OPT_POOLSET_DIR: free(config->poolset_dir); config->poolset_dir = parse_config_string(value); if (config->poolset_dir == NULL) return -1; break; case RPD_OPT_PERSIST_APM: ret = parse_config_bool(&config->persist_apm, value); break; case RPD_OPT_PERSIST_GENERAL: ret = parse_config_bool(&config->persist_general, value); break; case RPD_OPT_USE_SYSLOG: ret = parse_config_bool(&config->use_syslog, value); break; case RPD_OPT_LOG_LEVEL: config->log_level = rpmemd_log_level_from_str(value); if (config->log_level == MAX_RPD_LOG) { errno = EINVAL; return -1; } break; default: errno = EINVAL; return -1; } return ret; } /* * get_config_line -- (internal) read single line from file */ static int get_config_line(FILE *file, char **line, uint64_t *line_max, uint8_t *line_max_increased, struct rpmemd_special_chars_pos *pos) { uint8_t line_complete = 0; uint64_t line_length = 0; char *line_part = *line; do { char *ret = fgets(line_part, (int)(*line_max - line_length), file); if (ret == NULL) return 0; for (uint64_t i = 0; i < *line_max; ++i) { if (line_part[i] == '\n') line_complete = 1; else if (line_part[i] == '\0') { line_length += i; if (line_length + 1 < *line_max) line_complete = 1; break; } else if (line_part[i] == '#' && pos->comment_char == UINT64_MAX) pos->comment_char = line_length + i; else if (line_part[i] == '=' && pos->equal_char == UINT64_MAX) pos->equal_char = line_length + i; } if (line_complete == 0) { *line = realloc(*line, sizeof(char) * (*line_max) * 2); if (*line == NULL) { RPMEMD_FATAL("!realloc"); } line_part = *line + *line_max - 1; line_length = *line_max - 1; *line_max *= 2; *line_max_increased = 1; } } while (line_complete != 1); pos->EOL_char = line_length; return 0; } /* * trim_line_element -- (internal) remove white characters */ static char * trim_line_element(char *line, uint64_t start, uint64_t end) { for (; start <= end; ++start) { if (!isspace(line[start])) break; } for (; end > start; --end) { if (!isspace(line[end - 1])) break; } if (start == end) return NULL; line[end] = '\0'; return &line[start]; } /* * parse_config_key -- (internal) lookup config key */ static enum rpmemd_option parse_config_key(const char *key) { for (int i = 0; options[i].name != 0; ++i) { if (strcmp(key, options[i].name) == 0) return (enum rpmemd_option)options[i].val; } return RPD_OPT_INVALID; } /* * parse_config_line -- (internal) parse single config line * * Return newly written option flag. Store possible errors in errno. */ static int parse_config_line(char *line, struct rpmemd_special_chars_pos *pos, struct rpmemd_config *config, uint64_t disabled) { if (pos->comment_char < pos->equal_char) pos->equal_char = INVALID_CHAR_POS; uint64_t end_of_content = pos->comment_char != INVALID_CHAR_POS ? 
pos->comment_char : pos->EOL_char; if (pos->equal_char == INVALID_CHAR_POS) { char *leftover = trim_line_element(line, 0, end_of_content); if (leftover != NULL) { errno = EINVAL; return -1; } else { return 0; } } char *key_name = trim_line_element(line, 0, pos->equal_char); char *value = trim_line_element(line, pos->equal_char + 1, end_of_content); if (key_name == NULL || value == NULL) { errno = EINVAL; return -1; } enum rpmemd_option key = parse_config_key(key_name); if (key != RPD_OPT_INVALID) { if ((disabled & (uint64_t)(1 << key)) == 0) if (set_option(key, value, config) != 0) return -1; } else { errno = EINVAL; return -1; } return 0; } /* * parse_config_file -- (internal) parse config file */ static int parse_config_file(const char *filename, struct rpmemd_config *config, uint64_t disabled, int required) { RPMEMD_ASSERT(filename != NULL); FILE *file = os_fopen(filename, "r"); if (file == NULL) { if (required) { RPMEMD_LOG(ERR, "!%s", filename); goto error_fopen; } else { goto optional_config_missing; } } uint8_t line_max_increased = 0; uint64_t line_max = CONFIG_LINE_SIZE_INIT; uint64_t line_num = 1; char *line = (char *)malloc(sizeof(char) * line_max); if (line == NULL) { RPMEMD_LOG(ERR, "!malloc"); goto error_malloc_line; } char *line_copy = (char *)malloc(sizeof(char) * line_max); if (line_copy == NULL) { RPMEMD_LOG(ERR, "!malloc"); goto error_malloc_line_copy; } struct rpmemd_special_chars_pos pos; do { memset(&pos, 0xff, sizeof(pos)); if (get_config_line(file, &line, &line_max, &line_max_increased, &pos) != 0) goto error; if (line_max_increased) { char *line_new = (char *)realloc(line_copy, sizeof(char) * line_max); if (line_new == NULL) { RPMEMD_LOG(ERR, "!malloc"); goto error; } line_copy = line_new; line_max_increased = 0; } if (pos.EOL_char != INVALID_CHAR_POS) { strcpy(line_copy, line); int ret = parse_config_line(line_copy, &pos, config, disabled); if (ret != 0) { size_t len = strlen(line); if (len > 0 && line[len - 1] == '\n') line[len - 1] = '\0'; RPMEMD_LOG(ERR, "Invalid config file line at " "%s:%lu\n%s", filename, line_num, line); goto error; } } ++line_num; } while (pos.EOL_char != INVALID_CHAR_POS); free(line_copy); free(line); fclose(file); optional_config_missing: return 0; error: free(line_copy); error_malloc_line_copy: free(line); error_malloc_line: fclose(file); error_fopen: return -1; } /* * parse_cl_args -- (internal) parse command line arguments */ static void parse_cl_args(int argc, char *argv[], struct rpmemd_config *config, const char **config_file, uint64_t *cl_options) { RPMEMD_ASSERT(argv != NULL); RPMEMD_ASSERT(config != NULL); int opt; int option_index = 0; while ((opt = getopt_long(argc, argv, optstr, options, &option_index)) != -1) { switch (opt) { case 'c': (*config_file) = optarg; break; case 'r': config->rm_poolset = optarg; break; case 'f': config->force = true; break; case 's': config->pool_set = true; break; case 't': errno = 0; char *endptr; config->nthreads = strtoul(optarg, &endptr, 10); if (errno || *endptr != '\0') { RPMEMD_LOG(ERR, "invalid number of threads -- '%s'", optarg); exit(-1); } break; case 'h': print_help(argv[0]); exit(0); case 'V': print_version(); exit(0); break; default: if (set_option((enum rpmemd_option)opt, optarg, config) == 0) { *cl_options |= (UINT64_C(1) << opt); } else { print_usage(argv[0]); exit(-1); } } } } /* * get_home_dir -- (internal) return user home directory * * Function will lookup user home directory in order: * 1. HOME environment variable * 2. 
Password file entry using real user ID */ static void get_home_dir(char *str, size_t size) { char *home = os_getenv(HOME_ENV); if (home) { int r = util_snprintf(str, size, "%s", home); if (r < 0) RPMEMD_FATAL("!snprintf"); } else { uid_t uid = getuid(); struct passwd *pw = getpwuid(uid); if (pw == NULL) RPMEMD_FATAL("!getpwuid"); int r = util_snprintf(str, size, "%s", pw->pw_dir); if (r < 0) RPMEMD_FATAL("!snprintf"); } } /* * concat_dir_and_file_name -- (internal) concatenate directory and file name * into single string path */ static void concat_dir_and_file_name(char *path, size_t size, const char *dir, const char *file) { int r = util_snprintf(path, size, "%s/%s", dir, file); if (r < 0) RPMEMD_FATAL("!snprintf"); } /* * str_replace_home -- (internal) replace $HOME string with user home directory * * If function does not find $HOME string it will return haystack untouched. * Otherwise it will allocate new string with $HOME replaced with provided * home_dir path. haystack will be released and newly created string returned. */ static char * str_replace_home(char *haystack, const char *home_dir) { const size_t placeholder_len = strlen(HOME_STR_PLACEHOLDER); const size_t home_len = strlen(home_dir); size_t haystack_len = strlen(haystack); char *pos = strstr(haystack, HOME_STR_PLACEHOLDER); if (!pos) return haystack; const char *after = pos + placeholder_len; if (isalnum(*after)) return haystack; haystack_len += home_len - placeholder_len + 1; char *buf = malloc(sizeof(char) * haystack_len); if (!buf) RPMEMD_FATAL("!malloc"); *pos = '\0'; int r = util_snprintf(buf, haystack_len, "%s%s%s", haystack, home_dir, after); if (r < 0) RPMEMD_FATAL("!snprintf"); free(haystack); return buf; } /* * config_set_default -- (internal) load default config */ static void config_set_default(struct rpmemd_config *config, const char *poolset_dir) { config->log_file = strdup(RPMEMD_DEFAULT_LOG_FILE); if (!config->log_file) RPMEMD_FATAL("!strdup"); config->poolset_dir = strdup(poolset_dir); if (!config->poolset_dir) RPMEMD_FATAL("!strdup"); config->persist_apm = false; config->persist_general = true; config->use_syslog = true; config->max_lanes = RPMEM_DEFAULT_MAX_LANES; config->log_level = RPD_LOG_ERR; config->rm_poolset = NULL; config->force = false; config->nthreads = RPMEM_DEFAULT_NTHREADS; } /* * rpmemd_config_read -- read config from cl and config files * * cl param overwrites configuration from any config file. Config file are read * in order: * 1. Global config file * 2. 
User config file * or * cl provided config file */ int rpmemd_config_read(struct rpmemd_config *config, int argc, char *argv[]) { const char *cl_config_file = NULL; char user_config_file[PATH_MAX]; char home_dir[PATH_MAX]; uint64_t cl_options = 0; get_home_dir(home_dir, PATH_MAX); config_set_default(config, home_dir); parse_cl_args(argc, argv, config, &cl_config_file, &cl_options); if (cl_config_file) { if (parse_config_file(cl_config_file, config, cl_options, 1)) { rpmemd_config_free(config); return 1; } } else { if (parse_config_file(RPMEMD_GLOBAL_CONFIG_FILE, config, cl_options, 0)) { rpmemd_config_free(config); return 1; } concat_dir_and_file_name(user_config_file, PATH_MAX, home_dir, RPMEMD_USER_CONFIG_FILE); if (parse_config_file(user_config_file, config, cl_options, 0)) { rpmemd_config_free(config); return 1; } } config->poolset_dir = str_replace_home(config->poolset_dir, home_dir); return 0; } /* * rpmemd_config_free -- rpmemd config release */ void rpmemd_config_free(struct rpmemd_config *config) { free(config->log_file); free(config->poolset_dir); }
15,007
22.413417
79
c
null
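
The rpmemd_config.c record above parses config files line by line: a '#' ends the useful part of a line, the text around '=' is trimmed of whitespace, and boolean options accept "yes"/"no" (or no value at all, meaning true). The snippet below re-implements just that trimming and boolean parsing as standalone helpers and runs them on one hypothetical "key = value # comment" line; it is an illustration of the logic, not the project's parse_config_line().

#include <cctype>
#include <cstdio>
#include <cstring>

static int parse_bool(bool *out, const char *value) {
  if (value == NULL || std::strcmp(value, "yes") == 0) { *out = true;  return 0; }
  if (std::strcmp(value, "no") == 0)                   { *out = false; return 0; }
  return -1;   /* anything else is rejected, as in parse_config_bool() */
}

/* trim whitespace inside line[start..end) and terminate the element in place */
static char *trim(char *line, size_t start, size_t end) {
  while (start <= end && std::isspace((unsigned char)line[start]))
    ++start;
  while (end > start && std::isspace((unsigned char)line[end - 1]))
    --end;
  if (start == end)
    return NULL;               /* nothing but whitespace */
  line[end] = '\0';
  return &line[start];
}

int main() {
  char line[] = "  use-syslog =  yes   # switch to syslog";

  char *hash = std::strchr(line, '#');
  char *eq = std::strchr(line, '=');
  if (hash)
    *hash = '\0';              /* drop the comment, as the parser does */
  if (!eq)
    return 1;

  char *key = trim(line, 0, (size_t)(eq - line));
  char *value = trim(eq + 1, 0, std::strlen(eq + 1));

  bool use_syslog = false;
  if (key && value && parse_bool(&use_syslog, value) == 0)
    std::printf("%s -> %s\n", key, use_syslog ? "true" : "false");
  return 0;
}
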
NearPMSW-main/nearpm/shadow/pmdk-sd/src/tools/rpmemd/rpmemd_db.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2016-2020, Intel Corporation */ /* * rpmemd_db.c -- rpmemd database of pool set files */ #include <stdio.h> #include <stdint.h> #include <errno.h> #include <string.h> #include <unistd.h> #include <dirent.h> #include <sys/file.h> #include <sys/mman.h> #include "queue.h" #include "set.h" #include "os.h" #include "out.h" #include "file.h" #include "sys_util.h" #include "librpmem.h" #include "rpmemd_db.h" #include "rpmemd_log.h" /* * struct rpmemd_db -- pool set database structure */ struct rpmemd_db { os_mutex_t lock; char *root_dir; mode_t mode; }; /* * declaration of the 'struct list_head' type */ PMDK_LIST_HEAD(list_head, rpmemd_db_entry); /* * struct rpmemd_db_entry -- entry in the pool set list */ struct rpmemd_db_entry { PMDK_LIST_ENTRY(rpmemd_db_entry) next; char *pool_desc; struct pool_set *set; }; /* * rpmemd_db_init -- initialize the rpmem database of pool set files */ struct rpmemd_db * rpmemd_db_init(const char *root_dir, mode_t mode) { if (root_dir[0] != '/') { RPMEMD_LOG(ERR, "root directory is not an absolute path" " -- '%s'", root_dir); errno = EINVAL; return NULL; } struct rpmemd_db *db = calloc(1, sizeof(*db)); if (!db) { RPMEMD_LOG(ERR, "!allocating the rpmem database structure"); return NULL; } db->root_dir = strdup(root_dir); if (!db->root_dir) { RPMEMD_LOG(ERR, "!allocating the root dir path"); free(db); return NULL; } db->mode = mode; util_mutex_init(&db->lock); return db; } /* * rpmemd_db_concat -- (internal) concatenate two paths */ static char * rpmemd_db_concat(const char *path1, const char *path2) { size_t len1 = strlen(path1); size_t len2 = strlen(path2); size_t new_len = len1 + len2 + 2; /* +1 for '/' in snprintf() */ if (path1[0] != '/') { RPMEMD_LOG(ERR, "the first path is not an absolute one -- '%s'", path1); errno = EINVAL; return NULL; } if (path2[0] == '/') { RPMEMD_LOG(ERR, "the second path is not a relative one -- '%s'", path2); /* set to EBADF to distinguish this case from other errors */ errno = EBADF; return NULL; } char *new_str = malloc(new_len); if (new_str == NULL) { RPMEMD_LOG(ERR, "!allocating path buffer"); return NULL; } int ret = util_snprintf(new_str, new_len, "%s/%s", path1, path2); if (ret < 0) { RPMEMD_LOG(ERR, "!snprintf"); free(new_str); errno = EINVAL; return NULL; } return new_str; } /* * rpmemd_db_get_path -- (internal) get the full path of the pool set file */ static char * rpmemd_db_get_path(struct rpmemd_db *db, const char *pool_desc) { return rpmemd_db_concat(db->root_dir, pool_desc); } /* * rpmemd_db_pool_madvise -- (internal) workaround device dax alignment issue */ static int rpmemd_db_pool_madvise(struct pool_set *set) { /* * This is a workaround for an issue with using device dax with * libibverbs. The problem is that we use ibv_fork_init(3) which * makes all registered memory being madvised with MADV_DONTFORK * flag. In libpmemobj the remote replication is performed without * pool header (first 4k). In such case the address passed to * madvise(2) is aligned to 4k, but device dax can require different * alignment (default is 2MB). This workaround madvises the entire * memory region before registering it by ibv_reg_mr(3). 
*/ const struct pool_set_part *part = &set->replica[0]->part[0]; if (part->is_dev_dax) { int ret = os_madvise(part->addr, part->filesize, MADV_DONTFORK); if (ret) { ERR("!madvise"); return -1; } } return 0; } /* * rpmemd_get_attr -- (internal) get pool attributes from remote pool attributes */ static void rpmemd_get_attr(struct pool_attr *attr, const struct rpmem_pool_attr *rattr) { LOG(3, "attr %p, rattr %p", attr, rattr); memcpy(attr->signature, rattr->signature, POOL_HDR_SIG_LEN); attr->major = rattr->major; attr->features.compat = rattr->compat_features; attr->features.incompat = rattr->incompat_features; attr->features.ro_compat = rattr->ro_compat_features; memcpy(attr->poolset_uuid, rattr->poolset_uuid, POOL_HDR_UUID_LEN); memcpy(attr->first_part_uuid, rattr->uuid, POOL_HDR_UUID_LEN); memcpy(attr->prev_repl_uuid, rattr->prev_uuid, POOL_HDR_UUID_LEN); memcpy(attr->next_repl_uuid, rattr->next_uuid, POOL_HDR_UUID_LEN); memcpy(attr->arch_flags, rattr->user_flags, POOL_HDR_ARCH_LEN); } /* * rpmemd_db_pool_create -- create a new pool set */ struct rpmemd_db_pool * rpmemd_db_pool_create(struct rpmemd_db *db, const char *pool_desc, size_t pool_size, const struct rpmem_pool_attr *rattr) { RPMEMD_ASSERT(db != NULL); util_mutex_lock(&db->lock); struct rpmemd_db_pool *prp = NULL; struct pool_set *set; char *path; int ret; prp = malloc(sizeof(struct rpmemd_db_pool)); if (!prp) { RPMEMD_LOG(ERR, "!allocating pool set db entry"); goto err_unlock; } path = rpmemd_db_get_path(db, pool_desc); if (!path) { goto err_free_prp; } struct pool_attr attr; struct pool_attr *pattr = NULL; if (rattr != NULL) { rpmemd_get_attr(&attr, rattr); pattr = &attr; } ret = util_pool_create_uuids(&set, path, 0, RPMEM_MIN_POOL, RPMEM_MIN_PART, pattr, NULL, REPLICAS_DISABLED, POOL_REMOTE); if (ret) { RPMEMD_LOG(ERR, "!cannot create pool set -- '%s'", path); goto err_free_path; } ret = util_poolset_chmod(set, db->mode); if (ret) { RPMEMD_LOG(ERR, "!cannot change pool set mode bits to 0%o", db->mode); } if (rpmemd_db_pool_madvise(set)) goto err_poolset_close; /* mark as opened */ prp->pool_addr = set->replica[0]->part[0].addr; prp->pool_size = set->poolsize; prp->set = set; free(path); util_mutex_unlock(&db->lock); return prp; err_poolset_close: util_poolset_close(set, DO_NOT_DELETE_PARTS); err_free_path: free(path); err_free_prp: free(prp); err_unlock: util_mutex_unlock(&db->lock); return NULL; } /* * rpmemd_db_pool_open -- open a pool set */ struct rpmemd_db_pool * rpmemd_db_pool_open(struct rpmemd_db *db, const char *pool_desc, size_t pool_size, struct rpmem_pool_attr *rattr) { RPMEMD_ASSERT(db != NULL); RPMEMD_ASSERT(rattr != NULL); util_mutex_lock(&db->lock); struct rpmemd_db_pool *prp = NULL; struct pool_set *set; char *path; int ret; prp = malloc(sizeof(struct rpmemd_db_pool)); if (!prp) { RPMEMD_LOG(ERR, "!allocating pool set db entry"); goto err_unlock; } path = rpmemd_db_get_path(db, pool_desc); if (!path) { goto err_free_prp; } ret = util_pool_open_remote(&set, path, 0, RPMEM_MIN_PART, rattr); if (ret) { RPMEMD_LOG(ERR, "!cannot open pool set -- '%s'", path); goto err_free_path; } if (rpmemd_db_pool_madvise(set)) goto err_poolset_close; /* mark as opened */ prp->pool_addr = set->replica[0]->part[0].addr; prp->pool_size = set->poolsize; prp->set = set; free(path); util_mutex_unlock(&db->lock); return prp; err_poolset_close: util_poolset_close(set, DO_NOT_DELETE_PARTS); err_free_path: free(path); err_free_prp: free(prp); err_unlock: util_mutex_unlock(&db->lock); return NULL; } /* * rpmemd_db_pool_close -- close a 
pool set */ void rpmemd_db_pool_close(struct rpmemd_db *db, struct rpmemd_db_pool *prp) { RPMEMD_ASSERT(db != NULL); util_mutex_lock(&db->lock); util_poolset_close(prp->set, DO_NOT_DELETE_PARTS); free(prp); util_mutex_unlock(&db->lock); } /* * rpmemd_db_pool_set_attr -- overwrite pool attributes */ int rpmemd_db_pool_set_attr(struct rpmemd_db_pool *prp, const struct rpmem_pool_attr *rattr) { RPMEMD_ASSERT(prp != NULL); RPMEMD_ASSERT(prp->set != NULL); RPMEMD_ASSERT(prp->set->nreplicas == 1); return util_replica_set_attr(prp->set->replica[0], rattr); } struct rm_cb_args { int force; int ret; }; /* * rm_poolset_cb -- (internal) callback for removing part files */ static int rm_poolset_cb(struct part_file *pf, void *arg) { struct rm_cb_args *args = (struct rm_cb_args *)arg; if (pf->is_remote) { RPMEMD_LOG(ERR, "removing remote replica not supported"); return -1; } int ret = util_unlink_flock(pf->part->path); if (!args->force && ret) { RPMEMD_LOG(ERR, "!unlink -- '%s'", pf->part->path); args->ret = ret; } return 0; } /* * rpmemd_db_pool_remove -- remove a pool set */ int rpmemd_db_pool_remove(struct rpmemd_db *db, const char *pool_desc, int force, int pool_set) { RPMEMD_ASSERT(db != NULL); RPMEMD_ASSERT(pool_desc != NULL); util_mutex_lock(&db->lock); struct rm_cb_args args; args.force = force; args.ret = 0; char *path; path = rpmemd_db_get_path(db, pool_desc); if (!path) { args.ret = -1; goto err_unlock; } int ret = util_poolset_foreach_part(path, rm_poolset_cb, &args); if (!force && ret) { RPMEMD_LOG(ERR, "!removing '%s' failed", path); args.ret = ret; goto err_free_path; } if (pool_set) os_unlink(path); err_free_path: free(path); err_unlock: util_mutex_unlock(&db->lock); return args.ret; } /* * rpmemd_db_fini -- deinitialize the rpmem database of pool set files */ void rpmemd_db_fini(struct rpmemd_db *db) { RPMEMD_ASSERT(db != NULL); util_mutex_destroy(&db->lock); free(db->root_dir); free(db); } /* * rpmemd_db_check_dups_set -- (internal) check for duplicates in the database */ static inline int rpmemd_db_check_dups_set(struct pool_set *set, const char *path) { for (unsigned r = 0; r < set->nreplicas; r++) { struct pool_replica *rep = set->replica[r]; for (unsigned p = 0; p < rep->nparts; p++) { if (strcmp(path, rep->part[p].path) == 0) return -1; } } return 0; } /* * rpmemd_db_check_dups -- (internal) check for duplicates in the database */ static int rpmemd_db_check_dups(struct list_head *head, struct rpmemd_db *db, const char *pool_desc, struct pool_set *set) { struct rpmemd_db_entry *edb; PMDK_LIST_FOREACH(edb, head, next) { for (unsigned r = 0; r < edb->set->nreplicas; r++) { struct pool_replica *rep = edb->set->replica[r]; for (unsigned p = 0; p < rep->nparts; p++) { if (rpmemd_db_check_dups_set(set, rep->part[p].path)) { RPMEMD_LOG(ERR, "part file '%s' from " "pool set '%s' duplicated in " "pool set '%s'", rep->part[p].path, pool_desc, edb->pool_desc); errno = EEXIST; return -1; } } } } return 0; } /* * rpmemd_db_add -- (internal) add an entry for a given set to the database */ static struct rpmemd_db_entry * rpmemd_db_add(struct list_head *head, struct rpmemd_db *db, const char *pool_desc, struct pool_set *set) { struct rpmemd_db_entry *edb; edb = calloc(1, sizeof(*edb)); if (!edb) { RPMEMD_LOG(ERR, "!allocating database entry"); goto err_calloc; } edb->set = set; edb->pool_desc = strdup(pool_desc); if (!edb->pool_desc) { RPMEMD_LOG(ERR, "!allocating path for database entry"); goto err_strdup; } PMDK_LIST_INSERT_HEAD(head, edb, next); return edb; err_strdup: free(edb); err_calloc: 
return NULL; } /* * new_paths -- (internal) create two new paths */ static int new_paths(const char *dir, const char *name, const char *old_desc, char **path, char **new_desc) { *path = rpmemd_db_concat(dir, name); if (!(*path)) return -1; if (old_desc[0] != 0) *new_desc = rpmemd_db_concat(old_desc, name); else { *new_desc = strdup(name); if (!(*new_desc)) { RPMEMD_LOG(ERR, "!allocating new descriptor"); } } if (!(*new_desc)) { free(*path); return -1; } return 0; } /* * rpmemd_db_check_dir_r -- (internal) recursively check given directory * for duplicates */ static int rpmemd_db_check_dir_r(struct list_head *head, struct rpmemd_db *db, const char *dir, char *pool_desc) { char *new_dir, *new_desc, *full_path; struct dirent *dentry; struct pool_set *set = NULL; DIR *dirp; int ret = 0; dirp = opendir(dir); if (dirp == NULL) { RPMEMD_LOG(ERR, "cannot open the directory -- %s", dir); return -1; } while ((dentry = readdir(dirp)) != NULL) { if (strcmp(dentry->d_name, ".") == 0 || strcmp(dentry->d_name, "..") == 0) continue; if (dentry->d_type == DT_DIR) { /* directory */ if (new_paths(dir, dentry->d_name, pool_desc, &new_dir, &new_desc)) goto err_closedir; /* call recursively for a new directory */ ret = rpmemd_db_check_dir_r(head, db, new_dir, new_desc); free(new_dir); free(new_desc); if (ret) goto err_closedir; continue; } if (new_paths(dir, dentry->d_name, pool_desc, &full_path, &new_desc)) { goto err_closedir; } if (util_poolset_read(&set, full_path)) { RPMEMD_LOG(ERR, "!error reading pool set file -- %s", full_path); goto err_free_paths; } if (rpmemd_db_check_dups(head, db, new_desc, set)) { RPMEMD_LOG(ERR, "!duplicate found in pool set file" " -- %s", full_path); goto err_free_set; } if (rpmemd_db_add(head, db, new_desc, set) == NULL) { goto err_free_set; } free(new_desc); free(full_path); } closedir(dirp); return 0; err_free_set: util_poolset_close(set, DO_NOT_DELETE_PARTS); err_free_paths: free(new_desc); free(full_path); err_closedir: closedir(dirp); return -1; } /* * rpmemd_db_check_dir -- check given directory for duplicates */ int rpmemd_db_check_dir(struct rpmemd_db *db) { RPMEMD_ASSERT(db != NULL); util_mutex_lock(&db->lock); struct list_head head; PMDK_LIST_INIT(&head); int ret = rpmemd_db_check_dir_r(&head, db, db->root_dir, ""); while (!PMDK_LIST_EMPTY(&head)) { struct rpmemd_db_entry *edb = PMDK_LIST_FIRST(&head); PMDK_LIST_REMOVE(edb, next); util_poolset_close(edb->set, DO_NOT_DELETE_PARTS); free(edb->pool_desc); free(edb); } util_mutex_unlock(&db->lock); return ret; } /* * rpmemd_db_pool_is_pmem -- true if pool is in PMEM */ int rpmemd_db_pool_is_pmem(struct rpmemd_db_pool *pool) { return REP(pool->set, 0)->is_pmem; }
13,747
20.616352
80
c
null
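The database above hides pool set handling behind a small API; a hedged usage sketch follows. The root directory, descriptor and mode are placeholders, and the size argument of rpmemd_db_pool_open() is passed as 0 since the shown implementation does not use it.

#include "librpmem.h"
#include "rpmemd_db.h"

static int
db_open_example(void)
{
	/* the root must be an absolute path, the descriptor a relative one */
	struct rpmemd_db *db = rpmemd_db_init("/mnt/pmem/poolsets", 0664);
	if (db == NULL)
		return -1;

	int ret = -1;
	struct rpmem_pool_attr attr;	/* filled by the open call */
	struct rpmemd_db_pool *prp =
		rpmemd_db_pool_open(db, "pool0.set", 0, &attr);
	if (prp != NULL) {
		/* prp->pool_addr / prp->pool_size describe the mapped set */
		rpmemd_db_pool_close(db, prp);
		ret = 0;
	}

	rpmemd_db_fini(db);
	return ret;
}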
NearPMSW-main/nearpm/shadow/pmdk-sd/src/tools/rpmemd/rpmemd_fip.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2016-2018, Intel Corporation */ /* * rpmemd_fip.h -- rpmemd libfabric provider module header file */ #include <stddef.h> struct rpmemd_fip; struct rpmemd_fip_attr { void *addr; size_t size; unsigned nlanes; size_t nthreads; size_t buff_size; enum rpmem_provider provider; enum rpmem_persist_method persist_method; int (*persist)(const void *addr, size_t len); void *(*memcpy_persist)(void *pmemdest, const void *src, size_t len); int (*deep_persist)(const void *addr, size_t len, void *ctx); void *ctx; }; struct rpmemd_fip *rpmemd_fip_init(const char *node, const char *service, struct rpmemd_fip_attr *attr, struct rpmem_resp_attr *resp, enum rpmem_err *err); void rpmemd_fip_fini(struct rpmemd_fip *fip); int rpmemd_fip_accept(struct rpmemd_fip *fip, int timeout); int rpmemd_fip_process_start(struct rpmemd_fip *fip); int rpmemd_fip_process_stop(struct rpmemd_fip *fip); int rpmemd_fip_wait_close(struct rpmemd_fip *fip, int timeout); int rpmemd_fip_close(struct rpmemd_fip *fip);
1,066
27.078947
70
h
null
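The header only declares the libfabric provider module, so the sketch below is a rough lifecycle guess rather than rpmemd's actual control flow: the node/service strings, the timeout and the pre-filled attr are placeholders, and the exact ordering of the shutdown calls in rpmemd proper may differ.

static int
fip_serve(struct rpmemd_fip_attr *attr)
{
	struct rpmem_resp_attr resp;
	enum rpmem_err err;

	/* listen on a placeholder address/port; resp is returned to the client */
	struct rpmemd_fip *fip = rpmemd_fip_init("192.168.0.1", "7636",
			attr, &resp, &err);
	if (fip == NULL)
		return -1;

	if (rpmemd_fip_accept(fip, 30000) == 0) {	/* 30 s timeout */
		rpmemd_fip_process_start(fip);
		/* ... remote persist requests are serviced here ... */
		rpmemd_fip_process_stop(fip);
		rpmemd_fip_close(fip);
	}

	rpmemd_fip_fini(fip);
	return 0;
}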
NearPMSW-main/nearpm/shadow/pmdk-sd/src/tools/rpmemd/rpmemd_obc.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2016-2018, Intel Corporation */ /* * rpmemd_obc.h -- rpmemd out-of-band connection declarations */ #include <stdint.h> #include <sys/types.h> #include <sys/socket.h> struct rpmemd_obc; struct rpmemd_obc_requests { int (*create)(struct rpmemd_obc *obc, void *arg, const struct rpmem_req_attr *req, const struct rpmem_pool_attr *pool_attr); int (*open)(struct rpmemd_obc *obc, void *arg, const struct rpmem_req_attr *req); int (*close)(struct rpmemd_obc *obc, void *arg, int flags); int (*set_attr)(struct rpmemd_obc *obc, void *arg, const struct rpmem_pool_attr *pool_attr); }; struct rpmemd_obc *rpmemd_obc_init(int fd_in, int fd_out); void rpmemd_obc_fini(struct rpmemd_obc *obc); int rpmemd_obc_status(struct rpmemd_obc *obc, uint32_t status); int rpmemd_obc_process(struct rpmemd_obc *obc, struct rpmemd_obc_requests *req_cb, void *arg); int rpmemd_obc_create_resp(struct rpmemd_obc *obc, int status, const struct rpmem_resp_attr *res); int rpmemd_obc_open_resp(struct rpmemd_obc *obc, int status, const struct rpmem_resp_attr *res, const struct rpmem_pool_attr *pool_attr); int rpmemd_obc_set_attr_resp(struct rpmemd_obc *obc, int status); int rpmemd_obc_close_resp(struct rpmemd_obc *obc, int status);
1,296
31.425
65
h
null
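A hedged sketch of how the out-of-band channel might be wired up from these declarations: only the open callback is shown, its body is a stub, and the zero-filled response attributes stand in for values the real handler would take from the opened pool.

static int
req_open(struct rpmemd_obc *obc, void *arg, const struct rpmem_req_attr *req)
{
	/* open the pool described by req, then answer the client */
	struct rpmem_resp_attr resp = {0};	/* filled from the opened pool */
	struct rpmem_pool_attr pool_attr = {0};	/* read from the pool header */

	return rpmemd_obc_open_resp(obc, 0, &resp, &pool_attr);
}

static int
obc_serve(int fd_in, int fd_out)
{
	struct rpmemd_obc *obc = rpmemd_obc_init(fd_in, fd_out);
	if (obc == NULL)
		return -1;

	struct rpmemd_obc_requests cbs = {
		.open = req_open,
		/* .create, .close and .set_attr are handled analogously */
	};

	/* dispatch incoming out-of-band requests to the callbacks */
	int ret = rpmemd_obc_process(obc, &cbs, NULL);

	rpmemd_obc_fini(obc);
	return ret;
}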
NearPMSW-main/nearpm/shadow/pmdk-sd/src/tools/pmempool/create.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2014-2019, Intel Corporation */ /* * create.c -- pmempool create command source file */ #include <stdio.h> #include <getopt.h> #include <stdlib.h> #include <fcntl.h> #include <unistd.h> #include <string.h> #include <sys/types.h> #include <sys/stat.h> #include <sys/statvfs.h> #include <errno.h> #include <libgen.h> #include <err.h> #include "common.h" #include "file.h" #include "create.h" #include "os.h" #include "set.h" #include "output.h" #include "libpmemblk.h" #include "libpmemlog.h" #include "libpmempool.h" #define DEFAULT_MODE 0664 /* * pmempool_create -- context and args for create command */ struct pmempool_create { int verbose; char *fname; int fexists; char *inherit_fname; int max_size; char *str_type; struct pmem_pool_params params; struct pmem_pool_params inherit_params; char *str_size; char *str_mode; char *str_bsize; uint64_t csize; int write_btt_layout; int force; char *layout; struct options *opts; int clearbadblocks; }; /* * pmempool_create_default -- default args for create command */ static const struct pmempool_create pmempool_create_default = { .verbose = 0, .fname = NULL, .fexists = 0, .inherit_fname = NULL, .max_size = 0, .str_type = NULL, .str_bsize = NULL, .csize = 0, .write_btt_layout = 0, .force = 0, .layout = NULL, .clearbadblocks = 0, .params = { .type = PMEM_POOL_TYPE_UNKNOWN, .size = 0, .mode = DEFAULT_MODE, } }; /* * help_str -- string for help message */ static const char * const help_str = "Create pmem pool of specified size, type and name\n" "\n" "Common options:\n" " -s, --size <size> size of pool\n" " -M, --max-size use maximum available space on file system\n" " -m, --mode <octal> set permissions to <octal> (the default is 0664)\n" " -i, --inherit <file> take required parameters from specified pool file\n" " -b, --clear-bad-blocks clear bad blocks in existing files\n" " -f, --force remove the pool first\n" " -v, --verbose increase verbosity level\n" " -h, --help display this help and exit\n" "\n" "Options for PMEMBLK:\n" " -w, --write-layout force writing the BTT layout\n" "\n" "Options for PMEMOBJ:\n" " -l, --layout <name> layout name stored in pool's header\n" "\n" "For complete documentation see %s-create(1) manual page.\n" ; /* * long_options -- command line options */ static const struct option long_options[] = { {"size", required_argument, NULL, 's' | OPT_ALL}, {"verbose", no_argument, NULL, 'v' | OPT_ALL}, {"help", no_argument, NULL, 'h' | OPT_ALL}, {"max-size", no_argument, NULL, 'M' | OPT_ALL}, {"inherit", required_argument, NULL, 'i' | OPT_ALL}, {"mode", required_argument, NULL, 'm' | OPT_ALL}, {"write-layout", no_argument, NULL, 'w' | OPT_BLK}, {"layout", required_argument, NULL, 'l' | OPT_OBJ}, {"force", no_argument, NULL, 'f' | OPT_ALL}, {"clear-bad-blocks", no_argument, NULL, 'b' | OPT_ALL}, {NULL, 0, NULL, 0 }, }; /* * print_usage -- print application usage short description */ static void print_usage(const char *appname) { printf("Usage: %s create [<args>] <blk|log|obj> [<bsize>] <file>\n", appname); } /* * print_version -- print version string */ static void print_version(const char *appname) { printf("%s %s\n", appname, SRCVERSION); } /* * pmempool_create_help -- print help message for create command */ void pmempool_create_help(const char *appname) { print_usage(appname); print_version(appname); printf(help_str, appname); } /* * pmempool_create_obj -- create pmem obj pool */ static int pmempool_create_obj(struct pmempool_create *pcp) { PMEMobjpool *pop = pmemobj_create(pcp->fname, 
pcp->layout, pcp->params.size, pcp->params.mode); if (!pop) { outv_err("'%s' -- %s\n", pcp->fname, pmemobj_errormsg()); return -1; } pmemobj_close(pop); return 0; } /* * pmempool_create_blk -- create pmem blk pool */ static int pmempool_create_blk(struct pmempool_create *pcp) { ASSERTne(pcp->params.blk.bsize, 0); int ret = 0; PMEMblkpool *pbp = pmemblk_create(pcp->fname, pcp->params.blk.bsize, pcp->params.size, pcp->params.mode); if (!pbp) { outv_err("'%s' -- %s\n", pcp->fname, pmemblk_errormsg()); return -1; } if (pcp->write_btt_layout) { outv(1, "Writing BTT layout using block %d.\n", pcp->write_btt_layout); if (pmemblk_set_error(pbp, 0) || pmemblk_set_zero(pbp, 0)) { outv_err("writing BTT layout to block 0 failed\n"); ret = -1; } } pmemblk_close(pbp); return ret; } /* * pmempool_create_log -- create pmem log pool */ static int pmempool_create_log(struct pmempool_create *pcp) { PMEMlogpool *plp = pmemlog_create(pcp->fname, pcp->params.size, pcp->params.mode); if (!plp) { outv_err("'%s' -- %s\n", pcp->fname, pmemlog_errormsg()); return -1; } pmemlog_close(plp); return 0; } /* * pmempool_get_max_size -- return maximum allowed size of file */ #ifndef _WIN32 static int pmempool_get_max_size(const char *fname, uint64_t *sizep) { struct statvfs buf; int ret = 0; char *name = strdup(fname); if (name == NULL) { return -1; } char *dir = dirname(name); if (statvfs(dir, &buf)) ret = -1; else *sizep = buf.f_bsize * buf.f_bavail; free(name); return ret; } #else static int pmempool_get_max_size(const char *fname, uint64_t *sizep) { int ret = 0; ULARGE_INTEGER freespace; char *name = strdup(fname); if (name == NULL) { return -1; } char *dir = dirname(name); wchar_t *str = util_toUTF16(dir); if (str == NULL) { free(name); return -1; } if (GetDiskFreeSpaceExW(str, &freespace, NULL, NULL) == 0) ret = -1; else *sizep = freespace.QuadPart; free(str); free(name); return ret; } #endif /* * print_pool_params -- print some parameters of a pool */ static void print_pool_params(struct pmem_pool_params *params) { outv(1, "\ttype : %s\n", out_get_pool_type_str(params->type)); outv(1, "\tsize : %s\n", out_get_size_str(params->size, 2)); outv(1, "\tmode : 0%o\n", params->mode); switch (params->type) { case PMEM_POOL_TYPE_BLK: outv(1, "\tbsize : %s\n", out_get_size_str(params->blk.bsize, 0)); break; case PMEM_POOL_TYPE_OBJ: outv(1, "\tlayout: '%s'\n", params->obj.layout); break; default: break; } } /* * inherit_pool_params -- inherit pool parameters from specified file */ static int inherit_pool_params(struct pmempool_create *pcp) { outv(1, "Parsing pool: '%s'\n", pcp->inherit_fname); /* * If no type string passed, --inherit option must be passed * so parse file and get required parameters. 
*/ if (pmem_pool_parse_params(pcp->inherit_fname, &pcp->inherit_params, 1)) { if (errno) perror(pcp->inherit_fname); else outv_err("%s: cannot determine type of pool\n", pcp->inherit_fname); return -1; } if (PMEM_POOL_TYPE_UNKNOWN == pcp->inherit_params.type) { outv_err("'%s' -- unknown pool type\n", pcp->inherit_fname); return -1; } print_pool_params(&pcp->inherit_params); return 0; } /* * pmempool_create_parse_args -- parse command line args */ static int pmempool_create_parse_args(struct pmempool_create *pcp, const char *appname, int argc, char *argv[], struct options *opts) { int opt, ret; while ((opt = util_options_getopt(argc, argv, "vhi:s:Mm:l:wfb", opts)) != -1) { switch (opt) { case 'v': pcp->verbose = 1; break; case 'h': pmempool_create_help(appname); exit(EXIT_SUCCESS); case 's': pcp->str_size = optarg; ret = util_parse_size(optarg, (size_t *)&pcp->params.size); if (ret || pcp->params.size == 0) { outv_err("invalid size value specified '%s'\n", optarg); return -1; } break; case 'M': pcp->max_size = 1; break; case 'm': pcp->str_mode = optarg; if (util_parse_mode(optarg, &pcp->params.mode)) { outv_err("invalid mode value specified '%s'\n", optarg); return -1; } break; case 'i': pcp->inherit_fname = optarg; break; case 'w': pcp->write_btt_layout = 1; break; case 'l': pcp->layout = optarg; break; case 'f': pcp->force = 1; break; case 'b': pcp->clearbadblocks = 1; break; default: print_usage(appname); return -1; } } /* check for <type>, <bsize> and <file> strings */ if (optind + 2 < argc) { pcp->str_type = argv[optind]; pcp->str_bsize = argv[optind + 1]; pcp->fname = argv[optind + 2]; } else if (optind + 1 < argc) { pcp->str_type = argv[optind]; pcp->fname = argv[optind + 1]; } else if (optind < argc) { pcp->fname = argv[optind]; pcp->str_type = NULL; } else { print_usage(appname); return -1; } return 0; } static int allocate_max_size_available_file(const char *name_of_file, mode_t mode, os_off_t max_size) { int fd = os_open(name_of_file, O_CREAT | O_EXCL | O_RDWR, mode); if (fd == -1) { outv_err("!open '%s' failed", name_of_file); return -1; } os_off_t offset = 0; os_off_t length = max_size - (max_size % (os_off_t)Pagesize); int ret; do { ret = os_posix_fallocate(fd, offset, length); if (ret == 0) offset += length; else if (ret != ENOSPC) { os_close(fd); if (os_unlink(name_of_file) == -1) outv_err("!unlink '%s' failed", name_of_file); errno = ret; outv_err("!space allocation for '%s' failed", name_of_file); return -1; } length /= 2; length -= (length % (os_off_t)Pagesize); } while (length > (os_off_t)Pagesize); os_close(fd); return 0; } /* * pmempool_create_func -- main function for create command */ int pmempool_create_func(const char *appname, int argc, char *argv[]) { int ret = 0; struct pmempool_create pc = pmempool_create_default; pc.opts = util_options_alloc(long_options, sizeof(long_options) / sizeof(long_options[0]), NULL); /* parse command line arguments */ ret = pmempool_create_parse_args(&pc, appname, argc, argv, pc.opts); if (ret) exit(EXIT_FAILURE); /* set verbosity level */ out_set_vlevel(pc.verbose); umask(0); int exists = util_file_exists(pc.fname); if (exists < 0) return -1; pc.fexists = exists; int is_poolset = util_is_poolset_file(pc.fname) == 1; if (pc.inherit_fname) { if (inherit_pool_params(&pc)) { outv_err("parsing pool '%s' failed\n", pc.inherit_fname); return -1; } } /* * Parse pool type and other parameters if --inherit option * passed. It is possible to either pass --inherit option * or pool type string in command line arguments. This is * validated here. 
*/ if (pc.str_type) { /* parse pool type string if passed in command line arguments */ pc.params.type = pmem_pool_type_parse_str(pc.str_type); if (PMEM_POOL_TYPE_UNKNOWN == pc.params.type) { outv_err("'%s' -- unknown pool type\n", pc.str_type); return -1; } if (PMEM_POOL_TYPE_BLK == pc.params.type) { if (pc.str_bsize == NULL) { outv_err("blk pool requires <bsize> " "argument\n"); return -1; } if (util_parse_size(pc.str_bsize, (size_t *)&pc.params.blk.bsize)) { outv_err("cannot parse '%s' as block size\n", pc.str_bsize); return -1; } } if (PMEM_POOL_TYPE_OBJ == pc.params.type && pc.layout != NULL) { size_t max_layout = PMEMOBJ_MAX_LAYOUT; if (strlen(pc.layout) >= max_layout) { outv_err( "Layout name is too long, maximum number of characters (including the terminating null byte) is %zu\n", max_layout); return -1; } size_t len = sizeof(pc.params.obj.layout); strncpy(pc.params.obj.layout, pc.layout, len); pc.params.obj.layout[len - 1] = '\0'; } } else if (pc.inherit_fname) { pc.params.type = pc.inherit_params.type; } else { /* neither pool type string nor --inherit options passed */ print_usage(appname); return -1; } if (util_options_verify(pc.opts, pc.params.type)) return -1; if (pc.params.type != PMEM_POOL_TYPE_BLK && pc.str_bsize != NULL) { outv_err("invalid option specified for %s pool type" " -- block size\n", out_get_pool_type_str(pc.params.type)); return -1; } if (is_poolset) { if (pc.params.size) { outv_err("-s|--size cannot be used with " "poolset file\n"); return -1; } if (pc.max_size) { outv_err("-M|--max-size cannot be used with " "poolset file\n"); return -1; } } if (pc.params.size && pc.max_size) { outv_err("-M|--max-size option cannot be used with -s|--size" " option\n"); return -1; } if (pc.inherit_fname) { if (!pc.str_size && !pc.max_size) pc.params.size = pc.inherit_params.size; if (!pc.str_mode) pc.params.mode = pc.inherit_params.mode; switch (pc.params.type) { case PMEM_POOL_TYPE_BLK: if (!pc.str_bsize) pc.params.blk.bsize = pc.inherit_params.blk.bsize; break; case PMEM_POOL_TYPE_OBJ: if (!pc.layout) { memcpy(pc.params.obj.layout, pc.inherit_params.obj.layout, sizeof(pc.params.obj.layout)); } else { size_t len = sizeof(pc.params.obj.layout); strncpy(pc.params.obj.layout, pc.layout, len - 1); pc.params.obj.layout[len - 1] = '\0'; } break; default: break; } } /* * If neither --size nor --inherit options passed, check * for --max-size option - if not passed use minimum pool size. */ uint64_t min_size = pmem_pool_get_min_size(pc.params.type); if (pc.params.size == 0) { if (pc.max_size) { outv(1, "Maximum size option passed " "- getting available space of file system.\n"); ret = pmempool_get_max_size(pc.fname, &pc.params.size); if (ret) { outv_err("cannot get available space of fs\n"); return -1; } if (pc.params.size == 0) { outv_err("No space left on device\n"); return -1; } outv(1, "Available space is %s\n", out_get_size_str(pc.params.size, 2)); if (allocate_max_size_available_file(pc.fname, pc.params.mode, (os_off_t)pc.params.size)) return -1; /* * We are going to create pool based * on file size instead of the pc.params.size. 
*/ pc.params.size = 0; } else { if (!pc.fexists) { outv(1, "No size option passed " "- picking minimum pool size.\n"); pc.params.size = min_size; } } } else { if (pc.params.size < min_size) { outv_err("size must be >= %lu bytes\n", min_size); return -1; } } if (pc.force) pmempool_rm(pc.fname, PMEMPOOL_RM_FORCE); outv(1, "Creating pool: %s\n", pc.fname); print_pool_params(&pc.params); if (pc.clearbadblocks) { int ret = util_pool_clear_badblocks(pc.fname, 1 /* ignore non-existing */); if (ret) { outv_err("'%s' -- clearing bad blocks failed\n", pc.fname); return -1; } } switch (pc.params.type) { case PMEM_POOL_TYPE_BLK: ret = pmempool_create_blk(&pc); break; case PMEM_POOL_TYPE_LOG: ret = pmempool_create_log(&pc); break; case PMEM_POOL_TYPE_OBJ: ret = pmempool_create_obj(&pc); break; default: ret = -1; break; } if (ret) { outv_err("creating pool file failed\n"); if (!pc.fexists) util_unlink(pc.fname); } util_options_free(pc.opts); return ret; }
14,987
21.403587
109
c
null
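For the obj case the command above is essentially a front end for pmemobj_create(3); a minimal stand-alone equivalent is sketched below. The file name and layout are placeholders, and 0664 matches the tool's DEFAULT_MODE.

#include <stdio.h>
#include <libpmemobj.h>

int
main(void)
{
	/* roughly: pmempool create obj --layout=my_layout pool.obj */
	PMEMobjpool *pop = pmemobj_create("pool.obj", "my_layout",
			PMEMOBJ_MIN_POOL, 0664);
	if (pop == NULL) {
		fprintf(stderr, "pmemobj_create: %s\n", pmemobj_errormsg());
		return 1;
	}
	pmemobj_close(pop);
	return 0;
}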
NearPMSW-main/nearpm/shadow/pmdk-sd/src/tools/pmempool/transform.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2016-2018, Intel Corporation */ /* * transform.c -- pmempool transform command source file */ #include <stdio.h> #include <libgen.h> #include <string.h> #include <unistd.h> #include <stdlib.h> #include <getopt.h> #include <stdbool.h> #include <sys/mman.h> #include <endian.h> #include "common.h" #include "output.h" #include "transform.h" #include "libpmempool.h" /* * pmempool_transform_context -- context and arguments for transform command */ struct pmempool_transform_context { unsigned flags; /* flags which modify the command execution */ char *poolset_file_src; /* a path to a source poolset file */ char *poolset_file_dst; /* a path to a target poolset file */ }; /* * pmempool_transform_default -- default arguments for transform command */ static const struct pmempool_transform_context pmempool_transform_default = { .flags = 0, .poolset_file_src = NULL, .poolset_file_dst = NULL, }; /* * help_str -- string for help message */ static const char * const help_str = "Modify internal structure of a poolset\n" "\n" "Common options:\n" " -d, --dry-run do not apply changes, only check for viability of" " transformation\n" " -v, --verbose increase verbosity level\n" " -h, --help display this help and exit\n" "\n" "For complete documentation see %s-transform(1) manual page.\n" ; /* * long_options -- command line options */ static const struct option long_options[] = { {"dry-run", no_argument, NULL, 'd'}, {"help", no_argument, NULL, 'h'}, {"verbose", no_argument, NULL, 'v'}, {NULL, 0, NULL, 0 }, }; /* * print_usage -- print application usage short description */ static void print_usage(const char *appname) { printf("usage: %s transform [<options>] <poolset_file_src>" " <poolset_file_dst>\n", appname); } /* * print_version -- print version string */ static void print_version(const char *appname) { printf("%s %s\n", appname, SRCVERSION); } /* * pmempool_transform_help -- print help message for the transform command */ void pmempool_transform_help(const char *appname) { print_usage(appname); print_version(appname); printf(help_str, appname); } /* * pmempool_check_parse_args -- parse command line arguments */ static int pmempool_transform_parse_args(struct pmempool_transform_context *ctx, const char *appname, int argc, char *argv[]) { int opt; while ((opt = getopt_long(argc, argv, "dhv", long_options, NULL)) != -1) { switch (opt) { case 'd': ctx->flags = PMEMPOOL_TRANSFORM_DRY_RUN; break; case 'h': pmempool_transform_help(appname); exit(EXIT_SUCCESS); case 'v': out_set_vlevel(1); break; default: print_usage(appname); exit(EXIT_FAILURE); } } if (optind + 1 < argc) { ctx->poolset_file_src = argv[optind]; ctx->poolset_file_dst = argv[optind + 1]; } else { print_usage(appname); exit(EXIT_FAILURE); } return 0; } /* * pmempool_transform_func -- main function for the transform command */ int pmempool_transform_func(const char *appname, int argc, char *argv[]) { int ret; struct pmempool_transform_context ctx = pmempool_transform_default; /* parse command line arguments */ if ((ret = pmempool_transform_parse_args(&ctx, appname, argc, argv))) return ret; ret = pmempool_transform(ctx.poolset_file_src, ctx.poolset_file_dst, ctx.flags); if (ret) { if (errno) outv_err("%s\n", strerror(errno)); outv_err("failed to transform %s -> %s: %s\n", ctx.poolset_file_src, ctx.poolset_file_dst, pmempool_errormsg()); return -1; } else { outv(1, "%s -> %s: transformed\n", ctx.poolset_file_src, ctx.poolset_file_dst); return 0; } }
3,689
21.919255
77
c
null
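The transform command is a thin CLI over the libpmempool entry point of the same name; a minimal sketch follows. The poolset paths are placeholders, and the dry-run flag corresponds to the -d option handled above.

#include <stdio.h>
#include <libpmempool.h>

int
main(void)
{
	/* only checks viability, like `pmempool transform -d old.set new.set` */
	if (pmempool_transform("old.set", "new.set",
			PMEMPOOL_TRANSFORM_DRY_RUN)) {
		fprintf(stderr, "transform failed: %s\n",
				pmempool_errormsg());
		return 1;
	}
	return 0;
}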
NearPMSW-main/nearpm/shadow/pmdk-sd/src/tools/pmempool/info_blk.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2014-2018, Intel Corporation */ /* * Copyright (c) 2016, Microsoft Corporation. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * info_blk.c -- pmempool info command source file for blk pool */ #include <stdbool.h> #include <stdlib.h> #include <string.h> #include <err.h> #include <sys/param.h> #include <endian.h> #include "os.h" #include "common.h" #include "output.h" #include "info.h" #include "btt.h" /* * pmempool_info_get_range -- get blocks/data chunk range * * Get range based on command line arguments and maximum value. * Return value: * 0 - range is empty * 1 - range is not empty */ static int pmempool_info_get_range(struct pmem_info *pip, struct range *rangep, struct range *curp, uint32_t max, uint64_t offset) { /* not using range */ if (!pip->args.use_range) { rangep->first = 0; rangep->last = max; return 1; } if (curp->first > offset + max) return 0; if (curp->first >= offset) rangep->first = curp->first - offset; else rangep->first = 0; if (curp->last < offset) return 0; if (curp->last <= offset + max) rangep->last = curp->last - offset; else rangep->last = max; return 1; } /* * info_blk_skip_block -- get action type for block/data chunk * * Return value indicating whether processing block/data chunk * should be skipped. 
* * Return values: * 0 - continue processing * 1 - skip current block */ static int info_blk_skip_block(struct pmem_info *pip, int is_zero, int is_error) { if (pip->args.blk.skip_no_flag && !is_zero && !is_error) return 1; if (is_zero && pip->args.blk.skip_zeros) return 1; if (is_error && pip->args.blk.skip_error) return 1; return 0; } /* * info_btt_data -- print block data and corresponding flags from map */ static int info_btt_data(struct pmem_info *pip, int v, struct btt_info *infop, uint64_t arena_off, uint64_t offset, uint64_t *countp) { if (!outv_check(v)) return 0; int ret = 0; size_t mapsize = infop->external_nlba * BTT_MAP_ENTRY_SIZE; uint32_t *map = malloc(mapsize); if (!map) err(1, "Cannot allocate memory for BTT map"); uint8_t *block_buff = malloc(infop->external_lbasize); if (!block_buff) err(1, "Cannot allocate memory for pmemblk block buffer"); /* read btt map area */ if (pmempool_info_read(pip, (uint8_t *)map, mapsize, arena_off + infop->mapoff)) { outv_err("wrong BTT Map size or offset\n"); ret = -1; goto error; } uint64_t i; struct range *curp = NULL; struct range range; FOREACH_RANGE(curp, &pip->args.ranges) { if (pmempool_info_get_range(pip, &range, curp, infop->external_nlba - 1, offset) == 0) continue; for (i = range.first; i <= range.last; i++) { uint32_t map_entry = le32toh(map[i]); int is_init = (map_entry & ~BTT_MAP_ENTRY_LBA_MASK) == 0; int is_zero = (map_entry & ~BTT_MAP_ENTRY_LBA_MASK) == BTT_MAP_ENTRY_ZERO || is_init; int is_error = (map_entry & ~BTT_MAP_ENTRY_LBA_MASK) == BTT_MAP_ENTRY_ERROR; uint64_t blockno = is_init ? i : map_entry & BTT_MAP_ENTRY_LBA_MASK; if (info_blk_skip_block(pip, is_zero, is_error)) continue; /* compute block's data address */ uint64_t block_off = arena_off + infop->dataoff + blockno * infop->internal_lbasize; if (pmempool_info_read(pip, block_buff, infop->external_lbasize, block_off)) { outv_err("cannot read %lu block\n", i); ret = -1; goto error; } if (*countp == 0) outv_title(v, "PMEM BLK blocks data"); /* * Print block number, offset and flags * from map entry. 
*/ outv(v, "Block %10lu: offset: %s\n", offset + i, out_get_btt_map_entry(map_entry)); /* dump block's data */ outv_hexdump(v, block_buff, infop->external_lbasize, block_off, 1); *countp = *countp + 1; } } error: free(map); free(block_buff); return ret; } /* * info_btt_map -- print all map entries */ static int info_btt_map(struct pmem_info *pip, int v, struct btt_info *infop, uint64_t arena_off, uint64_t offset, uint64_t *count) { if (!outv_check(v) && !outv_check(pip->args.vstats)) return 0; int ret = 0; size_t mapsize = infop->external_nlba * BTT_MAP_ENTRY_SIZE; uint32_t *map = malloc(mapsize); if (!map) err(1, "Cannot allocate memory for BTT map"); /* read btt map area */ if (pmempool_info_read(pip, (uint8_t *)map, mapsize, arena_off + infop->mapoff)) { outv_err("wrong BTT Map size or offset\n"); ret = -1; goto error; } uint32_t arena_count = 0; uint64_t i; struct range *curp = NULL; struct range range; FOREACH_RANGE(curp, &pip->args.ranges) { if (pmempool_info_get_range(pip, &range, curp, infop->external_nlba - 1, offset) == 0) continue; for (i = range.first; i <= range.last; i++) { uint32_t entry = le32toh(map[i]); int is_zero = (entry & ~BTT_MAP_ENTRY_LBA_MASK) == BTT_MAP_ENTRY_ZERO || (entry & ~BTT_MAP_ENTRY_LBA_MASK) == 0; int is_error = (entry & ~BTT_MAP_ENTRY_LBA_MASK) == BTT_MAP_ENTRY_ERROR; if (info_blk_skip_block(pip, is_zero, is_error) == 0) { if (arena_count == 0) outv_title(v, "PMEM BLK BTT Map"); if (is_zero) pip->blk.stats.zeros++; if (is_error) pip->blk.stats.errors++; if (!is_zero && !is_error) pip->blk.stats.noflag++; pip->blk.stats.total++; arena_count++; (*count)++; outv(v, "%010lu: %s\n", offset + i, out_get_btt_map_entry(entry)); } } } error: free(map); return ret; } /* * info_btt_flog -- print all flog entries */ static int info_btt_flog(struct pmem_info *pip, int v, struct btt_info *infop, uint64_t arena_off) { if (!outv_check(v)) return 0; int ret = 0; struct btt_flog *flogp = NULL; struct btt_flog *flogpp = NULL; uint64_t flog_size = infop->nfree * roundup(2 * sizeof(struct btt_flog), BTT_FLOG_PAIR_ALIGN); flog_size = roundup(flog_size, BTT_ALIGNMENT); uint8_t *buff = malloc(flog_size); if (!buff) err(1, "Cannot allocate memory for FLOG entries"); if (pmempool_info_read(pip, buff, flog_size, arena_off + infop->flogoff)) { outv_err("cannot read BTT FLOG"); ret = -1; goto error; } outv_title(v, "PMEM BLK BTT FLOG"); uint8_t *ptr = buff; uint32_t i; for (i = 0; i < infop->nfree; i++) { flogp = (struct btt_flog *)ptr; flogpp = flogp + 1; btt_flog_convert2h(flogp); btt_flog_convert2h(flogpp); outv(v, "%010d:\n", i); outv_field(v, "LBA", "0x%08x", flogp->lba); outv_field(v, "Old map", "0x%08x: %s", flogp->old_map, out_get_btt_map_entry(flogp->old_map)); outv_field(v, "New map", "0x%08x: %s", flogp->new_map, out_get_btt_map_entry(flogp->new_map)); outv_field(v, "Seq", "0x%x", flogp->seq); outv_field(v, "LBA'", "0x%08x", flogpp->lba); outv_field(v, "Old map'", "0x%08x: %s", flogpp->old_map, out_get_btt_map_entry(flogpp->old_map)); outv_field(v, "New map'", "0x%08x: %s", flogpp->new_map, out_get_btt_map_entry(flogpp->new_map)); outv_field(v, "Seq'", "0x%x", flogpp->seq); ptr += BTT_FLOG_PAIR_ALIGN; } error: free(buff); return ret; } /* * info_btt_stats -- print btt related statistics */ static void info_btt_stats(struct pmem_info *pip, int v) { if (pip->blk.stats.total > 0) { outv_title(v, "PMEM BLK Statistics"); double perc_zeros = (double)pip->blk.stats.zeros / (double)pip->blk.stats.total * 100.0; double perc_errors = (double)pip->blk.stats.errors / 
(double)pip->blk.stats.total * 100.0; double perc_noflag = (double)pip->blk.stats.noflag / (double)pip->blk.stats.total * 100.0; outv_field(v, "Total blocks", "%u", pip->blk.stats.total); outv_field(v, "Zeroed blocks", "%u [%s]", pip->blk.stats.zeros, out_get_percentage(perc_zeros)); outv_field(v, "Error blocks", "%u [%s]", pip->blk.stats.errors, out_get_percentage(perc_errors)); outv_field(v, "Blocks without flag", "%u [%s]", pip->blk.stats.noflag, out_get_percentage(perc_noflag)); } } /* * info_btt_info -- print btt_info structure fields */ static int info_btt_info(struct pmem_info *pip, int v, struct btt_info *infop) { outv_field(v, "Signature", "%.*s", BTTINFO_SIG_LEN, infop->sig); outv_field(v, "UUID of container", "%s", out_get_uuid_str(infop->parent_uuid)); outv_field(v, "Flags", "0x%x", infop->flags); outv_field(v, "Major", "%d", infop->major); outv_field(v, "Minor", "%d", infop->minor); outv_field(v, "External LBA size", "%s", out_get_size_str(infop->external_lbasize, pip->args.human)); outv_field(v, "External LBA count", "%u", infop->external_nlba); outv_field(v, "Internal LBA size", "%s", out_get_size_str(infop->internal_lbasize, pip->args.human)); outv_field(v, "Internal LBA count", "%u", infop->internal_nlba); outv_field(v, "Free blocks", "%u", infop->nfree); outv_field(v, "Info block size", "%s", out_get_size_str(infop->infosize, pip->args.human)); outv_field(v, "Next arena offset", "0x%lx", infop->nextoff); outv_field(v, "Arena data offset", "0x%lx", infop->dataoff); outv_field(v, "Area map offset", "0x%lx", infop->mapoff); outv_field(v, "Area flog offset", "0x%lx", infop->flogoff); outv_field(v, "Info block backup offset", "0x%lx", infop->infooff); outv_field(v, "Checksum", "%s", out_get_checksum(infop, sizeof(*infop), &infop->checksum, 0)); return 0; } /* * info_btt_layout -- print information about BTT layout */ static int info_btt_layout(struct pmem_info *pip, os_off_t btt_off) { int ret = 0; if (btt_off <= 0) { outv_err("wrong BTT layout offset\n"); return -1; } struct btt_info *infop = NULL; infop = malloc(sizeof(struct btt_info)); if (!infop) err(1, "Cannot allocate memory for BTT Info structure"); int narena = 0; uint64_t cur_lba = 0; uint64_t count_data = 0; uint64_t count_map = 0; uint64_t offset = (uint64_t)btt_off; uint64_t nextoff = 0; do { /* read btt info area */ if (pmempool_info_read(pip, infop, sizeof(*infop), offset)) { ret = -1; outv_err("cannot read BTT Info header\n"); goto err; } if (util_check_memory((uint8_t *)infop, sizeof(*infop), 0) == 0) { outv(1, "\n<No BTT layout>\n"); break; } outv(1, "\n[ARENA %d]", narena); outv_title(1, "PMEM BLK BTT Info Header"); outv_hexdump(pip->args.vhdrdump, infop, sizeof(*infop), offset, 1); btt_info_convert2h(infop); nextoff = infop->nextoff; /* print btt info fields */ if (info_btt_info(pip, 1, infop)) { ret = -1; goto err; } /* dump blocks data */ if (info_btt_data(pip, pip->args.vdata, infop, offset, cur_lba, &count_data)) { ret = -1; goto err; } /* print btt map entries and get statistics */ if (info_btt_map(pip, pip->args.blk.vmap, infop, offset, cur_lba, &count_map)) { ret = -1; goto err; } /* print flog entries */ if (info_btt_flog(pip, pip->args.blk.vflog, infop, offset)) { ret = -1; goto err; } /* increment LBA's counter before reading info backup */ cur_lba += infop->external_nlba; /* read btt info backup area */ if (pmempool_info_read(pip, infop, sizeof(*infop), offset + infop->infooff)) { outv_err("wrong BTT Info Backup size or offset\n"); ret = -1; goto err; } outv_title(pip->args.blk.vbackup, "PMEM BLK 
BTT Info Header Backup"); if (outv_check(pip->args.blk.vbackup)) outv_hexdump(pip->args.vhdrdump, infop, sizeof(*infop), offset + infop->infooff, 1); btt_info_convert2h(infop); info_btt_info(pip, pip->args.blk.vbackup, infop); offset += nextoff; narena++; } while (nextoff > 0); info_btt_stats(pip, pip->args.vstats); err: if (infop) free(infop); return ret; } /* * info_blk_descriptor -- print pmemblk descriptor */ static void info_blk_descriptor(struct pmem_info *pip, int v, struct pmemblk *pbp) { size_t pmemblk_size; #ifdef DEBUG pmemblk_size = offsetof(struct pmemblk, write_lock); #else pmemblk_size = sizeof(*pbp); #endif outv_title(v, "PMEM BLK Header"); /* dump pmemblk header without pool_hdr */ outv_hexdump(pip->args.vhdrdump, (uint8_t *)pbp + sizeof(pbp->hdr), pmemblk_size - sizeof(pbp->hdr), sizeof(pbp->hdr), 1); outv_field(v, "Block size", "%s", out_get_size_str(pbp->bsize, pip->args.human)); outv_field(v, "Is zeroed", pbp->is_zeroed ? "true" : "false"); } /* * pmempool_info_blk -- print information about block type pool */ int pmempool_info_blk(struct pmem_info *pip) { int ret; struct pmemblk *pbp = malloc(sizeof(struct pmemblk)); if (!pbp) err(1, "Cannot allocate memory for pmemblk structure"); if (pmempool_info_read(pip, pbp, sizeof(struct pmemblk), 0)) { outv_err("cannot read pmemblk header\n"); free(pbp); return -1; } info_blk_descriptor(pip, VERBOSE_DEFAULT, pbp); ssize_t btt_off = (char *)pbp->data - (char *)pbp->addr; ret = info_btt_layout(pip, btt_off); free(pbp); return ret; } /* * pmempool_info_btt -- print information about btt device */ int pmempool_info_btt(struct pmem_info *pip) { int ret; outv(1, "\nBTT Device"); ret = info_btt_layout(pip, DEFAULT_HDR_SIZE); return ret; }
14,565
24.644366
74
c
null
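The map handling in info_btt_map()/info_btt_data() above boils down to a small decode step; the helper below restates it in isolation. The BTT_MAP_ENTRY_* constants are assumed to be reachable through "btt.h", which the file itself includes.

#include <stdint.h>
#include <endian.h>
#include "btt.h"

/* mirrors the flag checks used when dumping BTT map entries above */
static uint64_t
btt_map_decode(uint32_t raw, uint64_t external_lba, int *is_zero, int *is_error)
{
	uint32_t entry = le32toh(raw);
	uint32_t flags = entry & ~BTT_MAP_ENTRY_LBA_MASK;
	int is_init = (flags == 0);	/* never written: identity mapping */

	*is_zero = is_init || flags == BTT_MAP_ENTRY_ZERO;
	*is_error = (flags == BTT_MAP_ENTRY_ERROR);

	/* internal block the external LBA currently resolves to */
	return is_init ? external_lba : (entry & BTT_MAP_ENTRY_LBA_MASK);
}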
NearPMSW-main/nearpm/shadow/pmdk-sd/src/tools/pmempool/info_obj.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2014-2019, Intel Corporation */ /* * info_obj.c -- pmempool info command source file for obj pool */ #include <stdlib.h> #include <stdbool.h> #include <err.h> #include <signal.h> #include <sys/stat.h> #include <sys/mman.h> #include <assert.h> #include <inttypes.h> #include "alloc_class.h" #include "set.h" #include "common.h" #include "output.h" #include "info.h" #include "util.h" #define BITMAP_BUFF_SIZE 1024 #define OFF_TO_PTR(pop, off) ((void *)((uintptr_t)(pop) + (off))) #define PTR_TO_OFF(pop, ptr) ((uintptr_t)(ptr) - (uintptr_t)(pop)) /* * lane_need_recovery -- return 1 if lane section needs recovery */ static int lane_need_recovery(struct pmem_info *pip, struct lane_layout *lane) { return ulog_recovery_needed((struct ulog *)&lane->external, 1) || ulog_recovery_needed((struct ulog *)&lane->internal, 1) || ulog_recovery_needed((struct ulog *)&lane->undo, 0); } #define RUN_BITMAP_SEPARATOR_DISTANCE 8 /* * get_bitmap_str -- get bitmap single value string */ static const char * get_bitmap_str(uint64_t val, unsigned values) { static char buff[BITMAP_BUFF_SIZE]; unsigned j = 0; for (unsigned i = 0; i < values && j < BITMAP_BUFF_SIZE - 3; i++) { buff[j++] = ((val & ((uint64_t)1 << i)) ? 'x' : '.'); if ((i + 1) % RUN_BITMAP_SEPARATOR_DISTANCE == 0) buff[j++] = ' '; } buff[j] = '\0'; return buff; } /* * pmem_obj_stats_get_type -- get stats for specified type number */ static struct pmem_obj_type_stats * pmem_obj_stats_get_type(struct pmem_obj_stats *stats, uint64_t type_num) { struct pmem_obj_type_stats *type; struct pmem_obj_type_stats *type_dest = NULL; PMDK_TAILQ_FOREACH(type, &stats->type_stats, next) { if (type->type_num == type_num) return type; if (!type_dest && type->type_num > type_num) type_dest = type; } type = calloc(1, sizeof(*type)); if (!type) { outv_err("cannot allocate memory for type stats\n"); exit(EXIT_FAILURE); } type->type_num = type_num; if (type_dest) PMDK_TAILQ_INSERT_BEFORE(type_dest, type, next); else PMDK_TAILQ_INSERT_TAIL(&stats->type_stats, type, next); return type; } struct info_obj_redo_args { int v; size_t i; struct pmem_info *pip; }; /* * info_obj_redo_entry - print redo log entry info */ static int info_obj_redo_entry(struct ulog_entry_base *e, void *arg, const struct pmem_ops *p_ops) { struct info_obj_redo_args *a = arg; struct ulog_entry_val *ev; struct ulog_entry_buf *eb; switch (ulog_entry_type(e)) { case ULOG_OPERATION_AND: case ULOG_OPERATION_OR: case ULOG_OPERATION_SET: ev = (struct ulog_entry_val *)e; outv(a->v, "%010zu: " "Offset: 0x%016jx " "Value: 0x%016jx ", a->i++, ulog_entry_offset(e), ev->value); break; case ULOG_OPERATION_BUF_CPY: case ULOG_OPERATION_BUF_SET: eb = (struct ulog_entry_buf *)e; outv(a->v, "%010zu: " "Offset: 0x%016jx " "Size: %s ", a->i++, ulog_entry_offset(e), out_get_size_str(eb->size, a->pip->args.human)); break; default: ASSERT(0); /* unreachable */ } return 0; } /* * info_obj_redo -- print ulog log entries */ static void info_obj_ulog(struct pmem_info *pip, int v, struct ulog *ulog, const struct pmem_ops *ops) { outv_title(v, "Log entries"); struct info_obj_redo_args args = {v, 0, pip}; ulog_foreach_entry(ulog, info_obj_redo_entry, &args, ops,NULL); } /* * info_obj_alloc_hdr -- print allocation header */ static void info_obj_alloc_hdr(struct pmem_info *pip, int v, const struct memory_block *m) { outv_title(v, "Allocation Header"); outv_field(v, "Size", "%s", out_get_size_str(m->m_ops->get_user_size(m), pip->args.human)); outv_field(v, "Extra", "%lu", 
m->m_ops->get_extra(m)); outv_field(v, "Flags", "0x%x", m->m_ops->get_flags(m)); } /* * info_obj_object_hdr -- print object headers and data */ static void info_obj_object_hdr(struct pmem_info *pip, int v, int vid, const struct memory_block *m, uint64_t id) { struct pmemobjpool *pop = pip->obj.pop; void *data = m->m_ops->get_user_data(m); outv_nl(vid); outv_field(vid, "Object", "%lu", id); outv_field(vid, "Offset", "0x%016lx", PTR_TO_OFF(pop, data)); int vahdr = v && pip->args.obj.valloc; int voobh = v && pip->args.obj.voobhdr; outv_indent(vahdr || voobh, 1); info_obj_alloc_hdr(pip, vahdr, m); outv_hexdump(v && pip->args.vdata, data, m->m_ops->get_real_size(m), PTR_TO_OFF(pip->obj.pop, data), 1); outv_indent(vahdr || voobh, -1); } /* * info_obj_lane_section -- print lane's section */ static void info_obj_lane(struct pmem_info *pip, int v, struct lane_layout *lane) { struct pmem_ops p_ops; p_ops.base = pip->obj.pop; outv_title(v, "Undo Log"); outv_indent(v, 1); info_obj_ulog(pip, v, (struct ulog *)&lane->undo, &p_ops); outv_indent(v, -1); outv_nl(v); outv_title(v, "Internal Undo Log"); outv_indent(v, 1); info_obj_ulog(pip, v, (struct ulog *)&lane->internal, &p_ops); outv_indent(v, -1); outv_title(v, "External Undo Log"); outv_indent(v, 1); info_obj_ulog(pip, v, (struct ulog *)&lane->external, &p_ops); outv_indent(v, -1); } /* * info_obj_lanes -- print lanes structures */ static void info_obj_lanes(struct pmem_info *pip) { int v = pip->args.obj.vlanes; if (!outv_check(v)) return; struct pmemobjpool *pop = pip->obj.pop; /* * Iterate through all lanes from specified range and print * specified sections. */ struct lane_layout *lanes = (void *)((char *)pip->obj.pop + pop->lanes_offset); struct range *curp = NULL; FOREACH_RANGE(curp, &pip->args.obj.lane_ranges) { for (uint64_t i = curp->first; i <= curp->last && i < pop->nlanes; i++) { /* For -R check print lane only if needs recovery */ if (pip->args.obj.lanes_recovery && !lane_need_recovery(pip, &lanes[i])) continue; outv_title(v, "Lane %" PRIu64, i); outv_indent(v, 1); info_obj_lane(pip, v, &lanes[i]); outv_indent(v, -1); } } } /* * info_obj_heap -- print pmemobj heap headers */ static void info_obj_heap(struct pmem_info *pip) { int v = pip->args.obj.vheap; struct pmemobjpool *pop = pip->obj.pop; struct heap_layout *layout = OFF_TO_PTR(pop, pop->heap_offset); struct heap_header *heap = &layout->header; outv(v, "\nPMEMOBJ Heap Header:\n"); outv_hexdump(v && pip->args.vhdrdump, heap, sizeof(*heap), pop->heap_offset, 1); outv_field(v, "Signature", "%s", heap->signature); outv_field(v, "Major", "%ld", heap->major); outv_field(v, "Minor", "%ld", heap->minor); outv_field(v, "Chunk size", "%s", out_get_size_str(heap->chunksize, pip->args.human)); outv_field(v, "Chunks per zone", "%ld", heap->chunks_per_zone); outv_field(v, "Checksum", "%s", out_get_checksum(heap, sizeof(*heap), &heap->checksum, 0)); } /* * info_obj_zone -- print information about zone */ static void info_obj_zone_hdr(struct pmem_info *pip, int v, struct zone_header *zone) { outv_hexdump(v && pip->args.vhdrdump, zone, sizeof(*zone), PTR_TO_OFF(pip->obj.pop, zone), 1); outv_field(v, "Magic", "%s", out_get_zone_magic_str(zone->magic)); outv_field(v, "Size idx", "%u", zone->size_idx); } /* * info_obj_object -- print information about object */ static void info_obj_object(struct pmem_info *pip, const struct memory_block *m, uint64_t objid) { if (!util_ranges_contain(&pip->args.ranges, objid)) return; uint64_t type_num = m->m_ops->get_extra(m); if 
(!util_ranges_contain(&pip->args.obj.type_ranges, type_num)) return; uint64_t real_size = m->m_ops->get_real_size(m); pip->obj.stats.n_total_objects++; pip->obj.stats.n_total_bytes += real_size; struct pmem_obj_type_stats *type_stats = pmem_obj_stats_get_type(&pip->obj.stats, type_num); type_stats->n_objects++; type_stats->n_bytes += real_size; int vid = pip->args.obj.vobjects; int v = pip->args.obj.vobjects; outv_indent(v, 1); info_obj_object_hdr(pip, v, vid, m, objid); outv_indent(v, -1); } /* * info_obj_run_bitmap -- print chunk run's bitmap */ static void info_obj_run_bitmap(int v, struct run_bitmap *b) { /* print only used values for lower verbosity */ uint32_t i; for (i = 0; i < b->nbits / RUN_BITS_PER_VALUE; i++) outv(v, "%s\n", get_bitmap_str(b->values[i], RUN_BITS_PER_VALUE)); unsigned mod = b->nbits % RUN_BITS_PER_VALUE; if (mod != 0) { outv(v, "%s\n", get_bitmap_str(b->values[i], mod)); } } /* * info_obj_memblock_is_root -- (internal) checks whether the object is root */ static int info_obj_memblock_is_root(struct pmem_info *pip, const struct memory_block *m) { uint64_t roff = pip->obj.pop->root_offset; if (roff == 0) return 0; struct memory_block rm = memblock_from_offset(pip->obj.heap, roff); return MEMORY_BLOCK_EQUALS(*m, rm); } /* * info_obj_run_cb -- (internal) run object callback */ static int info_obj_run_cb(const struct memory_block *m, void *arg) { struct pmem_info *pip = arg; if (info_obj_memblock_is_root(pip, m)) return 0; info_obj_object(pip, m, pip->obj.objid++); return 0; } static struct pmem_obj_class_stats * info_obj_class_stats_get_or_insert(struct pmem_obj_zone_stats *stats, uint64_t unit_size, uint64_t alignment, uint32_t nallocs, uint16_t flags) { struct pmem_obj_class_stats *cstats; VEC_FOREACH_BY_PTR(cstats, &stats->class_stats) { if (cstats->alignment == alignment && cstats->flags == flags && cstats->nallocs == nallocs && cstats->unit_size == unit_size) return cstats; } struct pmem_obj_class_stats s = {0, 0, unit_size, alignment, nallocs, flags}; if (VEC_PUSH_BACK(&stats->class_stats, s) != 0) return NULL; return &VEC_BACK(&stats->class_stats); } /* * info_obj_chunk -- print chunk info */ static void info_obj_chunk(struct pmem_info *pip, uint64_t c, uint64_t z, struct chunk_header *chunk_hdr, struct chunk *chunk, struct pmem_obj_zone_stats *stats) { int v = pip->args.obj.vchunkhdr; outv(v, "\n"); outv_field(v, "Chunk", "%lu", c); struct pmemobjpool *pop = pip->obj.pop; outv_hexdump(v && pip->args.vhdrdump, chunk_hdr, sizeof(*chunk_hdr), PTR_TO_OFF(pop, chunk_hdr), 1); outv_field(v, "Type", "%s", out_get_chunk_type_str(chunk_hdr->type)); outv_field(v, "Flags", "0x%x %s", chunk_hdr->flags, out_get_chunk_flags(chunk_hdr->flags)); outv_field(v, "Size idx", "%u", chunk_hdr->size_idx); struct memory_block m = MEMORY_BLOCK_NONE; m.zone_id = (uint32_t)z; m.chunk_id = (uint32_t)c; m.size_idx = (uint32_t)chunk_hdr->size_idx; memblock_rebuild_state(pip->obj.heap, &m); if (chunk_hdr->type == CHUNK_TYPE_USED || chunk_hdr->type == CHUNK_TYPE_FREE) { VEC_FRONT(&stats->class_stats).n_units += chunk_hdr->size_idx; if (chunk_hdr->type == CHUNK_TYPE_USED) { VEC_FRONT(&stats->class_stats).n_used += chunk_hdr->size_idx; /* skip root object */ if (!info_obj_memblock_is_root(pip, &m)) { info_obj_object(pip, &m, pip->obj.objid++); } } } else if (chunk_hdr->type == CHUNK_TYPE_RUN) { struct chunk_run *run = (struct chunk_run *)chunk; outv_hexdump(v && pip->args.vhdrdump, run, sizeof(run->hdr.block_size) + sizeof(run->hdr.alignment), PTR_TO_OFF(pop, run), 1); struct run_bitmap 
bitmap; m.m_ops->get_bitmap(&m, &bitmap); struct pmem_obj_class_stats *cstats = info_obj_class_stats_get_or_insert(stats, run->hdr.block_size, run->hdr.alignment, bitmap.nbits, chunk_hdr->flags); if (cstats == NULL) { outv_err("out of memory, can't allocate statistics"); return; } outv_field(v, "Block size", "%s", out_get_size_str(run->hdr.block_size, pip->args.human)); uint32_t units = bitmap.nbits; uint32_t free_space = 0; uint32_t max_free_block = 0; m.m_ops->calc_free(&m, &free_space, &max_free_block); uint32_t used = units - free_space; cstats->n_units += units; cstats->n_used += used; outv_field(v, "Bitmap", "%u / %u", used, units); info_obj_run_bitmap(v && pip->args.obj.vbitmap, &bitmap); m.m_ops->iterate_used(&m, info_obj_run_cb, pip); } } /* * info_obj_zone_chunks -- print chunk headers from specified zone */ static void info_obj_zone_chunks(struct pmem_info *pip, struct zone *zone, uint64_t z, struct pmem_obj_zone_stats *stats) { VEC_INIT(&stats->class_stats); struct pmem_obj_class_stats default_class_stats = {0, 0, CHUNKSIZE, 0, 0, 0}; VEC_PUSH_BACK(&stats->class_stats, default_class_stats); uint64_t c = 0; while (c < zone->header.size_idx) { enum chunk_type type = zone->chunk_headers[c].type; uint64_t size_idx = zone->chunk_headers[c].size_idx; if (util_ranges_contain(&pip->args.obj.chunk_ranges, c)) { if (pip->args.obj.chunk_types & (1ULL << type)) { stats->n_chunks++; stats->n_chunks_type[type]++; stats->size_chunks += size_idx; stats->size_chunks_type[type] += size_idx; info_obj_chunk(pip, c, z, &zone->chunk_headers[c], &zone->chunks[c], stats); } if (size_idx > 1 && type != CHUNK_TYPE_RUN && pip->args.obj.chunk_types & (1 << CHUNK_TYPE_FOOTER)) { size_t f = c + size_idx - 1; info_obj_chunk(pip, f, z, &zone->chunk_headers[f], &zone->chunks[f], stats); } } c += size_idx; } } /* * info_obj_root_obj -- print root object */ static void info_obj_root_obj(struct pmem_info *pip) { int v = pip->args.obj.vroot; struct pmemobjpool *pop = pip->obj.pop; if (!pop->root_offset) { outv(v, "\nNo root object...\n"); return; } outv_title(v, "Root object"); outv_field(v, "Offset", "0x%016zx", pop->root_offset); uint64_t root_size = pop->root_size; outv_field(v, "Size", "%s", out_get_size_str(root_size, pip->args.human)); struct memory_block m = memblock_from_offset( pip->obj.heap, pop->root_offset); /* do not print object id and offset for root object */ info_obj_object_hdr(pip, v, VERBOSE_SILENT, &m, 0); } /* * info_obj_zones -- print zones and chunks */ static void info_obj_zones_chunks(struct pmem_info *pip) { if (!outv_check(pip->args.obj.vheap) && !outv_check(pip->args.vstats) && !outv_check(pip->args.obj.vobjects)) return; struct pmemobjpool *pop = pip->obj.pop; struct heap_layout *layout = OFF_TO_PTR(pop, pop->heap_offset); size_t maxzone = util_heap_max_zone(pop->heap_size); pip->obj.stats.n_zones = maxzone; pip->obj.stats.zone_stats = calloc(maxzone, sizeof(struct pmem_obj_zone_stats)); if (!pip->obj.stats.zone_stats) err(1, "Cannot allocate memory for zone stats"); for (size_t i = 0; i < maxzone; i++) { struct zone *zone = ZID_TO_ZONE(layout, i); if (util_ranges_contain(&pip->args.obj.zone_ranges, i)) { int vvv = pip->args.obj.vheap && (pip->args.obj.vzonehdr || pip->args.obj.vchunkhdr); outv_title(vvv, "Zone %zu", i); if (zone->header.magic == ZONE_HEADER_MAGIC) pip->obj.stats.n_zones_used++; info_obj_zone_hdr(pip, pip->args.obj.vheap && pip->args.obj.vzonehdr, &zone->header); outv_indent(vvv, 1); info_obj_zone_chunks(pip, zone, i, &pip->obj.stats.zone_stats[i]); outv_indent(vvv, 
-1); } } } /* * info_obj_descriptor -- print pmemobj descriptor */ static void info_obj_descriptor(struct pmem_info *pip) { int v = VERBOSE_DEFAULT; if (!outv_check(v)) return; outv(v, "\nPMEM OBJ Header:\n"); struct pmemobjpool *pop = pip->obj.pop; uint8_t *hdrptr = (uint8_t *)pop + sizeof(pop->hdr); size_t hdrsize = sizeof(*pop) - sizeof(pop->hdr); size_t hdroff = sizeof(pop->hdr); outv_hexdump(pip->args.vhdrdump, hdrptr, hdrsize, hdroff, 1); /* check if layout is zeroed */ char *layout = util_check_memory((uint8_t *)pop->layout, sizeof(pop->layout), 0) ? pop->layout : "(null)"; /* address for checksum */ void *dscp = (void *)((uintptr_t)(pop) + sizeof(struct pool_hdr)); outv_field(v, "Layout", "%s", layout); outv_field(v, "Lanes offset", "0x%lx", pop->lanes_offset); outv_field(v, "Number of lanes", "%lu", pop->nlanes); outv_field(v, "Heap offset", "0x%lx", pop->heap_offset); outv_field(v, "Heap size", "%lu", pop->heap_size); outv_field(v, "Checksum", "%s", out_get_checksum(dscp, OBJ_DSC_P_SIZE, &pop->checksum, 0)); outv_field(v, "Root offset", "0x%lx", pop->root_offset); /* run id with -v option */ outv_field(v + 1, "Run id", "%lu", pop->run_id); } /* * info_obj_stats_objjects -- print objects' statistics */ static void info_obj_stats_objects(struct pmem_info *pip, int v, struct pmem_obj_stats *stats) { outv_field(v, "Number of objects", "%lu", stats->n_total_objects); outv_field(v, "Number of bytes", "%s", out_get_size_str( stats->n_total_bytes, pip->args.human)); outv_title(v, "Objects by type"); outv_indent(v, 1); struct pmem_obj_type_stats *type_stats; PMDK_TAILQ_FOREACH(type_stats, &pip->obj.stats.type_stats, next) { if (!type_stats->n_objects) continue; double n_objects_perc = 100.0 * (double)type_stats->n_objects / (double)stats->n_total_objects; double n_bytes_perc = 100.0 * (double)type_stats->n_bytes / (double)stats->n_total_bytes; outv_nl(v); outv_field(v, "Type number", "%lu", type_stats->type_num); outv_field(v, "Number of objects", "%lu [%s]", type_stats->n_objects, out_get_percentage(n_objects_perc)); outv_field(v, "Number of bytes", "%s [%s]", out_get_size_str( type_stats->n_bytes, pip->args.human), out_get_percentage(n_bytes_perc)); } outv_indent(v, -1); } /* * info_boj_stats_alloc_classes -- print allocation classes' statistics */ static void info_obj_stats_alloc_classes(struct pmem_info *pip, int v, struct pmem_obj_zone_stats *stats) { uint64_t total_bytes = 0; uint64_t total_used = 0; outv_indent(v, 1); struct pmem_obj_class_stats *cstats; VEC_FOREACH_BY_PTR(cstats, &stats->class_stats) { if (cstats->n_units == 0) continue; double used_perc = 100.0 * (double)cstats->n_used / (double)cstats->n_units; outv_nl(v); outv_field(v, "Unit size", "%s", out_get_size_str( cstats->unit_size, pip->args.human)); outv_field(v, "Units", "%lu", cstats->n_units); outv_field(v, "Used units", "%lu [%s]", cstats->n_used, out_get_percentage(used_perc)); uint64_t bytes = cstats->unit_size * cstats->n_units; uint64_t used = cstats->unit_size * cstats->n_used; total_bytes += bytes; total_used += used; double used_bytes_perc = 100.0 * (double)used / (double)bytes; outv_field(v, "Bytes", "%s", out_get_size_str(bytes, pip->args.human)); outv_field(v, "Used bytes", "%s [%s]", out_get_size_str(used, pip->args.human), out_get_percentage(used_bytes_perc)); } outv_indent(v, -1); double used_bytes_perc = total_bytes ? 
100.0 * (double)total_used / (double)total_bytes : 0.0; outv_nl(v); outv_field(v, "Total bytes", "%s", out_get_size_str(total_bytes, pip->args.human)); outv_field(v, "Total used bytes", "%s [%s]", out_get_size_str(total_used, pip->args.human), out_get_percentage(used_bytes_perc)); } /* * info_obj_stats_chunks -- print chunks' statistics */ static void info_obj_stats_chunks(struct pmem_info *pip, int v, struct pmem_obj_zone_stats *stats) { outv_field(v, "Number of chunks", "%lu", stats->n_chunks); outv_indent(v, 1); for (unsigned type = 0; type < MAX_CHUNK_TYPE; type++) { double type_perc = 100.0 * (double)stats->n_chunks_type[type] / (double)stats->n_chunks; if (stats->n_chunks_type[type]) { outv_field(v, out_get_chunk_type_str(type), "%lu [%s]", stats->n_chunks_type[type], out_get_percentage(type_perc)); } } outv_indent(v, -1); outv_nl(v); outv_field(v, "Total chunks size", "%s", out_get_size_str( stats->size_chunks, pip->args.human)); outv_indent(v, 1); for (unsigned type = 0; type < MAX_CHUNK_TYPE; type++) { double type_perc = 100.0 * (double)stats->size_chunks_type[type] / (double)stats->size_chunks; if (stats->size_chunks_type[type]) { outv_field(v, out_get_chunk_type_str(type), "%lu [%s]", stats->size_chunks_type[type], out_get_percentage(type_perc)); } } outv_indent(v, -1); } /* * info_obj_add_zone_stats -- add stats to total */ static void info_obj_add_zone_stats(struct pmem_obj_zone_stats *total, struct pmem_obj_zone_stats *stats) { total->n_chunks += stats->n_chunks; total->size_chunks += stats->size_chunks; for (int type = 0; type < MAX_CHUNK_TYPE; type++) { total->n_chunks_type[type] += stats->n_chunks_type[type]; total->size_chunks_type[type] += stats->size_chunks_type[type]; } struct pmem_obj_class_stats *cstats; VEC_FOREACH_BY_PTR(cstats, &stats->class_stats) { struct pmem_obj_class_stats *ctotal = info_obj_class_stats_get_or_insert(total, cstats->unit_size, cstats->alignment, cstats->nallocs, cstats->flags); if (ctotal == NULL) { outv_err("out of memory, can't allocate statistics"); return; } ctotal->n_units += cstats->n_units; ctotal->n_used += cstats->n_used; } } /* * info_obj_stats_zones -- print zones' statistics */ static void info_obj_stats_zones(struct pmem_info *pip, int v, struct pmem_obj_stats *stats, struct pmem_obj_zone_stats *total) { double used_zones_perc = 100.0 * (double)stats->n_zones_used / (double)stats->n_zones; outv_field(v, "Number of zones", "%lu", stats->n_zones); outv_field(v, "Number of used zones", "%lu [%s]", stats->n_zones_used, out_get_percentage(used_zones_perc)); outv_indent(v, 1); for (uint64_t i = 0; i < stats->n_zones_used; i++) { outv_title(v, "Zone %" PRIu64, i); struct pmem_obj_zone_stats *zstats = &stats->zone_stats[i]; info_obj_stats_chunks(pip, v, zstats); outv_title(v, "Zone's allocation classes"); info_obj_stats_alloc_classes(pip, v, zstats); info_obj_add_zone_stats(total, zstats); } outv_indent(v, -1); } /* * info_obj_stats -- print statistics */ static void info_obj_stats(struct pmem_info *pip) { int v = pip->args.vstats; if (!outv_check(v)) return; struct pmem_obj_stats *stats = &pip->obj.stats; struct pmem_obj_zone_stats total; memset(&total, 0, sizeof(total)); outv_title(v, "Statistics"); outv_title(v, "Objects"); info_obj_stats_objects(pip, v, stats); outv_title(v, "Heap"); info_obj_stats_zones(pip, v, stats, &total); if (stats->n_zones_used > 1) { outv_title(v, "Total zone's statistics"); outv_title(v, "Chunks statistics"); info_obj_stats_chunks(pip, v, &total); outv_title(v, "Allocation classes"); 
info_obj_stats_alloc_classes(pip, v, &total); } VEC_DELETE(&total.class_stats); } static struct pmem_info *Pip; #ifndef _WIN32 static void info_obj_sa_sigaction(int signum, siginfo_t *info, void *context) { uintptr_t offset = (uintptr_t)info->si_addr - (uintptr_t)Pip->obj.pop; outv_err("Invalid offset 0x%lx\n", offset); exit(EXIT_FAILURE); } static struct sigaction info_obj_sigaction = { .sa_sigaction = info_obj_sa_sigaction, .sa_flags = SA_SIGINFO }; #else #define CALL_FIRST 1 static LONG CALLBACK exception_handler(_In_ PEXCEPTION_POINTERS ExceptionInfo) { PEXCEPTION_RECORD record = ExceptionInfo->ExceptionRecord; if (record->ExceptionCode != EXCEPTION_ACCESS_VIOLATION) { return EXCEPTION_CONTINUE_SEARCH; } uintptr_t offset = (uintptr_t)record->ExceptionInformation[1] - (uintptr_t)Pip->obj.pop; outv_err("Invalid offset 0x%lx\n", offset); exit(EXIT_FAILURE); } #endif /* * info_obj -- print information about obj pool type */ int pmempool_info_obj(struct pmem_info *pip) { pip->obj.pop = pool_set_file_map(pip->pfile, 0); if (pip->obj.pop == NULL) return -1; pip->obj.size = pip->pfile->size; struct palloc_heap *heap = calloc(1, sizeof(*heap)); if (heap == NULL) err(1, "Cannot allocate memory for heap data"); heap->layout = OFF_TO_PTR(pip->obj.pop, pip->obj.pop->heap_offset); heap->base = pip->obj.pop; pip->obj.alloc_classes = alloc_class_collection_new(); pip->obj.heap = heap; Pip = pip; #ifndef _WIN32 if (sigaction(SIGSEGV, &info_obj_sigaction, NULL)) { #else if (AddVectoredExceptionHandler(CALL_FIRST, exception_handler) == NULL) { #endif perror("sigaction"); return -1; } pip->obj.uuid_lo = pmemobj_get_uuid_lo(pip->obj.pop); info_obj_descriptor(pip); info_obj_lanes(pip); info_obj_root_obj(pip); info_obj_heap(pip); info_obj_zones_chunks(pip); info_obj_stats(pip); free(heap); alloc_class_collection_delete(pip->obj.alloc_classes); return 0; }
file_length: 24,182
avg_line_length: 24.11215
max_line_length: 80
extension_type: c
null
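The per-zone chunk statistics gathered above (info_obj_zone_chunks, info_obj_add_zone_stats, info_obj_stats_chunks) boil down to summing per-type counters and reporting each type as a percentage of all chunks. A minimal standalone sketch of that aggregation, using hypothetical chunk types and sample counts rather than the pmempool structures:

/*
 * Standalone sketch of the chunk-statistics roll-up; MAX_TYPE and the
 * sample zone data are hypothetical.
 */
#include <stdio.h>
#include <stdint.h>

#define MAX_TYPE 4

struct zone_stats {
	uint64_t n_chunks;
	uint64_t n_chunks_type[MAX_TYPE];
};

/* add one zone's counters into the running total */
static void
add_zone_stats(struct zone_stats *total, const struct zone_stats *zs)
{
	total->n_chunks += zs->n_chunks;
	for (int t = 0; t < MAX_TYPE; t++)
		total->n_chunks_type[t] += zs->n_chunks_type[t];
}

int
main(void)
{
	struct zone_stats zones[2] = {
		{ .n_chunks = 10, .n_chunks_type = { 0, 6, 4, 0 } },
		{ .n_chunks = 5,  .n_chunks_type = { 0, 5, 0, 0 } },
	};
	struct zone_stats total = { 0 };

	for (int i = 0; i < 2; i++)
		add_zone_stats(&total, &zones[i]);

	/* report each type as a share of all chunks, skipping empty types */
	for (int t = 0; t < MAX_TYPE; t++) {
		if (!total.n_chunks_type[t])
			continue;
		double perc = 100.0 * (double)total.n_chunks_type[t] /
			(double)total.n_chunks;
		printf("type %d: %lu [%.1f%%]\n", t,
			(unsigned long)total.n_chunks_type[t], perc);
	}
	return 0;
}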
NearPMSW-main/nearpm/shadow/pmdk-sd/src/tools/pmempool/check.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2014-2019, Intel Corporation */ /* * check.c -- pmempool check command source file */ #include <getopt.h> #include <stdlib.h> #include "common.h" #include "check.h" #include "output.h" #include "set.h" #include "file.h" #include "libpmempool.h" typedef enum { CHECK_RESULT_CONSISTENT, CHECK_RESULT_NOT_CONSISTENT, CHECK_RESULT_REPAIRED, CHECK_RESULT_CANNOT_REPAIR, CHECK_RESULT_SYNC_REQ, CHECK_RESULT_ERROR } check_result_t; /* * pmempool_check_context -- context and arguments for check command */ struct pmempool_check_context { int verbose; /* verbosity level */ char *fname; /* file name */ struct pool_set_file *pfile; bool repair; /* do repair */ bool backup; /* do backup */ bool advanced; /* do advanced repairs */ char *backup_fname; /* backup file name */ bool exec; /* do execute */ char ans; /* default answer on all questions or '?' */ }; /* * pmempool_check_default -- default arguments for check command */ static const struct pmempool_check_context pmempool_check_default = { .verbose = 1, .fname = NULL, .repair = false, .backup = false, .backup_fname = NULL, .advanced = false, .exec = true, .ans = '?', }; /* * help_str -- string for help message */ static const char * const help_str = "Check consistency of a pool\n" "\n" "Common options:\n" " -r, --repair try to repair a pool file if possible\n" " -y, --yes answer yes to all questions\n" " -d, --dry-run don't execute, just show what would be done\n" " -b, --backup <file> create backup of a pool file before executing\n" " -a, --advanced perform advanced repairs\n" " -q, --quiet be quiet and don't print any messages\n" " -v, --verbose increase verbosity level\n" " -h, --help display this help and exit\n" "\n" "For complete documentation see %s-check(1) manual page.\n" ; /* * long_options -- command line options */ static const struct option long_options[] = { {"repair", no_argument, NULL, 'r'}, {"yes", no_argument, NULL, 'y'}, {"dry-run", no_argument, NULL, 'd'}, {"no-exec", no_argument, NULL, 'N'}, /* deprecated */ {"backup", required_argument, NULL, 'b'}, {"advanced", no_argument, NULL, 'a'}, {"quiet", no_argument, NULL, 'q'}, {"verbose", no_argument, NULL, 'v'}, {"help", no_argument, NULL, 'h'}, {NULL, 0, NULL, 0 }, }; /* * print_usage -- print short description of application's usage */ static void print_usage(const char *appname) { printf("Usage: %s check [<args>] <file>\n", appname); } /* * print_version -- print version string */ static void print_version(const char *appname) { printf("%s %s\n", appname, SRCVERSION); } /* * pmempool_check_help -- print help message for check command */ void pmempool_check_help(const char *appname) { print_usage(appname); print_version(appname); printf(help_str, appname); } /* * pmempool_check_parse_args -- parse command line arguments */ static int pmempool_check_parse_args(struct pmempool_check_context *pcp, const char *appname, int argc, char *argv[]) { int opt; while ((opt = getopt_long(argc, argv, "ahvrdNb:qy", long_options, NULL)) != -1) { switch (opt) { case 'r': pcp->repair = true; break; case 'y': pcp->ans = 'y'; break; case 'd': case 'N': pcp->exec = false; break; case 'b': pcp->backup = true; pcp->backup_fname = optarg; break; case 'a': pcp->advanced = true; break; case 'q': pcp->verbose = 0; break; case 'v': pcp->verbose = 2; break; case 'h': pmempool_check_help(appname); exit(EXIT_SUCCESS); default: print_usage(appname); exit(EXIT_FAILURE); } } if (optind < argc) { pcp->fname = argv[optind]; } else { print_usage(appname); 
exit(EXIT_FAILURE); } if (!pcp->repair && !pcp->exec) { outv_err("'-N' option requires '-r'\n"); exit(EXIT_FAILURE); } if (!pcp->repair && pcp->backup) { outv_err("'-b' option requires '-r'\n"); exit(EXIT_FAILURE); } return 0; } static check_result_t pmempool_check_2_check_res_t[] = { [PMEMPOOL_CHECK_RESULT_CONSISTENT] = CHECK_RESULT_CONSISTENT, [PMEMPOOL_CHECK_RESULT_NOT_CONSISTENT] = CHECK_RESULT_NOT_CONSISTENT, [PMEMPOOL_CHECK_RESULT_REPAIRED] = CHECK_RESULT_REPAIRED, [PMEMPOOL_CHECK_RESULT_CANNOT_REPAIR] = CHECK_RESULT_CANNOT_REPAIR, [PMEMPOOL_CHECK_RESULT_SYNC_REQ] = CHECK_RESULT_SYNC_REQ, [PMEMPOOL_CHECK_RESULT_ERROR] = CHECK_RESULT_ERROR, }; static const char * check_ask(const char *msg) { char answer = ask_Yn('?', "%s", msg); switch (answer) { case 'y': return "yes"; case 'n': return "no"; default: return "?"; } } static check_result_t pmempool_check_perform(struct pmempool_check_context *pc) { struct pmempool_check_args args = { .path = pc->fname, .backup_path = pc->backup_fname, .pool_type = PMEMPOOL_POOL_TYPE_DETECT, .flags = PMEMPOOL_CHECK_FORMAT_STR }; if (pc->repair) args.flags |= PMEMPOOL_CHECK_REPAIR; if (!pc->exec) args.flags |= PMEMPOOL_CHECK_DRY_RUN; if (pc->advanced) args.flags |= PMEMPOOL_CHECK_ADVANCED; if (pc->ans == 'y') args.flags |= PMEMPOOL_CHECK_ALWAYS_YES; if (pc->verbose == 2) args.flags |= PMEMPOOL_CHECK_VERBOSE; PMEMpoolcheck *ppc = pmempool_check_init(&args, sizeof(args)); if (ppc == NULL) return CHECK_RESULT_ERROR; struct pmempool_check_status *status = NULL; while ((status = pmempool_check(ppc)) != NULL) { switch (status->type) { case PMEMPOOL_CHECK_MSG_TYPE_ERROR: outv(1, "%s\n", status->str.msg); break; case PMEMPOOL_CHECK_MSG_TYPE_INFO: outv(2, "%s\n", status->str.msg); break; case PMEMPOOL_CHECK_MSG_TYPE_QUESTION: status->str.answer = check_ask(status->str.msg); break; default: pmempool_check_end(ppc); exit(EXIT_FAILURE); } } enum pmempool_check_result ret = pmempool_check_end(ppc); return pmempool_check_2_check_res_t[ret]; } /* * pmempool_check_func -- main function for check command */ int pmempool_check_func(const char *appname, int argc, char *argv[]) { int ret = 0; check_result_t res = CHECK_RESULT_CONSISTENT; struct pmempool_check_context pc = pmempool_check_default; /* parse command line arguments */ ret = pmempool_check_parse_args(&pc, appname, argc, argv); if (ret) return ret; /* set verbosity level */ out_set_vlevel(pc.verbose); res = pmempool_check_perform(&pc); switch (res) { case CHECK_RESULT_CONSISTENT: outv(2, "%s: consistent\n", pc.fname); ret = 0; break; case CHECK_RESULT_NOT_CONSISTENT: outv(1, "%s: not consistent\n", pc.fname); ret = -1; break; case CHECK_RESULT_REPAIRED: outv(1, "%s: repaired\n", pc.fname); ret = 0; break; case CHECK_RESULT_CANNOT_REPAIR: outv(1, "%s: cannot repair\n", pc.fname); ret = -1; break; case CHECK_RESULT_SYNC_REQ: outv(1, "%s: sync required\n", pc.fname); ret = 0; break; case CHECK_RESULT_ERROR: if (errno) outv_err("%s\n", strerror(errno)); if (pc.repair) outv_err("repairing failed\n"); else outv_err("checking consistency failed\n"); ret = -1; break; default: outv_err("status unknown\n"); ret = -1; break; } return ret; }
file_length: 7,163
avg_line_length: 21.670886
max_line_length: 72
extension_type: c
null
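The check command above is a thin driver around the libpmempool check API it already calls (pmempool_check_init, pmempool_check, pmempool_check_end). A minimal sketch of driving the same loop directly from application code; the pool path "./pool.obj" is a hypothetical placeholder and every question is answered "yes":

#include <stdio.h>
#include <libpmempool.h>

int
main(void)
{
	struct pmempool_check_args args = {
		.path = "./pool.obj",
		.backup_path = NULL,
		.pool_type = PMEMPOOL_POOL_TYPE_DETECT,
		.flags = PMEMPOOL_CHECK_FORMAT_STR | PMEMPOOL_CHECK_REPAIR,
	};

	PMEMpoolcheck *ppc = pmempool_check_init(&args, sizeof(args));
	if (ppc == NULL)
		return 1;

	/* iterate over statuses until the check is finished */
	struct pmempool_check_status *status;
	while ((status = pmempool_check(ppc)) != NULL) {
		if (status->type == PMEMPOOL_CHECK_MSG_TYPE_QUESTION)
			status->str.answer = "yes";
		else
			printf("%s\n", status->str.msg);
	}

	enum pmempool_check_result res = pmempool_check_end(ppc);
	return (res == PMEMPOOL_CHECK_RESULT_CONSISTENT ||
		res == PMEMPOOL_CHECK_RESULT_REPAIRED) ? 0 : 1;
}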
NearPMSW-main/nearpm/shadow/pmdk-sd/src/tools/pmempool/common.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2014-2020, Intel Corporation */ /* * common.h -- declarations of common functions */ #include <stdint.h> #include <stddef.h> #include <stdarg.h> #include <stdbool.h> #include "queue.h" #include "log.h" #include "blk.h" #include "libpmemobj.h" #include "lane.h" #include "ulog.h" #include "memops.h" #include "pmalloc.h" #include "list.h" #include "obj.h" #include "memblock.h" #include "heap_layout.h" #include "tx.h" #include "heap.h" #include "btt_layout.h" #include "page_size.h" /* XXX - modify Linux makefiles to generate srcversion.h and remove #ifdef */ #ifdef _WIN32 #include "srcversion.h" #endif #define COUNT_OF(x) (sizeof(x) / sizeof(0[x])) #define OPT_SHIFT 12 #define OPT_MASK (~((1 << OPT_SHIFT) - 1)) #define OPT_LOG (1 << (PMEM_POOL_TYPE_LOG + OPT_SHIFT)) #define OPT_BLK (1 << (PMEM_POOL_TYPE_BLK + OPT_SHIFT)) #define OPT_OBJ (1 << (PMEM_POOL_TYPE_OBJ + OPT_SHIFT)) #define OPT_BTT (1 << (PMEM_POOL_TYPE_BTT + OPT_SHIFT)) #define OPT_ALL (OPT_LOG | OPT_BLK | OPT_OBJ | OPT_BTT) #define OPT_REQ_SHIFT 8 #define OPT_REQ_MASK ((1 << OPT_REQ_SHIFT) - 1) #define _OPT_REQ(c, n) ((c) << (OPT_REQ_SHIFT * (n))) #define OPT_REQ0(c) _OPT_REQ(c, 0) #define OPT_REQ1(c) _OPT_REQ(c, 1) #define OPT_REQ2(c) _OPT_REQ(c, 2) #define OPT_REQ3(c) _OPT_REQ(c, 3) #define OPT_REQ4(c) _OPT_REQ(c, 4) #define OPT_REQ5(c) _OPT_REQ(c, 5) #define OPT_REQ6(c) _OPT_REQ(c, 6) #define OPT_REQ7(c) _OPT_REQ(c, 7) #ifndef min #define min(a, b) ((a) < (b) ? (a) : (b)) #endif #define FOREACH_RANGE(range, ranges)\ PMDK_LIST_FOREACH(range, &(ranges)->head, next) #define PLIST_OFF_TO_PTR(pop, off)\ ((off) == 0 ? NULL : (void *)((uintptr_t)(pop) + (off) - OBJ_OOB_SIZE)) #define ENTRY_TO_ALLOC_HDR(entry)\ ((void *)((uintptr_t)(entry) - sizeof(struct allocation_header))) #define OBJH_FROM_PTR(ptr)\ ((void *)((uintptr_t)(ptr) - sizeof(struct legacy_object_header))) #define DEFAULT_HDR_SIZE PMEM_PAGESIZE #define DEFAULT_DESC_SIZE PMEM_PAGESIZE #define POOL_HDR_DESC_SIZE (DEFAULT_HDR_SIZE + DEFAULT_DESC_SIZE) #define PTR_TO_ALLOC_HDR(ptr)\ ((void *)((uintptr_t)(ptr) -\ sizeof(struct legacy_object_header))) #define OBJH_TO_PTR(objh)\ ((void *)((uintptr_t)(objh) + sizeof(struct legacy_object_header))) /* invalid answer for ask_* functions */ #define INV_ANS '\0' #define FORMAT_PRINTF(a, b) __attribute__((__format__(__printf__, (a), (b)))) /* * pmem_pool_type_t -- pool types */ typedef enum { PMEM_POOL_TYPE_LOG = 0x01, PMEM_POOL_TYPE_BLK = 0x02, PMEM_POOL_TYPE_OBJ = 0x04, PMEM_POOL_TYPE_BTT = 0x08, PMEM_POOL_TYPE_ALL = 0x0f, PMEM_POOL_TYPE_UNKNOWN = 0x80, } pmem_pool_type_t; struct option_requirement { int opt; pmem_pool_type_t type; uint64_t req; }; struct options { const struct option *opts; size_t noptions; char *bitmap; const struct option_requirement *req; }; struct pmem_pool_params { pmem_pool_type_t type; char signature[POOL_HDR_SIG_LEN]; uint64_t size; mode_t mode; int is_poolset; int is_part; int is_checksum_ok; union { struct { uint64_t bsize; } blk; struct { char layout[PMEMOBJ_MAX_LAYOUT]; } obj; }; }; struct pool_set_file { int fd; char *fname; void *addr; size_t size; struct pool_set *poolset; size_t replica; time_t mtime; mode_t mode; bool fileio; }; struct pool_set_file *pool_set_file_open(const char *fname, int rdonly, int check); void pool_set_file_close(struct pool_set_file *file); int pool_set_file_read(struct pool_set_file *file, void *buff, size_t nbytes, uint64_t off); int pool_set_file_write(struct pool_set_file *file, void *buff, size_t nbytes, uint64_t off); 
int pool_set_file_set_replica(struct pool_set_file *file, size_t replica); size_t pool_set_file_nreplicas(struct pool_set_file *file); void *pool_set_file_map(struct pool_set_file *file, uint64_t offset); void pool_set_file_persist(struct pool_set_file *file, const void *addr, size_t len); struct range { PMDK_LIST_ENTRY(range) next; uint64_t first; uint64_t last; }; struct ranges { PMDK_LIST_HEAD(rangeshead, range) head; }; pmem_pool_type_t pmem_pool_type_parse_hdr(const struct pool_hdr *hdrp); pmem_pool_type_t pmem_pool_type(const void *base_pool_addr); int pmem_pool_checksum(const void *base_pool_addr); pmem_pool_type_t pmem_pool_type_parse_str(const char *str); uint64_t pmem_pool_get_min_size(pmem_pool_type_t type); int pmem_pool_parse_params(const char *fname, struct pmem_pool_params *paramsp, int check); int util_poolset_map(const char *fname, struct pool_set **poolset, int rdonly); struct options *util_options_alloc(const struct option *options, size_t nopts, const struct option_requirement *req); void util_options_free(struct options *opts); int util_options_verify(const struct options *opts, pmem_pool_type_t type); int util_options_getopt(int argc, char *argv[], const char *optstr, const struct options *opts); pmem_pool_type_t util_get_pool_type_second_page(const void *pool_base_addr); int util_parse_mode(const char *str, mode_t *mode); int util_parse_ranges(const char *str, struct ranges *rangesp, struct range entire); int util_ranges_add(struct ranges *rangesp, struct range range); void util_ranges_clear(struct ranges *rangesp); int util_ranges_contain(const struct ranges *rangesp, uint64_t n); int util_ranges_empty(const struct ranges *rangesp); int util_check_memory(const uint8_t *buff, size_t len, uint8_t val); int util_parse_chunk_types(const char *str, uint64_t *types); int util_parse_lane_sections(const char *str, uint64_t *types); char ask(char op, char *answers, char def_ans, const char *fmt, va_list ap); char ask_Yn(char op, const char *fmt, ...) FORMAT_PRINTF(2, 3); char ask_yN(char op, const char *fmt, ...) FORMAT_PRINTF(2, 3); unsigned util_heap_max_zone(size_t size); int util_pool_clear_badblocks(const char *path, int create); static const struct range ENTIRE_UINT64 = { { NULL, NULL }, /* range */ 0, /* first */ UINT64_MAX /* last */ };
file_length: 5,957
avg_line_length: 28.205882
max_line_length: 79
extension_type: h
null
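The range helpers declared above (util_ranges_add, util_ranges_contain) let the commands restrict processing to selected zones, chunks or lanes. A standalone sketch of the same idea with hypothetical, simplified types (an inclusive [first, last] pair and a linear membership scan), not the pmempool implementation:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

struct range {
	uint64_t first;
	uint64_t last;
};

/* return true if v falls inside any of the n ranges */
static bool
ranges_contain(const struct range *r, size_t n, uint64_t v)
{
	for (size_t i = 0; i < n; i++)
		if (v >= r[i].first && v <= r[i].last)
			return true;
	return false;
}

int
main(void)
{
	/* e.g. the chunk selection "0-3,10-12" */
	struct range ranges[] = { { 0, 3 }, { 10, 12 } };

	for (uint64_t c = 0; c < 15; c++)
		if (ranges_contain(ranges, 2, c))
			printf("chunk %llu selected\n",
				(unsigned long long)c);
	return 0;
}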
NearPMSW-main/nearpm/shadow/pmdk-sd/src/tools/pmempool/info_log.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2014-2019, Intel Corporation */ /* * info_log.c -- pmempool info command source file for log pool */ #include <stdbool.h> #include <stdlib.h> #include <err.h> #include <sys/mman.h> #include "common.h" #include "output.h" #include "info.h" /* * info_log_data -- print used data from log pool */ static int info_log_data(struct pmem_info *pip, int v, struct pmemlog *plp) { if (!outv_check(v)) return 0; uint64_t size_used = plp->write_offset - plp->start_offset; if (size_used == 0) return 0; uint8_t *addr = pool_set_file_map(pip->pfile, plp->start_offset); if (addr == MAP_FAILED) { warn("%s", pip->file_name); outv_err("cannot read pmem log data\n"); return -1; } if (pip->args.log.walk == 0) { outv_title(v, "PMEMLOG data"); struct range *curp = NULL; PMDK_LIST_FOREACH(curp, &pip->args.ranges.head, next) { uint8_t *ptr = addr + curp->first; if (curp->last >= size_used) curp->last = size_used - 1; uint64_t count = curp->last - curp->first + 1; outv_hexdump(v, ptr, count, curp->first + plp->start_offset, 1); size_used -= count; if (!size_used) break; } } else { /* * Walk through used data with fixed chunk size * passed by user. */ uint64_t nchunks = size_used / pip->args.log.walk; outv_title(v, "PMEMLOG data [chunks: total = %lu size = %ld]", nchunks, pip->args.log.walk); struct range *curp = NULL; PMDK_LIST_FOREACH(curp, &pip->args.ranges.head, next) { uint64_t i; for (i = curp->first; i <= curp->last && i < nchunks; i++) { outv(v, "Chunk %10lu:\n", i); outv_hexdump(v, addr + i * pip->args.log.walk, pip->args.log.walk, plp->start_offset + i * pip->args.log.walk, 1); } } } return 0; } /* * info_logs_stats -- print log type pool statistics */ static void info_log_stats(struct pmem_info *pip, int v, struct pmemlog *plp) { uint64_t size_total = plp->end_offset - plp->start_offset; uint64_t size_used = plp->write_offset - plp->start_offset; uint64_t size_avail = size_total - size_used; if (size_total == 0) return; double perc_used = (double)size_used / (double)size_total * 100.0; double perc_avail = 100.0 - perc_used; outv_title(v, "PMEM LOG Statistics"); outv_field(v, "Total", "%s", out_get_size_str(size_total, pip->args.human)); outv_field(v, "Available", "%s [%s]", out_get_size_str(size_avail, pip->args.human), out_get_percentage(perc_avail)); outv_field(v, "Used", "%s [%s]", out_get_size_str(size_used, pip->args.human), out_get_percentage(perc_used)); } /* * info_log_descriptor -- print pmemlog descriptor and return 1 if * write offset is valid */ static int info_log_descriptor(struct pmem_info *pip, int v, struct pmemlog *plp) { outv_title(v, "PMEM LOG Header"); /* dump pmemlog header without pool_hdr */ outv_hexdump(pip->args.vhdrdump, (uint8_t *)plp + sizeof(plp->hdr), sizeof(*plp) - sizeof(plp->hdr), sizeof(plp->hdr), 1); log_convert2h(plp); int write_offset_valid = plp->write_offset >= plp->start_offset && plp->write_offset <= plp->end_offset; outv_field(v, "Start offset", "0x%lx", plp->start_offset); outv_field(v, "Write offset", "0x%lx [%s]", plp->write_offset, write_offset_valid ? 
"OK":"ERROR"); outv_field(v, "End offset", "0x%lx", plp->end_offset); return write_offset_valid; } /* * pmempool_info_log -- print information about log type pool */ int pmempool_info_log(struct pmem_info *pip) { int ret = 0; struct pmemlog *plp = malloc(sizeof(struct pmemlog)); if (!plp) err(1, "Cannot allocate memory for pmemlog structure"); if (pmempool_info_read(pip, plp, sizeof(struct pmemlog), 0)) { outv_err("cannot read pmemlog header\n"); free(plp); return -1; } if (info_log_descriptor(pip, VERBOSE_DEFAULT, plp)) { info_log_stats(pip, pip->args.vstats, plp); ret = info_log_data(pip, pip->args.vdata, plp); } free(plp); return ret; }
file_length: 3,972
avg_line_length: 23.677019
max_line_length: 70
extension_type: c
null
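info_log_stats above derives the usage figures purely from the three pmemlog offsets. The same arithmetic as a standalone sketch with made-up offsets:

#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	/* hypothetical pmemlog offsets, for illustration only */
	uint64_t start_offset = 0x2000;
	uint64_t end_offset   = 0x100000;
	uint64_t write_offset = 0x48000;

	uint64_t size_total = end_offset - start_offset;
	uint64_t size_used  = write_offset - start_offset;
	uint64_t size_avail = size_total - size_used;

	double perc_used  = (double)size_used / (double)size_total * 100.0;
	double perc_avail = 100.0 - perc_used;

	printf("Total:     %lu\n", (unsigned long)size_total);
	printf("Used:      %lu [%.2f%%]\n",
		(unsigned long)size_used, perc_used);
	printf("Available: %lu [%.2f%%]\n",
		(unsigned long)size_avail, perc_avail);
	return 0;
}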
NearPMSW-main/nearpm/shadow/pmdk-sd/src/tools/pmempool/info.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2014-2019, Intel Corporation */ /* * info.h -- pmempool info command header file */ #include "vec.h" /* * Verbose levels used in application: * * VERBOSE_DEFAULT: * Default value for application's verbosity level. * This is also set for data structures which should be * printed without any command line argument. * * VERBOSE_MAX: * Maximum value for application's verbosity level. * This value is used when -v command line argument passed. * * VERBOSE_SILENT: * This value is higher than VERBOSE_MAX and it is used only * for verbosity levels of data structures which should _not_ be * printed without specified command line arguments. */ #define VERBOSE_SILENT 0 #define VERBOSE_DEFAULT 1 #define VERBOSE_MAX 2 /* * print_bb_e -- printing bad blocks options */ enum print_bb_e { PRINT_BAD_BLOCKS_NOT_SET, PRINT_BAD_BLOCKS_NO, PRINT_BAD_BLOCKS_YES, PRINT_BAD_BLOCKS_MAX }; /* * pmempool_info_args -- structure for storing command line arguments */ struct pmempool_info_args { char *file; /* input file */ unsigned col_width; /* column width for printing fields */ bool human; /* sizes in human-readable formats */ bool force; /* force parsing pool */ enum print_bb_e badblocks; /* print bad blocks */ pmem_pool_type_t type; /* forced pool type */ bool use_range; /* use range for blocks */ struct ranges ranges; /* range of block/chunks to dump */ int vlevel; /* verbosity level */ int vdata; /* verbosity level for data dump */ int vhdrdump; /* verbosity level for headers hexdump */ int vstats; /* verbosity level for statistics */ struct { size_t walk; /* data chunk size */ } log; struct { int vmap; /* verbosity level for BTT Map */ int vflog; /* verbosity level for BTT FLOG */ int vbackup; /* verbosity level for BTT Info backup */ bool skip_zeros; /* skip blocks marked with zero flag */ bool skip_error; /* skip blocks marked with error flag */ bool skip_no_flag; /* skip blocks not marked with any flag */ } blk; struct { int vlanes; /* verbosity level for lanes */ int vroot; int vobjects; int valloc; int voobhdr; int vheap; int vzonehdr; int vchunkhdr; int vbitmap; bool lanes_recovery; bool ignore_empty_obj; uint64_t chunk_types; size_t replica; struct ranges lane_ranges; struct ranges type_ranges; struct ranges zone_ranges; struct ranges chunk_ranges; } obj; }; /* * pmem_blk_stats -- structure with statistics for pmemblk */ struct pmem_blk_stats { uint32_t total; /* number of processed blocks */ uint32_t zeros; /* number of blocks marked by zero flag */ uint32_t errors; /* number of blocks marked by error flag */ uint32_t noflag; /* number of blocks not marked with any flag */ }; struct pmem_obj_class_stats { uint64_t n_units; uint64_t n_used; uint64_t unit_size; uint64_t alignment; uint32_t nallocs; uint16_t flags; }; struct pmem_obj_zone_stats { uint64_t n_chunks; uint64_t n_chunks_type[MAX_CHUNK_TYPE]; uint64_t size_chunks; uint64_t size_chunks_type[MAX_CHUNK_TYPE]; VEC(, struct pmem_obj_class_stats) class_stats; }; struct pmem_obj_type_stats { PMDK_TAILQ_ENTRY(pmem_obj_type_stats) next; uint64_t type_num; uint64_t n_objects; uint64_t n_bytes; }; struct pmem_obj_stats { uint64_t n_total_objects; uint64_t n_total_bytes; uint64_t n_zones; uint64_t n_zones_used; struct pmem_obj_zone_stats *zone_stats; PMDK_TAILQ_HEAD(obj_type_stats_head, pmem_obj_type_stats) type_stats; }; /* * pmem_info -- context for pmeminfo application */ struct pmem_info { const char *file_name; /* current file name */ struct pool_set_file *pfile; struct pmempool_info_args args; /* 
arguments parsed from command line */ struct options *opts; struct pool_set *poolset; pmem_pool_type_t type; struct pmem_pool_params params; struct { struct pmem_blk_stats stats; } blk; struct { struct pmemobjpool *pop; struct palloc_heap *heap; struct alloc_class_collection *alloc_classes; size_t size; struct pmem_obj_stats stats; uint64_t uuid_lo; uint64_t objid; } obj; }; int pmempool_info_func(const char *appname, int argc, char *argv[]); void pmempool_info_help(const char *appname); int pmempool_info_read(struct pmem_info *pip, void *buff, size_t nbytes, uint64_t off); int pmempool_info_blk(struct pmem_info *pip); int pmempool_info_log(struct pmem_info *pip); int pmempool_info_obj(struct pmem_info *pip); int pmempool_info_btt(struct pmem_info *pip);
file_length: 4,492
avg_line_length: 25.904192
max_line_length: 73
extension_type: h
null
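struct pmem_obj_stats above keeps one pmem_obj_type_stats entry per object type number, created when a type is first seen. A simplified, hypothetical get-or-insert sketch of that bookkeeping (a plain singly linked list standing in for the PMDK_TAILQ machinery):

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

struct type_stats {
	struct type_stats *next;
	uint64_t type_num;
	uint64_t n_objects;
	uint64_t n_bytes;
};

/* find the entry for type_num, creating it on first use */
static struct type_stats *
type_stats_get(struct type_stats **head, uint64_t type_num)
{
	for (struct type_stats *ts = *head; ts != NULL; ts = ts->next)
		if (ts->type_num == type_num)
			return ts;

	struct type_stats *ts = calloc(1, sizeof(*ts));
	if (ts == NULL)
		return NULL;
	ts->type_num = type_num;
	ts->next = *head;
	*head = ts;
	return ts;
}

int
main(void)
{
	struct type_stats *head = NULL;

	/* hypothetical objects: (type_num, size) pairs */
	uint64_t objs[][2] = { { 1, 64 }, { 1, 128 }, { 7, 4096 } };

	for (size_t i = 0; i < 3; i++) {
		struct type_stats *ts = type_stats_get(&head, objs[i][0]);
		if (ts == NULL)
			return 1;
		ts->n_objects++;
		ts->n_bytes += objs[i][1];
	}

	for (struct type_stats *ts = head; ts != NULL; ts = ts->next)
		printf("type %lu: %lu objects, %lu bytes\n",
			(unsigned long)ts->type_num,
			(unsigned long)ts->n_objects,
			(unsigned long)ts->n_bytes);
	return 0;
}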
NearPMSW-main/nearpm/shadow/pmdk-sd/src/tools/pmempool/output.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2014-2018, Intel Corporation */ /* * output.h -- declarations of output printing related functions */ #include <time.h> #include <stdint.h> #include <stdio.h> void out_set_vlevel(int vlevel); void out_set_stream(FILE *stream); void out_set_prefix(const char *prefix); void out_set_col_width(unsigned col_width); void outv_err(const char *fmt, ...) FORMAT_PRINTF(1, 2); void out_err(const char *file, int line, const char *func, const char *fmt, ...) FORMAT_PRINTF(4, 5); void outv_err_vargs(const char *fmt, va_list ap); void outv_indent(int vlevel, int i); void outv(int vlevel, const char *fmt, ...) FORMAT_PRINTF(2, 3); void outv_nl(int vlevel); int outv_check(int vlevel); void outv_title(int vlevel, const char *fmt, ...) FORMAT_PRINTF(2, 3); void outv_field(int vlevel, const char *field, const char *fmt, ...) FORMAT_PRINTF(3, 4); void outv_hexdump(int vlevel, const void *addr, size_t len, size_t offset, int sep); const char *out_get_uuid_str(uuid_t uuid); const char *out_get_time_str(time_t time); const char *out_get_size_str(uint64_t size, int human); const char *out_get_percentage(double percentage); const char *out_get_checksum(void *addr, size_t len, uint64_t *csump, uint64_t skip_off); const char *out_get_btt_map_entry(uint32_t map); const char *out_get_pool_type_str(pmem_pool_type_t type); const char *out_get_pool_signature(pmem_pool_type_t type); const char *out_get_tx_state_str(uint64_t state); const char *out_get_chunk_type_str(enum chunk_type type); const char *out_get_chunk_flags(uint16_t flags); const char *out_get_zone_magic_str(uint32_t magic); const char *out_get_pmemoid_str(PMEMoid oid, uint64_t uuid_lo); const char *out_get_arch_machine_class_str(uint8_t machine_class); const char *out_get_arch_data_str(uint8_t data); const char *out_get_arch_machine_str(uint16_t machine); const char *out_get_last_shutdown_str(uint8_t dirty); const char *out_get_alignment_desc_str(uint64_t ad, uint64_t cur_ad); const char *out_get_incompat_features_str(uint32_t incompat);
file_length: 2,070
avg_line_length: 41.265306
max_line_length: 74
extension_type: h
null
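The outv* helpers declared above print only when a message's verbosity level is within the globally configured level (out_set_vlevel). A standalone sketch of that gating with a hypothetical outv_sketch stand-in:

#include <stdio.h>
#include <stdarg.h>

static int Vlevel = 1;	/* as set by out_set_vlevel() */

/* print only if the message is not more verbose than the current level */
static void
outv_sketch(int vlevel, const char *fmt, ...)
{
	if (vlevel > Vlevel)
		return;

	va_list ap;
	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
}

int
main(void)
{
	outv_sketch(1, "printed at the default level\n");
	outv_sketch(2, "printed only with -v\n");	/* suppressed here */
	return 0;
}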
NearPMSW-main/nearpm/shadow/pmdk-sd/src/tools/pmempool/synchronize.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2016-2018, Intel Corporation */ /* * synchronize.c -- pmempool sync command source file */ #include "synchronize.h" #include <stdio.h> #include <libgen.h> #include <string.h> #include <unistd.h> #include <stdlib.h> #include <getopt.h> #include <stdbool.h> #include <sys/mman.h> #include <endian.h> #include "common.h" #include "output.h" #include "libpmempool.h" /* * pmempool_sync_context -- context and arguments for sync command */ struct pmempool_sync_context { unsigned flags; /* flags which modify the command execution */ char *poolset_file; /* a path to a poolset file */ }; /* * pmempool_sync_default -- default arguments for sync command */ static const struct pmempool_sync_context pmempool_sync_default = { .flags = 0, .poolset_file = NULL, }; /* * help_str -- string for help message */ static const char * const help_str = "Check consistency of a pool\n" "\n" "Common options:\n" " -b, --bad-blocks fix bad blocks - it requires creating or reading special recovery files\n" " -d, --dry-run do not apply changes, only check for viability of synchronization\n" " -v, --verbose increase verbosity level\n" " -h, --help display this help and exit\n" "\n" "For complete documentation see %s-sync(1) manual page.\n" ; /* * long_options -- command line options */ static const struct option long_options[] = { {"bad-blocks", no_argument, NULL, 'b'}, {"dry-run", no_argument, NULL, 'd'}, {"help", no_argument, NULL, 'h'}, {"verbose", no_argument, NULL, 'v'}, {NULL, 0, NULL, 0 }, }; /* * print_usage -- (internal) print application usage short description */ static void print_usage(const char *appname) { printf("usage: %s sync [<options>] <poolset_file>\n", appname); } /* * print_version -- (internal) print version string */ static void print_version(const char *appname) { printf("%s %s\n", appname, SRCVERSION); } /* * pmempool_sync_help -- print help message for the sync command */ void pmempool_sync_help(const char *appname) { print_usage(appname); print_version(appname); printf(help_str, appname); } /* * pmempool_sync_parse_args -- (internal) parse command line arguments */ static int pmempool_sync_parse_args(struct pmempool_sync_context *ctx, const char *appname, int argc, char *argv[]) { int opt; while ((opt = getopt_long(argc, argv, "bdhv", long_options, NULL)) != -1) { switch (opt) { case 'd': ctx->flags |= PMEMPOOL_SYNC_DRY_RUN; break; case 'b': ctx->flags |= PMEMPOOL_SYNC_FIX_BAD_BLOCKS; break; case 'h': pmempool_sync_help(appname); exit(EXIT_SUCCESS); case 'v': out_set_vlevel(1); break; default: print_usage(appname); exit(EXIT_FAILURE); } } if (optind < argc) { ctx->poolset_file = argv[optind]; } else { print_usage(appname); exit(EXIT_FAILURE); } return 0; } /* * pmempool_sync_func -- main function for the sync command */ int pmempool_sync_func(const char *appname, int argc, char *argv[]) { int ret = 0; struct pmempool_sync_context ctx = pmempool_sync_default; /* parse command line arguments */ if ((ret = pmempool_sync_parse_args(&ctx, appname, argc, argv))) return ret; ret = pmempool_sync(ctx.poolset_file, ctx.flags); if (ret) { outv_err("failed to synchronize: %s\n", pmempool_errormsg()); if (errno) outv_err("%s\n", strerror(errno)); return -1; } else { outv(1, "%s: synchronized\n", ctx.poolset_file); return 0; } }
file_length: 3,499
avg_line_length: 21.151899
max_line_length: 98
extension_type: c
null
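pmempool_sync_func above delegates the actual work to pmempool_sync() from libpmempool. A minimal sketch of calling that entry point directly; "./poolset.file" is a hypothetical path and the dry-run flag mirrors the command's '-d' option:

#include <stdio.h>
#include <libpmempool.h>

int
main(void)
{
	unsigned flags = PMEMPOOL_SYNC_DRY_RUN;

	if (pmempool_sync("./poolset.file", flags)) {
		fprintf(stderr, "sync failed: %s\n", pmempool_errormsg());
		return 1;
	}
	printf("synchronized\n");
	return 0;
}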
NearPMSW-main/nearpm/shadow/pmdk-sd/src/tools/daxio/daxio.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2018-2020, Intel Corporation */ /* * daxio.c -- simple app for reading and writing data from/to * Device DAX device using mmap instead of file I/O API */ #include <assert.h> #include <stdio.h> #include <unistd.h> #include <getopt.h> #include <stdlib.h> #include <sys/mman.h> #include <errno.h> #include <fcntl.h> #include <inttypes.h> #include <sys/stat.h> #include <sys/sysmacros.h> #include <limits.h> #include <string.h> #include <ndctl/libndctl.h> #include <ndctl/libdaxctl.h> #include <libpmem.h> #include "util.h" #include "os.h" #include "badblocks.h" #define ALIGN_UP(size, align) (((size) + (align) - 1) & ~((align) - 1)) #define ALIGN_DOWN(size, align) ((size) & ~((align) - 1)) #define ERR(fmt, ...)\ do {\ fprintf(stderr, "daxio: " fmt, ##__VA_ARGS__);\ } while (0) #define FAIL(func)\ do {\ fprintf(stderr, "daxio: %s:%d: %s: %s\n",\ __func__, __LINE__, func, strerror(errno));\ } while (0) #define USAGE_MESSAGE \ "Usage: daxio [option] ...\n"\ "Valid options:\n"\ " -i, --input=FILE - input device/file (default stdin)\n"\ " -o, --output=FILE - output device/file (default stdout)\n"\ " -k, --skip=BYTES - skip offset for input (default 0)\n"\ " -s, --seek=BYTES - seek offset for output (default 0)\n"\ " -l, --len=BYTES - total length to perform the I/O\n"\ " -b, --clear-bad-blocks=<yes|no> - clear bad blocks (default: yes)\n"\ " -z, --zero - zeroing the device\n"\ " -h. --help - print this help\n"\ " -V, --version - display version of daxio\n" struct daxio_device { char *path; int fd; size_t size; /* actual file/device size */ int is_devdax; /* Device DAX only */ size_t align; /* internal device alignment */ char *addr; /* mapping base address */ size_t maplen; /* mapping length */ size_t offset; /* seek or skip */ unsigned major; unsigned minor; struct ndctl_ctx *ndctl_ctx; struct ndctl_region *region; /* parent region */ }; /* * daxio_context -- context and arguments */ struct daxio_context { size_t len; /* total length of I/O */ int zero; int clear_bad_blocks; struct daxio_device src; struct daxio_device dst; }; /* * default context */ static struct daxio_context Ctx = { SIZE_MAX, /* len */ 0, /* zero */ 1, /* clear_bad_blocks */ { NULL, -1, SIZE_MAX, 0, 0, NULL, 0, 0, 0, 0, NULL, NULL }, { NULL, -1, SIZE_MAX, 0, 0, NULL, 0, 0, 0, 0, NULL, NULL }, }; /* * print_version -- print daxio version */ static void print_version(void) { printf("%s\n", SRCVERSION); } /* * print_usage -- print short description of usage */ static void print_usage(void) { fprintf(stderr, USAGE_MESSAGE); } /* * long_options -- command line options */ static const struct option long_options[] = { {"input", required_argument, NULL, 'i'}, {"output", required_argument, NULL, 'o'}, {"skip", required_argument, NULL, 'k'}, {"seek", required_argument, NULL, 's'}, {"len", required_argument, NULL, 'l'}, {"clear-bad-blocks", required_argument, NULL, 'b'}, {"zero", no_argument, NULL, 'z'}, {"help", no_argument, NULL, 'h'}, {"version", no_argument, NULL, 'V'}, {NULL, 0, NULL, 0 }, }; /* * parse_args -- (internal) parse command line arguments */ static int parse_args(struct daxio_context *ctx, int argc, char * const argv[]) { int opt; size_t offset; size_t len; while ((opt = getopt_long(argc, argv, "i:o:k:s:l:b:zhV", long_options, NULL)) != -1) { switch (opt) { case 'i': ctx->src.path = optarg; break; case 'o': ctx->dst.path = optarg; break; case 'k': if (util_parse_size(optarg, &offset)) { ERR("'%s' -- invalid input offset\n", optarg); return -1; } ctx->src.offset = offset; 
break; case 's': if (util_parse_size(optarg, &offset)) { ERR("'%s' -- invalid output offset\n", optarg); return -1; } ctx->dst.offset = offset; break; case 'l': if (util_parse_size(optarg, &len)) { ERR("'%s' -- invalid length\n", optarg); return -1; } ctx->len = len; break; case 'z': ctx->zero = 1; break; case 'b': if (strcmp(optarg, "no") == 0) { ctx->clear_bad_blocks = 0; } else if (strcmp(optarg, "yes") == 0) { ctx->clear_bad_blocks = 1; } else { ERR( "'%s' -- invalid argument of the '--clear-bad-blocks' option\n", optarg); return -1; } break; case 'h': print_usage(); exit(EXIT_SUCCESS); case 'V': print_version(); exit(EXIT_SUCCESS); default: print_usage(); exit(EXIT_FAILURE); } } return 0; } /* * validate_args -- (internal) validate command line arguments */ static int validate_args(struct daxio_context *ctx) { if (ctx->zero && ctx->dst.path == NULL) { ERR("zeroing flag specified but no output file provided\n"); return -1; } if (!ctx->zero && ctx->src.path == NULL && ctx->dst.path == NULL) { ERR("an input file and/or an output file must be provided\n"); return -1; } /* if no input file provided, use stdin */ if (ctx->src.path == NULL) { if (ctx->src.offset != 0) { ERR( "skip offset specified but no input file provided\n"); return -1; } ctx->src.fd = STDIN_FILENO; ctx->src.path = "STDIN"; } /* if no output file provided, use stdout */ if (ctx->dst.path == NULL) { if (ctx->dst.offset != 0) { ERR( "seek offset specified but no output file provided\n"); return -1; } ctx->dst.fd = STDOUT_FILENO; ctx->dst.path = "STDOUT"; } return 0; } /* * match_dev_dax -- (internal) find Device DAX by major/minor device number */ static int match_dev_dax(struct daxio_device *dev, struct daxctl_region *dax_region) { struct daxctl_dev *d; daxctl_dev_foreach(dax_region, d) { if (dev->major == (unsigned)daxctl_dev_get_major(d) && dev->minor == (unsigned)daxctl_dev_get_minor(d)) { dev->size = daxctl_dev_get_size(d); return 1; } } return 0; } /* * find_dev_dax -- (internal) check if device is Device DAX * * If there is matching Device DAX, find its region, size and alignment. */ static int find_dev_dax(struct ndctl_ctx *ndctl_ctx, struct daxio_device *dev) { struct ndctl_bus *bus = NULL; struct ndctl_region *region = NULL; struct ndctl_dax *dax = NULL; struct daxctl_region *dax_region = NULL; ndctl_bus_foreach(ndctl_ctx, bus) { ndctl_region_foreach(bus, region) { ndctl_dax_foreach(region, dax) { dax_region = ndctl_dax_get_daxctl_region(dax); if (match_dev_dax(dev, dax_region)) { dev->is_devdax = 1; dev->align = ndctl_dax_get_align(dax); dev->region = region; return 1; } } } } /* try with dax regions */ struct daxctl_ctx *daxctl_ctx; if (daxctl_new(&daxctl_ctx)) return 0; int ret = 0; daxctl_region_foreach(daxctl_ctx, dax_region) { if (match_dev_dax(dev, dax_region)) { dev->is_devdax = 1; dev->align = daxctl_region_get_align(dax_region); dev->region = region; ret = 1; goto end; } } end: daxctl_unref(daxctl_ctx); return ret; } /* * setup_device -- (internal) open/mmap file/device */ static int setup_device(struct ndctl_ctx *ndctl_ctx, struct daxio_device *dev, int is_dst, int clear_bad_blocks) { int ret; int flags = O_RDWR; int prot = is_dst ? 
PROT_WRITE : PROT_READ; if (dev->fd != -1) { dev->size = SIZE_MAX; return 0; /* stdin/stdout */ } /* try to open file/device (if exists) */ dev->fd = os_open(dev->path, flags, S_IRUSR|S_IWUSR); if (dev->fd == -1) { ret = errno; if (ret == ENOENT && is_dst) { /* file does not exist - create it */ flags = O_CREAT|O_WRONLY|O_TRUNC; dev->size = SIZE_MAX; dev->fd = os_open(dev->path, flags, S_IRUSR|S_IWUSR); if (dev->fd == -1) { FAIL("open"); return -1; } return 0; } else { ERR("failed to open '%s': %s\n", dev->path, strerror(errno)); return -1; } } struct stat stbuf; ret = fstat(dev->fd, &stbuf); if (ret == -1) { FAIL("stat"); return -1; } /* check if this is regular file or device */ if (S_ISREG(stbuf.st_mode)) { if (is_dst) dev->size = SIZE_MAX; else dev->size = (size_t)stbuf.st_size; } else if (S_ISBLK(stbuf.st_mode)) { dev->size = (size_t)stbuf.st_size; } else if (S_ISCHR(stbuf.st_mode)) { dev->size = SIZE_MAX; dev->major = major(stbuf.st_rdev); dev->minor = minor(stbuf.st_rdev); } else { return -1; } /* check if this is Device DAX */ if (S_ISCHR(stbuf.st_mode)) find_dev_dax(ndctl_ctx, dev); if (!dev->is_devdax) return 0; if (is_dst && clear_bad_blocks) { /* XXX - clear only badblocks in range bound by offset/len */ if (badblocks_clear_all(dev->path)) { ERR("failed to clear bad blocks on \"%s\"\n" " Probably you have not enough permissions to do that.\n" " You can choose one of three options now:\n" " 1) run 'daxio' with 'sudo' or as 'root',\n" " 2) turn off clearing bad blocks using\n" " the '-b/--clear-bad-blocks=no' option or\n" " 3) change permissions of some resource files -\n" " - for details see the description of the CHECK_BAD_BLOCKS\n" " compat feature in the pmempool-feature(1) man page.\n", dev->path); return -1; } } if (dev->align == ULONG_MAX) { ERR("cannot determine device alignment for \"%s\"\n", dev->path); return -1; } if (dev->offset > dev->size) { ERR("'%zu' -- offset beyond device size (%zu)\n", dev->offset, dev->size); return -1; } /* align len/offset to the internal device alignment */ dev->maplen = ALIGN_UP(dev->size, dev->align); size_t offset = ALIGN_DOWN(dev->offset, dev->align); dev->offset = dev->offset - offset; dev->maplen = dev->maplen - offset; dev->addr = mmap(NULL, dev->maplen, prot, MAP_SHARED, dev->fd, (off_t)offset); if (dev->addr == MAP_FAILED) { FAIL("mmap"); return -1; } return 0; } /* * setup_devices -- (internal) open/mmap input and output */ static int setup_devices(struct ndctl_ctx *ndctl_ctx, struct daxio_context *ctx) { if (!ctx->zero && setup_device(ndctl_ctx, &ctx->src, 0, ctx->clear_bad_blocks)) return -1; return setup_device(ndctl_ctx, &ctx->dst, 1, ctx->clear_bad_blocks); } /* * adjust_io_len -- (internal) calculate I/O length if not specified */ static void adjust_io_len(struct daxio_context *ctx) { size_t src_len = ctx->src.maplen - ctx->src.offset; size_t dst_len = ctx->dst.maplen - ctx->dst.offset; size_t max_len = SIZE_MAX; if (ctx->zero) assert(ctx->dst.is_devdax); else assert(ctx->src.is_devdax || ctx->dst.is_devdax); if (ctx->src.is_devdax) max_len = src_len; if (ctx->dst.is_devdax) max_len = max_len < dst_len ? 
max_len : dst_len; /* if length is specified and is not bigger than mmapped region */ if (ctx->len != SIZE_MAX && ctx->len <= max_len) return; /* adjust len to device size */ ctx->len = max_len; } /* * cleanup_device -- (internal) unmap/close file/device */ static void cleanup_device(struct daxio_device *dev) { if (dev->addr) (void) munmap(dev->addr, dev->maplen); if (dev->path && dev->fd != -1) (void) close(dev->fd); } /* * cleanup_devices -- (internal) unmap/close input and output */ static void cleanup_devices(struct daxio_context *ctx) { cleanup_device(&ctx->dst); if (!ctx->zero) cleanup_device(&ctx->src); } /* * do_io -- (internal) write data to device/file */ static int do_io(struct ndctl_ctx *ndctl_ctx, struct daxio_context *ctx) { ssize_t cnt = 0; assert(ctx->src.is_devdax || ctx->dst.is_devdax); if (ctx->zero) { if (ctx->dst.offset > ctx->dst.maplen) { ERR("output offset larger than device size"); return -1; } if (ctx->dst.offset + ctx->len > ctx->dst.maplen) { ERR("output offset beyond device size"); return -1; } char *dst_addr = ctx->dst.addr + ctx->dst.offset; pmem_memset_persist(dst_addr, 0, ctx->len); cnt = (ssize_t)ctx->len; } else if (ctx->src.is_devdax && ctx->dst.is_devdax) { /* memcpy between src and dst */ char *src_addr = ctx->src.addr + ctx->src.offset; char *dst_addr = ctx->dst.addr + ctx->dst.offset; pmem_memcpy_persist(dst_addr, src_addr, ctx->len); cnt = (ssize_t)ctx->len; } else if (ctx->src.is_devdax) { /* write to file directly from mmap'ed src */ char *src_addr = ctx->src.addr + ctx->src.offset; if (ctx->dst.offset) { if (lseek(ctx->dst.fd, (off_t)ctx->dst.offset, SEEK_SET) < 0) { FAIL("lseek"); goto err; } } do { ssize_t wcnt = write(ctx->dst.fd, src_addr + cnt, ctx->len - (size_t)cnt); if (wcnt == -1) { FAIL("write"); goto err; } cnt += wcnt; } while ((size_t)cnt < ctx->len); } else if (ctx->dst.is_devdax) { /* read from file directly to mmap'ed dst */ char *dst_addr = ctx->dst.addr + ctx->dst.offset; if (ctx->src.offset) { if (lseek(ctx->src.fd, (off_t)ctx->src.offset, SEEK_SET) < 0) { FAIL("lseek"); return -1; } } do { ssize_t rcnt = read(ctx->src.fd, dst_addr + cnt, ctx->len - (size_t)cnt); if (rcnt == -1) { FAIL("read"); goto err; } /* end of file */ if (rcnt == 0) break; cnt = cnt + rcnt; } while ((size_t)cnt < ctx->len); pmem_persist(dst_addr, (size_t)cnt); if ((size_t)cnt != ctx->len) ERR("requested size %zu larger than source\n", ctx->len); } ERR("copied %zd bytes to device \"%s\"\n", cnt, ctx->dst.path); return 0; err: ERR("failed to perform I/O\n"); return -1; } int main(int argc, char **argv) { struct ndctl_ctx *ndctl_ctx; int ret = EXIT_SUCCESS; if (parse_args(&Ctx, argc, argv)) return EXIT_FAILURE; if (validate_args(&Ctx)) return EXIT_FAILURE; if (ndctl_new(&ndctl_ctx)) return EXIT_FAILURE; if (setup_devices(ndctl_ctx, &Ctx)) { ret = EXIT_FAILURE; goto err; } if (!Ctx.src.is_devdax && !Ctx.dst.is_devdax) { ERR("neither input nor output is device dax\n"); ret = EXIT_FAILURE; goto err; } adjust_io_len(&Ctx); if (do_io(ndctl_ctx, &Ctx)) ret = EXIT_FAILURE; err: cleanup_devices(&Ctx); ndctl_unref(ndctl_ctx); return ret; }
file_length: 14,160
avg_line_length: 22.291118
max_line_length: 79
extension_type: c
null
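setup_device above maps a Device DAX range by rounding the mapping length up and the user-supplied offset down to the device alignment, keeping the remainder as an in-mapping offset. The same arithmetic as a standalone sketch (macros copied from the source, sample sizes hypothetical):

#include <stdio.h>
#include <stddef.h>

#define ALIGN_UP(size, align)	(((size) + (align) - 1) & ~((align) - 1))
#define ALIGN_DOWN(size, align)	((size) & ~((align) - 1))

int
main(void)
{
	size_t align  = 2 * 1024 * 1024;	/* 2 MiB device alignment */
	size_t size   = 17 * 1024 * 1024;	/* device size */
	size_t offset = 3 * 1024 * 1024 + 4096;	/* user-requested seek */

	size_t maplen     = ALIGN_UP(size, align);
	size_t map_offset = ALIGN_DOWN(offset, align);
	size_t in_map_off = offset - map_offset;	/* offset inside mmap */

	printf("mmap offset %zu, length %zu, I/O starts %zu bytes in\n",
		map_offset, maplen - map_offset, in_map_off);
	return 0;
}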
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmemlog/log.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2014-2020, Intel Corporation */ /* * log.h -- internal definitions for libpmem log module */ #ifndef LOG_H #define LOG_H 1 #include <stdint.h> #include <stddef.h> #include <endian.h> #include "ctl.h" #include "util.h" #include "os_thread.h" #include "pool_hdr.h" #include "page_size.h" #ifdef __cplusplus extern "C" { #endif #include "alloc.h" #include "fault_injection.h" #define PMEMLOG_LOG_PREFIX "libpmemlog" #define PMEMLOG_LOG_LEVEL_VAR "PMEMLOG_LOG_LEVEL" #define PMEMLOG_LOG_FILE_VAR "PMEMLOG_LOG_FILE" /* attributes of the log memory pool format for the pool header */ #define LOG_HDR_SIG "PMEMLOG" /* must be 8 bytes including '\0' */ #define LOG_FORMAT_MAJOR 1 #define LOG_FORMAT_FEAT_DEFAULT \ {POOL_FEAT_COMPAT_DEFAULT, POOL_FEAT_INCOMPAT_DEFAULT, 0x0000} #define LOG_FORMAT_FEAT_CHECK \ {POOL_FEAT_COMPAT_VALID, POOL_FEAT_INCOMPAT_VALID, 0x0000} static const features_t log_format_feat_default = LOG_FORMAT_FEAT_DEFAULT; struct pmemlog { struct pool_hdr hdr; /* memory pool header */ /* root info for on-media format... */ uint64_t start_offset; /* start offset of the usable log space */ uint64_t end_offset; /* maximum offset of the usable log space */ uint64_t write_offset; /* current write point for the log */ /* some run-time state, allocated out of memory pool... */ void *addr; /* mapped region */ size_t size; /* size of mapped region */ int is_pmem; /* true if pool is PMEM */ int rdonly; /* true if pool is opened read-only */ os_rwlock_t *rwlockp; /* pointer to RW lock */ int is_dev_dax; /* true if mapped on device dax */ struct ctl *ctl; /* top level node of the ctl tree structure */ struct pool_set *set; /* pool set info */ }; /* data area starts at this alignment after the struct pmemlog above */ #define LOG_FORMAT_DATA_ALIGN ((uintptr_t)PMEM_PAGESIZE) /* * log_convert2h -- convert pmemlog structure to host byte order */ static inline void log_convert2h(struct pmemlog *plp) { plp->start_offset = le64toh(plp->start_offset); plp->end_offset = le64toh(plp->end_offset); plp->write_offset = le64toh(plp->write_offset); } /* * log_convert2le -- convert pmemlog structure to LE byte order */ static inline void log_convert2le(struct pmemlog *plp) { plp->start_offset = htole64(plp->start_offset); plp->end_offset = htole64(plp->end_offset); plp->write_offset = htole64(plp->write_offset); } #if FAULT_INJECTION void pmemlog_inject_fault_at(enum pmem_allocation_type type, int nth, const char *at); int pmemlog_fault_injection_enabled(void); #else static inline void pmemlog_inject_fault_at(enum pmem_allocation_type type, int nth, const char *at) { abort(); } static inline int pmemlog_fault_injection_enabled(void) { return 0; } #endif #ifdef __cplusplus } #endif #endif
file_length: 2,832
avg_line_length: 23.422414
max_line_length: 74
extension_type: h
null
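log.h above defines the on-media pmemlog layout consumed by the public libpmemlog entry points. A minimal usage sketch of that API; "/pmem/log.pool" is a hypothetical path:

#include <stdio.h>
#include <libpmemlog.h>

/* walk callback: print each chunk; non-zero return keeps walking */
static int
print_chunk(const void *buf, size_t len, void *arg)
{
	(void) arg;
	printf("%.*s", (int)len, (const char *)buf);
	return 1;
}

int
main(void)
{
	PMEMlogpool *plp = pmemlog_create("/pmem/log.pool",
			PMEMLOG_MIN_POOL, 0666);
	if (plp == NULL)
		return 1;

	const char line[] = "hello pmemlog\n";
	if (pmemlog_append(plp, line, sizeof(line) - 1))
		perror("pmemlog_append");

	/* chunksize 0: process all appended data in a single callback */
	pmemlog_walk(plp, 0, print_chunk, NULL);

	pmemlog_close(plp);
	return 0;
}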
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmemlog/log.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2014-2020, Intel Corporation */ /* * log.c -- log memory pool entry points for libpmem */ #include <inttypes.h> #include <stdio.h> #include <string.h> #include <sys/types.h> #include <sys/param.h> #include <unistd.h> #include <errno.h> #include <time.h> #include <stdint.h> #include <stdbool.h> #include "libpmem.h" #include "libpmemlog.h" #include "ctl_global.h" #include "os.h" #include "set.h" #include "out.h" #include "log.h" #include "mmap.h" #include "sys_util.h" #include "util_pmem.h" #include "valgrind_internal.h" static const struct pool_attr Log_create_attr = { LOG_HDR_SIG, LOG_FORMAT_MAJOR, LOG_FORMAT_FEAT_DEFAULT, {0}, {0}, {0}, {0}, {0} }; static const struct pool_attr Log_open_attr = { LOG_HDR_SIG, LOG_FORMAT_MAJOR, LOG_FORMAT_FEAT_CHECK, {0}, {0}, {0}, {0}, {0} }; /* * log_descr_create -- (internal) create log memory pool descriptor */ static void log_descr_create(PMEMlogpool *plp, size_t poolsize) { LOG(3, "plp %p poolsize %zu", plp, poolsize); ASSERTeq(poolsize % Pagesize, 0); /* create required metadata */ plp->start_offset = htole64(roundup(sizeof(*plp), LOG_FORMAT_DATA_ALIGN)); plp->end_offset = htole64(poolsize); plp->write_offset = plp->start_offset; /* store non-volatile part of pool's descriptor */ util_persist(plp->is_pmem, &plp->start_offset, 3 * sizeof(uint64_t)); } /* * log_descr_check -- (internal) validate log memory pool descriptor */ static int log_descr_check(PMEMlogpool *plp, size_t poolsize) { LOG(3, "plp %p poolsize %zu", plp, poolsize); struct pmemlog hdr = *plp; log_convert2h(&hdr); if ((hdr.start_offset != roundup(sizeof(*plp), LOG_FORMAT_DATA_ALIGN)) || (hdr.end_offset != poolsize) || (hdr.start_offset > hdr.end_offset)) { ERR("wrong start/end offsets " "(start: %" PRIu64 " end: %" PRIu64 "), " "pool size %zu", hdr.start_offset, hdr.end_offset, poolsize); errno = EINVAL; return -1; } if ((hdr.write_offset > hdr.end_offset) || (hdr.write_offset < hdr.start_offset)) { ERR("wrong write offset (start: %" PRIu64 " end: %" PRIu64 " write: %" PRIu64 ")", hdr.start_offset, hdr.end_offset, hdr.write_offset); errno = EINVAL; return -1; } LOG(3, "start: %" PRIu64 ", end: %" PRIu64 ", write: %" PRIu64 "", hdr.start_offset, hdr.end_offset, hdr.write_offset); return 0; } /* * log_runtime_init -- (internal) initialize log memory pool runtime data */ static int log_runtime_init(PMEMlogpool *plp, int rdonly) { LOG(3, "plp %p rdonly %d", plp, rdonly); /* remove volatile part of header */ VALGRIND_REMOVE_PMEM_MAPPING(&plp->addr, sizeof(struct pmemlog) - sizeof(struct pool_hdr) - 3 * sizeof(uint64_t)); /* * Use some of the memory pool area for run-time info. This * run-time state is never loaded from the file, it is always * created here, so no need to worry about byte-order. */ plp->rdonly = rdonly; if ((plp->rwlockp = Malloc(sizeof(*plp->rwlockp))) == NULL) { ERR("!Malloc for a RW lock"); return -1; } util_rwlock_init(plp->rwlockp); /* * If possible, turn off all permissions on the pool header page. * * The prototype PMFS doesn't allow this when large pages are in * use. It is not considered an error if this fails. 
*/ RANGE_NONE(plp->addr, sizeof(struct pool_hdr), plp->is_dev_dax); /* the rest should be kept read-only (debug version only) */ RANGE_RO((char *)plp->addr + sizeof(struct pool_hdr), plp->size - sizeof(struct pool_hdr), plp->is_dev_dax); return 0; } /* * pmemlog_createU -- create a log memory pool */ #ifndef _WIN32 static inline #endif PMEMlogpool * pmemlog_createU(const char *path, size_t poolsize, mode_t mode) { LOG(3, "path %s poolsize %zu mode %d", path, poolsize, mode); struct pool_set *set; struct pool_attr adj_pool_attr = Log_create_attr; /* force set SDS feature */ if (SDS_at_create) adj_pool_attr.features.incompat |= POOL_FEAT_SDS; else adj_pool_attr.features.incompat &= ~POOL_FEAT_SDS; if (util_pool_create(&set, path, poolsize, PMEMLOG_MIN_POOL, PMEMLOG_MIN_PART, &adj_pool_attr, NULL, REPLICAS_DISABLED) != 0) { LOG(2, "cannot create pool or pool set"); return NULL; } ASSERT(set->nreplicas > 0); struct pool_replica *rep = set->replica[0]; PMEMlogpool *plp = rep->part[0].addr; VALGRIND_REMOVE_PMEM_MAPPING(&plp->addr, sizeof(struct pmemlog) - ((uintptr_t)&plp->addr - (uintptr_t)&plp->hdr)); plp->addr = plp; plp->size = rep->repsize; plp->set = set; plp->is_pmem = rep->is_pmem; plp->is_dev_dax = rep->part[0].is_dev_dax; /* is_dev_dax implies is_pmem */ ASSERT(!plp->is_dev_dax || plp->is_pmem); /* create pool descriptor */ log_descr_create(plp, rep->repsize); /* initialize runtime parts */ if (log_runtime_init(plp, 0) != 0) { ERR("pool initialization failed"); goto err; } if (util_poolset_chmod(set, mode)) goto err; util_poolset_fdclose(set); LOG(3, "plp %p", plp); return plp; err: LOG(4, "error clean up"); int oerrno = errno; util_poolset_close(set, DELETE_CREATED_PARTS); errno = oerrno; return NULL; } #ifndef _WIN32 /* * pmemlog_create -- create a log memory pool */ PMEMlogpool * pmemlog_create(const char *path, size_t poolsize, mode_t mode) { return pmemlog_createU(path, poolsize, mode); } #else /* * pmemlog_createW -- create a log memory pool */ PMEMlogpool * pmemlog_createW(const wchar_t *path, size_t poolsize, mode_t mode) { char *upath = util_toUTF8(path); if (upath == NULL) return NULL; PMEMlogpool *ret = pmemlog_createU(upath, poolsize, mode); util_free_UTF8(upath); return ret; } #endif /* * log_open_common -- (internal) open a log memory pool * * This routine does all the work, but takes a cow flag so internal * calls can map a read-only pool if required. 
*/ static PMEMlogpool * log_open_common(const char *path, unsigned flags) { LOG(3, "path %s flags 0x%x", path, flags); struct pool_set *set; if (util_pool_open(&set, path, PMEMLOG_MIN_PART, &Log_open_attr, NULL, NULL, flags) != 0) { LOG(2, "cannot open pool or pool set"); return NULL; } ASSERT(set->nreplicas > 0); struct pool_replica *rep = set->replica[0]; PMEMlogpool *plp = rep->part[0].addr; VALGRIND_REMOVE_PMEM_MAPPING(&plp->addr, sizeof(struct pmemlog) - ((uintptr_t)&plp->addr - (uintptr_t)&plp->hdr)); plp->addr = plp; plp->size = rep->repsize; plp->set = set; plp->is_pmem = rep->is_pmem; plp->is_dev_dax = rep->part[0].is_dev_dax; /* is_dev_dax implies is_pmem */ ASSERT(!plp->is_dev_dax || plp->is_pmem); if (set->nreplicas > 1) { errno = ENOTSUP; ERR("!replicas not supported"); goto err; } /* validate pool descriptor */ if (log_descr_check(plp, rep->repsize) != 0) { LOG(2, "descriptor check failed"); goto err; } /* initialize runtime parts */ if (log_runtime_init(plp, set->rdonly) != 0) { ERR("pool initialization failed"); goto err; } util_poolset_fdclose(set); LOG(3, "plp %p", plp); return plp; err: LOG(4, "error clean up"); int oerrno = errno; util_poolset_close(set, DO_NOT_DELETE_PARTS); errno = oerrno; return NULL; } /* * pmemlog_openU -- open an existing log memory pool */ #ifndef _WIN32 static inline #endif PMEMlogpool * pmemlog_openU(const char *path) { LOG(3, "path %s", path); return log_open_common(path, COW_at_open ? POOL_OPEN_COW : 0); } #ifndef _WIN32 /* * pmemlog_open -- open an existing log memory pool */ PMEMlogpool * pmemlog_open(const char *path) { return pmemlog_openU(path); } #else /* * pmemlog_openW -- open an existing log memory pool */ PMEMlogpool * pmemlog_openW(const wchar_t *path) { char *upath = util_toUTF8(path); if (upath == NULL) return NULL; PMEMlogpool *ret = pmemlog_openU(upath); util_free_UTF8(upath); return ret; } #endif /* * pmemlog_close -- close a log memory pool */ void pmemlog_close(PMEMlogpool *plp) { LOG(3, "plp %p", plp); util_rwlock_destroy(plp->rwlockp); Free((void *)plp->rwlockp); util_poolset_close(plp->set, DO_NOT_DELETE_PARTS); } /* * pmemlog_nbyte -- return usable size of a log memory pool */ size_t pmemlog_nbyte(PMEMlogpool *plp) { LOG(3, "plp %p", plp); util_rwlock_rdlock(plp->rwlockp); size_t size = le64toh(plp->end_offset) - le64toh(plp->start_offset); LOG(4, "plp %p nbyte %zu", plp, size); util_rwlock_unlock(plp->rwlockp); return size; } /* * log_persist -- (internal) persist data, then metadata * * On entry, the write lock should be held. 
*/ static void log_persist(PMEMlogpool *plp, uint64_t new_write_offset) { uint64_t old_write_offset = le64toh(plp->write_offset); size_t length = new_write_offset - old_write_offset; /* unprotect the log space range (debug version only) */ RANGE_RW((char *)plp->addr + old_write_offset, length, plp->is_dev_dax); /* persist the data */ if (plp->is_pmem) pmem_drain(); /* data already flushed */ else pmem_msync((char *)plp->addr + old_write_offset, length); /* protect the log space range (debug version only) */ RANGE_RO((char *)plp->addr + old_write_offset, length, plp->is_dev_dax); /* unprotect the pool descriptor (debug version only) */ RANGE_RW((char *)plp->addr + sizeof(struct pool_hdr), LOG_FORMAT_DATA_ALIGN, plp->is_dev_dax); /* write the metadata */ plp->write_offset = htole64(new_write_offset); /* persist the metadata */ if (plp->is_pmem) pmem_persist(&plp->write_offset, sizeof(plp->write_offset)); else pmem_msync(&plp->write_offset, sizeof(plp->write_offset)); /* set the write-protection again (debug version only) */ RANGE_RO((char *)plp->addr + sizeof(struct pool_hdr), LOG_FORMAT_DATA_ALIGN, plp->is_dev_dax); } /* * pmemlog_append -- add data to a log memory pool */ int pmemlog_append(PMEMlogpool *plp, const void *buf, size_t count) { int ret = 0; LOG(3, "plp %p buf %p count %zu", plp, buf, count); if (plp->rdonly) { ERR("can't append to read-only log"); errno = EROFS; return -1; } util_rwlock_wrlock(plp->rwlockp); /* get the current values */ uint64_t end_offset = le64toh(plp->end_offset); uint64_t write_offset = le64toh(plp->write_offset); if (write_offset >= end_offset) { /* no space left */ errno = ENOSPC; ERR("!pmemlog_append"); ret = -1; goto end; } /* make sure we don't write past the available space */ if (count > (end_offset - write_offset)) { errno = ENOSPC; ERR("!pmemlog_append"); ret = -1; goto end; } char *data = plp->addr; /* * unprotect the log space range, where the new data will be stored * (debug version only) */ RANGE_RW(&data[write_offset], count, plp->is_dev_dax); if (plp->is_pmem) pmem_memcpy_nodrain(&data[write_offset], buf, count); else memcpy(&data[write_offset], buf, count); /* protect the log space range (debug version only) */ RANGE_RO(&data[write_offset], count, plp->is_dev_dax); write_offset += count; /* persist the data and the metadata */ log_persist(plp, write_offset); end: util_rwlock_unlock(plp->rwlockp); return ret; } /* * pmemlog_appendv -- add gathered data to a log memory pool */ int pmemlog_appendv(PMEMlogpool *plp, const struct iovec *iov, int iovcnt) { LOG(3, "plp %p iovec %p iovcnt %d", plp, iov, iovcnt); int ret = 0; int i; if (iovcnt < 0) { errno = EINVAL; ERR("iovcnt is less than zero: %d", iovcnt); return -1; } if (plp->rdonly) { ERR("can't append to read-only log"); errno = EROFS; return -1; } util_rwlock_wrlock(plp->rwlockp); /* get the current values */ uint64_t end_offset = le64toh(plp->end_offset); uint64_t write_offset = le64toh(plp->write_offset); if (write_offset >= end_offset) { /* no space left */ errno = ENOSPC; ERR("!pmemlog_appendv"); ret = -1; goto end; } char *data = plp->addr; uint64_t count = 0; char *buf; /* calculate required space */ for (i = 0; i < iovcnt; ++i) count += iov[i].iov_len; /* check if there is enough free space */ if (count > (end_offset - write_offset)) { errno = ENOSPC; ret = -1; goto end; } /* append the data */ for (i = 0; i < iovcnt; ++i) { buf = iov[i].iov_base; count = iov[i].iov_len; /* * unprotect the log space range, where the new data will be * stored (debug version only) */ 
RANGE_RW(&data[write_offset], count, plp->is_dev_dax); if (plp->is_pmem) pmem_memcpy_nodrain(&data[write_offset], buf, count); else memcpy(&data[write_offset], buf, count); /* * protect the log space range (debug version only) */ RANGE_RO(&data[write_offset], count, plp->is_dev_dax); write_offset += count; } /* persist the data and the metadata */ log_persist(plp, write_offset); end: util_rwlock_unlock(plp->rwlockp); return ret; } /* * pmemlog_tell -- return current write point in a log memory pool */ long long pmemlog_tell(PMEMlogpool *plp) { LOG(3, "plp %p", plp); util_rwlock_rdlock(plp->rwlockp); ASSERT(le64toh(plp->write_offset) >= le64toh(plp->start_offset)); long long wp = (long long)(le64toh(plp->write_offset) - le64toh(plp->start_offset)); LOG(4, "write offset %lld", wp); util_rwlock_unlock(plp->rwlockp); return wp; } /* * pmemlog_rewind -- discard all data, resetting a log memory pool to empty */ void pmemlog_rewind(PMEMlogpool *plp) { LOG(3, "plp %p", plp); if (plp->rdonly) { ERR("can't rewind read-only log"); errno = EROFS; return; } util_rwlock_wrlock(plp->rwlockp); /* unprotect the pool descriptor (debug version only) */ RANGE_RW((char *)plp->addr + sizeof(struct pool_hdr), LOG_FORMAT_DATA_ALIGN, plp->is_dev_dax); plp->write_offset = plp->start_offset; if (plp->is_pmem) pmem_persist(&plp->write_offset, sizeof(uint64_t)); else pmem_msync(&plp->write_offset, sizeof(uint64_t)); /* set the write-protection again (debug version only) */ RANGE_RO((char *)plp->addr + sizeof(struct pool_hdr), LOG_FORMAT_DATA_ALIGN, plp->is_dev_dax); util_rwlock_unlock(plp->rwlockp); } /* * pmemlog_walk -- walk through all data in a log memory pool * * chunksize of 0 means process_chunk gets called once for all data * as a single chunk. */ void pmemlog_walk(PMEMlogpool *plp, size_t chunksize, int (*process_chunk)(const void *buf, size_t len, void *arg), void *arg) { LOG(3, "plp %p chunksize %zu", plp, chunksize); /* * We are assuming that the walker doesn't change the data it's reading * in place. We prevent everyone from changing the data behind our back * until we are done with processing it. */ util_rwlock_rdlock(plp->rwlockp); char *data = plp->addr; uint64_t write_offset = le64toh(plp->write_offset); uint64_t data_offset = le64toh(plp->start_offset); size_t len; if (chunksize == 0) { /* most common case: process everything at once */ len = write_offset - data_offset; LOG(3, "length %zu", len); (*process_chunk)(&data[data_offset], len, arg); } else { /* * Walk through the complete record, chunk by chunk. * The callback returns 0 to terminate the walk. */ while (data_offset < write_offset) { len = MIN(chunksize, write_offset - data_offset); if (!(*process_chunk)(&data[data_offset], len, arg)) break; data_offset += chunksize; } } util_rwlock_unlock(plp->rwlockp); } /* * pmemlog_checkU -- log memory pool consistency check * * Returns true if consistent, zero if inconsistent, -1/error if checking * cannot happen due to other errors. 
*/ #ifndef _WIN32 static inline #endif int pmemlog_checkU(const char *path) { LOG(3, "path \"%s\"", path); PMEMlogpool *plp = log_open_common(path, POOL_OPEN_COW); if (plp == NULL) return -1; /* errno set by log_open_common() */ int consistent = 1; /* validate pool descriptor */ uint64_t hdr_start = le64toh(plp->start_offset); uint64_t hdr_end = le64toh(plp->end_offset); uint64_t hdr_write = le64toh(plp->write_offset); if (hdr_start != roundup(sizeof(*plp), LOG_FORMAT_DATA_ALIGN)) { ERR("wrong value of start_offset"); consistent = 0; } if (hdr_end != plp->size) { ERR("wrong value of end_offset"); consistent = 0; } if (hdr_start > hdr_end) { ERR("start_offset greater than end_offset"); consistent = 0; } if (hdr_start > hdr_write) { ERR("start_offset greater than write_offset"); consistent = 0; } if (hdr_write > hdr_end) { ERR("write_offset greater than end_offset"); consistent = 0; } pmemlog_close(plp); if (consistent) LOG(4, "pool consistency check OK"); return consistent; } #ifndef _WIN32 /* * pmemlog_check -- log memory pool consistency check * * Returns true if consistent, zero if inconsistent, -1/error if checking * cannot happen due to other errors. */ int pmemlog_check(const char *path) { return pmemlog_checkU(path); } #else /* * pmemlog_checkW -- log memory pool consistency check */ int pmemlog_checkW(const wchar_t *path) { char *upath = util_toUTF8(path); if (upath == NULL) return -1; int ret = pmemlog_checkU(upath); util_free_UTF8(upath); return ret; } #endif /* * pmemlog_ctl_getU -- programmatically executes a read ctl query */ #ifndef _WIN32 static inline #endif int pmemlog_ctl_getU(PMEMlogpool *plp, const char *name, void *arg) { LOG(3, "plp %p name %s arg %p", plp, name, arg); return ctl_query(plp == NULL ? NULL : plp->ctl, plp, CTL_QUERY_PROGRAMMATIC, name, CTL_QUERY_READ, arg); } /* * pmemblk_ctl_setU -- programmatically executes a write ctl query */ #ifndef _WIN32 static inline #endif int pmemlog_ctl_setU(PMEMlogpool *plp, const char *name, void *arg) { LOG(3, "plp %p name %s arg %p", plp, name, arg); return ctl_query(plp == NULL ? NULL : plp->ctl, plp, CTL_QUERY_PROGRAMMATIC, name, CTL_QUERY_WRITE, arg); } /* * pmemlog_ctl_execU -- programmatically executes a runnable ctl query */ #ifndef _WIN32 static inline #endif int pmemlog_ctl_execU(PMEMlogpool *plp, const char *name, void *arg) { LOG(3, "plp %p name %s arg %p", plp, name, arg); return ctl_query(plp == NULL ? 
NULL : plp->ctl, plp, CTL_QUERY_PROGRAMMATIC, name, CTL_QUERY_RUNNABLE, arg); } #ifndef _WIN32 /* * pmemlog_ctl_get -- programmatically executes a read ctl query */ int pmemlog_ctl_get(PMEMlogpool *plp, const char *name, void *arg) { return pmemlog_ctl_getU(plp, name, arg); } /* * pmemlog_ctl_set -- programmatically executes a write ctl query */ int pmemlog_ctl_set(PMEMlogpool *plp, const char *name, void *arg) { return pmemlog_ctl_setU(plp, name, arg); } /* * pmemlog_ctl_exec -- programmatically executes a runnable ctl query */ int pmemlog_ctl_exec(PMEMlogpool *plp, const char *name, void *arg) { return pmemlog_ctl_execU(plp, name, arg); } #else /* * pmemlog_ctl_getW -- programmatically executes a read ctl query */ int pmemlog_ctl_getW(PMEMlogpool *plp, const wchar_t *name, void *arg) { char *uname = util_toUTF8(name); if (uname == NULL) return -1; int ret = pmemlog_ctl_getU(plp, uname, arg); util_free_UTF8(uname); return ret; } /* * pmemlog_ctl_setW -- programmatically executes a write ctl query */ int pmemlog_ctl_setW(PMEMlogpool *plp, const wchar_t *name, void *arg) { char *uname = util_toUTF8(name); if (uname == NULL) return -1; int ret = pmemlog_ctl_setU(plp, uname, arg); util_free_UTF8(uname); return ret; } /* * pmemlog_ctl_execW -- programmatically executes a runnable ctl query */ int pmemlog_ctl_execW(PMEMlogpool *plp, const wchar_t *name, void *arg) { char *uname = util_toUTF8(name); if (uname == NULL) return -1; int ret = pmemlog_ctl_execU(plp, uname, arg); util_free_UTF8(uname); return ret; } #endif #if FAULT_INJECTION void pmemlog_inject_fault_at(enum pmem_allocation_type type, int nth, const char *at) { core_inject_fault_at(type, nth, at); } int pmemlog_fault_injection_enabled(void) { return core_fault_injection_enabled(); } #endif
19,695
20.982143
75
c
null
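The record above is the pool-management half of libpmemlog's log.c (create/open, append, appendv, tell, rewind, walk, check, ctl entry points). A minimal usage sketch of the public API that code implements is below; the pool path /pmem/log and the 0666 mode are placeholders and error handling is abbreviated, so treat it as an illustration rather than a canonical example.

#include <libpmemlog.h>
#include <stdio.h>
#include <string.h>

/* pmemlog_walk() callback: print each chunk, nonzero return continues the walk */
static int
print_chunk(const void *buf, size_t len, void *arg)
{
	(void)arg;
	fwrite(buf, 1, len, stdout);
	return 1;
}

int
main(void)
{
	/* create a new pool, or fall back to opening an existing one */
	PMEMlogpool *plp = pmemlog_create("/pmem/log", PMEMLOG_MIN_POOL, 0666);
	if (plp == NULL)
		plp = pmemlog_open("/pmem/log");
	if (plp == NULL)
		return 1;

	const char msg[] = "hello, persistent log\n";
	if (pmemlog_append(plp, msg, strlen(msg)) < 0)
		perror("pmemlog_append");

	printf("write point %lld, usable bytes %zu\n",
	    pmemlog_tell(plp), pmemlog_nbyte(plp));

	/* chunksize 0: the callback sees all appended data as one chunk */
	pmemlog_walk(plp, 0, print_chunk, NULL);

	pmemlog_close(plp);
	return 0;
}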
NearPMSW-main/nearpm/shadow/pmdk-sd/src/libpmemlog/libpmemlog.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2014-2018, Intel Corporation */ /* * libpmemlog.c -- pmem entry points for libpmemlog */ #include <stdio.h> #include <stdint.h> #include "libpmemlog.h" #include "ctl_global.h" #include "pmemcommon.h" #include "log.h" /* * The variable from which the config is directly loaded. The string * cannot contain any comments or extraneous white characters. */ #define LOG_CONFIG_ENV_VARIABLE "PMEMLOG_CONF" /* * The variable that points to a config file from which the config is loaded. */ #define LOG_CONFIG_FILE_ENV_VARIABLE "PMEMLOG_CONF_FILE" /* * log_ctl_init_and_load -- (static) initializes CTL and loads configuration * from env variable and file */ static int log_ctl_init_and_load(PMEMlogpool *plp) { LOG(3, "plp %p", plp); if (plp != NULL && (plp->ctl = ctl_new()) == NULL) { LOG(2, "!ctl_new"); return -1; } char *env_config = os_getenv(LOG_CONFIG_ENV_VARIABLE); if (env_config != NULL) { if (ctl_load_config_from_string(plp ? plp->ctl : NULL, plp, env_config) != 0) { LOG(2, "unable to parse config stored in %s " "environment variable", LOG_CONFIG_ENV_VARIABLE); goto err; } } char *env_config_file = os_getenv(LOG_CONFIG_FILE_ENV_VARIABLE); if (env_config_file != NULL && env_config_file[0] != '\0') { if (ctl_load_config_from_file(plp ? plp->ctl : NULL, plp, env_config_file) != 0) { LOG(2, "unable to parse config stored in %s " "file (from %s environment variable)", env_config_file, LOG_CONFIG_FILE_ENV_VARIABLE); goto err; } } return 0; err: if (plp) ctl_delete(plp->ctl); return -1; } /* * log_init -- load-time initialization for log * * Called automatically by the run-time loader. */ ATTR_CONSTRUCTOR void libpmemlog_init(void) { ctl_global_register(); if (log_ctl_init_and_load(NULL)) FATAL("error: %s", pmemlog_errormsg()); common_init(PMEMLOG_LOG_PREFIX, PMEMLOG_LOG_LEVEL_VAR, PMEMLOG_LOG_FILE_VAR, PMEMLOG_MAJOR_VERSION, PMEMLOG_MINOR_VERSION); LOG(3, NULL); } /* * libpmemlog_fini -- libpmemlog cleanup routine * * Called automatically when the process terminates. */ ATTR_DESTRUCTOR void libpmemlog_fini(void) { LOG(3, NULL); common_fini(); } /* * pmemlog_check_versionU -- see if lib meets application version requirements */ #ifndef _WIN32 static inline #endif const char * pmemlog_check_versionU(unsigned major_required, unsigned minor_required) { LOG(3, "major_required %u minor_required %u", major_required, minor_required); if (major_required != PMEMLOG_MAJOR_VERSION) { ERR("libpmemlog major version mismatch (need %u, found %u)", major_required, PMEMLOG_MAJOR_VERSION); return out_get_errormsg(); } if (minor_required > PMEMLOG_MINOR_VERSION) { ERR("libpmemlog minor version mismatch (need %u, found %u)", minor_required, PMEMLOG_MINOR_VERSION); return out_get_errormsg(); } return NULL; } #ifndef _WIN32 /* * pmemlog_check_version -- see if lib meets application version requirements */ const char * pmemlog_check_version(unsigned major_required, unsigned minor_required) { return pmemlog_check_versionU(major_required, minor_required); } #else /* * pmemlog_check_versionW -- see if lib meets application version requirements */ const wchar_t * pmemlog_check_versionW(unsigned major_required, unsigned minor_required) { if (pmemlog_check_versionU(major_required, minor_required) != NULL) return out_get_errormsgW(); else return NULL; } #endif /* * pmemlog_set_funcs -- allow overriding libpmemlog's call to malloc, etc. 
*/ void pmemlog_set_funcs( void *(*malloc_func)(size_t size), void (*free_func)(void *ptr), void *(*realloc_func)(void *ptr, size_t size), char *(*strdup_func)(const char *s)) { LOG(3, NULL); util_set_alloc_funcs(malloc_func, free_func, realloc_func, strdup_func); } /* * pmemlog_errormsgU -- return last error message */ #ifndef _WIN32 static inline #endif const char * pmemlog_errormsgU(void) { return out_get_errormsg(); } #ifndef _WIN32 /* * pmemlog_errormsg -- return last error message */ const char * pmemlog_errormsg(void) { return pmemlog_errormsgU(); } #else /* * pmemlog_errormsgW -- return last error message as wchar_t */ const wchar_t * pmemlog_errormsgW(void) { return out_get_errormsgW(); } #endif
4,301
20.29703
78
c
null
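This record is libpmemlog.c, the library entry points: constructor-time ctl loading from the PMEMLOG_CONF / PMEMLOG_CONF_FILE environment variables, version checking, and error reporting. A hedged sketch of how an application would drive those pieces follows; the pool path is a placeholder and the "copy_on_write.at_open" key is assumed from the COW_at_open variable in the previous record rather than taken from documentation.

#include <libpmemlog.h>
#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
	/* fail early if the loaded library is older than what we built against */
	const char *msg = pmemlog_check_version(PMEMLOG_MAJOR_VERSION,
	    PMEMLOG_MINOR_VERSION);
	if (msg != NULL) {
		fprintf(stderr, "version mismatch: %s\n", msg);
		return 1;
	}

	/* ctl config is parsed from this variable when the library loads /
	 * the pool opens; the key below is an assumed example entry */
	setenv("PMEMLOG_CONF", "copy_on_write.at_open=0", 1);

	PMEMlogpool *plp = pmemlog_open("/pmem/log"); /* placeholder path */
	if (plp == NULL) {
		fprintf(stderr, "pmemlog_open: %s\n", pmemlog_errormsg());
		return 1;
	}

	pmemlog_close(plp);
	return 0;
}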
NearPMSW-main/nearpm/shadow/pmdk-sd/src/core/os_windows.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2017-2020, Intel Corporation */ /* * Copyright (c) 2016, Microsoft Corporation. All rights reserved. * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * os_windows.c -- windows abstraction layer */ #include <io.h> #include <sys/locking.h> #include <errno.h> #include <pmemcompat.h> #include <windows.h> #include "alloc.h" #include "util.h" #include "os.h" #include "out.h" #define UTF8_BOM "\xEF\xBB\xBF" /* * os_open -- open abstraction layer */ int os_open(const char *pathname, int flags, ...) { wchar_t *path = util_toUTF16(pathname); if (path == NULL) return -1; int ret; if (flags & O_CREAT) { va_list arg; va_start(arg, flags); mode_t mode = va_arg(arg, mode_t); va_end(arg); ret = _wopen(path, flags, mode); } else { ret = _wopen(path, flags); } util_free_UTF16(path); /* BOM skipping should not modify errno */ int orig_errno = errno; /* * text files on windows can contain BOM. 
As we open files * in binary mode we have to detect bom and skip it */ if (ret != -1) { char bom[3]; if (_read(ret, bom, sizeof(bom)) != 3 || memcmp(bom, UTF8_BOM, 3) != 0) { /* UTF-8 bom not found - reset file to the beginning */ _lseek(ret, 0, SEEK_SET); } } errno = orig_errno; return ret; } /* * os_fsync -- fsync abstraction layer */ int os_fsync(int fd) { HANDLE handle = (HANDLE) _get_osfhandle(fd); if (handle == INVALID_HANDLE_VALUE) { errno = EBADF; return -1; } if (!FlushFileBuffers(handle)) { errno = EINVAL; return -1; } return 0; } /* * os_fsync_dir -- fsync the directory */ int os_fsync_dir(const char *dir_name) { /* XXX not used and not implemented */ ASSERT(0); return -1; } /* * os_stat -- stat abstraction layer */ int os_stat(const char *pathname, os_stat_t *buf) { wchar_t *path = util_toUTF16(pathname); if (path == NULL) return -1; int ret = _wstat64(path, buf); util_free_UTF16(path); return ret; } /* * os_unlink -- unlink abstraction layer */ int os_unlink(const char *pathname) { wchar_t *path = util_toUTF16(pathname); if (path == NULL) return -1; int ret = _wunlink(path); util_free_UTF16(path); return ret; } /* * os_access -- access abstraction layer */ int os_access(const char *pathname, int mode) { wchar_t *path = util_toUTF16(pathname); if (path == NULL) return -1; int ret = _waccess(path, mode); util_free_UTF16(path); return ret; } /* * os_skipBOM -- (internal) Skip BOM in file stream * * text files on windows can contain BOM. We have to detect bom and skip it. */ static void os_skipBOM(FILE *file) { if (file == NULL) return; /* BOM skipping should not modify errno */ int orig_errno = errno; /* UTF-8 BOM */ uint8_t bom[3]; size_t read_num = fread(bom, sizeof(bom[0]), sizeof(bom), file); if (read_num != ARRAY_SIZE(bom)) goto out; if (memcmp(bom, UTF8_BOM, ARRAY_SIZE(bom)) != 0) { /* UTF-8 bom not found - reset file to the beginning */ fseek(file, 0, SEEK_SET); } out: errno = orig_errno; } /* * os_fopen -- fopen abstraction layer */ FILE * os_fopen(const char *pathname, const char *mode) { wchar_t *path = util_toUTF16(pathname); if (path == NULL) return NULL; wchar_t *wmode = util_toUTF16(mode); if (wmode == NULL) { util_free_UTF16(path); return NULL; } FILE *ret = _wfopen(path, wmode); util_free_UTF16(path); util_free_UTF16(wmode); os_skipBOM(ret); return ret; } /* * os_fdopen -- fdopen abstraction layer */ FILE * os_fdopen(int fd, const char *mode) { FILE *ret = fdopen(fd, mode); os_skipBOM(ret); return ret; } /* * os_chmod -- chmod abstraction layer */ int os_chmod(const char *pathname, mode_t mode) { wchar_t *path = util_toUTF16(pathname); if (path == NULL) return -1; int ret = _wchmod(path, mode); util_free_UTF16(path); return ret; } /* * os_mkstemp -- generate a unique temporary filename from template */ int os_mkstemp(char *temp) { unsigned rnd; wchar_t *utemp = util_toUTF16(temp); if (utemp == NULL) return -1; wchar_t *path = _wmktemp(utemp); if (path == NULL) { util_free_UTF16(utemp); return -1; } wchar_t *npath = Malloc(sizeof(*npath) * wcslen(path) + _MAX_FNAME); if (npath == NULL) { util_free_UTF16(utemp); return -1; } wcscpy(npath, path); util_free_UTF16(utemp); /* * Use rand_s to generate more unique tmp file name than _mktemp do. * In case with multiple threads and multiple files even after close() * file name conflicts occurred. * It resolved issue with synchronous removing * multiples files by system. 
*/ rand_s(&rnd); int ret = _snwprintf(npath + wcslen(npath), _MAX_FNAME, L"%u", rnd); if (ret < 0) goto out; /* * Use O_TEMPORARY flag to make sure the file is deleted when * the last file descriptor is closed. Also, it prevents opening * this file from another process. */ ret = _wopen(npath, O_RDWR | O_CREAT | O_EXCL | O_TEMPORARY, S_IWRITE | S_IREAD); out: Free(npath); return ret; } /* * os_posix_fallocate -- allocate file space */ int os_posix_fallocate(int fd, os_off_t offset, os_off_t len) { /* * From POSIX: * "EINVAL -- The len argument was zero or the offset argument was * less than zero." * * From Linux man-page: * "EINVAL -- offset was less than 0, or len was less than or * equal to 0" */ if (offset < 0 || len <= 0) return EINVAL; /* * From POSIX: * "EFBIG -- The value of offset+len is greater than the maximum * file size." * * Overflow can't be checked for by _chsize_s, since it only gets * the sum. */ if (offset + len < offset) return EFBIG; HANDLE handle = (HANDLE)_get_osfhandle(fd); if (handle == INVALID_HANDLE_VALUE) { return errno; } FILE_ATTRIBUTE_TAG_INFO attributes; if (!GetFileInformationByHandleEx(handle, FileAttributeTagInfo, &attributes, sizeof(attributes))) { return EINVAL; } /* * To physically allocate space on windows we have to remove * sparsefile and file compressed flags. This method is much faster * than using _chsize_s which has terrible performance. Dax on * windows doesn't support sparse files and file compression so * this workaround is acceptable. */ if (attributes.FileAttributes & FILE_ATTRIBUTE_SPARSE_FILE) { DWORD unused; FILE_SET_SPARSE_BUFFER buffer; buffer.SetSparse = FALSE; if (!DeviceIoControl(handle, FSCTL_SET_SPARSE, &buffer, sizeof(buffer), NULL, 0, &unused, NULL)) { return EINVAL; } } if (attributes.FileAttributes & FILE_ATTRIBUTE_COMPRESSED) { DWORD unused; USHORT buffer = 0; /* magic undocumented value */ if (!DeviceIoControl(handle, FSCTL_SET_COMPRESSION, &buffer, sizeof(buffer), NULL, 0, &unused, NULL)) { return EINVAL; } } /* * posix_fallocate should not clobber errno, but * _filelengthi64 might set errno. */ int orig_errno = errno; __int64 current_size = _filelengthi64(fd); int file_length_errno = errno; errno = orig_errno; if (current_size < 0) return file_length_errno; __int64 requested_size = offset + len; if (requested_size <= current_size) return 0; int ret = os_ftruncate(fd, requested_size); if (ret) { errno = ret; return -1; } return 0; } /* * os_ftruncate -- truncate a file to a specified length */ int os_ftruncate(int fd, os_off_t length) { LARGE_INTEGER distanceToMove = {0}; distanceToMove.QuadPart = length; HANDLE handle = (HANDLE)_get_osfhandle(fd); if (handle == INVALID_HANDLE_VALUE) return -1; if (!SetFilePointerEx(handle, distanceToMove, NULL, FILE_BEGIN)) { errno = EINVAL; return -1; } if (!SetEndOfFile(handle)) { errno = EINVAL; return -1; } return 0; } /* * os_flock -- apply or remove an advisory lock on an open file */ int os_flock(int fd, int operation) { int flags = 0; SYSTEM_INFO systemInfo; GetSystemInfo(&systemInfo); switch (operation & (OS_LOCK_EX | OS_LOCK_SH | OS_LOCK_UN)) { case OS_LOCK_EX: case OS_LOCK_SH: if (operation & OS_LOCK_NB) flags = _LK_NBLCK; else flags = _LK_LOCK; break; case OS_LOCK_UN: flags = _LK_UNLCK; break; default: errno = EINVAL; return -1; } os_off_t filelen = _filelengthi64(fd); if (filelen < 0) return -1; /* for our purpose it's enough to lock the first page of the file */ long len = (filelen > systemInfo.dwPageSize) ? 
systemInfo.dwPageSize : (long)filelen; int res = _locking(fd, flags, len); if (res != 0 && errno == EACCES) errno = EWOULDBLOCK; /* for consistency with flock() */ return res; } /* * os_writev -- windows version of writev function * * XXX: _write and other similar functions are 32 bit on windows * if size of data is bigger then 2^32, this function * will be not atomic. */ ssize_t os_writev(int fd, const struct iovec *iov, int iovcnt) { size_t size = 0; /* XXX: _write is 32 bit on windows */ for (int i = 0; i < iovcnt; i++) size += iov[i].iov_len; void *buf = malloc(size); if (buf == NULL) return ENOMEM; char *it_buf = buf; for (int i = 0; i < iovcnt; i++) { memcpy(it_buf, iov[i].iov_base, iov[i].iov_len); it_buf += iov[i].iov_len; } ssize_t written = 0; while (size > 0) { int ret = _write(fd, buf, size >= MAXUINT ? MAXUINT : (unsigned)size); if (ret == -1) { written = -1; break; } written += ret; size -= ret; } free(buf); return written; } #define NSEC_IN_SEC 1000000000ull /* number of useconds between 1970-01-01T00:00:00Z and 1601-01-01T00:00:00Z */ #define DELTA_WIN2UNIX (11644473600000000ull) /* * clock_gettime -- returns elapsed time since the system was restarted * or since Epoch, depending on the mode id */ int os_clock_gettime(int id, struct timespec *ts) { switch (id) { case CLOCK_MONOTONIC: { LARGE_INTEGER time; LARGE_INTEGER frequency; QueryPerformanceFrequency(&frequency); QueryPerformanceCounter(&time); ts->tv_sec = time.QuadPart / frequency.QuadPart; ts->tv_nsec = (long)( (time.QuadPart % frequency.QuadPart) * NSEC_IN_SEC / frequency.QuadPart); } break; case CLOCK_REALTIME: { FILETIME ctime_ft; GetSystemTimeAsFileTime(&ctime_ft); ULARGE_INTEGER ctime = { .HighPart = ctime_ft.dwHighDateTime, .LowPart = ctime_ft.dwLowDateTime, }; ts->tv_sec = (ctime.QuadPart - DELTA_WIN2UNIX * 10) / 10000000; ts->tv_nsec = ((ctime.QuadPart - DELTA_WIN2UNIX * 10) % 10000000) * 100; } break; default: SetLastError(EINVAL); return -1; } return 0; } /* * os_setenv -- change or add an environment variable */ int os_setenv(const char *name, const char *value, int overwrite) { errno_t err; /* * If caller doesn't want to overwrite make sure that a environment * variable with the same name doesn't exist. */ if (!overwrite && getenv(name)) return 0; /* * _putenv_s returns a non-zero error code on failure but setenv * needs to return -1 on failure, let's translate the error code. */ if ((err = _putenv_s(name, value)) != 0) { errno = err; return -1; } return 0; } /* * os_unsetenv -- remove an environment variable */ int os_unsetenv(const char *name) { errno_t err; if ((err = _putenv_s(name, "")) != 0) { errno = err; return -1; } return 0; } /* * os_getenv -- getenv abstraction layer */ char * os_getenv(const char *name) { return getenv(name); } /* * rand_r -- rand_r for windows * * XXX: RAND_MAX is equal 0x7fff on Windows, so to get 32 bit random number * we need to merge two numbers returned by rand_s(). * It is not to the best solution as subsequences returned by rand_s are * not guaranteed to be independent. * * XXX: Windows doesn't implement deterministic thread-safe pseudorandom * generator (generator which can be initialized by seed ). * We have to chose between a deterministic nonthread-safe generator * (rand(), srand()) or a non-deterministic thread-safe generator(rand_s()) * as thread-safety is more important, a seed parameter is ignored in this * implementation. 
*/ unsigned os_rand_r(unsigned *seedp) { UNREFERENCED_PARAMETER(seedp); unsigned part1, part2; rand_s(&part1); rand_s(&part2); return part1 << 16 | part2; } /* * sys_siglist -- map of signal to human readable messages like sys_siglist */ const char * const sys_siglist[] = { "Unknown signal 0", /* 0 */ "Hangup", /* 1 */ "Interrupt", /* 2 */ "Quit", /* 3 */ "Illegal instruction", /* 4 */ "Trace/breakpoint trap", /* 5 */ "Aborted", /* 6 */ "Bus error", /* 7 */ "Floating point exception", /* 8 */ "Killed", /* 9 */ "User defined signal 1", /* 10 */ "Segmentation fault", /* 11 */ "User defined signal 2", /* 12 */ "Broken pipe", /* 13 */ "Alarm clock", /* 14 */ "Terminated", /* 15 */ "Stack fault", /* 16 */ "Child exited", /* 17 */ "Continued", /* 18 */ "Stopped (signal)", /* 19 */ "Stopped", /* 20 */ "Stopped (tty input)", /* 21 */ "Stopped (tty output)", /* 22 */ "Urgent I/O condition", /* 23 */ "CPU time limit exceeded", /* 24 */ "File size limit exceeded", /* 25 */ "Virtual timer expired", /* 26 */ "Profiling timer expired", /* 27 */ "Window changed", /* 28 */ "I/O possible", /* 29 */ "Power failure", /* 30 */ "Bad system call", /* 31 */ "Unknown signal 32" /* 32 */ }; int sys_siglist_size = ARRAYSIZE(sys_siglist); /* * string constants for strsignal * XXX: ideally this should have the signal number as the suffix but then we * should use a buffer from thread local storage, so deferring the same till * we need it * NOTE: In Linux strsignal uses TLS for the same reason but if it fails to get * a thread local buffer it falls back to using a static buffer trading the * thread safety. */ #define STR_REALTIME_SIGNAL "Real-time signal" #define STR_UNKNOWN_SIGNAL "Unknown signal" /* * strsignal -- returns a string describing the signal number 'sig' * * XXX: According to POSIX, this one is of type 'char *', but in our * implementation it returns 'const char *'. */ const char * os_strsignal(int sig) { if (sig >= 0 && sig < ARRAYSIZE(sys_siglist)) return sys_siglist[sig]; else if (sig >= 34 && sig <= 64) return STR_REALTIME_SIGNAL; else return STR_UNKNOWN_SIGNAL; } int os_execv(const char *path, char *const argv[]) { wchar_t *wpath = util_toUTF16(path); if (wpath == NULL) return -1; int argc = 0; while (argv[argc]) argc++; int ret; wchar_t **wargv = Zalloc((argc + 1) * sizeof(wargv[0])); if (!wargv) { ret = -1; goto wargv_alloc_failed; } for (int i = 0; i < argc; ++i) { wargv[i] = util_toUTF16(argv[i]); if (!wargv[i]) { ret = -1; goto end; } } intptr_t iret = _wexecv(wpath, wargv); if (iret == 0) ret = 0; else ret = -1; end: for (int i = 0; i < argc; ++i) util_free_UTF16(wargv[i]); Free(wargv); wargv_alloc_failed: util_free_UTF16(wpath); return ret; }
16,299
20.967655
79
c
null
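The os_windows.c record emulates POSIX primitives on Windows; its os_writev() gathers all iovecs into one heap buffer and then loops over _write(), which is why the comment warns the call is not atomic for very large payloads. The sketch below re-states that gather-then-write technique in portable C against the standard write()/struct iovec interfaces, purely to illustrate the approach; it is not the Windows implementation itself.

#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/uio.h>

/* Gather-then-write emulation of writev(): copy every iovec into a single
 * buffer, then write it out in a loop. Unlike native writev(), the write
 * is not atomic with respect to other writers. */
static ssize_t
writev_emulated(int fd, const struct iovec *iov, int iovcnt)
{
	size_t size = 0;
	for (int i = 0; i < iovcnt; i++)
		size += iov[i].iov_len;

	char *buf = malloc(size);
	if (buf == NULL)
		return -1;

	char *p = buf;
	for (int i = 0; i < iovcnt; i++) {
		memcpy(p, iov[i].iov_base, iov[i].iov_len);
		p += iov[i].iov_len;
	}

	ssize_t written = 0;
	size_t left = size;
	while (left > 0) {
		ssize_t n = write(fd, buf + (size - left), left);
		if (n < 0) {
			written = -1;
			break;
		}
		written += n;
		left -= (size_t)n;
	}

	free(buf);
	return written;
}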
NearPMSW-main/nearpm/shadow/pmdk-sd/src/core/os_thread_posix.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2017-2020, Intel Corporation */ /* * os_thread_posix.c -- Posix thread abstraction layer */ #define _GNU_SOURCE #include <pthread.h> #ifdef __FreeBSD__ #include <pthread_np.h> #endif #include <semaphore.h> #include "os_thread.h" #include "util.h" typedef struct { pthread_t thread; } internal_os_thread_t; /* * os_once -- pthread_once abstraction layer */ int os_once(os_once_t *o, void (*func)(void)) { COMPILE_ERROR_ON(sizeof(os_once_t) < sizeof(pthread_once_t)); return pthread_once((pthread_once_t *)o, func); } /* * os_tls_key_create -- pthread_key_create abstraction layer */ int os_tls_key_create(os_tls_key_t *key, void (*destructor)(void *)) { COMPILE_ERROR_ON(sizeof(os_tls_key_t) < sizeof(pthread_key_t)); return pthread_key_create((pthread_key_t *)key, destructor); } /* * os_tls_key_delete -- pthread_key_delete abstraction layer */ int os_tls_key_delete(os_tls_key_t key) { return pthread_key_delete((pthread_key_t)key); } /* * os_tls_setspecific -- pthread_key_setspecific abstraction layer */ int os_tls_set(os_tls_key_t key, const void *value) { return pthread_setspecific((pthread_key_t)key, value); } /* * os_tls_get -- pthread_key_getspecific abstraction layer */ void * os_tls_get(os_tls_key_t key) { return pthread_getspecific((pthread_key_t)key); } /* * os_mutex_init -- pthread_mutex_init abstraction layer */ int os_mutex_init(os_mutex_t *__restrict mutex) { COMPILE_ERROR_ON(sizeof(os_mutex_t) < sizeof(pthread_mutex_t)); return pthread_mutex_init((pthread_mutex_t *)mutex, NULL); } /* * os_mutex_destroy -- pthread_mutex_destroy abstraction layer */ int os_mutex_destroy(os_mutex_t *__restrict mutex) { return pthread_mutex_destroy((pthread_mutex_t *)mutex); } /* * os_mutex_lock -- pthread_mutex_lock abstraction layer */ int os_mutex_lock(os_mutex_t *__restrict mutex) { return pthread_mutex_lock((pthread_mutex_t *)mutex); } /* * os_mutex_trylock -- pthread_mutex_trylock abstraction layer */ int os_mutex_trylock(os_mutex_t *__restrict mutex) { return pthread_mutex_trylock((pthread_mutex_t *)mutex); } /* * os_mutex_unlock -- pthread_mutex_unlock abstraction layer */ int os_mutex_unlock(os_mutex_t *__restrict mutex) { return pthread_mutex_unlock((pthread_mutex_t *)mutex); } /* * os_mutex_timedlock -- pthread_mutex_timedlock abstraction layer */ int os_mutex_timedlock(os_mutex_t *__restrict mutex, const struct timespec *abstime) { return pthread_mutex_timedlock((pthread_mutex_t *)mutex, abstime); } /* * os_rwlock_init -- pthread_rwlock_init abstraction layer */ int os_rwlock_init(os_rwlock_t *__restrict rwlock) { COMPILE_ERROR_ON(sizeof(os_rwlock_t) < sizeof(pthread_rwlock_t)); return pthread_rwlock_init((pthread_rwlock_t *)rwlock, NULL); } /* * os_rwlock_destroy -- pthread_rwlock_destroy abstraction layer */ int os_rwlock_destroy(os_rwlock_t *__restrict rwlock) { return pthread_rwlock_destroy((pthread_rwlock_t *)rwlock); } /* * os_rwlock_rdlock - pthread_rwlock_rdlock abstraction layer */ int os_rwlock_rdlock(os_rwlock_t *__restrict rwlock) { return pthread_rwlock_rdlock((pthread_rwlock_t *)rwlock); } /* * os_rwlock_wrlock -- pthread_rwlock_wrlock abstraction layer */ int os_rwlock_wrlock(os_rwlock_t *__restrict rwlock) { return pthread_rwlock_wrlock((pthread_rwlock_t *)rwlock); } /* * os_rwlock_unlock -- pthread_rwlock_unlock abstraction layer */ int os_rwlock_unlock(os_rwlock_t *__restrict rwlock) { return pthread_rwlock_unlock((pthread_rwlock_t *)rwlock); } /* * os_rwlock_tryrdlock -- pthread_rwlock_tryrdlock abstraction layer */ int 
os_rwlock_tryrdlock(os_rwlock_t *__restrict rwlock) { return pthread_rwlock_tryrdlock((pthread_rwlock_t *)rwlock); } /* * os_rwlock_tryrwlock -- pthread_rwlock_trywrlock abstraction layer */ int os_rwlock_trywrlock(os_rwlock_t *__restrict rwlock) { return pthread_rwlock_trywrlock((pthread_rwlock_t *)rwlock); } /* * os_rwlock_timedrdlock -- pthread_rwlock_timedrdlock abstraction layer */ int os_rwlock_timedrdlock(os_rwlock_t *__restrict rwlock, const struct timespec *abstime) { return pthread_rwlock_timedrdlock((pthread_rwlock_t *)rwlock, abstime); } /* * os_rwlock_timedwrlock -- pthread_rwlock_timedwrlock abstraction layer */ int os_rwlock_timedwrlock(os_rwlock_t *__restrict rwlock, const struct timespec *abstime) { return pthread_rwlock_timedwrlock((pthread_rwlock_t *)rwlock, abstime); } /* * os_spin_init -- pthread_spin_init abstraction layer */ int os_spin_init(os_spinlock_t *lock, int pshared) { COMPILE_ERROR_ON(sizeof(os_spinlock_t) < sizeof(pthread_spinlock_t)); return pthread_spin_init((pthread_spinlock_t *)lock, pshared); } /* * os_spin_destroy -- pthread_spin_destroy abstraction layer */ int os_spin_destroy(os_spinlock_t *lock) { return pthread_spin_destroy((pthread_spinlock_t *)lock); } /* * os_spin_lock -- pthread_spin_lock abstraction layer */ int os_spin_lock(os_spinlock_t *lock) { return pthread_spin_lock((pthread_spinlock_t *)lock); } /* * os_spin_unlock -- pthread_spin_unlock abstraction layer */ int os_spin_unlock(os_spinlock_t *lock) { return pthread_spin_unlock((pthread_spinlock_t *)lock); } /* * os_spin_trylock -- pthread_spin_trylock abstraction layer */ int os_spin_trylock(os_spinlock_t *lock) { return pthread_spin_trylock((pthread_spinlock_t *)lock); } /* * os_cond_init -- pthread_cond_init abstraction layer */ int os_cond_init(os_cond_t *__restrict cond) { COMPILE_ERROR_ON(sizeof(os_cond_t) < sizeof(pthread_cond_t)); return pthread_cond_init((pthread_cond_t *)cond, NULL); } /* * os_cond_destroy -- pthread_cond_destroy abstraction layer */ int os_cond_destroy(os_cond_t *__restrict cond) { return pthread_cond_destroy((pthread_cond_t *)cond); } /* * os_cond_broadcast -- pthread_cond_broadcast abstraction layer */ int os_cond_broadcast(os_cond_t *__restrict cond) { return pthread_cond_broadcast((pthread_cond_t *)cond); } /* * os_cond_signal -- pthread_cond_signal abstraction layer */ int os_cond_signal(os_cond_t *__restrict cond) { return pthread_cond_signal((pthread_cond_t *)cond); } /* * os_cond_timedwait -- pthread_cond_timedwait abstraction layer */ int os_cond_timedwait(os_cond_t *__restrict cond, os_mutex_t *__restrict mutex, const struct timespec *abstime) { return pthread_cond_timedwait((pthread_cond_t *)cond, (pthread_mutex_t *)mutex, abstime); } /* * os_cond_wait -- pthread_cond_wait abstraction layer */ int os_cond_wait(os_cond_t *__restrict cond, os_mutex_t *__restrict mutex) { return pthread_cond_wait((pthread_cond_t *)cond, (pthread_mutex_t *)mutex); } /* * os_thread_create -- pthread_create abstraction layer */ int os_thread_create(os_thread_t *thread, const os_thread_attr_t *attr, void *(*start_routine)(void *), void *arg) { COMPILE_ERROR_ON(sizeof(os_thread_t) < sizeof(internal_os_thread_t)); internal_os_thread_t *thread_info = (internal_os_thread_t *)thread; return pthread_create(&thread_info->thread, (pthread_attr_t *)attr, start_routine, arg); } /* * os_thread_join -- pthread_join abstraction layer */ int os_thread_join(os_thread_t *thread, void **result) { internal_os_thread_t *thread_info = (internal_os_thread_t *)thread; return 
pthread_join(thread_info->thread, result); } /* * os_thread_self -- pthread_self abstraction layer */ void os_thread_self(os_thread_t *thread) { internal_os_thread_t *thread_info = (internal_os_thread_t *)thread; thread_info->thread = pthread_self(); } /* * os_thread_atfork -- pthread_atfork abstraction layer */ int os_thread_atfork(void (*prepare)(void), void (*parent)(void), void (*child)(void)) { return pthread_atfork(prepare, parent, child); } /* * os_thread_setaffinity_np -- pthread_atfork abstraction layer */ int os_thread_setaffinity_np(os_thread_t *thread, size_t set_size, const os_cpu_set_t *set) { COMPILE_ERROR_ON(sizeof(os_cpu_set_t) < sizeof(cpu_set_t)); internal_os_thread_t *thread_info = (internal_os_thread_t *)thread; return pthread_setaffinity_np(thread_info->thread, set_size, (cpu_set_t *)set); } /* * os_cpu_zero -- CP_ZERO abstraction layer */ void os_cpu_zero(os_cpu_set_t *set) { CPU_ZERO((cpu_set_t *)set); } /* * os_cpu_set -- CP_SET abstraction layer */ void os_cpu_set(size_t cpu, os_cpu_set_t *set) { CPU_SET(cpu, (cpu_set_t *)set); } /* * os_semaphore_init -- initializes semaphore instance */ int os_semaphore_init(os_semaphore_t *sem, unsigned value) { COMPILE_ERROR_ON(sizeof(os_semaphore_t) < sizeof(sem_t)); return sem_init((sem_t *)sem, 0, value); } /* * os_semaphore_destroy -- destroys a semaphore instance */ int os_semaphore_destroy(os_semaphore_t *sem) { return sem_destroy((sem_t *)sem); } /* * os_semaphore_wait -- decreases the value of the semaphore */ int os_semaphore_wait(os_semaphore_t *sem) { return sem_wait((sem_t *)sem); } /* * os_semaphore_trywait -- tries to decrease the value of the semaphore */ int os_semaphore_trywait(os_semaphore_t *sem) { return sem_trywait((sem_t *)sem); } /* * os_semaphore_post -- increases the value of the semaphore */ int os_semaphore_post(os_semaphore_t *sem) { return sem_post((sem_t *)sem); }
9,190
20.032037
72
c
null
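The os_thread_posix.c record is a thin pass-through from the os_thread_* abstraction to pthreads. A small sketch of calling that abstraction is shown below; it uses the internal "os_thread.h" header, so it only compiles inside the PMDK source tree, and the signatures are taken directly from the record above.

#include <stdio.h>
#include "os_thread.h"

static os_mutex_t lock;
static int counter;

/* worker increments a shared counter under the abstraction-layer mutex */
static void *
worker(void *arg)
{
	(void)arg;
	for (int i = 0; i < 1000; i++) {
		os_mutex_lock(&lock);
		counter++;
		os_mutex_unlock(&lock);
	}
	return NULL;
}

int
main(void)
{
	os_thread_t t1, t2;

	os_mutex_init(&lock);
	os_thread_create(&t1, NULL, worker, NULL);
	os_thread_create(&t2, NULL, worker, NULL);
	os_thread_join(&t1, NULL);
	os_thread_join(&t2, NULL);
	os_mutex_destroy(&lock);

	printf("counter = %d\n", counter);
	return 0;
}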
NearPMSW-main/nearpm/shadow/pmdk-sd/src/core/util.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2014-2020, Intel Corporation */ /* * util.c -- very basic utilities */ #include <stdlib.h> #include <stdio.h> #include <string.h> #include <unistd.h> #include <endian.h> #include <errno.h> #include <time.h> #include <stdarg.h> #include "util.h" #include "os.h" #include "valgrind_internal.h" #include "alloc.h" /* library-wide page size */ unsigned long long Pagesize; /* allocation/mmap granularity */ unsigned long long Mmap_align; #if ANY_VG_TOOL_ENABLED /* Initialized to true if the process is running inside Valgrind. */ unsigned _On_valgrind; #endif #if VG_HELGRIND_ENABLED /* Initialized to true if the process is running inside Valgrind helgrind. */ unsigned _On_helgrind; #endif #if VG_DRD_ENABLED /* Initialized to true if the process is running inside Valgrind drd. */ unsigned _On_drd; #endif #if VG_HELGRIND_ENABLED || VG_DRD_ENABLED /* Initialized to true if the process is running inside Valgrind drd or hg. */ unsigned _On_drd_or_hg; #endif #if VG_MEMCHECK_ENABLED /* Initialized to true if the process is running inside Valgrind memcheck. */ unsigned _On_memcheck; #endif #if VG_PMEMCHECK_ENABLED /* Initialized to true if the process is running inside Valgrind pmemcheck. */ unsigned _On_pmemcheck; #define LIB_LOG_LEN 20 #define FUNC_LOG_LEN 50 #define SUFFIX_LEN 7 /* true if pmreorder instrumentation has to be enabled */ int _Pmreorder_emit; /* * util_emit_log -- emits lib and func name with appropriate suffix * to pmemcheck store log file */ void util_emit_log(const char *lib, const char *func, int order) { char lib_name[LIB_LOG_LEN]; char func_name[FUNC_LOG_LEN]; char suffix[SUFFIX_LEN]; size_t lib_len = strlen(lib); size_t func_len = strlen(func); if (order == 0) strcpy(suffix, ".BEGIN"); else strcpy(suffix, ".END"); size_t suffix_len = strlen(suffix); if (lib_len + suffix_len + 1 > LIB_LOG_LEN) { VALGRIND_EMIT_LOG("Library name is too long"); return; } if (func_len + suffix_len + 1 > FUNC_LOG_LEN) { VALGRIND_EMIT_LOG("Function name is too long"); return; } strcpy(lib_name, lib); strcat(lib_name, suffix); strcpy(func_name, func); strcat(func_name, suffix); if (order == 0) { VALGRIND_EMIT_LOG(func_name); VALGRIND_EMIT_LOG(lib_name); } else { VALGRIND_EMIT_LOG(lib_name); VALGRIND_EMIT_LOG(func_name); } } #endif /* * util_is_zeroed -- check if given memory range is all zero */ int util_is_zeroed(const void *addr, size_t len) { const char *a = addr; if (len == 0) return 1; if (a[0] == 0 && memcmp(a, a + 1, len - 1) == 0) return 1; return 0; } /* * util_checksum_compute -- compute Fletcher64-like checksum * * csump points to where the checksum lives, so that location * is treated as zeros while calculating the checksum. The * checksummed data is assumed to be in little endian order. 
*/ uint64_t util_checksum_compute(void *addr, size_t len, uint64_t *csump, size_t skip_off) { if (len % 4 != 0) abort(); uint32_t *p32 = addr; uint32_t *p32end = (uint32_t *)((char *)addr + len); uint32_t *skip; uint32_t lo32 = 0; uint32_t hi32 = 0; if (skip_off) skip = (uint32_t *)((char *)addr + skip_off); else skip = (uint32_t *)((char *)addr + len); while (p32 < p32end) if (p32 == (uint32_t *)csump || p32 >= skip) { /* lo32 += 0; treat first 32-bits as zero */ p32++; hi32 += lo32; /* lo32 += 0; treat second 32-bits as zero */ p32++; hi32 += lo32; } else { lo32 += le32toh(*p32); ++p32; hi32 += lo32; } return (uint64_t)hi32 << 32 | lo32; } /* * util_checksum -- compute Fletcher64-like checksum * * csump points to where the checksum lives, so that location * is treated as zeros while calculating the checksum. * If insert is true, the calculated checksum is inserted into * the range at *csump. Otherwise the calculated checksum is * checked against *csump and the result returned (true means * the range checksummed correctly). */ int util_checksum(void *addr, size_t len, uint64_t *csump, int insert, size_t skip_off) { uint64_t csum = util_checksum_compute(addr, len, csump, skip_off); if (insert) { *csump = htole64(csum); return 1; } return *csump == htole64(csum); } /* * util_checksum_seq -- compute sequential Fletcher64-like checksum * * Merges checksum from the old buffer with checksum for current buffer. */ uint64_t util_checksum_seq(const void *addr, size_t len, uint64_t csum) { if (len % 4 != 0) abort(); const uint32_t *p32 = addr; const uint32_t *p32end = (const uint32_t *)((const char *)addr + len); uint32_t lo32 = (uint32_t)csum; uint32_t hi32 = (uint32_t)(csum >> 32); while (p32 < p32end) { lo32 += le32toh(*p32); ++p32; hi32 += lo32; } return (uint64_t)hi32 << 32 | lo32; } /* * util_fgets -- fgets wrapper with conversion CRLF to LF */ char * util_fgets(char *buffer, int max, FILE *stream) { char *str = fgets(buffer, max, stream); if (str == NULL) goto end; int len = (int)strlen(str); if (len < 2) goto end; if (str[len - 2] == '\r' && str[len - 1] == '\n') { str[len - 2] = '\n'; str[len - 1] = '\0'; } end: return str; } struct suff { const char *suff; uint64_t mag; }; /* * util_parse_size -- parse size from string */ int util_parse_size(const char *str, size_t *sizep) { const struct suff suffixes[] = { { "B", 1ULL }, { "K", 1ULL << 10 }, /* JEDEC */ { "M", 1ULL << 20 }, { "G", 1ULL << 30 }, { "T", 1ULL << 40 }, { "P", 1ULL << 50 }, { "KiB", 1ULL << 10 }, /* IEC */ { "MiB", 1ULL << 20 }, { "GiB", 1ULL << 30 }, { "TiB", 1ULL << 40 }, { "PiB", 1ULL << 50 }, { "kB", 1000ULL }, /* SI */ { "MB", 1000ULL * 1000 }, { "GB", 1000ULL * 1000 * 1000 }, { "TB", 1000ULL * 1000 * 1000 * 1000 }, { "PB", 1000ULL * 1000 * 1000 * 1000 * 1000 } }; int res = -1; unsigned i; size_t size = 0; char unit[9] = {0}; int ret = sscanf(str, "%zu%8s", &size, unit); if (ret == 1) { res = 0; } else if (ret == 2) { for (i = 0; i < ARRAY_SIZE(suffixes); ++i) { if (strcmp(suffixes[i].suff, unit) == 0) { size = size * suffixes[i].mag; res = 0; break; } } } else { return -1; } if (sizep && res == 0) *sizep = size; return res; } /* * util_init -- initialize the utils * * This is called from the library initialization code. 
*/ void util_init(void) { /* XXX - replace sysconf() with util_get_sys_xxx() */ if (Pagesize == 0) Pagesize = (unsigned long) sysconf(_SC_PAGESIZE); #ifndef _WIN32 Mmap_align = Pagesize; #else if (Mmap_align == 0) { SYSTEM_INFO si; GetSystemInfo(&si); Mmap_align = si.dwAllocationGranularity; } #endif #if ANY_VG_TOOL_ENABLED _On_valgrind = RUNNING_ON_VALGRIND; #endif #if VG_MEMCHECK_ENABLED if (_On_valgrind) { unsigned tmp; unsigned result; unsigned res = VALGRIND_GET_VBITS(&tmp, &result, sizeof(tmp)); _On_memcheck = res ? 1 : 0; } else { _On_memcheck = 0; } #endif #if VG_DRD_ENABLED if (_On_valgrind) _On_drd = DRD_GET_DRD_THREADID ? 1 : 0; else _On_drd = 0; #endif #if VG_HELGRIND_ENABLED if (_On_valgrind) { unsigned tmp; unsigned result; /* * As of now (pmem-3.15) VALGRIND_HG_GET_ABITS is broken on * the upstream version of Helgrind headers. It generates * a sign-conversion error and actually returns UINT32_MAX-1 * when not running under Helgrind. */ long res = VALGRIND_HG_GET_ABITS(&tmp, &result, sizeof(tmp)); _On_helgrind = res != -2 ? 1 : 0; } else { _On_helgrind = 0; } #endif #if VG_DRD_ENABLED || VG_HELGRIND_ENABLED _On_drd_or_hg = (unsigned)(On_helgrind + On_drd); #endif #if VG_PMEMCHECK_ENABLED if (On_valgrind) { char *pmreorder_env = os_getenv("PMREORDER_EMIT_LOG"); if (pmreorder_env) _Pmreorder_emit = atoi(pmreorder_env); VALGRIND_PMC_REGISTER_PMEM_MAPPING(&_On_pmemcheck, sizeof(_On_pmemcheck)); unsigned pmc = (unsigned)VALGRIND_PMC_CHECK_IS_PMEM_MAPPING( &_On_pmemcheck, sizeof(_On_pmemcheck)); VALGRIND_PMC_REMOVE_PMEM_MAPPING(&_On_pmemcheck, sizeof(_On_pmemcheck)); _On_pmemcheck = pmc ? 1 : 0; } else { _On_pmemcheck = 0; _Pmreorder_emit = 0; } #endif } /* * util_concat_str -- concatenate two strings */ char * util_concat_str(const char *s1, const char *s2) { char *result = malloc(strlen(s1) + strlen(s2) + 1); if (!result) return NULL; strcpy(result, s1); strcat(result, s2); return result; } /* * util_localtime -- a wrapper for localtime function * * localtime can set nonzero errno even if it succeeds (e.g. when there is no * /etc/localtime file under Linux) and we do not want the errno to be polluted * in such cases. */ struct tm * util_localtime(const time_t *timep) { int oerrno = errno; struct tm *tm = localtime(timep); if (tm != NULL) errno = oerrno; return tm; } /* * util_safe_strcpy -- copies string from src to dst, returns -1 * when length of source string (including null-terminator) * is greater than max_length, 0 otherwise * * For gcc (found in version 8.1.1) calling this function with * max_length equal to dst size produces -Wstringop-truncation warning * * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=85902 */ #ifdef STRINGOP_TRUNCATION_SUPPORTED #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wstringop-truncation" #endif int util_safe_strcpy(char *dst, const char *src, size_t max_length) { if (max_length == 0) return -1; strncpy(dst, src, max_length); return dst[max_length - 1] == '\0' ? 0 : -1; } #ifdef STRINGOP_TRUNCATION_SUPPORTED #pragma GCC diagnostic pop #endif #define PARSER_MAX_LINE (PATH_MAX + 1024) /* * util_snprintf -- run snprintf; in case of truncation or a failure * return a negative value, or the number of characters printed otherwise. */ int util_snprintf(char *str, size_t size, const char *format, ...) 
{ va_list ap; va_start(ap, format); int ret = vsnprintf(str, size, format, ap); va_end(ap); if (ret < 0) { if (!errno) errno = EIO; goto err; } else if ((size_t)ret >= size) { errno = ENOBUFS; goto err; } return ret; err: return -1; } /* * util_readline -- read line from stream */ char * util_readline(FILE *fh) { size_t bufsize = PARSER_MAX_LINE; size_t position = 0; char *buffer = NULL; do { char *tmp = buffer; buffer = Realloc(buffer, bufsize); if (buffer == NULL) { Free(tmp); return NULL; } /* ensure if we can cast bufsize to int */ char *s = util_fgets(buffer + position, (int)bufsize / 2, fh); if (s == NULL) { Free(buffer); return NULL; } position = strlen(buffer); bufsize *= 2; } while (!feof(fh) && buffer[position - 1] != '\n'); return buffer; }
10,620
20.456566
79
c
null
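The util.c record implements the Fletcher64-like checksum used by the pool headers (the checksum slot itself is treated as zeros while summing) plus helpers such as util_parse_size(). The sketch below shows the insert/verify pattern and a size-string parse; it relies on the internal "util.h" header, so it is only buildable in-tree, and the struct layout is a made-up example whose total size is a multiple of 4 as the checksum code requires.

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include "util.h"

int
main(void)
{
	/* embed the checksum inside the checksummed range, as pool headers do */
	struct {
		char payload[56];
		uint64_t csum;
	} rec;

	memset(&rec, 0, sizeof(rec));
	strcpy(rec.payload, "persistent record");

	/* insert != 0: compute and store the checksum at &rec.csum */
	util_checksum(&rec, sizeof(rec), &rec.csum, 1, 0);

	/* insert == 0: verify; nonzero means the range checksums correctly */
	printf("valid: %d\n",
	    util_checksum(&rec, sizeof(rec), &rec.csum, 0, 0));

	size_t size;
	if (util_parse_size("2GiB", &size) == 0)
		printf("2GiB = %zu bytes\n", size);

	return 0;
}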
NearPMSW-main/nearpm/shadow/pmdk-sd/src/core/os_thread_windows.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2020, Intel Corporation */ /* * Copyright (c) 2016, Microsoft Corporation. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * os_thread_windows.c -- (imperfect) POSIX-like threads for Windows * * Loosely inspired by: * http://locklessinc.com/articles/pthreads_on_windows/ */ #include <time.h> #include <synchapi.h> #include <sys/types.h> #include <sys/timeb.h> #include "os_thread.h" #include "util.h" #include "out.h" typedef struct { unsigned attr; CRITICAL_SECTION lock; } internal_os_mutex_t; typedef struct { unsigned attr; char is_write; SRWLOCK lock; } internal_os_rwlock_t; typedef struct { unsigned attr; CONDITION_VARIABLE cond; } internal_os_cond_t; typedef long long internal_os_once_t; typedef struct { HANDLE handle; } internal_semaphore_t; typedef struct { GROUP_AFFINITY affinity; } internal_os_cpu_set_t; typedef struct { HANDLE thread_handle; void *arg; void *(*start_routine)(void *); void *result; } internal_os_thread_t; /* number of useconds between 1970-01-01T00:00:00Z and 1601-01-01T00:00:00Z */ #define DELTA_WIN2UNIX (11644473600000000ull) #define TIMED_LOCK(action, ts) {\ if ((action) == TRUE)\ return 0;\ unsigned long long et = (ts)->tv_sec * 1000000000 + (ts)->tv_nsec;\ while (1) {\ FILETIME _t;\ GetSystemTimeAsFileTime(&_t);\ ULARGE_INTEGER _UI = {\ .HighPart = _t.dwHighDateTime,\ .LowPart = _t.dwLowDateTime,\ };\ if (100 * _UI.QuadPart - 1000 * DELTA_WIN2UNIX >= et)\ return ETIMEDOUT;\ if ((action) == TRUE)\ return 0;\ Sleep(1);\ }\ return ETIMEDOUT;\ } /* * os_mutex_init -- initializes mutex */ int os_mutex_init(os_mutex_t *__restrict mutex) { COMPILE_ERROR_ON(sizeof(os_mutex_t) < sizeof(internal_os_mutex_t)); internal_os_mutex_t *mutex_internal = (internal_os_mutex_t *)mutex; InitializeCriticalSection(&mutex_internal->lock); return 0; } /* * os_mutex_destroy -- destroys mutex */ int os_mutex_destroy(os_mutex_t *__restrict mutex) { internal_os_mutex_t *mutex_internal = (internal_os_mutex_t *)mutex; DeleteCriticalSection(&mutex_internal->lock); return 0; } /* * os_mutex_lock -- locks mutex 
*/ _Use_decl_annotations_ int os_mutex_lock(os_mutex_t *__restrict mutex) { internal_os_mutex_t *mutex_internal = (internal_os_mutex_t *)mutex; EnterCriticalSection(&mutex_internal->lock); if (mutex_internal->lock.RecursionCount > 1) { LeaveCriticalSection(&mutex_internal->lock); FATAL("deadlock detected"); } return 0; } /* * os_mutex_trylock -- tries lock mutex */ _Use_decl_annotations_ int os_mutex_trylock(os_mutex_t *__restrict mutex) { internal_os_mutex_t *mutex_internal = (internal_os_mutex_t *)mutex; if (TryEnterCriticalSection(&mutex_internal->lock) == FALSE) return EBUSY; if (mutex_internal->lock.RecursionCount > 1) { LeaveCriticalSection(&mutex_internal->lock); return EBUSY; } return 0; } /* * os_mutex_timedlock -- tries lock mutex with timeout */ int os_mutex_timedlock(os_mutex_t *__restrict mutex, const struct timespec *abstime) { TIMED_LOCK((os_mutex_trylock(mutex) == 0), abstime); } /* * os_mutex_unlock -- unlocks mutex */ int os_mutex_unlock(os_mutex_t *__restrict mutex) { internal_os_mutex_t *mutex_internal = (internal_os_mutex_t *)mutex; LeaveCriticalSection(&mutex_internal->lock); return 0; } /* * os_rwlock_init -- initializes rwlock */ int os_rwlock_init(os_rwlock_t *__restrict rwlock) { COMPILE_ERROR_ON(sizeof(os_rwlock_t) < sizeof(internal_os_rwlock_t)); internal_os_rwlock_t *rwlock_internal = (internal_os_rwlock_t *)rwlock; InitializeSRWLock(&rwlock_internal->lock); return 0; } /* * os_rwlock_destroy -- destroys rwlock */ int os_rwlock_destroy(os_rwlock_t *__restrict rwlock) { /* do nothing */ UNREFERENCED_PARAMETER(rwlock); return 0; } /* * os_rwlock_rdlock -- get shared lock */ int os_rwlock_rdlock(os_rwlock_t *__restrict rwlock) { internal_os_rwlock_t *rwlock_internal = (internal_os_rwlock_t *)rwlock; AcquireSRWLockShared(&rwlock_internal->lock); rwlock_internal->is_write = 0; return 0; } /* * os_rwlock_wrlock -- get exclusive lock */ int os_rwlock_wrlock(os_rwlock_t *__restrict rwlock) { internal_os_rwlock_t *rwlock_internal = (internal_os_rwlock_t *)rwlock; AcquireSRWLockExclusive(&rwlock_internal->lock); rwlock_internal->is_write = 1; return 0; } /* * os_rwlock_tryrdlock -- tries get shared lock */ int os_rwlock_tryrdlock(os_rwlock_t *__restrict rwlock) { internal_os_rwlock_t *rwlock_internal = (internal_os_rwlock_t *)rwlock; if (TryAcquireSRWLockShared(&rwlock_internal->lock) == FALSE) { return EBUSY; } else { rwlock_internal->is_write = 0; return 0; } } /* * os_rwlock_trywrlock -- tries get exclusive lock */ _Use_decl_annotations_ int os_rwlock_trywrlock(os_rwlock_t *__restrict rwlock) { internal_os_rwlock_t *rwlock_internal = (internal_os_rwlock_t *)rwlock; if (TryAcquireSRWLockExclusive(&rwlock_internal->lock) == FALSE) { return EBUSY; } else { rwlock_internal->is_write = 1; return 0; } } /* * os_rwlock_timedrdlock -- gets shared lock with timeout */ int os_rwlock_timedrdlock(os_rwlock_t *__restrict rwlock, const struct timespec *abstime) { TIMED_LOCK((os_rwlock_tryrdlock(rwlock) == 0), abstime); } /* * os_rwlock_timedwrlock -- gets exclusive lock with timeout */ int os_rwlock_timedwrlock(os_rwlock_t *__restrict rwlock, const struct timespec *abstime) { TIMED_LOCK((os_rwlock_trywrlock(rwlock) == 0), abstime); } /* * os_rwlock_unlock -- unlocks rwlock */ _Use_decl_annotations_ int os_rwlock_unlock(os_rwlock_t *__restrict rwlock) { internal_os_rwlock_t *rwlock_internal = (internal_os_rwlock_t *)rwlock; if (rwlock_internal->is_write) ReleaseSRWLockExclusive(&rwlock_internal->lock); else ReleaseSRWLockShared(&rwlock_internal->lock); return 0; } /* * os_cond_init 
-- initializes condition variable */ int os_cond_init(os_cond_t *__restrict cond) { COMPILE_ERROR_ON(sizeof(os_cond_t) < sizeof(internal_os_cond_t)); internal_os_cond_t *cond_internal = (internal_os_cond_t *)cond; InitializeConditionVariable(&cond_internal->cond); return 0; } /* * os_cond_destroy -- destroys condition variable */ int os_cond_destroy(os_cond_t *__restrict cond) { /* do nothing */ UNREFERENCED_PARAMETER(cond); return 0; } /* * os_cond_broadcast -- broadcast condition variable */ int os_cond_broadcast(os_cond_t *__restrict cond) { internal_os_cond_t *cond_internal = (internal_os_cond_t *)cond; WakeAllConditionVariable(&cond_internal->cond); return 0; } /* * os_cond_wait -- signal condition variable */ int os_cond_signal(os_cond_t *__restrict cond) { internal_os_cond_t *cond_internal = (internal_os_cond_t *)cond; WakeConditionVariable(&cond_internal->cond); return 0; } /* * get_rel_wait -- (internal) convert timespec to windows timeout */ static DWORD get_rel_wait(const struct timespec *abstime) { struct __timeb64 t; _ftime64_s(&t); time_t now_ms = t.time * 1000 + t.millitm; time_t ms = (time_t)(abstime->tv_sec * 1000 + abstime->tv_nsec / 1000000); DWORD rel_wait = (DWORD)(ms - now_ms); return rel_wait < 0 ? 0 : rel_wait; } /* * os_cond_timedwait -- waits on condition variable with timeout */ int os_cond_timedwait(os_cond_t *__restrict cond, os_mutex_t *__restrict mutex, const struct timespec *abstime) { internal_os_cond_t *cond_internal = (internal_os_cond_t *)cond; internal_os_mutex_t *mutex_internal = (internal_os_mutex_t *)mutex; BOOL ret; SetLastError(0); ret = SleepConditionVariableCS(&cond_internal->cond, &mutex_internal->lock, get_rel_wait(abstime)); if (ret == FALSE) return (GetLastError() == ERROR_TIMEOUT) ? ETIMEDOUT : EINVAL; return 0; } /* * os_cond_wait -- waits on condition variable */ int os_cond_wait(os_cond_t *__restrict cond, os_mutex_t *__restrict mutex) { internal_os_cond_t *cond_internal = (internal_os_cond_t *)cond; internal_os_mutex_t *mutex_internal = (internal_os_mutex_t *)mutex; /* XXX - return error code based on GetLastError() */ BOOL ret; ret = SleepConditionVariableCS(&cond_internal->cond, &mutex_internal->lock, INFINITE); return (ret == FALSE) ? EINVAL : 0; } /* * os_once -- once-only function call */ int os_once(os_once_t *once, void (*func)(void)) { internal_os_once_t *once_internal = (internal_os_once_t *)once; internal_os_once_t tmp; while ((tmp = *once_internal) != 2) { if (tmp == 1) continue; /* another thread is already calling func() */ /* try to be the first one... 
*/ if (!util_bool_compare_and_swap64(once_internal, tmp, 1)) continue; /* sorry, another thread was faster */ func(); if (!util_bool_compare_and_swap64(once_internal, 1, 2)) { ERR("error setting once"); return -1; } } return 0; } /* * os_tls_key_create -- creates a new tls key */ int os_tls_key_create(os_tls_key_t *key, void (*destructor)(void *)) { *key = FlsAlloc(destructor); if (*key == TLS_OUT_OF_INDEXES) return EAGAIN; return 0; } /* * os_tls_key_delete -- deletes key from tls */ int os_tls_key_delete(os_tls_key_t key) { if (!FlsFree(key)) return EINVAL; return 0; } /* * os_tls_set -- sets a value in tls */ int os_tls_set(os_tls_key_t key, const void *value) { if (!FlsSetValue(key, (LPVOID)value)) return ENOENT; return 0; } /* * os_tls_get -- gets a value from tls */ void * os_tls_get(os_tls_key_t key) { return FlsGetValue(key); } /* threading */ /* * os_thread_start_routine_wrapper is a start routine for _beginthreadex() and * it helps: * * - wrap the os_thread_create's start function */ static unsigned __stdcall os_thread_start_routine_wrapper(void *arg) { internal_os_thread_t *thread_info = (internal_os_thread_t *)arg; thread_info->result = thread_info->start_routine(thread_info->arg); return 0; } /* * os_thread_create -- starts a new thread */ int os_thread_create(os_thread_t *thread, const os_thread_attr_t *attr, void *(*start_routine)(void *), void *arg) { COMPILE_ERROR_ON(sizeof(os_thread_t) < sizeof(internal_os_thread_t)); internal_os_thread_t *thread_info = (internal_os_thread_t *)thread; thread_info->start_routine = start_routine; thread_info->arg = arg; thread_info->thread_handle = (HANDLE)_beginthreadex(NULL, 0, os_thread_start_routine_wrapper, thread_info, CREATE_SUSPENDED, NULL); if (thread_info->thread_handle == 0) { free(thread_info); return errno; } if (ResumeThread(thread_info->thread_handle) == -1) { free(thread_info); return EAGAIN; } return 0; } /* * os_thread_join -- joins a thread */ int os_thread_join(os_thread_t *thread, void **result) { internal_os_thread_t *internal_thread = (internal_os_thread_t *)thread; WaitForSingleObject(internal_thread->thread_handle, INFINITE); CloseHandle(internal_thread->thread_handle); if (result != NULL) *result = internal_thread->result; return 0; } /* * os_thread_self -- returns handle to calling thread */ void os_thread_self(os_thread_t *thread) { internal_os_thread_t *internal_thread = (internal_os_thread_t *)thread; internal_thread->thread_handle = GetCurrentThread(); } /* * os_cpu_zero -- clears cpu set */ void os_cpu_zero(os_cpu_set_t *set) { internal_os_cpu_set_t *internal_set = (internal_os_cpu_set_t *)set; memset(&internal_set->affinity, 0, sizeof(internal_set->affinity)); } /* * os_cpu_set -- adds cpu to set */ void os_cpu_set(size_t cpu, os_cpu_set_t *set) { internal_os_cpu_set_t *internal_set = (internal_os_cpu_set_t *)set; int sum = 0; int group_max = GetActiveProcessorGroupCount(); int group = 0; while (group < group_max) { sum += GetActiveProcessorCount(group); if (sum > cpu) { /* * XXX: can't set affinity to two different cpu groups */ if (internal_set->affinity.Group != group) { internal_set->affinity.Mask = 0; internal_set->affinity.Group = group; } cpu -= sum - GetActiveProcessorCount(group); internal_set->affinity.Mask |= 1LL << cpu; return; } group++; } FATAL("os_cpu_set cpu out of bounds"); } /* * os_thread_setaffinity_np -- sets affinity of the thread */ int os_thread_setaffinity_np(os_thread_t *thread, size_t set_size, const os_cpu_set_t *set) { internal_os_cpu_set_t *internal_set = (internal_os_cpu_set_t 
*)set; internal_os_thread_t *internal_thread = (internal_os_thread_t *)thread; int ret = SetThreadGroupAffinity(internal_thread->thread_handle, &internal_set->affinity, NULL); return ret != 0 ? 0 : EINVAL; } /* * os_semaphore_init -- initializes a new semaphore instance */ int os_semaphore_init(os_semaphore_t *sem, unsigned value) { internal_semaphore_t *internal_sem = (internal_semaphore_t *)sem; internal_sem->handle = CreateSemaphore(NULL, value, LONG_MAX, NULL); return internal_sem->handle != 0 ? 0 : -1; } /* * os_semaphore_destroy -- destroys a semaphore instance */ int os_semaphore_destroy(os_semaphore_t *sem) { internal_semaphore_t *internal_sem = (internal_semaphore_t *)sem; BOOL ret = CloseHandle(internal_sem->handle); return ret ? 0 : -1; } /* * os_semaphore_wait -- decreases the value of the semaphore */ int os_semaphore_wait(os_semaphore_t *sem) { internal_semaphore_t *internal_sem = (internal_semaphore_t *)sem; DWORD ret = WaitForSingleObject(internal_sem->handle, INFINITE); return ret == WAIT_OBJECT_0 ? 0 : -1; } /* * os_semaphore_trywait -- tries to decrease the value of the semaphore */ int os_semaphore_trywait(os_semaphore_t *sem) { internal_semaphore_t *internal_sem = (internal_semaphore_t *)sem; DWORD ret = WaitForSingleObject(internal_sem->handle, 0); if (ret == WAIT_TIMEOUT) errno = EAGAIN; return ret == WAIT_OBJECT_0 ? 0 : -1; } /* * os_semaphore_post -- increases the value of the semaphore */ int os_semaphore_post(os_semaphore_t *sem) { internal_semaphore_t *internal_sem = (internal_semaphore_t *)sem; BOOL ret = ReleaseSemaphore(internal_sem->handle, 1, NULL); return ret ? 0 : -1; }
15,425
22.443769
78
c
null
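A minimal usage sketch of the wrappers implemented above, as consumed through the portable os_thread.h interface; the worker() and example_os_thread_usage() names are illustrative, and it assumes compilation within the PMDK core tree so that "os_thread.h" resolves.

#include <stdio.h>
#include "os_thread.h"

static os_mutex_t Lock;
static int Counter;

/* increment the shared counter under the portable mutex */
static void *
worker(void *arg)
{
	(void) arg;
	os_mutex_lock(&Lock);
	Counter++;
	os_mutex_unlock(&Lock);
	return NULL;
}

int
example_os_thread_usage(void)
{
	os_thread_t t;

	os_mutex_init(&Lock);
	if (os_thread_create(&t, NULL, worker, NULL) != 0)
		return 1;
	os_thread_join(&t, NULL);
	os_mutex_destroy(&Lock);

	printf("counter = %d\n", Counter);
	return 0;
}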
NearPMSW-main/nearpm/shadow/pmdk-sd/src/core/out.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2014-2020, Intel Corporation */ /* * out.c -- support for logging, tracing, and assertion output * * Macros like LOG(), OUT, ASSERT(), etc. end up here. */ #include <stdio.h> #include <stdarg.h> #include <stdlib.h> #include <unistd.h> #include <limits.h> #include <string.h> #include <errno.h> #include "out.h" #include "os.h" #include "os_thread.h" #include "valgrind_internal.h" #include "util.h" /* XXX - modify Linux makefiles to generate srcversion.h and remove #ifdef */ #ifdef _WIN32 #include "srcversion.h" #endif static const char *Log_prefix; static int Log_level; static FILE *Out_fp; static unsigned Log_alignment; #ifndef NO_LIBPTHREAD #define MAXPRINT 8192 /* maximum expected log line */ #else #define MAXPRINT 256 /* maximum expected log line for libpmem */ #endif struct errormsg { char msg[MAXPRINT]; #ifdef _WIN32 wchar_t wmsg[MAXPRINT]; #endif }; #ifndef NO_LIBPTHREAD static os_once_t Last_errormsg_key_once = OS_ONCE_INIT; static os_tls_key_t Last_errormsg_key; static void _Last_errormsg_key_alloc(void) { int pth_ret = os_tls_key_create(&Last_errormsg_key, free); if (pth_ret) FATAL("!os_thread_key_create"); VALGRIND_ANNOTATE_HAPPENS_BEFORE(&Last_errormsg_key_once); } static void Last_errormsg_key_alloc(void) { os_once(&Last_errormsg_key_once, _Last_errormsg_key_alloc); /* * Workaround Helgrind's bug: * https://bugs.kde.org/show_bug.cgi?id=337735 */ VALGRIND_ANNOTATE_HAPPENS_AFTER(&Last_errormsg_key_once); } static inline void Last_errormsg_fini(void) { void *p = os_tls_get(Last_errormsg_key); if (p) { free(p); (void) os_tls_set(Last_errormsg_key, NULL); } (void) os_tls_key_delete(Last_errormsg_key); } static inline struct errormsg * Last_errormsg_get(void) { Last_errormsg_key_alloc(); struct errormsg *errormsg = os_tls_get(Last_errormsg_key); if (errormsg == NULL) { errormsg = malloc(sizeof(struct errormsg)); if (errormsg == NULL) FATAL("!malloc"); /* make sure it contains empty string initially */ errormsg->msg[0] = '\0'; int ret = os_tls_set(Last_errormsg_key, errormsg); if (ret) FATAL("!os_tls_set"); } return errormsg; } #else /* * We don't want libpmem to depend on libpthread. Instead of using pthread * API to dynamically allocate thread-specific error message buffer, we put * it into TLS. However, keeping a pretty large static buffer (8K) in TLS * may lead to some issues, so the maximum message length is reduced. * Fortunately, it looks like the longest error message in libpmem should * not be longer than about 90 chars (in case of pmem_check_version()). */ static __thread struct errormsg Last_errormsg; static inline void Last_errormsg_key_alloc(void) { } static inline void Last_errormsg_fini(void) { } static inline const struct errormsg * Last_errormsg_get(void) { return &Last_errormsg; } #endif /* NO_LIBPTHREAD */ /* * out_init -- initialize the log * * This is called from the library initialization code. 
*/ void out_init(const char *log_prefix, const char *log_level_var, const char *log_file_var, int major_version, int minor_version) { static int once; /* only need to initialize the out module once */ if (once) return; once++; Log_prefix = log_prefix; #ifdef DEBUG char *log_level; char *log_file; if ((log_level = os_getenv(log_level_var)) != NULL) { Log_level = atoi(log_level); if (Log_level < 0) { Log_level = 0; } } if ((log_file = os_getenv(log_file_var)) != NULL && log_file[0] != '\0') { /* reserve more than enough space for a PID + '\0' */ char log_file_pid[PATH_MAX]; size_t len = strlen(log_file); if (len > 0 && log_file[len - 1] == '-') { if (util_snprintf(log_file_pid, PATH_MAX, "%s%d", log_file, getpid()) < 0) { ERR("snprintf: %d", errno); abort(); } log_file = log_file_pid; } if ((Out_fp = os_fopen(log_file, "w")) == NULL) { char buff[UTIL_MAX_ERR_MSG]; util_strerror(errno, buff, UTIL_MAX_ERR_MSG); fprintf(stderr, "Error (%s): %s=%s: %s\n", log_prefix, log_file_var, log_file, buff); abort(); } } #endif /* DEBUG */ char *log_alignment = os_getenv("PMDK_LOG_ALIGN"); if (log_alignment) { int align = atoi(log_alignment); if (align > 0) Log_alignment = (unsigned)align; } if (Out_fp == NULL) Out_fp = stderr; else setlinebuf(Out_fp); #ifdef DEBUG static char namepath[PATH_MAX]; LOG(1, "pid %d: program: %s", getpid(), util_getexecname(namepath, PATH_MAX)); #endif LOG(1, "%s version %d.%d", log_prefix, major_version, minor_version); static __attribute__((used)) const char *version_msg = "src version: " SRCVERSION; LOG(1, "%s", version_msg); #if VG_PMEMCHECK_ENABLED /* * Attribute "used" to prevent compiler from optimizing out the variable * when LOG expands to no code (!DEBUG) */ static __attribute__((used)) const char *pmemcheck_msg = "compiled with support for Valgrind pmemcheck"; LOG(1, "%s", pmemcheck_msg); #endif /* VG_PMEMCHECK_ENABLED */ #if VG_HELGRIND_ENABLED static __attribute__((used)) const char *helgrind_msg = "compiled with support for Valgrind helgrind"; LOG(1, "%s", helgrind_msg); #endif /* VG_HELGRIND_ENABLED */ #if VG_MEMCHECK_ENABLED static __attribute__((used)) const char *memcheck_msg = "compiled with support for Valgrind memcheck"; LOG(1, "%s", memcheck_msg); #endif /* VG_MEMCHECK_ENABLED */ #if VG_DRD_ENABLED static __attribute__((used)) const char *drd_msg = "compiled with support for Valgrind drd"; LOG(1, "%s", drd_msg); #endif /* VG_DRD_ENABLED */ #if SDS_ENABLED static __attribute__((used)) const char *shutdown_state_msg = "compiled with support for shutdown state"; LOG(1, "%s", shutdown_state_msg); #endif #if NDCTL_ENABLED static __attribute__((used)) const char *ndctl_ge_63_msg = "compiled with libndctl 63+"; LOG(1, "%s", ndctl_ge_63_msg); #endif Last_errormsg_key_alloc(); } /* * out_fini -- close the log file * * This is called to close log file before process stop. */ void out_fini(void) { if (Out_fp != NULL && Out_fp != stderr) { fclose(Out_fp); Out_fp = stderr; } Last_errormsg_fini(); } /* * out_print_func -- default print_func, goes to stderr or Out_fp */ static void out_print_func(const char *s) { /* to suppress drd false-positive */ /* XXX: confirm real nature of this issue: pmem/issues#863 */ #ifdef SUPPRESS_FPUTS_DRD_ERROR VALGRIND_ANNOTATE_IGNORE_READS_BEGIN(); VALGRIND_ANNOTATE_IGNORE_WRITES_BEGIN(); #endif fputs(s, Out_fp); #ifdef SUPPRESS_FPUTS_DRD_ERROR VALGRIND_ANNOTATE_IGNORE_READS_END(); VALGRIND_ANNOTATE_IGNORE_WRITES_END(); #endif } /* * calling Print(s) calls the current print_func... 
*/ typedef void (*Print_func)(const char *s); typedef int (*Vsnprintf_func)(char *str, size_t size, const char *format, va_list ap); static Print_func Print = out_print_func; static Vsnprintf_func Vsnprintf = vsnprintf; /* * out_set_print_func -- allow override of print_func used by out module */ void out_set_print_func(void (*print_func)(const char *s)) { LOG(3, "print %p", print_func); Print = (print_func == NULL) ? out_print_func : print_func; } /* * out_set_vsnprintf_func -- allow override of vsnprintf_func used by out module */ void out_set_vsnprintf_func(int (*vsnprintf_func)(char *str, size_t size, const char *format, va_list ap)) { LOG(3, "vsnprintf %p", vsnprintf_func); Vsnprintf = (vsnprintf_func == NULL) ? vsnprintf : vsnprintf_func; } /* * out_snprintf -- (internal) custom snprintf implementation */ FORMAT_PRINTF(3, 4) static int out_snprintf(char *str, size_t size, const char *format, ...) { int ret; va_list ap; va_start(ap, format); ret = Vsnprintf(str, size, format, ap); va_end(ap); return (ret); } /* * out_common -- common output code, all output goes through here */ static void out_common(const char *file, int line, const char *func, int level, const char *suffix, const char *fmt, va_list ap) { int oerrno = errno; char buf[MAXPRINT]; unsigned cc = 0; int ret; const char *sep = ""; char errstr[UTIL_MAX_ERR_MSG] = ""; unsigned long olast_error = 0; #ifdef _WIN32 if (fmt && fmt[0] == '!' && fmt[1] == '!') olast_error = GetLastError(); #endif if (file) { char *f = strrchr(file, OS_DIR_SEPARATOR); if (f) file = f + 1; ret = out_snprintf(&buf[cc], MAXPRINT - cc, "<%s>: <%d> [%s:%d %s] ", Log_prefix, level, file, line, func); if (ret < 0) { Print("out_snprintf failed"); goto end; } cc += (unsigned)ret; if (cc < Log_alignment) { memset(buf + cc, ' ', Log_alignment - cc); cc = Log_alignment; } } if (fmt) { if (*fmt == '!') { sep = ": "; fmt++; if (*fmt == '!') { fmt++; /* it will abort on non Windows OS */ util_strwinerror(olast_error, errstr, UTIL_MAX_ERR_MSG); } else { util_strerror(oerrno, errstr, UTIL_MAX_ERR_MSG); } } ret = Vsnprintf(&buf[cc], MAXPRINT - cc, fmt, ap); if (ret < 0) { Print("Vsnprintf failed"); goto end; } cc += (unsigned)ret; } out_snprintf(&buf[cc], MAXPRINT - cc, "%s%s%s", sep, errstr, suffix); Print(buf); end: errno = oerrno; #ifdef _WIN32 SetLastError(olast_error); #endif } /* * out_error -- common error output code, all error messages go through here */ static void out_error(const char *file, int line, const char *func, const char *suffix, const char *fmt, va_list ap) { int oerrno = errno; unsigned long olast_error = 0; #ifdef _WIN32 olast_error = GetLastError(); #endif unsigned cc = 0; int ret; const char *sep = ""; char errstr[UTIL_MAX_ERR_MSG] = ""; char *errormsg = (char *)out_get_errormsg(); if (fmt) { if (*fmt == '!') { sep = ": "; fmt++; if (*fmt == '!') { fmt++; /* it will abort on non Windows OS */ util_strwinerror(olast_error, errstr, UTIL_MAX_ERR_MSG); } else { util_strerror(oerrno, errstr, UTIL_MAX_ERR_MSG); } } ret = Vsnprintf(&errormsg[cc], MAXPRINT, fmt, ap); if (ret < 0) { strcpy(errormsg, "Vsnprintf failed"); goto end; } cc += (unsigned)ret; out_snprintf(&errormsg[cc], MAXPRINT - cc, "%s%s", sep, errstr); } #ifdef DEBUG if (Log_level >= 1) { char buf[MAXPRINT]; cc = 0; if (file) { char *f = strrchr(file, OS_DIR_SEPARATOR); if (f) file = f + 1; ret = out_snprintf(&buf[cc], MAXPRINT, "<%s>: <1> [%s:%d %s] ", Log_prefix, file, line, func); if (ret < 0) { Print("out_snprintf failed"); goto end; } cc += (unsigned)ret; if (cc < Log_alignment) { 
memset(buf + cc, ' ', Log_alignment - cc); cc = Log_alignment; } } out_snprintf(&buf[cc], MAXPRINT - cc, "%s%s", errormsg, suffix); Print(buf); } #endif end: errno = oerrno; #ifdef _WIN32 SetLastError(olast_error); #endif } /* * out -- output a line, newline added automatically */ void out(const char *fmt, ...) { va_list ap; va_start(ap, fmt); out_common(NULL, 0, NULL, 0, "\n", fmt, ap); va_end(ap); } /* * out_nonl -- output a line, no newline added automatically */ void out_nonl(int level, const char *fmt, ...) { va_list ap; if (Log_level < level) return; va_start(ap, fmt); out_common(NULL, 0, NULL, level, "", fmt, ap); va_end(ap); } /* * out_log -- output a log line if Log_level >= level */ void out_log(const char *file, int line, const char *func, int level, const char *fmt, ...) { va_list ap; if (Log_level < level) return; va_start(ap, fmt); out_common(file, line, func, level, "\n", fmt, ap); va_end(ap); } /* * out_fatal -- output a fatal error & die (i.e. assertion failure) */ void out_fatal(const char *file, int line, const char *func, const char *fmt, ...) { va_list ap; va_start(ap, fmt); out_common(file, line, func, 1, "\n", fmt, ap); va_end(ap); abort(); } /* * out_err -- output an error message */ void out_err(const char *file, int line, const char *func, const char *fmt, ...) { va_list ap; va_start(ap, fmt); out_error(file, line, func, "\n", fmt, ap); va_end(ap); } /* * out_get_errormsg -- get the last error message */ const char * out_get_errormsg(void) { const struct errormsg *errormsg = Last_errormsg_get(); return &errormsg->msg[0]; } #ifdef _WIN32 /* * out_get_errormsgW -- get the last error message in wchar_t */ const wchar_t * out_get_errormsgW(void) { struct errormsg *errormsg = Last_errormsg_get(); const char *utf8 = &errormsg->msg[0]; wchar_t *utf16 = &errormsg->wmsg[0]; if (util_toUTF16_buff(utf8, utf16, sizeof(errormsg->wmsg)) != 0) FATAL("!Failed to convert string"); return (const wchar_t *)utf16; } #endif
12,602
20.252951
80
c
null
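A minimal sketch of how a consumer of the out module might initialize it and emit diagnostics; the "example" prefix, the environment-variable names, and example_out_usage() are illustrative, not the ones any real PMDK library uses.

#include <errno.h>
#include "out.h"

void
example_out_usage(void)
{
	/* prefix, log-level env var, log-file env var, major/minor version */
	out_init("example", "EXAMPLE_LOG_LEVEL", "EXAMPLE_LOG_FILE", 1, 0);

	LOG(3, "opening pool %s", "/tmp/pool.obj");	/* debug trace only */

	errno = EINVAL;
	ERR("!example_open");	/* leading '!' appends strerror(errno) */
	LOG(1, "last error message: %s", out_get_errormsg());

	out_fini();
}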
NearPMSW-main/nearpm/shadow/pmdk-sd/src/core/util.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2014-2020, Intel Corporation */ /* * Copyright (c) 2016-2020, Microsoft Corporation. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * util.h -- internal definitions for util module */ #ifndef PMDK_UTIL_H #define PMDK_UTIL_H 1 #include <string.h> #include <stddef.h> #include <stdint.h> #include <stdio.h> #include <ctype.h> #ifdef _MSC_VER #include <intrin.h> /* popcnt, bitscan */ #endif #include <sys/param.h> #ifdef __cplusplus extern "C" { #endif extern unsigned long long Pagesize; extern unsigned long long Mmap_align; #if defined(__x86_64) || defined(_M_X64) || defined(__aarch64__) #define CACHELINE_SIZE 64ULL #elif defined(__PPC64__) #define CACHELINE_SIZE 128ULL #else #error unable to recognize architecture at compile time #endif #define PAGE_ALIGNED_DOWN_SIZE(size) ((size) & ~(Pagesize - 1)) #define PAGE_ALIGNED_UP_SIZE(size)\ PAGE_ALIGNED_DOWN_SIZE((size) + (Pagesize - 1)) #define IS_PAGE_ALIGNED(size) (((size) & (Pagesize - 1)) == 0) #define IS_MMAP_ALIGNED(size) (((size) & (Mmap_align - 1)) == 0) #define PAGE_ALIGN_UP(addr) ((void *)PAGE_ALIGNED_UP_SIZE((uintptr_t)(addr))) #define ALIGN_UP(size, align) (((size) + (align) - 1) & ~((align) - 1)) #define ALIGN_DOWN(size, align) ((size) & ~((align) - 1)) #define ADDR_SUM(vp, lp) ((void *)((char *)(vp) + (lp))) #define util_alignof(t) offsetof(struct {char _util_c; t _util_m; }, _util_m) #define FORMAT_PRINTF(a, b) __attribute__((__format__(__printf__, (a), (b)))) void util_init(void); int util_is_zeroed(const void *addr, size_t len); uint64_t util_checksum_compute(void *addr, size_t len, uint64_t *csump, size_t skip_off); int util_checksum(void *addr, size_t len, uint64_t *csump, int insert, size_t skip_off); uint64_t util_checksum_seq(const void *addr, size_t len, uint64_t csum); int util_parse_size(const char *str, size_t *sizep); char *util_fgets(char *buffer, int max, FILE *stream); char *util_getexecname(char *path, size_t pathlen); char *util_part_realpath(const char *path); int util_compare_file_inodes(const char *path1, const char *path2); void 
*util_aligned_malloc(size_t alignment, size_t size); void util_aligned_free(void *ptr); struct tm *util_localtime(const time_t *timep); int util_safe_strcpy(char *dst, const char *src, size_t max_length); void util_emit_log(const char *lib, const char *func, int order); char *util_readline(FILE *fh); int util_snprintf(char *str, size_t size, const char *format, ...) FORMAT_PRINTF(3, 4); #ifdef _WIN32 char *util_toUTF8(const wchar_t *wstr); wchar_t *util_toUTF16(const char *wstr); void util_free_UTF8(char *str); void util_free_UTF16(wchar_t *str); int util_toUTF16_buff(const char *in, wchar_t *out, size_t out_size); int util_toUTF8_buff(const wchar_t *in, char *out, size_t out_size); void util_suppress_errmsg(void); int util_lasterror_to_errno(unsigned long err); #endif #define UTIL_MAX_ERR_MSG 128 void util_strerror(int errnum, char *buff, size_t bufflen); void util_strwinerror(unsigned long err, char *buff, size_t bufflen); void util_set_alloc_funcs( void *(*malloc_func)(size_t size), void (*free_func)(void *ptr), void *(*realloc_func)(void *ptr, size_t size), char *(*strdup_func)(const char *s)); /* * Macro calculates number of elements in given table */ #ifndef ARRAY_SIZE #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) #endif #ifdef _MSC_VER #define force_inline inline __forceinline #define NORETURN __declspec(noreturn) #define barrier() _ReadWriteBarrier() #else #define force_inline __attribute__((always_inline)) inline #define NORETURN __attribute__((noreturn)) #define barrier() asm volatile("" ::: "memory") #endif #ifdef _MSC_VER typedef UNALIGNED uint64_t ua_uint64_t; typedef UNALIGNED uint32_t ua_uint32_t; typedef UNALIGNED uint16_t ua_uint16_t; #else typedef uint64_t ua_uint64_t __attribute__((aligned(1))); typedef uint32_t ua_uint32_t __attribute__((aligned(1))); typedef uint16_t ua_uint16_t __attribute__((aligned(1))); #endif #define util_get_not_masked_bits(x, mask) ((x) & ~(mask)) /* * util_setbit -- setbit macro substitution which properly deals with types */ static inline void util_setbit(uint8_t *b, uint32_t i) { b[i / 8] = (uint8_t)(b[i / 8] | (uint8_t)(1 << (i % 8))); } /* * util_clrbit -- clrbit macro substitution which properly deals with types */ static inline void util_clrbit(uint8_t *b, uint32_t i) { b[i / 8] = (uint8_t)(b[i / 8] & (uint8_t)(~(1 << (i % 8)))); } #define util_isset(a, i) isset(a, i) #define util_isclr(a, i) isclr(a, i) #define util_flag_isset(a, f) ((a) & (f)) #define util_flag_isclr(a, f) (((a) & (f)) == 0) /* * util_is_pow2 -- returns !0 when there's only 1 bit set in v, 0 otherwise */ static force_inline int util_is_pow2(uint64_t v) { return v && !(v & (v - 1)); } /* * util_div_ceil -- divides a by b and rounds up the result */ static force_inline unsigned util_div_ceil(unsigned a, unsigned b) { return (unsigned)(((unsigned long)a + b - 1) / b); } /* * util_bool_compare_and_swap -- perform an atomic compare and swap * util_fetch_and_* -- perform an operation atomically, return old value * util_synchronize -- issue a full memory barrier * util_popcount -- count number of set bits * util_lssb_index -- return index of least significant set bit, * undefined on zero * util_mssb_index -- return index of most significant set bit * undefined on zero * * XXX assertions needed on (value != 0) in both versions of bitscans * */ #ifndef _MSC_VER /* * ISO C11 -- 7.17.1.4 * memory_order - an enumerated type whose enumerators identify memory ordering * constraints. 
*/ typedef enum { memory_order_relaxed = __ATOMIC_RELAXED, memory_order_consume = __ATOMIC_CONSUME, memory_order_acquire = __ATOMIC_ACQUIRE, memory_order_release = __ATOMIC_RELEASE, memory_order_acq_rel = __ATOMIC_ACQ_REL, memory_order_seq_cst = __ATOMIC_SEQ_CST } memory_order; /* * ISO C11 -- 7.17.7.2 The atomic_load generic functions * Integer width specific versions as supplement for: * * * #include <stdatomic.h> * C atomic_load(volatile A *object); * C atomic_load_explicit(volatile A *object, memory_order order); * * The atomic_load interface doesn't return the loaded value, but instead * copies it to a specified address -- see comments at the MSVC version. * * Also, instead of generic functions, two versions are available: * for 32 bit fundamental integers, and for 64 bit ones. */ #define util_atomic_load_explicit32 __atomic_load #define util_atomic_load_explicit64 __atomic_load /* * ISO C11 -- 7.17.7.1 The atomic_store generic functions * Integer width specific versions as supplement for: * * #include <stdatomic.h> * void atomic_store(volatile A *object, C desired); * void atomic_store_explicit(volatile A *object, C desired, * memory_order order); */ #define util_atomic_store_explicit32 __atomic_store_n #define util_atomic_store_explicit64 __atomic_store_n /* * https://gcc.gnu.org/onlinedocs/gcc/_005f_005fsync-Builtins.html * https://gcc.gnu.org/onlinedocs/gcc/Other-Builtins.html * https://clang.llvm.org/docs/LanguageExtensions.html#builtin-functions */ #define util_bool_compare_and_swap32 __sync_bool_compare_and_swap #define util_bool_compare_and_swap64 __sync_bool_compare_and_swap #define util_fetch_and_add32 __sync_fetch_and_add #define util_fetch_and_add64 __sync_fetch_and_add #define util_fetch_and_sub32 __sync_fetch_and_sub #define util_fetch_and_sub64 __sync_fetch_and_sub #define util_fetch_and_and32 __sync_fetch_and_and #define util_fetch_and_and64 __sync_fetch_and_and #define util_fetch_and_or32 __sync_fetch_and_or #define util_fetch_and_or64 __sync_fetch_and_or #define util_synchronize __sync_synchronize #define util_popcount(value) ((unsigned char)__builtin_popcount(value)) #define util_popcount64(value) ((unsigned char)__builtin_popcountll(value)) #define util_lssb_index(value) ((unsigned char)__builtin_ctz(value)) #define util_lssb_index64(value) ((unsigned char)__builtin_ctzll(value)) #define util_mssb_index(value) ((unsigned char)(31 - __builtin_clz(value))) #define util_mssb_index64(value) ((unsigned char)(63 - __builtin_clzll(value))) #else /* ISO C11 -- 7.17.1.4 */ typedef enum { memory_order_relaxed, memory_order_consume, memory_order_acquire, memory_order_release, memory_order_acq_rel, memory_order_seq_cst } memory_order; /* * ISO C11 -- 7.17.7.2 The atomic_load generic functions * Integer width specific versions as supplement for: * * * #include <stdatomic.h> * C atomic_load(volatile A *object); * C atomic_load_explicit(volatile A *object, memory_order order); * * The atomic_load interface doesn't return the loaded value, but instead * copies it to a specified address. * The MSVC specific implementation needs to trigger a barrier (at least * compiler barrier) after the load from the volatile value. The actual load * from the volatile value itself is expected to be atomic. 
* * The actual isnterface here: * #include "util.h" * void util_atomic_load32(volatile A *object, A *destination); * void util_atomic_load64(volatile A *object, A *destination); * void util_atomic_load_explicit32(volatile A *object, A *destination, * memory_order order); * void util_atomic_load_explicit64(volatile A *object, A *destination, * memory_order order); */ #ifndef _M_X64 #error MSVC ports of util_atomic_ only work on X86_64 #endif #if _MSC_VER >= 2000 #error util_atomic_ utility functions not tested with this version of VC++ #error These utility functions are not future proof, as they are not #error based on publicly available documentation. #endif #define util_atomic_load_explicit(object, dest, order)\ do {\ COMPILE_ERROR_ON(order != memory_order_seq_cst &&\ order != memory_order_consume &&\ order != memory_order_acquire &&\ order != memory_order_relaxed);\ *dest = *object;\ if (order == memory_order_seq_cst ||\ order == memory_order_consume ||\ order == memory_order_acquire)\ _ReadWriteBarrier();\ } while (0) #define util_atomic_load_explicit32 util_atomic_load_explicit #define util_atomic_load_explicit64 util_atomic_load_explicit /* ISO C11 -- 7.17.7.1 The atomic_store generic functions */ #define util_atomic_store_explicit64(object, desired, order)\ do {\ COMPILE_ERROR_ON(order != memory_order_seq_cst &&\ order != memory_order_release &&\ order != memory_order_relaxed);\ if (order == memory_order_seq_cst) {\ _InterlockedExchange64(\ (volatile long long *)object, desired);\ } else {\ if (order == memory_order_release)\ _ReadWriteBarrier();\ *object = desired;\ }\ } while (0) #define util_atomic_store_explicit32(object, desired, order)\ do {\ COMPILE_ERROR_ON(order != memory_order_seq_cst &&\ order != memory_order_release &&\ order != memory_order_relaxed);\ if (order == memory_order_seq_cst) {\ _InterlockedExchange(\ (volatile long *)object, desired);\ } else {\ if (order == memory_order_release)\ _ReadWriteBarrier();\ *object = desired;\ }\ } while (0) /* * https://msdn.microsoft.com/en-us/library/hh977022.aspx */ static __inline int bool_compare_and_swap32_VC(volatile LONG *ptr, LONG oldval, LONG newval) { LONG old = InterlockedCompareExchange(ptr, newval, oldval); return (old == oldval); } static __inline int bool_compare_and_swap64_VC(volatile LONG64 *ptr, LONG64 oldval, LONG64 newval) { LONG64 old = InterlockedCompareExchange64(ptr, newval, oldval); return (old == oldval); } #define util_bool_compare_and_swap32(p, o, n)\ bool_compare_and_swap32_VC((LONG *)(p), (LONG)(o), (LONG)(n)) #define util_bool_compare_and_swap64(p, o, n)\ bool_compare_and_swap64_VC((LONG64 *)(p), (LONG64)(o), (LONG64)(n)) #define util_fetch_and_add32(ptr, value)\ InterlockedExchangeAdd((LONG *)(ptr), value) #define util_fetch_and_add64(ptr, value)\ InterlockedExchangeAdd64((LONG64 *)(ptr), value) #define util_fetch_and_sub32(ptr, value)\ InterlockedExchangeSubtract((LONG *)(ptr), value) #define util_fetch_and_sub64(ptr, value)\ InterlockedExchangeAdd64((LONG64 *)(ptr), -((LONG64)(value))) #define util_fetch_and_and32(ptr, value)\ InterlockedAnd((LONG *)(ptr), value) #define util_fetch_and_and64(ptr, value)\ InterlockedAnd64((LONG64 *)(ptr), value) #define util_fetch_and_or32(ptr, value)\ InterlockedOr((LONG *)(ptr), value) #define util_fetch_and_or64(ptr, value)\ InterlockedOr64((LONG64 *)(ptr), value) static __inline void util_synchronize(void) { MemoryBarrier(); } #define util_popcount(value) (unsigned char)__popcnt(value) #define util_popcount64(value) (unsigned char)__popcnt64(value) static 
__inline unsigned char util_lssb_index(int value) { unsigned long ret; _BitScanForward(&ret, value); return (unsigned char)ret; } static __inline unsigned char util_lssb_index64(long long value) { unsigned long ret; _BitScanForward64(&ret, value); return (unsigned char)ret; } static __inline unsigned char util_mssb_index(int value) { unsigned long ret; _BitScanReverse(&ret, value); return (unsigned char)ret; } static __inline unsigned char util_mssb_index64(long long value) { unsigned long ret; _BitScanReverse64(&ret, value); return (unsigned char)ret; } #endif /* ISO C11 -- 7.17.7 Operations on atomic types */ #define util_atomic_load32(object, dest)\ util_atomic_load_explicit32(object, dest, memory_order_seq_cst) #define util_atomic_load64(object, dest)\ util_atomic_load_explicit64(object, dest, memory_order_seq_cst) #define util_atomic_store32(object, desired)\ util_atomic_store_explicit32(object, desired, memory_order_seq_cst) #define util_atomic_store64(object, desired)\ util_atomic_store_explicit64(object, desired, memory_order_seq_cst) /* * util_get_printable_ascii -- convert non-printable ascii to dot '.' */ static inline char util_get_printable_ascii(char c) { return isprint((unsigned char)c) ? c : '.'; } char *util_concat_str(const char *s1, const char *s2); #if !defined(likely) #if defined(__GNUC__) #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #else #define likely(x) (!!(x)) #define unlikely(x) (!!(x)) #endif #endif #if defined(__CHECKER__) #define COMPILE_ERROR_ON(cond) #define ASSERT_COMPILE_ERROR_ON(cond) #elif defined(_MSC_VER) #define COMPILE_ERROR_ON(cond) C_ASSERT(!(cond)) /* XXX - can't be done with C_ASSERT() unless we have __builtin_constant_p() */ #define ASSERT_COMPILE_ERROR_ON(cond) do {} while (0) #else #define COMPILE_ERROR_ON(cond) ((void)sizeof(char[(cond) ? -1 : 1])) #define ASSERT_COMPILE_ERROR_ON(cond) COMPILE_ERROR_ON(cond) #endif #ifndef _MSC_VER #define ATTR_CONSTRUCTOR __attribute__((constructor)) static #define ATTR_DESTRUCTOR __attribute__((destructor)) static #else #define ATTR_CONSTRUCTOR #define ATTR_DESTRUCTOR #endif #ifndef _MSC_VER #define CONSTRUCTOR(fun) ATTR_CONSTRUCTOR #else #ifdef __cplusplus #define CONSTRUCTOR(fun) \ void fun(); \ struct _##fun { \ _##fun() { \ fun(); \ } \ }; static _##fun foo; \ static #else #define CONSTRUCTOR(fun) \ MSVC_CONSTR(fun) \ static #endif #endif #ifdef __GNUC__ #define CHECK_FUNC_COMPATIBLE(func1, func2)\ COMPILE_ERROR_ON(!__builtin_types_compatible_p(typeof(func1),\ typeof(func2))) #else #define CHECK_FUNC_COMPATIBLE(func1, func2) do {} while (0) #endif /* __GNUC__ */ #ifdef __cplusplus } #endif #endif /* util.h */
17,058
30.47417
79
h
null
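A minimal sketch exercising a few of the util.h helpers declared above (bit queries, alignment, and the atomic wrappers); the variable and function names are illustrative.

#include <stdint.h>
#include <stdio.h>
#include "util.h"

static uint32_t Counter;	/* shared, updated lock-free */
static uint64_t Once_state;	/* 0 = not claimed, 1 = claimed */

void
example_util_helpers(void)
{
	uint64_t v = 4096;

	printf("pow2=%d popcount=%d mssb=%d\n",
		util_is_pow2(v),
		(int)util_popcount64(v),
		(int)util_mssb_index64(v));

	/* round 100 up to the next cache-line multiple (64 on x86-64) */
	printf("aligned=%llu\n",
		(unsigned long long)ALIGN_UP(100ULL, CACHELINE_SIZE));

	/* atomic add, and the CAS pattern used by once-style initialization */
	util_fetch_and_add32(&Counter, 1);
	if (util_bool_compare_and_swap64(&Once_state, 0, 1))
		printf("first caller wins\n");
}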
NearPMSW-main/nearpm/shadow/pmdk-sd/src/core/valgrind_internal.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2020, Intel Corporation */ /* * valgrind_internal.h -- internal definitions for valgrind macros */ #ifndef PMDK_VALGRIND_INTERNAL_H #define PMDK_VALGRIND_INTERNAL_H 1 #if !defined(_WIN32) && !defined(__FreeBSD__) #ifndef VALGRIND_ENABLED #define VALGRIND_ENABLED 1 #endif #endif #if VALGRIND_ENABLED #define VG_PMEMCHECK_ENABLED 1 #define VG_HELGRIND_ENABLED 1 #define VG_MEMCHECK_ENABLED 1 #define VG_DRD_ENABLED 1 #endif #if VG_PMEMCHECK_ENABLED || VG_HELGRIND_ENABLED || VG_MEMCHECK_ENABLED || \ VG_DRD_ENABLED #define ANY_VG_TOOL_ENABLED 1 #else #define ANY_VG_TOOL_ENABLED 0 #endif #if ANY_VG_TOOL_ENABLED extern unsigned _On_valgrind; #define On_valgrind __builtin_expect(_On_valgrind, 0) #include "valgrind/valgrind.h" #else #define On_valgrind (0) #endif #if VG_HELGRIND_ENABLED extern unsigned _On_helgrind; #define On_helgrind __builtin_expect(_On_helgrind, 0) #include "valgrind/helgrind.h" #else #define On_helgrind (0) #endif #if VG_DRD_ENABLED extern unsigned _On_drd; #define On_drd __builtin_expect(_On_drd, 0) #include "valgrind/drd.h" #else #define On_drd (0) #endif #if VG_HELGRIND_ENABLED || VG_DRD_ENABLED extern unsigned _On_drd_or_hg; #define On_drd_or_hg __builtin_expect(_On_drd_or_hg, 0) #define VALGRIND_ANNOTATE_HAPPENS_BEFORE(obj) do {\ if (On_drd_or_hg) \ ANNOTATE_HAPPENS_BEFORE((obj));\ } while (0) #define VALGRIND_ANNOTATE_HAPPENS_AFTER(obj) do {\ if (On_drd_or_hg) \ ANNOTATE_HAPPENS_AFTER((obj));\ } while (0) #define VALGRIND_ANNOTATE_NEW_MEMORY(addr, size) do {\ if (On_drd_or_hg) \ ANNOTATE_NEW_MEMORY((addr), (size));\ } while (0) #define VALGRIND_ANNOTATE_IGNORE_READS_BEGIN() do {\ if (On_drd_or_hg) \ ANNOTATE_IGNORE_READS_BEGIN();\ } while (0) #define VALGRIND_ANNOTATE_IGNORE_READS_END() do {\ if (On_drd_or_hg) \ ANNOTATE_IGNORE_READS_END();\ } while (0) #define VALGRIND_ANNOTATE_IGNORE_WRITES_BEGIN() do {\ if (On_drd_or_hg) \ ANNOTATE_IGNORE_WRITES_BEGIN();\ } while (0) #define VALGRIND_ANNOTATE_IGNORE_WRITES_END() do {\ if (On_drd_or_hg) \ ANNOTATE_IGNORE_WRITES_END();\ } while (0) /* Supported by both helgrind and drd. 
*/ #define VALGRIND_HG_DRD_DISABLE_CHECKING(addr, size) do {\ if (On_drd_or_hg) \ VALGRIND_HG_DISABLE_CHECKING((addr), (size));\ } while (0) #else #define On_drd_or_hg (0) #define VALGRIND_ANNOTATE_HAPPENS_BEFORE(obj) do { (void)(obj); } while (0) #define VALGRIND_ANNOTATE_HAPPENS_AFTER(obj) do { (void)(obj); } while (0) #define VALGRIND_ANNOTATE_NEW_MEMORY(addr, size) do {\ (void) (addr);\ (void) (size);\ } while (0) #define VALGRIND_ANNOTATE_IGNORE_READS_BEGIN() do {} while (0) #define VALGRIND_ANNOTATE_IGNORE_READS_END() do {} while (0) #define VALGRIND_ANNOTATE_IGNORE_WRITES_BEGIN() do {} while (0) #define VALGRIND_ANNOTATE_IGNORE_WRITES_END() do {} while (0) #define VALGRIND_HG_DRD_DISABLE_CHECKING(addr, size) do {\ (void) (addr);\ (void) (size);\ } while (0) #endif #if VG_PMEMCHECK_ENABLED extern unsigned _On_pmemcheck; #define On_pmemcheck __builtin_expect(_On_pmemcheck, 0) #include "valgrind/pmemcheck.h" void pobj_emit_log(const char *func, int order); void pmem_emit_log(const char *func, int order); void pmem2_emit_log(const char *func, int order); extern int _Pmreorder_emit; #define Pmreorder_emit __builtin_expect(_Pmreorder_emit, 0) #define VALGRIND_REGISTER_PMEM_MAPPING(addr, len) do {\ if (On_pmemcheck)\ VALGRIND_PMC_REGISTER_PMEM_MAPPING((addr), (len));\ } while (0) #define VALGRIND_REGISTER_PMEM_FILE(desc, base_addr, size, offset) do {\ if (On_pmemcheck)\ VALGRIND_PMC_REGISTER_PMEM_FILE((desc), (base_addr), (size), \ (offset));\ } while (0) #define VALGRIND_REMOVE_PMEM_MAPPING(addr, len) do {\ if (On_pmemcheck)\ VALGRIND_PMC_REMOVE_PMEM_MAPPING((addr), (len));\ } while (0) #define VALGRIND_CHECK_IS_PMEM_MAPPING(addr, len) do {\ if (On_pmemcheck)\ VALGRIND_PMC_CHECK_IS_PMEM_MAPPING((addr), (len));\ } while (0) #define VALGRIND_PRINT_PMEM_MAPPINGS do {\ if (On_pmemcheck)\ VALGRIND_PMC_PRINT_PMEM_MAPPINGS;\ } while (0) #define VALGRIND_DO_FLUSH(addr, len) do {\ if (On_pmemcheck)\ VALGRIND_PMC_DO_FLUSH((addr), (len));\ } while (0) #define VALGRIND_DO_FENCE do {\ if (On_pmemcheck)\ VALGRIND_PMC_DO_FENCE;\ } while (0) #define VALGRIND_DO_PERSIST(addr, len) do {\ if (On_pmemcheck) {\ VALGRIND_PMC_DO_FLUSH((addr), (len));\ VALGRIND_PMC_DO_FENCE;\ }\ } while (0) #define VALGRIND_SET_CLEAN(addr, len) do {\ if (On_pmemcheck)\ VALGRIND_PMC_SET_CLEAN(addr, len);\ } while (0) #define VALGRIND_WRITE_STATS do {\ if (On_pmemcheck)\ VALGRIND_PMC_WRITE_STATS;\ } while (0) #define VALGRIND_EMIT_LOG(emit_log) do {\ if (On_pmemcheck)\ VALGRIND_PMC_EMIT_LOG((emit_log));\ } while (0) #define VALGRIND_START_TX do {\ if (On_pmemcheck)\ VALGRIND_PMC_START_TX;\ } while (0) #define VALGRIND_START_TX_N(txn) do {\ if (On_pmemcheck)\ VALGRIND_PMC_START_TX_N(txn);\ } while (0) #define VALGRIND_END_TX do {\ if (On_pmemcheck)\ VALGRIND_PMC_END_TX;\ } while (0) #define VALGRIND_END_TX_N(txn) do {\ if (On_pmemcheck)\ VALGRIND_PMC_END_TX_N(txn);\ } while (0) #define VALGRIND_ADD_TO_TX(addr, len) do {\ if (On_pmemcheck)\ VALGRIND_PMC_ADD_TO_TX(addr, len);\ } while (0) #define VALGRIND_ADD_TO_TX_N(txn, addr, len) do {\ if (On_pmemcheck)\ VALGRIND_PMC_ADD_TO_TX_N(txn, addr, len);\ } while (0) #define VALGRIND_REMOVE_FROM_TX(addr, len) do {\ if (On_pmemcheck)\ VALGRIND_PMC_REMOVE_FROM_TX(addr, len);\ } while (0) #define VALGRIND_REMOVE_FROM_TX_N(txn, addr, len) do {\ if (On_pmemcheck)\ VALGRIND_PMC_REMOVE_FROM_TX_N(txn, addr, len);\ } while (0) #define VALGRIND_ADD_TO_GLOBAL_TX_IGNORE(addr, len) do {\ if (On_pmemcheck)\ VALGRIND_PMC_ADD_TO_GLOBAL_TX_IGNORE(addr, len);\ } while (0) /* * Logs library and function name 
with proper suffix * to pmemcheck store log file. */ #define PMEMOBJ_API_START()\ if (Pmreorder_emit)\ pobj_emit_log(__func__, 0); #define PMEMOBJ_API_END()\ if (Pmreorder_emit)\ pobj_emit_log(__func__, 1); #define PMEM_API_START()\ if (Pmreorder_emit)\ pmem_emit_log(__func__, 0); #define PMEM_API_END()\ if (Pmreorder_emit)\ pmem_emit_log(__func__, 1); #define PMEM2_API_START(func_name)\ if (Pmreorder_emit)\ pmem2_emit_log(func_name, 0); #define PMEM2_API_END(func_name)\ if (Pmreorder_emit)\ pmem2_emit_log(func_name, 1); #else #define On_pmemcheck (0) #define Pmreorder_emit (0) #define VALGRIND_REGISTER_PMEM_MAPPING(addr, len) do {\ (void) (addr);\ (void) (len);\ } while (0) #define VALGRIND_REGISTER_PMEM_FILE(desc, base_addr, size, offset) do {\ (void) (desc);\ (void) (base_addr);\ (void) (size);\ (void) (offset);\ } while (0) #define VALGRIND_REMOVE_PMEM_MAPPING(addr, len) do {\ (void) (addr);\ (void) (len);\ } while (0) #define VALGRIND_CHECK_IS_PMEM_MAPPING(addr, len) do {\ (void) (addr);\ (void) (len);\ } while (0) #define VALGRIND_PRINT_PMEM_MAPPINGS do {} while (0) #define VALGRIND_DO_FLUSH(addr, len) do {\ (void) (addr);\ (void) (len);\ } while (0) #define VALGRIND_DO_FENCE do {} while (0) #define VALGRIND_DO_PERSIST(addr, len) do {\ (void) (addr);\ (void) (len);\ } while (0) #define VALGRIND_SET_CLEAN(addr, len) do {\ (void) (addr);\ (void) (len);\ } while (0) #define VALGRIND_WRITE_STATS do {} while (0) #define VALGRIND_EMIT_LOG(emit_log) do {\ (void) (emit_log);\ } while (0) #define VALGRIND_START_TX do {} while (0) #define VALGRIND_START_TX_N(txn) do { (void) (txn); } while (0) #define VALGRIND_END_TX do {} while (0) #define VALGRIND_END_TX_N(txn) do {\ (void) (txn);\ } while (0) #define VALGRIND_ADD_TO_TX(addr, len) do {\ (void) (addr);\ (void) (len);\ } while (0) #define VALGRIND_ADD_TO_TX_N(txn, addr, len) do {\ (void) (txn);\ (void) (addr);\ (void) (len);\ } while (0) #define VALGRIND_REMOVE_FROM_TX(addr, len) do {\ (void) (addr);\ (void) (len);\ } while (0) #define VALGRIND_REMOVE_FROM_TX_N(txn, addr, len) do {\ (void) (txn);\ (void) (addr);\ (void) (len);\ } while (0) #define VALGRIND_ADD_TO_GLOBAL_TX_IGNORE(addr, len) do {\ (void) (addr);\ (void) (len);\ } while (0) #define PMEMOBJ_API_START() do {} while (0) #define PMEMOBJ_API_END() do {} while (0) #define PMEM_API_START() do {} while (0) #define PMEM_API_END() do {} while (0) #define PMEM2_API_START(func_name) do {\ (void) (func_name);\ } while (0) #define PMEM2_API_END(func_name) do {\ (void) (func_name);\ } while (0) #endif #if VG_MEMCHECK_ENABLED extern unsigned _On_memcheck; #define On_memcheck __builtin_expect(_On_memcheck, 0) #include "valgrind/memcheck.h" #define VALGRIND_DO_DISABLE_ERROR_REPORTING do {\ if (On_valgrind)\ VALGRIND_DISABLE_ERROR_REPORTING;\ } while (0) #define VALGRIND_DO_ENABLE_ERROR_REPORTING do {\ if (On_valgrind)\ VALGRIND_ENABLE_ERROR_REPORTING;\ } while (0) #define VALGRIND_DO_CREATE_MEMPOOL(heap, rzB, is_zeroed) do {\ if (On_memcheck)\ VALGRIND_CREATE_MEMPOOL(heap, rzB, is_zeroed);\ } while (0) #define VALGRIND_DO_DESTROY_MEMPOOL(heap) do {\ if (On_memcheck)\ VALGRIND_DESTROY_MEMPOOL(heap);\ } while (0) #define VALGRIND_DO_MEMPOOL_ALLOC(heap, addr, size) do {\ if (On_memcheck)\ VALGRIND_MEMPOOL_ALLOC(heap, addr, size);\ } while (0) #define VALGRIND_DO_MEMPOOL_FREE(heap, addr) do {\ if (On_memcheck)\ VALGRIND_MEMPOOL_FREE(heap, addr);\ } while (0) #define VALGRIND_DO_MEMPOOL_CHANGE(heap, addrA, addrB, size) do {\ if (On_memcheck)\ VALGRIND_MEMPOOL_CHANGE(heap, addrA, addrB, size);\ } while 
(0) #define VALGRIND_DO_MAKE_MEM_DEFINED(addr, len) do {\ if (On_memcheck)\ VALGRIND_MAKE_MEM_DEFINED(addr, len);\ } while (0) #define VALGRIND_DO_MAKE_MEM_UNDEFINED(addr, len) do {\ if (On_memcheck)\ VALGRIND_MAKE_MEM_UNDEFINED(addr, len);\ } while (0) #define VALGRIND_DO_MAKE_MEM_NOACCESS(addr, len) do {\ if (On_memcheck)\ VALGRIND_MAKE_MEM_NOACCESS(addr, len);\ } while (0) #define VALGRIND_DO_CHECK_MEM_IS_ADDRESSABLE(addr, len) do {\ if (On_memcheck)\ VALGRIND_CHECK_MEM_IS_ADDRESSABLE(addr, len);\ } while (0) #else #define On_memcheck (0) #define VALGRIND_DO_DISABLE_ERROR_REPORTING do {} while (0) #define VALGRIND_DO_ENABLE_ERROR_REPORTING do {} while (0) #define VALGRIND_DO_CREATE_MEMPOOL(heap, rzB, is_zeroed)\ do { (void) (heap); (void) (rzB); (void) (is_zeroed); } while (0) #define VALGRIND_DO_DESTROY_MEMPOOL(heap)\ do { (void) (heap); } while (0) #define VALGRIND_DO_MEMPOOL_ALLOC(heap, addr, size)\ do { (void) (heap); (void) (addr); (void) (size); } while (0) #define VALGRIND_DO_MEMPOOL_FREE(heap, addr)\ do { (void) (heap); (void) (addr); } while (0) #define VALGRIND_DO_MEMPOOL_CHANGE(heap, addrA, addrB, size)\ do {\ (void) (heap); (void) (addrA); (void) (addrB); (void) (size);\ } while (0) #define VALGRIND_DO_MAKE_MEM_DEFINED(addr, len)\ do { (void) (addr); (void) (len); } while (0) #define VALGRIND_DO_MAKE_MEM_UNDEFINED(addr, len)\ do { (void) (addr); (void) (len); } while (0) #define VALGRIND_DO_MAKE_MEM_NOACCESS(addr, len)\ do { (void) (addr); (void) (len); } while (0) #define VALGRIND_DO_CHECK_MEM_IS_ADDRESSABLE(addr, len)\ do { (void) (addr); (void) (len); } while (0) #endif #endif
11,169
22.319415
75
h
null
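A minimal sketch, assuming pmem_dst points at persistent memory already registered with VALGRIND_REGISTER_PMEM_MAPPING(), of how a store can be bracketed with the pmemcheck annotations above; when the tools are disabled the macros compile away and only the memcpy() remains. The flush itself is represented here only by the annotation, not by a real pmem_persist() call.

#include <string.h>
#include "valgrind_internal.h"

static void
example_instrumented_store(void *pmem_dst, const void *src, size_t len)
{
	VALGRIND_ADD_TO_TX(pmem_dst, len);	/* range must become durable */

	memcpy(pmem_dst, src, len);
	VALGRIND_DO_PERSIST(pmem_dst, len);	/* flush + fence, pmemcheck only */

	VALGRIND_REMOVE_FROM_TX(pmem_dst, len);
}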
NearPMSW-main/nearpm/shadow/pmdk-sd/src/core/alloc.h
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2019-2020, Intel Corporation */

#ifndef COMMON_ALLOC_H
#define COMMON_ALLOC_H

#include <stdlib.h>

#ifdef __cplusplus
extern "C" {
#endif

typedef void *(*Malloc_func)(size_t size);
typedef void *(*Realloc_func)(void *ptr, size_t size);

extern Malloc_func fn_malloc;
extern Realloc_func fn_realloc;

#if FAULT_INJECTION
void *_flt_Malloc(size_t, const char *);
void *_flt_Realloc(void *, size_t, const char *);

#define Malloc(size) _flt_Malloc(size, __func__)
#define Realloc(ptr, size) _flt_Realloc(ptr, size, __func__)
#else
void *_Malloc(size_t);
void *_Realloc(void *, size_t);

#define Malloc(size) _Malloc(size)
#define Realloc(ptr, size) _Realloc(ptr, size)
#endif

void set_func_malloc(void *(*malloc_func)(size_t size));
void set_func_realloc(void *(*realloc_func)(void *ptr, size_t size));

/*
 * overridable names for malloc & friends used by this library
 */
typedef void (*Free_func)(void *ptr);
typedef char *(*Strdup_func)(const char *s);

extern Free_func Free;
extern Strdup_func Strdup;
extern void *Zalloc(size_t sz);

#ifdef __cplusplus
}
#endif

#endif
1,131
21.64
69
h
null
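A minimal sketch of the overridable allocation layer declared above; struct item and example_item_new() are illustrative, and it assumes linking against the core alloc.c that defines Zalloc, Strdup, and Free.

#include "alloc.h"

struct item {
	int id;
	char *name;
};

/* allocate through the overridable layer so tests can swap the allocator */
static struct item *
example_item_new(int id, const char *name)
{
	struct item *it = Zalloc(sizeof(*it));	/* zero-initialized */
	if (it == NULL)
		return NULL;

	it->id = id;
	it->name = Strdup(name);
	if (it->name == NULL) {
		Free(it);
		return NULL;
	}
	return it;
}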
NearPMSW-main/nearpm/shadow/pmdk-sd/src/core/os_thread.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2020, Intel Corporation */ /* * Copyright (c) 2016, Microsoft Corporation. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * os_thread.h -- os thread abstraction layer */ #ifndef OS_THREAD_H #define OS_THREAD_H 1 #include <stdint.h> #include <time.h> #ifdef __cplusplus extern "C" { #endif typedef union { long long align; char padding[44]; /* linux: 40 windows: 44 */ } os_mutex_t; typedef union { long long align; char padding[56]; /* linux: 56 windows: 13 */ } os_rwlock_t; typedef union { long long align; char padding[48]; /* linux: 48 windows: 12 */ } os_cond_t; typedef union { long long align; char padding[32]; /* linux: 8 windows: 32 */ } os_thread_t; typedef union { long long align; /* linux: long windows: 8 FreeBSD: 12 */ char padding[16]; /* 16 to be safe */ } os_once_t; #define OS_ONCE_INIT { .padding = {0} } typedef unsigned os_tls_key_t; typedef union { long long align; char padding[56]; /* linux: 56 windows: 8 */ } os_semaphore_t; typedef union { long long align; char padding[56]; /* linux: 56 windows: 8 */ } os_thread_attr_t; typedef union { long long align; char padding[512]; } os_cpu_set_t; #ifdef __FreeBSD__ #define cpu_set_t cpuset_t typedef uintptr_t os_spinlock_t; #else typedef volatile int os_spinlock_t; /* XXX: not implemented on windows */ #endif void os_cpu_zero(os_cpu_set_t *set); void os_cpu_set(size_t cpu, os_cpu_set_t *set); #ifndef _WIN32 #define _When_(...) 
#endif int os_once(os_once_t *o, void (*func)(void)); int os_tls_key_create(os_tls_key_t *key, void (*destructor)(void *)); int os_tls_key_delete(os_tls_key_t key); int os_tls_set(os_tls_key_t key, const void *value); void *os_tls_get(os_tls_key_t key); int os_mutex_init(os_mutex_t *__restrict mutex); int os_mutex_destroy(os_mutex_t *__restrict mutex); _When_(return == 0, _Acquires_lock_(mutex->lock)) int os_mutex_lock(os_mutex_t *__restrict mutex); _When_(return == 0, _Acquires_lock_(mutex->lock)) int os_mutex_trylock(os_mutex_t *__restrict mutex); int os_mutex_unlock(os_mutex_t *__restrict mutex); /* XXX - non POSIX */ int os_mutex_timedlock(os_mutex_t *__restrict mutex, const struct timespec *abstime); int os_rwlock_init(os_rwlock_t *__restrict rwlock); int os_rwlock_destroy(os_rwlock_t *__restrict rwlock); int os_rwlock_rdlock(os_rwlock_t *__restrict rwlock); int os_rwlock_wrlock(os_rwlock_t *__restrict rwlock); int os_rwlock_tryrdlock(os_rwlock_t *__restrict rwlock); _When_(return == 0, _Acquires_exclusive_lock_(rwlock->lock)) int os_rwlock_trywrlock(os_rwlock_t *__restrict rwlock); _When_(rwlock->is_write != 0, _Requires_exclusive_lock_held_(rwlock->lock)) _When_(rwlock->is_write == 0, _Requires_shared_lock_held_(rwlock->lock)) int os_rwlock_unlock(os_rwlock_t *__restrict rwlock); int os_rwlock_timedrdlock(os_rwlock_t *__restrict rwlock, const struct timespec *abstime); int os_rwlock_timedwrlock(os_rwlock_t *__restrict rwlock, const struct timespec *abstime); int os_spin_init(os_spinlock_t *lock, int pshared); int os_spin_destroy(os_spinlock_t *lock); int os_spin_lock(os_spinlock_t *lock); int os_spin_unlock(os_spinlock_t *lock); int os_spin_trylock(os_spinlock_t *lock); int os_cond_init(os_cond_t *__restrict cond); int os_cond_destroy(os_cond_t *__restrict cond); int os_cond_broadcast(os_cond_t *__restrict cond); int os_cond_signal(os_cond_t *__restrict cond); int os_cond_timedwait(os_cond_t *__restrict cond, os_mutex_t *__restrict mutex, const struct timespec *abstime); int os_cond_wait(os_cond_t *__restrict cond, os_mutex_t *__restrict mutex); /* threading */ int os_thread_create(os_thread_t *thread, const os_thread_attr_t *attr, void *(*start_routine)(void *), void *arg); int os_thread_join(os_thread_t *thread, void **result); void os_thread_self(os_thread_t *thread); /* thread affinity */ int os_thread_setaffinity_np(os_thread_t *thread, size_t set_size, const os_cpu_set_t *set); int os_thread_atfork(void (*prepare)(void), void (*parent)(void), void (*child)(void)); int os_semaphore_init(os_semaphore_t *sem, unsigned value); int os_semaphore_destroy(os_semaphore_t *sem); int os_semaphore_wait(os_semaphore_t *sem); int os_semaphore_trywait(os_semaphore_t *sem); int os_semaphore_post(os_semaphore_t *sem); #ifdef __cplusplus } #endif #endif /* OS_THREAD_H */
5,876
31.291209
75
h
null
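A minimal sketch of the once/TLS portion of this header, following the same pattern out.c uses for its per-thread error buffer; example_thread_buffer() and the 128-byte size are illustrative.

#include <stdlib.h>
#include "os_thread.h"

static os_once_t Buf_key_once = OS_ONCE_INIT;
static os_tls_key_t Buf_key;

static void
buf_key_alloc(void)
{
	(void) os_tls_key_create(&Buf_key, free);	/* destructor on thread exit */
}

/* return a lazily created per-thread scratch buffer */
char *
example_thread_buffer(void)
{
	os_once(&Buf_key_once, buf_key_alloc);

	char *buf = os_tls_get(Buf_key);
	if (buf == NULL) {
		buf = malloc(128);
		if (buf != NULL)
			(void) os_tls_set(Buf_key, buf);
	}
	return buf;
}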
NearPMSW-main/nearpm/shadow/pmdk-sd/src/core/out.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2014-2020, Intel Corporation */ /* * out.h -- definitions for "out" module */ #ifndef PMDK_OUT_H #define PMDK_OUT_H 1 #include <stdarg.h> #include <stddef.h> #include <stdlib.h> #include "util.h" #ifdef __cplusplus extern "C" { #endif /* * Suppress errors which are after appropriate ASSERT* macro for nondebug * builds. */ #if !defined(DEBUG) && (defined(__clang_analyzer__) || defined(__COVERITY__) ||\ defined(__KLOCWORK__)) #define OUT_FATAL_DISCARD_NORETURN __attribute__((noreturn)) #else #define OUT_FATAL_DISCARD_NORETURN #endif #ifndef EVALUATE_DBG_EXPRESSIONS #if defined(DEBUG) || defined(__clang_analyzer__) || defined(__COVERITY__) ||\ defined(__KLOCWORK__) #define EVALUATE_DBG_EXPRESSIONS 1 #else #define EVALUATE_DBG_EXPRESSIONS 0 #endif #endif #ifdef DEBUG #define OUT_LOG out_log #define OUT_NONL out_nonl #define OUT_FATAL out_fatal #define OUT_FATAL_ABORT out_fatal #else static __attribute__((always_inline)) inline void out_log_discard(const char *file, int line, const char *func, int level, const char *fmt, ...) { (void) file; (void) line; (void) func; (void) level; (void) fmt; } static __attribute__((always_inline)) inline void out_nonl_discard(int level, const char *fmt, ...) { (void) level; (void) fmt; } static __attribute__((always_inline)) OUT_FATAL_DISCARD_NORETURN inline void out_fatal_discard(const char *file, int line, const char *func, const char *fmt, ...) { (void) file; (void) line; (void) func; (void) fmt; } static __attribute__((always_inline)) NORETURN inline void out_fatal_abort(const char *file, int line, const char *func, const char *fmt, ...) { (void) file; (void) line; (void) func; (void) fmt; abort(); } #define OUT_LOG out_log_discard #define OUT_NONL out_nonl_discard #define OUT_FATAL out_fatal_discard #define OUT_FATAL_ABORT out_fatal_abort #endif #if defined(__KLOCWORK__) #define TEST_ALWAYS_TRUE_EXPR(cnd) #define TEST_ALWAYS_EQ_EXPR(cnd) #define TEST_ALWAYS_NE_EXPR(cnd) #else #define TEST_ALWAYS_TRUE_EXPR(cnd)\ if (__builtin_constant_p(cnd))\ ASSERT_COMPILE_ERROR_ON(cnd); #define TEST_ALWAYS_EQ_EXPR(lhs, rhs)\ if (__builtin_constant_p(lhs) && __builtin_constant_p(rhs))\ ASSERT_COMPILE_ERROR_ON((lhs) == (rhs)); #define TEST_ALWAYS_NE_EXPR(lhs, rhs)\ if (__builtin_constant_p(lhs) && __builtin_constant_p(rhs))\ ASSERT_COMPILE_ERROR_ON((lhs) != (rhs)); #endif /* produce debug/trace output */ #define LOG(level, ...) do { \ if (!EVALUATE_DBG_EXPRESSIONS) break;\ OUT_LOG(__FILE__, __LINE__, __func__, level, __VA_ARGS__);\ } while (0) /* produce debug/trace output without prefix and new line */ #define LOG_NONL(level, ...) 
do { \ if (!EVALUATE_DBG_EXPRESSIONS) break; \ OUT_NONL(level, __VA_ARGS__); \ } while (0) /* produce output and exit */ #define FATAL(...)\ OUT_FATAL_ABORT(__FILE__, __LINE__, __func__, __VA_ARGS__) /* assert a condition is true at runtime */ #define ASSERT_rt(cnd) do { \ if (!EVALUATE_DBG_EXPRESSIONS || (cnd)) break; \ OUT_FATAL(__FILE__, __LINE__, __func__, "assertion failure: %s", #cnd);\ } while (0) /* assertion with extra info printed if assertion fails at runtime */ #define ASSERTinfo_rt(cnd, info) do { \ if (!EVALUATE_DBG_EXPRESSIONS || (cnd)) break; \ OUT_FATAL(__FILE__, __LINE__, __func__, \ "assertion failure: %s (%s = %s)", #cnd, #info, info);\ } while (0) /* assert two integer values are equal at runtime */ #define ASSERTeq_rt(lhs, rhs) do { \ if (!EVALUATE_DBG_EXPRESSIONS || ((lhs) == (rhs))) break; \ OUT_FATAL(__FILE__, __LINE__, __func__,\ "assertion failure: %s (0x%llx) == %s (0x%llx)", #lhs,\ (unsigned long long)(lhs), #rhs, (unsigned long long)(rhs)); \ } while (0) /* assert two integer values are not equal at runtime */ #define ASSERTne_rt(lhs, rhs) do { \ if (!EVALUATE_DBG_EXPRESSIONS || ((lhs) != (rhs))) break; \ OUT_FATAL(__FILE__, __LINE__, __func__,\ "assertion failure: %s (0x%llx) != %s (0x%llx)", #lhs,\ (unsigned long long)(lhs), #rhs, (unsigned long long)(rhs)); \ } while (0) /* assert a condition is true */ #define ASSERT(cnd)\ do {\ /*\ * Detect useless asserts on always true expression. Please use\ * COMPILE_ERROR_ON(!cnd) or ASSERT_rt(cnd) in such cases.\ */\ TEST_ALWAYS_TRUE_EXPR(cnd);\ ASSERT_rt(cnd);\ } while (0) /* assertion with extra info printed if assertion fails */ #define ASSERTinfo(cnd, info)\ do {\ /* See comment in ASSERT. */\ TEST_ALWAYS_TRUE_EXPR(cnd);\ ASSERTinfo_rt(cnd, info);\ } while (0) /* assert two integer values are equal */ #define ASSERTeq(lhs, rhs)\ do {\ /* See comment in ASSERT. */\ TEST_ALWAYS_EQ_EXPR(lhs, rhs);\ ASSERTeq_rt(lhs, rhs);\ } while (0) /* assert two integer values are not equal */ #define ASSERTne(lhs, rhs)\ do {\ /* See comment in ASSERT. */\ TEST_ALWAYS_NE_EXPR(lhs, rhs);\ ASSERTne_rt(lhs, rhs);\ } while (0) #define ERR(...)\ out_err(__FILE__, __LINE__, __func__, __VA_ARGS__) void out_init(const char *log_prefix, const char *log_level_var, const char *log_file_var, int major_version, int minor_version); void out_fini(void); void out(const char *fmt, ...) FORMAT_PRINTF(1, 2); void out_nonl(int level, const char *fmt, ...) FORMAT_PRINTF(2, 3); void out_log(const char *file, int line, const char *func, int level, const char *fmt, ...) FORMAT_PRINTF(5, 6); void out_err(const char *file, int line, const char *func, const char *fmt, ...) FORMAT_PRINTF(4, 5); void NORETURN out_fatal(const char *file, int line, const char *func, const char *fmt, ...) FORMAT_PRINTF(4, 5); void out_set_print_func(void (*print_func)(const char *s)); void out_set_vsnprintf_func(int (*vsnprintf_func)(char *str, size_t size, const char *format, va_list ap)); #ifdef _WIN32 #ifndef PMDK_UTF8_API #define out_get_errormsg out_get_errormsgW #else #define out_get_errormsg out_get_errormsgU #endif #endif #ifndef _WIN32 const char *out_get_errormsg(void); #else const char *out_get_errormsgU(void); const wchar_t *out_get_errormsgW(void); #endif #ifdef __cplusplus } #endif #endif
6,066
25.150862
80
h
null
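The out.h record above declares PMDK's internal LOG/ASSERT* machinery. A minimal sketch of how a caller inside the PMDK tree might exercise it follows; it assumes a DEBUG build linked against the out.c implementation, and the "example" prefix plus the EXAMPLE_LOG_* environment-variable names are invented for illustration.

/* Hedged sketch: using the LOG and ASSERT* macros declared above.
 * Assumes a DEBUG build inside the PMDK source tree; prefix and
 * environment-variable names below are illustrative only. */
#include "out.h"

static int
divide(int a, int b)
{
    ASSERTne(b, 0);                    /* fatal in debug builds when b == 0 */
    LOG(3, "dividing %d by %d", a, b); /* trace output at level 3 */
    return a / b;
}

int
main(void)
{
    /* wire the module to a prefix and env vars, as declared above */
    out_init("example", "EXAMPLE_LOG_LEVEL", "EXAMPLE_LOG_FILE", 1, 0);

    int q = divide(10, 2);
    ASSERTeq(q, 5);                    /* runtime integer-equality check */

    out_fini();
    return 0;
}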
NearPMSW-main/nearpm/shadow/pmdk-sd/src/core/valgrind/memcheck.h
/* ---------------------------------------------------------------- Notice that the following BSD-style license applies to this one file (memcheck.h) only. The rest of Valgrind is licensed under the terms of the GNU General Public License, version 2, unless otherwise indicated. See the COPYING file in the source distribution for details. ---------------------------------------------------------------- This file is part of MemCheck, a heavyweight Valgrind tool for detecting memory errors. Copyright (C) 2000-2017 Julian Seward. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required. 3. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software. 4. The name of the author may not be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ---------------------------------------------------------------- Notice that the above BSD-style license applies to this one file (memcheck.h) only. The entire rest of Valgrind is licensed under the terms of the GNU General Public License, version 2. See the COPYING file in the source distribution for details. ---------------------------------------------------------------- */ #ifndef __MEMCHECK_H #define __MEMCHECK_H /* This file is for inclusion into client (your!) code. You can use these macros to manipulate and query memory permissions inside your own programs. See comment near the top of valgrind.h on how to use them. */ #include "valgrind.h" /* !! ABIWARNING !! ABIWARNING !! ABIWARNING !! ABIWARNING !! This enum comprises an ABI exported by Valgrind to programs which use client requests. DO NOT CHANGE THE ORDER OF THESE ENTRIES, NOR DELETE ANY -- add new ones at the end. */ typedef enum { VG_USERREQ__MAKE_MEM_NOACCESS = VG_USERREQ_TOOL_BASE('M','C'), VG_USERREQ__MAKE_MEM_UNDEFINED, VG_USERREQ__MAKE_MEM_DEFINED, VG_USERREQ__DISCARD, VG_USERREQ__CHECK_MEM_IS_ADDRESSABLE, VG_USERREQ__CHECK_MEM_IS_DEFINED, VG_USERREQ__DO_LEAK_CHECK, VG_USERREQ__COUNT_LEAKS, VG_USERREQ__GET_VBITS, VG_USERREQ__SET_VBITS, VG_USERREQ__CREATE_BLOCK, VG_USERREQ__MAKE_MEM_DEFINED_IF_ADDRESSABLE, /* Not next to VG_USERREQ__COUNT_LEAKS because it was added later. 
*/ VG_USERREQ__COUNT_LEAK_BLOCKS, VG_USERREQ__ENABLE_ADDR_ERROR_REPORTING_IN_RANGE, VG_USERREQ__DISABLE_ADDR_ERROR_REPORTING_IN_RANGE, VG_USERREQ__CHECK_MEM_IS_UNADDRESSABLE, VG_USERREQ__CHECK_MEM_IS_UNDEFINED, /* This is just for memcheck's internal use - don't use it */ _VG_USERREQ__MEMCHECK_RECORD_OVERLAP_ERROR = VG_USERREQ_TOOL_BASE('M','C') + 256 } Vg_MemCheckClientRequest; /* Client-code macros to manipulate the state of memory. */ /* Mark memory at _qzz_addr as unaddressable for _qzz_len bytes. */ #define VALGRIND_MAKE_MEM_NOACCESS(_qzz_addr,_qzz_len) \ VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \ VG_USERREQ__MAKE_MEM_NOACCESS, \ (_qzz_addr), (_qzz_len), 0, 0, 0) /* Similarly, mark memory at _qzz_addr as addressable but undefined for _qzz_len bytes. */ #define VALGRIND_MAKE_MEM_UNDEFINED(_qzz_addr,_qzz_len) \ VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \ VG_USERREQ__MAKE_MEM_UNDEFINED, \ (_qzz_addr), (_qzz_len), 0, 0, 0) /* Similarly, mark memory at _qzz_addr as addressable and defined for _qzz_len bytes. */ #define VALGRIND_MAKE_MEM_DEFINED(_qzz_addr,_qzz_len) \ VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \ VG_USERREQ__MAKE_MEM_DEFINED, \ (_qzz_addr), (_qzz_len), 0, 0, 0) /* Similar to VALGRIND_MAKE_MEM_DEFINED except that addressability is not altered: bytes which are addressable are marked as defined, but those which are not addressable are left unchanged. */ #define VALGRIND_MAKE_MEM_DEFINED_IF_ADDRESSABLE(_qzz_addr,_qzz_len) \ VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \ VG_USERREQ__MAKE_MEM_DEFINED_IF_ADDRESSABLE, \ (_qzz_addr), (_qzz_len), 0, 0, 0) /* Create a block-description handle. The description is an ascii string which is included in any messages pertaining to addresses within the specified memory range. Has no other effect on the properties of the memory range. */ #define VALGRIND_CREATE_BLOCK(_qzz_addr,_qzz_len, _qzz_desc) \ VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \ VG_USERREQ__CREATE_BLOCK, \ (_qzz_addr), (_qzz_len), (_qzz_desc), \ 0, 0) /* Discard a block-description-handle. Returns 1 for an invalid handle, 0 for a valid handle. */ #define VALGRIND_DISCARD(_qzz_blkindex) \ VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \ VG_USERREQ__DISCARD, \ 0, (_qzz_blkindex), 0, 0, 0) /* Client-code macros to check the state of memory. */ /* Check that memory at _qzz_addr is addressable for _qzz_len bytes. If suitable addressability is not established, Valgrind prints an error message and returns the address of the first offending byte. Otherwise it returns zero. */ #define VALGRIND_CHECK_MEM_IS_ADDRESSABLE(_qzz_addr,_qzz_len) \ VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \ VG_USERREQ__CHECK_MEM_IS_ADDRESSABLE, \ (_qzz_addr), (_qzz_len), 0, 0, 0) /* Check that memory at _qzz_addr is addressable and defined for _qzz_len bytes. If suitable addressability and definedness are not established, Valgrind prints an error message and returns the address of the first offending byte. Otherwise it returns zero. */ #define VALGRIND_CHECK_MEM_IS_DEFINED(_qzz_addr,_qzz_len) \ VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \ VG_USERREQ__CHECK_MEM_IS_DEFINED, \ (_qzz_addr), (_qzz_len), 0, 0, 0) /* Use this macro to force the definedness and addressability of an lvalue to be checked. If suitable addressability and definedness are not established, Valgrind prints an error message and returns the address of the first offending byte. Otherwise it returns zero. 
*/ #define VALGRIND_CHECK_VALUE_IS_DEFINED(__lvalue) \ VALGRIND_CHECK_MEM_IS_DEFINED( \ (volatile unsigned char *)&(__lvalue), \ (unsigned long)(sizeof (__lvalue))) /* Check that memory at _qzz_addr is unaddressable for _qzz_len bytes. If any byte in this range is addressable, Valgrind returns the address of the first offending byte. Otherwise it returns zero. */ #define VALGRIND_CHECK_MEM_IS_UNADDRESSABLE(_qzz_addr,_qzz_len) \ VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \ VG_USERREQ__CHECK_MEM_IS_UNADDRESSABLE,\ (_qzz_addr), (_qzz_len), 0, 0, 0) /* Check that memory at _qzz_addr is undefined for _qzz_len bytes. If any byte in this range is defined or unaddressable, Valgrind returns the address of the first offending byte. Otherwise it returns zero. */ #define VALGRIND_CHECK_MEM_IS_UNDEFINED(_qzz_addr,_qzz_len) \ VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \ VG_USERREQ__CHECK_MEM_IS_UNDEFINED, \ (_qzz_addr), (_qzz_len), 0, 0, 0) /* Do a full memory leak check (like --leak-check=full) mid-execution. */ #define VALGRIND_DO_LEAK_CHECK \ VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DO_LEAK_CHECK, \ 0, 0, 0, 0, 0) /* Same as VALGRIND_DO_LEAK_CHECK but only showing the entries for which there was an increase in leaked bytes or leaked nr of blocks since the previous leak search. */ #define VALGRIND_DO_ADDED_LEAK_CHECK \ VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DO_LEAK_CHECK, \ 0, 1, 0, 0, 0) /* Same as VALGRIND_DO_ADDED_LEAK_CHECK but showing entries with increased or decreased leaked bytes/blocks since previous leak search. */ #define VALGRIND_DO_CHANGED_LEAK_CHECK \ VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DO_LEAK_CHECK, \ 0, 2, 0, 0, 0) /* Do a summary memory leak check (like --leak-check=summary) mid-execution. */ #define VALGRIND_DO_QUICK_LEAK_CHECK \ VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__DO_LEAK_CHECK, \ 1, 0, 0, 0, 0) /* Return number of leaked, dubious, reachable and suppressed bytes found by all previous leak checks. They must be lvalues. */ #define VALGRIND_COUNT_LEAKS(leaked, dubious, reachable, suppressed) \ /* For safety on 64-bit platforms we assign the results to private unsigned long variables, then assign these to the lvalues the user specified, which works no matter what type 'leaked', 'dubious', etc are. We also initialise '_qzz_leaked', etc because VG_USERREQ__COUNT_LEAKS doesn't mark the values returned as defined. */ \ { \ unsigned long _qzz_leaked = 0, _qzz_dubious = 0; \ unsigned long _qzz_reachable = 0, _qzz_suppressed = 0; \ VALGRIND_DO_CLIENT_REQUEST_STMT( \ VG_USERREQ__COUNT_LEAKS, \ &_qzz_leaked, &_qzz_dubious, \ &_qzz_reachable, &_qzz_suppressed, 0); \ leaked = _qzz_leaked; \ dubious = _qzz_dubious; \ reachable = _qzz_reachable; \ suppressed = _qzz_suppressed; \ } /* Return number of leaked, dubious, reachable and suppressed bytes found by all previous leak checks. They must be lvalues. */ #define VALGRIND_COUNT_LEAK_BLOCKS(leaked, dubious, reachable, suppressed) \ /* For safety on 64-bit platforms we assign the results to private unsigned long variables, then assign these to the lvalues the user specified, which works no matter what type 'leaked', 'dubious', etc are. We also initialise '_qzz_leaked', etc because VG_USERREQ__COUNT_LEAKS doesn't mark the values returned as defined. 
*/ \ { \ unsigned long _qzz_leaked = 0, _qzz_dubious = 0; \ unsigned long _qzz_reachable = 0, _qzz_suppressed = 0; \ VALGRIND_DO_CLIENT_REQUEST_STMT( \ VG_USERREQ__COUNT_LEAK_BLOCKS, \ &_qzz_leaked, &_qzz_dubious, \ &_qzz_reachable, &_qzz_suppressed, 0); \ leaked = _qzz_leaked; \ dubious = _qzz_dubious; \ reachable = _qzz_reachable; \ suppressed = _qzz_suppressed; \ } /* Get the validity data for addresses [zza..zza+zznbytes-1] and copy it into the provided zzvbits array. Return values: 0 if not running on valgrind 1 success 2 [previously indicated unaligned arrays; these are now allowed] 3 if any parts of zzsrc/zzvbits are not addressable. The metadata is not copied in cases 0, 2 or 3 so it should be impossible to segfault your system by using this call. */ #define VALGRIND_GET_VBITS(zza,zzvbits,zznbytes) \ (unsigned)VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \ VG_USERREQ__GET_VBITS, \ (const char*)(zza), \ (char*)(zzvbits), \ (zznbytes), 0, 0) /* Set the validity data for addresses [zza..zza+zznbytes-1], copying it from the provided zzvbits array. Return values: 0 if not running on valgrind 1 success 2 [previously indicated unaligned arrays; these are now allowed] 3 if any parts of zza/zzvbits are not addressable. The metadata is not copied in cases 0, 2 or 3 so it should be impossible to segfault your system by using this call. */ #define VALGRIND_SET_VBITS(zza,zzvbits,zznbytes) \ (unsigned)VALGRIND_DO_CLIENT_REQUEST_EXPR(0, \ VG_USERREQ__SET_VBITS, \ (const char*)(zza), \ (const char*)(zzvbits), \ (zznbytes), 0, 0 ) /* Disable and re-enable reporting of addressing errors in the specified address range. */ #define VALGRIND_DISABLE_ADDR_ERROR_REPORTING_IN_RANGE(_qzz_addr,_qzz_len) \ VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \ VG_USERREQ__DISABLE_ADDR_ERROR_REPORTING_IN_RANGE, \ (_qzz_addr), (_qzz_len), 0, 0, 0) #define VALGRIND_ENABLE_ADDR_ERROR_REPORTING_IN_RANGE(_qzz_addr,_qzz_len) \ VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \ VG_USERREQ__ENABLE_ADDR_ERROR_REPORTING_IN_RANGE, \ (_qzz_addr), (_qzz_len), 0, 0, 0) #endif
15,621
47.666667
79
h
null
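The memcheck.h record above exposes client requests that a program can issue to Valgrind's memcheck tool. A short sketch of typical use follows; the requests are no-ops when the program runs outside Valgrind, and the buffer size is arbitrary.

/* Hedged sketch: exercising the memcheck.h client requests shown above.
 * All calls collapse to no-ops when not running under Valgrind. */
#include <stdlib.h>
#include <stdio.h>
#include "memcheck.h"

int
main(void)
{
    char *buf = malloc(64);
    if (buf == NULL)
        return 1;

    /* tell memcheck the buffer now holds defined data */
    VALGRIND_MAKE_MEM_DEFINED(buf, 64);

    /* explicitly verify addressability and definedness */
    if (VALGRIND_CHECK_MEM_IS_DEFINED(buf, 64) != 0)
        fprintf(stderr, "memcheck reported an undefined byte\n");

    /* mark the region off-limits, then make it usable again */
    VALGRIND_MAKE_MEM_NOACCESS(buf, 64);
    VALGRIND_MAKE_MEM_UNDEFINED(buf, 64);

    free(buf);
    return 0;
}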
NearPMSW-main/nearpm/shadow/pmdk-sd/src/core/valgrind/pmemcheck.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2014-2015, Intel Corporation */ #ifndef __PMEMCHECK_H #define __PMEMCHECK_H /* This file is for inclusion into client (your!) code. You can use these macros to manipulate and query memory permissions inside your own programs. See comment near the top of valgrind.h on how to use them. */ #include "valgrind.h" /* !! ABIWARNING !! ABIWARNING !! ABIWARNING !! ABIWARNING !! This enum comprises an ABI exported by Valgrind to programs which use client requests. DO NOT CHANGE THE ORDER OF THESE ENTRIES, NOR DELETE ANY -- add new ones at the end. */ typedef enum { VG_USERREQ__PMC_REGISTER_PMEM_MAPPING = VG_USERREQ_TOOL_BASE('P','C'), VG_USERREQ__PMC_REGISTER_PMEM_FILE, VG_USERREQ__PMC_REMOVE_PMEM_MAPPING, VG_USERREQ__PMC_CHECK_IS_PMEM_MAPPING, VG_USERREQ__PMC_PRINT_PMEM_MAPPINGS, VG_USERREQ__PMC_DO_FLUSH, VG_USERREQ__PMC_DO_FENCE, VG_USERREQ__PMC_RESERVED1, /* Do not use. */ VG_USERREQ__PMC_WRITE_STATS, VG_USERREQ__PMC_RESERVED2, /* Do not use. */ VG_USERREQ__PMC_RESERVED3, /* Do not use. */ VG_USERREQ__PMC_RESERVED4, /* Do not use. */ VG_USERREQ__PMC_RESERVED5, /* Do not use. */ VG_USERREQ__PMC_RESERVED7, /* Do not use. */ VG_USERREQ__PMC_RESERVED8, /* Do not use. */ VG_USERREQ__PMC_RESERVED9, /* Do not use. */ VG_USERREQ__PMC_RESERVED10, /* Do not use. */ VG_USERREQ__PMC_SET_CLEAN, /* transaction support */ VG_USERREQ__PMC_START_TX, VG_USERREQ__PMC_START_TX_N, VG_USERREQ__PMC_END_TX, VG_USERREQ__PMC_END_TX_N, VG_USERREQ__PMC_ADD_TO_TX, VG_USERREQ__PMC_ADD_TO_TX_N, VG_USERREQ__PMC_REMOVE_FROM_TX, VG_USERREQ__PMC_REMOVE_FROM_TX_N, VG_USERREQ__PMC_ADD_THREAD_TO_TX_N, VG_USERREQ__PMC_REMOVE_THREAD_FROM_TX_N, VG_USERREQ__PMC_ADD_TO_GLOBAL_TX_IGNORE, VG_USERREQ__PMC_RESERVED6, /* Do not use. */ VG_USERREQ__PMC_EMIT_LOG, } Vg_PMemCheckClientRequest; /* Client-code macros to manipulate pmem mappings */ /** Register a persistent memory mapping region */ #define VALGRIND_PMC_REGISTER_PMEM_MAPPING(_qzz_addr, _qzz_len) \ VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \ VG_USERREQ__PMC_REGISTER_PMEM_MAPPING, \ (_qzz_addr), (_qzz_len), 0, 0, 0) /** Register a persistent memory file */ #define VALGRIND_PMC_REGISTER_PMEM_FILE(_qzz_desc, _qzz_addr_base, \ _qzz_size, _qzz_offset) \ VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \ VG_USERREQ__PMC_REGISTER_PMEM_FILE, \ (_qzz_desc), (_qzz_addr_base), (_qzz_size), \ (_qzz_offset), 0) /** Remove a persistent memory mapping region */ #define VALGRIND_PMC_REMOVE_PMEM_MAPPING(_qzz_addr,_qzz_len) \ VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \ VG_USERREQ__PMC_REMOVE_PMEM_MAPPING, \ (_qzz_addr), (_qzz_len), 0, 0, 0) /** Check if the given range is a registered persistent memory mapping */ #define VALGRIND_PMC_CHECK_IS_PMEM_MAPPING(_qzz_addr,_qzz_len) \ VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \ VG_USERREQ__PMC_CHECK_IS_PMEM_MAPPING, \ (_qzz_addr), (_qzz_len), 0, 0, 0) /** Register an SFENCE */ #define VALGRIND_PMC_PRINT_PMEM_MAPPINGS \ VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__PMC_PRINT_PMEM_MAPPINGS, \ 0, 0, 0, 0, 0) /** Register a CLFLUSH-like operation */ #define VALGRIND_PMC_DO_FLUSH(_qzz_addr,_qzz_len) \ VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \ VG_USERREQ__PMC_DO_FLUSH, \ (_qzz_addr), (_qzz_len), 0, 0, 0) /** Register an SFENCE */ #define VALGRIND_PMC_DO_FENCE \ VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__PMC_DO_FENCE, \ 0, 0, 0, 0, 0) /** Write tool stats */ #define VALGRIND_PMC_WRITE_STATS \ VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__PMC_WRITE_STATS, 
\ 0, 0, 0, 0, 0) /** Emit user log */ #define VALGRIND_PMC_EMIT_LOG(_qzz_emit_log) \ VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \ VG_USERREQ__PMC_EMIT_LOG, \ (_qzz_emit_log), 0, 0, 0, 0) /** Set a region of persistent memory as clean */ #define VALGRIND_PMC_SET_CLEAN(_qzz_addr,_qzz_len) \ VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \ VG_USERREQ__PMC_SET_CLEAN, \ (_qzz_addr), (_qzz_len), 0, 0, 0) /** Support for transactions */ /** Start an implicit persistent memory transaction */ #define VALGRIND_PMC_START_TX \ VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__PMC_START_TX, \ 0, 0, 0, 0, 0) /** Start an explicit persistent memory transaction */ #define VALGRIND_PMC_START_TX_N(_qzz_txn) \ VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \ VG_USERREQ__PMC_START_TX_N, \ (_qzz_txn), 0, 0, 0, 0) /** End an implicit persistent memory transaction */ #define VALGRIND_PMC_END_TX \ VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__PMC_END_TX, \ 0, 0, 0, 0, 0) /** End an explicit persistent memory transaction */ #define VALGRIND_PMC_END_TX_N(_qzz_txn) \ VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \ VG_USERREQ__PMC_END_TX_N, \ (_qzz_txn), 0, 0, 0, 0) /** Add a persistent memory region to the implicit transaction */ #define VALGRIND_PMC_ADD_TO_TX(_qzz_addr,_qzz_len) \ VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \ VG_USERREQ__PMC_ADD_TO_TX, \ (_qzz_addr), (_qzz_len), 0, 0, 0) /** Add a persistent memory region to an explicit transaction */ #define VALGRIND_PMC_ADD_TO_TX_N(_qzz_txn,_qzz_addr,_qzz_len) \ VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \ VG_USERREQ__PMC_ADD_TO_TX_N, \ (_qzz_txn), (_qzz_addr), (_qzz_len), 0, 0) /** Remove a persistent memory region from the implicit transaction */ #define VALGRIND_PMC_REMOVE_FROM_TX(_qzz_addr,_qzz_len) \ VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \ VG_USERREQ__PMC_REMOVE_FROM_TX, \ (_qzz_addr), (_qzz_len), 0, 0, 0) /** Remove a persistent memory region from an explicit transaction */ #define VALGRIND_PMC_REMOVE_FROM_TX_N(_qzz_txn,_qzz_addr,_qzz_len) \ VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \ VG_USERREQ__PMC_REMOVE_FROM_TX_N, \ (_qzz_txn), (_qzz_addr), (_qzz_len), 0, 0) /** End an explicit persistent memory transaction */ #define VALGRIND_PMC_ADD_THREAD_TX_N(_qzz_txn) \ VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \ VG_USERREQ__PMC_ADD_THREAD_TO_TX_N, \ (_qzz_txn), 0, 0, 0, 0) /** End an explicit persistent memory transaction */ #define VALGRIND_PMC_REMOVE_THREAD_FROM_TX_N(_qzz_txn) \ VALGRIND_DO_CLIENT_REQUEST_EXPR(0 /* default return */, \ VG_USERREQ__PMC_REMOVE_THREAD_FROM_TX_N, \ (_qzz_txn), 0, 0, 0, 0) /** Remove a persistent memory region from the implicit transaction */ #define VALGRIND_PMC_ADD_TO_GLOBAL_TX_IGNORE(_qzz_addr,_qzz_len) \ VALGRIND_DO_CLIENT_REQUEST_STMT(VG_USERREQ__PMC_ADD_TO_GLOBAL_TX_IGNORE,\ (_qzz_addr), (_qzz_len), 0, 0, 0) #endif
9,085
47.588235
77
h
null
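The pmemcheck.h record above defines the client requests a persistent-memory library can emit so that pmemcheck can track stores, flushes, and transactions. The sketch below brackets a single 8-byte store with those requests; pmem_base stands in for a real memory-mapped pmem file and the sizes are illustrative.

/* Hedged sketch: issuing the pmemcheck client requests declared above
 * around one 8-byte store. 'pmem_base' is a placeholder for a mapped
 * pmem file; registration scope and sizes are illustrative only. */
#include <stdint.h>
#include "pmemcheck.h"

static void
store_counter(uint64_t *pmem_base, uint64_t value)
{
    /* announce the mapping so pmemcheck tracks stores into it */
    VALGRIND_PMC_REGISTER_PMEM_MAPPING(pmem_base, sizeof(*pmem_base));

    VALGRIND_PMC_START_TX;                                 /* implicit tx */
    VALGRIND_PMC_ADD_TO_TX(pmem_base, sizeof(*pmem_base));

    *pmem_base = value;                                    /* the store */

    VALGRIND_PMC_DO_FLUSH(pmem_base, sizeof(*pmem_base));  /* CLFLUSH-like */
    VALGRIND_PMC_DO_FENCE;                                 /* SFENCE */
    VALGRIND_PMC_END_TX;

    VALGRIND_PMC_REMOVE_PMEM_MAPPING(pmem_base, sizeof(*pmem_base));
}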
NearPMSW-main/nearpm/shadow/pmdk-sd/src/common/ctl.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2016-2019, Intel Corporation */ /* * ctl.h -- internal declaration of statistics and control related structures */ #ifndef PMDK_CTL_H #define PMDK_CTL_H 1 #include "queue.h" #include "errno.h" #include "out.h" #ifdef __cplusplus extern "C" { #endif struct ctl; struct ctl_index { const char *name; long value; PMDK_SLIST_ENTRY(ctl_index) entry; }; PMDK_SLIST_HEAD(ctl_indexes, ctl_index); enum ctl_query_source { CTL_UNKNOWN_QUERY_SOURCE, /* query executed directly from the program */ CTL_QUERY_PROGRAMMATIC, /* query executed from the config file */ CTL_QUERY_CONFIG_INPUT, MAX_CTL_QUERY_SOURCE }; enum ctl_query_type { CTL_QUERY_READ, CTL_QUERY_WRITE, CTL_QUERY_RUNNABLE, MAX_CTL_QUERY_TYPE }; typedef int (*node_callback)(void *ctx, enum ctl_query_source type, void *arg, struct ctl_indexes *indexes); enum ctl_node_type { CTL_NODE_UNKNOWN, CTL_NODE_NAMED, CTL_NODE_LEAF, CTL_NODE_INDEXED, MAX_CTL_NODE }; typedef int (*ctl_arg_parser)(const void *arg, void *dest, size_t dest_size); struct ctl_argument_parser { size_t dest_offset; /* offset of the field inside of the argument */ size_t dest_size; /* size of the field inside of the argument */ ctl_arg_parser parser; }; struct ctl_argument { size_t dest_size; /* sizeof the entire argument */ struct ctl_argument_parser parsers[]; /* array of 'fields' in arg */ }; #define sizeof_member(t, m) sizeof(((t *)0)->m) #define CTL_ARG_PARSER(t, p)\ {0, sizeof(t), p} #define CTL_ARG_PARSER_STRUCT(t, m, p)\ {offsetof(t, m), sizeof_member(t, m), p} #define CTL_ARG_PARSER_END {0, 0, NULL} /* * CTL Tree node structure, do not use directly. All the necessary functionality * is provided by the included macros. */ struct ctl_node { const char *name; enum ctl_node_type type; node_callback cb[MAX_CTL_QUERY_TYPE]; const struct ctl_argument *arg; const struct ctl_node *children; }; struct ctl *ctl_new(void); void ctl_delete(struct ctl *stats); int ctl_load_config_from_string(struct ctl *ctl, void *ctx, const char *cfg_string); int ctl_load_config_from_file(struct ctl *ctl, void *ctx, const char *cfg_file); /* Use through CTL_REGISTER_MODULE, never directly */ void ctl_register_module_node(struct ctl *c, const char *name, struct ctl_node *n); int ctl_arg_boolean(const void *arg, void *dest, size_t dest_size); #define CTL_ARG_BOOLEAN {sizeof(int),\ {{0, sizeof(int), ctl_arg_boolean},\ CTL_ARG_PARSER_END}}; int ctl_arg_integer(const void *arg, void *dest, size_t dest_size); #define CTL_ARG_INT {sizeof(int),\ {{0, sizeof(int), ctl_arg_integer},\ CTL_ARG_PARSER_END}}; #define CTL_ARG_LONG_LONG {sizeof(long long),\ {{0, sizeof(long long), ctl_arg_integer},\ CTL_ARG_PARSER_END}}; int ctl_arg_string(const void *arg, void *dest, size_t dest_size); #define CTL_ARG_STRING(len) {len,\ {{0, len, ctl_arg_string},\ CTL_ARG_PARSER_END}}; #define CTL_STR(name) #name #define CTL_NODE_END {NULL, CTL_NODE_UNKNOWN, {NULL, NULL, NULL}, NULL, NULL} #define CTL_NODE(name, ...)\ ctl_node_##__VA_ARGS__##_##name int ctl_query(struct ctl *ctl, void *ctx, enum ctl_query_source source, const char *name, enum ctl_query_type type, void *arg); /* Declaration of a new child node */ #define CTL_CHILD(name, ...)\ {CTL_STR(name), CTL_NODE_NAMED, {NULL, NULL, NULL}, NULL,\ (struct ctl_node *)CTL_NODE(name, __VA_ARGS__)} /* Declaration of a new indexed node */ #define CTL_INDEXED(name, ...)\ {CTL_STR(name), CTL_NODE_INDEXED, {NULL, NULL, NULL}, NULL,\ (struct ctl_node *)CTL_NODE(name, __VA_ARGS__)} #define CTL_READ_HANDLER(name, ...)\ 
ctl_##__VA_ARGS__##_##name##_read #define CTL_WRITE_HANDLER(name, ...)\ ctl_##__VA_ARGS__##_##name##_write #define CTL_RUNNABLE_HANDLER(name, ...)\ ctl_##__VA_ARGS__##_##name##_runnable #define CTL_ARG(name)\ ctl_arg_##name /* * Declaration of a new read-only leaf. If used the corresponding read function * must be declared by CTL_READ_HANDLER macro. */ #define CTL_LEAF_RO(name, ...)\ {CTL_STR(name), CTL_NODE_LEAF, \ {CTL_READ_HANDLER(name, __VA_ARGS__), NULL, NULL}, NULL, NULL} /* * Declaration of a new write-only leaf. If used the corresponding write * function must be declared by CTL_WRITE_HANDLER macro. */ #define CTL_LEAF_WO(name, ...)\ {CTL_STR(name), CTL_NODE_LEAF, \ {NULL, CTL_WRITE_HANDLER(name, __VA_ARGS__), NULL},\ &CTL_ARG(name), NULL} /* * Declaration of a new runnable leaf. If used the corresponding run * function must be declared by CTL_RUNNABLE_HANDLER macro. */ #define CTL_LEAF_RUNNABLE(name, ...)\ {CTL_STR(name), CTL_NODE_LEAF, \ {NULL, NULL, CTL_RUNNABLE_HANDLER(name, __VA_ARGS__)},\ NULL, NULL} /* * Declaration of a new read-write leaf. If used both read and write function * must be declared by CTL_READ_HANDLER and CTL_WRITE_HANDLER macros. */ #define CTL_LEAF_RW(name)\ {CTL_STR(name), CTL_NODE_LEAF,\ {CTL_READ_HANDLER(name), CTL_WRITE_HANDLER(name), NULL},\ &CTL_ARG(name), NULL} #define CTL_REGISTER_MODULE(_ctl, name)\ ctl_register_module_node((_ctl), CTL_STR(name),\ (struct ctl_node *)CTL_NODE(name)) #ifdef __cplusplus } #endif #endif
5,127
24.261084
80
h
null
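The ctl.h record above builds its query tree entirely out of token-pasting macros. A sketch of registering a hypothetical "demo" module with one read-only leaf follows; the module name, the "counter" leaf, and the handler body are invented, while the generated names (ctl__counter_read, ctl_node__demo) are what the CTL_READ_HANDLER/CTL_NODE macros produce.

/* Hedged sketch: a hypothetical "demo" CTL module using the macros above. */
#include "ctl.h"

static int demo_counter;

/* CTL_READ_HANDLER(counter) expands to ctl__counter_read */
static int
CTL_READ_HANDLER(counter)(void *ctx, enum ctl_query_source source,
    void *arg, struct ctl_indexes *indexes)
{
    (void) ctx; (void) source; (void) indexes;  /* unused in this sketch */

    int *out = arg;        /* read queries receive an output pointer */
    *out = demo_counter;
    return 0;
}

/* node list for the "demo" namespace; must end with CTL_NODE_END */
static const struct ctl_node CTL_NODE(demo)[] = {
    CTL_LEAF_RO(counter),
    CTL_NODE_END
};

/* exposes "demo.counter" for ctl_query(..., CTL_QUERY_READ, ...) */
void
demo_ctl_register(struct ctl *c)
{
    CTL_REGISTER_MODULE(c, demo);
}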
NearPMSW-main/nearpm/shadow/pmdk-sd/src/common/file.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2014-2020, Intel Corporation */ /* * file.h -- internal definitions for file module */ #ifndef PMDK_FILE_H #define PMDK_FILE_H 1 #include <stddef.h> #include <sys/stat.h> #include <sys/types.h> #include <dirent.h> #include <limits.h> #include "os.h" #ifdef __cplusplus extern "C" { #endif #ifdef _WIN32 #define NAME_MAX _MAX_FNAME #endif struct file_info { char filename[NAME_MAX + 1]; int is_dir; }; struct dir_handle { const char *path; #ifdef _WIN32 HANDLE handle; char *_file; #else DIR *dirp; #endif }; enum file_type { OTHER_ERROR = -2, NOT_EXISTS = -1, TYPE_NORMAL = 1, TYPE_DEVDAX = 2 }; int util_file_dir_open(struct dir_handle *a, const char *path); int util_file_dir_next(struct dir_handle *a, struct file_info *info); int util_file_dir_close(struct dir_handle *a); int util_file_dir_remove(const char *path); int util_file_exists(const char *path); enum file_type util_stat_get_type(const os_stat_t *st); enum file_type util_fd_get_type(int fd); enum file_type util_file_get_type(const char *path); int util_ddax_region_find(const char *path, unsigned *region_id); ssize_t util_file_get_size(const char *path); ssize_t util_fd_get_size(int fd); size_t util_file_device_dax_alignment(const char *path); void *util_file_map_whole(const char *path); int util_file_zero(const char *path, os_off_t off, size_t len); ssize_t util_file_pread(const char *path, void *buffer, size_t size, os_off_t offset); ssize_t util_file_pwrite(const char *path, const void *buffer, size_t size, os_off_t offset); int util_tmpfile(const char *dir, const char *templ, int flags); int util_is_absolute_path(const char *path); int util_file_create(const char *path, size_t size, size_t minsize); int util_file_open(const char *path, size_t *size, size_t minsize, int flags); int util_unlink(const char *path); int util_unlink_flock(const char *path); int util_file_mkdir(const char *path, mode_t mode); int util_write_all(int fd, const char *buf, size_t count); #ifndef _WIN32 #define util_read read #define util_write write #else static inline ssize_t util_read(int fd, void *buf, size_t count) { /* * Simulate short read, because Windows' _read uses "unsigned" as * a type of the last argument and "int" as a return type. * We have to limit "count" to what _read can return as a success, * not what it can accept. */ if (count > INT_MAX) count = INT_MAX; return _read(fd, buf, (unsigned)count); } static inline ssize_t util_write(int fd, const void *buf, size_t count) { /* * Simulate short write, because Windows' _write uses "unsigned" as * a type of the last argument and "int" as a return type. * We have to limit "count" to what _write can return as a success, * not what it can accept. */ if (count > INT_MAX) count = INT_MAX; return _write(fd, buf, (unsigned)count); } #define S_ISCHR(m) (((m) & S_IFMT) == S_IFCHR) #define S_ISDIR(m) (((m) & S_IFMT) == S_IFDIR) #endif #ifdef __cplusplus } #endif #endif
3,013
24.982759
78
h
null
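The file.h record above collects PMDK's file utilities. A small sketch of probing a path with a few of them follows; the return-value conventions (1 for "exists", negative on error) are assumptions not spelled out in the header, and "/mnt/pmem/pool.obj" would be an example path only.

/* Hedged sketch: probing a path with util_file_* helpers declared above.
 * Return-value conventions are assumed, not confirmed by the header. */
#include <stdio.h>
#include "file.h"

void
describe_path(const char *path)
{
    if (util_file_exists(path) != 1) {      /* assumed: 1 means "exists" */
        printf("%s: not found\n", path);
        return;
    }

    enum file_type type = util_file_get_type(path);
    ssize_t size = util_file_get_size(path);

    printf("%s: %s, %zd bytes\n", path,
        type == TYPE_DEVDAX ? "device DAX" : "regular file", size);
}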
NearPMSW-main/nearpm/shadow/pmdk-sd/src/common/badblocks.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2017-2020, Intel Corporation */ /* * badblocks.h -- bad blocks API based on the libpmem2 library */ #ifndef PMDK_BADBLOCKS_H #define PMDK_BADBLOCKS_H 1 #include <string.h> #include <stdint.h> #include <sys/types.h> #ifdef __cplusplus extern "C" { #endif #define B2SEC(n) ((n) >> 9) /* convert bytes to sectors */ #define SEC2B(n) ((n) << 9) /* convert sectors to bytes */ #define NO_HEALTHY_REPLICA ((int)(-1)) #define BB_NOT_SUPP \ "checking bad blocks is not supported on this OS, please switch off the CHECK_BAD_BLOCKS compat feature using 'pmempool-feature'" /* * 'struct badblock' is already defined in ndctl/libndctl.h, * so we cannot use this name. * * libndctl returns offset relative to the beginning of the region, * but in this structure we save offset relative to the beginning of: * - namespace (before badblocks_get()) * and * - file (before sync_recalc_badblocks()) * and * - pool (after sync_recalc_badblocks()) */ struct bad_block { /* * offset in bytes relative to the beginning of * - namespace (before badblocks_get()) * and * - file (before sync_recalc_badblocks()) * and * - pool (after sync_recalc_badblocks()) */ size_t offset; /* length in bytes */ size_t length; /* number of healthy replica to fix this bad block */ int nhealthy; }; struct badblocks { unsigned bb_cnt; /* number of bad blocks */ struct bad_block *bbv; /* array of bad blocks */ }; struct badblocks *badblocks_new(void); void badblocks_delete(struct badblocks *bbs); long badblocks_count(const char *path); int badblocks_get(const char *file, struct badblocks *bbs); int badblocks_clear(const char *path, struct badblocks *bbs); int badblocks_clear_all(const char *file); int badblocks_check_file(const char *path); #ifdef __cplusplus } #endif #endif /* PMDK_BADBLOCKS_H */
1,878
23.089744
130
h
null
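The badblocks.h record above wraps libpmem2's bad-block handling. The sketch below counts and lists bad blocks for a file using only the declarations shown; the assumption that negative return values signal failure, and the path handling, are illustrative.

/* Hedged sketch: counting and listing bad blocks with the API above.
 * Error-handling conventions are assumptions. */
#include <stdio.h>
#include "badblocks.h"

int
report_badblocks(const char *path)
{
    long cnt = badblocks_count(path);
    if (cnt < 0)
        return -1;
    printf("%s: %ld bad block(s)\n", path, cnt);

    struct badblocks *bbs = badblocks_new();
    if (bbs == NULL)
        return -1;

    if (badblocks_get(path, bbs) == 0) {
        for (unsigned i = 0; i < bbs->bb_cnt; ++i)
            printf("  offset %zu length %zu\n",
                bbs->bbv[i].offset, bbs->bbv[i].length);
    }

    badblocks_delete(bbs);
    return 0;
}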
NearPMSW-main/nearpm/shadow/pmdk-sd/src/common/ctl.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2016-2020, Intel Corporation */ /* * ctl.c -- implementation of the interface for examination and modification of * the library's internal state */ #include "ctl.h" #include "os.h" #include "alloc.h" #define CTL_MAX_ENTRIES 100 #define MAX_CONFIG_FILE_LEN (1 << 20) /* 1 megabyte */ #define CTL_STRING_QUERY_SEPARATOR ";" #define CTL_NAME_VALUE_SEPARATOR "=" #define CTL_QUERY_NODE_SEPARATOR "." #define CTL_VALUE_ARG_SEPARATOR "," static int ctl_global_first_free = 0; static struct ctl_node CTL_NODE(global)[CTL_MAX_ENTRIES]; /* * This is the top level node of the ctl tree structure. Each node can contain * children and leaf nodes. * * Internal nodes simply create a new path in the tree whereas child nodes are * the ones providing the read/write functionality by the means of callbacks. * * Each tree node must be NULL-terminated, CTL_NODE_END macro is provided for * convenience. */ struct ctl { struct ctl_node root[CTL_MAX_ENTRIES]; int first_free; }; /* * ctl_find_node -- (internal) searches for a matching entry point in the * provided nodes * * The caller is responsible for freeing all of the allocated indexes, * regardless of the return value. */ static const struct ctl_node * ctl_find_node(const struct ctl_node *nodes, const char *name, struct ctl_indexes *indexes) { LOG(3, "nodes %p name %s indexes %p", nodes, name, indexes); const struct ctl_node *n = NULL; char *sptr = NULL; char *parse_str = Strdup(name); if (parse_str == NULL) return NULL; char *node_name = strtok_r(parse_str, CTL_QUERY_NODE_SEPARATOR, &sptr); /* * Go through the string and separate tokens that correspond to nodes * in the main ctl tree. */ while (node_name != NULL) { char *endptr; /* * Ignore errno from strtol: FreeBSD returns EINVAL if no * conversion is performed. Linux does not, but endptr * check is valid in both cases. 
*/ int tmp_errno = errno; long index_value = strtol(node_name, &endptr, 0); errno = tmp_errno; struct ctl_index *index_entry = NULL; if (endptr != node_name) { /* a valid index */ index_entry = Malloc(sizeof(*index_entry)); if (index_entry == NULL) goto error; index_entry->value = index_value; PMDK_SLIST_INSERT_HEAD(indexes, index_entry, entry); } for (n = &nodes[0]; n->name != NULL; ++n) { if (index_entry && n->type == CTL_NODE_INDEXED) break; else if (strcmp(n->name, node_name) == 0) break; } if (n->name == NULL) goto error; if (index_entry) index_entry->name = n->name; nodes = n->children; node_name = strtok_r(NULL, CTL_QUERY_NODE_SEPARATOR, &sptr); } Free(parse_str); return n; error: Free(parse_str); return NULL; } /* * ctl_delete_indexes -- * (internal) removes and frees all entries on the index list */ static void ctl_delete_indexes(struct ctl_indexes *indexes) { while (!PMDK_SLIST_EMPTY(indexes)) { struct ctl_index *index = PMDK_SLIST_FIRST(indexes); PMDK_SLIST_REMOVE_HEAD(indexes, entry); Free(index); } } /* * ctl_parse_args -- (internal) parses a string argument based on the node * structure */ static void * ctl_parse_args(const struct ctl_argument *arg_proto, char *arg) { ASSERTne(arg, NULL); char *dest_arg = Malloc(arg_proto->dest_size); if (dest_arg == NULL) { ERR("!Malloc"); return NULL; } char *sptr = NULL; char *arg_sep = strtok_r(arg, CTL_VALUE_ARG_SEPARATOR, &sptr); for (const struct ctl_argument_parser *p = arg_proto->parsers; p->parser != NULL; ++p) { ASSERT(p->dest_offset + p->dest_size <= arg_proto->dest_size); if (arg_sep == NULL) { ERR("!strtok_r"); goto error_parsing; } if (p->parser(arg_sep, dest_arg + p->dest_offset, p->dest_size) != 0) goto error_parsing; arg_sep = strtok_r(NULL, CTL_VALUE_ARG_SEPARATOR, &sptr); } return dest_arg; error_parsing: Free(dest_arg); return NULL; } /* * ctl_query_get_real_args -- (internal) returns a pointer with actual argument * structure as required by the node callback */ static void * ctl_query_get_real_args(const struct ctl_node *n, void *write_arg, enum ctl_query_source source) { void *real_arg = NULL; switch (source) { case CTL_QUERY_CONFIG_INPUT: real_arg = ctl_parse_args(n->arg, write_arg); break; case CTL_QUERY_PROGRAMMATIC: real_arg = write_arg; break; default: ASSERT(0); break; } return real_arg; } /* * ctl_query_cleanup_real_args -- (internal) cleanups relevant argument * structures allocated as a result of the get_real_args call */ static void ctl_query_cleanup_real_args(const struct ctl_node *n, void *real_arg, enum ctl_query_source source) { switch (source) { case CTL_QUERY_CONFIG_INPUT: Free(real_arg); break; case CTL_QUERY_PROGRAMMATIC: break; default: ASSERT(0); break; } } /* * ctl_exec_query_read -- (internal) calls the read callback of a node */ static int ctl_exec_query_read(void *ctx, const struct ctl_node *n, enum ctl_query_source source, void *arg, struct ctl_indexes *indexes) { if (arg == NULL) { ERR("read queries require non-NULL argument"); errno = EINVAL; return -1; } return n->cb[CTL_QUERY_READ](ctx, source, arg, indexes); } /* * ctl_exec_query_write -- (internal) calls the write callback of a node */ static int ctl_exec_query_write(void *ctx, const struct ctl_node *n, enum ctl_query_source source, void *arg, struct ctl_indexes *indexes) { if (arg == NULL) { ERR("write queries require non-NULL argument"); errno = EINVAL; return -1; } void *real_arg = ctl_query_get_real_args(n, arg, source); if (real_arg == NULL) { LOG(1, "Invalid arguments"); return -1; } int ret = n->cb[CTL_QUERY_WRITE](ctx, source, 
real_arg, indexes); ctl_query_cleanup_real_args(n, real_arg, source); return ret; } /* * ctl_exec_query_runnable -- (internal) calls the run callback of a node */ static int ctl_exec_query_runnable(void *ctx, const struct ctl_node *n, enum ctl_query_source source, void *arg, struct ctl_indexes *indexes) { return n->cb[CTL_QUERY_RUNNABLE](ctx, source, arg, indexes); } static int (*ctl_exec_query[MAX_CTL_QUERY_TYPE])(void *ctx, const struct ctl_node *n, enum ctl_query_source source, void *arg, struct ctl_indexes *indexes) = { ctl_exec_query_read, ctl_exec_query_write, ctl_exec_query_runnable, }; /* * ctl_query -- (internal) parses the name and calls the appropriate methods * from the ctl tree */ int ctl_query(struct ctl *ctl, void *ctx, enum ctl_query_source source, const char *name, enum ctl_query_type type, void *arg) { LOG(3, "ctl %p ctx %p source %d name %s type %d arg %p", ctl, ctx, source, name, type, arg); if (name == NULL) { ERR("invalid query"); errno = EINVAL; return -1; } /* * All of the indexes are put on this list so that the handlers can * easily retrieve the index values. The list is cleared once the ctl * query has been handled. */ struct ctl_indexes indexes; PMDK_SLIST_INIT(&indexes); int ret = -1; const struct ctl_node *n = ctl_find_node(CTL_NODE(global), name, &indexes); if (n == NULL && ctl) { ctl_delete_indexes(&indexes); n = ctl_find_node(ctl->root, name, &indexes); } if (n == NULL || n->type != CTL_NODE_LEAF || n->cb[type] == NULL) { ERR("invalid query entry point %s", name); errno = EINVAL; goto out; } ret = ctl_exec_query[type](ctx, n, source, arg, &indexes); out: ctl_delete_indexes(&indexes); return ret; } /* * ctl_register_module_node -- adds a new node to the CTL tree root. */ void ctl_register_module_node(struct ctl *c, const char *name, struct ctl_node *n) { struct ctl_node *nnode = c == NULL ? 
&CTL_NODE(global)[ctl_global_first_free++] : &c->root[c->first_free++]; nnode->children = n; nnode->type = CTL_NODE_NAMED; nnode->name = name; } /* * ctl_parse_query -- (internal) splits an entire query string * into name and value */ static int ctl_parse_query(char *qbuf, char **name, char **value) { if (qbuf == NULL) return -1; char *sptr; *name = strtok_r(qbuf, CTL_NAME_VALUE_SEPARATOR, &sptr); if (*name == NULL) return -1; *value = strtok_r(NULL, CTL_NAME_VALUE_SEPARATOR, &sptr); if (*value == NULL) return -1; /* the value itself mustn't include CTL_NAME_VALUE_SEPARATOR */ char *extra = strtok_r(NULL, CTL_NAME_VALUE_SEPARATOR, &sptr); if (extra != NULL) return -1; return 0; } /* * ctl_load_config -- executes the entire query collection from a provider */ static int ctl_load_config(struct ctl *ctl, void *ctx, char *buf) { int r = 0; char *sptr = NULL; /* for internal use of strtok */ char *name; char *value; ASSERTne(buf, NULL); char *qbuf = strtok_r(buf, CTL_STRING_QUERY_SEPARATOR, &sptr); while (qbuf != NULL) { r = ctl_parse_query(qbuf, &name, &value); if (r != 0) { ERR("failed to parse query %s", qbuf); return -1; } r = ctl_query(ctl, ctx, CTL_QUERY_CONFIG_INPUT, name, CTL_QUERY_WRITE, value); if (r < 0 && ctx != NULL) return -1; qbuf = strtok_r(NULL, CTL_STRING_QUERY_SEPARATOR, &sptr); } return 0; } /* * ctl_load_config_from_string -- loads obj configuration from string */ int ctl_load_config_from_string(struct ctl *ctl, void *ctx, const char *cfg_string) { LOG(3, "ctl %p ctx %p cfg_string \"%s\"", ctl, ctx, cfg_string); char *buf = Strdup(cfg_string); if (buf == NULL) { ERR("!Strdup"); return -1; } int ret = ctl_load_config(ctl, ctx, buf); Free(buf); return ret; } /* * ctl_load_config_from_file -- loads obj configuration from file * * This function opens up the config file, allocates a buffer of size equal to * the size of the file, reads its content and sanitizes it for ctl_load_config. 
*/ int ctl_load_config_from_file(struct ctl *ctl, void *ctx, const char *cfg_file) { LOG(3, "ctl %p ctx %p cfg_file \"%s\"", ctl, ctx, cfg_file); int ret = -1; FILE *fp = os_fopen(cfg_file, "r"); if (fp == NULL) return ret; int err; if ((err = fseek(fp, 0, SEEK_END)) != 0) goto error_file_parse; long fsize = ftell(fp); if (fsize == -1) goto error_file_parse; if (fsize > MAX_CONFIG_FILE_LEN) { ERR("Config file too large"); goto error_file_parse; } if ((err = fseek(fp, 0, SEEK_SET)) != 0) goto error_file_parse; char *buf = Zalloc((size_t)fsize + 1); /* +1 for NULL-termination */ if (buf == NULL) { ERR("!Zalloc"); goto error_file_parse; } size_t bufpos = 0; int c; int is_comment_section = 0; while ((c = fgetc(fp)) != EOF) { if (c == '#') is_comment_section = 1; else if (c == '\n') is_comment_section = 0; else if (!is_comment_section && !isspace(c)) buf[bufpos++] = (char)c; } ret = ctl_load_config(ctl, ctx, buf); Free(buf); error_file_parse: (void) fclose(fp); return ret; } /* * ctl_new -- allocates and initializes ctl data structures */ struct ctl * ctl_new(void) { struct ctl *c = Zalloc(sizeof(struct ctl)); if (c == NULL) { ERR("!Zalloc"); return NULL; } c->first_free = 0; return c; } /* * ctl_delete -- deletes ctl */ void ctl_delete(struct ctl *c) { Free(c); } /* * ctl_parse_ll -- (internal) parses and returns a long long signed integer */ static long long ctl_parse_ll(const char *str) { char *endptr; int olderrno = errno; errno = 0; long long val = strtoll(str, &endptr, 0); if (endptr == str || errno != 0) return LLONG_MIN; errno = olderrno; return val; } /* * ctl_arg_boolean -- checks whether the provided argument contains * either a 1 or y or Y. */ int ctl_arg_boolean(const void *arg, void *dest, size_t dest_size) { int *intp = dest; char in = ((char *)arg)[0]; if (tolower(in) == 'y' || in == '1') { *intp = 1; return 0; } else if (tolower(in) == 'n' || in == '0') { *intp = 0; return 0; } return -1; } /* * ctl_arg_integer -- parses signed integer argument */ int ctl_arg_integer(const void *arg, void *dest, size_t dest_size) { long long val = ctl_parse_ll(arg); if (val == LLONG_MIN) return -1; switch (dest_size) { case sizeof(int): if (val > INT_MAX || val < INT_MIN) return -1; *(int *)dest = (int)val; break; case sizeof(long long): *(long long *)dest = val; break; case sizeof(uint8_t): if (val > UINT8_MAX || val < 0) return -1; *(uint8_t *)dest = (uint8_t)val; break; default: ERR("invalid destination size %zu", dest_size); errno = EINVAL; return -1; } return 0; } /* * ctl_arg_string -- verifies length and copies a string argument into a zeroed * buffer */ int ctl_arg_string(const void *arg, void *dest, size_t dest_size) { /* check if the incoming string is longer or equal to dest_size */ if (strnlen(arg, dest_size) == dest_size) return -1; strncpy(dest, arg, dest_size); return 0; }
12,706
20.946459
80
c
null
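The ctl.c record above parses queries using ';' between queries, '=' between name and value, and '.' between tree nodes. A sketch of feeding such a config string through the public entry point follows; the "prefault.*" entry points are hypothetical here, since valid names depend on which modules were registered with the ctl instance.

/* Hedged sketch: loading a config string with the separators defined above.
 * The leaf names are illustrative; they must match registered modules. */
#include "ctl.h"

int
load_demo_config(struct ctl *ctl, void *ctx)
{
    /* two write queries separated by ';', each "node.path=value" */
    const char *cfg = "prefault.at_create=1;prefault.at_open=0";

    return ctl_load_config_from_string(ctl, ctx, cfg);
}

Queries coming from such a string are tagged CTL_QUERY_CONFIG_INPUT, so the write handlers receive arguments that have already been run through the ctl_parse_args() machinery shown above.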
NearPMSW-main/nearpm/shadow/pmdk-sd/src/common/pool_hdr.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2014-2019, Intel Corporation */ /* * pool_hdr.c -- pool header utilities */ #include <errno.h> #include <stdio.h> #include <string.h> #include <endian.h> #include "out.h" #include "pool_hdr.h" /* Determine ISA for which PMDK is currently compiled */ #if defined(__x86_64) || defined(_M_X64) /* x86 -- 64 bit */ #define PMDK_MACHINE PMDK_MACHINE_X86_64 #define PMDK_MACHINE_CLASS PMDK_MACHINE_CLASS_64 #elif defined(__aarch64__) /* 64 bit ARM not supported yet */ #define PMDK_MACHINE PMDK_MACHINE_AARCH64 #define PMDK_MACHINE_CLASS PMDK_MACHINE_CLASS_64 #elif defined(__PPC64__) #define PMDK_MACHINE PMDK_MACHINE_PPC64 #define PMDK_MACHINE_CLASS PMDK_MACHINE_CLASS_64 #else /* add appropriate definitions here when porting PMDK to another ISA */ #error unable to recognize ISA at compile time #endif /* * arch_machine -- (internal) determine endianness */ static uint8_t arch_data(void) { uint16_t word = (PMDK_DATA_BE << 8) + PMDK_DATA_LE; return ((uint8_t *)&word)[0]; } /* * util_get_arch_flags -- get architecture identification flags */ void util_get_arch_flags(struct arch_flags *arch_flags) { memset(arch_flags, 0, sizeof(*arch_flags)); arch_flags->machine = PMDK_MACHINE; arch_flags->machine_class = PMDK_MACHINE_CLASS; arch_flags->data = arch_data(); arch_flags->alignment_desc = alignment_desc(); } /* * util_convert2le_hdr -- convert pool_hdr into little-endian byte order */ void util_convert2le_hdr(struct pool_hdr *hdrp) { hdrp->major = htole32(hdrp->major); hdrp->features.compat = htole32(hdrp->features.compat); hdrp->features.incompat = htole32(hdrp->features.incompat); hdrp->features.ro_compat = htole32(hdrp->features.ro_compat); hdrp->arch_flags.alignment_desc = htole64(hdrp->arch_flags.alignment_desc); hdrp->arch_flags.machine = htole16(hdrp->arch_flags.machine); hdrp->crtime = htole64(hdrp->crtime); hdrp->checksum = htole64(hdrp->checksum); } /* * util_convert2h_hdr_nocheck -- convert pool_hdr into host byte order */ void util_convert2h_hdr_nocheck(struct pool_hdr *hdrp) { hdrp->major = le32toh(hdrp->major); hdrp->features.compat = le32toh(hdrp->features.compat); hdrp->features.incompat = le32toh(hdrp->features.incompat); hdrp->features.ro_compat = le32toh(hdrp->features.ro_compat); hdrp->crtime = le64toh(hdrp->crtime); hdrp->arch_flags.machine = le16toh(hdrp->arch_flags.machine); hdrp->arch_flags.alignment_desc = le64toh(hdrp->arch_flags.alignment_desc); hdrp->checksum = le64toh(hdrp->checksum); } /* * util_arch_flags_check -- validates arch_flags */ int util_check_arch_flags(const struct arch_flags *arch_flags) { struct arch_flags cur_af; int ret = 0; util_get_arch_flags(&cur_af); if (!util_is_zeroed(&arch_flags->reserved, sizeof(arch_flags->reserved))) { ERR("invalid reserved values"); ret = -1; } if (arch_flags->machine != cur_af.machine) { ERR("invalid machine value"); ret = -1; } if (arch_flags->data != cur_af.data) { ERR("invalid data value"); ret = -1; } if (arch_flags->machine_class != cur_af.machine_class) { ERR("invalid machine_class value"); ret = -1; } if (arch_flags->alignment_desc != cur_af.alignment_desc) { ERR("invalid alignment_desc value"); ret = -1; } return ret; } /* * util_get_unknown_features -- filter out unknown features flags */ features_t util_get_unknown_features(features_t features, features_t known) { features_t unknown; unknown.compat = util_get_not_masked_bits( features.compat, known.compat); unknown.incompat = util_get_not_masked_bits( features.incompat, known.incompat); unknown.ro_compat = 
util_get_not_masked_bits( features.ro_compat, known.ro_compat); return unknown; } /* * util_feature_check -- check features masks */ int util_feature_check(struct pool_hdr *hdrp, features_t known) { LOG(3, "hdrp %p features {incompat %#x ro_compat %#x compat %#x}", hdrp, known.incompat, known.ro_compat, known.compat); features_t unknown = util_get_unknown_features(hdrp->features, known); /* check incompatible ("must support") features */ if (unknown.incompat) { ERR("unsafe to continue due to unknown incompat "\ "features: %#x", unknown.incompat); errno = EINVAL; return -1; } /* check RO-compatible features (force RO if unsupported) */ if (unknown.ro_compat) { ERR("switching to read-only mode due to unknown ro_compat "\ "features: %#x", unknown.ro_compat); return 0; } /* check compatible ("may") features */ if (unknown.compat) { LOG(3, "ignoring unknown compat features: %#x", unknown.compat); } return 1; } /* * util_feature_cmp -- compares features with reference * * returns 1 if features and reference match and 0 otherwise */ int util_feature_cmp(features_t features, features_t ref) { LOG(3, "features {incompat %#x ro_compat %#x compat %#x} " "ref {incompat %#x ro_compat %#x compat %#x}", features.incompat, features.ro_compat, features.compat, ref.incompat, ref.ro_compat, ref.compat); return features.compat == ref.compat && features.incompat == ref.incompat && features.ro_compat == ref.ro_compat; } /* * util_feature_is_zero -- check if features flags are zeroed * * returns 1 if features is zeroed and 0 otherwise */ int util_feature_is_zero(features_t features) { const uint32_t bits = features.compat | features.incompat | features.ro_compat; return bits ? 0 : 1; } /* * util_feature_is_set -- check if feature flag is set in features * * returns 1 if feature flag is set and 0 otherwise */ int util_feature_is_set(features_t features, features_t flag) { uint32_t bits = 0; bits |= features.compat & flag.compat; bits |= features.incompat & flag.incompat; bits |= features.ro_compat & flag.ro_compat; return bits ? 
1 : 0; } /* * util_feature_enable -- enable feature */ void util_feature_enable(features_t *features, features_t new_feature) { #define FEATURE_ENABLE(flags, X) \ (flags) |= (X) FEATURE_ENABLE(features->compat, new_feature.compat); FEATURE_ENABLE(features->incompat, new_feature.incompat); FEATURE_ENABLE(features->ro_compat, new_feature.ro_compat); #undef FEATURE_ENABLE } /* * util_feature_disable -- (internal) disable feature */ void util_feature_disable(features_t *features, features_t old_feature) { #define FEATURE_DISABLE(flags, X) \ (flags) &= ~(X) FEATURE_DISABLE(features->compat, old_feature.compat); FEATURE_DISABLE(features->incompat, old_feature.incompat); FEATURE_DISABLE(features->ro_compat, old_feature.ro_compat); #undef FEATURE_DISABLE } static const features_t feature_2_pmempool_feature_map[] = { FEAT_INCOMPAT(SINGLEHDR), /* PMEMPOOL_FEAT_SINGLEHDR */ FEAT_INCOMPAT(CKSUM_2K), /* PMEMPOOL_FEAT_CKSUM_2K */ FEAT_INCOMPAT(SDS), /* PMEMPOOL_FEAT_SHUTDOWN_STATE */ FEAT_COMPAT(CHECK_BAD_BLOCKS), /* PMEMPOOL_FEAT_CHECK_BAD_BLOCKS */ }; #define FEAT_2_PMEMPOOL_FEATURE_MAP_SIZE \ ARRAY_SIZE(feature_2_pmempool_feature_map) static const char *str_2_pmempool_feature_map[] = { "SINGLEHDR", "CKSUM_2K", "SHUTDOWN_STATE", "CHECK_BAD_BLOCKS", }; #define PMEMPOOL_FEATURE_2_STR_MAP_SIZE ARRAY_SIZE(str_2_pmempool_feature_map) /* * util_str2feature -- convert string to feat_flags value */ features_t util_str2feature(const char *str) { /* all features have to be named in incompat_features_str array */ COMPILE_ERROR_ON(FEAT_2_PMEMPOOL_FEATURE_MAP_SIZE != PMEMPOOL_FEATURE_2_STR_MAP_SIZE); for (uint32_t f = 0; f < PMEMPOOL_FEATURE_2_STR_MAP_SIZE; ++f) { if (strcmp(str, str_2_pmempool_feature_map[f]) == 0) { return feature_2_pmempool_feature_map[f]; } } return features_zero; } /* * util_feature2pmempool_feature -- convert feature to pmempool_feature */ uint32_t util_feature2pmempool_feature(features_t feat) { for (uint32_t pf = 0; pf < FEAT_2_PMEMPOOL_FEATURE_MAP_SIZE; ++pf) { const features_t *record = &feature_2_pmempool_feature_map[pf]; if (util_feature_cmp(feat, *record)) { return pf; } } return UINT32_MAX; } /* * util_str2pmempool_feature -- convert string to uint32_t enum pmempool_feature * equivalent */ uint32_t util_str2pmempool_feature(const char *str) { features_t fval = util_str2feature(str); if (util_feature_is_zero(fval)) return UINT32_MAX; return util_feature2pmempool_feature(fval); } /* * util_feature2str -- convert uint32_t feature to string */ const char * util_feature2str(features_t features, features_t *found) { for (uint32_t i = 0; i < FEAT_2_PMEMPOOL_FEATURE_MAP_SIZE; ++i) { const features_t *record = &feature_2_pmempool_feature_map[i]; if (util_feature_is_set(features, *record)) { if (found) memcpy(found, record, sizeof(features_t)); return str_2_pmempool_feature_map[i]; } } return NULL; }
8,733
24.242775
80
c
null
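The pool_hdr.c record above implements the feature-flag helpers used by pmempool. A sketch of toggling one named feature follows; it assumes the features_t type and FEAT_* definitions come from pool_hdr.h, which is not part of this record.

/* Hedged sketch: exercising the feature-flag helpers defined above.
 * features_t and the feature masks come from pool_hdr.h (not shown). */
#include <stdio.h>
#include "pool_hdr.h"

void
toggle_cksum_2k(features_t *features)
{
    /* map the pmempool-visible string name to a features_t mask */
    features_t f = util_str2feature("CKSUM_2K");

    if (util_feature_is_zero(f))
        return;                        /* unknown feature name */

    if (!util_feature_is_set(*features, f)) {
        util_feature_enable(features, f);
        const char *name = util_feature2str(f, NULL);
        printf("enabled %s\n", name ? name : "(unknown)");
    }
}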
NearPMSW-main/nearpm/shadow/pmdk-sd/src/common/shutdown_state.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2017-2020, Intel Corporation */ /* * shutdown_state.h -- unsafe shutdown detection */ #ifndef PMDK_SHUTDOWN_STATE_H #define PMDK_SHUTDOWN_STATE_H 1 #include <stdint.h> #ifdef __cplusplus extern "C" { #endif struct pool_replica; struct shutdown_state { uint64_t usc; uint64_t uuid; /* UID checksum */ uint8_t dirty; uint8_t reserved[39]; uint64_t checksum; }; int shutdown_state_init(struct shutdown_state *sds, struct pool_replica *rep); int shutdown_state_add_part(struct shutdown_state *sds, int fd, struct pool_replica *rep); void shutdown_state_set_dirty(struct shutdown_state *sds, struct pool_replica *rep); void shutdown_state_clear_dirty(struct shutdown_state *sds, struct pool_replica *rep); int shutdown_state_check(struct shutdown_state *curr_sds, struct shutdown_state *pool_sds, struct pool_replica *rep); #ifdef __cplusplus } #endif #endif /* shutdown_state.h */
950
21.642857
78
h
null
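The shutdown_state.h record above declares the shutdown-state (SDS) lifecycle: initialize, mark dirty while the pool is in use, clear on clean close, compare on the next open. The sketch below only illustrates that flow; passing NULL for the replica pointer (taken here to mean "do not persist") and the return-value conventions are assumptions, not guarantees from the header.

/* Hedged sketch of the SDS check implied by the declarations above.
 * NULL replica handling and return conventions are assumptions. */
#include "shutdown_state.h"

int
check_unsafe_shutdown(struct shutdown_state *pool_sds)
{
    struct shutdown_state curr;

    if (shutdown_state_init(&curr, NULL))  /* assumed: nonzero == failure */
        return -1;

    /* assumed: nonzero return means an unsafe shutdown was detected */
    return shutdown_state_check(&curr, pool_sds, NULL);
}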
NearPMSW-main/nearpm/shadow/pmdk-sd/src/common/uuid.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2014-2018, Intel Corporation */ /* * uuid.c -- uuid utilities */ #include <stdlib.h> #include <stdio.h> #include <string.h> #include "uuid.h" #include "out.h" /* * util_uuid_to_string -- generate a string form of the uuid */ int util_uuid_to_string(const uuid_t u, char *buf) { int len; /* size that is returned from sprintf call */ if (buf == NULL) { LOG(2, "invalid buffer for uuid string"); return -1; } if (u == NULL) { LOG(2, "invalid uuid structure"); return -1; } struct uuid *uuid = (struct uuid *)u; len = snprintf(buf, POOL_HDR_UUID_STR_LEN, "%08x-%04x-%04x-%02x%02x-%02x%02x%02x%02x%02x%02x", uuid->time_low, uuid->time_mid, uuid->time_hi_and_ver, uuid->clock_seq_hi, uuid->clock_seq_low, uuid->node[0], uuid->node[1], uuid->node[2], uuid->node[3], uuid->node[4], uuid->node[5]); if (len != POOL_HDR_UUID_STR_LEN - 1) { LOG(2, "snprintf(uuid): %d", len); return -1; } return 0; } /* * util_uuid_from_string -- generate a binary form of the uuid * * uuid string read from /proc/sys/kernel/random/uuid. UUID string * format example: * f81d4fae-7dec-11d0-a765-00a0c91e6bf6 */ int util_uuid_from_string(const char *uuid, struct uuid *ud) { if (strlen(uuid) != 36) { LOG(2, "invalid uuid string"); return -1; } if (uuid[8] != '-' || uuid[13] != '-' || uuid[18] != '-' || uuid[23] != '-') { LOG(2, "invalid uuid string"); return -1; } int n = sscanf(uuid, "%08x-%04hx-%04hx-%02hhx%02hhx-" "%02hhx%02hhx%02hhx%02hhx%02hhx%02hhx", &ud->time_low, &ud->time_mid, &ud->time_hi_and_ver, &ud->clock_seq_hi, &ud->clock_seq_low, &ud->node[0], &ud->node[1], &ud->node[2], &ud->node[3], &ud->node[4], &ud->node[5]); if (n != 11) { LOG(2, "sscanf(uuid)"); return -1; } return 0; }
1,818
20.654762
66
c
null
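The uuid.c record above converts between string and binary UUID forms. A round-trip sketch follows; it assumes uuid.h (not shown here) defines uuid_t as the raw byte array that aliases struct uuid, which is what the cast inside util_uuid_to_string() above suggests, and it reuses the sample UUID quoted in the util_uuid_from_string() comment.

/* Hedged sketch: round-tripping a UUID through the helpers defined above.
 * uuid_t layout and POOL_HDR_UUID_STR_LEN come from uuid.h (not shown). */
#include <stdio.h>
#include "uuid.h"

int
roundtrip_uuid(void)
{
    uuid_t u;
    char buf[POOL_HDR_UUID_STR_LEN];

    if (util_uuid_from_string("f81d4fae-7dec-11d0-a765-00a0c91e6bf6",
            (struct uuid *)u))
        return -1;

    if (util_uuid_to_string(u, buf))
        return -1;

    printf("round-tripped uuid: %s\n", buf);
    return 0;
}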
NearPMSW-main/nearpm/shadow/pmdk-sd/src/common/queue.h
/* * Source: glibc 2.24 (git://sourceware.org/glibc.git /misc/sys/queue.h) * * Copyright (c) 1991, 1993 * The Regents of the University of California. All rights reserved. * Copyright (c) 2016, Microsoft Corporation. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of the University nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * @(#)queue.h 8.5 (Berkeley) 8/20/94 */ #ifndef _PMDK_QUEUE_H_ #define _PMDK_QUEUE_H_ /* * This file defines five types of data structures: singly-linked lists, * lists, simple queues, tail queues, and circular queues. * * A singly-linked list is headed by a single forward pointer. The * elements are singly linked for minimum space and pointer manipulation * overhead at the expense of O(n) removal for arbitrary elements. New * elements can be added to the list after an existing element or at the * head of the list. Elements being removed from the head of the list * should use the explicit macro for this purpose for optimum * efficiency. A singly-linked list may only be traversed in the forward * direction. Singly-linked lists are ideal for applications with large * datasets and few or no removals or for implementing a LIFO queue. * * A list is headed by a single forward pointer (or an array of forward * pointers for a hash table header). The elements are doubly linked * so that an arbitrary element can be removed without a need to * traverse the list. New elements can be added to the list before * or after an existing element or at the head of the list. A list * may only be traversed in the forward direction. * * A simple queue is headed by a pair of pointers, one the head of the * list and the other to the tail of the list. The elements are singly * linked to save space, so elements can only be removed from the * head of the list. New elements can be added to the list after * an existing element, at the head of the list, or at the end of the * list. A simple queue may only be traversed in the forward direction. * * A tail queue is headed by a pair of pointers, one to the head of the * list and the other to the tail of the list. 
The elements are doubly * linked so that an arbitrary element can be removed without a need to * traverse the list. New elements can be added to the list before or * after an existing element, at the head of the list, or at the end of * the list. A tail queue may be traversed in either direction. * * A circle queue is headed by a pair of pointers, one to the head of the * list and the other to the tail of the list. The elements are doubly * linked so that an arbitrary element can be removed without a need to * traverse the list. New elements can be added to the list before or after * an existing element, at the head of the list, or at the end of the list. * A circle queue may be traversed in either direction, but has a more * complex end of list detection. * * For details on the use of these macros, see the queue(3) manual page. */ /* * XXX This is a workaround for a bug in the llvm's static analyzer. For more * info see https://github.com/pmem/issues/issues/309. */ #ifdef __clang_analyzer__ static void custom_assert(void) { abort(); } #define ANALYZER_ASSERT(x) (__builtin_expect(!(x), 0) ? (void)0 : custom_assert()) #else #define ANALYZER_ASSERT(x) do {} while (0) #endif /* * List definitions. */ #define PMDK_LIST_HEAD(name, type) \ struct name { \ struct type *lh_first; /* first element */ \ } #define PMDK_LIST_HEAD_INITIALIZER(head) \ { NULL } #ifdef __cplusplus #define PMDK__CAST_AND_ASSIGN(x, y) x = (__typeof__(x))y; #else #define PMDK__CAST_AND_ASSIGN(x, y) x = (void *)(y); #endif #define PMDK_LIST_ENTRY(type) \ struct { \ struct type *le_next; /* next element */ \ struct type **le_prev; /* address of previous next element */ \ } /* * List functions. */ #define PMDK_LIST_INIT(head) do { \ (head)->lh_first = NULL; \ } while (/*CONSTCOND*/0) #define PMDK_LIST_INSERT_AFTER(listelm, elm, field) do { \ if (((elm)->field.le_next = (listelm)->field.le_next) != NULL) \ (listelm)->field.le_next->field.le_prev = \ &(elm)->field.le_next; \ (listelm)->field.le_next = (elm); \ (elm)->field.le_prev = &(listelm)->field.le_next; \ } while (/*CONSTCOND*/0) #define PMDK_LIST_INSERT_BEFORE(listelm, elm, field) do { \ (elm)->field.le_prev = (listelm)->field.le_prev; \ (elm)->field.le_next = (listelm); \ *(listelm)->field.le_prev = (elm); \ (listelm)->field.le_prev = &(elm)->field.le_next; \ } while (/*CONSTCOND*/0) #define PMDK_LIST_INSERT_HEAD(head, elm, field) do { \ if (((elm)->field.le_next = (head)->lh_first) != NULL) \ (head)->lh_first->field.le_prev = &(elm)->field.le_next;\ (head)->lh_first = (elm); \ (elm)->field.le_prev = &(head)->lh_first; \ } while (/*CONSTCOND*/0) #define PMDK_LIST_REMOVE(elm, field) do { \ ANALYZER_ASSERT((elm) != NULL); \ if ((elm)->field.le_next != NULL) \ (elm)->field.le_next->field.le_prev = \ (elm)->field.le_prev; \ *(elm)->field.le_prev = (elm)->field.le_next; \ } while (/*CONSTCOND*/0) #define PMDK_LIST_FOREACH(var, head, field) \ for ((var) = ((head)->lh_first); \ (var); \ (var) = ((var)->field.le_next)) /* * List access methods. */ #define PMDK_LIST_EMPTY(head) ((head)->lh_first == NULL) #define PMDK_LIST_FIRST(head) ((head)->lh_first) #define PMDK_LIST_NEXT(elm, field) ((elm)->field.le_next) /* * Singly-linked List definitions. */ #define PMDK_SLIST_HEAD(name, type) \ struct name { \ struct type *slh_first; /* first element */ \ } #define PMDK_SLIST_HEAD_INITIALIZER(head) \ { NULL } #define PMDK_SLIST_ENTRY(type) \ struct { \ struct type *sle_next; /* next element */ \ } /* * Singly-linked List functions. 
*/ #define PMDK_SLIST_INIT(head) do { \ (head)->slh_first = NULL; \ } while (/*CONSTCOND*/0) #define PMDK_SLIST_INSERT_AFTER(slistelm, elm, field) do { \ (elm)->field.sle_next = (slistelm)->field.sle_next; \ (slistelm)->field.sle_next = (elm); \ } while (/*CONSTCOND*/0) #define PMDK_SLIST_INSERT_HEAD(head, elm, field) do { \ (elm)->field.sle_next = (head)->slh_first; \ (head)->slh_first = (elm); \ } while (/*CONSTCOND*/0) #define PMDK_SLIST_REMOVE_HEAD(head, field) do { \ (head)->slh_first = (head)->slh_first->field.sle_next; \ } while (/*CONSTCOND*/0) #define PMDK_SLIST_REMOVE(head, elm, type, field) do { \ if ((head)->slh_first == (elm)) { \ PMDK_SLIST_REMOVE_HEAD((head), field); \ } \ else { \ struct type *curelm = (head)->slh_first; \ while(curelm->field.sle_next != (elm)) \ curelm = curelm->field.sle_next; \ curelm->field.sle_next = \ curelm->field.sle_next->field.sle_next; \ } \ } while (/*CONSTCOND*/0) #define PMDK_SLIST_FOREACH(var, head, field) \ for((var) = (head)->slh_first; (var); (var) = (var)->field.sle_next) /* * Singly-linked List access methods. */ #define PMDK_SLIST_EMPTY(head) ((head)->slh_first == NULL) #define PMDK_SLIST_FIRST(head) ((head)->slh_first) #define PMDK_SLIST_NEXT(elm, field) ((elm)->field.sle_next) /* * Singly-linked Tail queue declarations. */ #define PMDK_STAILQ_HEAD(name, type) \ struct name { \ struct type *stqh_first; /* first element */ \ struct type **stqh_last; /* addr of last next element */ \ } #define PMDK_STAILQ_HEAD_INITIALIZER(head) \ { NULL, &(head).stqh_first } #define PMDK_STAILQ_ENTRY(type) \ struct { \ struct type *stqe_next; /* next element */ \ } /* * Singly-linked Tail queue functions. */ #define PMDK_STAILQ_INIT(head) do { \ (head)->stqh_first = NULL; \ (head)->stqh_last = &(head)->stqh_first; \ } while (/*CONSTCOND*/0) #define PMDK_STAILQ_INSERT_HEAD(head, elm, field) do { \ if (((elm)->field.stqe_next = (head)->stqh_first) == NULL) \ (head)->stqh_last = &(elm)->field.stqe_next; \ (head)->stqh_first = (elm); \ } while (/*CONSTCOND*/0) #define PMDK_STAILQ_INSERT_TAIL(head, elm, field) do { \ (elm)->field.stqe_next = NULL; \ *(head)->stqh_last = (elm); \ (head)->stqh_last = &(elm)->field.stqe_next; \ } while (/*CONSTCOND*/0) #define PMDK_STAILQ_INSERT_AFTER(head, listelm, elm, field) do { \ if (((elm)->field.stqe_next = (listelm)->field.stqe_next) == NULL)\ (head)->stqh_last = &(elm)->field.stqe_next; \ (listelm)->field.stqe_next = (elm); \ } while (/*CONSTCOND*/0) #define PMDK_STAILQ_REMOVE_HEAD(head, field) do { \ if (((head)->stqh_first = (head)->stqh_first->field.stqe_next) == NULL) \ (head)->stqh_last = &(head)->stqh_first; \ } while (/*CONSTCOND*/0) #define PMDK_STAILQ_REMOVE(head, elm, type, field) do { \ if ((head)->stqh_first == (elm)) { \ PMDK_STAILQ_REMOVE_HEAD((head), field); \ } else { \ struct type *curelm = (head)->stqh_first; \ while (curelm->field.stqe_next != (elm)) \ curelm = curelm->field.stqe_next; \ if ((curelm->field.stqe_next = \ curelm->field.stqe_next->field.stqe_next) == NULL) \ (head)->stqh_last = &(curelm)->field.stqe_next; \ } \ } while (/*CONSTCOND*/0) #define PMDK_STAILQ_FOREACH(var, head, field) \ for ((var) = ((head)->stqh_first); \ (var); \ (var) = ((var)->field.stqe_next)) #define PMDK_STAILQ_CONCAT(head1, head2) do { \ if (!PMDK_STAILQ_EMPTY((head2))) { \ *(head1)->stqh_last = (head2)->stqh_first; \ (head1)->stqh_last = (head2)->stqh_last; \ PMDK_STAILQ_INIT((head2)); \ } \ } while (/*CONSTCOND*/0) /* * Singly-linked Tail queue access methods. 
*/ #define PMDK_STAILQ_EMPTY(head) ((head)->stqh_first == NULL) #define PMDK_STAILQ_FIRST(head) ((head)->stqh_first) #define PMDK_STAILQ_NEXT(elm, field) ((elm)->field.stqe_next) /* * Simple queue definitions. */ #define PMDK_SIMPLEQ_HEAD(name, type) \ struct name { \ struct type *sqh_first; /* first element */ \ struct type **sqh_last; /* addr of last next element */ \ } #define PMDK_SIMPLEQ_HEAD_INITIALIZER(head) \ { NULL, &(head).sqh_first } #define PMDK_SIMPLEQ_ENTRY(type) \ struct { \ struct type *sqe_next; /* next element */ \ } /* * Simple queue functions. */ #define PMDK_SIMPLEQ_INIT(head) do { \ (head)->sqh_first = NULL; \ (head)->sqh_last = &(head)->sqh_first; \ } while (/*CONSTCOND*/0) #define PMDK_SIMPLEQ_INSERT_HEAD(head, elm, field) do { \ if (((elm)->field.sqe_next = (head)->sqh_first) == NULL) \ (head)->sqh_last = &(elm)->field.sqe_next; \ (head)->sqh_first = (elm); \ } while (/*CONSTCOND*/0) #define PMDK_SIMPLEQ_INSERT_TAIL(head, elm, field) do { \ (elm)->field.sqe_next = NULL; \ *(head)->sqh_last = (elm); \ (head)->sqh_last = &(elm)->field.sqe_next; \ } while (/*CONSTCOND*/0) #define PMDK_SIMPLEQ_INSERT_AFTER(head, listelm, elm, field) do { \ if (((elm)->field.sqe_next = (listelm)->field.sqe_next) == NULL)\ (head)->sqh_last = &(elm)->field.sqe_next; \ (listelm)->field.sqe_next = (elm); \ } while (/*CONSTCOND*/0) #define PMDK_SIMPLEQ_REMOVE_HEAD(head, field) do { \ if (((head)->sqh_first = (head)->sqh_first->field.sqe_next) == NULL) \ (head)->sqh_last = &(head)->sqh_first; \ } while (/*CONSTCOND*/0) #define PMDK_SIMPLEQ_REMOVE(head, elm, type, field) do { \ if ((head)->sqh_first == (elm)) { \ PMDK_SIMPLEQ_REMOVE_HEAD((head), field); \ } else { \ struct type *curelm = (head)->sqh_first; \ while (curelm->field.sqe_next != (elm)) \ curelm = curelm->field.sqe_next; \ if ((curelm->field.sqe_next = \ curelm->field.sqe_next->field.sqe_next) == NULL) \ (head)->sqh_last = &(curelm)->field.sqe_next; \ } \ } while (/*CONSTCOND*/0) #define PMDK_SIMPLEQ_FOREACH(var, head, field) \ for ((var) = ((head)->sqh_first); \ (var); \ (var) = ((var)->field.sqe_next)) /* * Simple queue access methods. */ #define PMDK_SIMPLEQ_EMPTY(head) ((head)->sqh_first == NULL) #define PMDK_SIMPLEQ_FIRST(head) ((head)->sqh_first) #define PMDK_SIMPLEQ_NEXT(elm, field) ((elm)->field.sqe_next) /* * Tail queue definitions. */ #define PMDK__TAILQ_HEAD(name, type, qual) \ struct name { \ qual type *tqh_first; /* first element */ \ qual type *qual *tqh_last; /* addr of last next element */ \ } #define PMDK_TAILQ_HEAD(name, type) PMDK__TAILQ_HEAD(name, struct type,) #define PMDK_TAILQ_HEAD_INITIALIZER(head) \ { NULL, &(head).tqh_first } #define PMDK__TAILQ_ENTRY(type, qual) \ struct { \ qual type *tqe_next; /* next element */ \ qual type *qual *tqe_prev; /* address of previous next element */\ } #define PMDK_TAILQ_ENTRY(type) PMDK__TAILQ_ENTRY(struct type,) /* * Tail queue functions. 
*/ #define PMDK_TAILQ_INIT(head) do { \ (head)->tqh_first = NULL; \ (head)->tqh_last = &(head)->tqh_first; \ } while (/*CONSTCOND*/0) #define PMDK_TAILQ_INSERT_HEAD(head, elm, field) do { \ if (((elm)->field.tqe_next = (head)->tqh_first) != NULL) \ (head)->tqh_first->field.tqe_prev = \ &(elm)->field.tqe_next; \ else \ (head)->tqh_last = &(elm)->field.tqe_next; \ (head)->tqh_first = (elm); \ (elm)->field.tqe_prev = &(head)->tqh_first; \ } while (/*CONSTCOND*/0) #define PMDK_TAILQ_INSERT_TAIL(head, elm, field) do { \ (elm)->field.tqe_next = NULL; \ (elm)->field.tqe_prev = (head)->tqh_last; \ *(head)->tqh_last = (elm); \ (head)->tqh_last = &(elm)->field.tqe_next; \ } while (/*CONSTCOND*/0) #define PMDK_TAILQ_INSERT_AFTER(head, listelm, elm, field) do { \ if (((elm)->field.tqe_next = (listelm)->field.tqe_next) != NULL)\ (elm)->field.tqe_next->field.tqe_prev = \ &(elm)->field.tqe_next; \ else \ (head)->tqh_last = &(elm)->field.tqe_next; \ (listelm)->field.tqe_next = (elm); \ (elm)->field.tqe_prev = &(listelm)->field.tqe_next; \ } while (/*CONSTCOND*/0) #define PMDK_TAILQ_INSERT_BEFORE(listelm, elm, field) do { \ (elm)->field.tqe_prev = (listelm)->field.tqe_prev; \ (elm)->field.tqe_next = (listelm); \ *(listelm)->field.tqe_prev = (elm); \ (listelm)->field.tqe_prev = &(elm)->field.tqe_next; \ } while (/*CONSTCOND*/0) #define PMDK_TAILQ_REMOVE(head, elm, field) do { \ ANALYZER_ASSERT((elm) != NULL); \ if (((elm)->field.tqe_next) != NULL) \ (elm)->field.tqe_next->field.tqe_prev = \ (elm)->field.tqe_prev; \ else \ (head)->tqh_last = (elm)->field.tqe_prev; \ *(elm)->field.tqe_prev = (elm)->field.tqe_next; \ } while (/*CONSTCOND*/0) #define PMDK_TAILQ_FOREACH(var, head, field) \ for ((var) = ((head)->tqh_first); \ (var); \ (var) = ((var)->field.tqe_next)) #define PMDK_TAILQ_FOREACH_REVERSE(var, head, headname, field) \ for ((var) = (*(((struct headname *)((head)->tqh_last))->tqh_last)); \ (var); \ (var) = (*(((struct headname *)((var)->field.tqe_prev))->tqh_last))) #define PMDK_TAILQ_CONCAT(head1, head2, field) do { \ if (!PMDK_TAILQ_EMPTY(head2)) { \ *(head1)->tqh_last = (head2)->tqh_first; \ (head2)->tqh_first->field.tqe_prev = (head1)->tqh_last; \ (head1)->tqh_last = (head2)->tqh_last; \ PMDK_TAILQ_INIT((head2)); \ } \ } while (/*CONSTCOND*/0) /* * Tail queue access methods. */ #define PMDK_TAILQ_EMPTY(head) ((head)->tqh_first == NULL) #define PMDK_TAILQ_FIRST(head) ((head)->tqh_first) #define PMDK_TAILQ_NEXT(elm, field) ((elm)->field.tqe_next) #define PMDK_TAILQ_LAST(head, headname) \ (*(((struct headname *)((head)->tqh_last))->tqh_last)) #define PMDK_TAILQ_PREV(elm, headname, field) \ (*(((struct headname *)((elm)->field.tqe_prev))->tqh_last)) /* * Circular queue definitions. */ #define PMDK_CIRCLEQ_HEAD(name, type) \ struct name { \ struct type *cqh_first; /* first element */ \ struct type *cqh_last; /* last element */ \ } #define PMDK_CIRCLEQ_HEAD_INITIALIZER(head) \ { (void *)&(head), (void *)&(head) } #define PMDK_CIRCLEQ_ENTRY(type) \ struct { \ struct type *cqe_next; /* next element */ \ struct type *cqe_prev; /* previous element */ \ } /* * Circular queue functions. 
*/ #define PMDK_CIRCLEQ_INIT(head) do { \ PMDK__CAST_AND_ASSIGN((head)->cqh_first, (head)); \ PMDK__CAST_AND_ASSIGN((head)->cqh_last, (head)); \ } while (/*CONSTCOND*/0) #define PMDK_CIRCLEQ_INSERT_AFTER(head, listelm, elm, field) do { \ (elm)->field.cqe_next = (listelm)->field.cqe_next; \ (elm)->field.cqe_prev = (listelm); \ if ((listelm)->field.cqe_next == (void *)(head)) \ (head)->cqh_last = (elm); \ else \ (listelm)->field.cqe_next->field.cqe_prev = (elm); \ (listelm)->field.cqe_next = (elm); \ } while (/*CONSTCOND*/0) #define PMDK_CIRCLEQ_INSERT_BEFORE(head, listelm, elm, field) do { \ (elm)->field.cqe_next = (listelm); \ (elm)->field.cqe_prev = (listelm)->field.cqe_prev; \ if ((listelm)->field.cqe_prev == (void *)(head)) \ (head)->cqh_first = (elm); \ else \ (listelm)->field.cqe_prev->field.cqe_next = (elm); \ (listelm)->field.cqe_prev = (elm); \ } while (/*CONSTCOND*/0) #define PMDK_CIRCLEQ_INSERT_HEAD(head, elm, field) do { \ (elm)->field.cqe_next = (head)->cqh_first; \ (elm)->field.cqe_prev = (void *)(head); \ if ((head)->cqh_last == (void *)(head)) \ (head)->cqh_last = (elm); \ else \ (head)->cqh_first->field.cqe_prev = (elm); \ (head)->cqh_first = (elm); \ } while (/*CONSTCOND*/0) #define PMDK_CIRCLEQ_INSERT_TAIL(head, elm, field) do { \ PMDK__CAST_AND_ASSIGN((elm)->field.cqe_next, (head)); \ (elm)->field.cqe_prev = (head)->cqh_last; \ if ((head)->cqh_first == (void *)(head)) \ (head)->cqh_first = (elm); \ else \ (head)->cqh_last->field.cqe_next = (elm); \ (head)->cqh_last = (elm); \ } while (/*CONSTCOND*/0) #define PMDK_CIRCLEQ_REMOVE(head, elm, field) do { \ if ((elm)->field.cqe_next == (void *)(head)) \ (head)->cqh_last = (elm)->field.cqe_prev; \ else \ (elm)->field.cqe_next->field.cqe_prev = \ (elm)->field.cqe_prev; \ if ((elm)->field.cqe_prev == (void *)(head)) \ (head)->cqh_first = (elm)->field.cqe_next; \ else \ (elm)->field.cqe_prev->field.cqe_next = \ (elm)->field.cqe_next; \ } while (/*CONSTCOND*/0) #define PMDK_CIRCLEQ_FOREACH(var, head, field) \ for ((var) = ((head)->cqh_first); \ (var) != (const void *)(head); \ (var) = ((var)->field.cqe_next)) #define PMDK_CIRCLEQ_FOREACH_REVERSE(var, head, field) \ for ((var) = ((head)->cqh_last); \ (var) != (const void *)(head); \ (var) = ((var)->field.cqe_prev)) /* * Circular queue access methods. */ #define PMDK_CIRCLEQ_EMPTY(head) ((head)->cqh_first == (void *)(head)) #define PMDK_CIRCLEQ_FIRST(head) ((head)->cqh_first) #define PMDK_CIRCLEQ_LAST(head) ((head)->cqh_last) #define PMDK_CIRCLEQ_NEXT(elm, field) ((elm)->field.cqe_next) #define PMDK_CIRCLEQ_PREV(elm, field) ((elm)->field.cqe_prev) #define PMDK_CIRCLEQ_LOOP_NEXT(head, elm, field) \ (((elm)->field.cqe_next == (void *)(head)) \ ? ((head)->cqh_first) \ : ((elm)->field.cqe_next)) #define PMDK_CIRCLEQ_LOOP_PREV(head, elm, field) \ (((elm)->field.cqe_prev == (void *)(head)) \ ? ((head)->cqh_last) \ : ((elm)->field.cqe_prev)) /* * Sorted queue functions. 
*/ #define PMDK_SORTEDQ_HEAD(name, type) PMDK_CIRCLEQ_HEAD(name, type) #define PMDK_SORTEDQ_HEAD_INITIALIZER(head) PMDK_CIRCLEQ_HEAD_INITIALIZER(head) #define PMDK_SORTEDQ_ENTRY(type) PMDK_CIRCLEQ_ENTRY(type) #define PMDK_SORTEDQ_INIT(head) PMDK_CIRCLEQ_INIT(head) #define PMDK_SORTEDQ_INSERT(head, elm, field, type, comparer) { \ type *_elm_it; \ for (_elm_it = (head)->cqh_first; \ ((_elm_it != (void *)(head)) && \ (comparer(_elm_it, (elm)) < 0)); \ _elm_it = _elm_it->field.cqe_next) \ /*NOTHING*/; \ if (_elm_it == (void *)(head)) \ PMDK_CIRCLEQ_INSERT_TAIL(head, elm, field); \ else \ PMDK_CIRCLEQ_INSERT_BEFORE(head, _elm_it, elm, field); \ } #define PMDK_SORTEDQ_REMOVE(head, elm, field) PMDK_CIRCLEQ_REMOVE(head, elm, field) #define PMDK_SORTEDQ_FOREACH(var, head, field) PMDK_CIRCLEQ_FOREACH(var, head, field) #define PMDK_SORTEDQ_FOREACH_REVERSE(var, head, field) \ PMDK_CIRCLEQ_FOREACH_REVERSE(var, head, field) /* * Sorted queue access methods. */ #define PMDK_SORTEDQ_EMPTY(head) PMDK_CIRCLEQ_EMPTY(head) #define PMDK_SORTEDQ_FIRST(head) PMDK_CIRCLEQ_FIRST(head) #define PMDK_SORTEDQ_LAST(head) PMDK_CIRCLEQ_LAST(head) #define PMDK_SORTEDQ_NEXT(elm, field) PMDK_CIRCLEQ_NEXT(elm, field) #define PMDK_SORTEDQ_PREV(elm, field) PMDK_CIRCLEQ_PREV(elm, field) #endif /* sys/queue.h */
22165
33.907087
85
h
null
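A note on the queue.h record above: each macro family is used by embedding the matching *_ENTRY linkage inside the element type and declaring a head with *_HEAD. Below is a minimal sketch of the tail-queue variant; it is illustrative only and not part of the original header, it assumes the header is reachable as "queue.h", and struct item, itemq and the printed values are hypothetical.

#include <stdio.h>
#include <stdlib.h>
#include "queue.h"

struct item {
    int value;
    PMDK_TAILQ_ENTRY(item) link;    /* embedded linkage required by the macros */
};

PMDK_TAILQ_HEAD(itemq, item);

int
main(void)
{
    struct itemq q;
    PMDK_TAILQ_INIT(&q);

    for (int i = 0; i < 3; ++i) {
        struct item *it = malloc(sizeof(*it));
        if (it == NULL)
            return 1;
        it->value = i;
        PMDK_TAILQ_INSERT_TAIL(&q, it, link);   /* O(1) append at the tail */
    }

    struct item *it;
    PMDK_TAILQ_FOREACH(it, &q, link)
        printf("%d\n", it->value);              /* prints 0, 1, 2 */

    while (!PMDK_TAILQ_EMPTY(&q)) {
        it = PMDK_TAILQ_FIRST(&q);
        PMDK_TAILQ_REMOVE(&q, it, link);
        free(it);
    }
    return 0;
}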
NearPMSW-main/nearpm/shadow/pmdk-sd/src/common/set.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2014-2019, Intel Corporation */ /* * Copyright (c) 2016, Microsoft Corporation. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * set.h -- internal definitions for set module */ #ifndef PMDK_SET_H #define PMDK_SET_H 1 #include <stddef.h> #include <stdint.h> #include <stdio.h> #include <sys/types.h> #include "out.h" #include "vec.h" #include "pool_hdr.h" #include "librpmem.h" #ifdef __cplusplus extern "C" { #endif /* * pool sets & replicas */ #define POOLSET_HDR_SIG "PMEMPOOLSET" #define POOLSET_HDR_SIG_LEN 11 /* does NOT include '\0' */ #define POOLSET_REPLICA_SIG "REPLICA" #define POOLSET_REPLICA_SIG_LEN 7 /* does NOT include '\0' */ #define POOLSET_OPTION_SIG "OPTION" #define POOLSET_OPTION_SIG_LEN 6 /* does NOT include '\0' */ /* pool set option flags */ enum pool_set_option_flag { OPTION_UNKNOWN = 0x0, OPTION_SINGLEHDR = 0x1, /* pool headers only in the first part */ OPTION_NOHDRS = 0x2, /* no pool headers, remote replicas only */ }; struct pool_set_option { const char *name; enum pool_set_option_flag flag; }; #define POOL_LOCAL 0 #define POOL_REMOTE 1 #define REPLICAS_DISABLED 0 #define REPLICAS_ENABLED 1 /* util_pool_open flags */ #define POOL_OPEN_COW 1 /* copy-on-write mode */ #define POOL_OPEN_IGNORE_SDS 2 /* ignore shutdown state */ #define POOL_OPEN_IGNORE_BAD_BLOCKS 4 /* ignore bad blocks */ #define POOL_OPEN_CHECK_BAD_BLOCKS 8 /* check bad blocks */ enum del_parts_mode { DO_NOT_DELETE_PARTS, /* do not delete part files */ DELETE_CREATED_PARTS, /* delete only newly created parts files */ DELETE_ALL_PARTS /* force delete all parts files */ }; struct pool_set_part { /* populated by a pool set file parser */ const char *path; size_t filesize; /* aligned to page size */ int fd; int flags; /* stores flags used when opening the file */ /* valid only if fd >= 0 */ int is_dev_dax; /* indicates if the part is on device dax */ size_t alignment; /* internal alignment (Device DAX only) */ int created; /* indicates newly created (zeroed) file */ /* util_poolset_open/create */ void *remote_hdr; /* allocated 
header for remote replica */ void *hdr; /* base address of header */ size_t hdrsize; /* size of the header mapping */ int hdr_map_sync; /* header mapped with MAP_SYNC */ void *addr; /* base address of the mapping */ size_t size; /* size of the mapping - page aligned */ int map_sync; /* part has been mapped with MAP_SYNC flag */ int rdonly; /* is set based on compat features, affects */ /* the whole poolset */ uuid_t uuid; int has_bad_blocks; /* part file contains bad blocks */ int sds_dirty_modified; /* sds dirty flag was set */ }; struct pool_set_directory { const char *path; size_t resvsize; /* size of the address space reservation */ }; struct remote_replica { void *rpp; /* RPMEMpool opaque handle */ char *node_addr; /* address of a remote node */ /* poolset descriptor is a pool set file name on a remote node */ char *pool_desc; /* descriptor of a poolset */ }; struct pool_replica { unsigned nparts; unsigned nallocated; unsigned nhdrs; /* should be 0, 1 or nparts */ size_t repsize; /* total size of all the parts (mappings) */ size_t resvsize; /* min size of the address space reservation */ int is_pmem; /* true if all the parts are in PMEM */ struct remote_replica *remote; /* not NULL if the replica */ /* is a remote one */ VEC(, struct pool_set_directory) directory; struct pool_set_part part[]; }; struct pool_set { char *path; /* path of the poolset file */ unsigned nreplicas; uuid_t uuid; int rdonly; int zeroed; /* true if all the parts are new files */ size_t poolsize; /* the smallest replica size */ int has_bad_blocks; /* pool set contains bad blocks */ int remote; /* true if contains a remote replica */ unsigned options; /* enabled pool set options */ int directory_based; size_t resvsize; unsigned next_id; unsigned next_directory_id; int ignore_sds; /* don't use shutdown state */ struct pool_replica *replica[]; }; struct part_file { int is_remote; /* * Pointer to the part file structure - * - not-NULL only for a local part file */ struct pool_set_part *part; /* * Pointer to the replica structure - * - not-NULL only for a remote replica */ struct remote_replica *remote; }; struct pool_attr { char signature[POOL_HDR_SIG_LEN]; /* pool signature */ uint32_t major; /* format major version number */ features_t features; /* features flags */ unsigned char poolset_uuid[POOL_HDR_UUID_LEN]; /* pool uuid */ unsigned char first_part_uuid[POOL_HDR_UUID_LEN]; /* first part uuid */ unsigned char prev_repl_uuid[POOL_HDR_UUID_LEN]; /* prev replica uuid */ unsigned char next_repl_uuid[POOL_HDR_UUID_LEN]; /* next replica uuid */ unsigned char arch_flags[POOL_HDR_ARCH_LEN]; /* arch flags */ }; /* get index of the (r)th replica */ static inline unsigned REPidx(const struct pool_set *set, unsigned r) { ASSERTne(set->nreplicas, 0); return r % set->nreplicas; } /* get index of the (r + 1)th replica */ static inline unsigned REPNidx(const struct pool_set *set, unsigned r) { ASSERTne(set->nreplicas, 0); return (r + 1) % set->nreplicas; } /* get index of the (r - 1)th replica */ static inline unsigned REPPidx(const struct pool_set *set, unsigned r) { ASSERTne(set->nreplicas, 0); return (set->nreplicas + r - 1) % set->nreplicas; } /* get index of the (r)th part */ static inline unsigned PARTidx(const struct pool_replica *rep, unsigned p) { ASSERTne(rep->nparts, 0); return p % rep->nparts; } /* get index of the (r + 1)th part */ static inline unsigned PARTNidx(const struct pool_replica *rep, unsigned p) { ASSERTne(rep->nparts, 0); return (p + 1) % rep->nparts; } /* get index of the (r - 1)th part */ static 
inline unsigned PARTPidx(const struct pool_replica *rep, unsigned p) { ASSERTne(rep->nparts, 0); return (rep->nparts + p - 1) % rep->nparts; } /* get index of the (r)th part */ static inline unsigned HDRidx(const struct pool_replica *rep, unsigned p) { ASSERTne(rep->nhdrs, 0); return p % rep->nhdrs; } /* get index of the (r + 1)th part */ static inline unsigned HDRNidx(const struct pool_replica *rep, unsigned p) { ASSERTne(rep->nhdrs, 0); return (p + 1) % rep->nhdrs; } /* get index of the (r - 1)th part */ static inline unsigned HDRPidx(const struct pool_replica *rep, unsigned p) { ASSERTne(rep->nhdrs, 0); return (rep->nhdrs + p - 1) % rep->nhdrs; } /* get (r)th replica */ static inline struct pool_replica * REP(const struct pool_set *set, unsigned r) { return set->replica[REPidx(set, r)]; } /* get (r + 1)th replica */ static inline struct pool_replica * REPN(const struct pool_set *set, unsigned r) { return set->replica[REPNidx(set, r)]; } /* get (r - 1)th replica */ static inline struct pool_replica * REPP(const struct pool_set *set, unsigned r) { return set->replica[REPPidx(set, r)]; } /* get (p)th part */ static inline struct pool_set_part * PART(struct pool_replica *rep, unsigned p) { return &rep->part[PARTidx(rep, p)]; } /* get (p + 1)th part */ static inline struct pool_set_part * PARTN(struct pool_replica *rep, unsigned p) { return &rep->part[PARTNidx(rep, p)]; } /* get (p - 1)th part */ static inline struct pool_set_part * PARTP(struct pool_replica *rep, unsigned p) { return &rep->part[PARTPidx(rep, p)]; } /* get (p)th header */ static inline struct pool_hdr * HDR(struct pool_replica *rep, unsigned p) { return (struct pool_hdr *)(rep->part[HDRidx(rep, p)].hdr); } /* get (p + 1)th header */ static inline struct pool_hdr * HDRN(struct pool_replica *rep, unsigned p) { return (struct pool_hdr *)(rep->part[HDRNidx(rep, p)].hdr); } /* get (p - 1)th header */ static inline struct pool_hdr * HDRP(struct pool_replica *rep, unsigned p) { return (struct pool_hdr *)(rep->part[HDRPidx(rep, p)].hdr); } extern int Prefault_at_open; extern int Prefault_at_create; extern int SDS_at_create; extern int Fallocate_at_create; extern int COW_at_open; int util_poolset_parse(struct pool_set **setp, const char *path, int fd); int util_poolset_read(struct pool_set **setp, const char *path); int util_poolset_create_set(struct pool_set **setp, const char *path, size_t poolsize, size_t minsize, int ignore_sds); int util_poolset_open(struct pool_set *set); void util_poolset_close(struct pool_set *set, enum del_parts_mode del); void util_poolset_free(struct pool_set *set); int util_poolset_chmod(struct pool_set *set, mode_t mode); void util_poolset_fdclose(struct pool_set *set); void util_poolset_fdclose_always(struct pool_set *set); int util_is_poolset_file(const char *path); int util_poolset_foreach_part_struct(struct pool_set *set, int (*cb)(struct part_file *pf, void *arg), void *arg); int util_poolset_foreach_part(const char *path, int (*cb)(struct part_file *pf, void *arg), void *arg); size_t util_poolset_size(const char *path); int util_replica_deep_common(const void *addr, size_t len, struct pool_set *set, unsigned replica_id, int flush); int util_replica_deep_persist(const void *addr, size_t len, struct pool_set *set, unsigned replica_id); int util_replica_deep_drain(const void *addr, size_t len, struct pool_set *set, unsigned replica_id); int util_pool_create(struct pool_set **setp, const char *path, size_t poolsize, size_t minsize, size_t minpartsize, const struct pool_attr *attr, unsigned *nlanes, int 
can_have_rep); int util_pool_create_uuids(struct pool_set **setp, const char *path, size_t poolsize, size_t minsize, size_t minpartsize, const struct pool_attr *attr, unsigned *nlanes, int can_have_rep, int remote); int util_part_open(struct pool_set_part *part, size_t minsize, int create_part); void util_part_fdclose(struct pool_set_part *part); int util_replica_open(struct pool_set *set, unsigned repidx, int flags); int util_replica_set_attr(struct pool_replica *rep, const struct rpmem_pool_attr *rattr); void util_pool_hdr2attr(struct pool_attr *attr, struct pool_hdr *hdr); void util_pool_attr2hdr(struct pool_hdr *hdr, const struct pool_attr *attr); int util_replica_close(struct pool_set *set, unsigned repidx); int util_map_part(struct pool_set_part *part, void *addr, size_t size, size_t offset, int flags, int rdonly); int util_unmap_part(struct pool_set_part *part); int util_unmap_parts(struct pool_replica *rep, unsigned start_index, unsigned end_index); int util_header_create(struct pool_set *set, unsigned repidx, unsigned partidx, const struct pool_attr *attr, int overwrite); int util_map_hdr(struct pool_set_part *part, int flags, int rdonly); void util_unmap_hdr(struct pool_set_part *part); int util_pool_has_device_dax(struct pool_set *set); int util_pool_open_nocheck(struct pool_set *set, unsigned flags); int util_pool_open(struct pool_set **setp, const char *path, size_t minpartsize, const struct pool_attr *attr, unsigned *nlanes, void *addr, unsigned flags); int util_pool_open_remote(struct pool_set **setp, const char *path, int cow, size_t minpartsize, struct rpmem_pool_attr *rattr); void *util_pool_extend(struct pool_set *set, size_t *size, size_t minpartsize); void util_remote_init(void); void util_remote_fini(void); int util_update_remote_header(struct pool_set *set, unsigned repn); void util_remote_init_lock(void); void util_remote_destroy_lock(void); int util_pool_close_remote(RPMEMpool *rpp); void util_remote_unload(void); void util_replica_fdclose(struct pool_replica *rep); int util_poolset_remote_open(struct pool_replica *rep, unsigned repidx, size_t minsize, int create, void *pool_addr, size_t pool_size, unsigned *nlanes); int util_remote_load(void); int util_replica_open_remote(struct pool_set *set, unsigned repidx, int flags); int util_poolset_remote_replica_open(struct pool_set *set, unsigned repidx, size_t minsize, int create, unsigned *nlanes); int util_replica_close_local(struct pool_replica *rep, unsigned repn, enum del_parts_mode del); int util_replica_close_remote(struct pool_replica *rep, unsigned repn, enum del_parts_mode del); extern int (*Rpmem_persist)(RPMEMpool *rpp, size_t offset, size_t length, unsigned lane, unsigned flags); extern int (*Rpmem_deep_persist)(RPMEMpool *rpp, size_t offset, size_t length, unsigned lane); extern int (*Rpmem_read)(RPMEMpool *rpp, void *buff, size_t offset, size_t length, unsigned lane); extern int (*Rpmem_close)(RPMEMpool *rpp); extern int (*Rpmem_remove)(const char *target, const char *pool_set_name, int flags); extern int (*Rpmem_set_attr)(RPMEMpool *rpp, const struct rpmem_pool_attr *rattr); #ifdef __cplusplus } #endif #endif
14145
31.077098
80
h
null
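The REP*/PART*/HDR* inline helpers in the set.h record above all reduce to the same wrap-around (modular) indexing over replicas, parts and headers. The standalone sketch below mirrors that arithmetic without the pool_set structures; it is illustrative only, and next_idx, prev_idx and the sample values are hypothetical.

#include <stdio.h>

static unsigned
next_idx(unsigned n, unsigned i)    /* mirrors REPNidx()/PARTNidx()/HDRNidx() */
{
    return (i + 1) % n;
}

static unsigned
prev_idx(unsigned n, unsigned i)    /* mirrors REPPidx()/PARTPidx()/HDRPidx() */
{
    return (n + i - 1) % n;         /* adding n first avoids unsigned underflow at i == 0 */
}

int
main(void)
{
    unsigned nreplicas = 3;
    /* wrap-around: the replica after the last one is 0, the one before 0 is 2 */
    printf("%u %u\n", next_idx(nreplicas, 2), prev_idx(nreplicas, 0));
    return 0;
}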
NearPMSW-main/nearpm/shadow/pmdk-sd/src/common/shutdown_state.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2017-2020, Intel Corporation */ /* * shutdown_state.c -- unsafe shudown detection */ #include <string.h> #include <stdbool.h> #include <endian.h> #include "shutdown_state.h" #include "out.h" #include "util.h" #include "os_deep.h" #include "set.h" #include "libpmem2.h" #include "badblocks.h" #include "../libpmem2/pmem2_utils.h" #define FLUSH_SDS(sds, rep) \ if ((rep) != NULL) os_part_deep_common(rep, 0, sds, sizeof(*(sds)), 1) /* * shutdown_state_checksum -- (internal) counts SDS checksum and flush it */ static void shutdown_state_checksum(struct shutdown_state *sds, struct pool_replica *rep) { LOG(3, "sds %p", sds); util_checksum(sds, sizeof(*sds), &sds->checksum, 1, 0); FLUSH_SDS(sds, rep); } /* * shutdown_state_init -- initializes shutdown_state struct */ int shutdown_state_init(struct shutdown_state *sds, struct pool_replica *rep) { /* check if we didn't change size of shutdown_state accidentally */ COMPILE_ERROR_ON(sizeof(struct shutdown_state) != 64); LOG(3, "sds %p", sds); memset(sds, 0, sizeof(*sds)); shutdown_state_checksum(sds, rep); return 0; } /* * shutdown_state_add_part -- adds file uuid and usc to shutdown_state struct * * if path does not exist it will fail which does NOT mean shutdown failure */ int shutdown_state_add_part(struct shutdown_state *sds, int fd, struct pool_replica *rep) { LOG(3, "sds %p, fd %d", sds, fd); size_t len = 0; char *uid; uint64_t usc; struct pmem2_source *src; if (pmem2_source_from_fd(&src, fd)) return 1; int ret = pmem2_source_device_usc(src, &usc); if (ret == PMEM2_E_NOSUPP) { usc = 0; } else if (ret != 0) { if (ret == -EPERM) { /* overwrite error message */ ERR( "Cannot read unsafe shutdown count. For more information please check https://github.com/pmem/pmdk/issues/4207"); } LOG(2, "cannot read unsafe shutdown count for %d", fd); goto err; } ret = pmem2_source_device_id(src, NULL, &len); if (ret != PMEM2_E_NOSUPP && ret != 0) { ERR("cannot read uuid of %d", fd); goto err; } len += 4 - len % 4; uid = Zalloc(len); if (uid == NULL) { ERR("!Zalloc"); goto err; } ret = pmem2_source_device_id(src, uid, &len); if (ret != PMEM2_E_NOSUPP && ret != 0) { ERR("cannot read uuid of %d", fd); Free(uid); goto err; } sds->usc = htole64(le64toh(sds->usc) + usc); uint64_t tmp; util_checksum(uid, len, &tmp, 1, 0); sds->uuid = htole64(le64toh(sds->uuid) + tmp); FLUSH_SDS(sds, rep); Free(uid); pmem2_source_delete(&src); shutdown_state_checksum(sds, rep); return 0; err: pmem2_source_delete(&src); return 1; } /* * shutdown_state_set_dirty -- sets dirty pool flag */ void shutdown_state_set_dirty(struct shutdown_state *sds, struct pool_replica *rep) { LOG(3, "sds %p", sds); sds->dirty = 1; rep->part[0].sds_dirty_modified = 1; FLUSH_SDS(sds, rep); shutdown_state_checksum(sds, rep); } /* * shutdown_state_clear_dirty -- clears dirty pool flag */ void shutdown_state_clear_dirty(struct shutdown_state *sds, struct pool_replica *rep) { LOG(3, "sds %p", sds); struct pool_set_part part = rep->part[0]; /* * If a dirty flag was set in previous program execution it should be * preserved as it stores information about potential ADR failure. 
*/ if (part.sds_dirty_modified != 1) return; sds->dirty = 0; part.sds_dirty_modified = 0; FLUSH_SDS(sds, rep); shutdown_state_checksum(sds, rep); } /* * shutdown_state_reinit -- (internal) reinitializes shutdown_state struct */ static void shutdown_state_reinit(struct shutdown_state *curr_sds, struct shutdown_state *pool_sds, struct pool_replica *rep) { LOG(3, "curr_sds %p, pool_sds %p", curr_sds, pool_sds); shutdown_state_init(pool_sds, rep); pool_sds->uuid = htole64(curr_sds->uuid); pool_sds->usc = htole64(curr_sds->usc); pool_sds->dirty = 0; FLUSH_SDS(pool_sds, rep); shutdown_state_checksum(pool_sds, rep); } /* * shutdown_state_check -- compares and fixes shutdown state */ int shutdown_state_check(struct shutdown_state *curr_sds, struct shutdown_state *pool_sds, struct pool_replica *rep) { LOG(3, "curr_sds %p, pool_sds %p", curr_sds, pool_sds); if (util_is_zeroed(pool_sds, sizeof(*pool_sds)) && !util_is_zeroed(curr_sds, sizeof(*curr_sds))) { shutdown_state_reinit(curr_sds, pool_sds, rep); return 0; } bool is_uuid_usc_correct = le64toh(pool_sds->usc) == le64toh(curr_sds->usc) && le64toh(pool_sds->uuid) == le64toh(curr_sds->uuid); bool is_checksum_correct = util_checksum(pool_sds, sizeof(*pool_sds), &pool_sds->checksum, 0, 0); int dirty = pool_sds->dirty; if (!is_checksum_correct) { /* the program was killed during opening or closing the pool */ LOG(2, "incorrect checksum - SDS will be reinitialized"); shutdown_state_reinit(curr_sds, pool_sds, rep); return 0; } if (is_uuid_usc_correct) { if (dirty == 0) return 0; /* * the program was killed when the pool was opened * but there wasn't an ADR failure */ LOG(2, "the pool was not closed - SDS will be reinitialized"); shutdown_state_reinit(curr_sds, pool_sds, rep); return 0; } if (dirty == 0) { /* an ADR failure but the pool was closed */ LOG(2, "an ADR failure was detected but the pool was closed - SDS will be reinitialized"); shutdown_state_reinit(curr_sds, pool_sds, rep); return 0; } /* an ADR failure - the pool might be corrupted */ ERR("an ADR failure was detected, the pool might be corrupted"); return 1; }
5491
22.370213
117
c
null
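shutdown_state_check() in the record above decides what to do from three observations: whether the stored SDS checksum verifies, whether the recorded uuid/usc pair matches the values read from the current devices, and whether the dirty flag is set. The sketch below condenses that decision into a standalone truth table; it is illustrative only, not part of the original file, and sds_verdict is a hypothetical name.

#include <stdbool.h>

/*
 * Returns 1 when the pool must be treated as possibly corrupted, 0 when it is
 * safe to reinitialize the SDS and continue. The zero-initialized-SDS case
 * handled at the top of shutdown_state_check() is assumed to be done already.
 */
static int
sds_verdict(bool checksum_ok, bool uuid_usc_match, bool dirty)
{
    if (!checksum_ok)
        return 0;       /* killed while opening/closing - reinitialize */
    if (uuid_usc_match)
        return 0;       /* same devices, no ADR failure */
    if (!dirty)
        return 0;       /* ADR failure, but the pool was closed cleanly */
    return 1;           /* ADR failure while the pool was open */
}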
NearPMSW-main/nearpm/shadow/pmdk-sd/src/common/mmap.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2014-2020, Intel Corporation */ /* * mmap.h -- internal definitions for mmap module */ #ifndef PMDK_MMAP_H #define PMDK_MMAP_H 1 #include <stddef.h> #include <stdint.h> #include <sys/stat.h> #include <sys/types.h> #include <unistd.h> #include <errno.h> #include "out.h" #include "queue.h" #include "os.h" #ifdef __cplusplus extern "C" { #endif extern int Mmap_no_random; extern void *Mmap_hint; extern char *Mmap_mapfile; void *util_map_sync(void *addr, size_t len, int proto, int flags, int fd, os_off_t offset, int *map_sync); void *util_map(int fd, os_off_t off, size_t len, int flags, int rdonly, size_t req_align, int *map_sync); int util_unmap(void *addr, size_t len); #ifdef __FreeBSD__ #define MAP_NORESERVE 0 #define OS_MAPFILE "/proc/curproc/map" #else #define OS_MAPFILE "/proc/self/maps" #endif #ifndef MAP_SYNC #define MAP_SYNC 0x80000 #endif #ifndef MAP_SHARED_VALIDATE #define MAP_SHARED_VALIDATE 0x03 #endif /* * macros for micromanaging range protections for the debug version */ #ifdef DEBUG #define RANGE(addr, len, is_dev_dax, type) do {\ if (!is_dev_dax) ASSERT(util_range_##type(addr, len) >= 0);\ } while (0) #else #define RANGE(addr, len, is_dev_dax, type) do {} while (0) #endif #define RANGE_RO(addr, len, is_dev_dax) RANGE(addr, len, is_dev_dax, ro) #define RANGE_RW(addr, len, is_dev_dax) RANGE(addr, len, is_dev_dax, rw) #define RANGE_NONE(addr, len, is_dev_dax) RANGE(addr, len, is_dev_dax, none) /* pmem mapping type */ enum pmem_map_type { PMEM_DEV_DAX, /* device dax */ PMEM_MAP_SYNC, /* mapping with MAP_SYNC flag on dax fs */ MAX_PMEM_TYPE }; /* * this structure tracks the file mappings outstanding per file handle */ struct map_tracker { PMDK_SORTEDQ_ENTRY(map_tracker) entry; uintptr_t base_addr; uintptr_t end_addr; unsigned region_id; enum pmem_map_type type; #ifdef _WIN32 /* Windows-specific data */ HANDLE FileHandle; HANDLE FileMappingHandle; DWORD Access; os_off_t Offset; size_t FileLen; #endif }; void util_mmap_init(void); void util_mmap_fini(void); int util_range_ro(void *addr, size_t len); int util_range_rw(void *addr, size_t len); int util_range_none(void *addr, size_t len); char *util_map_hint_unused(void *minaddr, size_t len, size_t align); char *util_map_hint(size_t len, size_t req_align); #define KILOBYTE ((uintptr_t)1 << 10) #define MEGABYTE ((uintptr_t)1 << 20) #define GIGABYTE ((uintptr_t)1 << 30) /* * util_map_hint_align -- choose the desired mapping alignment * * The smallest supported alignment is 2 megabytes because of the object * alignment requirements. Changing this value to 4 kilobytes constitues a * layout change. * * Use 1GB page alignment only if the mapping length is at least * twice as big as the page size. */ static inline size_t util_map_hint_align(size_t len, size_t req_align) { size_t align = 2 * MEGABYTE; if (req_align) align = req_align; else if (len >= 2 * GIGABYTE) align = GIGABYTE; return align; } int util_range_register(const void *addr, size_t len, const char *path, enum pmem_map_type type); int util_range_unregister(const void *addr, size_t len); struct map_tracker *util_range_find(uintptr_t addr, size_t len); int util_range_is_pmem(const void *addr, size_t len); #ifdef __cplusplus } #endif #endif
3328
22.27972
76
h
null
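util_map_hint_align() in the mmap.h record above picks a 2 MiB alignment by default, switches to 1 GiB for mappings of at least 2 GiB, and honors an explicit request unconditionally. The sketch below spells out those three cases; it is illustrative only (assuming a 64-bit build and that the header is reachable as "mmap.h") and is not part of the original header.

#include <assert.h>
#include "mmap.h"

static void
map_hint_align_examples(void)
{
    /* default: 2 MiB alignment */
    assert(util_map_hint_align(8 * MEGABYTE, 0) == 2 * MEGABYTE);
    /* large mappings (>= 2 GiB) get 1 GiB alignment */
    assert(util_map_hint_align(4 * GIGABYTE, 0) == GIGABYTE);
    /* an explicit request always wins */
    assert(util_map_hint_align(4 * GIGABYTE, 4 * KILOBYTE) == 4 * KILOBYTE);
}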
NearPMSW-main/nearpm/shadow/pmdk-sd/src/common/ravl.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2018-2020, Intel Corporation */ /* * ravl.c -- implementation of a RAVL tree * https://sidsen.azurewebsites.net//papers/ravl-trees-journal.pdf */ #include <stdlib.h> #include <string.h> #include <errno.h> #include "out.h" #include "ravl.h" #include "alloc.h" #define RAVL_DEFAULT_DATA_SIZE (sizeof(void *)) enum ravl_slot_type { RAVL_LEFT, RAVL_RIGHT, MAX_SLOTS, RAVL_ROOT }; struct ravl_node { struct ravl_node *parent; struct ravl_node *slots[MAX_SLOTS]; int32_t rank; /* cannot be greater than height of the subtree */ int32_t pointer_based; char data[]; }; struct ravl { struct ravl_node *root; ravl_compare *compare; size_t data_size; }; /* * ravl_new -- creates a new ravl tree instance */ struct ravl * ravl_new_sized(ravl_compare *compare, size_t data_size) { struct ravl *r = Malloc(sizeof(*r)); if (r == NULL) { ERR("!Malloc"); return r; } r->compare = compare; r->root = NULL; r->data_size = data_size; return r; } /* * ravl_new -- creates a new tree that stores data pointers */ struct ravl * ravl_new(ravl_compare *compare) { return ravl_new_sized(compare, RAVL_DEFAULT_DATA_SIZE); } /* * ravl_clear_node -- (internal) recursively clears the given subtree, * calls callback in an in-order fashion. Optionally frees the given node. */ static void ravl_foreach_node(struct ravl_node *n, ravl_cb cb, void *arg, int free_node) { if (n == NULL) return; ravl_foreach_node(n->slots[RAVL_LEFT], cb, arg, free_node); if (cb) cb((void *)n->data, arg); ravl_foreach_node(n->slots[RAVL_RIGHT], cb, arg, free_node); if (free_node) Free(n); } /* * ravl_clear -- clears the entire tree, starting from the root */ void ravl_clear(struct ravl *ravl) { ravl_foreach_node(ravl->root, NULL, NULL, 1); ravl->root = NULL; } /* * ravl_delete_cb -- clears and deletes the given ravl instance, calls callback */ void ravl_delete_cb(struct ravl *ravl, ravl_cb cb, void *arg) { ravl_foreach_node(ravl->root, cb, arg, 1); Free(ravl); } /* * ravl_delete -- clears and deletes the given ravl instance */ void ravl_delete(struct ravl *ravl) { ravl_delete_cb(ravl, NULL, NULL); } /* * ravl_foreach -- traverses the entire tree, calling callback for every node */ void ravl_foreach(struct ravl *ravl, ravl_cb cb, void *arg) { ravl_foreach_node(ravl->root, cb, arg, 0); } /* * ravl_empty -- checks whether the given tree is empty */ int ravl_empty(struct ravl *ravl) { return ravl->root == NULL; } /* * ravl_node_insert_constructor -- node data constructor for ravl_insert */ static void ravl_node_insert_constructor(void *data, size_t data_size, const void *arg) { /* copy only the 'arg' pointer */ memcpy(data, &arg, sizeof(arg)); } /* * ravl_node_copy_constructor -- node data constructor for ravl_emplace_copy */ static void ravl_node_copy_constructor(void *data, size_t data_size, const void *arg) { memcpy(data, arg, data_size); } /* * ravl_new_node -- (internal) allocates and initializes a new node */ static struct ravl_node * ravl_new_node(struct ravl *ravl, ravl_constr constr, const void *arg) { struct ravl_node *n = Malloc(sizeof(*n) + ravl->data_size); if (n == NULL) { ERR("!Malloc"); return n; } n->parent = NULL; n->slots[RAVL_LEFT] = NULL; n->slots[RAVL_RIGHT] = NULL; n->rank = 0; n->pointer_based = constr == ravl_node_insert_constructor; constr(n->data, ravl->data_size, arg); return n; } /* * ravl_slot_opposite -- (internal) returns the opposite slot type, cannot be * called for root type */ static enum ravl_slot_type ravl_slot_opposite(enum ravl_slot_type t) { ASSERTne(t, RAVL_ROOT); return t 
== RAVL_LEFT ? RAVL_RIGHT : RAVL_LEFT; } /* * ravl_node_slot_type -- (internal) returns the type of the given node: * left child, right child or root */ static enum ravl_slot_type ravl_node_slot_type(struct ravl_node *n) { if (n->parent == NULL) return RAVL_ROOT; return n->parent->slots[RAVL_LEFT] == n ? RAVL_LEFT : RAVL_RIGHT; } /* * ravl_node_sibling -- (internal) returns the sibling of the given node, * NULL if the node is root (has no parent) */ static struct ravl_node * ravl_node_sibling(struct ravl_node *n) { enum ravl_slot_type t = ravl_node_slot_type(n); if (t == RAVL_ROOT) return NULL; return n->parent->slots[t == RAVL_LEFT ? RAVL_RIGHT : RAVL_LEFT]; } /* * ravl_node_ref -- (internal) returns the pointer to the memory location in * which the given node resides */ static struct ravl_node ** ravl_node_ref(struct ravl *ravl, struct ravl_node *n) { enum ravl_slot_type t = ravl_node_slot_type(n); return t == RAVL_ROOT ? &ravl->root : &n->parent->slots[t]; } /* * ravl_rotate -- (internal) performs a rotation around a given node * * The node n swaps place with its parent. If n is right child, parent becomes * the left child of n, otherwise parent becomes right child of n. */ static void ravl_rotate(struct ravl *ravl, struct ravl_node *n) { ASSERTne(n->parent, NULL); struct ravl_node *p = n->parent; struct ravl_node **pref = ravl_node_ref(ravl, p); enum ravl_slot_type t = ravl_node_slot_type(n); enum ravl_slot_type t_opposite = ravl_slot_opposite(t); n->parent = p->parent; p->parent = n; *pref = n; if ((p->slots[t] = n->slots[t_opposite]) != NULL) p->slots[t]->parent = p; n->slots[t_opposite] = p; } /* * ravl_node_rank -- (internal) returns the rank of the node * * For the purpose of balancing, NULL nodes have rank -1. */ static int ravl_node_rank(struct ravl_node *n) { return n == NULL ? -1 : n->rank; } /* * ravl_node_rank_difference_parent -- (internal) returns the rank different * between parent node p and its child n * * Every rank difference must be positive. * * Either of these can be NULL. */ static int ravl_node_rank_difference_parent(struct ravl_node *p, struct ravl_node *n) { return ravl_node_rank(p) - ravl_node_rank(n); } /* * ravl_node_rank_differenced - (internal) returns the rank difference between * parent and its child * * Can be used to check if a given node is an i-child. */ static int ravl_node_rank_difference(struct ravl_node *n) { return ravl_node_rank_difference_parent(n->parent, n); } /* * ravl_node_is_i_j -- (internal) checks if a given node is strictly i,j-node */ static int ravl_node_is_i_j(struct ravl_node *n, int i, int j) { return (ravl_node_rank_difference_parent(n, n->slots[RAVL_LEFT]) == i && ravl_node_rank_difference_parent(n, n->slots[RAVL_RIGHT]) == j); } /* * ravl_node_is -- (internal) checks if a given node is i,j-node or j,i-node */ static int ravl_node_is(struct ravl_node *n, int i, int j) { return ravl_node_is_i_j(n, i, j) || ravl_node_is_i_j(n, j, i); } /* * ravl_node_promote -- promotes a given node by increasing its rank */ static void ravl_node_promote(struct ravl_node *n) { n->rank += 1; } /* * ravl_node_promote -- demotes a given node by increasing its rank */ static void ravl_node_demote(struct ravl_node *n) { ASSERT(n->rank > 0); n->rank -= 1; } /* * ravl_balance -- balances the tree after insert * * This function must restore the invariant that every rank * difference is positive. 
*/ static void ravl_balance(struct ravl *ravl, struct ravl_node *n) { /* walk up the tree, promoting nodes */ while (n->parent && ravl_node_is(n->parent, 0, 1)) { ravl_node_promote(n->parent); n = n->parent; } /* * Either the rank rule holds or n is a 0-child whose sibling is an * i-child with i > 1. */ struct ravl_node *s = ravl_node_sibling(n); if (!(ravl_node_rank_difference(n) == 0 && ravl_node_rank_difference_parent(n->parent, s) > 1)) return; struct ravl_node *y = n->parent; /* if n is a left child, let z be n's right child and vice versa */ enum ravl_slot_type t = ravl_slot_opposite(ravl_node_slot_type(n)); struct ravl_node *z = n->slots[t]; if (z == NULL || ravl_node_rank_difference(z) == 2) { ravl_rotate(ravl, n); ravl_node_demote(y); } else if (ravl_node_rank_difference(z) == 1) { ravl_rotate(ravl, z); ravl_rotate(ravl, z); ravl_node_promote(z); ravl_node_demote(n); ravl_node_demote(y); } } /* * ravl_insert -- insert data into the tree */ int ravl_insert(struct ravl *ravl, const void *data) { return ravl_emplace(ravl, ravl_node_insert_constructor, data); } /* * ravl_insert -- copy construct data inside of a new tree node */ int ravl_emplace_copy(struct ravl *ravl, const void *data) { return ravl_emplace(ravl, ravl_node_copy_constructor, data); } /* * ravl_emplace -- construct data inside of a new tree node */ int ravl_emplace(struct ravl *ravl, ravl_constr constr, const void *arg) { LOG(6, NULL); struct ravl_node *n = ravl_new_node(ravl, constr, arg); if (n == NULL) return -1; /* walk down the tree and insert the new node into a missing slot */ struct ravl_node **dstp = &ravl->root; struct ravl_node *dst = NULL; while (*dstp != NULL) { dst = (*dstp); int cmp_result = ravl->compare(ravl_data(n), ravl_data(dst)); if (cmp_result == 0) goto error_duplicate; dstp = &dst->slots[cmp_result > 0]; } n->parent = dst; *dstp = n; ravl_balance(ravl, n); return 0; error_duplicate: errno = EEXIST; Free(n); return -1; } /* * ravl_node_type_most -- (internal) returns left-most or right-most node in * the subtree */ static struct ravl_node * ravl_node_type_most(struct ravl_node *n, enum ravl_slot_type t) { while (n->slots[t] != NULL) n = n->slots[t]; return n; } /* * ravl_node_cessor -- (internal) returns the successor or predecessor of the * node */ static struct ravl_node * ravl_node_cessor(struct ravl_node *n, enum ravl_slot_type t) { /* * If t child is present, we are looking for t-opposite-most node * in t child subtree */ if (n->slots[t]) return ravl_node_type_most(n->slots[t], ravl_slot_opposite(t)); /* otherwise get the first parent on the t path */ while (n->parent != NULL && n == n->parent->slots[t]) n = n->parent; return n->parent; } /* * ravl_node_successor -- (internal) returns node's successor * * It's the first node larger than n. */ static struct ravl_node * ravl_node_successor(struct ravl_node *n) { return ravl_node_cessor(n, RAVL_RIGHT); } /* * ravl_node_successor -- (internal) returns node's successor * * It's the first node smaller than n. */ static struct ravl_node * ravl_node_predecessor(struct ravl_node *n) { return ravl_node_cessor(n, RAVL_LEFT); } /* * ravl_predicate_holds -- (internal) verifies the given predicate for * the current node in the search path * * If the predicate holds for the given node or a node that can be directly * derived from it, returns 1. Otherwise returns 0. 
*/ static int ravl_predicate_holds(struct ravl *ravl, int result, struct ravl_node **ret, struct ravl_node *n, const void *data, enum ravl_predicate flags) { if (flags & RAVL_PREDICATE_EQUAL) { if (result == 0) { *ret = n; return 1; } } if (flags & RAVL_PREDICATE_GREATER) { if (result < 0) { /* data < n->data */ *ret = n; return 0; } else if (result == 0) { *ret = ravl_node_successor(n); return 1; } } if (flags & RAVL_PREDICATE_LESS) { if (result > 0) { /* data > n->data */ *ret = n; return 0; } else if (result == 0) { *ret = ravl_node_predecessor(n); return 1; } } return 0; } /* * ravl_find -- searches for the node in the tree */ struct ravl_node * ravl_find(struct ravl *ravl, const void *data, enum ravl_predicate flags) { LOG(6, NULL); struct ravl_node *r = NULL; struct ravl_node *n = ravl->root; while (n) { int result = ravl->compare(data, ravl_data(n)); if (ravl_predicate_holds(ravl, result, &r, n, data, flags)) return r; n = n->slots[result > 0]; } return r; } /* * ravl_remove -- removes the given node from the tree */ void ravl_remove(struct ravl *ravl, struct ravl_node *n) { LOG(6, NULL); if (n->slots[RAVL_LEFT] != NULL && n->slots[RAVL_RIGHT] != NULL) { /* if both children are present, remove the successor instead */ struct ravl_node *s = ravl_node_successor(n); memcpy(n->data, s->data, ravl->data_size); ravl_remove(ravl, s); } else { /* swap n with the child that may exist */ struct ravl_node *r = n->slots[RAVL_LEFT] ? n->slots[RAVL_LEFT] : n->slots[RAVL_RIGHT]; if (r != NULL) r->parent = n->parent; *ravl_node_ref(ravl, n) = r; Free(n); } } /* * ravl_data -- returns the data contained within the node */ void * ravl_data(struct ravl_node *node) { if (node->pointer_based) { void *data; memcpy(&data, node->data, sizeof(void *)); return data; } else { return (void *)node->data; } }
12600
20.801038
79
c
null
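In the pointer-based ravl.c API above (ravl_new/ravl_insert/ravl_find/ravl_remove/ravl_delete), each node stores the user pointer itself, so the comparator always receives user pointers on both sides. The usage sketch below is illustrative only, assumes the public declarations live in "ravl.h", and cmp_int_ptr plus the stored values are hypothetical.

#include "ravl.h"

static int
cmp_int_ptr(const void *lhs, const void *rhs)
{
    int l = *(const int *)lhs;
    int r = *(const int *)rhs;
    return (l > r) - (l < r);
}

static void
ravl_usage_sketch(void)
{
    static int values[] = {42, 7, 13};
    struct ravl *tree = ravl_new(cmp_int_ptr);
    if (tree == NULL)
        return;

    for (int i = 0; i < 3; ++i)
        ravl_insert(tree, &values[i]);  /* pointer-based: stores &values[i] */

    int key = 13;
    struct ravl_node *n = ravl_find(tree, &key, RAVL_PREDICATE_EQUAL);
    if (n != NULL)
        ravl_remove(tree, n);           /* unlinks and frees that node */

    ravl_delete(tree);                  /* frees the remaining nodes */
}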
NearPMSW-main/nearpm/shadow/pmdk-sd/src/common/vecq.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2018-2019, Intel Corporation */ /* * vecq.h -- vector queue (FIFO) interface */ #ifndef PMDK_VECQ_H #define PMDK_VECQ_H 1 #include <stddef.h> #include "util.h" #include "out.h" #include "alloc.h" #ifdef __cplusplus extern "C" { #endif #define VECQ_INIT_SIZE (64) #define VECQ(name, type)\ struct name {\ type *buffer;\ size_t capacity;\ size_t front;\ size_t back;\ } #define VECQ_INIT(vec) do {\ (vec)->buffer = NULL;\ (vec)->capacity = 0;\ (vec)->front = 0;\ (vec)->back = 0;\ } while (0) #define VECQ_REINIT(vec) do {\ VALGRIND_ANNOTATE_NEW_MEMORY((vec), sizeof(*vec));\ VALGRIND_ANNOTATE_NEW_MEMORY((vec)->buffer,\ (sizeof(*(vec)->buffer) * ((vec)->capacity)));\ (vec)->front = 0;\ (vec)->back = 0;\ } while (0) #define VECQ_FRONT_POS(vec)\ ((vec)->front & ((vec)->capacity - 1)) #define VECQ_BACK_POS(vec)\ ((vec)->back & ((vec)->capacity - 1)) #define VECQ_FRONT(vec)\ (vec)->buffer[VECQ_FRONT_POS(vec)] #define VECQ_BACK(vec)\ (vec)->buffer[VECQ_BACK_POS(vec)] #define VECQ_DEQUEUE(vec)\ ((vec)->buffer[(((vec)->front++) & ((vec)->capacity - 1))]) #define VECQ_SIZE(vec)\ ((vec)->back - (vec)->front) static inline int realloc_set(void **buf, size_t s) { void *tbuf = Realloc(*buf, s); if (tbuf == NULL) { ERR("!Realloc"); return -1; } *buf = tbuf; return 0; } #define VECQ_NCAPACITY(vec)\ ((vec)->capacity == 0 ? VECQ_INIT_SIZE : (vec)->capacity * 2) #define VECQ_GROW(vec)\ (realloc_set((void **)&(vec)->buffer,\ VECQ_NCAPACITY(vec) * sizeof(*(vec)->buffer)) ? -1 :\ (memcpy((vec)->buffer + (vec)->capacity, (vec)->buffer,\ VECQ_FRONT_POS(vec) * sizeof(*(vec)->buffer)),\ (vec)->front = VECQ_FRONT_POS(vec),\ (vec)->back = (vec)->front + (vec)->capacity,\ (vec)->capacity = VECQ_NCAPACITY(vec),\ 0\ )) #define VECQ_INSERT(vec, element)\ (VECQ_BACK(vec) = element, (vec)->back += 1, 0) #define VECQ_ENQUEUE(vec, element)\ ((vec)->capacity == VECQ_SIZE(vec) ?\ (VECQ_GROW(vec) == 0 ? VECQ_INSERT(vec, element) : -1) :\ VECQ_INSERT(vec, element)) #define VECQ_CAPACITY(vec)\ ((vec)->capacity) #define VECQ_FOREACH(el, vec)\ for (size_t _vec_i = 0;\ _vec_i < VECQ_SIZE(vec) &&\ (((el) = (vec)->buffer[_vec_i & ((vec)->capacity - 1)]), 1);\ ++_vec_i) #define VECQ_FOREACH_REVERSE(el, vec)\ for (size_t _vec_i = VECQ_SIZE(vec);\ _vec_i > 0 &&\ (((el) = (vec)->buffer[(_vec_i - 1) & ((vec)->capacity - 1)]), 1);\ --_vec_i) #define VECQ_CLEAR(vec) do {\ (vec)->front = 0;\ (vec)->back = 0;\ } while (0) #define VECQ_DELETE(vec) do {\ Free((vec)->buffer);\ (vec)->buffer = NULL;\ (vec)->capacity = 0;\ (vec)->front = 0;\ (vec)->back = 0;\ } while (0) #ifdef __cplusplus } #endif #endif /* PMDK_VECQ_H */
2731
20.178295
68
h
null
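The VECQ macros in the record above implement a growable ring buffer whose capacity is kept at a power of two, so front/back positions wrap with "& (capacity - 1)". The usage sketch below is illustrative only, assumes the header is reachable as "vecq.h", and int_fifo and the loop bound are hypothetical.

#include <stdio.h>
#include "vecq.h"

static void
vecq_usage_sketch(void)
{
    VECQ(int_fifo, int) q;
    VECQ_INIT(&q);

    for (int i = 0; i < 100; ++i) {
        if (VECQ_ENQUEUE(&q, i) != 0)   /* grows the backing buffer on demand */
            break;
    }

    while (VECQ_SIZE(&q) != 0)
        printf("%d\n", VECQ_DEQUEUE(&q));   /* pops in insertion order */

    VECQ_DELETE(&q);                    /* releases the backing buffer */
}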
NearPMSW-main/nearpm/shadow/pmdk-sd/src/common/os_deep_linux.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2017-2020, Intel Corporation */ /* * os_deep_linux.c -- Linux abstraction layer */ #define _GNU_SOURCE #include <inttypes.h> #include <fcntl.h> #include <sys/stat.h> #include "out.h" #include "os.h" #include "mmap.h" #include "file.h" #include "libpmem.h" #include "os_deep.h" #include "../libpmem2/deep_flush.h" /* * os_deep_type -- (internal) perform deep operation based on a pmem * mapping type */ static int os_deep_type(const struct map_tracker *mt, void *addr, size_t len) { LOG(15, "mt %p addr %p len %zu", mt, addr, len); switch (mt->type) { case PMEM_DEV_DAX: pmem_drain(); int ret = pmem2_deep_flush_write(mt->region_id); if (ret < 0) { if (ret == PMEM2_E_NOSUPP) { errno = ENOTSUP; LOG(1, "!deep_flush not supported"); } else { errno = pmem2_err_to_errno(ret); LOG(2, "cannot write to deep_flush" "in region %u", mt->region_id); } return -1; } return 0; case PMEM_MAP_SYNC: return pmem_msync(addr, len); default: ASSERT(0); return -1; } } /* * os_range_deep_common -- perform deep action of given address range */ int os_range_deep_common(uintptr_t addr, size_t len) { LOG(3, "addr 0x%016" PRIxPTR " len %zu", addr, len); while (len != 0) { const struct map_tracker *mt = util_range_find(addr, len); /* no more overlapping track regions or NOT a device DAX */ if (mt == NULL) { LOG(15, "pmem_msync addr %p, len %lu", (void *)addr, len); return pmem_msync((void *)addr, len); } /* * For range that intersects with the found mapping * write to (Device DAX) deep_flush file. * Call msync for the non-intersecting part. */ if (mt->base_addr > addr) { size_t curr_len = mt->base_addr - addr; if (curr_len > len) curr_len = len; if (pmem_msync((void *)addr, curr_len) != 0) return -1; len -= curr_len; if (len == 0) return 0; addr = mt->base_addr; } size_t mt_in_len = mt->end_addr - addr; size_t persist_len = MIN(len, mt_in_len); if (os_deep_type(mt, (void *)addr, persist_len)) return -1; if (mt->end_addr >= addr + len) return 0; len -= mt_in_len; addr = mt->end_addr; } return 0; } /* * os_part_deep_common -- common function to handle both * deep_persist and deep_drain part flush cases. */ int os_part_deep_common(struct pool_replica *rep, unsigned partidx, void *addr, size_t len, int flush) { LOG(3, "part %p part %d addr %p len %lu flush %d", rep, partidx, addr, len, flush); if (!rep->is_pmem) { /* * In case of part on non-pmem call msync on the range * to deep flush the data. Deep drain is empty as all * data is msynced to persistence. */ if (!flush) return 0; if (pmem_msync(addr, len)) { LOG(1, "pmem_msync(%p, %lu)", addr, len); return -1; } return 0; } struct pool_set_part part = rep->part[partidx]; /* Call deep flush if it was requested */ if (flush) { LOG(15, "pmem_deep_flush addr %p, len %lu", addr, len); pmem_deep_flush(addr, len); } /* * Before deep drain call normal drain to ensure that data * is at least in WPQ. */ pmem_drain(); if (part.is_dev_dax) { /* * During deep_drain for part on device DAX search for * device region id, and perform WPQ flush on found * device DAX region. */ unsigned region_id; int ret = util_ddax_region_find(part.path, &region_id); if (ret < 0) { if (errno == ENOENT) { errno = ENOTSUP; LOG(1, "!deep_flush not supported"); } else { LOG(1, "invalid dax_region id %u", region_id); } return -1; } if (pmem2_deep_flush_write(region_id)) { LOG(1, "pmem2_deep_flush_write(%u)", region_id); return -1; } } else { /* * For deep_drain on normal pmem it is enough to * call msync on one page. 
*/ if (pmem_msync(addr, MIN(Pagesize, len))) { LOG(1, "pmem_msync(%p, %lu)", addr, len); return -1; } } return 0; }
3932
21.095506
75
c
null
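os_range_deep_common() in the record above partitions the requested range: bytes that fall outside any tracked pmem mapping are handled with msync(), while intersecting bytes take the deep-flush path appropriate for the mapping type. The standalone sketch below mirrors only that partitioning step; it is illustrative, not part of the original file, and struct region and split_range are hypothetical names.

#include <stddef.h>
#include <stdint.h>

struct region { uintptr_t base, end; };     /* a tracked pmem mapping, [base, end) */

/*
 * Returns how many bytes of [addr, addr + len) fall outside the tracked
 * regions (those would be msync()ed); the rest would take the deep-flush
 * path. Regions are assumed sorted by address and non-overlapping.
 */
static size_t
split_range(uintptr_t addr, size_t len, const struct region *r, size_t nregions)
{
    size_t outside = 0;

    for (size_t i = 0; i < nregions && len != 0; ++i) {
        if (r[i].end <= addr)
            continue;                       /* region entirely before the range */
        if (r[i].base >= addr + len)
            break;                          /* region entirely after the range */
        if (r[i].base > addr) {             /* non-pmem prefix */
            size_t prefix = r[i].base - addr;
            outside += prefix;
            addr += prefix;
            len -= prefix;
        }
        size_t in = r[i].end - addr;        /* intersecting part */
        if (in > len)
            in = len;
        addr += in;
        len -= in;
    }
    return outside + len;                   /* any tail past the last region */
}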
NearPMSW-main/nearpm/shadow/pmdk-sd/src/common/file_windows.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2020, Intel Corporation */ /* * file_windows.c -- Windows emulation of Linux-specific system calls */ /* * XXX - The initial approach to PMDK for Windows port was to minimize the * amount of changes required in the core part of the library, and to avoid * preprocessor conditionals, if possible. For that reason, some of the * Linux system calls that have no equivalents on Windows have been emulated * using Windows API. * Note that it was not a goal to fully emulate POSIX-compliant behavior * of mentioned functions. They are used only internally, so current * implementation is just good enough to satisfy PMDK needs and to make it * work on Windows. */ #include <windows.h> #include <sys/stat.h> #include <sys/file.h> #include "alloc.h" #include "file.h" #include "out.h" #include "os.h" /* * util_tmpfile -- create a temporary file */ int util_tmpfile(const char *dir, const char *templ, int flags) { LOG(3, "dir \"%s\" template \"%s\" flags %x", dir, templ, flags); /* only O_EXCL is allowed here */ ASSERT(flags == 0 || flags == O_EXCL); int oerrno; int fd = -1; size_t len = strlen(dir) + strlen(templ) + 1; char *fullname = Malloc(sizeof(*fullname) * len); if (fullname == NULL) { ERR("!Malloc"); return -1; } int ret = _snprintf(fullname, len, "%s%s", dir, templ); if (ret < 0 || ret >= len) { ERR("snprintf: %d", ret); goto err; } LOG(4, "fullname \"%s\"", fullname); /* * XXX - block signals and modify file creation mask for the time * of mkstmep() execution. Restore previous settings once the file * is created. */ fd = os_mkstemp(fullname); if (fd < 0) { ERR("!os_mkstemp"); goto err; } /* * There is no point to use unlink() here. First, because it does not * work on open files. Second, because the file is created with * O_TEMPORARY flag, and it looks like such temp files cannot be open * from another process, even though they are visible on * the filesystem. */ Free(fullname); return fd; err: Free(fullname); oerrno = errno; if (fd != -1) (void) os_close(fd); errno = oerrno; return -1; } /* * util_is_absolute_path -- check if the path is absolute */ int util_is_absolute_path(const char *path) { LOG(3, "path \"%s\"", path); if (path == NULL || path[0] == '\0') return 0; if (path[0] == '\\' || path[1] == ':') return 1; return 0; } /* * util_file_mkdir -- creates new dir */ int util_file_mkdir(const char *path, mode_t mode) { /* * On windows we cannot create read only dir so mode * parameter is useless. 
*/ UNREFERENCED_PARAMETER(mode); LOG(3, "path: %s mode: %d", path, mode); return _mkdir(path); } /* * util_file_dir_open -- open a directory */ int util_file_dir_open(struct dir_handle *handle, const char *path) { /* init handle */ handle->handle = NULL; handle->path = path; return 0; } /* * util_file_dir_next - read next file in directory */ int util_file_dir_next(struct dir_handle *handle, struct file_info *info) { WIN32_FIND_DATAA data; if (handle->handle == NULL) { handle->handle = FindFirstFileA(handle->path, &data); if (handle->handle == NULL) return 1; } else { if (FindNextFileA(handle->handle, &data) == 0) return 1; } info->filename[NAME_MAX] = '\0'; strncpy(info->filename, data.cFileName, NAME_MAX + 1); if (info->filename[NAME_MAX] != '\0') return -1; /* filename truncated */ info->is_dir = data.dwFileAttributes == FILE_ATTRIBUTE_DIRECTORY; return 0; } /* * util_file_dir_close -- close a directory */ int util_file_dir_close(struct dir_handle *handle) { return FindClose(handle->handle); } /* * util_file_dir_remove -- remove directory */ int util_file_dir_remove(const char *path) { return RemoveDirectoryA(path) == 0 ? -1 : 0; } /* * util_file_device_dax_alignment -- returns internal Device DAX alignment */ size_t util_file_device_dax_alignment(const char *path) { LOG(3, "path \"%s\"", path); return 0; } /* * util_ddax_region_find -- returns DEV dax region id that contains file */ int util_ddax_region_find(const char *path, unsigned *region_id) { LOG(3, "path \"%s\"", path); return -1; }
4,186
20.253807
76
c
null
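A short, hedged usage sketch for the directory-iteration helpers in file_windows.c above. It assumes the PMDK common sources are built in and that struct dir_handle and struct file_info (from file.h) have the fields used above; note that the path handed to util_file_dir_open() is passed straight to FindFirstFileA(), so a wildcard pattern such as "C:\\temp\\*" is expected. The helper name list_dir is made up for illustration.

#include <stdio.h>
#include "file.h"

/* list_dir -- print every entry matching the given FindFirstFileA pattern */
static void
list_dir(const char *pattern)
{
	struct dir_handle dir;
	struct file_info entry;

	if (util_file_dir_open(&dir, pattern) != 0)
		return;

	/* util_file_dir_next() returns 0 while entries remain */
	while (util_file_dir_next(&dir, &entry) == 0)
		printf("%s%s\n", entry.filename,
			entry.is_dir ? " <dir>" : "");

	util_file_dir_close(&dir);
}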
NearPMSW-main/nearpm/shadow/pmdk-sd/src/common/mmap.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2014-2020, Intel Corporation */ /* * mmap.c -- mmap utilities */ #include <errno.h> #include <inttypes.h> #include <fcntl.h> #include <stdlib.h> #include <string.h> #include <sys/mman.h> #include <unistd.h> #include "file.h" #include "queue.h" #include "mmap.h" #include "sys_util.h" #include "os.h" #include "alloc.h" #include "libpmem2.h" int Mmap_no_random; void *Mmap_hint; static os_rwlock_t Mmap_list_lock; static PMDK_SORTEDQ_HEAD(map_list_head, map_tracker) Mmap_list = PMDK_SORTEDQ_HEAD_INITIALIZER(Mmap_list); /* * util_mmap_init -- initialize the mmap utils * * This is called from the library initialization code. */ void util_mmap_init(void) { LOG(3, NULL); util_rwlock_init(&Mmap_list_lock); /* * For testing, allow overriding the default mmap() hint address. * If hint address is defined, it also disables address randomization. */ char *e = os_getenv("PMEM_MMAP_HINT"); if (e) { char *endp; errno = 0; unsigned long long val = strtoull(e, &endp, 16); if (errno || endp == e) { LOG(2, "Invalid PMEM_MMAP_HINT"); } else if (os_access(OS_MAPFILE, R_OK)) { LOG(2, "No /proc, PMEM_MMAP_HINT ignored"); } else { Mmap_hint = (void *)val; Mmap_no_random = 1; LOG(3, "PMEM_MMAP_HINT set to %p", Mmap_hint); } } } /* * util_mmap_fini -- clean up the mmap utils * * This is called before process stop. */ void util_mmap_fini(void) { LOG(3, NULL); util_rwlock_destroy(&Mmap_list_lock); } /* * util_map -- memory map a file * * This is just a convenience function that calls mmap() with the * appropriate arguments and includes our trace points. */ void * util_map(int fd, os_off_t off, size_t len, int flags, int rdonly, size_t req_align, int *map_sync) { LOG(3, "fd %d len %zu flags %d rdonly %d req_align %zu map_sync %p", fd, len, flags, rdonly, req_align, map_sync); void *base; void *addr = util_map_hint(len, req_align); if (addr == MAP_FAILED) { LOG(1, "cannot find a contiguous region of given size"); return NULL; } if (req_align) ASSERTeq((uintptr_t)addr % req_align, 0); int proto = rdonly ? PROT_READ : PROT_READ|PROT_WRITE; base = util_map_sync(addr, len, proto, flags, fd, off, map_sync); if (base == MAP_FAILED) { ERR("!mmap %zu bytes", len); return NULL; } LOG(3, "mapped at %p", base); return base; } /* * util_unmap -- unmap a file * * This is just a convenience function that calls munmap() with the * appropriate arguments and includes our trace points. */ int util_unmap(void *addr, size_t len) { LOG(3, "addr %p len %zu", addr, len); /* * XXX Workaround for https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=169608 */ #ifdef __FreeBSD__ if (!IS_PAGE_ALIGNED((uintptr_t)addr)) { errno = EINVAL; ERR("!munmap"); return -1; } #endif int retval = munmap(addr, len); if (retval < 0) ERR("!munmap"); return retval; } /* * util_range_ro -- set a memory range read-only */ int util_range_ro(void *addr, size_t len) { LOG(3, "addr %p len %zu", addr, len); uintptr_t uptr; int retval; /* * mprotect requires addr to be a multiple of pagesize, so * adjust addr and len to represent the full 4k chunks * covering the given range. 
*/ /* increase len by the amount we gain when we round addr down */ len += (uintptr_t)addr & (Pagesize - 1); /* round addr down to page boundary */ uptr = (uintptr_t)addr & ~(Pagesize - 1); if ((retval = mprotect((void *)uptr, len, PROT_READ)) < 0) ERR("!mprotect: PROT_READ"); return retval; } /* * util_range_rw -- set a memory range read-write */ int util_range_rw(void *addr, size_t len) { LOG(3, "addr %p len %zu", addr, len); uintptr_t uptr; int retval; /* * mprotect requires addr to be a multiple of pagesize, so * adjust addr and len to represent the full 4k chunks * covering the given range. */ /* increase len by the amount we gain when we round addr down */ len += (uintptr_t)addr & (Pagesize - 1); /* round addr down to page boundary */ uptr = (uintptr_t)addr & ~(Pagesize - 1); if ((retval = mprotect((void *)uptr, len, PROT_READ|PROT_WRITE)) < 0) ERR("!mprotect: PROT_READ|PROT_WRITE"); return retval; } /* * util_range_none -- set a memory range for no access allowed */ int util_range_none(void *addr, size_t len) { LOG(3, "addr %p len %zu", addr, len); uintptr_t uptr; int retval; /* * mprotect requires addr to be a multiple of pagesize, so * adjust addr and len to represent the full 4k chunks * covering the given range. */ /* increase len by the amount we gain when we round addr down */ len += (uintptr_t)addr & (Pagesize - 1); /* round addr down to page boundary */ uptr = (uintptr_t)addr & ~(Pagesize - 1); if ((retval = mprotect((void *)uptr, len, PROT_NONE)) < 0) ERR("!mprotect: PROT_NONE"); return retval; } /* * util_range_comparer -- (internal) compares the two mapping trackers */ static intptr_t util_range_comparer(struct map_tracker *a, struct map_tracker *b) { return ((intptr_t)a->base_addr - (intptr_t)b->base_addr); } /* * util_range_find_unlocked -- (internal) find the map tracker * for given address range * * Returns the first entry at least partially overlapping given range. * It's up to the caller to check whether the entry exactly matches the range, * or if the range spans multiple entries. 
*/ static struct map_tracker * util_range_find_unlocked(uintptr_t addr, size_t len) { LOG(10, "addr 0x%016" PRIxPTR " len %zu", addr, len); uintptr_t end = addr + len; struct map_tracker *mt; PMDK_SORTEDQ_FOREACH(mt, &Mmap_list, entry) { if (addr < mt->end_addr && (addr >= mt->base_addr || end > mt->base_addr)) goto out; /* break if there is no chance to find matching entry */ if (addr < mt->base_addr) break; } mt = NULL; out: return mt; } /* * util_range_find -- find the map tracker for given address range * the same as util_range_find_unlocked but locked */ struct map_tracker * util_range_find(uintptr_t addr, size_t len) { LOG(10, "addr 0x%016" PRIxPTR " len %zu", addr, len); util_rwlock_rdlock(&Mmap_list_lock); struct map_tracker *mt = util_range_find_unlocked(addr, len); util_rwlock_unlock(&Mmap_list_lock); return mt; } /* * util_range_register -- add a memory range into a map tracking list */ int util_range_register(const void *addr, size_t len, const char *path, enum pmem_map_type type) { LOG(3, "addr %p len %zu path %s type %d", addr, len, path, type); /* check if not tracked already */ if (util_range_find((uintptr_t)addr, len) != NULL) { ERR( "duplicated persistent memory range; presumably unmapped with munmap() instead of pmem_unmap(): addr %p len %zu", addr, len); errno = ENOMEM; return -1; } struct map_tracker *mt; mt = Malloc(sizeof(struct map_tracker)); if (mt == NULL) { ERR("!Malloc"); return -1; } mt->base_addr = (uintptr_t)addr; mt->end_addr = mt->base_addr + len; mt->type = type; if (type == PMEM_DEV_DAX) { unsigned region_id; int ret = util_ddax_region_find(path, &region_id); if (ret < 0) { ERR("Cannot find DAX device region id"); return -1; } mt->region_id = region_id; } util_rwlock_wrlock(&Mmap_list_lock); PMDK_SORTEDQ_INSERT(&Mmap_list, mt, entry, struct map_tracker, util_range_comparer); util_rwlock_unlock(&Mmap_list_lock); return 0; } /* * util_range_split -- (internal) remove or split a map tracking entry */ static int util_range_split(struct map_tracker *mt, const void *addrp, const void *endp) { LOG(3, "begin %p end %p", addrp, endp); uintptr_t addr = (uintptr_t)addrp; uintptr_t end = (uintptr_t)endp; ASSERTne(mt, NULL); if (addr == end || addr % Mmap_align != 0 || end % Mmap_align != 0) { ERR( "invalid munmap length, must be non-zero and page aligned"); return -1; } struct map_tracker *mtb = NULL; struct map_tracker *mte = NULL; /* * 1) b e b e * xxxxxxxxxxxxx => xxx.......xxxx - mtb+mte * 2) b e b e * xxxxxxxxxxxxx => xxxxxxx....... - mtb * 3) b e b e * xxxxxxxxxxxxx => ........xxxxxx - mte * 4) b e b e * xxxxxxxxxxxxx => .............. 
- <none> */ if (addr > mt->base_addr) { /* case #1/2 */ /* new mapping at the beginning */ mtb = Malloc(sizeof(struct map_tracker)); if (mtb == NULL) { ERR("!Malloc"); goto err; } mtb->base_addr = mt->base_addr; mtb->end_addr = addr; mtb->region_id = mt->region_id; mtb->type = mt->type; } if (end < mt->end_addr) { /* case #1/3 */ /* new mapping at the end */ mte = Malloc(sizeof(struct map_tracker)); if (mte == NULL) { ERR("!Malloc"); goto err; } mte->base_addr = end; mte->end_addr = mt->end_addr; mte->region_id = mt->region_id; mte->type = mt->type; } PMDK_SORTEDQ_REMOVE(&Mmap_list, mt, entry); if (mtb) { PMDK_SORTEDQ_INSERT(&Mmap_list, mtb, entry, struct map_tracker, util_range_comparer); } if (mte) { PMDK_SORTEDQ_INSERT(&Mmap_list, mte, entry, struct map_tracker, util_range_comparer); } /* free entry for the original mapping */ Free(mt); return 0; err: Free(mtb); Free(mte); return -1; } /* * util_range_unregister -- remove a memory range * from map tracking list * * Remove the region between [begin,end]. If it's in a middle of the existing * mapping, it results in two new map trackers. */ int util_range_unregister(const void *addr, size_t len) { LOG(3, "addr %p len %zu", addr, len); int ret = 0; util_rwlock_wrlock(&Mmap_list_lock); /* * Changes in the map tracker list must match the underlying behavior. * * $ man 2 mmap: * The address addr must be a multiple of the page size (but length * need not be). All pages containing a part of the indicated range * are unmapped. * * This means that we must align the length to the page size. */ len = PAGE_ALIGNED_UP_SIZE(len); void *end = (char *)addr + len; /* XXX optimize the loop */ struct map_tracker *mt; while ((mt = util_range_find_unlocked((uintptr_t)addr, len)) != NULL) { if (util_range_split(mt, addr, end) != 0) { ret = -1; break; } } util_rwlock_unlock(&Mmap_list_lock); return ret; } /* * util_range_is_pmem -- return true if entire range * is persistent memory */ int util_range_is_pmem(const void *addrp, size_t len) { LOG(10, "addr %p len %zu", addrp, len); uintptr_t addr = (uintptr_t)addrp; int retval = 1; util_rwlock_rdlock(&Mmap_list_lock); do { struct map_tracker *mt = util_range_find(addr, len); if (mt == NULL) { LOG(4, "address not found 0x%016" PRIxPTR, addr); retval = 0; break; } LOG(10, "range found - begin 0x%016" PRIxPTR " end 0x%016" PRIxPTR, mt->base_addr, mt->end_addr); if (mt->base_addr > addr) { LOG(10, "base address doesn't match: " "0x%" PRIxPTR " > 0x%" PRIxPTR, mt->base_addr, addr); retval = 0; break; } uintptr_t map_len = mt->end_addr - addr; if (map_len > len) map_len = len; len -= map_len; addr += map_len; } while (len > 0); util_rwlock_unlock(&Mmap_list_lock); return retval; }
11,141
21.063366
115
c
null
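A hedged sketch of how the map-tracker API defined in mmap.c above fits together. It assumes the PMDK common sources are linked and util_mmap_init() has already been called; track_example is a made-up helper and the address/length values are placeholders, not a real mapping.

#include "mmap.h"
#include "out.h"

/* track_example -- hypothetical walk through the tracker API used above */
static void
track_example(void *addr, size_t len)
{
	/* record a MAP_SYNC mapping in the tracker list (path unused here) */
	if (util_range_register(addr, len, "", PMEM_MAP_SYNC) != 0)
		return;

	/* the tracked range is now reported as persistent memory */
	ASSERT(util_range_is_pmem(addr, len));

	/* an overlapping query returns the covering map_tracker entry */
	struct map_tracker *mt = util_range_find((uintptr_t)addr, len);
	ASSERTne(mt, NULL);

	/* removal mirrors munmap() semantics (length rounded up to a page) */
	util_range_unregister(addr, len);
}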
NearPMSW-main/nearpm/shadow/pmdk-sd/src/common/mmap_posix.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2014-2018, Intel Corporation */ /* * mmap_posix.c -- memory-mapped files for Posix */ #include <stdio.h> #include <sys/mman.h> #include <sys/param.h> #include "mmap.h" #include "out.h" #include "os.h" #define PROCMAXLEN 2048 /* maximum expected line length in /proc files */ char *Mmap_mapfile = OS_MAPFILE; /* Should be modified only for testing */ #ifdef __FreeBSD__ static const char * const sscanf_os = "%p %p"; #else static const char * const sscanf_os = "%p-%p"; #endif /* * util_map_hint_unused -- use /proc to determine a hint address for mmap() * * This is a helper function for util_map_hint(). * It opens up /proc/self/maps and looks for the first unused address * in the process address space that is: * - greater or equal 'minaddr' argument, * - large enough to hold range of given length, * - aligned to the specified unit. * * Asking for aligned address like this will allow the DAX code to use large * mappings. It is not an error if mmap() ignores the hint and chooses * different address. */ char * util_map_hint_unused(void *minaddr, size_t len, size_t align) { LOG(3, "minaddr %p len %zu align %zu", minaddr, len, align); ASSERT(align > 0); FILE *fp; if ((fp = os_fopen(Mmap_mapfile, "r")) == NULL) { ERR("!%s", Mmap_mapfile); return MAP_FAILED; } char line[PROCMAXLEN]; /* for fgets() */ char *lo = NULL; /* beginning of current range in maps file */ char *hi = NULL; /* end of current range in maps file */ char *raddr = minaddr; /* ignore regions below 'minaddr' */ if (raddr == NULL) raddr += Pagesize; raddr = (char *)roundup((uintptr_t)raddr, align); while (fgets(line, PROCMAXLEN, fp) != NULL) { /* check for range line */ if (sscanf(line, sscanf_os, &lo, &hi) == 2) { LOG(4, "%p-%p", lo, hi); if (lo > raddr) { if ((uintptr_t)(lo - raddr) >= len) { LOG(4, "unused region of size %zu " "found at %p", lo - raddr, raddr); break; } else { LOG(4, "region is too small: %zu < %zu", lo - raddr, len); } } if (hi > raddr) { raddr = (char *)roundup((uintptr_t)hi, align); LOG(4, "nearest aligned addr %p", raddr); } if (raddr == NULL) { LOG(4, "end of address space reached"); break; } } } /* * Check for a case when this is the last unused range in the address * space, but is not large enough. (very unlikely) */ if ((raddr != NULL) && (UINTPTR_MAX - (uintptr_t)raddr < len)) { ERR("end of address space reached"); raddr = MAP_FAILED; } fclose(fp); LOG(3, "returning %p", raddr); return raddr; } /* * util_map_hint -- determine hint address for mmap() * * If PMEM_MMAP_HINT environment variable is not set, we let the system to pick * the randomized mapping address. Otherwise, a user-defined hint address * is used. * * ALSR in 64-bit Linux kernel uses 28-bit of randomness for mmap * (bit positions 12-39), which means the base mapping address is randomized * within [0..1024GB] range, with 4KB granularity. Assuming additional * 1GB alignment, it results in 1024 possible locations. * * Configuring the hint address via PMEM_MMAP_HINT environment variable * disables address randomization. In such case, the function will search for * the first unused, properly aligned region of given size, above the specified * address. 
*/ char * util_map_hint(size_t len, size_t req_align) { LOG(3, "len %zu req_align %zu", len, req_align); char *hint_addr = MAP_FAILED; /* choose the desired alignment based on the requested length */ size_t align = util_map_hint_align(len, req_align); if (Mmap_no_random) { LOG(4, "user-defined hint %p", Mmap_hint); hint_addr = util_map_hint_unused(Mmap_hint, len, align); } else { /* * Create dummy mapping to find an unused region of given size. * Request for increased size for later address alignment. * Use MAP_PRIVATE with read-only access to simulate * zero cost for overcommit accounting. Note: MAP_NORESERVE * flag is ignored if overcommit is disabled (mode 2). */ char *addr = mmap(NULL, len + align, PROT_READ, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0); if (addr == MAP_FAILED) { ERR("!mmap MAP_ANONYMOUS"); } else { LOG(4, "system choice %p", addr); hint_addr = (char *)roundup((uintptr_t)addr, align); munmap(addr, len + align); } } LOG(4, "hint %p", hint_addr); return hint_addr; } /* * util_map_sync -- memory map given file into memory, if MAP_SHARED flag is * provided it attempts to use MAP_SYNC flag. Otherwise it fallbacks to * mmap(2). */ void * util_map_sync(void *addr, size_t len, int proto, int flags, int fd, os_off_t offset, int *map_sync) { LOG(15, "addr %p len %zu proto %x flags %x fd %d offset %ld " "map_sync %p", addr, len, proto, flags, fd, offset, map_sync); if (map_sync) *map_sync = 0; /* if map_sync is NULL do not even try to mmap with MAP_SYNC flag */ if (!map_sync || flags & MAP_PRIVATE) return mmap(addr, len, proto, flags, fd, offset); /* MAP_SHARED */ void *ret = mmap(addr, len, proto, flags | MAP_SHARED_VALIDATE | MAP_SYNC, fd, offset); if (ret != MAP_FAILED) { LOG(4, "mmap with MAP_SYNC succeeded"); *map_sync = 1; return ret; } if (errno == EINVAL || errno == ENOTSUP) { LOG(4, "mmap with MAP_SYNC not supported"); return mmap(addr, len, proto, flags, fd, offset); } /* other error */ return MAP_FAILED; }
5,438
27.036082
79
c
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/common/ravl.h
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2018-2019, Intel Corporation */

/*
 * ravl.h -- internal definitions for ravl tree
 */

#ifndef LIBPMEMOBJ_RAVL_H
#define LIBPMEMOBJ_RAVL_H 1

#include <stddef.h>

#ifdef __cplusplus
extern "C" {
#endif

struct ravl;
struct ravl_node;

enum ravl_predicate {
	RAVL_PREDICATE_EQUAL		= 1 << 0,
	RAVL_PREDICATE_GREATER		= 1 << 1,
	RAVL_PREDICATE_LESS		= 1 << 2,
	RAVL_PREDICATE_LESS_EQUAL	=
		RAVL_PREDICATE_EQUAL | RAVL_PREDICATE_LESS,
	RAVL_PREDICATE_GREATER_EQUAL	=
		RAVL_PREDICATE_EQUAL | RAVL_PREDICATE_GREATER,
};

typedef int ravl_compare(const void *lhs, const void *rhs);
typedef void ravl_cb(void *data, void *arg);
typedef void ravl_constr(void *data, size_t data_size, const void *arg);

struct ravl *ravl_new(ravl_compare *compare);
struct ravl *ravl_new_sized(ravl_compare *compare, size_t data_size);
void ravl_delete(struct ravl *ravl);
void ravl_delete_cb(struct ravl *ravl, ravl_cb cb, void *arg);
void ravl_foreach(struct ravl *ravl, ravl_cb cb, void *arg);
int ravl_empty(struct ravl *ravl);
void ravl_clear(struct ravl *ravl);
int ravl_insert(struct ravl *ravl, const void *data);
int ravl_emplace(struct ravl *ravl, ravl_constr constr, const void *arg);
int ravl_emplace_copy(struct ravl *ravl, const void *data);

struct ravl_node *ravl_find(struct ravl *ravl, const void *data,
	enum ravl_predicate predicate_flags);
void *ravl_data(struct ravl_node *node);
void ravl_remove(struct ravl *ravl, struct ravl_node *node);

#ifdef __cplusplus
}
#endif

#endif /* LIBPMEMOBJ_RAVL_H */
1,556
27.309091
73
h
null
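Since ravl.h above only declares the interface, a short usage sketch may help. This is a hypothetical, self-contained example assuming the ravl sources are compiled in; the comparator cmp_int and the stored keys are made up for illustration.

#include <stdio.h>
#include "ravl.h"

/* cmp_int -- comparator for int keys stored by pointer */
static int
cmp_int(const void *lhs, const void *rhs)
{
	int l = *(const int *)lhs;
	int r = *(const int *)rhs;
	return (l > r) - (l < r);
}

int
main(void)
{
	static int keys[] = {7, 3, 11};

	/* a plain ravl_new() tree stores the pointers given to ravl_insert() */
	struct ravl *tree = ravl_new(cmp_int);
	if (tree == NULL)
		return 1;

	for (size_t i = 0; i < sizeof(keys) / sizeof(keys[0]); ++i) {
		if (ravl_insert(tree, &keys[i]) != 0) {
			ravl_delete(tree);
			return 1;
		}
	}

	/* find the smallest stored element >= 5 */
	int needle = 5;
	struct ravl_node *n = ravl_find(tree, &needle,
			RAVL_PREDICATE_GREATER_EQUAL);
	if (n != NULL)
		printf("found %d\n", *(int *)ravl_data(n)); /* expected: 7 */

	ravl_delete(tree);
	return 0;
}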
NearPMSW-main/nearpm/shadow/pmdk-sd/src/common/pool_hdr.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2014-2019, Intel Corporation */ /* * pool_hdr.h -- internal definitions for pool header module */ #ifndef PMDK_POOL_HDR_H #define PMDK_POOL_HDR_H 1 #include <stddef.h> #include <stdint.h> #include <unistd.h> #include "uuid.h" #include "shutdown_state.h" #include "util.h" #include "page_size.h" #ifdef __cplusplus extern "C" { #endif /* * Number of bits per type in alignment descriptor */ #define ALIGNMENT_DESC_BITS 4 /* * architecture identification flags * * These flags allow to unambiguously determine the architecture * on which the pool was created. * * The alignment_desc field contains information about alignment * of the following basic types: * - char * - short * - int * - long * - long long * - size_t * - os_off_t * - float * - double * - long double * - void * * * The alignment of each type is computed as an offset of field * of specific type in the following structure: * struct { * char byte; * type field; * }; * * The value is decremented by 1 and masked by 4 bits. * Multiple alignments are stored on consecutive 4 bits of each * type in the order specified above. * * The values used in the machine, and machine_class fields are in * principle independent of operating systems, and object formats. * In practice they happen to match constants used in ELF object headers. */ struct arch_flags { uint64_t alignment_desc; /* alignment descriptor */ uint8_t machine_class; /* address size -- 64 bit or 32 bit */ uint8_t data; /* data encoding -- LE or BE */ uint8_t reserved[4]; uint16_t machine; /* required architecture */ }; #define POOL_HDR_ARCH_LEN sizeof(struct arch_flags) /* possible values of the machine class field in the above struct */ #define PMDK_MACHINE_CLASS_64 2 /* 64 bit pointers, 64 bit size_t */ /* possible values of the machine field in the above struct */ #define PMDK_MACHINE_X86_64 62 #define PMDK_MACHINE_AARCH64 183 #define PMDK_MACHINE_PPC64 21 /* possible values of the data field in the above struct */ #define PMDK_DATA_LE 1 /* 2's complement, little endian */ #define PMDK_DATA_BE 2 /* 2's complement, big endian */ /* * features flags */ typedef struct { uint32_t compat; /* mask: compatible "may" features */ uint32_t incompat; /* mask: "must support" features */ uint32_t ro_compat; /* mask: force RO if unsupported */ } features_t; /* * header used at the beginning of all types of memory pools * * for pools build on persistent memory, the integer types * below are stored in little-endian byte order. 
*/ #define POOL_HDR_SIG_LEN 8 #define POOL_HDR_UNUSED_SIZE 1904 #define POOL_HDR_UNUSED2_SIZE 1976 #define POOL_HDR_ALIGN_PAD (PMEM_PAGESIZE - 4096) struct pool_hdr { char signature[POOL_HDR_SIG_LEN]; uint32_t major; /* format major version number */ features_t features; /* features flags */ uuid_t poolset_uuid; /* pool set UUID */ uuid_t uuid; /* UUID of this file */ uuid_t prev_part_uuid; /* prev part */ uuid_t next_part_uuid; /* next part */ uuid_t prev_repl_uuid; /* prev replica */ uuid_t next_repl_uuid; /* next replica */ uint64_t crtime; /* when created (seconds since epoch) */ struct arch_flags arch_flags; /* architecture identification flags */ unsigned char unused[POOL_HDR_UNUSED_SIZE]; /* must be zero */ /* not checksumed */ unsigned char unused2[POOL_HDR_UNUSED2_SIZE]; /* must be zero */ struct shutdown_state sds; /* shutdown status */ uint64_t checksum; /* checksum of above fields */ #if PMEM_PAGESIZE > 4096 /* prevent zero size array */ unsigned char align_pad[POOL_HDR_ALIGN_PAD]; /* alignment pad */ #endif }; #define POOL_HDR_SIZE (sizeof(struct pool_hdr)) #define POOL_DESC_SIZE PMEM_PAGESIZE void util_convert2le_hdr(struct pool_hdr *hdrp); void util_convert2h_hdr_nocheck(struct pool_hdr *hdrp); void util_get_arch_flags(struct arch_flags *arch_flags); int util_check_arch_flags(const struct arch_flags *arch_flags); features_t util_get_unknown_features(features_t features, features_t known); int util_feature_check(struct pool_hdr *hdrp, features_t features); int util_feature_cmp(features_t features, features_t ref); int util_feature_is_zero(features_t features); int util_feature_is_set(features_t features, features_t flag); void util_feature_enable(features_t *features, features_t new_feature); void util_feature_disable(features_t *features, features_t new_feature); const char *util_feature2str(features_t feature, features_t *found); features_t util_str2feature(const char *str); uint32_t util_str2pmempool_feature(const char *str); uint32_t util_feature2pmempool_feature(features_t feat); /* * set of macros for determining the alignment descriptor */ #define DESC_MASK ((1 << ALIGNMENT_DESC_BITS) - 1) #define alignment_of(t) offsetof(struct { char c; t x; }, x) #define alignment_desc_of(t) (((uint64_t)alignment_of(t) - 1) & DESC_MASK) #define alignment_desc()\ (alignment_desc_of(char) << 0 * ALIGNMENT_DESC_BITS) |\ (alignment_desc_of(short) << 1 * ALIGNMENT_DESC_BITS) |\ (alignment_desc_of(int) << 2 * ALIGNMENT_DESC_BITS) |\ (alignment_desc_of(long) << 3 * ALIGNMENT_DESC_BITS) |\ (alignment_desc_of(long long) << 4 * ALIGNMENT_DESC_BITS) |\ (alignment_desc_of(size_t) << 5 * ALIGNMENT_DESC_BITS) |\ (alignment_desc_of(off_t) << 6 * ALIGNMENT_DESC_BITS) |\ (alignment_desc_of(float) << 7 * ALIGNMENT_DESC_BITS) |\ (alignment_desc_of(double) << 8 * ALIGNMENT_DESC_BITS) |\ (alignment_desc_of(long double) << 9 * ALIGNMENT_DESC_BITS) |\ (alignment_desc_of(void *) << 10 * ALIGNMENT_DESC_BITS) #define POOL_FEAT_ZERO 0x0000U static const features_t features_zero = {POOL_FEAT_ZERO, POOL_FEAT_ZERO, POOL_FEAT_ZERO}; /* * compat features */ #define POOL_FEAT_CHECK_BAD_BLOCKS 0x0001U /* check bad blocks in a pool */ #define POOL_FEAT_COMPAT_ALL \ (POOL_FEAT_CHECK_BAD_BLOCKS) #define FEAT_COMPAT(X) \ {POOL_FEAT_##X, POOL_FEAT_ZERO, POOL_FEAT_ZERO} /* * incompat features */ #define POOL_FEAT_SINGLEHDR 0x0001U /* pool header only in the first part */ #define POOL_FEAT_CKSUM_2K 0x0002U /* only first 2K of hdr checksummed */ #define POOL_FEAT_SDS 0x0004U /* check shutdown state */ #define 
POOL_FEAT_INCOMPAT_ALL \ (POOL_FEAT_SINGLEHDR | POOL_FEAT_CKSUM_2K | POOL_FEAT_SDS) /* * incompat features effective values (if applicable) */ #ifdef SDS_ENABLED #define POOL_E_FEAT_SDS POOL_FEAT_SDS #else #define POOL_E_FEAT_SDS 0x0000U /* empty */ #endif #define POOL_FEAT_COMPAT_VALID \ (POOL_FEAT_CHECK_BAD_BLOCKS) #define POOL_FEAT_INCOMPAT_VALID \ (POOL_FEAT_SINGLEHDR | POOL_FEAT_CKSUM_2K | POOL_E_FEAT_SDS) #if defined(_WIN32) || NDCTL_ENABLED #define POOL_FEAT_INCOMPAT_DEFAULT \ (POOL_FEAT_CKSUM_2K | POOL_E_FEAT_SDS) #else /* * shutdown state support on Linux requires root access on kernel < 4.20 with * ndctl < 63 so it is disabled by default */ #define POOL_FEAT_INCOMPAT_DEFAULT \ (POOL_FEAT_CKSUM_2K) #endif #if NDCTL_ENABLED #define POOL_FEAT_COMPAT_DEFAULT \ (POOL_FEAT_CHECK_BAD_BLOCKS) #else #define POOL_FEAT_COMPAT_DEFAULT \ (POOL_FEAT_ZERO) #endif #define FEAT_INCOMPAT(X) \ {POOL_FEAT_ZERO, POOL_FEAT_##X, POOL_FEAT_ZERO} #define POOL_FEAT_VALID \ {POOL_FEAT_COMPAT_VALID, POOL_FEAT_INCOMPAT_VALID, POOL_FEAT_ZERO} /* * defines the first not checksummed field - all fields after this will be * ignored during checksum calculations. */ #define POOL_HDR_CSUM_2K_END_OFF offsetof(struct pool_hdr, unused2) #define POOL_HDR_CSUM_4K_END_OFF offsetof(struct pool_hdr, checksum) /* * pick the first not checksummed field. 2K variant is used if * POOL_FEAT_CKSUM_2K incompat feature is set. */ #define POOL_HDR_CSUM_END_OFF(hdrp) \ ((hdrp)->features.incompat & POOL_FEAT_CKSUM_2K) \ ? POOL_HDR_CSUM_2K_END_OFF : POOL_HDR_CSUM_4K_END_OFF /* ignore shutdown state if incompat feature is disabled */ #define IGNORE_SDS(hdrp) \ (((hdrp) != NULL) && (((hdrp)->features.incompat & POOL_FEAT_SDS) == 0)) #ifdef __cplusplus } #endif #endif
7,980
29.696154
77
h
null
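The alignment-descriptor packing in pool_hdr.h above is easiest to see with concrete numbers. The standalone sketch below reproduces the offsetof trick from the header for illustration only (it does not depend on PMDK); the printed codes assume a typical x86-64/LP64 target.

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>
#include <inttypes.h>

#define ALIGNMENT_DESC_BITS 4
#define DESC_MASK ((1 << ALIGNMENT_DESC_BITS) - 1)
#define alignment_of(t) offsetof(struct { char c; t x; }, x)
#define alignment_desc_of(t) (((uint64_t)alignment_of(t) - 1) & DESC_MASK)

int
main(void)
{
	/*
	 * On a typical x86-64/LP64 target: alignment_of(int) == 4,
	 * alignment_of(long) == 8, alignment_of(long double) == 16,
	 * so the 4-bit codes packed into the descriptor are 3, 7 and 15.
	 */
	printf("int:         align %zu -> code %" PRIu64 "\n",
		alignment_of(int), alignment_desc_of(int));
	printf("long:        align %zu -> code %" PRIu64 "\n",
		alignment_of(long), alignment_desc_of(long));
	printf("long double: align %zu -> code %" PRIu64 "\n",
		alignment_of(long double), alignment_desc_of(long double));
	return 0;
}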
NearPMSW-main/nearpm/shadow/pmdk-sd/src/common/os_deep_windows.c
// SPDX-License-Identifier: BSD-3-Clause
/* Copyright 2017-2018, Intel Corporation */

/*
 * os_deep_windows.c -- Windows abstraction layer for deep_* functions
 */

#include <windows.h>
#include "out.h"
#include "os.h"
#include "set.h"
#include "libpmem.h"

/*
 * os_range_deep_common -- call msync for non DEV dax
 */
int
os_range_deep_common(uintptr_t addr, size_t len)
{
	LOG(3, "os_range_deep_common addr %p len %lu", addr, len);

	if (len == 0)
		return 0;
	return pmem_msync((void *)addr, len);
}

/*
 * os_part_deep_common -- common function to handle both
 * deep_persist and deep_drain part flush cases.
 */
int
os_part_deep_common(struct pool_replica *rep, unsigned partidx, void *addr,
	size_t len, int flush)
{
	LOG(3, "part %p part %d addr %p len %lu flush %d",
		rep, partidx, addr, len, flush);

	if (!rep->is_pmem) {
		/*
		 * In case of part on non-pmem call msync on the range
		 * to deep flush the data. Deep drain is empty as all
		 * data is msynced to persistence.
		 */
		if (!flush)
			return 0;

		if (pmem_msync(addr, len)) {
			LOG(1, "pmem_msync(%p, %lu)", addr, len);
			return -1;
		}
		return 0;
	}

	/* Call deep flush if it was requested */
	if (flush) {
		LOG(15, "pmem_deep_flush addr %p, len %lu", addr, len);
		pmem_deep_flush(addr, len);
	}

	/*
	 * Before deep drain call normal drain to ensure that data
	 * is at least in WPQ.
	 */
	pmem_drain();

	/*
	 * For deep_drain on normal pmem it is enough to
	 * call msync on one page.
	 */
	if (pmem_msync(addr, MIN(Pagesize, len))) {
		LOG(1, "pmem_msync(%p, %lu)", addr, len);
		return -1;
	}
	return 0;
}
1,598
20.039474
75
c
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/common/mmap_windows.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2018, Intel Corporation */ /* * Copyright (c) 2015-2017, Microsoft Corporation. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * mmap_windows.c -- memory-mapped files for Windows */ #include <sys/mman.h> #include "mmap.h" #include "out.h" /* * util_map_hint_unused -- use VirtualQuery to determine hint address * * This is a helper function for util_map_hint(). * It iterates through memory regions and looks for the first unused address * in the process address space that is: * - greater or equal 'minaddr' argument, * - large enough to hold range of given length, * - aligned to the specified unit. */ char * util_map_hint_unused(void *minaddr, size_t len, size_t align) { LOG(3, "minaddr %p len %zu align %zu", minaddr, len, align); ASSERT(align > 0); MEMORY_BASIC_INFORMATION mi; char *lo = NULL; /* beginning of current range in maps file */ char *hi = NULL; /* end of current range in maps file */ char *raddr = minaddr; /* ignore regions below 'minaddr' */ if (raddr == NULL) raddr += Pagesize; raddr = (char *)roundup((uintptr_t)raddr, align); while ((uintptr_t)raddr < UINTPTR_MAX - len) { size_t ret = VirtualQuery(raddr, &mi, sizeof(mi)); if (ret == 0) { ERR("VirtualQuery %p", raddr); return MAP_FAILED; } LOG(4, "addr %p len %zu state %d", mi.BaseAddress, mi.RegionSize, mi.State); if ((mi.State != MEM_FREE) || (mi.RegionSize < len)) { raddr = (char *)mi.BaseAddress + mi.RegionSize; raddr = (char *)roundup((uintptr_t)raddr, align); LOG(4, "nearest aligned addr %p", raddr); } else { LOG(4, "unused region of size %zu found at %p", mi.RegionSize, mi.BaseAddress); return mi.BaseAddress; } } LOG(4, "end of address space reached"); return MAP_FAILED; } /* * util_map_hint -- determine hint address for mmap() * * XXX - Windows doesn't support large DAX pages yet, so there is * no point in aligning for the same. 
*/ char * util_map_hint(size_t len, size_t req_align) { LOG(3, "len %zu req_align %zu", len, req_align); char *hint_addr = MAP_FAILED; /* choose the desired alignment based on the requested length */ size_t align = util_map_hint_align(len, req_align); if (Mmap_no_random) { LOG(4, "user-defined hint %p", Mmap_hint); hint_addr = util_map_hint_unused(Mmap_hint, len, align); } else { /* * Create dummy mapping to find an unused region of given size. * Request for increased size for later address alignment. * * Use MAP_NORESERVE flag to only reserve the range of pages * rather than commit. We don't want the pages to be actually * backed by the operating system paging file, as the swap * file is usually too small to handle terabyte pools. */ char *addr = mmap(NULL, len + align, PROT_READ, MAP_PRIVATE|MAP_ANONYMOUS|MAP_NORESERVE, -1, 0); if (addr != MAP_FAILED) { LOG(4, "system choice %p", addr); hint_addr = (char *)roundup((uintptr_t)addr, align); munmap(addr, len + align); } } LOG(4, "hint %p", hint_addr); return hint_addr; } /* * util_map_sync -- memory map given file into memory */ void * util_map_sync(void *addr, size_t len, int proto, int flags, int fd, os_off_t offset, int *map_sync) { LOG(15, "addr %p len %zu proto %x flags %x fd %d offset %ld", addr, len, proto, flags, fd, offset); if (map_sync) *map_sync = 0; return mmap(addr, len, proto, flags, fd, offset); }
4,965
31.887417
76
c
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/examples/libpmemlog/logfile/addlog.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2014-2019, Intel Corporation */ /* * addlog -- given a log file, append a log entry * * Usage: * fallocate -l 1G /path/to/pm-aware/file * addlog /path/to/pm-aware/file "first line of entry" "second line" */ #include <ex_common.h> #include <sys/stat.h> #include <stdio.h> #include <fcntl.h> #include <time.h> #include <stdlib.h> #include <string.h> #include <libpmemlog.h> #include "logentry.h" int main(int argc, char *argv[]) { PMEMlogpool *plp; struct logentry header; struct iovec *iovp; struct iovec *next_iovp; int iovcnt; if (argc < 3) { fprintf(stderr, "usage: %s filename lines...\n", argv[0]); exit(1); } const char *path = argv[1]; /* create the log in the given file, or open it if already created */ plp = pmemlog_create(path, 0, CREATE_MODE_RW); if (plp == NULL && (plp = pmemlog_open(path)) == NULL) { perror(path); exit(1); } /* fill in the header */ time(&header.timestamp); header.pid = getpid(); /* * Create an iov for pmemlog_appendv(). For each argument given, * allocate two entries (one for the string, one for the newline * appended to the string). Allocate 1 additional entry for the * header that gets prepended to the entry. */ iovcnt = (argc - 2) * 2 + 2; if ((iovp = malloc(sizeof(*iovp) * iovcnt)) == NULL) { perror("malloc"); exit(1); } next_iovp = iovp; /* put the header into iov first */ next_iovp->iov_base = &header; next_iovp->iov_len = sizeof(header); next_iovp++; /* * Now put each arg in, following it with the string "\n". * Calculate a total character count in header.len along the way. */ header.len = 0; for (int arg = 2; arg < argc; arg++) { /* add the string given */ next_iovp->iov_base = argv[arg]; next_iovp->iov_len = strlen(argv[arg]); header.len += next_iovp->iov_len; next_iovp++; /* add the newline */ next_iovp->iov_base = "\n"; next_iovp->iov_len = 1; header.len += 1; next_iovp++; } /* * pad with NULs (at least one) to align next entry to sizeof(long long) * bytes */ int a = sizeof(long long); int len_to_round = 1 + (a - (header.len + 1) % a) % a; char *buf[sizeof(long long)] = {0}; next_iovp->iov_base = buf; next_iovp->iov_len = len_to_round; header.len += len_to_round; next_iovp++; /* atomically add it all to the log */ if (pmemlog_appendv(plp, iovp, iovcnt) < 0) { perror("pmemlog_appendv"); free(iovp); exit(1); } free(iovp); pmemlog_close(plp); return 0; }
2,511
21.230088
73
c
null
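addlog.c above only appends entries; to verify what was written, the log can be replayed with pmemlog_walk(). The hedged companion sketch below mirrors what a printlog-style tool would do. It assumes struct logentry (from the example's logentry.h) is the header that addlog.c lays out, and the chunk-processing details are illustrative.

#include <stdio.h>
#include <time.h>
#include <libpmemlog.h>

#include "logentry.h"

/* process_chunk -- dump every entry: header fields, then the entry text */
static int
process_chunk(const void *buf, size_t len, void *arg)
{
	const char *p = buf;
	const char *end = p + len;

	while (p < end) {
		const struct logentry *h = (const struct logentry *)p;

		printf("pid %d, time %s", (int)h->pid, ctime(&h->timestamp));
		printf("%.*s", (int)h->len, p + sizeof(*h));

		/* h->len already includes the NUL padding added by addlog.c */
		p += sizeof(*h) + h->len;
	}

	return 1;	/* non-zero: keep walking (single chunk here anyway) */
}

int
main(int argc, char *argv[])
{
	if (argc < 2) {
		fprintf(stderr, "usage: %s filename\n", argv[0]);
		return 1;
	}

	PMEMlogpool *plp = pmemlog_open(argv[1]);
	if (plp == NULL) {
		perror(argv[1]);
		return 1;
	}

	/* chunksize 0 means "process the whole used log in one call" */
	pmemlog_walk(plp, 0, process_chunk, NULL);

	pmemlog_close(plp);
	return 0;
}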
NearPMSW-main/nearpm/shadow/pmdk-sd/src/examples/libpmemblk/assetdb/asset_checkout.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2014-2019, Intel Corporation */ /* * asset_checkout -- mark an asset as checked out to someone * * Usage: * asset_checkin /path/to/pm-aware/file asset-ID name */ #include <ex_common.h> #include <stdio.h> #include <stdlib.h> #include <fcntl.h> #include <string.h> #include <time.h> #include <assert.h> #include <libpmemblk.h> #include "asset.h" int main(int argc, char *argv[]) { PMEMblkpool *pbp; struct asset asset; int assetid; if (argc < 4) { fprintf(stderr, "usage: %s assetdb asset-ID name\n", argv[0]); exit(1); } const char *path = argv[1]; assetid = atoi(argv[2]); assert(assetid > 0); /* open an array of atomically writable elements */ if ((pbp = pmemblk_open(path, sizeof(struct asset))) == NULL) { perror("pmemblk_open"); exit(1); } /* read a required element in */ if (pmemblk_read(pbp, &asset, assetid) < 0) { perror("pmemblk_read"); exit(1); } /* check if it contains any data */ if ((asset.state != ASSET_FREE) && (asset.state != ASSET_CHECKED_OUT)) { fprintf(stderr, "Asset ID %d not found", assetid); exit(1); } if (asset.state == ASSET_CHECKED_OUT) { fprintf(stderr, "Asset ID %d already checked out\n", assetid); exit(1); } /* update user name, set checked out state, and take timestamp */ strncpy(asset.user, argv[3], ASSET_USER_NAME_MAX - 1); asset.user[ASSET_USER_NAME_MAX - 1] = '\0'; asset.state = ASSET_CHECKED_OUT; time(&asset.time); /* put it back in the block */ if (pmemblk_write(pbp, &asset, assetid) < 0) { perror("pmemblk_write"); exit(1); } pmemblk_close(pbp); return 0; }
1,634
20.233766
66
c
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/examples/libpmemobj/buffons_needle_problem.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2019, Intel Corporation */ /* * buffons_needle_problem.c <path> [<n>] -- example illustrating * usage of libpmemobj * * Calculates pi number by solving Buffon's needle problem. * Takes one/two arguments -- path of the file and integer amount of trials * or only path when continuing simulation after interruption. * The greater number of trials, the higher calculation precision. */ #include <ex_common.h> #include <stdio.h> #include <stdlib.h> #include <limits.h> #ifdef _WIN32 #define _USE_MATH_DEFINES #endif #include <math.h> #include <time.h> #include <libpmemobj.h> /* * Layout definition */ POBJ_LAYOUT_BEGIN(pi); POBJ_LAYOUT_ROOT(pi, struct my_root) POBJ_LAYOUT_END(pi) /* * Used for changing degrees into radians */ #define RADIAN_CALCULATE M_PI / 180.0 static PMEMobjpool *pop; struct my_root { double x; /* coordinate of the needle's center */ double angle; /* angle between vertical position and the needle */ double l; /* length of the needle */ double sin_angle_l; /* sin(angle) * l */ double pi; /* calculated pi number */ double d; /* distance between lines on the board */ uint64_t i; /* variable used in for loop */ uint64_t p; /* amount of the positive trials */ uint64_t n; /* amount of the trials */ }; static void print_usage(char *argv_main[]) { printf("usage: %s <path> [<n>]\n", argv_main[0]); } /* * random_number -- randomizes number in range [0,1] */ static double random_number(void) { return (double)rand() / (double)RAND_MAX; } int main(int argc, char *argv[]) { if (argc < 2 || argc > 3) { print_usage(argv); return 1; } const char *path = argv[1]; if (file_exists(path) != 0) { if ((pop = pmemobj_create(path, POBJ_LAYOUT_NAME(pi), PMEMOBJ_MIN_POOL, 0666)) == NULL) { perror("failed to create pool\n"); return 1; } } else { if ((pop = pmemobj_open(path, POBJ_LAYOUT_NAME(pi))) == NULL) { perror("failed to open pool\n"); return 1; } } srand((unsigned int)time(NULL)); TOID(struct my_root) root = POBJ_ROOT(pop, struct my_root); struct my_root *const rootp_rw = D_RW(root); if (argc == 3) { const char *n = argv[2]; char *endptr; errno = 0; uint64_t ull_n = strtoull(n, &endptr, 10); if (*endptr != '\0' || (ull_n == ULLONG_MAX && errno == ERANGE)) { perror("wrong n parameter\n"); print_usage(argv); pmemobj_close(pop); return 1; } TX_BEGIN(pop) { TX_ADD(root); rootp_rw->l = 0.9; rootp_rw->d = 1.0; rootp_rw->i = 0; rootp_rw->p = 0; rootp_rw->n = ull_n; } TX_END } for (; rootp_rw->i < rootp_rw->n; ) { TX_BEGIN(pop) { TX_ADD(root); rootp_rw->angle = random_number() * 90 * RADIAN_CALCULATE; rootp_rw->x = random_number() * rootp_rw->d / 2; rootp_rw->sin_angle_l = rootp_rw->l / 2 * sin(rootp_rw->angle); if (rootp_rw->x <= rootp_rw->sin_angle_l) { rootp_rw->p++; } rootp_rw->pi = (2 * rootp_rw->l * rootp_rw->n) / (rootp_rw->p * rootp_rw->d); rootp_rw->i++; } TX_END } printf("%f\n", D_RO(root)->pi); pmemobj_close(pop); return 0; }
3,119
20.22449
75
c
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/examples/libpmemobj/pi.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2019, Intel Corporation */ /* * pi.c -- example usage of user lists * * Calculates pi number with multiple threads using Leibniz formula. */ #include <ex_common.h> #include <math.h> #include <stdio.h> #include <stdlib.h> #include <sys/stat.h> #include <assert.h> #include <inttypes.h> #include <libpmemobj.h> #ifndef _WIN32 #include <pthread.h> #endif /* * Layout definition */ POBJ_LAYOUT_BEGIN(pi); POBJ_LAYOUT_ROOT(pi, struct pi); POBJ_LAYOUT_TOID(pi, struct pi_task); POBJ_LAYOUT_END(pi); static PMEMobjpool *pop; struct pi_task_proto { uint64_t start; uint64_t stop; long double result; }; struct pi_task { struct pi_task_proto proto; POBJ_LIST_ENTRY(struct pi_task) todo; POBJ_LIST_ENTRY(struct pi_task) done; }; struct pi { POBJ_LIST_HEAD(todo, struct pi_task) todo; POBJ_LIST_HEAD(done, struct pi_task) done; }; /* * pi_task_construct -- task constructor */ static int pi_task_construct(PMEMobjpool *pop, void *ptr, void *arg) { struct pi_task *t = (struct pi_task *)ptr; struct pi_task_proto *p = (struct pi_task_proto *)arg; t->proto = *p; pmemobj_persist(pop, t, sizeof(*t)); return 0; } /* * calc_pi -- worker for pi calculation */ #ifndef _WIN32 static void * calc_pi(void *arg) #else static DWORD WINAPI calc_pi(LPVOID arg) #endif { TOID(struct pi) pi = POBJ_ROOT(pop, struct pi); TOID(struct pi_task) task = *((TOID(struct pi_task) *)arg); long double result = 0; for (uint64_t i = D_RO(task)->proto.start; i < D_RO(task)->proto.stop; ++i) { result += (pow(-1, (double)i) / (2 * i + 1)); } D_RW(task)->proto.result = result; pmemobj_persist(pop, &D_RW(task)->proto.result, sizeof(result)); POBJ_LIST_MOVE_ELEMENT_HEAD(pop, &D_RW(pi)->todo, &D_RW(pi)->done, task, todo, done); return NULL; } /* * calc_pi_mt -- calculate all the pending to-do tasks */ static void calc_pi_mt(void) { TOID(struct pi) pi = POBJ_ROOT(pop, struct pi); int pending = 0; TOID(struct pi_task) iter; POBJ_LIST_FOREACH(iter, &D_RO(pi)->todo, todo) pending++; if (pending == 0) return; int i = 0; TOID(struct pi_task) *tasks = (TOID(struct pi_task) *)malloc( sizeof(TOID(struct pi_task)) * pending); if (tasks == NULL) { fprintf(stderr, "failed to allocate tasks\n"); return; } POBJ_LIST_FOREACH(iter, &D_RO(pi)->todo, todo) tasks[i++] = iter; #ifndef _WIN32 pthread_t workers[pending]; for (i = 0; i < pending; ++i) if (pthread_create(&workers[i], NULL, calc_pi, &tasks[i]) != 0) break; for (i = i - 1; i >= 0; --i) pthread_join(workers[i], NULL); #else HANDLE *workers = (HANDLE *) malloc(sizeof(HANDLE) * pending); for (i = 0; i < pending; ++i) { workers[i] = CreateThread(NULL, 0, calc_pi, &tasks[i], 0, NULL); if (workers[i] == NULL) break; } WaitForMultipleObjects(i, workers, TRUE, INFINITE); for (i = i - 1; i >= 0; --i) CloseHandle(workers[i]); free(workers); #endif free(tasks); } /* * prep_todo_list -- create tasks to be done */ static int prep_todo_list(int threads, int ops) { TOID(struct pi) pi = POBJ_ROOT(pop, struct pi); if (!POBJ_LIST_EMPTY(&D_RO(pi)->todo)) return -1; int ops_per_thread = ops / threads; uint64_t last = 0; /* last calculated denominator */ TOID(struct pi_task) iter; POBJ_LIST_FOREACH(iter, &D_RO(pi)->done, done) { if (last < D_RO(iter)->proto.stop) last = D_RO(iter)->proto.stop; } int i; for (i = 0; i < threads; ++i) { uint64_t start = last + (i * ops_per_thread); struct pi_task_proto proto; proto.start = start; proto.stop = start + ops_per_thread; proto.result = 0; POBJ_LIST_INSERT_NEW_HEAD(pop, &D_RW(pi)->todo, todo, sizeof(struct pi_task), pi_task_construct, 
&proto); } return 0; } int main(int argc, char *argv[]) { if (argc < 3) { printf("usage: %s file-name " "[print|done|todo|finish|calc <# of threads> <ops>]\n", argv[0]); return 1; } const char *path = argv[1]; pop = NULL; if (file_exists(path) != 0) { if ((pop = pmemobj_create(path, POBJ_LAYOUT_NAME(pi), PMEMOBJ_MIN_POOL, CREATE_MODE_RW)) == NULL) { printf("failed to create pool\n"); return 1; } } else { if ((pop = pmemobj_open(path, POBJ_LAYOUT_NAME(pi))) == NULL) { printf("failed to open pool\n"); return 1; } } TOID(struct pi) pi = POBJ_ROOT(pop, struct pi); char op = argv[2][0]; switch (op) { case 'p': { /* print pi */ long double pi_val = 0; TOID(struct pi_task) iter; POBJ_LIST_FOREACH(iter, &D_RO(pi)->done, done) { pi_val += D_RO(iter)->proto.result; } printf("pi: %Lf\n", pi_val * 4); } break; case 'd': { /* print done list */ TOID(struct pi_task) iter; POBJ_LIST_FOREACH(iter, &D_RO(pi)->done, done) { printf("(%" PRIu64 " - %" PRIu64 ") = %Lf\n", D_RO(iter)->proto.start, D_RO(iter)->proto.stop, D_RO(iter)->proto.result); } } break; case 't': { /* print to-do list */ TOID(struct pi_task) iter; POBJ_LIST_FOREACH(iter, &D_RO(pi)->todo, todo) { printf("(%" PRIu64 " - %" PRIu64 ") = %Lf\n", D_RO(iter)->proto.start, D_RO(iter)->proto.stop, D_RO(iter)->proto.result); } } break; case 'c': { /* calculate pi */ if (argc < 5) { printf("usage: %s file-name " "calc <# of threads> <ops>\n", argv[0]); return 1; } int threads = atoi(argv[3]); int ops = atoi(argv[4]); assert((threads > 0) && (ops > 0)); if (prep_todo_list(threads, ops) == -1) printf("pending todo tasks\n"); else calc_pi_mt(); } break; case 'f': { /* finish to-do tasks */ calc_pi_mt(); } break; } pmemobj_close(pop); return 0; }
5,620
20.786822
68
c
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/examples/libpmemobj/setjmp.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2016-2019, Intel Corporation */ /* * setjmp.c -- example illustrating an issue with indeterminate value * of non-volatile automatic variables after transaction abort. * See libpmemobj(7) for details. * * NOTE: To observe the problem (likely segfault on a second call to free()), * the example program should be compiled with optimizations enabled (-O2). */ #include <stdlib.h> #include <stdio.h> #include <libpmemobj.h> /* name of our layout in the pool */ #define LAYOUT_NAME "setjmp_example" int main(int argc, const char *argv[]) { const char path[] = "/pmem-fs/myfile"; PMEMobjpool *pop; /* create the pmemobj pool */ pop = pmemobj_create(path, LAYOUT_NAME, PMEMOBJ_MIN_POOL, 0666); if (pop == NULL) { perror(path); exit(1); } /* initialize pointer variables with invalid addresses */ int *bad_example_1 = (int *)0xBAADF00D; int *bad_example_2 = (int *)0xBAADF00D; int *bad_example_3 = (int *)0xBAADF00D; int *volatile good_example = (int *)0xBAADF00D; TX_BEGIN(pop) { bad_example_1 = malloc(sizeof(int)); bad_example_2 = malloc(sizeof(int)); bad_example_3 = malloc(sizeof(int)); good_example = malloc(sizeof(int)); /* manual or library abort called here */ pmemobj_tx_abort(EINVAL); } TX_ONCOMMIT { /* * This section is longjmp-safe */ } TX_ONABORT { /* * This section is not longjmp-safe */ free(good_example); /* OK */ free(bad_example_1); /* undefined behavior */ } TX_FINALLY { /* * This section is not longjmp-safe on transaction abort only */ free(bad_example_2); /* undefined behavior */ } TX_END free(bad_example_3); /* undefined behavior */ pmemobj_close(pop); return 0; }
1,723
23.985507
77
c
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/examples/libpmemobj/pmemblk/obj_pmemblk.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2019, Intel Corporation */ /* * obj_pmemblk.c -- alternate pmemblk implementation based on pmemobj * * usage: obj_pmemblk [co] file blk_size [cmd[:blk_num[:data]]...] * * c - create file * o - open file * * The "cmd" arguments match the pmemblk functions: * w - write to a block * r - read a block * z - zero a block * n - write out number of available blocks * e - put a block in error state */ #include <ex_common.h> #include <sys/stat.h> #include <string.h> #include <stdio.h> #include <assert.h> #include <stdlib.h> #include <errno.h> #include "libpmemobj.h" #include "libpmem.h" #include "libpmemblk.h" #define USABLE_SIZE (7.0 / 10) #define POOL_SIZE ((size_t)(1024 * 1024 * 100)) #define MAX_POOL_SIZE ((size_t)1024 * 1024 * 1024 * 16) #define MAX_THREADS 256 #define BSIZE_MAX ((size_t)(1024 * 1024 * 10)) #define ZERO_MASK (1 << 0) #define ERROR_MASK (1 << 1) POBJ_LAYOUT_BEGIN(obj_pmemblk); POBJ_LAYOUT_ROOT(obj_pmemblk, struct base); POBJ_LAYOUT_TOID(obj_pmemblk, uint8_t); POBJ_LAYOUT_END(obj_pmemblk); /* The root object struct holding all necessary data */ struct base { TOID(uint8_t) data; /* contiguous memory region */ TOID(uint8_t) flags; /* block flags */ size_t bsize; /* block size */ size_t nblocks; /* number of available blocks */ PMEMmutex locks[MAX_THREADS]; /* thread synchronization locks */ }; /* * pmemblk_map -- (internal) read or initialize the blk pool */ static int pmemblk_map(PMEMobjpool *pop, size_t bsize, size_t fsize) { int retval = 0; TOID(struct base) bp; bp = POBJ_ROOT(pop, struct base); /* read pool descriptor and validate user provided values */ if (D_RO(bp)->bsize) { if (bsize && D_RO(bp)->bsize != bsize) return -1; else return 0; } /* new pool, calculate and store metadata */ TX_BEGIN(pop) { TX_ADD(bp); D_RW(bp)->bsize = bsize; size_t pool_size = (size_t)(fsize * USABLE_SIZE); D_RW(bp)->nblocks = pool_size / bsize; D_RW(bp)->data = TX_ZALLOC(uint8_t, pool_size); D_RW(bp)->flags = TX_ZALLOC(uint8_t, sizeof(uint8_t) * D_RO(bp)->nblocks); } TX_ONABORT { retval = -1; } TX_END return retval; } /* * pmemblk_open -- open a block memory pool */ PMEMblkpool * pmemblk_open(const char *path, size_t bsize) { PMEMobjpool *pop = pmemobj_open(path, POBJ_LAYOUT_NAME(obj_pmemblk)); if (pop == NULL) return NULL; struct stat buf; if (stat(path, &buf)) { perror("stat"); return NULL; } return pmemblk_map(pop, bsize, buf.st_size) ? NULL : (PMEMblkpool *)pop; } /* * pmemblk_create -- create a block memory pool */ PMEMblkpool * pmemblk_create(const char *path, size_t bsize, size_t poolsize, mode_t mode) { /* max size of a single allocation is 16GB */ if (poolsize > MAX_POOL_SIZE) { errno = EINVAL; return NULL; } PMEMobjpool *pop = pmemobj_create(path, POBJ_LAYOUT_NAME(obj_pmemblk), poolsize, mode); if (pop == NULL) return NULL; return pmemblk_map(pop, bsize, poolsize) ? 
NULL : (PMEMblkpool *)pop; } /* * pmemblk_close -- close a block memory pool */ void pmemblk_close(PMEMblkpool *pbp) { pmemobj_close((PMEMobjpool *)pbp); } /* * pmemblk_check -- block memory pool consistency check */ int pmemblk_check(const char *path, size_t bsize) { int ret = pmemobj_check(path, POBJ_LAYOUT_NAME(obj_pmemblk)); if (ret) return ret; /* open just to validate block size */ PMEMblkpool *pop = pmemblk_open(path, bsize); if (!pop) return -1; pmemblk_close(pop); return 0; } /* * pmemblk_set_error -- not available in this implementation */ int pmemblk_set_error(PMEMblkpool *pbp, long long blockno) { PMEMobjpool *pop = (PMEMobjpool *)pbp; TOID(struct base) bp; bp = POBJ_ROOT(pop, struct base); int retval = 0; if (blockno >= (long long)D_RO(bp)->nblocks) return 1; TX_BEGIN_PARAM(pop, TX_PARAM_MUTEX, &D_RW(bp)->locks[blockno % MAX_THREADS], TX_PARAM_NONE) { uint8_t *flags = D_RW(D_RW(bp)->flags) + blockno; /* add the modified flags to the undo log */ pmemobj_tx_add_range_direct(flags, sizeof(*flags)); *flags |= ERROR_MASK; } TX_ONABORT { retval = 1; } TX_END return retval; } /* * pmemblk_nblock -- return number of usable blocks in a block memory pool */ size_t pmemblk_nblock(PMEMblkpool *pbp) { PMEMobjpool *pop = (PMEMobjpool *)pbp; return ((struct base *)pmemobj_direct(pmemobj_root(pop, sizeof(struct base))))->nblocks; } /* * pmemblk_read -- read a block in a block memory pool */ int pmemblk_read(PMEMblkpool *pbp, void *buf, long long blockno) { PMEMobjpool *pop = (PMEMobjpool *)pbp; TOID(struct base) bp; bp = POBJ_ROOT(pop, struct base); if (blockno >= (long long)D_RO(bp)->nblocks) return 1; pmemobj_mutex_lock(pop, &D_RW(bp)->locks[blockno % MAX_THREADS]); /* check the error mask */ uint8_t *flags = D_RW(D_RW(bp)->flags) + blockno; if ((*flags & ERROR_MASK) != 0) { pmemobj_mutex_unlock(pop, &D_RW(bp)->locks[blockno % MAX_THREADS]); errno = EIO; return 1; } /* the block is zeroed, reverse zeroing logic */ if ((*flags & ZERO_MASK) == 0) { memset(buf, 0, D_RO(bp)->bsize); } else { size_t block_off = blockno * D_RO(bp)->bsize; uint8_t *src = D_RW(D_RW(bp)->data) + block_off; memcpy(buf, src, D_RO(bp)->bsize); } pmemobj_mutex_unlock(pop, &D_RW(bp)->locks[blockno % MAX_THREADS]); return 0; } /* * pmemblk_write -- write a block (atomically) in a block memory pool */ int pmemblk_write(PMEMblkpool *pbp, const void *buf, long long blockno) { PMEMobjpool *pop = (PMEMobjpool *)pbp; int retval = 0; TOID(struct base) bp; bp = POBJ_ROOT(pop, struct base); if (blockno >= (long long)D_RO(bp)->nblocks) return 1; TX_BEGIN_PARAM(pop, TX_PARAM_MUTEX, &D_RW(bp)->locks[blockno % MAX_THREADS], TX_PARAM_NONE) { size_t block_off = blockno * D_RO(bp)->bsize; uint8_t *dst = D_RW(D_RW(bp)->data) + block_off; /* add the modified block to the undo log */ pmemobj_tx_add_range_direct(dst, D_RO(bp)->bsize); memcpy(dst, buf, D_RO(bp)->bsize); /* clear the error flag and set the zero flag */ uint8_t *flags = D_RW(D_RW(bp)->flags) + blockno; /* add the modified flags to the undo log */ pmemobj_tx_add_range_direct(flags, sizeof(*flags)); *flags &= ~ERROR_MASK; /* use reverse logic for zero mask */ *flags |= ZERO_MASK; } TX_ONABORT { retval = 1; } TX_END return retval; } /* * pmemblk_set_zero -- zero a block in a block memory pool */ int pmemblk_set_zero(PMEMblkpool *pbp, long long blockno) { PMEMobjpool *pop = (PMEMobjpool *)pbp; int retval = 0; TOID(struct base) bp; bp = POBJ_ROOT(pop, struct base); if (blockno >= (long long)D_RO(bp)->nblocks) return 1; TX_BEGIN_PARAM(pop, TX_PARAM_MUTEX, &D_RW(bp)->locks[blockno % 
MAX_THREADS], TX_PARAM_NONE) { uint8_t *flags = D_RW(D_RW(bp)->flags) + blockno; /* add the modified flags to the undo log */ pmemobj_tx_add_range_direct(flags, sizeof(*flags)); /* use reverse logic for zero mask */ *flags &= ~ZERO_MASK; } TX_ONABORT { retval = 1; } TX_END return retval; } int main(int argc, char *argv[]) { if (argc < 4) { fprintf(stderr, "usage: %s [co] file blk_size"\ " [cmd[:blk_num[:data]]...]\n", argv[0]); return 1; } unsigned long bsize = strtoul(argv[3], NULL, 10); assert(bsize <= BSIZE_MAX); if (bsize == 0) { perror("blk_size cannot be 0"); return 1; } PMEMblkpool *pbp; if (strncmp(argv[1], "c", 1) == 0) { pbp = pmemblk_create(argv[2], bsize, POOL_SIZE, CREATE_MODE_RW); } else if (strncmp(argv[1], "o", 1) == 0) { pbp = pmemblk_open(argv[2], bsize); } else { fprintf(stderr, "usage: %s [co] file blk_size" " [cmd[:blk_num[:data]]...]\n", argv[0]); return 1; } if (pbp == NULL) { perror("pmemblk_create/pmemblk_open"); return 1; } /* process the command line arguments */ for (int i = 4; i < argc; i++) { switch (*argv[i]) { case 'w': { printf("write: %s\n", argv[i] + 2); const char *block_str = strtok(argv[i] + 2, ":"); const char *data = strtok(NULL, ":"); assert(block_str != NULL); assert(data != NULL); unsigned long block = strtoul(block_str, NULL, 10); if (pmemblk_write(pbp, data, block)) perror("pmemblk_write failed"); break; } case 'r': { printf("read: %s\n", argv[i] + 2); char *buf = (char *)malloc(bsize); assert(buf != NULL); const char *block_str = strtok(argv[i] + 2, ":"); assert(block_str != NULL); if (pmemblk_read(pbp, buf, strtoul(block_str, NULL, 10))) { perror("pmemblk_read failed"); free(buf); break; } buf[bsize - 1] = '\0'; printf("%s\n", buf); free(buf); break; } case 'z': { printf("zero: %s\n", argv[i] + 2); const char *block_str = strtok(argv[i] + 2, ":"); assert(block_str != NULL); if (pmemblk_set_zero(pbp, strtoul(block_str, NULL, 10))) perror("pmemblk_set_zero failed"); break; } case 'e': { printf("error: %s\n", argv[i] + 2); const char *block_str = strtok(argv[i] + 2, ":"); assert(block_str != NULL); if (pmemblk_set_error(pbp, strtoul(block_str, NULL, 10))) perror("pmemblk_set_error failed"); break; } case 'n': { printf("nblocks: "); printf("%zu\n", pmemblk_nblock(pbp)); break; } default: { fprintf(stderr, "unrecognized command %s\n", argv[i]); break; } }; } /* all done */ pmemblk_close(pbp); return 0; }
9,447
22.62
76
c
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/examples/libpmemobj/slab_allocator/main.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2017, Intel Corporation */ /* * main.c -- example usage of a slab-like mechanism implemented in libpmemobj * * This application does nothing besides demonstrating the example slab * allocator mechanism. * * By using the CTL alloc class API we can instrument libpmemobj to optimally * manage memory for the pool. */ #include <ex_common.h> #include <assert.h> #include <stdio.h> #include "slab_allocator.h" POBJ_LAYOUT_BEGIN(slab_allocator); POBJ_LAYOUT_ROOT(slab_allocator, struct root); POBJ_LAYOUT_TOID(slab_allocator, struct bar); POBJ_LAYOUT_TOID(slab_allocator, struct foo); POBJ_LAYOUT_END(slab_allocator); struct foo { char data[100]; }; struct bar { char data[500]; }; struct root { TOID(struct foo) foop; TOID(struct bar) barp; }; int main(int argc, char *argv[]) { if (argc < 2) { printf("usage: %s file-name\n", argv[0]); return 1; } const char *path = argv[1]; PMEMobjpool *pop; if (file_exists(path) != 0) { if ((pop = pmemobj_create(path, POBJ_LAYOUT_NAME(btree), PMEMOBJ_MIN_POOL, 0666)) == NULL) { perror("failed to create pool\n"); return 1; } } else { if ((pop = pmemobj_open(path, POBJ_LAYOUT_NAME(btree))) == NULL) { perror("failed to open pool\n"); return 1; } } struct slab_allocator *foo_producer = slab_new(pop, sizeof(struct foo)); assert(foo_producer != NULL); struct slab_allocator *bar_producer = slab_new(pop, sizeof(struct bar)); assert(bar_producer != NULL); TOID(struct root) root = POBJ_ROOT(pop, struct root); if (TOID_IS_NULL(D_RO(root)->foop)) { TX_BEGIN(pop) { TX_SET(root, foop.oid, slab_tx_alloc(foo_producer)); } TX_END } if (TOID_IS_NULL(D_RO(root)->barp)) { slab_alloc(bar_producer, &D_RW(root)->barp.oid, NULL, NULL); } assert(pmemobj_alloc_usable_size(D_RO(root)->foop.oid) == sizeof(struct foo)); assert(pmemobj_alloc_usable_size(D_RO(root)->barp.oid) == sizeof(struct bar)); slab_delete(foo_producer); slab_delete(bar_producer); pmemobj_close(pop); return 0; }
2,066
21.225806
77
c
null
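main.c only hints that slab_new() is built on the CTL allocation class API; slab_allocator.c itself is not part of this listing. A rough, hedged sketch of how a fixed-size class could be registered and used through that API (all field values below are illustrative assumptions, not taken from the example sources):

#include <libpmemobj.h>

/*
 * Register a fixed-size allocation class and allocate one object from it.
 * pop is an already-open PMEMobjpool, obj_size the slab object size.
 */
static int
slab_class_alloc(PMEMobjpool *pop, size_t obj_size, PMEMoid *oidp)
{
	struct pobj_alloc_class_desc desc = {
		.unit_size = obj_size,		/* every allocation is exactly this size */
		.alignment = 0,			/* default alignment */
		.units_per_block = 1000,	/* illustrative block granularity */
		.header_type = POBJ_HEADER_NONE,
		.class_id = 0,			/* filled in by the "new" entry point */
	};

	if (pmemobj_ctl_set(pop, "heap.alloc_class.new.desc", &desc) != 0)
		return -1;

	/* allocate from the newly registered class */
	return pmemobj_xalloc(pop, oidp, obj_size, 0,
	    POBJ_CLASS_ID(desc.class_id), NULL, NULL);
}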
NearPMSW-main/nearpm/shadow/pmdk-sd/src/examples/libpmemobj/slab_allocator/slab_allocator.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2017, Intel Corporation */ /* * slab_allocator.h -- slab-like mechanism for libpmemobj */ #ifndef SLAB_ALLOCATOR_H #define SLAB_ALLOCATOR_H #include <libpmemobj.h> struct slab_allocator; struct slab_allocator *slab_new(PMEMobjpool *pop, size_t size); void slab_delete(struct slab_allocator *slab); int slab_alloc(struct slab_allocator *slab, PMEMoid *oid, pmemobj_constr constructor, void *arg); PMEMoid slab_tx_alloc(struct slab_allocator *slab); #endif /* SLAB_ALLOCATOR_H */
542
22.608696
63
h
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/examples/libpmemobj/array/array.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2016-2019, Intel Corporation */ /* * array.c -- example of arrays usage */ #include <ex_common.h> #include <math.h> #include <stdio.h> #include <stdlib.h> #include <assert.h> #include <sys/stat.h> #include <libpmemobj.h> #define TOID_ARRAY(x) TOID(x) #define COUNT_OF(x) (sizeof(x) / sizeof(x[0])) #define MAX_BUFFLEN 30 #define MAX_TYPE_NUM 8 POBJ_LAYOUT_BEGIN(array); POBJ_LAYOUT_TOID(array, struct array_elm); POBJ_LAYOUT_TOID(array, int); POBJ_LAYOUT_TOID(array, PMEMoid); POBJ_LAYOUT_TOID(array, TOID(struct array_elm)); POBJ_LAYOUT_TOID(array, struct array_info); POBJ_LAYOUT_END(array); static PMEMobjpool *pop; enum array_types { UNKNOWN_ARRAY_TYPE, INT_ARRAY_TYPE, PMEMOID_ARRAY_TYPE, TOID_ARRAY_TYPE, MAX_ARRAY_TYPE }; struct array_elm { int id; }; struct array_info { char name[MAX_BUFFLEN]; size_t size; enum array_types type; PMEMoid array; }; /* * print_usage -- print general usage */ static void print_usage(void) { printf("usage: ./array <file-name> " "<alloc|realloc|free|print>" " <array-name> [<size> [<TOID|PMEMoid|int>]]\n"); } /* * get type -- parse argument given as type of array */ static enum array_types get_type(const char *type_name) { const char *names[MAX_ARRAY_TYPE] = {"", "int", "PMEMoid", "TOID"}; enum array_types type; for (type = (enum array_types)(MAX_ARRAY_TYPE - 1); type > UNKNOWN_ARRAY_TYPE; type = (enum array_types)(type - 1)) { if (strcmp(names[type], type_name) == 0) break; } if (type == UNKNOWN_ARRAY_TYPE) fprintf(stderr, "unknown type: %s\n", type_name); return type; } /* * find_aray -- return info about array with proper name */ static TOID(struct array_info) find_array(const char *name) { TOID(struct array_info) info; POBJ_FOREACH_TYPE(pop, info) { if (strncmp(D_RO(info)->name, name, MAX_BUFFLEN) == 0) return info; } return TOID_NULL(struct array_info); } /* * elm_constructor -- constructor of array_elm type object */ static int elm_constructor(PMEMobjpool *pop, void *ptr, void *arg) { struct array_elm *obj = (struct array_elm *)ptr; int *id = (int *)arg; obj->id = *id; pmemobj_persist(pop, obj, sizeof(*obj)); return 0; } /* * print_int -- print array of int type */ static void print_int(struct array_info *info) { TOID(int) array; TOID_ASSIGN(array, info->array); for (size_t i = 0; i < info->size; i++) printf("%d ", D_RO(array)[i]); } /* * print_pmemoid -- print array of PMEMoid type */ static void print_pmemoid(struct array_info *info) { TOID(PMEMoid) array; TOID(struct array_elm) elm; TOID_ASSIGN(array, info->array); for (size_t i = 0; i < info->size; i++) { TOID_ASSIGN(elm, D_RW(array)[i]); printf("%d ", D_RO(elm)->id); } } /* * print_toid -- print array of TOID(struct array_elm) type */ static void print_toid(struct array_info *info) { TOID_ARRAY(TOID(struct array_elm)) array; TOID_ASSIGN(array, info->array); for (size_t i = 0; i < info->size; i++) printf("%d ", D_RO(D_RO(array)[i])->id); } typedef void (*fn_print)(struct array_info *info); static fn_print print_array[] = {NULL, print_int, print_pmemoid, print_toid}; /* * free_int -- de-allocate array of int type */ static void free_int(struct array_info *info) { TOID(int) array; TOID_ASSIGN(array, info->array); /* * When there is persistent array of simple type allocated, * there is enough to de-allocate persistent pointer */ POBJ_FREE(&array); } /* * free_pmemoid -- de-allocate array of PMEMoid type */ static void free_pmemoid(struct array_info *info) { TOID(PMEMoid) array; TOID_ASSIGN(array, info->array); /* * When there is persistent array of 
persistent pointer type allocated, * there is necessary to de-allocate each element, if they were * allocated earlier */ for (size_t i = 0; i < info->size; i++) pmemobj_free(&D_RW(array)[i]); POBJ_FREE(&array); } /* * free_toid -- de-allocate array of TOID(struct array_elm) type */ static void free_toid(struct array_info *info) { TOID_ARRAY(TOID(struct array_elm)) array; TOID_ASSIGN(array, info->array); /* * When there is persistent array of persistent pointer type allocated, * there is necessary to de-allocate each element, if they were * allocated earlier */ for (size_t i = 0; i < info->size; i++) POBJ_FREE(&D_RW(array)[i]); POBJ_FREE(&array); } typedef void (*fn_free)(struct array_info *info); static fn_free free_array[] = {NULL, free_int, free_pmemoid, free_toid}; /* * realloc_int -- reallocate array of int type */ static PMEMoid realloc_int(PMEMoid *info, size_t prev_size, size_t size) { TOID(int) array; TOID_ASSIGN(array, *info); POBJ_REALLOC(pop, &array, int, size * sizeof(int)); if (size > prev_size) { for (size_t i = prev_size; i < size; i++) D_RW(array)[i] = (int)i; pmemobj_persist(pop, D_RW(array) + prev_size, (size - prev_size) * sizeof(*D_RW(array))); } return array.oid; } /* * realloc_pmemoid -- reallocate array of PMEMoid type */ static PMEMoid realloc_pmemoid(PMEMoid *info, size_t prev_size, size_t size) { TOID(PMEMoid) array; TOID_ASSIGN(array, *info); pmemobj_zrealloc(pop, &array.oid, sizeof(PMEMoid) * size, TOID_TYPE_NUM(PMEMoid)); for (size_t i = prev_size; i < size; i++) { if (pmemobj_alloc(pop, &D_RW(array)[i], sizeof(struct array_elm), TOID_TYPE_NUM(PMEMoid), elm_constructor, &i)) { fprintf(stderr, "pmemobj_alloc\n"); assert(0); } } return array.oid; } /* * realloc_toid -- reallocate array of TOID(struct array_elm) type */ static PMEMoid realloc_toid(PMEMoid *info, size_t prev_size, size_t size) { TOID_ARRAY(TOID(struct array_elm)) array; TOID_ASSIGN(array, *info); pmemobj_zrealloc(pop, &array.oid, sizeof(TOID(struct array_elm)) * size, TOID_TYPE_NUM_OF(array)); for (size_t i = prev_size; i < size; i++) { POBJ_NEW(pop, &D_RW(array)[i], struct array_elm, elm_constructor, &i); if (TOID_IS_NULL(D_RW(array)[i])) { fprintf(stderr, "POBJ_ALLOC\n"); assert(0); } } return array.oid; } typedef PMEMoid (*fn_realloc)(PMEMoid *info, size_t prev_size, size_t size); static fn_realloc realloc_array[] = {NULL, realloc_int, realloc_pmemoid, realloc_toid}; /* * alloc_int -- allocate array of int type */ static PMEMoid alloc_int(size_t size) { TOID(int) array; /* * To allocate persistent array of simple type is enough to allocate * pointer with size equal to number of elements multiplied by size of * user-defined structure. */ POBJ_ALLOC(pop, &array, int, sizeof(int) * size, NULL, NULL); if (TOID_IS_NULL(array)) { fprintf(stderr, "POBJ_ALLOC\n"); return OID_NULL; } for (size_t i = 0; i < size; i++) D_RW(array)[i] = (int)i; pmemobj_persist(pop, D_RW(array), size * sizeof(*D_RW(array))); return array.oid; } /* * alloc_pmemoid -- allocate array of PMEMoid type */ static PMEMoid alloc_pmemoid(size_t size) { TOID(PMEMoid) array; /* * To allocate persistent array of PMEMoid type is necessary to allocate * pointer with size equal to number of elements multiplied by size of * PMEMoid and to allocate each of elements separately. 
*/ POBJ_ALLOC(pop, &array, PMEMoid, sizeof(PMEMoid) * size, NULL, NULL); if (TOID_IS_NULL(array)) { fprintf(stderr, "POBJ_ALLOC\n"); return OID_NULL; } for (size_t i = 0; i < size; i++) { if (pmemobj_alloc(pop, &D_RW(array)[i], sizeof(struct array_elm), TOID_TYPE_NUM(PMEMoid), elm_constructor, &i)) { fprintf(stderr, "pmemobj_alloc\n"); } } return array.oid; } /* * alloc_toid -- allocate array of TOID(struct array_elm) type */ static PMEMoid alloc_toid(size_t size) { TOID_ARRAY(TOID(struct array_elm)) array; /* * To allocate persistent array of TOID with user-defined structure type * is necessary to allocate pointer with size equal to number of * elements multiplied by size of TOID of proper type and to allocate * each of elements separately. */ POBJ_ALLOC(pop, &array, TOID(struct array_elm), sizeof(TOID(struct array_elm)) * size, NULL, NULL); if (TOID_IS_NULL(array)) { fprintf(stderr, "POBJ_ALLOC\n"); return OID_NULL; } for (size_t i = 0; i < size; i++) { POBJ_NEW(pop, &D_RW(array)[i], struct array_elm, elm_constructor, &i); if (TOID_IS_NULL(D_RW(array)[i])) { fprintf(stderr, "POBJ_ALLOC\n"); assert(0); } } return array.oid; } typedef PMEMoid (*fn_alloc)(size_t size); static fn_alloc alloc_array[] = {NULL, alloc_int, alloc_pmemoid, alloc_toid}; /* * do_print -- print values stored by proper array */ static void do_print(int argc, char *argv[]) { if (argc != 1) { printf("usage: ./array <file-name> print <array-name>\n"); return; } TOID(struct array_info) array_info = find_array(argv[0]); if (TOID_IS_NULL(array_info)) { printf("%s doesn't exist\n", argv[0]); return; } printf("%s:\n", argv[0]); print_array[D_RO(array_info)->type](D_RW(array_info)); printf("\n"); } /* * do_free -- de-allocate proper array and proper TOID of array_info type */ static void do_free(int argc, char *argv[]) { if (argc != 1) { printf("usage: ./array <file-name> free <array-name>\n"); return; } TOID(struct array_info) array_info = find_array(argv[0]); if (TOID_IS_NULL(array_info)) { printf("%s doesn't exist\n", argv[0]); return; } free_array[D_RO(array_info)->type](D_RW(array_info)); POBJ_FREE(&array_info); } /* * do_realloc -- reallocate proper array to given size and update information * in array_info structure */ static void do_realloc(int argc, char *argv[]) { if (argc != 2) { printf("usage: ./array <file-name> realloc" " <array-name> <size>\n"); return; } size_t size = atoi(argv[1]); TOID(struct array_info) array_info = find_array(argv[0]); if (TOID_IS_NULL(array_info)) { printf("%s doesn't exist\n", argv[0]); return; } struct array_info *info = D_RW(array_info); info->array = realloc_array[info->type](&info->array, info->size, size); if (OID_IS_NULL(info->array)) { if (size != 0) printf("POBJ_REALLOC\n"); } info->size = size; pmemobj_persist(pop, info, sizeof(*info)); } /* * do_alloc -- allocate persistent array and TOID of array_info type * and set it with information about new array */ static void do_alloc(int argc, char *argv[]) { if (argc != 3) { printf("usage: ./array <file-name> alloc <array-name>" "<size> <type>\n"); return; } enum array_types type = get_type(argv[2]); if (type == UNKNOWN_ARRAY_TYPE) return; size_t size = atoi(argv[1]); TOID(struct array_info) array_info = find_array(argv[0]); if (!TOID_IS_NULL(array_info)) POBJ_FREE(&array_info); POBJ_ZNEW(pop, &array_info, struct array_info); struct array_info *info = D_RW(array_info); strncpy(info->name, argv[0], MAX_BUFFLEN - 1); info->name[MAX_BUFFLEN - 1] = '\0'; info->size = size; info->type = type; info->array = alloc_array[type](size); if 
(OID_IS_NULL(info->array)) assert(0); pmemobj_persist(pop, info, sizeof(*info)); } typedef void (*fn_op)(int argc, char *argv[]); static fn_op operations[] = {do_alloc, do_realloc, do_free, do_print}; int main(int argc, char *argv[]) { if (argc < 3) { print_usage(); return 1; } const char *path = argv[1]; pop = NULL; if (file_exists(path) != 0) { if ((pop = pmemobj_create(path, POBJ_LAYOUT_NAME(array), PMEMOBJ_MIN_POOL, CREATE_MODE_RW)) == NULL) { printf("failed to create pool\n"); return 1; } } else { if ((pop = pmemobj_open(path, POBJ_LAYOUT_NAME(array))) == NULL) { printf("failed to open pool\n"); return 1; } } const char *option = argv[2]; argv += 3; argc -= 3; const char *names[] = {"alloc", "realloc", "free", "print"}; unsigned i = 0; for (; i < COUNT_OF(names) && strcmp(option, names[i]) != 0; i++); if (i != COUNT_OF(names)) operations[i](argc, argv); else print_usage(); pmemobj_close(pop); return 0; }
11,844
22.595618
77
c
null
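array.c locates arrays by walking all struct array_info objects with POBJ_FOREACH_TYPE. A small sketch of that reopen-and-iterate pattern (it assumes the layout and struct declarations from array.c above and a hypothetical pool path supplied by the caller):

#include <libpmemobj.h>
#include <stdio.h>

/* list every array recorded in the pool after reopening it */
static void
list_arrays(const char *path)
{
	PMEMobjpool *pop = pmemobj_open(path, POBJ_LAYOUT_NAME(array));
	if (pop == NULL)
		return;

	TOID(struct array_info) info;
	POBJ_FOREACH_TYPE(pop, info) {
		printf("%s: %zu elements, type %d\n", D_RO(info)->name,
		    D_RO(info)->size, (int)D_RO(info)->type);
	}

	pmemobj_close(pop);
}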
NearPMSW-main/nearpm/shadow/pmdk-sd/src/examples/libpmemobj/list_map/skiplist_map.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2016, Intel Corporation */ /* * skiplist_map.c -- Skiplist implementation */ #include <assert.h> #include <errno.h> #include <stdlib.h> #include <stdio.h> #include "skiplist_map.h" #define SKIPLIST_LEVELS_NUM 4 #define NULL_NODE TOID_NULL(struct skiplist_map_node) #include <x86intrin.h> static inline uint64_t getCycle(){ uint32_t cycles_high, cycles_low, pid; asm volatile ("RDTSCP\n\t" // rdtscp into eax and edx "mov %%edx, %0\n\t" "mov %%eax, %1\n\t" "mov %%ecx, %2\n\t" :"=r" (cycles_high), "=r" (cycles_low), "=r" (pid) //store in vars :// no input :"%eax", "%edx", "%ecx" // clobbered by rdtscp ); return((uint64_t)cycles_high << 32) | cycles_low; } struct skiplist_map_entry { uint64_t key; PMEMoid value; }; struct skiplist_map_node { TOID(struct skiplist_map_node) next[SKIPLIST_LEVELS_NUM]; struct skiplist_map_entry entry; }; /* * skiplist_map_create -- allocates a new skiplist instance */ int skiplist_map_create(PMEMobjpool *pop, TOID(struct skiplist_map_node) *map, void *arg) { int ret = 0; TX_BEGIN(pop) { pmemobj_tx_add_range_direct(map, sizeof(*map)); *map = TX_ZNEW(struct skiplist_map_node); } TX_ONABORT { ret = 1; } TX_END return ret; } /* * skiplist_map_clear -- removes all elements from the map */ int skiplist_map_clear(PMEMobjpool *pop, TOID(struct skiplist_map_node) map) { while (!TOID_EQUALS(D_RO(map)->next[0], NULL_NODE)) { TOID(struct skiplist_map_node) next = D_RO(map)->next[0]; skiplist_map_remove_free(pop, map, D_RO(next)->entry.key); } return 0; } /* * skiplist_map_destroy -- cleanups and frees skiplist instance */ int skiplist_map_destroy(PMEMobjpool *pop, TOID(struct skiplist_map_node) *map) { int ret = 0; TX_BEGIN(pop) { skiplist_map_clear(pop, *map); pmemobj_tx_add_range_direct(map, sizeof(*map)); TX_FREE(*map); *map = TOID_NULL(struct skiplist_map_node); } TX_ONABORT { ret = 1; } TX_END return ret; } /* * skiplist_map_insert_new -- allocates a new object and inserts it into * the list */ int skiplist_map_insert_new(PMEMobjpool *pop, TOID(struct skiplist_map_node) map, uint64_t key, size_t size, unsigned type_num, void (*constructor)(PMEMobjpool *pop, void *ptr, void *arg), void *arg) { int ret = 0; TX_BEGIN(pop) { PMEMoid n = pmemobj_tx_alloc(size, type_num); constructor(pop, pmemobj_direct(n), arg); skiplist_map_insert(pop, map, key, n); } TX_ONABORT { ret = 1; } TX_END return ret; } /* * skiplist_map_insert_node -- (internal) adds new node in selected place */ static void skiplist_map_insert_node(TOID(struct skiplist_map_node) new_node, TOID(struct skiplist_map_node) path[SKIPLIST_LEVELS_NUM]) { unsigned current_level = 0; do { TX_ADD_FIELD(path[current_level], next[current_level]); D_RW(new_node)->next[current_level] = D_RO(path[current_level])->next[current_level]; D_RW(path[current_level])->next[current_level] = new_node; } while (++current_level < SKIPLIST_LEVELS_NUM && rand() % 2 == 0); } /* * skiplist_map_map_find -- (internal) returns path to searched node, or if * node doesn't exist, it will return path to place where key should be. 
*/ static void skiplist_map_find(uint64_t key, TOID(struct skiplist_map_node) map, TOID(struct skiplist_map_node) *path) { int current_level; TOID(struct skiplist_map_node) active = map; for (current_level = SKIPLIST_LEVELS_NUM - 1; current_level >= 0; current_level--) { for (TOID(struct skiplist_map_node) next = D_RO(active)->next[current_level]; !TOID_EQUALS(next, NULL_NODE) && D_RO(next)->entry.key < key; next = D_RO(active)->next[current_level]) { active = next; } path[current_level] = active; } } /* * skiplist_map_insert -- inserts a new key-value pair into the map */ #ifdef GET_NDP_BREAKDOWN uint64_t ulogCycles; uint64_t waitCycles; uint64_t resetCycles; #endif int skiplist_map_insert(PMEMobjpool *pop, TOID(struct skiplist_map_node) map, uint64_t key, PMEMoid value) { int ret = 0; #ifdef GET_NDP_BREAKDOWN ulogCycles = 0; waitCycles = 0; #endif #ifdef GET_NDP_PERFORMENCE uint64_t btreetxCycles = 0; uint64_t endCycles, startCycles; for(int i=0;i<RUN_COUNT;i++){ #endif TOID(struct skiplist_map_node) new_node; TOID(struct skiplist_map_node) path[SKIPLIST_LEVELS_NUM]; #ifdef GET_NDP_PERFORMENCE startCycles = getCycle(); #endif TX_BEGIN(pop) { new_node = TX_ZNEW(struct skiplist_map_node); D_RW(new_node)->entry.key = key; D_RW(new_node)->entry.value = value; skiplist_map_find(key, map, path); skiplist_map_insert_node(new_node, path); } TX_ONABORT { ret = 1; } TX_END #ifdef GET_NDP_PERFORMENCE endCycles = getCycle(); btreetxCycles += endCycles - startCycles; } double totTime = ((double)btreetxCycles)/2000000000; printf("ctree TX/s = %f\nctree tx total time = %f\n",RUN_COUNT/totTime,totTime); #endif #ifdef GET_NDP_BREAKDOWN printf("ctree tx cmd issue total time = %f\n", (((double)ulogCycles)/2000000000)); printf("ctree tx total wait time = %f\n", (((double)waitCycles)/2000000000)); #endif return ret; } /* * skiplist_map_remove_free -- removes and frees an object from the list */ int skiplist_map_remove_free(PMEMobjpool *pop, TOID(struct skiplist_map_node) map, uint64_t key) { int ret = 0; TX_BEGIN(pop) { PMEMoid val = skiplist_map_remove(pop, map, key); pmemobj_tx_free(val); } TX_ONABORT { ret = 1; } TX_END return ret; } /* * skiplist_map_remove_node -- (internal) removes selected node */ static void skiplist_map_remove_node( TOID(struct skiplist_map_node) path[SKIPLIST_LEVELS_NUM]) { TOID(struct skiplist_map_node) to_remove = D_RO(path[0])->next[0]; int i; for (i = 0; i < SKIPLIST_LEVELS_NUM; i++) { if (TOID_EQUALS(D_RO(path[i])->next[i], to_remove)) { TX_ADD_FIELD(path[i], next[i]); D_RW(path[i])->next[i] = D_RO(to_remove)->next[i]; } } } /* * skiplist_map_remove -- removes key-value pair from the map */ PMEMoid skiplist_map_remove(PMEMobjpool *pop, TOID(struct skiplist_map_node) map, uint64_t key) { PMEMoid ret = OID_NULL; #ifdef GET_NDP_BREAKDOWN ulogCycles = 0; waitCycles = 0; #endif #ifdef GET_NDP_PERFORMENCE uint64_t btreetxCycles = 0; uint64_t endCycles, startCycles; for(int i=0;i<RUN_COUNT;i++){ #endif TOID(struct skiplist_map_node) path[SKIPLIST_LEVELS_NUM]; TOID(struct skiplist_map_node) to_remove; #ifdef GET_NDP_PERFORMENCE startCycles = getCycle(); #endif TX_BEGIN(pop) { skiplist_map_find(key, map, path); to_remove = D_RO(path[0])->next[0]; if (!TOID_EQUALS(to_remove, NULL_NODE) && D_RO(to_remove)->entry.key == key) { ret = D_RO(to_remove)->entry.value; skiplist_map_remove_node(path); } } TX_ONABORT { ret = OID_NULL; } TX_END #ifdef GET_NDP_PERFORMENCE endCycles = getCycle(); btreetxCycles += endCycles - startCycles; } double totTime = ((double)btreetxCycles)/2000000000; 
printf("ctree TX/s = %f\nctree tx total time = %f\n",RUN_COUNT/totTime,totTime); #endif #ifdef GET_NDP_BREAKDOWN printf("ctree tx cmd issue total time = %f\n", (((double)ulogCycles)/2000000000)); printf("ctree tx total wait time = %f\n", (((double)waitCycles)/2000000000)); #endif return ret; } /* * skiplist_map_get -- searches for a value of the key */ PMEMoid skiplist_map_get(PMEMobjpool *pop, TOID(struct skiplist_map_node) map, uint64_t key) { PMEMoid ret = OID_NULL; TOID(struct skiplist_map_node) path[SKIPLIST_LEVELS_NUM], found; skiplist_map_find(key, map, path); found = D_RO(path[0])->next[0]; if (!TOID_EQUALS(found, NULL_NODE) && D_RO(found)->entry.key == key) { ret = D_RO(found)->entry.value; } return ret; } /* * skiplist_map_lookup -- searches if a key exists */ int skiplist_map_lookup(PMEMobjpool *pop, TOID(struct skiplist_map_node) map, uint64_t key) { int ret = 0; TOID(struct skiplist_map_node) path[SKIPLIST_LEVELS_NUM], found; skiplist_map_find(key, map, path); found = D_RO(path[0])->next[0]; if (!TOID_EQUALS(found, NULL_NODE) && D_RO(found)->entry.key == key) { ret = 1; } return ret; } /* * skiplist_map_foreach -- calls function for each node on a list */ int skiplist_map_foreach(PMEMobjpool *pop, TOID(struct skiplist_map_node) map, int (*cb)(uint64_t key, PMEMoid value, void *arg), void *arg) { TOID(struct skiplist_map_node) next = map; while (!TOID_EQUALS(D_RO(next)->next[0], NULL_NODE)) { next = D_RO(next)->next[0]; cb(D_RO(next)->entry.key, D_RO(next)->entry.value, arg); } return 0; } /* * skiplist_map_is_empty -- checks whether the list map is empty */ int skiplist_map_is_empty(PMEMobjpool *pop, TOID(struct skiplist_map_node) map) { return TOID_IS_NULL(D_RO(map)->next[0]); } /* * skiplist_map_check -- check if given persistent object is a skiplist */ int skiplist_map_check(PMEMobjpool *pop, TOID(struct skiplist_map_node) map) { return TOID_IS_NULL(map) || !TOID_VALID(map); }
8,913
23.899441
83
c
null
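A minimal usage sketch for the skiplist above. It assumes pop is an open pool, map was created earlier with skiplist_map_create() into a persistent location (the map examples keep the handle in the pool root object), and value_oid is an already-allocated object:

#include <libpmemobj.h>
#include "skiplist_map.h"

static void
skiplist_demo(PMEMobjpool *pop, TOID(struct skiplist_map_node) map,
	PMEMoid value_oid)
{
	skiplist_map_insert(pop, map, 42, value_oid);	/* key 42 -> value */

	if (skiplist_map_lookup(pop, map, 42))		/* 1 if the key exists */
		(void)skiplist_map_get(pop, map, 42);	/* OID_NULL if absent */

	skiplist_map_remove_free(pop, map, 42);		/* detach and free the value */
}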
NearPMSW-main/nearpm/shadow/pmdk-sd/src/examples/libpmemobj/list_map/skiplist_map.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2016, Intel Corporation */ /* * skiplist_map.h -- sorted list collection implementation */ #ifndef SKIPLIST_MAP_H #define SKIPLIST_MAP_H #include <libpmemobj.h> #ifndef SKIPLIST_MAP_TYPE_OFFSET #define SKIPLIST_MAP_TYPE_OFFSET 2020 #endif struct skiplist_map_node; TOID_DECLARE(struct skiplist_map_node, SKIPLIST_MAP_TYPE_OFFSET + 0); int skiplist_map_check(PMEMobjpool *pop, TOID(struct skiplist_map_node) map); int skiplist_map_create(PMEMobjpool *pop, TOID(struct skiplist_map_node) *map, void *arg); int skiplist_map_destroy(PMEMobjpool *pop, TOID(struct skiplist_map_node) *map); int skiplist_map_insert(PMEMobjpool *pop, TOID(struct skiplist_map_node) map, uint64_t key, PMEMoid value); int skiplist_map_insert_new(PMEMobjpool *pop, TOID(struct skiplist_map_node) map, uint64_t key, size_t size, unsigned type_num, void (*constructor)(PMEMobjpool *pop, void *ptr, void *arg), void *arg); PMEMoid skiplist_map_remove(PMEMobjpool *pop, TOID(struct skiplist_map_node) map, uint64_t key); int skiplist_map_remove_free(PMEMobjpool *pop, TOID(struct skiplist_map_node) map, uint64_t key); int skiplist_map_clear(PMEMobjpool *pop, TOID(struct skiplist_map_node) map); PMEMoid skiplist_map_get(PMEMobjpool *pop, TOID(struct skiplist_map_node) map, uint64_t key); int skiplist_map_lookup(PMEMobjpool *pop, TOID(struct skiplist_map_node) map, uint64_t key); int skiplist_map_foreach(PMEMobjpool *pop, TOID(struct skiplist_map_node) map, int (*cb)(uint64_t key, PMEMoid value, void *arg), void *arg); int skiplist_map_is_empty(PMEMobjpool *pop, TOID(struct skiplist_map_node) map); #endif /* SKIPLIST_MAP_H */
1,688
36.533333
80
h
null
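The foreach function declared above takes a user callback. A short sketch of that callback contract (printing only the key; the value layout is application-defined):

#include <inttypes.h>
#include <stdio.h>
#include <libpmemobj.h>

/* invoked once per key/value pair by skiplist_map_foreach() */
static int
print_key(uint64_t key, PMEMoid value, void *arg)
{
	(void)value;
	(void)arg;
	printf("%" PRIu64 "\n", key);
	return 0;
}

/* usage: skiplist_map_foreach(pop, map, print_key, NULL); */

Note that skiplist_map_foreach() in skiplist_map.c ignores the callback's return value, while the hashmap foreach implementations shown later stop iterating on a non-zero return.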
NearPMSW-main/nearpm/shadow/pmdk-sd/src/examples/libpmemobj/hashmap/hashmap_tx.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2017, Intel Corporation */ #ifndef HASHMAP_TX_H #define HASHMAP_TX_H #include <stddef.h> #include <stdint.h> #include <hashmap.h> #include <libpmemobj.h> #ifndef HASHMAP_TX_TYPE_OFFSET #define HASHMAP_TX_TYPE_OFFSET 1004 #endif struct hashmap_tx; TOID_DECLARE(struct hashmap_tx, HASHMAP_TX_TYPE_OFFSET + 0); int hm_tx_check(PMEMobjpool *pop, TOID(struct hashmap_tx) hashmap); int hm_tx_create(PMEMobjpool *pop, TOID(struct hashmap_tx) *map, void *arg); int hm_tx_init(PMEMobjpool *pop, TOID(struct hashmap_tx) hashmap); int hm_tx_insert(PMEMobjpool *pop, TOID(struct hashmap_tx) hashmap, uint64_t key, PMEMoid value); PMEMoid hm_tx_remove(PMEMobjpool *pop, TOID(struct hashmap_tx) hashmap, uint64_t key); PMEMoid hm_tx_get(PMEMobjpool *pop, TOID(struct hashmap_tx) hashmap, uint64_t key); int hm_tx_lookup(PMEMobjpool *pop, TOID(struct hashmap_tx) hashmap, uint64_t key); int hm_tx_foreach(PMEMobjpool *pop, TOID(struct hashmap_tx) hashmap, int (*cb)(uint64_t key, PMEMoid value, void *arg), void *arg); size_t hm_tx_count(PMEMobjpool *pop, TOID(struct hashmap_tx) hashmap); int hm_tx_cmd(PMEMobjpool *pop, TOID(struct hashmap_tx) hashmap, unsigned cmd, uint64_t arg); #endif /* HASHMAP_TX_H */
1,270
34.305556
76
h
null
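A usage sketch for the transactional hashmap API declared above, assuming pop is an open pool, hashmap was created with hm_tx_create() into a root-resident TOID, and val is an allocated persistent object:

#include <stdio.h>
#include <libpmemobj.h>
#include "hashmap_tx.h"

static void
hm_tx_demo(PMEMobjpool *pop, TOID(struct hashmap_tx) hashmap, PMEMoid val)
{
	hm_tx_init(pop, hashmap);	/* reseed the RNG after pmemobj_open */

	if (hm_tx_insert(pop, hashmap, 7, val) == 0)	/* 1 = key existed, -1 = error */
		printf("inserted, count = %zu\n", hm_tx_count(pop, hashmap));

	PMEMoid got = hm_tx_get(pop, hashmap, 7);	/* OID_NULL if absent */
	(void)got;

	hm_tx_remove(pop, hashmap, 7);	/* returns the removed value */
}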
NearPMSW-main/nearpm/shadow/pmdk-sd/src/examples/libpmemobj/hashmap/hashmap_rp.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2018, Intel Corporation */ #ifndef HASHMAP_RP_H #define HASHMAP_RP_H #include <stddef.h> #include <stdint.h> #include <hashmap.h> #include <libpmemobj.h> #ifndef HASHMAP_RP_TYPE_OFFSET #define HASHMAP_RP_TYPE_OFFSET 1008 #endif /* Flags to indicate if insertion is being made during rebuild process */ #define HASHMAP_RP_REBUILD 1 #define HASHMAP_RP_NO_REBUILD 0 /* Initial number of entries for hashamap_rp */ #define INIT_ENTRIES_NUM_RP 16 /* Load factor to indicate resize threshold */ #define HASHMAP_RP_LOAD_FACTOR 0.5f /* Maximum number of swaps allowed during single insertion */ #define HASHMAP_RP_MAX_SWAPS 150 /* Size of an action array used during single insertion */ #define HASHMAP_RP_MAX_ACTIONS (4 * HASHMAP_RP_MAX_SWAPS + 5) struct hashmap_rp; TOID_DECLARE(struct hashmap_rp, HASHMAP_RP_TYPE_OFFSET + 0); int hm_rp_check(PMEMobjpool *pop, TOID(struct hashmap_rp) hashmap); int hm_rp_create(PMEMobjpool *pop, TOID(struct hashmap_rp) *map, void *arg); int hm_rp_init(PMEMobjpool *pop, TOID(struct hashmap_rp) hashmap); int hm_rp_insert(PMEMobjpool *pop, TOID(struct hashmap_rp) hashmap, uint64_t key, PMEMoid value); PMEMoid hm_rp_remove(PMEMobjpool *pop, TOID(struct hashmap_rp) hashmap, uint64_t key); PMEMoid hm_rp_get(PMEMobjpool *pop, TOID(struct hashmap_rp) hashmap, uint64_t key); int hm_rp_lookup(PMEMobjpool *pop, TOID(struct hashmap_rp) hashmap, uint64_t key); int hm_rp_foreach(PMEMobjpool *pop, TOID(struct hashmap_rp) hashmap, int (*cb)(uint64_t key, PMEMoid value, void *arg), void *arg); size_t hm_rp_count(PMEMobjpool *pop, TOID(struct hashmap_rp) hashmap); int hm_rp_cmd(PMEMobjpool *pop, TOID(struct hashmap_rp) hashmap, unsigned cmd, uint64_t arg); #endif /* HASHMAP_RP_H */
1,780
36.104167
76
h
null
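The sizing of the action array above follows from the worst case of a single insertion in hashmap_rp.c: every swap publishes four pmemobj_set_value() actions (key, the two halves of the PMEMoid value, and the hash), and the final placement needs a handful more, including one for the element counter. A one-line compile-time check of that relationship:

#include "hashmap_rp.h"

_Static_assert(HASHMAP_RP_MAX_ACTIONS == 4 * HASHMAP_RP_MAX_SWAPS + 5,
	"action array must cover the worst-case swap chain");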
NearPMSW-main/nearpm/shadow/pmdk-sd/src/examples/libpmemobj/hashmap/hashmap_internal.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2017, Intel Corporation */ #ifndef HASHSET_INTERNAL_H #define HASHSET_INTERNAL_H /* large prime number used as a hashing function coefficient */ #define HASH_FUNC_COEFF_P 32212254719ULL /* initial number of buckets */ #define INIT_BUCKETS_NUM 10 /* number of values in a bucket which trigger hashtable rebuild check */ #define MIN_HASHSET_THRESHOLD 5 /* number of values in a bucket which force hashtable rebuild */ #define MAX_HASHSET_THRESHOLD 10 #endif
521
25.1
72
h
null
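The two thresholds above are combined with the bucket count to decide when to grow the table; hashmap_atomic.c (shown further down) performs exactly this check after each insert. A small sketch of that predicate:

#include <stdint.h>
#include <stddef.h>
#include "hashmap_internal.h"

/* grow when one chain gets long, or when the map is clearly over-full */
static int
needs_rebuild(size_t chain_len, uint64_t count, size_t nbuckets)
{
	return chain_len > MAX_HASHSET_THRESHOLD ||
	    (chain_len > MIN_HASHSET_THRESHOLD && count > 2 * nbuckets);
}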
NearPMSW-main/nearpm/shadow/pmdk-sd/src/examples/libpmemobj/hashmap/hashmap_tx.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2019, Intel Corporation */ /* integer hash set implementation which uses only transaction APIs */ #include <stdlib.h> #include <stdio.h> #include <errno.h> #include <inttypes.h> #include <libpmemobj.h> #include "hashmap_tx.h" #include "hashmap_internal.h" static inline uint64_t getCycle(){ uint32_t cycles_high, cycles_low, pid; asm volatile ("RDTSCP\n\t" // rdtscp into eax and edx "mov %%edx, %0\n\t" "mov %%eax, %1\n\t" "mov %%ecx, %2\n\t" :"=r" (cycles_high), "=r" (cycles_low), "=r" (pid) //store in vars :// no input :"%eax", "%edx", "%ecx" // clobbered by rdtscp ); return((uint64_t)cycles_high << 32) | cycles_low; } /* layout definition */ TOID_DECLARE(struct buckets, HASHMAP_TX_TYPE_OFFSET + 1); TOID_DECLARE(struct entry, HASHMAP_TX_TYPE_OFFSET + 2); struct entry { uint64_t key; PMEMoid value; /* next entry list pointer */ TOID(struct entry) next; }; struct buckets { /* number of buckets */ size_t nbuckets; /* array of lists */ TOID(struct entry) bucket[]; }; struct hashmap_tx { /* random number generator seed */ uint32_t seed; /* hash function coefficients */ uint32_t hash_fun_a; uint32_t hash_fun_b; uint64_t hash_fun_p; /* number of values inserted */ uint64_t count; /* buckets */ TOID(struct buckets) buckets; }; /* * create_hashmap -- hashmap initializer */ static void create_hashmap(PMEMobjpool *pop, TOID(struct hashmap_tx) hashmap, uint32_t seed) { size_t len = INIT_BUCKETS_NUM; size_t sz = sizeof(struct buckets) + len * sizeof(TOID(struct entry)); TX_BEGIN(pop) { TX_ADD(hashmap); D_RW(hashmap)->seed = seed; do { D_RW(hashmap)->hash_fun_a = (uint32_t)rand(); } while (D_RW(hashmap)->hash_fun_a == 0); D_RW(hashmap)->hash_fun_b = (uint32_t)rand(); D_RW(hashmap)->hash_fun_p = HASH_FUNC_COEFF_P; D_RW(hashmap)->buckets = TX_ZALLOC(struct buckets, sz); D_RW(D_RW(hashmap)->buckets)->nbuckets = len; } TX_ONABORT { fprintf(stderr, "%s: transaction aborted: %s\n", __func__, pmemobj_errormsg()); abort(); } TX_END } /* * hash -- the simplest hashing function, * see https://en.wikipedia.org/wiki/Universal_hashing#Hashing_integers */ static uint64_t hash(const TOID(struct hashmap_tx) *hashmap, const TOID(struct buckets) *buckets, uint64_t value) { uint32_t a = D_RO(*hashmap)->hash_fun_a; uint32_t b = D_RO(*hashmap)->hash_fun_b; uint64_t p = D_RO(*hashmap)->hash_fun_p; size_t len = D_RO(*buckets)->nbuckets; return ((a * value + b) % p) % len; } /* * hm_tx_rebuild -- rebuilds the hashmap with a new number of buckets */ static void hm_tx_rebuild(PMEMobjpool *pop, TOID(struct hashmap_tx) hashmap, size_t new_len) { TOID(struct buckets) buckets_old = D_RO(hashmap)->buckets; if (new_len == 0) new_len = D_RO(buckets_old)->nbuckets; size_t sz_old = sizeof(struct buckets) + D_RO(buckets_old)->nbuckets * sizeof(TOID(struct entry)); size_t sz_new = sizeof(struct buckets) + new_len * sizeof(TOID(struct entry)); TX_BEGIN(pop) { TX_ADD_FIELD(hashmap, buckets); TOID(struct buckets) buckets_new = TX_ZALLOC(struct buckets, sz_new); D_RW(buckets_new)->nbuckets = new_len; pmemobj_tx_add_range(buckets_old.oid, 0, sz_old); for (size_t i = 0; i < D_RO(buckets_old)->nbuckets; ++i) { while (!TOID_IS_NULL(D_RO(buckets_old)->bucket[i])) { TOID(struct entry) en = D_RO(buckets_old)->bucket[i]; uint64_t h = hash(&hashmap, &buckets_new, D_RO(en)->key); D_RW(buckets_old)->bucket[i] = D_RO(en)->next; TX_ADD_FIELD(en, next); D_RW(en)->next = D_RO(buckets_new)->bucket[h]; D_RW(buckets_new)->bucket[h] = en; } } D_RW(hashmap)->buckets = buckets_new; TX_FREE(buckets_old); } 
TX_ONABORT { fprintf(stderr, "%s: transaction aborted: %s\n", __func__, pmemobj_errormsg()); /* * We don't need to do anything here, because everything is * consistent. The only thing affected is performance. */ } TX_END } /* * hm_tx_insert -- inserts specified value into the hashmap, * returns: * - 0 if successful, * - 1 if value already existed, * - -1 if something bad happened */ #ifdef GET_NDP_BREAKDOWN uint64_t ulogCycles; uint64_t waitCycles; uint64_t resetCycles; #endif int hm_tx_insert(PMEMobjpool *pop, TOID(struct hashmap_tx) hashmap, uint64_t key, PMEMoid value) { int ret = 0; TOID(struct buckets) buckets = D_RO(hashmap)->buckets; // TOID(struct entry) var; uint64_t h = hash(&hashmap, &buckets, key); int num = 0; /* for (var = D_RO(buckets)->bucket[h]; !TOID_IS_NULL(var); var = D_RO(var)->next) { if (D_RO(var)->key == key) return 1; num++; } */ #ifdef GET_NDP_BREAKDOWN ulogCycles = 0; waitCycles = 0; #endif #ifdef GET_NDP_PERFORMENCE uint64_t btreetxCycles = 0; uint64_t endCycles, startCycles; for(int i=0;i<RUN_COUNT;i++){ #endif //uint64_t startCycles1,endCycles1; #ifdef GET_NDP_PERFORMENCE startCycles = getCycle(); #endif TX_BEGIN(pop) { TX_ADD_FIELD(D_RO(hashmap)->buckets, bucket[h]); TX_ADD_FIELD(hashmap, count); TOID(struct entry) e = TX_NEW(struct entry); D_RW(e)->key = key; D_RW(e)->value = value; D_RW(e)->next = D_RO(buckets)->bucket[h]; D_RW(buckets)->bucket[h] = e; D_RW(hashmap)->count++; num++; //printf("parallel time = %f\n", (((double)(endCycles1 - startCycles1)))); } TX_ONABORT { fprintf(stderr, "transaction aborted: %s\n", pmemobj_errormsg()); ret = -1; } TX_END #ifdef GET_NDP_PERFORMENCE endCycles = getCycle(); btreetxCycles += endCycles - startCycles; } double totTime = ((double)btreetxCycles)/2000000000; printf("btree TX/s = %f\ntotal tx total time = %f\n", RUN_COUNT/totTime, totTime); #endif #ifdef GET_NDP_BREAKDOWN printf("total tx cmd issue time = %f\n", (((double)ulogCycles)/2000000000)); printf("total tx wait time = %f\n", (((double)waitCycles)/2000000000)); printf("total tx reset time = %f\n", (((double)resetCycles)/2000000000)); #endif if (ret) return ret; return 0; } /* * hm_tx_remove -- removes specified value from the hashmap, * returns: * - key's value if successful, * - OID_NULL if value didn't exist or if something bad happened */ PMEMoid hm_tx_remove(PMEMobjpool *pop, TOID(struct hashmap_tx) hashmap, uint64_t key) { int ret = 0; PMEMoid retoid; TOID(struct buckets) buckets = D_RO(hashmap)->buckets; TOID(struct entry) var, prev = TOID_NULL(struct entry); #ifdef GET_NDP_BREAKDOWN ulogCycles = 0; waitCycles = 0; #endif #ifdef GET_NDP_PERFORMENCE uint64_t btreetxCycles = 0; uint64_t endCycles, startCycles; for(int i=0;i<RUN_COUNT;i++){ #endif uint64_t h = hash(&hashmap, &buckets, key); for (var = D_RO(buckets)->bucket[h]; !TOID_IS_NULL(var); prev = var, var = D_RO(var)->next) { if (D_RO(var)->key == key) break; } if (TOID_IS_NULL(var)) return OID_NULL; retoid = D_RO(var)->value; #ifdef GET_NDP_PERFORMENCE startCycles = getCycle(); #endif TX_BEGIN(pop) { if (TOID_IS_NULL(prev)) TX_ADD_FIELD(D_RO(hashmap)->buckets, bucket[h]); else TX_ADD_FIELD(prev, next); TX_ADD_FIELD(hashmap, count); if (TOID_IS_NULL(prev)) D_RW(buckets)->bucket[h] = D_RO(var)->next; else D_RW(prev)->next = D_RO(var)->next; D_RW(hashmap)->count--; TX_FREE(var); } TX_ONABORT { fprintf(stderr, "transaction aborted: %s\n", pmemobj_errormsg()); ret = -1; } TX_END #ifdef GET_NDP_PERFORMENCE endCycles = getCycle(); btreetxCycles += endCycles - startCycles; } double totTime = 
((double)btreetxCycles)/2000000000; printf("btree TX/s = %f\ntotal tx total time = %f\n",RUN_COUNT/totTime,totTime); #endif #ifdef GET_NDP_BREAKDOWN printf("total tx ulog time = %f\n", (((double)ulogCycles)/2000000000)); printf("total tx wait time = %f\n", (((double)waitCycles)/2000000000)); #endif if (ret) return OID_NULL; if (D_RO(hashmap)->count < D_RO(buckets)->nbuckets) hm_tx_rebuild(pop, hashmap, D_RO(buckets)->nbuckets / 2); return retoid; } /* * hm_tx_foreach -- prints all values from the hashmap */ int hm_tx_foreach(PMEMobjpool *pop, TOID(struct hashmap_tx) hashmap, int (*cb)(uint64_t key, PMEMoid value, void *arg), void *arg) { TOID(struct buckets) buckets = D_RO(hashmap)->buckets; TOID(struct entry) var; int ret = 0; for (size_t i = 0; i < D_RO(buckets)->nbuckets; ++i) { if (TOID_IS_NULL(D_RO(buckets)->bucket[i])) continue; for (var = D_RO(buckets)->bucket[i]; !TOID_IS_NULL(var); var = D_RO(var)->next) { ret = cb(D_RO(var)->key, D_RO(var)->value, arg); if (ret) break; } } return ret; } /* * hm_tx_debug -- prints complete hashmap state */ static void hm_tx_debug(PMEMobjpool *pop, TOID(struct hashmap_tx) hashmap, FILE *out) { TOID(struct buckets) buckets = D_RO(hashmap)->buckets; TOID(struct entry) var; fprintf(out, "a: %u b: %u p: %" PRIu64 "\n", D_RO(hashmap)->hash_fun_a, D_RO(hashmap)->hash_fun_b, D_RO(hashmap)->hash_fun_p); fprintf(out, "count: %" PRIu64 ", buckets: %zu\n", D_RO(hashmap)->count, D_RO(buckets)->nbuckets); for (size_t i = 0; i < D_RO(buckets)->nbuckets; ++i) { if (TOID_IS_NULL(D_RO(buckets)->bucket[i])) continue; int num = 0; fprintf(out, "%zu: ", i); for (var = D_RO(buckets)->bucket[i]; !TOID_IS_NULL(var); var = D_RO(var)->next) { fprintf(out, "%" PRIu64 " ", D_RO(var)->key); num++; } fprintf(out, "(%d)\n", num); } } /* * hm_tx_get -- checks whether specified value is in the hashmap */ PMEMoid hm_tx_get(PMEMobjpool *pop, TOID(struct hashmap_tx) hashmap, uint64_t key) { TOID(struct buckets) buckets = D_RO(hashmap)->buckets; TOID(struct entry) var; uint64_t h = hash(&hashmap, &buckets, key); for (var = D_RO(buckets)->bucket[h]; !TOID_IS_NULL(var); var = D_RO(var)->next) if (D_RO(var)->key == key) return D_RO(var)->value; return OID_NULL; } /* * hm_tx_lookup -- checks whether specified value exists */ int hm_tx_lookup(PMEMobjpool *pop, TOID(struct hashmap_tx) hashmap, uint64_t key) { TOID(struct buckets) buckets = D_RO(hashmap)->buckets; TOID(struct entry) var; uint64_t h = hash(&hashmap, &buckets, key); for (var = D_RO(buckets)->bucket[h]; !TOID_IS_NULL(var); var = D_RO(var)->next) if (D_RO(var)->key == key) return 1; return 0; } /* * hm_tx_count -- returns number of elements */ size_t hm_tx_count(PMEMobjpool *pop, TOID(struct hashmap_tx) hashmap) { return D_RO(hashmap)->count; } /* * hm_tx_init -- recovers hashmap state, called after pmemobj_open */ int hm_tx_init(PMEMobjpool *pop, TOID(struct hashmap_tx) hashmap) { srand(D_RO(hashmap)->seed); return 0; } /* * hm_tx_create -- allocates new hashmap */ int hm_tx_create(PMEMobjpool *pop, TOID(struct hashmap_tx) *map, void *arg) { struct hashmap_args *args = (struct hashmap_args *)arg; int ret = 0; TX_BEGIN(pop) { TX_ADD_DIRECT(map); *map = TX_ZNEW(struct hashmap_tx); uint32_t seed = args ? 
args->seed : 0; create_hashmap(pop, *map, seed); } TX_ONABORT { ret = -1; } TX_END return ret; } /* * hm_tx_check -- checks if specified persistent object is an * instance of hashmap */ int hm_tx_check(PMEMobjpool *pop, TOID(struct hashmap_tx) hashmap) { return TOID_IS_NULL(hashmap) || !TOID_VALID(hashmap); } /* * hm_tx_cmd -- execute cmd for hashmap */ int hm_tx_cmd(PMEMobjpool *pop, TOID(struct hashmap_tx) hashmap, unsigned cmd, uint64_t arg) { switch (cmd) { case HASHMAP_CMD_REBUILD: hm_tx_rebuild(pop, hashmap, arg); return 0; case HASHMAP_CMD_DEBUG: if (!arg) return -EINVAL; hm_tx_debug(pop, hashmap, (FILE *)arg); return 0; default: return -EINVAL; } }
11,602
22.825462
83
c
null
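hashmap_tx.c hashes keys with the textbook universal scheme h(k) = ((a*k + b) mod p) mod nbuckets, where p is the large prime HASH_FUNC_COEFF_P and a, b are random per-hashmap coefficients. A standalone worked example (the coefficient values below are illustrative assumptions, not taken from a real pool):

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint32_t a = 123456789, b = 362436069;	/* normally drawn from rand() */
	uint64_t p = 32212254719ULL;		/* HASH_FUNC_COEFF_P */
	size_t nbuckets = 10;			/* INIT_BUCKETS_NUM */
	uint64_t key = 42;

	uint64_t h = ((a * key + b) % p) % nbuckets;
	printf("key %llu -> bucket %llu\n",
	    (unsigned long long)key, (unsigned long long)h);
	return 0;
}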
NearPMSW-main/nearpm/shadow/pmdk-sd/src/examples/libpmemobj/hashmap/hashmap_rp.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2018, Intel Corporation */ /* * Integer hash set implementation with open addressing Robin Hood collision * resolution which uses action.h reserve/publish API. */ #include <assert.h> #include <stdlib.h> #include <stdio.h> #include <errno.h> #include <inttypes.h> #include <libpmemobj.h> #include "hashmap_rp.h" #define TOMBSTONE_MASK (1ULL << 63) #ifdef DEBUG #define HM_ASSERT(cnd) assert(cnd) #else #define HM_ASSERT(cnd) #endif /* layout definition */ TOID_DECLARE(struct entry, HASHMAP_RP_TYPE_OFFSET + 1); struct entry { uint64_t key; PMEMoid value; uint64_t hash; }; struct add_entry { struct entry data; /* position index in hashmap, where data should be inserted/updated */ size_t pos; /* Action array to perform addition in set of actions */ struct pobj_action *actv; /* Action array index counter */ size_t actv_cnt; #ifdef DEBUG /* Swaps counter for current insertion. Enabled in debug mode */ int swaps; #endif }; struct hashmap_rp { /* number of values inserted */ uint64_t count; /* container capacity */ uint64_t capacity; /* resize threshold */ uint64_t resize_threshold; /* entries */ TOID(struct entry) entries; }; int *swaps_array = NULL; #ifdef DEBUG static inline int is_power_of_2(uint64_t v) { return v && !(v & (v - 1)); } #endif /* * entry_is_deleted -- checks 'tombstone' bit if hash is deleted */ static inline int entry_is_deleted(uint64_t hash) { return (hash & TOMBSTONE_MASK) > 0; } /* * entry_is_empty -- checks if entry is empty */ static inline int entry_is_empty(uint64_t hash) { return hash == 0 || entry_is_deleted(hash); } /* * increment_pos -- increment position index, skip 0 */ static uint64_t increment_pos(const struct hashmap_rp *hashmap, uint64_t pos) { HM_ASSERT(is_power_of_2(hashmap->capacity)); pos = (pos + 1) & (hashmap->capacity - 1); return pos == 0 ? 1 : pos; } /* * probe_distance -- returns probe number, an indicator how far from * desired position given hash is stored in hashmap */ static uint64_t probe_distance(const struct hashmap_rp *hashmap, uint64_t hash_key, uint64_t slot_index) { uint64_t capacity = hashmap->capacity; HM_ASSERT(is_power_of_2(hashmap->capacity)); return (int)(slot_index + capacity - hash_key) & (capacity - 1); } /* * hash -- hash function based on Austin Appleby MurmurHash3 64-bit finalizer. * Returned value is modified to work with special values for unused and * and deleted hashes. */ static uint64_t hash(const struct hashmap_rp *hashmap, uint64_t key) { key ^= key >> 33; key *= 0xff51afd7ed558ccd; key ^= key >> 33; key *= 0xc4ceb9fe1a85ec53; key ^= key >> 33; HM_ASSERT(is_power_of_2(hashmap->capacity)); key &= hashmap->capacity - 1; /* first, 'tombstone' bit is used to indicate deleted item */ key &= ~TOMBSTONE_MASK; /* * Ensure that we never return 0 as a hash, since we use 0 to * indicate that element has never been used at all. */ return key == 0 ? 
1 : key; } /* * hashmap_create -- hashmap initializer */ static void hashmap_create(PMEMobjpool *pop, TOID(struct hashmap_rp) *hashmap_p, uint32_t seed) { struct pobj_action actv[4]; size_t actv_cnt = 0; TOID(struct hashmap_rp) hashmap = POBJ_RESERVE_NEW(pop, struct hashmap_rp, &actv[actv_cnt]); if (TOID_IS_NULL(hashmap)) goto reserve_err; actv_cnt++; D_RW(hashmap)->count = 0; D_RW(hashmap)->capacity = INIT_ENTRIES_NUM_RP; D_RW(hashmap)->resize_threshold = (uint64_t)(INIT_ENTRIES_NUM_RP * HASHMAP_RP_LOAD_FACTOR); size_t sz = sizeof(struct entry) * D_RO(hashmap)->capacity; /* init entries with zero in order to track unused hashes */ D_RW(hashmap)->entries = POBJ_XRESERVE_ALLOC(pop, struct entry, sz, &actv[actv_cnt], POBJ_XALLOC_ZERO); if (TOID_IS_NULL(D_RO(hashmap)->entries)) goto reserve_err; actv_cnt++; pmemobj_persist(pop, D_RW(hashmap), sizeof(struct hashmap_rp)); pmemobj_set_value(pop, &actv[actv_cnt++], &hashmap_p->oid.pool_uuid_lo, hashmap.oid.pool_uuid_lo); pmemobj_set_value(pop, &actv[actv_cnt++], &hashmap_p->oid.off, hashmap.oid.off); pmemobj_publish(pop, actv, actv_cnt); #ifdef DEBUG swaps_array = (int *)calloc(INIT_ENTRIES_NUM_RP, sizeof(int)); if (!swaps_array) abort(); #endif return; reserve_err: fprintf(stderr, "hashmap alloc failed: %s\n", pmemobj_errormsg()); pmemobj_cancel(pop, actv, actv_cnt); abort(); } /* * entry_update -- updates entry in given hashmap with given arguments */ static void entry_update(PMEMobjpool *pop, struct hashmap_rp *hashmap, struct add_entry *args, int rebuild) { HM_ASSERT(HASHMAP_RP_MAX_ACTIONS > args->actv_cnt + 4); struct entry *entry_p = D_RW(hashmap->entries); entry_p += args->pos; if (rebuild == HASHMAP_RP_REBUILD) { entry_p->key = args->data.key; entry_p->value = args->data.value; entry_p->hash = args->data.hash; } else { pmemobj_set_value(pop, args->actv + args->actv_cnt++, &entry_p->key, args->data.key); pmemobj_set_value(pop, args->actv + args->actv_cnt++, &entry_p->value.pool_uuid_lo, args->data.value.pool_uuid_lo); pmemobj_set_value(pop, args->actv + args->actv_cnt++, &entry_p->value.off, args->data.value.off); pmemobj_set_value(pop, args->actv + args->actv_cnt++, &entry_p->hash, args->data.hash); } #ifdef DEBUG assert(sizeof(swaps_array) / sizeof(swaps_array[0]) > args->pos); swaps_array[args->pos] = args->swaps; #endif } /* * entry_add -- increments given hashmap's elements counter and calls * entry_update */ static void entry_add(PMEMobjpool *pop, struct hashmap_rp *hashmap, struct add_entry *args, int rebuild) { HM_ASSERT(HASHMAP_RP_MAX_ACTIONS > args->actv_cnt + 1); if (rebuild == HASHMAP_RP_REBUILD) hashmap->count++; else { pmemobj_set_value(pop, args->actv + args->actv_cnt++, &hashmap->count, hashmap->count + 1); } entry_update(pop, hashmap, args, rebuild); } /* * insert_helper -- inserts specified value into the hashmap * If function was called during rebuild process, no redo logs will be used. 
* returns: * - 0 if successful, * - 1 if value already existed * - -1 on error */ static int insert_helper(PMEMobjpool *pop, struct hashmap_rp *hashmap, uint64_t key, PMEMoid value, int rebuild) { HM_ASSERT(hashmap->count + 1 < hashmap->resize_threshold); struct pobj_action actv[HASHMAP_RP_MAX_ACTIONS]; struct add_entry args; args.data.key = key; args.data.value = value; args.data.hash = hash(hashmap, key); args.pos = args.data.hash; if (rebuild != HASHMAP_RP_REBUILD) { args.actv = actv; args.actv_cnt = 0; } uint64_t dist = 0; struct entry *entry_p = NULL; #ifdef DEBUG int swaps = 0; #endif for (int n = 0; n < HASHMAP_RP_MAX_SWAPS; ++n) { entry_p = D_RW(hashmap->entries); entry_p += args.pos; #ifdef DEBUG args.swaps = swaps; #endif /* Case 1: key already exists, override value */ if (!entry_is_empty(entry_p->hash) && entry_p->key == args.data.key) { entry_update(pop, hashmap, &args, rebuild); if (rebuild != HASHMAP_RP_REBUILD) pmemobj_publish(pop, args.actv, args.actv_cnt); return 1; } /* Case 2: slot is empty from the beginning */ if (entry_p->hash == 0) { entry_add(pop, hashmap, &args, rebuild); if (rebuild != HASHMAP_RP_REBUILD) pmemobj_publish(pop, args.actv, args.actv_cnt); return 0; } /* * Case 3: existing element (or tombstone) has probed less than * current element. Swap them (or put into tombstone slot) and * keep going to find another slot for that element. */ uint64_t existing_dist = probe_distance(hashmap, entry_p->hash, args.pos); if (existing_dist < dist) { if (entry_is_deleted(entry_p->hash)) { entry_add(pop, hashmap, &args, rebuild); if (rebuild != HASHMAP_RP_REBUILD) pmemobj_publish(pop, args.actv, args.actv_cnt); return 0; } struct entry temp = *entry_p; entry_update(pop, hashmap, &args, rebuild); args.data = temp; #ifdef DEBUG swaps++; #endif dist = existing_dist; } /* * Case 4: increment slot number and probe counter, keep going * to find free slot */ args.pos = increment_pos(hashmap, args.pos); dist += 1; } fprintf(stderr, "insertion requires too many swaps\n"); if (rebuild != HASHMAP_RP_REBUILD) pmemobj_cancel(pop, args.actv, args.actv_cnt); return -1; } /* * index_lookup -- checks if given key exists in hashmap. * Returns index number if key was found, 0 otherwise. */ static uint64_t index_lookup(const struct hashmap_rp *hashmap, uint64_t key) { const uint64_t hash_lookup = hash(hashmap, key); uint64_t pos = hash_lookup; uint64_t dist = 0; const struct entry *entry_p = NULL; do { entry_p = D_RO(hashmap->entries); entry_p += pos; if (entry_p->hash == hash_lookup && entry_p->key == key) return pos; pos = increment_pos(hashmap, pos); } while (entry_p->hash != 0 && (dist++) <= probe_distance(hashmap, entry_p->hash, pos) - 1); return 0; } /* * entries_cache -- cache entries from second argument in entries from first * argument */ static int entries_cache(PMEMobjpool *pop, struct hashmap_rp *dest, const struct hashmap_rp *src) { const struct entry *e_begin = D_RO(src->entries); const struct entry *e_end = e_begin + src->capacity; for (const struct entry *e = e_begin; e != e_end; ++e) { if (entry_is_empty(e->hash)) continue; if (insert_helper(pop, dest, e->key, e->value, HASHMAP_RP_REBUILD) == -1) return -1; } HM_ASSERT(src->count == dest->count); return 0; } /* * hm_rp_rebuild -- rebuilds the hashmap with a new capacity. * Returns 0 on success, -1 otherwise. 
*/ static int hm_rp_rebuild(PMEMobjpool *pop, TOID(struct hashmap_rp) hashmap, size_t capacity_new) { /* * We will need 6 actions: * - 1 action to set new capacity * - 1 action to set new resize threshold * - 1 action to alloc memory for new entries * - 1 action to free old entries * - 2 actions to set new oid pointing to new entries */ struct pobj_action actv[6]; size_t actv_cnt = 0; size_t sz_alloc = sizeof(struct entry) * capacity_new; uint64_t resize_threshold_new = (uint64_t)(capacity_new * HASHMAP_RP_LOAD_FACTOR); pmemobj_set_value(pop, &actv[actv_cnt++], &D_RW(hashmap)->capacity, capacity_new); pmemobj_set_value(pop, &actv[actv_cnt++], &D_RW(hashmap)->resize_threshold, resize_threshold_new); struct hashmap_rp hashmap_rebuild; hashmap_rebuild.count = 0; hashmap_rebuild.capacity = capacity_new; hashmap_rebuild.resize_threshold = resize_threshold_new; hashmap_rebuild.entries = POBJ_XRESERVE_ALLOC(pop, struct entry, sz_alloc, &actv[actv_cnt], POBJ_XALLOC_ZERO); if (TOID_IS_NULL(hashmap_rebuild.entries)) { fprintf(stderr, "hashmap rebuild failed: %s\n", pmemobj_errormsg()); goto rebuild_err; } actv_cnt++; #ifdef DEBUG free(swaps_array); swaps_array = (int *)calloc(capacity_new, sizeof(int)); if (!swaps_array) goto rebuild_err; #endif if (entries_cache(pop, &hashmap_rebuild, D_RW(hashmap)) == -1) goto rebuild_err; pmemobj_persist(pop, D_RW(hashmap_rebuild.entries), sz_alloc); pmemobj_defer_free(pop, D_RW(hashmap)->entries.oid, &actv[actv_cnt++]); pmemobj_set_value(pop, &actv[actv_cnt++], &D_RW(hashmap)->entries.oid.pool_uuid_lo, hashmap_rebuild.entries.oid.pool_uuid_lo); pmemobj_set_value(pop, &actv[actv_cnt++], &D_RW(hashmap)->entries.oid.off, hashmap_rebuild.entries.oid.off); HM_ASSERT(sizeof(actv) / sizeof(actv[0]) >= actv_cnt); pmemobj_publish(pop, actv, actv_cnt); return 0; rebuild_err: pmemobj_cancel(pop, actv, actv_cnt); #ifdef DEBUG free(swaps_array); #endif return -1; } /* * hm_rp_create -- initializes hashmap state, called after pmemobj_create */ int hm_rp_create(PMEMobjpool *pop, TOID(struct hashmap_rp) *map, void *arg) { struct hashmap_args *args = (struct hashmap_args *)arg; uint32_t seed = args ? args->seed : 0; hashmap_create(pop, map, seed); return 0; } /* * hm_rp_check -- checks if specified persistent object is an instance of * hashmap */ int hm_rp_check(PMEMobjpool *pop, TOID(struct hashmap_rp) hashmap) { return TOID_IS_NULL(hashmap) || !TOID_VALID(hashmap); } /* * hm_rp_init -- recovers hashmap state, called after pmemobj_open. * Since hashmap_rp is performing rebuild/insertion completely or not at all, * function is dummy and simply returns 0. */ int hm_rp_init(PMEMobjpool *pop, TOID(struct hashmap_rp) hashmap) { return 0; } /* * hm_rp_insert -- rebuilds hashmap if necessary and wraps insert_helper. 
* returns: * - 0 if successful, * - 1 if value already existed * - -1 if something bad happened */ int hm_rp_insert(PMEMobjpool *pop, TOID(struct hashmap_rp) hashmap, uint64_t key, PMEMoid value) { if (D_RO(hashmap)->count + 1 >= D_RO(hashmap)->resize_threshold) { uint64_t capacity_new = D_RO(hashmap)->capacity * 2; if (hm_rp_rebuild(pop, hashmap, capacity_new) != 0) return -1; } return insert_helper(pop, D_RW(hashmap), key, value, HASHMAP_RP_NO_REBUILD); } /* * hm_rp_remove -- removes specified key from the hashmap, * returns: * - key's value if successful, * - OID_NULL if value didn't exist or if something bad happened */ PMEMoid hm_rp_remove(PMEMobjpool *pop, TOID(struct hashmap_rp) hashmap, uint64_t key) { const uint64_t pos = index_lookup(D_RO(hashmap), key); if (pos == 0) return OID_NULL; struct entry *entry_p = D_RW(D_RW(hashmap)->entries); entry_p += pos; PMEMoid ret = entry_p->value; size_t actvcnt = 0; struct pobj_action actv[5]; pmemobj_set_value(pop, &actv[actvcnt++], &entry_p->hash, entry_p->hash | TOMBSTONE_MASK); pmemobj_set_value(pop, &actv[actvcnt++], &entry_p->value.pool_uuid_lo, 0); pmemobj_set_value(pop, &actv[actvcnt++], &entry_p->value.off, 0); pmemobj_set_value(pop, &actv[actvcnt++], &entry_p->key, 0); pmemobj_set_value(pop, &actv[actvcnt++], &D_RW(hashmap)->count, D_RW(hashmap)->count - 1); HM_ASSERT(sizeof(actv) / sizeof(actv[0]) >= actvcnt); pmemobj_publish(pop, actv, actvcnt); uint64_t reduced_threshold = (uint64_t) (((uint64_t)(D_RO(hashmap)->capacity / 2)) * HASHMAP_RP_LOAD_FACTOR); if (reduced_threshold >= INIT_ENTRIES_NUM_RP && D_RW(hashmap)->count < reduced_threshold && hm_rp_rebuild(pop, hashmap, D_RO(hashmap)->capacity / 2)) return OID_NULL; return ret; } /* * hm_rp_get -- checks whether specified key is in the hashmap. * Returns associated value if key exists, OID_NULL otherwise. */ PMEMoid hm_rp_get(PMEMobjpool *pop, TOID(struct hashmap_rp) hashmap, uint64_t key) { struct entry *entry_p = (struct entry *)pmemobj_direct(D_RW(hashmap)->entries.oid); uint64_t pos = index_lookup(D_RO(hashmap), key); return pos == 0 ? OID_NULL : (entry_p + pos)->value; } /* * hm_rp_lookup -- checks whether specified key is in the hashmap. * Returns 1 if key was found, 0 otherwise. 
*/ int hm_rp_lookup(PMEMobjpool *pop, TOID(struct hashmap_rp) hashmap, uint64_t key) { return index_lookup(D_RO(hashmap), key) != 0; } /* * hm_rp_foreach -- calls cb for all values from the hashmap */ int hm_rp_foreach(PMEMobjpool *pop, TOID(struct hashmap_rp) hashmap, int (*cb)(uint64_t key, PMEMoid value, void *arg), void *arg) { struct entry *entry_p = (struct entry *)pmemobj_direct(D_RO(hashmap)->entries.oid); int ret = 0; for (size_t i = 0; i < D_RO(hashmap)->capacity; ++i, ++entry_p) { uint64_t hash = entry_p->hash; if (entry_is_empty(hash)) continue; ret = cb(entry_p->key, entry_p->value, arg); if (ret) return ret; } return 0; } /* * hm_rp_debug -- prints complete hashmap state */ static void hm_rp_debug(PMEMobjpool *pop, TOID(struct hashmap_rp) hashmap, FILE *out) { #ifdef DEBUG fprintf(out, "debug: true, "); #endif fprintf(out, "capacity: %" PRIu64 ", count: %" PRIu64 "\n", D_RO(hashmap)->capacity, D_RO(hashmap)->count); struct entry *entry_p = D_RW((D_RW(hashmap)->entries)); for (size_t i = 0; i < D_RO(hashmap)->capacity; ++i, ++entry_p) { uint64_t hash = entry_p->hash; if (entry_is_empty(hash)) continue; uint64_t key = entry_p->key; #ifdef DEBUG fprintf(out, "%zu: %" PRIu64 " hash: %" PRIu64 " dist:%" PRIu32 " swaps:%u\n", i, key, hash, probe_distance(D_RO(hashmap), hash, i), swaps_array[i]); #else fprintf(out, "%zu: %" PRIu64 " dist:%" PRIu64 "\n", i, key, probe_distance(D_RO(hashmap), hash, i)); #endif } } /* * hm_rp_count -- returns number of elements */ size_t hm_rp_count(PMEMobjpool *pop, TOID(struct hashmap_rp) hashmap) { return D_RO(hashmap)->count; } /* * hm_rp_cmd -- execute cmd for hashmap */ int hm_rp_cmd(PMEMobjpool *pop, TOID(struct hashmap_rp) hashmap, unsigned cmd, uint64_t arg) { switch (cmd) { case HASHMAP_CMD_REBUILD: hm_rp_rebuild(pop, hashmap, D_RO(hashmap)->capacity); return 0; case HASHMAP_CMD_DEBUG: if (!arg) return -EINVAL; hm_rp_debug(pop, hashmap, (FILE *)arg); return 0; default: return -EINVAL; } }
16,890
23.338617
79
c
null
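For reference, the hash used by hashmap_rp.c is the MurmurHash3 64-bit finalizer folded into a power-of-two table, with 0 reserved for never-used slots and bit 63 reserved as the tombstone marker. A standalone version of just that function:

#include <stdint.h>

static uint64_t
rp_hash(uint64_t key, uint64_t capacity)	/* capacity must be a power of two */
{
	key ^= key >> 33;
	key *= 0xff51afd7ed558ccd;
	key ^= key >> 33;
	key *= 0xc4ceb9fe1a85ec53;
	key ^= key >> 33;

	key &= capacity - 1;		/* fold into table range */
	key &= ~(1ULL << 63);		/* keep the tombstone bit clear */
	return key == 0 ? 1 : key;	/* 0 means "slot never used" */
}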
NearPMSW-main/nearpm/shadow/pmdk-sd/src/examples/libpmemobj/hashmap/hashmap_atomic.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2017, Intel Corporation */ #ifndef HASHMAP_ATOMIC_H #define HASHMAP_ATOMIC_H #include <stddef.h> #include <stdint.h> #include <hashmap.h> #include <libpmemobj.h> #ifndef HASHMAP_ATOMIC_TYPE_OFFSET #define HASHMAP_ATOMIC_TYPE_OFFSET 1000 #endif struct hashmap_atomic; TOID_DECLARE(struct hashmap_atomic, HASHMAP_ATOMIC_TYPE_OFFSET + 0); int hm_atomic_check(PMEMobjpool *pop, TOID(struct hashmap_atomic) hashmap); int hm_atomic_create(PMEMobjpool *pop, TOID(struct hashmap_atomic) *map, void *arg); int hm_atomic_init(PMEMobjpool *pop, TOID(struct hashmap_atomic) hashmap); int hm_atomic_insert(PMEMobjpool *pop, TOID(struct hashmap_atomic) hashmap, uint64_t key, PMEMoid value); PMEMoid hm_atomic_remove(PMEMobjpool *pop, TOID(struct hashmap_atomic) hashmap, uint64_t key); PMEMoid hm_atomic_get(PMEMobjpool *pop, TOID(struct hashmap_atomic) hashmap, uint64_t key); int hm_atomic_lookup(PMEMobjpool *pop, TOID(struct hashmap_atomic) hashmap, uint64_t key); int hm_atomic_foreach(PMEMobjpool *pop, TOID(struct hashmap_atomic) hashmap, int (*cb)(uint64_t key, PMEMoid value, void *arg), void *arg); size_t hm_atomic_count(PMEMobjpool *pop, TOID(struct hashmap_atomic) hashmap); int hm_atomic_cmd(PMEMobjpool *pop, TOID(struct hashmap_atomic) hashmap, unsigned cmd, uint64_t arg); #endif /* HASHMAP_ATOMIC_H */
1,384
36.432432
79
h
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/examples/libpmemobj/hashmap/hashmap_atomic.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2018, Intel Corporation */ /* integer hash set implementation which uses only atomic APIs */ #include <stdlib.h> #include <stdio.h> #include <string.h> #include <errno.h> #include <inttypes.h> #include <libpmemobj.h> #include "hashmap_atomic.h" #include "hashmap_internal.h" /* layout definition */ TOID_DECLARE(struct buckets, HASHMAP_ATOMIC_TYPE_OFFSET + 1); TOID_DECLARE(struct entry, HASHMAP_ATOMIC_TYPE_OFFSET + 2); struct entry { uint64_t key; PMEMoid value; /* list pointer */ POBJ_LIST_ENTRY(struct entry) list; }; struct entry_args { uint64_t key; PMEMoid value; }; POBJ_LIST_HEAD(entries_head, struct entry); struct buckets { /* number of buckets */ size_t nbuckets; /* array of lists */ struct entries_head bucket[]; }; struct hashmap_atomic { /* random number generator seed */ uint32_t seed; /* hash function coefficients */ uint32_t hash_fun_a; uint32_t hash_fun_b; uint64_t hash_fun_p; /* number of values inserted */ uint64_t count; /* whether "count" should be updated */ uint32_t count_dirty; /* buckets */ TOID(struct buckets) buckets; /* buckets, used during rehashing, null otherwise */ TOID(struct buckets) buckets_tmp; }; /* * create_entry -- entry initializer */ static int create_entry(PMEMobjpool *pop, void *ptr, void *arg) { struct entry *e = (struct entry *)ptr; struct entry_args *args = (struct entry_args *)arg; e->key = args->key; e->value = args->value; memset(&e->list, 0, sizeof(e->list)); pmemobj_persist(pop, e, sizeof(*e)); return 0; } /* * create_buckets -- buckets initializer */ static int create_buckets(PMEMobjpool *pop, void *ptr, void *arg) { struct buckets *b = (struct buckets *)ptr; b->nbuckets = *((size_t *)arg); pmemobj_memset_persist(pop, &b->bucket, 0, b->nbuckets * sizeof(b->bucket[0])); pmemobj_persist(pop, &b->nbuckets, sizeof(b->nbuckets)); return 0; } /* * create_hashmap -- hashmap initializer */ static void create_hashmap(PMEMobjpool *pop, TOID(struct hashmap_atomic) hashmap, uint32_t seed) { D_RW(hashmap)->seed = seed; do { D_RW(hashmap)->hash_fun_a = (uint32_t)rand(); } while (D_RW(hashmap)->hash_fun_a == 0); D_RW(hashmap)->hash_fun_b = (uint32_t)rand(); D_RW(hashmap)->hash_fun_p = HASH_FUNC_COEFF_P; size_t len = INIT_BUCKETS_NUM; size_t sz = sizeof(struct buckets) + len * sizeof(struct entries_head); if (POBJ_ALLOC(pop, &D_RW(hashmap)->buckets, struct buckets, sz, create_buckets, &len)) { fprintf(stderr, "root alloc failed: %s\n", pmemobj_errormsg()); abort(); } pmemobj_persist(pop, D_RW(hashmap), sizeof(*D_RW(hashmap))); } /* * hash -- the simplest hashing function, * see https://en.wikipedia.org/wiki/Universal_hashing#Hashing_integers */ static uint64_t hash(const TOID(struct hashmap_atomic) *hashmap, const TOID(struct buckets) *buckets, uint64_t value) { uint32_t a = D_RO(*hashmap)->hash_fun_a; uint32_t b = D_RO(*hashmap)->hash_fun_b; uint64_t p = D_RO(*hashmap)->hash_fun_p; size_t len = D_RO(*buckets)->nbuckets; return ((a * value + b) % p) % len; } /* * hm_atomic_rebuild_finish -- finishes rebuild, assumes buckets_tmp is not null */ static void hm_atomic_rebuild_finish(PMEMobjpool *pop, TOID(struct hashmap_atomic) hashmap) { TOID(struct buckets) cur = D_RO(hashmap)->buckets; TOID(struct buckets) tmp = D_RO(hashmap)->buckets_tmp; for (size_t i = 0; i < D_RO(cur)->nbuckets; ++i) { while (!POBJ_LIST_EMPTY(&D_RO(cur)->bucket[i])) { TOID(struct entry) en = POBJ_LIST_FIRST(&D_RO(cur)->bucket[i]); uint64_t h = hash(&hashmap, &tmp, D_RO(en)->key); if (POBJ_LIST_MOVE_ELEMENT_HEAD(pop, 
&D_RW(cur)->bucket[i], &D_RW(tmp)->bucket[h], en, list, list)) { fprintf(stderr, "move failed: %s\n", pmemobj_errormsg()); abort(); } } } POBJ_FREE(&D_RO(hashmap)->buckets); D_RW(hashmap)->buckets = D_RO(hashmap)->buckets_tmp; pmemobj_persist(pop, &D_RW(hashmap)->buckets, sizeof(D_RW(hashmap)->buckets)); /* * We have to set offset manually instead of substituting OID_NULL, * because we won't be able to recover easily if crash happens after * pool_uuid_lo, but before offset is set. Another reason why everyone * should use transaction API. * See recovery process in hm_init and TOID_IS_NULL macro definition. */ D_RW(hashmap)->buckets_tmp.oid.off = 0; pmemobj_persist(pop, &D_RW(hashmap)->buckets_tmp, sizeof(D_RW(hashmap)->buckets_tmp)); } /* * hm_atomic_rebuild -- rebuilds the hashmap with a new number of buckets */ static void hm_atomic_rebuild(PMEMobjpool *pop, TOID(struct hashmap_atomic) hashmap, size_t new_len) { if (new_len == 0) new_len = D_RO(D_RO(hashmap)->buckets)->nbuckets; size_t sz = sizeof(struct buckets) + new_len * sizeof(struct entries_head); POBJ_ALLOC(pop, &D_RW(hashmap)->buckets_tmp, struct buckets, sz, create_buckets, &new_len); if (TOID_IS_NULL(D_RO(hashmap)->buckets_tmp)) { fprintf(stderr, "failed to allocate temporary space of size: %zu" ", %s\n", new_len, pmemobj_errormsg()); return; } hm_atomic_rebuild_finish(pop, hashmap); } /* * hm_atomic_insert -- inserts specified value into the hashmap, * returns: * - 0 if successful, * - 1 if value already existed, * - -1 if something bad happened */ int hm_atomic_insert(PMEMobjpool *pop, TOID(struct hashmap_atomic) hashmap, uint64_t key, PMEMoid value) { TOID(struct buckets) buckets = D_RO(hashmap)->buckets; TOID(struct entry) var; uint64_t h = hash(&hashmap, &buckets, key); int num = 0; POBJ_LIST_FOREACH(var, &D_RO(buckets)->bucket[h], list) { if (D_RO(var)->key == key) return 1; num++; } D_RW(hashmap)->count_dirty = 1; pmemobj_persist(pop, &D_RW(hashmap)->count_dirty, sizeof(D_RW(hashmap)->count_dirty)); struct entry_args args; args.key = key; args.value = value; PMEMoid oid = POBJ_LIST_INSERT_NEW_HEAD(pop, &D_RW(buckets)->bucket[h], list, sizeof(struct entry), create_entry, &args); if (OID_IS_NULL(oid)) { fprintf(stderr, "failed to allocate entry: %s\n", pmemobj_errormsg()); return -1; } D_RW(hashmap)->count++; pmemobj_persist(pop, &D_RW(hashmap)->count, sizeof(D_RW(hashmap)->count)); D_RW(hashmap)->count_dirty = 0; pmemobj_persist(pop, &D_RW(hashmap)->count_dirty, sizeof(D_RW(hashmap)->count_dirty)); num++; if (num > MAX_HASHSET_THRESHOLD || (num > MIN_HASHSET_THRESHOLD && D_RO(hashmap)->count > 2 * D_RO(buckets)->nbuckets)) hm_atomic_rebuild(pop, hashmap, D_RW(buckets)->nbuckets * 2); return 0; } /* * hm_atomic_remove -- removes specified value from the hashmap, * returns: * - key's value if successful, * - OID_NULL if value didn't exist or if something bad happened */ PMEMoid hm_atomic_remove(PMEMobjpool *pop, TOID(struct hashmap_atomic) hashmap, uint64_t key) { TOID(struct buckets) buckets = D_RO(hashmap)->buckets; TOID(struct entry) var; uint64_t h = hash(&hashmap, &buckets, key); POBJ_LIST_FOREACH(var, &D_RW(buckets)->bucket[h], list) { if (D_RO(var)->key == key) break; } if (TOID_IS_NULL(var)) return OID_NULL; D_RW(hashmap)->count_dirty = 1; pmemobj_persist(pop, &D_RW(hashmap)->count_dirty, sizeof(D_RW(hashmap)->count_dirty)); if (POBJ_LIST_REMOVE_FREE(pop, &D_RW(buckets)->bucket[h], var, list)) { fprintf(stderr, "list remove failed: %s\n", pmemobj_errormsg()); return OID_NULL; } D_RW(hashmap)->count--; 
pmemobj_persist(pop, &D_RW(hashmap)->count, sizeof(D_RW(hashmap)->count)); D_RW(hashmap)->count_dirty = 0; pmemobj_persist(pop, &D_RW(hashmap)->count_dirty, sizeof(D_RW(hashmap)->count_dirty)); if (D_RO(hashmap)->count < D_RO(buckets)->nbuckets) hm_atomic_rebuild(pop, hashmap, D_RO(buckets)->nbuckets / 2); return D_RO(var)->value; } /* * hm_atomic_foreach -- prints all values from the hashmap */ int hm_atomic_foreach(PMEMobjpool *pop, TOID(struct hashmap_atomic) hashmap, int (*cb)(uint64_t key, PMEMoid value, void *arg), void *arg) { TOID(struct buckets) buckets = D_RO(hashmap)->buckets; TOID(struct entry) var; int ret = 0; for (size_t i = 0; i < D_RO(buckets)->nbuckets; ++i) POBJ_LIST_FOREACH(var, &D_RO(buckets)->bucket[i], list) { ret = cb(D_RO(var)->key, D_RO(var)->value, arg); if (ret) return ret; } return 0; } /* * hm_atomic_debug -- prints complete hashmap state */ static void hm_atomic_debug(PMEMobjpool *pop, TOID(struct hashmap_atomic) hashmap, FILE *out) { TOID(struct buckets) buckets = D_RO(hashmap)->buckets; TOID(struct entry) var; fprintf(out, "a: %u b: %u p: %" PRIu64 "\n", D_RO(hashmap)->hash_fun_a, D_RO(hashmap)->hash_fun_b, D_RO(hashmap)->hash_fun_p); fprintf(out, "count: %" PRIu64 ", buckets: %zu\n", D_RO(hashmap)->count, D_RO(buckets)->nbuckets); for (size_t i = 0; i < D_RO(buckets)->nbuckets; ++i) { if (POBJ_LIST_EMPTY(&D_RO(buckets)->bucket[i])) continue; int num = 0; fprintf(out, "%zu: ", i); POBJ_LIST_FOREACH(var, &D_RO(buckets)->bucket[i], list) { fprintf(out, "%" PRIu64 " ", D_RO(var)->key); num++; } fprintf(out, "(%d)\n", num); } } /* * hm_atomic_get -- checks whether specified value is in the hashmap */ PMEMoid hm_atomic_get(PMEMobjpool *pop, TOID(struct hashmap_atomic) hashmap, uint64_t key) { TOID(struct buckets) buckets = D_RO(hashmap)->buckets; TOID(struct entry) var; uint64_t h = hash(&hashmap, &buckets, key); POBJ_LIST_FOREACH(var, &D_RO(buckets)->bucket[h], list) if (D_RO(var)->key == key) return D_RO(var)->value; return OID_NULL; } /* * hm_atomic_lookup -- checks whether specified value is in the hashmap */ int hm_atomic_lookup(PMEMobjpool *pop, TOID(struct hashmap_atomic) hashmap, uint64_t key) { TOID(struct buckets) buckets = D_RO(hashmap)->buckets; TOID(struct entry) var; uint64_t h = hash(&hashmap, &buckets, key); POBJ_LIST_FOREACH(var, &D_RO(buckets)->bucket[h], list) if (D_RO(var)->key == key) return 1; return 0; } /* * hm_atomic_create -- initializes hashmap state, called after pmemobj_create */ int hm_atomic_create(PMEMobjpool *pop, TOID(struct hashmap_atomic) *map, void *arg) { struct hashmap_args *args = (struct hashmap_args *)arg; uint32_t seed = args ? 
args->seed : 0; POBJ_ZNEW(pop, map, struct hashmap_atomic); create_hashmap(pop, *map, seed); return 0; } /* * hm_atomic_init -- recovers hashmap state, called after pmemobj_open */ int hm_atomic_init(PMEMobjpool *pop, TOID(struct hashmap_atomic) hashmap) { srand(D_RO(hashmap)->seed); /* handle rebuild interruption */ if (!TOID_IS_NULL(D_RO(hashmap)->buckets_tmp)) { printf("rebuild, previous attempt crashed\n"); if (TOID_EQUALS(D_RO(hashmap)->buckets, D_RO(hashmap)->buckets_tmp)) { /* see comment in hm_rebuild_finish */ D_RW(hashmap)->buckets_tmp.oid.off = 0; pmemobj_persist(pop, &D_RW(hashmap)->buckets_tmp, sizeof(D_RW(hashmap)->buckets_tmp)); } else if (TOID_IS_NULL(D_RW(hashmap)->buckets)) { D_RW(hashmap)->buckets = D_RW(hashmap)->buckets_tmp; pmemobj_persist(pop, &D_RW(hashmap)->buckets, sizeof(D_RW(hashmap)->buckets)); /* see comment in hm_rebuild_finish */ D_RW(hashmap)->buckets_tmp.oid.off = 0; pmemobj_persist(pop, &D_RW(hashmap)->buckets_tmp, sizeof(D_RW(hashmap)->buckets_tmp)); } else { hm_atomic_rebuild_finish(pop, hashmap); } } /* handle insert or remove interruption */ if (D_RO(hashmap)->count_dirty) { printf("count dirty, recalculating\n"); TOID(struct entry) var; TOID(struct buckets) buckets = D_RO(hashmap)->buckets; uint64_t cnt = 0; for (size_t i = 0; i < D_RO(buckets)->nbuckets; ++i) POBJ_LIST_FOREACH(var, &D_RO(buckets)->bucket[i], list) cnt++; printf("old count: %" PRIu64 ", new count: %" PRIu64 "\n", D_RO(hashmap)->count, cnt); D_RW(hashmap)->count = cnt; pmemobj_persist(pop, &D_RW(hashmap)->count, sizeof(D_RW(hashmap)->count)); D_RW(hashmap)->count_dirty = 0; pmemobj_persist(pop, &D_RW(hashmap)->count_dirty, sizeof(D_RW(hashmap)->count_dirty)); } return 0; } /* * hm_atomic_check -- checks if specified persistent object is an * instance of hashmap */ int hm_atomic_check(PMEMobjpool *pop, TOID(struct hashmap_atomic) hashmap) { return TOID_IS_NULL(hashmap) || !TOID_VALID(hashmap); } /* * hm_atomic_count -- returns number of elements */ size_t hm_atomic_count(PMEMobjpool *pop, TOID(struct hashmap_atomic) hashmap) { return D_RO(hashmap)->count; } /* * hm_atomic_cmd -- execute cmd for hashmap */ int hm_atomic_cmd(PMEMobjpool *pop, TOID(struct hashmap_atomic) hashmap, unsigned cmd, uint64_t arg) { switch (cmd) { case HASHMAP_CMD_REBUILD: hm_atomic_rebuild(pop, hashmap, arg); return 0; case HASHMAP_CMD_DEBUG: if (!arg) return -EINVAL; hm_atomic_debug(pop, hashmap, (FILE *)arg); return 0; default: return -EINVAL; } }
12,825
24.001949
80
c
null
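The record above stores the atomic-API hashmap example. Below is a minimal, hypothetical driver for its hm_atomic_* interface, shown only to illustrate the call sequence; the pool path, layout name and demo_root struct are assumptions, not part of the example set, and error handling is reduced to the bare minimum.

#include <stdio.h>
#include <libpmemobj.h>
#include "hashmap_atomic.h"

POBJ_LAYOUT_BEGIN(hm_demo);
POBJ_LAYOUT_ROOT(hm_demo, struct demo_root);
POBJ_LAYOUT_END(hm_demo);

struct demo_root {
	TOID(struct hashmap_atomic) map;	/* handle kept in the root object */
};

int
main(void)
{
	/* assumed pool location and minimal size; adjust for a real device */
	PMEMobjpool *pop = pmemobj_create("/mnt/pmem/hm_demo",
			POBJ_LAYOUT_NAME(hm_demo), PMEMOBJ_MIN_POOL, 0666);
	if (pop == NULL)
		return 1;

	TOID(struct demo_root) root = POBJ_ROOT(pop, struct demo_root);

	/* NULL args means seed 0, see hm_atomic_create() above */
	if (hm_atomic_create(pop, &D_RW(root)->map, NULL))
		return 1;

	hm_atomic_insert(pop, D_RO(root)->map, 1234, OID_NULL);
	printf("lookup(1234) = %d, count = %zu\n",
			hm_atomic_lookup(pop, D_RO(root)->map, 1234),
			hm_atomic_count(pop, D_RO(root)->map));

	pmemobj_close(pop);
	return 0;
}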
NearPMSW-main/nearpm/shadow/pmdk-sd/src/examples/libpmemobj/pmemlog/obj_pmemlog_simple.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2017, Intel Corporation */ /* * obj_pmemlog_simple.c -- alternate pmemlog implementation based on pmemobj * * usage: obj_pmemlog_simple [co] file [cmd[:param]...] * * c - create file * o - open file * * The "cmd" arguments match the pmemlog functions: * a - append * v - appendv * r - rewind * w - walk * n - nbyte * t - tell * "a", "w" and "v" require a parameter string(s) separated by a colon */ #include <ex_common.h> #include <sys/stat.h> #include <string.h> #include <stdio.h> #include <assert.h> #include <stdlib.h> #include <errno.h> #include "libpmemobj.h" #include "libpmem.h" #include "libpmemlog.h" #define USABLE_SIZE (9.0 / 10) #define MAX_POOL_SIZE (((size_t)1024 * 1024 * 1024 * 16)) #define POOL_SIZE ((size_t)(1024 * 1024 * 100)) POBJ_LAYOUT_BEGIN(obj_pmemlog_simple); POBJ_LAYOUT_ROOT(obj_pmemlog_simple, struct base); POBJ_LAYOUT_TOID(obj_pmemlog_simple, struct log); POBJ_LAYOUT_END(obj_pmemlog_simple); /* log entry header */ struct log_hdr { uint64_t write_offset; /* data write offset */ size_t data_size; /* size available for data */ }; /* struct log stores the entire log entry */ struct log { struct log_hdr hdr; char data[]; }; /* struct base has the lock and log OID */ struct base { PMEMrwlock rwlock; /* lock covering entire log */ TOID(struct log) log; }; /* * pmemblk_map -- (internal) read or initialize the log pool */ static int pmemlog_map(PMEMobjpool *pop, size_t fsize) { int retval = 0; TOID(struct base)bp; bp = POBJ_ROOT(pop, struct base); /* log already initialized */ if (!TOID_IS_NULL(D_RO(bp)->log)) return retval; size_t pool_size = (size_t)(fsize * USABLE_SIZE); /* max size of a single allocation is 16GB */ if (pool_size > MAX_POOL_SIZE) { errno = EINVAL; return 1; } TX_BEGIN(pop) { TX_ADD(bp); D_RW(bp)->log = TX_ZALLOC(struct log, pool_size); D_RW(D_RW(bp)->log)->hdr.data_size = pool_size - sizeof(struct log_hdr); } TX_ONABORT { retval = -1; } TX_END return retval; } /* * pmemlog_open -- pool open wrapper */ PMEMlogpool * pmemlog_open(const char *path) { PMEMobjpool *pop = pmemobj_open(path, POBJ_LAYOUT_NAME(obj_pmemlog_simple)); assert(pop != NULL); struct stat buf; if (stat(path, &buf)) { perror("stat"); return NULL; } return pmemlog_map(pop, buf.st_size) ? NULL : (PMEMlogpool *)pop; } /* * pmemlog_create -- pool create wrapper */ PMEMlogpool * pmemlog_create(const char *path, size_t poolsize, mode_t mode) { PMEMobjpool *pop = pmemobj_create(path, POBJ_LAYOUT_NAME(obj_pmemlog_simple), poolsize, mode); assert(pop != NULL); struct stat buf; if (stat(path, &buf)) { perror("stat"); return NULL; } return pmemlog_map(pop, buf.st_size) ? 
NULL : (PMEMlogpool *)pop; } /* * pool_close -- pool close wrapper */ void pmemlog_close(PMEMlogpool *plp) { pmemobj_close((PMEMobjpool *)plp); } /* * pmemlog_nbyte -- return usable size of a log memory pool */ size_t pmemlog_nbyte(PMEMlogpool *plp) { PMEMobjpool *pop = (PMEMobjpool *)plp; TOID(struct log) logp; logp = D_RO(POBJ_ROOT(pop, struct base))->log; return D_RO(logp)->hdr.data_size; } /* * pmemlog_append -- add data to a log memory pool */ int pmemlog_append(PMEMlogpool *plp, const void *buf, size_t count) { PMEMobjpool *pop = (PMEMobjpool *)plp; int retval = 0; TOID(struct base) bp; bp = POBJ_ROOT(pop, struct base); TOID(struct log) logp; logp = D_RW(bp)->log; /* check for overrun */ if ((D_RO(logp)->hdr.write_offset + count) > D_RO(logp)->hdr.data_size) { errno = ENOMEM; return 1; } /* begin a transaction, also acquiring the write lock for the log */ TX_BEGIN_PARAM(pop, TX_PARAM_RWLOCK, &D_RW(bp)->rwlock, TX_PARAM_NONE) { char *dst = D_RW(logp)->data + D_RO(logp)->hdr.write_offset; /* add hdr to undo log */ TX_ADD_FIELD(logp, hdr); /* copy and persist data */ pmemobj_memcpy_persist(pop, dst, buf, count); /* set the new offset */ D_RW(logp)->hdr.write_offset += count; } TX_ONABORT { retval = -1; } TX_END return retval; } /* * pmemlog_appendv -- add gathered data to a log memory pool */ int pmemlog_appendv(PMEMlogpool *plp, const struct iovec *iov, int iovcnt) { PMEMobjpool *pop = (PMEMobjpool *)plp; int retval = 0; TOID(struct base) bp; bp = POBJ_ROOT(pop, struct base); uint64_t total_count = 0; /* calculate required space */ for (int i = 0; i < iovcnt; ++i) total_count += iov[i].iov_len; TOID(struct log) logp; logp = D_RW(bp)->log; /* check for overrun */ if ((D_RO(logp)->hdr.write_offset + total_count) > D_RO(logp)->hdr.data_size) { errno = ENOMEM; return 1; } /* begin a transaction, also acquiring the write lock for the log */ TX_BEGIN_PARAM(pop, TX_PARAM_RWLOCK, &D_RW(bp)->rwlock, TX_PARAM_NONE) { TX_ADD(D_RW(bp)->log); /* append the data */ for (int i = 0; i < iovcnt; ++i) { char *buf = (char *)iov[i].iov_base; size_t count = iov[i].iov_len; char *dst = D_RW(logp)->data + D_RO(logp)->hdr.write_offset; /* copy and persist data */ pmemobj_memcpy_persist(pop, dst, buf, count); /* set the new offset */ D_RW(logp)->hdr.write_offset += count; } } TX_ONABORT { retval = -1; } TX_END return retval; } /* * pmemlog_tell -- return current write point in a log memory pool */ long long pmemlog_tell(PMEMlogpool *plp) { PMEMobjpool *pop = (PMEMobjpool *)plp; TOID(struct log) logp; logp = D_RO(POBJ_ROOT(pop, struct base))->log; return D_RO(logp)->hdr.write_offset; } /* * pmemlog_rewind -- discard all data, resetting a log memory pool to empty */ void pmemlog_rewind(PMEMlogpool *plp) { PMEMobjpool *pop = (PMEMobjpool *)plp; TOID(struct base) bp; bp = POBJ_ROOT(pop, struct base); /* begin a transaction, also acquiring the write lock for the log */ TX_BEGIN_PARAM(pop, TX_PARAM_RWLOCK, &D_RW(bp)->rwlock, TX_PARAM_NONE) { /* add the hdr to the undo log */ TX_ADD_FIELD(D_RW(bp)->log, hdr); /* reset the write offset */ D_RW(D_RW(bp)->log)->hdr.write_offset = 0; } TX_END } /* * pmemlog_walk -- walk through all data in a log memory pool * * chunksize of 0 means process_chunk gets called once for all data * as a single chunk. 
*/ void pmemlog_walk(PMEMlogpool *plp, size_t chunksize, int (*process_chunk)(const void *buf, size_t len, void *arg), void *arg) { PMEMobjpool *pop = (PMEMobjpool *)plp; TOID(struct base) bp; bp = POBJ_ROOT(pop, struct base); /* acquire a rdlock here */ int err; if ((err = pmemobj_rwlock_rdlock(pop, &D_RW(bp)->rwlock)) != 0) { errno = err; return; } TOID(struct log) logp; logp = D_RW(bp)->log; size_t read_size = chunksize ? chunksize : D_RO(logp)->hdr.data_size; char *read_ptr = D_RW(logp)->data; const char *write_ptr = (D_RO(logp)->data + D_RO(logp)->hdr.write_offset); while (read_ptr < write_ptr) { read_size = MIN(read_size, (size_t)(write_ptr - read_ptr)); (*process_chunk)(read_ptr, read_size, arg); read_ptr += read_size; } pmemobj_rwlock_unlock(pop, &D_RW(bp)->rwlock); } /* * process_chunk -- (internal) process function for log_walk */ static int process_chunk(const void *buf, size_t len, void *arg) { char *tmp = (char *)malloc(len + 1); if (tmp == NULL) { fprintf(stderr, "malloc error\n"); return 0; } memcpy(tmp, buf, len); tmp[len] = '\0'; printf("log contains:\n"); printf("%s\n", tmp); free(tmp); return 1; /* continue */ } /* * count_iovec -- (internal) count the number of iovec items */ static int count_iovec(char *arg) { int count = 1; char *pch = strchr(arg, ':'); while (pch != NULL) { ++count; pch = strchr(++pch, ':'); } return count; } /* * fill_iovec -- (internal) fill out the iovec */ static void fill_iovec(struct iovec *iov, char *arg) { char *pch = strtok(arg, ":"); while (pch != NULL) { iov->iov_base = pch; iov->iov_len = strlen((char *)iov->iov_base); ++iov; pch = strtok(NULL, ":"); } } int main(int argc, char *argv[]) { if (argc < 2) { fprintf(stderr, "usage: %s [o,c] file [val...]\n", argv[0]); return 1; } PMEMlogpool *plp; if (strncmp(argv[1], "c", 1) == 0) { plp = pmemlog_create(argv[2], POOL_SIZE, CREATE_MODE_RW); } else if (strncmp(argv[1], "o", 1) == 0) { plp = pmemlog_open(argv[2]); } else { fprintf(stderr, "usage: %s [o,c] file [val...]\n", argv[0]); return 1; } if (plp == NULL) { perror("pmemlog_create/pmemlog_open"); return 1; } /* process the command line arguments */ for (int i = 3; i < argc; i++) { switch (*argv[i]) { case 'a': { printf("append: %s\n", argv[i] + 2); if (pmemlog_append(plp, argv[i] + 2, strlen(argv[i] + 2))) fprintf(stderr, "pmemlog_append" " error\n"); break; } case 'v': { printf("appendv: %s\n", argv[i] + 2); int count = count_iovec(argv[i] + 2); struct iovec *iov = (struct iovec *)malloc( count * sizeof(struct iovec)); if (iov == NULL) { fprintf(stderr, "malloc error\n"); return 1; } fill_iovec(iov, argv[i] + 2); if (pmemlog_appendv(plp, iov, count)) fprintf(stderr, "pmemlog_appendv" " error\n"); free(iov); break; } case 'r': { printf("rewind\n"); pmemlog_rewind(plp); break; } case 'w': { printf("walk\n"); unsigned long walksize = strtoul(argv[i] + 2, NULL, 10); pmemlog_walk(plp, walksize, process_chunk, NULL); break; } case 'n': { printf("nbytes: %zu\n", pmemlog_nbyte(plp)); break; } case 't': { printf("offset: %lld\n", pmemlog_tell(plp)); break; } default: { fprintf(stderr, "unrecognized command %s\n", argv[i]); break; } }; } /* all done */ pmemlog_close(plp); return 0; }
9,720
21.043084
76
c
null
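obj_pmemlog_simple.c re-implements the pmemlog API on top of a single transactional buffer. The sketch below drives that API directly instead of going through the command-line parser; the pool path is an assumption, and the pool size mirrors the example's own POOL_SIZE.

#include <stdio.h>
#include <string.h>
#include "libpmemlog.h"

static int
print_chunk(const void *buf, size_t len, void *arg)
{
	/* with chunksize 0 the callback sees all written data at once */
	printf("chunk of %zu bytes: %.*s\n", len, (int)len,
			(const char *)buf);
	return 1;	/* non-zero keeps the walk going (libpmemlog convention) */
}

int
main(void)
{
	/* assumed pool path; 100 MiB matches POOL_SIZE above */
	PMEMlogpool *plp = pmemlog_create("/mnt/pmem/log_demo",
			100 * 1024 * 1024, 0666);
	if (plp == NULL)
		return 1;

	const char *msg = "hello pmemlog";
	if (pmemlog_append(plp, msg, strlen(msg)))
		fprintf(stderr, "append failed\n");

	printf("tell: %lld, nbyte: %zu\n",
			pmemlog_tell(plp), pmemlog_nbyte(plp));
	pmemlog_walk(plp, 0, print_chunk, NULL);

	pmemlog_close(plp);
	return 0;
}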
NearPMSW-main/nearpm/shadow/pmdk-sd/src/examples/libpmemobj/pmemlog/obj_pmemlog.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2019, Intel Corporation */ /* * obj_pmemlog.c -- alternate pmemlog implementation based on pmemobj * * usage: obj_pmemlog [co] file [cmd[:param]...] * * c - create file * o - open file * * The "cmd" arguments match the pmemlog functions: * a - append * v - appendv * r - rewind * w - walk * n - nbyte * t - tell * "a" and "v" require a parameter string(s) separated by a colon */ #include <ex_common.h> #include <sys/stat.h> #include <string.h> #include <stdio.h> #include <assert.h> #include <stdlib.h> #include "libpmemobj.h" #include "libpmem.h" #include "libpmemlog.h" #define LAYOUT_NAME "obj_pmemlog" #define POOL_SIZE ((size_t)(1024 * 1024 * 100)) /* types of allocations */ enum types { LOG_TYPE, LOG_HDR_TYPE, BASE_TYPE, MAX_TYPES }; /* log entry header */ struct log_hdr { PMEMoid next; /* object ID of the next log buffer */ size_t size; /* size of this log buffer */ }; /* struct log stores the entire log entry */ struct log { struct log_hdr hdr; /* entry header */ char data[]; /* log entry data */ }; /* struct base keeps track of the beginning of the log list */ struct base { PMEMoid head; /* object ID of the first log buffer */ PMEMoid tail; /* object ID of the last log buffer */ PMEMrwlock rwlock; /* lock covering entire log */ size_t bytes_written; /* number of bytes stored in the pool */ }; /* * pmemlog_open -- pool open wrapper */ PMEMlogpool * pmemlog_open(const char *path) { return (PMEMlogpool *)pmemobj_open(path, LAYOUT_NAME); } /* * pmemlog_create -- pool create wrapper */ PMEMlogpool * pmemlog_create(const char *path, size_t poolsize, mode_t mode) { return (PMEMlogpool *)pmemobj_create(path, LAYOUT_NAME, poolsize, mode); } /* * pmemlog_close -- pool close wrapper */ void pmemlog_close(PMEMlogpool *plp) { pmemobj_close((PMEMobjpool *)plp); } /* * pmemlog_nbyte -- not available in this implementation */ size_t pmemlog_nbyte(PMEMlogpool *plp) { /* N/A */ return 0; } /* * pmemlog_append -- add data to a log memory pool */ int pmemlog_append(PMEMlogpool *plp, const void *buf, size_t count) { PMEMobjpool *pop = (PMEMobjpool *)plp; PMEMoid baseoid = pmemobj_root(pop, sizeof(struct base)); struct base *bp = pmemobj_direct(baseoid); /* set the return point */ jmp_buf env; if (setjmp(env)) { /* end the transaction */ (void) pmemobj_tx_end(); return 1; } /* begin a transaction, also acquiring the write lock for the log */ if (pmemobj_tx_begin(pop, env, TX_PARAM_RWLOCK, &bp->rwlock, TX_PARAM_NONE)) return -1; /* allocate the new node to be inserted */ PMEMoid log = pmemobj_tx_alloc(count + sizeof(struct log_hdr), LOG_TYPE); struct log *logp = pmemobj_direct(log); logp->hdr.size = count; memcpy(logp->data, buf, count); logp->hdr.next = OID_NULL; /* add the modified root object to the undo log */ pmemobj_tx_add_range(baseoid, 0, sizeof(struct base)); if (bp->tail.off == 0) { /* update head */ bp->head = log; } else { /* add the modified tail entry to the undo log */ pmemobj_tx_add_range(bp->tail, 0, sizeof(struct log)); ((struct log *)pmemobj_direct(bp->tail))->hdr.next = log; } bp->tail = log; /* update tail */ bp->bytes_written += count; pmemobj_tx_commit(); (void) pmemobj_tx_end(); return 0; } /* * pmemlog_appendv -- add gathered data to a log memory pool */ int pmemlog_appendv(PMEMlogpool *plp, const struct iovec *iov, int iovcnt) { PMEMobjpool *pop = (PMEMobjpool *)plp; PMEMoid baseoid = pmemobj_root(pop, sizeof(struct base)); struct base *bp = pmemobj_direct(baseoid); /* set the return point */ jmp_buf env; if (setjmp(env)) { /* 
end the transaction */ pmemobj_tx_end(); return 1; } /* begin a transaction, also acquiring the write lock for the log */ if (pmemobj_tx_begin(pop, env, TX_PARAM_RWLOCK, &bp->rwlock, TX_PARAM_NONE)) return -1; /* add the base object to the undo log - once for the transaction */ pmemobj_tx_add_range(baseoid, 0, sizeof(struct base)); /* add the tail entry once to the undo log, if it is set */ if (!OID_IS_NULL(bp->tail)) pmemobj_tx_add_range(bp->tail, 0, sizeof(struct log)); /* append the data */ for (int i = 0; i < iovcnt; ++i) { char *buf = iov[i].iov_base; size_t count = iov[i].iov_len; /* allocate the new node to be inserted */ PMEMoid log = pmemobj_tx_alloc(count + sizeof(struct log_hdr), LOG_TYPE); struct log *logp = pmemobj_direct(log); logp->hdr.size = count; memcpy(logp->data, buf, count); logp->hdr.next = OID_NULL; if (bp->tail.off == 0) { bp->head = log; /* update head */ } else { ((struct log *)pmemobj_direct(bp->tail))->hdr.next = log; } bp->tail = log; /* update tail */ bp->bytes_written += count; } pmemobj_tx_commit(); (void) pmemobj_tx_end(); return 0; } /* * pmemlog_tell -- returns the current write point for the log */ long long pmemlog_tell(PMEMlogpool *plp) { PMEMobjpool *pop = (PMEMobjpool *)plp; struct base *bp = pmemobj_direct(pmemobj_root(pop, sizeof(struct base))); if (pmemobj_rwlock_rdlock(pop, &bp->rwlock) != 0) return 0; long long bytes_written = bp->bytes_written; pmemobj_rwlock_unlock(pop, &bp->rwlock); return bytes_written; } /* * pmemlog_rewind -- discard all data, resetting a log memory pool to empty */ void pmemlog_rewind(PMEMlogpool *plp) { PMEMobjpool *pop = (PMEMobjpool *)plp; PMEMoid baseoid = pmemobj_root(pop, sizeof(struct base)); struct base *bp = pmemobj_direct(baseoid); /* set the return point */ jmp_buf env; if (setjmp(env)) { /* end the transaction */ pmemobj_tx_end(); return; } /* begin a transaction, also acquiring the write lock for the log */ if (pmemobj_tx_begin(pop, env, TX_PARAM_RWLOCK, &bp->rwlock, TX_PARAM_NONE)) return; /* add the root object to the undo log */ pmemobj_tx_add_range(baseoid, 0, sizeof(struct base)); /* free all log nodes */ while (bp->head.off != 0) { PMEMoid nextoid = ((struct log *)pmemobj_direct(bp->head))->hdr.next; pmemobj_tx_free(bp->head); bp->head = nextoid; } bp->head = OID_NULL; bp->tail = OID_NULL; bp->bytes_written = 0; pmemobj_tx_commit(); (void) pmemobj_tx_end(); } /* * pmemlog_walk -- walk through all data in a log memory pool * * As this implementation holds the size of each entry, the chunksize is ignored * and the process_chunk function gets the actual entry length. 
*/ void pmemlog_walk(PMEMlogpool *plp, size_t chunksize, int (*process_chunk)(const void *buf, size_t len, void *arg), void *arg) { PMEMobjpool *pop = (PMEMobjpool *)plp; struct base *bp = pmemobj_direct(pmemobj_root(pop, sizeof(struct base))); if (pmemobj_rwlock_rdlock(pop, &bp->rwlock) != 0) return; /* process all chunks */ struct log *next = pmemobj_direct(bp->head); while (next != NULL) { (*process_chunk)(next->data, next->hdr.size, arg); next = pmemobj_direct(next->hdr.next); } pmemobj_rwlock_unlock(pop, &bp->rwlock); } /* * process_chunk -- (internal) process function for log_walk */ static int process_chunk(const void *buf, size_t len, void *arg) { char *tmp = malloc(len + 1); if (tmp == NULL) { fprintf(stderr, "malloc error\n"); return 0; } memcpy(tmp, buf, len); tmp[len] = '\0'; printf("log contains:\n"); printf("%s\n", tmp); free(tmp); return 1; } /* * count_iovec -- (internal) count the number of iovec items */ static int count_iovec(char *arg) { int count = 1; char *pch = strchr(arg, ':'); while (pch != NULL) { ++count; pch = strchr(++pch, ':'); } return count; } /* * fill_iovec -- (internal) fill out the iovec */ static void fill_iovec(struct iovec *iov, char *arg) { char *pch = strtok(arg, ":"); while (pch != NULL) { iov->iov_base = pch; iov->iov_len = strlen((char *)iov->iov_base); ++iov; pch = strtok(NULL, ":"); } } int main(int argc, char *argv[]) { if (argc < 2) { fprintf(stderr, "usage: %s [o,c] file [val...]\n", argv[0]); return 1; } PMEMlogpool *plp; if (strncmp(argv[1], "c", 1) == 0) { plp = pmemlog_create(argv[2], POOL_SIZE, CREATE_MODE_RW); } else if (strncmp(argv[1], "o", 1) == 0) { plp = pmemlog_open(argv[2]); } else { fprintf(stderr, "usage: %s [o,c] file [val...]\n", argv[0]); return 1; } if (plp == NULL) { perror("pmemlog_create/pmemlog_open"); return 1; } /* process the command line arguments */ for (int i = 3; i < argc; i++) { switch (*argv[i]) { case 'a': { printf("append: %s\n", argv[i] + 2); if (pmemlog_append(plp, argv[i] + 2, strlen(argv[i] + 2))) fprintf(stderr, "pmemlog_append" " error\n"); break; } case 'v': { printf("appendv: %s\n", argv[i] + 2); int count = count_iovec(argv[i] + 2); struct iovec *iov = calloc(count, sizeof(struct iovec)); fill_iovec(iov, argv[i] + 2); if (pmemlog_appendv(plp, iov, count)) fprintf(stderr, "pmemlog_appendv" " error\n"); free(iov); break; } case 'r': { printf("rewind\n"); pmemlog_rewind(plp); break; } case 'w': { printf("walk\n"); pmemlog_walk(plp, 0, process_chunk, NULL); break; } case 'n': { printf("nbytes: %zu\n", pmemlog_nbyte(plp)); break; } case 't': { printf("offset: %lld\n", pmemlog_tell(plp)); break; } default: { fprintf(stderr, "unrecognized command %s\n", argv[i]); break; } }; } /* all done */ pmemlog_close(plp); return 0; }
9,486
20.960648
80
c
null
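obj_pmemlog.c keeps each appended buffer as a separate node in a persistent linked list. The hypothetical sketch below exercises the appendv path of that implementation, assuming a pool already created with this example's 'c' command at an assumed path.

#include <stdio.h>
#include <string.h>
#include <sys/uio.h>
#include "libpmemlog.h"

int
main(void)
{
	PMEMlogpool *plp = pmemlog_open("/mnt/pmem/log_demo");
	if (plp == NULL)
		return 1;

	char first[] = "first entry";
	char second[] = "second entry";

	/* each iovec element becomes one log node appended to the list */
	struct iovec iov[2];
	iov[0].iov_base = first;
	iov[0].iov_len = strlen(first);
	iov[1].iov_base = second;
	iov[1].iov_len = strlen(second);

	if (pmemlog_appendv(plp, iov, 2))
		fprintf(stderr, "appendv failed\n");

	printf("bytes written so far: %lld\n", pmemlog_tell(plp));
	pmemlog_close(plp);
	return 0;
}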
NearPMSW-main/nearpm/shadow/pmdk-sd/src/examples/libpmemobj/pmemlog/obj_pmemlog_macros.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2015-2019, Intel Corporation */ /* * obj_pmemlog_macros.c -- alternate pmemlog implementation based on pmemobj * * usage: obj_pmemlog_macros [co] file [cmd[:param]...] * * c - create file * o - open file * * The "cmd" arguments match the pmemlog functions: * a - append * v - appendv * r - rewind * w - walk * n - nbyte * t - tell * "a" and "v" require a parameter string(s) separated by a colon */ #include <ex_common.h> #include <sys/stat.h> #include <string.h> #include <stdio.h> #include <assert.h> #include <stdlib.h> #include "libpmemobj.h" #include "libpmem.h" #include "libpmemlog.h" #define POOL_SIZE ((size_t)(1024 * 1024 * 100)) POBJ_LAYOUT_BEGIN(obj_pmemlog_macros); POBJ_LAYOUT_ROOT(obj_pmemlog_macros, struct base); POBJ_LAYOUT_TOID(obj_pmemlog_macros, struct log); POBJ_LAYOUT_END(obj_pmemlog_macros); /* log entry header */ struct log_hdr { TOID(struct log) next; /* object ID of the next log buffer */ size_t size; /* size of this log buffer */ }; /* struct log stores the entire log entry */ struct log { struct log_hdr hdr; /* entry header */ char data[]; /* log entry data */ }; /* struct base keeps track of the beginning of the log list */ struct base { TOID(struct log) head; /* object ID of the first log buffer */ TOID(struct log) tail; /* object ID of the last log buffer */ PMEMrwlock rwlock; /* lock covering entire log */ size_t bytes_written; /* number of bytes stored in the pool */ }; /* * pmemlog_open -- pool open wrapper */ PMEMlogpool * pmemlog_open(const char *path) { return (PMEMlogpool *)pmemobj_open(path, POBJ_LAYOUT_NAME(obj_pmemlog_macros)); } /* * pmemlog_create -- pool create wrapper */ PMEMlogpool * pmemlog_create(const char *path, size_t poolsize, mode_t mode) { return (PMEMlogpool *)pmemobj_create(path, POBJ_LAYOUT_NAME(obj_pmemlog_macros), poolsize, mode); } /* * pool_close -- pool close wrapper */ void pmemlog_close(PMEMlogpool *plp) { pmemobj_close((PMEMobjpool *)plp); } /* * pmemlog_nbyte -- not available in this implementation */ size_t pmemlog_nbyte(PMEMlogpool *plp) { /* N/A */ return 0; } /* * pmemlog_append -- add data to a log memory pool */ int pmemlog_append(PMEMlogpool *plp, const void *buf, size_t count) { PMEMobjpool *pop = (PMEMobjpool *)plp; int retval = 0; TOID(struct base) bp; bp = POBJ_ROOT(pop, struct base); /* begin a transaction, also acquiring the write lock for the log */ TX_BEGIN_PARAM(pop, TX_PARAM_RWLOCK, &D_RW(bp)->rwlock, TX_PARAM_NONE) { /* allocate the new node to be inserted */ TOID(struct log) logp; logp = TX_ALLOC(struct log, count + sizeof(struct log_hdr)); D_RW(logp)->hdr.size = count; memcpy(D_RW(logp)->data, buf, count); D_RW(logp)->hdr.next = TOID_NULL(struct log); /* add the modified root object to the undo log */ TX_ADD(bp); if (TOID_IS_NULL(D_RO(bp)->tail)) { /* update head */ D_RW(bp)->head = logp; } else { /* add the modified tail entry to the undo log */ TX_ADD(D_RW(bp)->tail); D_RW(D_RW(bp)->tail)->hdr.next = logp; } D_RW(bp)->tail = logp; /* update tail */ D_RW(bp)->bytes_written += count; } TX_ONABORT { retval = -1; } TX_END return retval; } /* * pmemlog_appendv -- add gathered data to a log memory pool */ int pmemlog_appendv(PMEMlogpool *plp, const struct iovec *iov, int iovcnt) { PMEMobjpool *pop = (PMEMobjpool *)plp; int retval = 0; TOID(struct base) bp; bp = POBJ_ROOT(pop, struct base); /* begin a transaction, also acquiring the write lock for the log */ TX_BEGIN_PARAM(pop, TX_PARAM_RWLOCK, &D_RW(bp)->rwlock, TX_PARAM_NONE) { /* add the base object and tail 
entry to the undo log */ TX_ADD(bp); if (!TOID_IS_NULL(D_RO(bp)->tail)) TX_ADD(D_RW(bp)->tail); /* append the data */ for (int i = 0; i < iovcnt; ++i) { char *buf = (char *)iov[i].iov_base; size_t count = iov[i].iov_len; /* allocate the new node to be inserted */ TOID(struct log) logp; logp = TX_ALLOC(struct log, count + sizeof(struct log_hdr)); D_RW(logp)->hdr.size = count; memcpy(D_RW(logp)->data, buf, count); D_RW(logp)->hdr.next = TOID_NULL(struct log); /* update head or tail accordingly */ if (TOID_IS_NULL(D_RO(bp)->tail)) D_RW(bp)->head = logp; else D_RW(D_RW(bp)->tail)->hdr.next = logp; /* update tail */ D_RW(bp)->tail = logp; D_RW(bp)->bytes_written += count; } } TX_ONABORT { retval = -1; } TX_END return retval; } /* * pmemlog_tell -- returns the current write point for the log */ long long pmemlog_tell(PMEMlogpool *plp) { TOID(struct base) bp; bp = POBJ_ROOT((PMEMobjpool *)plp, struct base); return D_RO(bp)->bytes_written; } /* * pmemlog_rewind -- discard all data, resetting a log memory pool to empty */ void pmemlog_rewind(PMEMlogpool *plp) { PMEMobjpool *pop = (PMEMobjpool *)plp; TOID(struct base) bp; bp = POBJ_ROOT(pop, struct base); /* begin a transaction, also acquiring the write lock for the log */ TX_BEGIN_PARAM(pop, TX_PARAM_RWLOCK, &D_RW(bp)->rwlock, TX_PARAM_NONE) { /* add the root object to the undo log */ TX_ADD(bp); while (!TOID_IS_NULL(D_RO(bp)->head)) { TOID(struct log) nextp; nextp = D_RW(D_RW(bp)->head)->hdr.next; TX_FREE(D_RW(bp)->head); D_RW(bp)->head = nextp; } D_RW(bp)->head = TOID_NULL(struct log); D_RW(bp)->tail = TOID_NULL(struct log); D_RW(bp)->bytes_written = 0; } TX_END } /* * pmemlog_walk -- walk through all data in a log memory pool * * As this implementation holds the size of each entry, the chunksize is ignored * and the process_chunk function gets the actual entry length. 
*/ void pmemlog_walk(PMEMlogpool *plp, size_t chunksize, int (*process_chunk)(const void *buf, size_t len, void *arg), void *arg) { PMEMobjpool *pop = (PMEMobjpool *)plp; TOID(struct base) bp; bp = POBJ_ROOT(pop, struct base); /* acquire a read lock */ if (pmemobj_rwlock_rdlock(pop, &D_RW(bp)->rwlock) != 0) return; TOID(struct log) next; next = D_RO(bp)->head; /* process all chunks */ while (!TOID_IS_NULL(next)) { (*process_chunk)(D_RO(next)->data, D_RO(next)->hdr.size, arg); next = D_RO(next)->hdr.next; } pmemobj_rwlock_unlock(pop, &D_RW(bp)->rwlock); } /* * process_chunk -- (internal) process function for log_walk */ static int process_chunk(const void *buf, size_t len, void *arg) { char *tmp = (char *)malloc(len + 1); if (tmp == NULL) { fprintf(stderr, "malloc error\n"); return 0; } memcpy(tmp, buf, len); tmp[len] = '\0'; printf("log contains:\n"); printf("%s\n", tmp); free(tmp); return 1; /* continue */ } /* * count_iovec -- (internal) count the number of iovec items */ static int count_iovec(char *arg) { int count = 1; char *pch = strchr(arg, ':'); while (pch != NULL) { ++count; pch = strchr(++pch, ':'); } return count; } /* * fill_iovec -- (internal) fill out the iovec */ static void fill_iovec(struct iovec *iov, char *arg) { char *pch = strtok(arg, ":"); while (pch != NULL) { iov->iov_base = pch; iov->iov_len = strlen((char *)iov->iov_base); ++iov; pch = strtok(NULL, ":"); } } int main(int argc, char *argv[]) { if (argc < 2) { fprintf(stderr, "usage: %s [o,c] file [val...]\n", argv[0]); return 1; } PMEMlogpool *plp; if (strncmp(argv[1], "c", 1) == 0) { plp = pmemlog_create(argv[2], POOL_SIZE, CREATE_MODE_RW); } else if (strncmp(argv[1], "o", 1) == 0) { plp = pmemlog_open(argv[2]); } else { fprintf(stderr, "usage: %s [o,c] file [val...]\n", argv[0]); return 1; } if (plp == NULL) { perror("pmemlog_create/pmemlog_open"); return 1; } /* process the command line arguments */ for (int i = 3; i < argc; i++) { switch (*argv[i]) { case 'a': { printf("append: %s\n", argv[i] + 2); if (pmemlog_append(plp, argv[i] + 2, strlen(argv[i] + 2))) fprintf(stderr, "pmemlog_append" " error\n"); break; } case 'v': { printf("appendv: %s\n", argv[i] + 2); int count = count_iovec(argv[i] + 2); struct iovec *iov = (struct iovec *)malloc( count * sizeof(struct iovec)); if (iov == NULL) { fprintf(stderr, "malloc error\n"); break; } fill_iovec(iov, argv[i] + 2); if (pmemlog_appendv(plp, iov, count)) fprintf(stderr, "pmemlog_appendv" " error\n"); free(iov); break; } case 'r': { printf("rewind\n"); pmemlog_rewind(plp); break; } case 'w': { printf("walk\n"); pmemlog_walk(plp, 0, process_chunk, NULL); break; } case 'n': { printf("nbytes: %zu\n", pmemlog_nbyte(plp)); break; } case 't': { printf("offset: %lld\n", pmemlog_tell(plp)); break; } default: { fprintf(stderr, "unrecognized command %s\n", argv[i]); break; } }; } /* all done */ pmemlog_close(plp); return 0; }
8,866
21.448101
80
c
null
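obj_pmemlog_macros.c relies on TX_BEGIN_PARAM with TX_PARAM_RWLOCK so that the transaction itself acquires the log's write lock on entry and releases it at TX_END, while TX_ADD_FIELD records the undo-log entry before the in-place update. A minimal sketch of that locking pattern follows; the layout name, counter_root struct and pool path are assumptions.

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>
#include <libpmemobj.h>

POBJ_LAYOUT_BEGIN(txlock_demo);
POBJ_LAYOUT_ROOT(txlock_demo, struct counter_root);
POBJ_LAYOUT_END(txlock_demo);

struct counter_root {
	PMEMrwlock rwlock;	/* lock covering the counter */
	uint64_t value;
};

static int
counter_add(PMEMobjpool *pop, uint64_t delta)
{
	int ret = 0;
	TOID(struct counter_root) root = POBJ_ROOT(pop, struct counter_root);

	/* the write lock is held for the whole transaction */
	TX_BEGIN_PARAM(pop, TX_PARAM_RWLOCK, &D_RW(root)->rwlock,
			TX_PARAM_NONE) {
		TX_ADD_FIELD(root, value);	/* undo log for the field */
		D_RW(root)->value += delta;
	} TX_ONABORT {
		ret = -1;
	} TX_END

	return ret;
}

int
main(void)
{
	/* assumed pool path and minimal size */
	PMEMobjpool *pop = pmemobj_create("/mnt/pmem/txlock_demo",
			POBJ_LAYOUT_NAME(txlock_demo), PMEMOBJ_MIN_POOL, 0666);
	if (pop == NULL)
		return 1;

	if (counter_add(pop, 1) == 0)
		printf("counter: %" PRIu64 "\n",
			D_RO(POBJ_ROOT(pop, struct counter_root))->value);

	pmemobj_close(pop);
	return 0;
}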
NearPMSW-main/nearpm/shadow/pmdk-sd/src/examples/libpmemobj/libart/arttree_examine.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2016-2017, Intel Corporation */ /* * Copyright 2016, FUJITSU TECHNOLOGY SOLUTIONS GMBH * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * =========================================================================== * * Filename: arttree_examine.c * * Description: implementation of examine function for ART tree structures * * Author: Andreas Bluemle, Dieter Kasper * Andreas.Bluemle.external@ts.fujitsu.com * dieter.kasper@ts.fujitsu.com * * Organization: FUJITSU TECHNOLOGY SOLUTIONS GMBH * * =========================================================================== */ #include <stdio.h> #include <libgen.h> #include <string.h> #include <unistd.h> #include <inttypes.h> #include <stdlib.h> #include <getopt.h> #include <stdint.h> #include <stdbool.h> #include "arttree_structures.h" /* * examine context */ struct examine_ctx { struct pmem_context *pmem_ctx; char *offset_string; uint64_t offset; char *type_name; int32_t type; int32_t hexdump; }; static struct examine_ctx *ex_ctx = NULL; struct examine { const char *name; const char *brief; int (*func)(char *, struct examine_ctx *, off_t); void (*help)(char *); }; /* local functions */ static int examine_parse_args(char *appname, int ac, char *av[], struct examine_ctx *ex_ctx); static struct examine *get_examine(char *type_name); static void print_usage(char *appname); static void dump_PMEMoid(char *prefix, PMEMoid *oid); static int examine_PMEMoid(char *appname, struct examine_ctx *ctx, off_t off); static int examine_art_tree_root(char *appname, struct examine_ctx *ctx, off_t off); static int examine_art_node_u(char *appname, struct examine_ctx *ctx, off_t off); static int examine_art_node4(char *appname, struct examine_ctx *ctx, off_t off); static int examine_art_node16(char *appname, struct examine_ctx *ctx, off_t off); static int examine_art_node48(char *appname, struct examine_ctx *ctx, off_t off); static int examine_art_node256(char *appname, struct examine_ctx *ctx, off_t off); #if 0 /* XXX */ static int examine_art_node(char *appname, struct examine_ctx *ctx, off_t off); #else 
static int examine_art_node(art_node *an); #endif static int examine_art_leaf(char *appname, struct examine_ctx *ctx, off_t off); static int examine_var_string(char *appname, struct examine_ctx *ctx, off_t off); /* global visible interface */ void arttree_examine_help(char *appname); int arttree_examine_func(char *appname, struct pmem_context *ctx, int ac, char *av[]); static const char *arttree_examine_help_str = "Examine data structures (objects) of ART tree\n" "Arguments: <offset> <type>\n" " <offset> offset of object in pmem file\n" " <type> one of art_tree_root, art_node_u, art_node," " art_node4, art_node16, art_node48, art_node256, art_leaf\n" ; static const struct option long_options[] = { {"hexdump", no_argument, NULL, 'x'}, {NULL, 0, NULL, 0 }, }; static struct examine ex_funcs[] = { { .name = "PMEMobj", .brief = "examine PMEMoid structure", .func = examine_PMEMoid, .help = NULL, }, { .name = "art_tree_root", .brief = "examine art_tree_root structure", .func = examine_art_tree_root, .help = NULL, }, { .name = "art_node_u", .brief = "examine art_node_u structure", .func = examine_art_node_u, .help = NULL, }, { .name = "art_node4", .brief = "examine art_node4 structure", .func = examine_art_node4, .help = NULL, }, { .name = "art_node16", .brief = "examine art_node16 structure", .func = examine_art_node16, .help = NULL, }, { .name = "art_node48", .brief = "examine art_node48 structure", .func = examine_art_node48, .help = NULL, }, { .name = "art_node256", .brief = "examine art_node256 structure", .func = examine_art_node256, .help = NULL, }, { .name = "art_leaf", .brief = "examine art_leaf structure", .func = examine_art_leaf, .help = NULL, }, { .name = "var_string", .brief = "examine var_string structure", .func = examine_var_string, .help = NULL, }, }; /* * number of arttree examine commands */ #define COMMANDS_NUMBER (sizeof(ex_funcs) / sizeof(ex_funcs[0])) void arttree_examine_help(char *appname) { printf("%s %s\n", appname, arttree_examine_help_str); } int arttree_examine_func(char *appname, struct pmem_context *ctx, int ac, char *av[]) { int errors = 0; off_t offset; struct examine *ex; if (ctx == NULL) { return -1; } if (ex_ctx == NULL) { ex_ctx = (struct examine_ctx *) calloc(1, sizeof(struct examine_ctx)); if (ex_ctx == NULL) { return -1; } } ex_ctx->pmem_ctx = ctx; if (examine_parse_args(appname, ac, av, ex_ctx) != 0) { fprintf(stderr, "%s::%s: error parsing arguments\n", appname, __FUNCTION__); errors++; } if (!errors) { offset = (off_t)strtol(ex_ctx->offset_string, NULL, 0); ex = get_examine(ex_ctx->type_name); if (ex != NULL) { ex->func(appname, ex_ctx, offset); } } return errors; } static int examine_parse_args(char *appname, int ac, char *av[], struct examine_ctx *ex_ctx) { int ret = 0; int opt; optind = 0; while ((opt = getopt_long(ac, av, "x", long_options, NULL)) != -1) { switch (opt) { case 'x': ex_ctx->hexdump = 1; break; default: print_usage(appname); ret = 1; } } if (ret == 0) { ex_ctx->offset_string = strdup(av[optind + 0]); ex_ctx->type_name = strdup(av[optind + 1]); } return ret; } static void print_usage(char *appname) { printf("%s: examine <offset> <type>\n", appname); } /* * get_command -- returns command for specified command name */ static struct examine * get_examine(char *type_name) { if (type_name == NULL) { return NULL; } for (size_t i = 0; i < COMMANDS_NUMBER; i++) { if (strcmp(type_name, ex_funcs[i].name) == 0) return &ex_funcs[i]; } return NULL; } static void dump_PMEMoid(char *prefix, PMEMoid *oid) { printf("%s { PMEMoid pool_uuid_lo %" PRIx64 " 
off 0x%" PRIx64 " = %" PRId64 " }\n", prefix, oid->pool_uuid_lo, oid->off, oid->off); } static int examine_PMEMoid(char *appname, struct examine_ctx *ctx, off_t off) { void *p = (void *)(ctx->pmem_ctx->addr + off); dump_PMEMoid("PMEMoid", p); return 0; } static int examine_art_tree_root(char *appname, struct examine_ctx *ctx, off_t off) { art_tree_root *tree_root = (art_tree_root *)(ctx->pmem_ctx->addr + off); printf("at offset 0x%llx, art_tree_root {\n", (long long)off); printf(" size %d\n", tree_root->size); dump_PMEMoid(" art_node_u", (PMEMoid *)&(tree_root->root)); printf("\n};\n"); return 0; } static int examine_art_node_u(char *appname, struct examine_ctx *ctx, off_t off) { art_node_u *node_u = (art_node_u *)(ctx->pmem_ctx->addr + off); printf("at offset 0x%llx, art_node_u {\n", (long long)off); printf(" type %d [%s]\n", node_u->art_node_type, art_node_names[node_u->art_node_type]); printf(" tag %d\n", node_u->art_node_tag); switch (node_u->art_node_type) { case ART_NODE4: dump_PMEMoid(" art_node4 oid", &(node_u->u.an4.oid)); break; case ART_NODE16: dump_PMEMoid(" art_node16 oid", &(node_u->u.an16.oid)); break; case ART_NODE48: dump_PMEMoid(" art_node48 oid", &(node_u->u.an48.oid)); break; case ART_NODE256: dump_PMEMoid(" art_node256 oid", &(node_u->u.an256.oid)); break; case ART_LEAF: dump_PMEMoid(" art_leaf oid", &(node_u->u.al.oid)); break; default: printf("ERROR: unknown node type\n"); break; } printf("\n};\n"); return 0; } static int examine_art_node4(char *appname, struct examine_ctx *ctx, off_t off) { art_node4 *an4 = (art_node4 *)(ctx->pmem_ctx->addr + off); printf("at offset 0x%llx, art_node4 {\n", (long long)off); examine_art_node(&(an4->n)); printf("keys ["); for (int i = 0; i < 4; i++) { printf("%c ", an4->keys[i]); } printf("]\nnodes [\n"); for (int i = 0; i < 4; i++) { dump_PMEMoid(" art_node_u oid", &(an4->children[i].oid)); } printf("\n]"); printf("\n};\n"); return 0; } static int examine_art_node16(char *appname, struct examine_ctx *ctx, off_t off) { art_node16 *an16 = (art_node16 *)(ctx->pmem_ctx->addr + off); printf("at offset 0x%llx, art_node16 {\n", (long long)off); examine_art_node(&(an16->n)); printf("keys ["); for (int i = 0; i < 16; i++) { printf("%c ", an16->keys[i]); } printf("]\nnodes [\n"); for (int i = 0; i < 16; i++) { dump_PMEMoid(" art_node_u oid", &(an16->children[i].oid)); } printf("\n]"); printf("\n};\n"); return 0; } static int examine_art_node48(char *appname, struct examine_ctx *ctx, off_t off) { art_node48 *an48 = (art_node48 *)(ctx->pmem_ctx->addr + off); printf("at offset 0x%llx, art_node48 {\n", (long long)off); examine_art_node(&(an48->n)); printf("keys ["); for (int i = 0; i < 256; i++) { printf("%c ", an48->keys[i]); } printf("]\nnodes [\n"); for (int i = 0; i < 48; i++) { dump_PMEMoid(" art_node_u oid", &(an48->children[i].oid)); } printf("\n]"); printf("\n};\n"); return 0; } static int examine_art_node256(char *appname, struct examine_ctx *ctx, off_t off) { art_node256 *an256 = (art_node256 *)(ctx->pmem_ctx->addr + off); printf("at offset 0x%llx, art_node256 {\n", (long long)off); examine_art_node(&(an256->n)); printf("nodes [\n"); for (int i = 0; i < 256; i++) { dump_PMEMoid(" art_node_u oid", &(an256->children[i].oid)); } printf("\n]"); printf("\n};\n"); return 0; } #if 0 /* XXX */ static int examine_art_node(char *appname, struct examine_ctx *ctx, off_t off) { art_node *an = (art_node *)(ctx->pmem_ctx->addr + off); printf("at offset 0x%llx, art_node {\n", (long long)off); printf(" num_children %d\n", an->num_children); printf(" 
partial_len %d\n", an->partial_len); printf(" partial ["); for (int i = 0; i < 10; i++) { printf("%c ", an->partial[i]); } printf("\n]"); printf("\n};\n"); return 0; } #else static int examine_art_node(art_node *an) { printf("art_node {\n"); printf(" num_children %d\n", an->num_children); printf(" partial_len %" PRIu32 "\n", an->partial_len); printf(" partial ["); for (int i = 0; i < 10; i++) { printf("%c ", an->partial[i]); } printf("\n]"); printf("\n};\n"); return 0; } #endif static int examine_art_leaf(char *appname, struct examine_ctx *ctx, off_t off) { art_leaf *al = (art_leaf *)(ctx->pmem_ctx->addr + off); printf("at offset 0x%llx, art_leaf {\n", (long long)off); dump_PMEMoid(" var_string key oid ", &(al->key.oid)); dump_PMEMoid(" var_string value oid", &(al->value.oid)); printf("\n};\n"); return 0; } static int examine_var_string(char *appname, struct examine_ctx *ctx, off_t off) { var_string *vs = (var_string *)(ctx->pmem_ctx->addr + off); printf("at offset 0x%llx, var_string {\n", (long long)off); printf(" len %zu s [%s]", vs->len, vs->s); printf("\n};\n"); return 0; }
12,509
24.478615
78
c
null
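arttree_examine.c inspects objects by interpreting raw offsets inside a read-only mapping of the pool file. The stand-alone sketch below reproduces that idea for a single PMEMoid, much like examine_PMEMoid above; the pool path and offset are assumptions.

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/stat.h>

/* mirrors the on-media PMEMoid layout printed by the examiner above */
struct raw_oid {
	uint64_t pool_uuid_lo;
	uint64_t off;
};

int
main(void)
{
	const char *path = "/mnt/pmem/arttree_pool";	/* assumed path */
	off_t offset = 0x2000;				/* assumed offset */

	int fd = open(path, O_RDONLY);
	if (fd < 0)
		return 1;

	struct stat st;
	if (fstat(fd, &st) < 0)
		return 1;

	char *addr = mmap(NULL, (size_t)st.st_size, PROT_READ, MAP_SHARED,
			fd, 0);
	if (addr == MAP_FAILED)
		return 1;

	struct raw_oid *oid = (struct raw_oid *)(addr + offset);
	printf("PMEMoid { pool_uuid_lo %" PRIx64 " off 0x%" PRIx64 " }\n",
			oid->pool_uuid_lo, oid->off);

	munmap(addr, (size_t)st.st_size);
	close(fd);
	return 0;
}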
NearPMSW-main/nearpm/shadow/pmdk-sd/src/examples/libpmemobj/libart/arttree_structures.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2016-2017, Intel Corporation */ /* * Copyright 2016, FUJITSU TECHNOLOGY SOLUTIONS GMBH * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * =========================================================================== * * Filename: arttree_structures.c * * Description: Examine pmem structures; structures and unions taken from * the preprocessor output of a libpmemobj compatible program. 
* * Author: Andreas Bluemle, Dieter Kasper * Andreas.Bluemle.external@ts.fujitsu.com * dieter.kasper@ts.fujitsu.com * * Organization: FUJITSU TECHNOLOGY SOLUTIONS GMBH * * =========================================================================== */ #ifdef __FreeBSD__ #define _WITH_GETLINE #endif #include <stdio.h> #include <fcntl.h> #include <libgen.h> #include <string.h> #include <unistd.h> #include <stdlib.h> #include <getopt.h> #include <stdint.h> #include <stdbool.h> #include <assert.h> #include <sys/mman.h> #include <sys/stat.h> #include "arttree_structures.h" #include <stdarg.h> #define APPNAME "examine_arttree" #define SRCVERSION "0.2" size_t art_node_sizes[art_node_types] = { sizeof(art_node4), sizeof(art_node16), sizeof(art_node48), sizeof(art_node256), sizeof(art_leaf), sizeof(art_node_u), sizeof(art_node), sizeof(art_tree_root), sizeof(var_string), }; char *art_node_names[art_node_types] = { "art_node4", "art_node16", "art_node48", "art_node256", "art_leaf", "art_node_u", "art_node", "art_tree_root", "var_string" }; /* * long_options -- command line arguments */ static const struct option long_options[] = { {"help", no_argument, NULL, 'h'}, {NULL, 0, NULL, 0 }, }; /* * command -- struct for commands definition */ struct command { const char *name; const char *brief; int (*func)(char *, struct pmem_context *, int, char *[]); void (*help)(char *); }; /* * number of arttree_structures commands */ #define COMMANDS_NUMBER (sizeof(commands) / sizeof(commands[0])) static void print_help(char *appname); static void print_usage(char *appname); static void print_version(char *appname); static int quit_func(char *appname, struct pmem_context *ctx, int argc, char *argv[]); static void quit_help(char *appname); static int set_root_func(char *appname, struct pmem_context *ctx, int argc, char *argv[]); static void set_root_help(char *appname); static int help_func(char *appname, struct pmem_context *ctx, int argc, char *argv[]); static void help_help(char *appname); static struct command *get_command(char *cmd_str); static int ctx_init(struct pmem_context *ctx, char *filename); static int arttree_structures_func(char *appname, struct pmem_context *ctx, int ac, char *av[]); static void arttree_structures_help(char *appname); static int arttree_info_func(char *appname, struct pmem_context *ctx, int ac, char *av[]); static void arttree_info_help(char *appname); extern int arttree_examine_func(); extern void arttree_examine_help(); extern int arttree_search_func(); extern void arttree_search_help(); void outv_err(const char *fmt, ...); void outv_err_vargs(const char *fmt, va_list ap); static struct command commands[] = { { .name = "structures", .brief = "print information about ART structures", .func = arttree_structures_func, .help = arttree_structures_help, }, { .name = "info", .brief = "print information and statistics" " about an ART tree pool", .func = arttree_info_func, .help = arttree_info_help, }, { .name = "examine", .brief = "examine data structures from an ART tree", .func = arttree_examine_func, .help = arttree_examine_help, }, { .name = "search", .brief = "search for a key in an ART tree", .func = arttree_search_func, .help = arttree_search_help, }, { .name = "set_root", .brief = "define offset of root of an ART tree", .func = set_root_func, .help = set_root_help, }, { .name = "help", .brief = "print help text about a command", .func = help_func, .help = help_help, }, { .name = "quit", .brief = "quit ART tree structure examiner", .func = quit_func, .help = quit_help, }, }; static 
struct pmem_context ctx; /* * outv_err -- print error message */ void outv_err(const char *fmt, ...) { va_list ap; va_start(ap, fmt); outv_err_vargs(fmt, ap); va_end(ap); } /* * outv_err_vargs -- print error message */ void outv_err_vargs(const char *fmt, va_list ap) { fprintf(stderr, "error: "); vfprintf(stderr, fmt, ap); if (!strchr(fmt, '\n')) fprintf(stderr, "\n"); } /* * print_usage -- prints usage message */ static void print_usage(char *appname) { printf("usage: %s [--help] <pmem file> <command> [<args>]\n", appname); } /* * print_version -- prints version message */ static void print_version(char *appname) { printf("%s %s\n", appname, SRCVERSION); } /* * print_help -- prints help message */ static void print_help(char *appname) { print_usage(appname); print_version(appname); printf("\n"); printf("Options:\n"); printf(" -h, --help display this help and exit\n"); printf("\n"); printf("The available commands are:\n"); for (size_t i = 0; i < COMMANDS_NUMBER; i++) printf("%s\t- %s\n", commands[i].name, commands[i].brief); printf("\n"); } /* * set_root_help -- prints help message for set root command */ static void set_root_help(char *appname) { printf("Usage: set_root <offset>\n"); printf(" define the offset of the art tree root\n"); } /* * set_root_func -- set_root define the offset of the art tree root */ static int set_root_func(char *appname, struct pmem_context *ctx, int argc, char *argv[]) { int retval = 0; uint64_t root_offset; if (argc == 2) { root_offset = strtol(argv[1], NULL, 0); ctx->art_tree_root_offset = root_offset; } else { set_root_help(appname); retval = 1; } return retval; } /* * quit_help -- prints help message for quit command */ static void quit_help(char *appname) { printf("Usage: quit\n"); printf(" terminate arttree structure examiner\n"); } /* * quit_func -- quit arttree structure examiner */ static int quit_func(char *appname, struct pmem_context *ctx, int argc, char *argv[]) { printf("\n"); exit(0); return 0; } /* * help_help -- prints help message for help command */ static void help_help(char *appname) { printf("Usage: %s help <command>\n", appname); } /* * help_func -- prints help message for specified command */ static int help_func(char *appname, struct pmem_context *ctx, int argc, char *argv[]) { if (argc > 1) { char *cmd_str = argv[1]; struct command *cmdp = get_command(cmd_str); if (cmdp && cmdp->help) { cmdp->help(appname); return 0; } else { outv_err("No help text for '%s' command\n", cmd_str); return -1; } } else { print_help(appname); return -1; } } static const char *arttree_structures_help_str = "Show information about known ART tree structures\n" ; static void arttree_structures_help(char *appname) { printf("%s %s\n", appname, arttree_structures_help_str); } static int arttree_structures_func(char *appname, struct pmem_context *ctx, int ac, char *av[]) { (void) appname; (void) ac; (void) av; printf( "typedef struct pmemoid {\n" " uint64_t pool_uuid_lo;\n" " uint64_t off;\n" "} PMEMoid;\n"); printf("sizeof(PMEMoid) = %zu\n\n\n", sizeof(PMEMoid)); printf( "struct _art_node_u; typedef struct _art_node_u art_node_u;\n" "struct _art_node_u { \n" " uint8_t art_node_type; \n" " uint8_t art_node_tag; \n" "};\n"); printf("sizeof(art_node_u) = %zu\n\n\n", sizeof(art_node_u)); printf( "struct _art_node; typedef struct _art_node art_node;\n" "struct _art_node {\n" " uint8_t type;\n" " uint8_t num_children;\n" " uint32_t partial_len;\n" " unsigned char partial[10];\n" "};\n"); printf("sizeof(art_node) = %zu\n\n\n", sizeof(art_node)); printf( "typedef uint8_t 
_toid_art_node_toid_type_num[8];\n"); printf("sizeof(_toid_art_node_toid_type_num[8]) = %zu\n\n\n", sizeof(_toid_art_node_toid_type_num[8])); printf( "union _toid_art_node_u_toid {\n" " PMEMoid oid;\n" " art_node_u *_type;\n" " _toid_art_node_u_toid_type_num *_type_num;\n" "};\n"); printf("sizeof(union _toid_art_node_u_toid) = %zu\n\n\n", sizeof(union _toid_art_node_u_toid)); printf( "typedef uint8_t _toid_art_node_toid_type_num[8];\n"); printf("sizeof(_toid_art_node_toid_type_num[8]) = %zu\n\n\n", sizeof(_toid_art_node_toid_type_num[8])); printf( "union _toid_art_node_toid {\n" " PMEMoid oid; \n" " art_node *_type; \n" " _toid_art_node_toid_type_num *_type_num;\n" "};\n"); printf("sizeof(union _toid_art_node_toid) = %zu\n\n\n", sizeof(union _toid_art_node_toid)); printf( "struct _art_node4; typedef struct _art_node4 art_node4;\n" "struct _art_node4 {\n" " art_node n;\n" " unsigned char keys[4];\n" " union _toid_art_node_u_toid children[4];\n" "};\n"); printf("sizeof(art_node4) = %zu\n\n\n", sizeof(art_node4)); printf( "struct _art_node16; typedef struct _art_node16 art_node16;\n" "struct _art_node16 {\n" " art_node n;\n" " unsigned char keys[16];\n" " union _toid_art_node_u_toid children[16];\n" "};\n"); printf("sizeof(art_node16) = %zu\n\n\n", sizeof(art_node16)); printf( "struct _art_node48; typedef struct _art_node48 art_node48;\n" "struct _art_node48 {\n" " art_node n;\n" " unsigned char keys[256];\n" " union _toid_art_node_u_toid children[48];\n" "};\n"); printf("sizeof(art_node48) = %zu\n\n\n", sizeof(art_node48)); printf( "struct _art_node256; typedef struct _art_node256 art_node256;\n" "struct _art_node256 {\n" " art_ndoe n;\n" " union _toid_art_node_u_toid children[256];\n" "};\n"); printf("sizeof(art_node256) = %zu\n\n\n", sizeof(art_node256)); printf( "struct _art_leaf; typedef struct _art_leaf art_leaf;\n" "struct _art_leaf {\n" " union _toid_var_string_toid value;\n" " union _toid_var_string_toid key;\n" "};\n"); printf("sizeof(art_leaf) = %zu\n\n\n", sizeof(art_leaf)); return 0; } static const char *arttree_info_help_str = "Show information about known ART tree structures\n" ; static void arttree_info_help(char *appname) { printf("%s %s\n", appname, arttree_info_help_str); } static int arttree_info_func(char *appname, struct pmem_context *ctx, int ac, char *av[]) { printf("%s: %s not yet implemented\n", appname, __FUNCTION__); return 0; } /* * get_command -- returns command for specified command name */ static struct command * get_command(char *cmd_str) { if (cmd_str == NULL) { return NULL; } for (size_t i = 0; i < COMMANDS_NUMBER; i++) { if (strcmp(cmd_str, commands[i].name) == 0) return &commands[i]; } return NULL; } static int ctx_init(struct pmem_context *ctx, char *filename) { int errors = 0; if (filename == NULL) errors++; if (ctx == NULL) errors++; if (errors) return errors; ctx->filename = strdup(filename); assert(ctx->filename != NULL); ctx->fd = -1; ctx->addr = NULL; ctx->art_tree_root_offset = 0; if (access(ctx->filename, F_OK) != 0) return 1; if ((ctx->fd = open(ctx->filename, O_RDONLY)) == -1) return 1; struct stat stbuf; if (fstat(ctx->fd, &stbuf) < 0) return 1; ctx->psize = stbuf.st_size; if ((ctx->addr = mmap(NULL, ctx->psize, PROT_READ, MAP_SHARED, ctx->fd, 0)) == MAP_FAILED) return 1; return 0; } static void ctx_fini(struct pmem_context *ctx) { munmap(ctx->addr, ctx->psize); close(ctx->fd); free(ctx->filename); } int main(int ac, char *av[]) { int opt; int option_index; int ret = 0; size_t len; ssize_t read; char *cmd_str; char *args[20]; int nargs; char 
*line; struct command *cmdp = NULL; while ((opt = getopt_long(ac, av, "h", long_options, &option_index)) != -1) { switch (opt) { case 'h': print_help(APPNAME); return 0; default: print_usage(APPNAME); return -1; } } if (optind >= ac) { fprintf(stderr, "ERROR: missing arguments\n"); print_usage(APPNAME); return -1; } ctx_init(&ctx, av[optind]); if (optind + 1 < ac) { /* execute command as given on command line */ cmd_str = av[optind + 1]; cmdp = get_command(cmd_str); if (cmdp != NULL) { ret = cmdp->func(APPNAME, &ctx, ac - 2, av + 2); } } else { /* interactive mode: read commands and execute them */ line = NULL; printf("\n> "); while ((read = getline(&line, &len, stdin)) != -1) { if (line[read - 1] == '\n') { line[read - 1] = '\0'; } args[0] = strtok(line, " "); cmdp = get_command(args[0]); if (cmdp == NULL) { printf("[%s]: command not supported\n", args[0] ? args[0] : "NULL"); printf("\n> "); continue; } nargs = 1; while (1) { args[nargs] = strtok(NULL, " "); if (args[nargs] == NULL) { break; } nargs++; } ret = cmdp->func(APPNAME, &ctx, nargs, args); printf("\n> "); } if (line != NULL) { free(line); } } ctx_fini(&ctx); return ret; }
14,768
22.898058
79
c
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/examples/libpmemobj/libart/arttree_structures.h
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2016-2017, Intel Corporation */ /* * Copyright 2016, FUJITSU TECHNOLOGY SOLUTIONS GMBH * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * =========================================================================== * * Filename: arttree_structures.h * * Description: known structures of the ART tree * * Author: Andreas Bluemle, Dieter Kasper * Andreas.Bluemle.external@ts.fujitsu.com * dieter.kasper@ts.fujitsu.com * * Organization: FUJITSU TECHNOLOGY SOLUTIONS GMBH * * =========================================================================== */ #ifndef _ARTTREE_STRUCTURES_H #define _ARTTREE_STRUCTURES_H #define MAX_PREFIX_LEN 10 /* * pmem_context -- structure for pmempool file */ struct pmem_context { char *filename; size_t psize; int fd; char *addr; uint64_t art_tree_root_offset; }; struct _art_node_u; typedef struct _art_node_u art_node_u; struct _art_node; typedef struct _art_node art_node; struct _art_node4; typedef struct _art_node4 art_node4; struct _art_node16; typedef struct _art_node16 art_node16; struct _art_node48; typedef struct _art_node48 art_node48; struct _art_node256; typedef struct _art_node256 art_node256; struct _var_string; typedef struct _var_string var_string; struct _art_leaf; typedef struct _art_leaf art_leaf; struct _art_tree_root; typedef struct _art_tree_root art_tree_root; typedef uint8_t art_tree_root_toid_type_num[65535]; typedef uint8_t _toid_art_node_u_toid_type_num[2]; typedef uint8_t _toid_art_node_toid_type_num[3]; typedef uint8_t _toid_art_node4_toid_type_num[4]; typedef uint8_t _toid_art_node16_toid_type_num[5]; typedef uint8_t _toid_art_node48_toid_type_num[6]; typedef uint8_t _toid_art_node256_toid_type_num[7]; typedef uint8_t _toid_art_leaf_toid_type_num[8]; typedef uint8_t _toid_var_string_toid_type_num[9]; typedef struct pmemoid { uint64_t pool_uuid_lo; uint64_t off; } PMEMoid; union _toid_art_node_u_toid { PMEMoid oid; art_node_u *_type; _toid_art_node_u_toid_type_num *_type_num; }; union art_tree_root_toid { PMEMoid oid; struct art_tree_root *_type; art_tree_root_toid_type_num *_type_num; 
}; union _toid_art_node_toid { PMEMoid oid; art_node *_type; _toid_art_node_toid_type_num *_type_num; }; union _toid_art_node4_toid { PMEMoid oid; art_node4 *_type; _toid_art_node4_toid_type_num *_type_num; }; union _toid_art_node16_toid { PMEMoid oid; art_node16 *_type; _toid_art_node16_toid_type_num *_type_num; }; union _toid_art_node48_toid { PMEMoid oid; art_node48 *_type; _toid_art_node48_toid_type_num *_type_num; }; union _toid_art_node256_toid { PMEMoid oid; art_node256 *_type; _toid_art_node256_toid_type_num *_type_num; }; union _toid_var_string_toid { PMEMoid oid; var_string *_type; _toid_var_string_toid_type_num *_type_num; }; union _toid_art_leaf_toid { PMEMoid oid; art_leaf *_type; _toid_art_leaf_toid_type_num *_type_num; }; struct _art_tree_root { int size; union _toid_art_node_u_toid root; }; struct _art_node { uint8_t num_children; uint32_t partial_len; unsigned char partial[MAX_PREFIX_LEN]; }; struct _art_node4 { art_node n; unsigned char keys[4]; union _toid_art_node_u_toid children[4]; }; struct _art_node16 { art_node n; unsigned char keys[16]; union _toid_art_node_u_toid children[16]; }; struct _art_node48 { art_node n; unsigned char keys[256]; union _toid_art_node_u_toid children[48]; }; struct _art_node256 { art_node n; union _toid_art_node_u_toid children[256]; }; struct _var_string { size_t len; unsigned char s[]; }; struct _art_leaf { union _toid_var_string_toid value; union _toid_var_string_toid key; }; struct _art_node_u { uint8_t art_node_type; uint8_t art_node_tag; union { union _toid_art_node4_toid an4; union _toid_art_node16_toid an16; union _toid_art_node48_toid an48; union _toid_art_node256_toid an256; union _toid_art_leaf_toid al; } u; }; typedef enum { ART_NODE4 = 0, ART_NODE16 = 1, ART_NODE48 = 2, ART_NODE256 = 3, ART_LEAF = 4, ART_NODE_U = 5, ART_NODE = 6, ART_TREE_ROOT = 7, VAR_STRING = 8, art_node_types = 9 /* number of different art_nodes */ } art_node_type; #define VALID_NODE_TYPE(n) (((n) >= 0) && ((n) < art_node_types)) extern size_t art_node_sizes[]; extern char *art_node_names[]; #endif /* _ARTTREE_STRUCTURES_H */
5,923
25.927273
78
h
null
NearPMSW-main/nearpm/shadow/pmdk-sd/src/examples/libpmemobj/libart/arttree.c
// SPDX-License-Identifier: BSD-3-Clause /* Copyright 2016-2017, Intel Corporation */ /* * Copyright 2016, FUJITSU TECHNOLOGY SOLUTIONS GMBH * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * * Neither the name of the copyright holder nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * =========================================================================== * * Filename: arttree.c * * Description: implement ART tree using libpmemobj based on libart * * Author: Andreas Bluemle, Dieter Kasper * Andreas.Bluemle.external@ts.fujitsu.com * dieter.kasper@ts.fujitsu.com * * Organization: FUJITSU TECHNOLOGY SOLUTIONS GMBH * * =========================================================================== */ #include <assert.h> #include <errno.h> #include <unistd.h> #include <string.h> #include <strings.h> #ifdef __FreeBSD__ #define _WITH_GETLINE #endif #include <stdio.h> #include <stdlib.h> #include <limits.h> #include <stdbool.h> #include <inttypes.h> #include <fcntl.h> #include <emmintrin.h> #include <sys/types.h> #include <sys/mman.h> #include "libpmemobj.h" #include "arttree.h" /* * dummy structure so far; this should correspond to the datastore * structure as defined in examples/libpmemobj/tree_map/datastore */ struct datastore { void *priv; }; /* * context - main context of datastore */ struct ds_context { char *filename; /* name of pool file */ int mode; /* operation mode */ int insertions; /* number of insert operations to perform */ int newpool; /* complete new memory pool */ size_t psize; /* size of pool */ PMEMobjpool *pop; /* pmemobj handle */ bool fileio; unsigned fmode; int fd; /* file descriptor for file io mode */ char *addr; /* base mapping address for file io mode */ unsigned char *key; /* for SEARCH, INSERT and REMOVE */ uint32_t key_len; unsigned char *value; /* for INSERT */ uint32_t val_len; }; #define FILL (1 << 1) #define DUMP (1 << 2) #define GRAPH (1 << 3) #define INSERT (1 << 4) #define SEARCH (1 << 5) #define REMOVE (1 << 6) struct ds_context my_context; extern TOID(var_string) null_var_string; extern TOID(art_leaf) null_art_leaf; extern TOID(art_node_u) null_art_node_u; #define read_key(p) read_line(p) #define read_value(p) read_line(p) int 
initialize_context(struct ds_context *ctx, int ac, char *av[]); int initialize_pool(struct ds_context *ctx); int add_elements(struct ds_context *ctx); int insert_element(struct ds_context *ctx); int search_element(struct ds_context *ctx); int delete_element(struct ds_context *ctx); ssize_t read_line(unsigned char **line); void exit_handler(struct ds_context *ctx); int art_tree_map_init(struct datastore *ds, struct ds_context *ctx); void pmemobj_ds_set_priv(struct datastore *ds, void *priv); static int dump_art_leaf_callback(void *data, const unsigned char *key, uint32_t key_len, const unsigned char *val, uint32_t val_len); static int dump_art_node_callback(void *data, const unsigned char *key, uint32_t key_len, const unsigned char *val, uint32_t val_len); static void print_node_info(char *nodetype, uint64_t off, const art_node *an); static int parse_keyval(struct ds_context *ctx, char *arg, int mode); int initialize_context(struct ds_context *ctx, int ac, char *av[]) { int errors = 0; int opt; char mode; if ((ctx == NULL) || (ac < 2)) { errors++; } if (!errors) { ctx->filename = NULL; ctx->psize = PMEMOBJ_MIN_POOL; ctx->newpool = 0; ctx->pop = NULL; ctx->fileio = false; ctx->fmode = 0666; ctx->mode = 0; ctx->fd = -1; } if (!errors) { while ((opt = getopt(ac, av, "s:m:n:")) != -1) { switch (opt) { case 'm': mode = optarg[0]; if (mode == 'f') { ctx->mode |= FILL; } else if (mode == 'd') { ctx->mode |= DUMP; } else if (mode == 'g') { ctx->mode |= GRAPH; } else if (mode == 'i') { ctx->mode |= INSERT; parse_keyval(ctx, av[optind], INSERT); optind++; } else if (mode == 's') { ctx->mode |= SEARCH; parse_keyval(ctx, av[optind], SEARCH); optind++; } else if (mode == 'r') { ctx->mode |= REMOVE; parse_keyval(ctx, av[optind], REMOVE); optind++; } else { errors++; } break; case 'n': { long insertions; insertions = strtol(optarg, NULL, 0); if (insertions > 0 && insertions < LONG_MAX) { ctx->insertions = insertions; } break; } case 's': { unsigned long poolsize; poolsize = strtoul(optarg, NULL, 0); if (poolsize >= PMEMOBJ_MIN_POOL) { ctx->psize = poolsize; } break; } default: errors++; break; } } } if (!errors) { ctx->filename = strdup(av[optind]); } return errors; } static int parse_keyval(struct ds_context *ctx, char *arg, int mode) { int errors = 0; char *p; p = strtok(arg, ":"); if (p == NULL) { errors++; } if (!errors) { if (ctx->mode & (SEARCH|REMOVE|INSERT)) { ctx->key = (unsigned char *)strdup(p); assert(ctx->key != NULL); ctx->key_len = strlen(p) + 1; } if (ctx->mode & INSERT) { p = strtok(NULL, ":"); assert(p != NULL); ctx->value = (unsigned char *)strdup(p); assert(ctx->value != NULL); ctx->val_len = strlen(p) + 1; } } return errors; } void exit_handler(struct ds_context *ctx) { if (!ctx->fileio) { if (ctx->pop) { pmemobj_close(ctx->pop); } } else { if (ctx->fd > (-1)) { close(ctx->fd); } } } int art_tree_map_init(struct datastore *ds, struct ds_context *ctx) { int errors = 0; char *error_string; /* calculate a required pool size */ if (ctx->psize < PMEMOBJ_MIN_POOL) ctx->psize = PMEMOBJ_MIN_POOL; if (!ctx->fileio) { if (access(ctx->filename, F_OK) != 0) { error_string = "pmemobj_create"; ctx->pop = pmemobj_create(ctx->filename, POBJ_LAYOUT_NAME(arttree_tx), ctx->psize, ctx->fmode); ctx->newpool = 1; } else { error_string = "pmemobj_open"; ctx->pop = pmemobj_open(ctx->filename, POBJ_LAYOUT_NAME(arttree_tx)); } if (ctx->pop == NULL) { perror(error_string); errors++; } } else { int flags = O_CREAT | O_RDWR | O_SYNC; /* Create a file if it does not exist. 
*/ if ((ctx->fd = open(ctx->filename, flags, ctx->fmode)) < 0) { perror(ctx->filename); errors++; } /* allocate the pmem */ if ((errno = posix_fallocate(ctx->fd, 0, ctx->psize)) != 0) { perror("posix_fallocate"); errors++; } /* map file to memory */ if ((ctx->addr = mmap(NULL, ctx->psize, PROT_READ, MAP_SHARED, ctx->fd, 0)) == MAP_FAILED) { perror("mmap"); errors++; } } if (!errors) { pmemobj_ds_set_priv(ds, ctx); } else { if (ctx->fileio) { if (ctx->addr != NULL) { munmap(ctx->addr, ctx->psize); } if (ctx->fd >= 0) { close(ctx->fd); } } else { if (ctx->pop) { pmemobj_close(ctx->pop); } } } return errors; } /* * pmemobj_ds_set_priv -- set private structure of datastore */ void pmemobj_ds_set_priv(struct datastore *ds, void *priv) { ds->priv = priv; } struct datastore myds; static void usage(char *progname) { printf("usage: %s -m [f|d|g] file\n", progname); printf(" -m mode known modes are\n"); printf(" f fill create and fill art tree\n"); printf(" i insert insert an element into the art tree\n"); printf(" s search search for a key in the art tree\n"); printf(" r remove remove an element from the art tree\n"); printf(" d dump dump art tree\n"); printf(" g graph dump art tree as a graphviz dot graph\n"); printf(" -n <number> number of key-value pairs to insert" " into the art tree\n"); printf(" -s <size> size in bytes of the memory pool" " (minimum and default: 8 MB)"); printf("\nfilling an art tree is done by reading key-value pairs\n" "from standard input.\n" "Both keys and values are single line only.\n"); } int main(int argc, char *argv[]) { if (initialize_context(&my_context, argc, argv) != 0) { usage(argv[0]); return 1; } if (art_tree_map_init(&myds, &my_context) != 0) { fprintf(stderr, "failed to initialize memory pool file\n"); return 1; } if (my_context.pop == NULL) { perror("pool initialization"); return 1; } if (art_tree_init(my_context.pop, &my_context.newpool)) { perror("pool setup"); return 1; } if ((my_context.mode & FILL)) { if (add_elements(&my_context)) { perror("add elements"); return 1; } } if ((my_context.mode & INSERT)) { if (insert_element(&my_context)) { perror("insert elements"); return 1; } } if ((my_context.mode & SEARCH)) { if (search_element(&my_context)) { perror("search elements"); return 1; } } if ((my_context.mode & REMOVE)) { if (delete_element(&my_context)) { perror("delete elements"); return 1; } } if (my_context.mode & DUMP) { art_iter(my_context.pop, dump_art_leaf_callback, NULL); } if (my_context.mode & GRAPH) { printf("digraph g {\nrankdir=LR;\n"); art_iter(my_context.pop, dump_art_node_callback, NULL); printf("}"); } exit_handler(&my_context); return 0; } int add_elements(struct ds_context *ctx) { PMEMobjpool *pop; int errors = 0; int i; int key_len; int val_len; unsigned char *key; unsigned char *value; if (ctx == NULL) { errors++; } else if (ctx->pop == NULL) { errors++; } if (!errors) { pop = ctx->pop; for (i = 0; i < ctx->insertions; i++) { key = NULL; value = NULL; key_len = read_key(&key); val_len = read_value(&value); art_insert(pop, key, key_len, value, val_len); if (key != NULL) free(key); if (value != NULL) free(value); } } return errors; } int insert_element(struct ds_context *ctx) { PMEMobjpool *pop; int errors = 0; if (ctx == NULL) { errors++; } else if (ctx->pop == NULL) { errors++; } if (!errors) { pop = ctx->pop; art_insert(pop, ctx->key, ctx->key_len, ctx->value, ctx->val_len); } return errors; } int search_element(struct ds_context *ctx) { PMEMobjpool *pop; TOID(var_string) value; int errors = 0; if (ctx == NULL) { errors++; } else 
if (ctx->pop == NULL) { errors++; } if (!errors) { pop = ctx->pop; printf("search key [%s]: ", (char *)ctx->key); value = art_search(pop, ctx->key, ctx->key_len); if (TOID_IS_NULL(value)) { printf("not found\n"); } else { printf("value [%s]\n", D_RO(value)->s); } } return errors; } int delete_element(struct ds_context *ctx) { PMEMobjpool *pop; int errors = 0; if (ctx == NULL) { errors++; } else if (ctx->pop == NULL) { errors++; } if (!errors) { pop = ctx->pop; art_delete(pop, ctx->key, ctx->key_len); } return errors; } ssize_t read_line(unsigned char **line) { size_t len = -1; ssize_t read = -1; *line = NULL; if ((read = getline((char **)line, &len, stdin)) > 0) { (*line)[read - 1] = '\0'; } return read; } static int dump_art_leaf_callback(void *data, const unsigned char *key, uint32_t key_len, const unsigned char *val, uint32_t val_len) { cb_data *cbd; if (data != NULL) { cbd = (cb_data *)data; printf("node type %d ", D_RO(cbd->node)->art_node_type); if (D_RO(cbd->node)->art_node_type == art_leaf_t) { printf("key len %" PRIu32 " = [%s], value len %" PRIu32 " = [%s]", key_len, key != NULL ? (char *)key : (char *)"NULL", val_len, val != NULL ? (char *)val : (char *)"NULL"); } printf("\n"); } else { printf("key len %" PRIu32 " = [%s], value len %" PRIu32 " = [%s]\n", key_len, key != NULL ? (char *)key : (char *)"NULL", val_len, val != NULL ? (char *)val : (char *)"NULL"); } return 0; } static void print_node_info(char *nodetype, uint64_t off, const art_node *an) { int p_len, i; p_len = an->partial_len; printf("N%" PRIx64 " [label=\"%s at\\n0x%" PRIx64 "\\n%d children", off, nodetype, off, an->num_children); if (p_len != 0) { printf("\\nlen %d", p_len); printf(": "); for (i = 0; i < p_len; i++) { printf("%c", an->partial[i]); } } printf("\"];\n"); } static int dump_art_node_callback(void *data, const unsigned char *key, uint32_t key_len, const unsigned char *val, uint32_t val_len) { cb_data *cbd; const art_node *an; TOID(art_node4) an4; TOID(art_node16) an16; TOID(art_node48) an48; TOID(art_node256) an256; TOID(art_leaf) al; TOID(art_node_u) child; TOID(var_string) oid_key; TOID(var_string) oid_value; if (data != NULL) { cbd = (cb_data *)data; switch (D_RO(cbd->node)->art_node_type) { case NODE4: an4 = D_RO(cbd->node)->u.an4; an = &(D_RO(an4)->n); child = D_RO(an4)->children[cbd->child_idx]; if (!TOID_IS_NULL(child)) { print_node_info("node4", cbd->node.oid.off, an); printf("N%" PRIx64 " -> N%" PRIx64 " [label=\"%c\"];\n", cbd->node.oid.off, child.oid.off, D_RO(an4)->keys[cbd->child_idx]); } break; case NODE16: an16 = D_RO(cbd->node)->u.an16; an = &(D_RO(an16)->n); child = D_RO(an16)->children[cbd->child_idx]; if (!TOID_IS_NULL(child)) { print_node_info("node16", cbd->node.oid.off, an); printf("N%" PRIx64 " -> N%" PRIx64 " [label=\"%c\"];\n", cbd->node.oid.off, child.oid.off, D_RO(an16)->keys[cbd->child_idx]); } break; case NODE48: an48 = D_RO(cbd->node)->u.an48; an = &(D_RO(an48)->n); child = D_RO(an48)->children[cbd->child_idx]; if (!TOID_IS_NULL(child)) { print_node_info("node48", cbd->node.oid.off, an); printf("N%" PRIx64 " -> N%" PRIx64 " [label=\"%c\"];\n", cbd->node.oid.off, child.oid.off, D_RO(an48)->keys[cbd->child_idx]); } break; case NODE256: an256 = D_RO(cbd->node)->u.an256; an = &(D_RO(an256)->n); child = D_RO(an256)->children[cbd->child_idx]; if (!TOID_IS_NULL(child)) { print_node_info("node256", cbd->node.oid.off, an); printf("N%" PRIx64 " -> N%" PRIx64 " [label=\"0x%x\"];\n", cbd->node.oid.off, child.oid.off, (char)((cbd->child_idx) & 0xff)); } break; case art_leaf_t: al = 
D_RO(cbd->node)->u.al; oid_key = D_RO(al)->key; oid_value = D_RO(al)->value; printf("N%" PRIx64 " [shape=box," "label=\"leaf at\\n0x%" PRIx64 "\"];\n", cbd->node.oid.off, cbd->node.oid.off); printf("N%" PRIx64 " [shape=box," "label=\"key at 0x%" PRIx64 ": %s\"];\n", oid_key.oid.off, oid_key.oid.off, D_RO(oid_key)->s); printf("N%" PRIx64 " [shape=box," "label=\"value at 0x%" PRIx64 ": %s\"];\n", oid_value.oid.off, oid_value.oid.off, D_RO(oid_value)->s); printf("N%" PRIx64 " -> N%" PRIx64 ";\n", cbd->node.oid.off, oid_key.oid.off); printf("N%" PRIx64 " -> N%" PRIx64 ";\n", cbd->node.oid.off, oid_value.oid.off); break; default: break; } } else { printf("leaf: key len %" PRIu32 " = [%s], value len %" PRIu32 " = [%s]\n", key_len, key, val_len, val); } return 0; }
16,439
22.688761
78
c