repo
stringlengths
1
152
file
stringlengths
14
221
code
stringlengths
501
25k
file_length
int64
501
25k
avg_line_length
float64
20
99.5
max_line_length
int64
21
134
extension_type
stringclasses
2 values
null
ceph-main/src/common/function_signature.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab /* * Copied from: * https://github.com/exclipy/inline_variant_visitor/blob/master/function_signature.hpp * which apparently copied it from * http://stackoverflow.com/questions/4771417/how-to-get-the-signature-of-a-c-bind-expression */ #ifndef FUNCTION_SIGNATURE_H #define FUNCTION_SIGNATURE_H #include <boost/mpl/pop_front.hpp> #include <boost/mpl/push_front.hpp> #include <boost/function_types/function_type.hpp> #include <boost/function_types/result_type.hpp> #include <boost/function_types/parameter_types.hpp> template <typename F> struct signature_of_member { typedef typename boost::function_types::result_type<F>::type result_type; typedef typename boost::function_types::parameter_types<F>::type parameter_types; typedef typename boost::mpl::pop_front<parameter_types>::type base; typedef typename boost::mpl::push_front<base, result_type>::type L; typedef typename boost::function_types::function_type<L>::type type; }; template <typename F, bool is_class> struct signature_of_impl { typedef typename boost::function_types::function_type<F>::type type; }; template <typename F> struct signature_of_impl<F, true> { typedef typename signature_of_member<decltype(&F::operator())>::type type; }; template <typename F> struct signature_of { typedef typename signature_of_impl<F, boost::is_class<F>::value>::type type; }; #endif
1,474
29.729167
93
h
null
ceph-main/src/common/hex.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab /* * Ceph - scalable distributed file system * * Copyright (C) 2011 New Dream Network * * This is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License version 2.1, as published by the Free Software * Foundation. See file COPYING. * */ #ifndef CEPH_COMMON_HEX_H #define CEPH_COMMON_HEX_H #include <string> extern void hex2str(const char *s, int len, char *buf, int dest_len); extern std::string hexdump(std::string msg, const char *s, int len); #endif
630
23.269231
70
h
null
ceph-main/src/common/histogram.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab /* * Ceph - scalable distributed file system * * This is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License version 2.1, as published by the Free Software * Foundation. See file COPYING. * Copyright 2013 Inktank */ #ifndef CEPH_HISTOGRAM_H #define CEPH_HISTOGRAM_H #include <list> #include "include/encoding.h" #include "include/intarith.h" namespace ceph { class Formatter; } /** * power of 2 histogram */ struct pow2_hist_t { // /** * histogram * * bin size is 2^index * value is count of elements that are <= the current bin but > the previous bin. */ std::vector<int32_t> h; private: /// expand to at least another's size void _expand_to(unsigned s) { if (s > h.size()) h.resize(s, 0); } /// drop useless trailing 0's void _contract() { unsigned p = h.size(); while (p > 0 && h[p-1] == 0) --p; h.resize(p); } public: void clear() { h.clear(); } bool empty() const { return h.empty(); } void set_bin(int bin, int32_t count) { _expand_to(bin + 1); h[bin] = count; _contract(); } void add(int32_t v) { int bin = cbits(v); _expand_to(bin + 1); h[bin]++; _contract(); } bool operator==(const pow2_hist_t &r) const { return h == r.h; } /// get a value's position in the histogram. /// /// positions are represented as values in the range [0..1000000] /// (millionths on the unit interval). 
/// /// @param v [in] value (non-negative) /// @param lower [out] pointer to lower-bound (0..1000000) /// @param upper [out] pointer to the upper bound (0..1000000) int get_position_micro(int32_t v, uint64_t *lower, uint64_t *upper) { if (v < 0) return -1; unsigned bin = cbits(v); uint64_t lower_sum = 0, upper_sum = 0, total = 0; for (unsigned i=0; i<h.size(); ++i) { if (i <= bin) upper_sum += h[i]; if (i < bin) lower_sum += h[i]; total += h[i]; } if (total > 0) { *lower = lower_sum * 1000000 / total; *upper = upper_sum * 1000000 / total; } return 0; } void add(const pow2_hist_t& o) { _expand_to(o.h.size()); for (unsigned p = 0; p < o.h.size(); ++p) h[p] += o.h[p]; _contract(); } void sub(const pow2_hist_t& o) { _expand_to(o.h.size()); for (unsigned p = 0; p < o.h.size(); ++p) h[p] -= o.h[p]; _contract(); } int32_t upper_bound() const { return 1 << h.size(); } /// decay histogram by N bits (default 1, for a halflife) void decay(int bits = 1); void dump(ceph::Formatter *f) const; void encode(ceph::buffer::list &bl) const; void decode(ceph::buffer::list::const_iterator &bl); static void generate_test_instances(std::list<pow2_hist_t*>& o); }; WRITE_CLASS_ENCODER(pow2_hist_t) #endif /* CEPH_HISTOGRAM_H */
2,973
22.054264
83
h
null
ceph-main/src/common/hobject.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab /* * Ceph - scalable distributed file system * * Copyright (C) 2004-2006 Sage Weil <sage@newdream.net> * * This is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License version 2.1, as published by the Free Software * Foundation. See file COPYING. * */ #ifndef __CEPH_OS_HOBJECT_H #define __CEPH_OS_HOBJECT_H #if FMT_VERSION >= 90000 #include <fmt/ostream.h> #endif #include "include/types.h" #include "json_spirit/json_spirit_value.h" #include "include/ceph_assert.h" // spirit clobbers it! #include "reverse.h" namespace ceph { class Formatter; } #ifndef UINT64_MAX #define UINT64_MAX (18446744073709551615ULL) #endif #ifndef INT64_MIN #define INT64_MIN ((int64_t)0x8000000000000000ll) #endif struct hobject_t { public: static const int64_t POOL_META = -1; static const int64_t POOL_TEMP_START = -2; // and then negative static bool is_temp_pool(int64_t pool) { return pool <= POOL_TEMP_START; } static int64_t get_temp_pool(int64_t pool) { return POOL_TEMP_START - pool; } static bool is_meta_pool(int64_t pool) { return pool == POOL_META; } public: object_t oid; snapid_t snap; private: uint32_t hash; bool max; uint32_t nibblewise_key_cache; uint32_t hash_reverse_bits; public: int64_t pool; std::string nspace; private: std::string key; class hobject_t_max {}; public: const std::string& get_key() const { return key; } void set_key(const std::string& key_) { if (key_ == oid.name) key.clear(); else key = key_; } std::string to_str() const; uint32_t get_hash() const { return hash; } void set_hash(uint32_t value) { hash = value; build_hash_cache(); } static bool match_hash(uint32_t to_check, uint32_t bits, uint32_t match) { return (match & ~((~0)<<bits)) == (to_check & ~((~0)<<bits)); } bool match(uint32_t bits, uint32_t match) const { return match_hash(hash, bits, match); } bool is_temp() const { return is_temp_pool(pool) 
&& pool != INT64_MIN; } bool is_meta() const { return is_meta_pool(pool); } int64_t get_logical_pool() const { if (is_temp_pool(pool)) return get_temp_pool(pool); // it's reversible else return pool; } hobject_t() : snap(0), hash(0), max(false), pool(INT64_MIN) { build_hash_cache(); } hobject_t(const hobject_t &rhs) = default; hobject_t(hobject_t &&rhs) = default; hobject_t(hobject_t_max &&singleton) : hobject_t() { max = true; } hobject_t &operator=(const hobject_t &rhs) = default; hobject_t &operator=(hobject_t &&rhs) = default; hobject_t &operator=(hobject_t_max &&singleton) { *this = hobject_t(); max = true; return *this; } // maximum sorted value. static hobject_t_max get_max() { return hobject_t_max(); } hobject_t(const object_t& oid, const std::string& key, snapid_t snap, uint32_t hash, int64_t pool, const std::string& nspace) : oid(oid), snap(snap), hash(hash), max(false), pool(pool), nspace(nspace), key(oid.name == key ? std::string() : key) { build_hash_cache(); } hobject_t(const sobject_t &soid, const std::string &key, uint32_t hash, int64_t pool, const std::string& nspace) : oid(soid.oid), snap(soid.snap), hash(hash), max(false), pool(pool), nspace(nspace), key(soid.oid.name == key ? std::string() : key) { build_hash_cache(); } // used by Crimson hobject_t(const std::string &key, snapid_t snap, uint32_t reversed_hash, int64_t pool, const std::string& nspace) : oid(key), snap(snap), max(false), pool(pool), nspace(nspace) { set_bitwise_key_u32(reversed_hash); } /// @return min hobject_t ret s.t. 
ret.hash == this->hash hobject_t get_boundary() const { if (is_max()) return *this; hobject_t ret; ret.set_hash(hash); ret.pool = pool; return ret; } hobject_t get_object_boundary() const { if (is_max()) return *this; hobject_t ret = *this; ret.snap = 0; return ret; } /// @return head version of this hobject_t hobject_t get_head() const { hobject_t ret(*this); ret.snap = CEPH_NOSNAP; return ret; } /// @return snapdir version of this hobject_t hobject_t get_snapdir() const { hobject_t ret(*this); ret.snap = CEPH_SNAPDIR; return ret; } /// @return true if object is snapdir bool is_snapdir() const { return snap == CEPH_SNAPDIR; } /// @return true if object is head bool is_head() const { return snap == CEPH_NOSNAP; } /// @return true if object is neither head nor snapdir nor max bool is_snap() const { return !is_max() && !is_head() && !is_snapdir(); } /// @return true iff the object should have a snapset in it's attrs bool has_snapset() const { return is_head() || is_snapdir(); } /* Do not use when a particular hash function is needed */ explicit hobject_t(const sobject_t &o) : oid(o.oid), snap(o.snap), max(false), pool(POOL_META) { set_hash(std::hash<sobject_t>()(o)); } bool is_max() const { ceph_assert(!max || (*this == hobject_t(hobject_t::get_max()))); return max; } bool is_min() const { // this needs to match how it's constructed return snap == 0 && hash == 0 && !max && pool == INT64_MIN; } static uint32_t _reverse_bits(uint32_t v) { return reverse_bits(v); } static uint32_t _reverse_nibbles(uint32_t retval) { return reverse_nibbles(retval); } /** * Returns set S of strings such that for any object * h where h.match(bits, mask), there is some string * s \f$\in\f$ S such that s is a prefix of h.to_str(). * Furthermore, for any s \f$\in\f$ S, s is a prefix of * h.str() implies that h.match(bits, mask). 
*/ static std::set<std::string> get_prefixes( uint32_t bits, uint32_t mask, int64_t pool); // filestore nibble-based key uint32_t get_nibblewise_key_u32() const { ceph_assert(!max); return nibblewise_key_cache; } uint64_t get_nibblewise_key() const { return max ? 0x100000000ull : nibblewise_key_cache; } // newer bit-reversed key uint32_t get_bitwise_key_u32() const { ceph_assert(!max); return hash_reverse_bits; } uint64_t get_bitwise_key() const { return max ? 0x100000000ull : hash_reverse_bits; } // please remember to update set_bitwise_key_u32() also // once you change build_hash_cache() void build_hash_cache() { nibblewise_key_cache = _reverse_nibbles(hash); hash_reverse_bits = _reverse_bits(hash); } void set_bitwise_key_u32(uint32_t value) { hash = _reverse_bits(value); // below is identical to build_hash_cache() and shall be // updated correspondingly if you change build_hash_cache() nibblewise_key_cache = _reverse_nibbles(hash); hash_reverse_bits = value; } const std::string& get_effective_key() const { if (key.length()) return key; return oid.name; } hobject_t make_temp_hobject(const std::string& name) const { return hobject_t(object_t(name), "", CEPH_NOSNAP, hash, get_temp_pool(pool), ""); } void swap(hobject_t &o) { hobject_t temp(o); o = (*this); (*this) = temp; } const std::string &get_namespace() const { return nspace; } bool parse(const std::string& s); void encode(ceph::buffer::list& bl) const; void decode(ceph::bufferlist::const_iterator& bl); void decode(json_spirit::Value& v); void dump(ceph::Formatter *f) const; static void generate_test_instances(std::list<hobject_t*>& o); friend int cmp(const hobject_t& l, const hobject_t& r); auto operator<=>(const hobject_t &rhs) const noexcept { auto cmp = max <=> rhs.max; if (cmp != 0) return cmp; cmp = pool <=> rhs.pool; if (cmp != 0) return cmp; cmp = get_bitwise_key() <=> rhs.get_bitwise_key(); if (cmp != 0) return cmp; cmp = nspace <=> rhs.nspace; if (cmp != 0) return cmp; if (!(get_key().empty() && 
rhs.get_key().empty())) { cmp = get_effective_key() <=> rhs.get_effective_key(); if (cmp != 0) return cmp; } cmp = oid <=> rhs.oid; if (cmp != 0) return cmp; return snap <=> rhs.snap; } bool operator==(const hobject_t& rhs) const noexcept { return operator<=>(rhs) == 0; } friend struct ghobject_t; }; WRITE_CLASS_ENCODER(hobject_t) namespace std { template<> struct hash<hobject_t> { size_t operator()(const hobject_t &r) const { static rjhash<uint64_t> RJ; return RJ(r.get_hash() ^ r.snap); } }; } // namespace std std::ostream& operator<<(std::ostream& out, const hobject_t& o); template <typename T> struct always_false { using value = std::false_type; }; template <typename T> inline bool operator==(const hobject_t &lhs, const T&) { static_assert(always_false<T>::value::value, "Do not compare to get_max()"); return lhs.is_max(); } template <typename T> inline bool operator==(const T&, const hobject_t &rhs) { static_assert(always_false<T>::value::value, "Do not compare to get_max()"); return rhs.is_max(); } template <typename T> inline bool operator!=(const hobject_t &lhs, const T&) { static_assert(always_false<T>::value::value, "Do not compare to get_max()"); return !lhs.is_max(); } template <typename T> inline bool operator!=(const T&, const hobject_t &rhs) { static_assert(always_false<T>::value::value, "Do not compare to get_max()"); return !rhs.is_max(); } extern int cmp(const hobject_t& l, const hobject_t& r); template <typename T> static inline int cmp(const hobject_t &l, const T&) { static_assert(always_false<T>::value::value, "Do not compare to get_max()"); return l.is_max() ? 0 : -1; } template <typename T> static inline int cmp(const T&, const hobject_t&r) { static_assert(always_false<T>::value::value, "Do not compare to get_max()"); return r.is_max() ? 
0 : 1; } typedef version_t gen_t; struct ghobject_t { static const gen_t NO_GEN = UINT64_MAX; bool max = false; shard_id_t shard_id = shard_id_t::NO_SHARD; hobject_t hobj; gen_t generation = NO_GEN; ghobject_t() = default; explicit ghobject_t(const hobject_t &obj) : hobj(obj) {} ghobject_t(const hobject_t &obj, gen_t gen, shard_id_t shard) : shard_id(shard), hobj(obj), generation(gen) {} // used by Crimson ghobject_t(shard_id_t shard, int64_t pool, uint32_t reversed_hash, const std::string& nspace, const std::string& oid, snapid_t snap, gen_t gen) : shard_id(shard), hobj(oid, snap, reversed_hash, pool, nspace), generation(gen) {} static ghobject_t make_pgmeta(int64_t pool, uint32_t hash, shard_id_t shard) { hobject_t h(object_t(), std::string(), CEPH_NOSNAP, hash, pool, std::string()); return ghobject_t(h, NO_GEN, shard); } bool is_pgmeta() const { // make sure we are distinct from hobject_t(), which has pool INT64_MIN return hobj.pool >= 0 && hobj.oid.name.empty(); } bool match(uint32_t bits, uint32_t match) const { return hobj.match_hash(hobj.hash, bits, match); } /// @return min ghobject_t ret s.t. ret.hash == this->hash ghobject_t get_boundary() const { if (hobj.is_max()) return *this; ghobject_t ret; ret.hobj.set_hash(hobj.hash); ret.shard_id = shard_id; ret.hobj.pool = hobj.pool; return ret; } uint32_t get_nibblewise_key_u32() const { return hobj.get_nibblewise_key_u32(); } uint32_t get_nibblewise_key() const { return hobj.get_nibblewise_key(); } bool is_degenerate() const { return generation == NO_GEN && shard_id == shard_id_t::NO_SHARD; } bool is_no_gen() const { return generation == NO_GEN; } bool is_no_shard() const { return shard_id == shard_id_t::NO_SHARD; } void set_shard(shard_id_t s) { shard_id = s; } bool parse(const std::string& s); // maximum sorted value. 
static ghobject_t get_max() { ghobject_t h; h.max = true; h.hobj = hobject_t::get_max(); // so that is_max() => hobj.is_max() return h; } bool is_max() const { return max; } bool is_min() const { return *this == ghobject_t(); } void swap(ghobject_t &o) { ghobject_t temp(o); o = (*this); (*this) = temp; } void encode(ceph::buffer::list& bl) const; void decode(ceph::buffer::list::const_iterator& bl); void decode(json_spirit::Value& v); size_t encoded_size() const; void dump(ceph::Formatter *f) const; static void generate_test_instances(std::list<ghobject_t*>& o); friend int cmp(const ghobject_t& l, const ghobject_t& r); auto operator<=>(const ghobject_t&) const = default; bool operator==(const ghobject_t&) const = default; }; WRITE_CLASS_ENCODER(ghobject_t) namespace std { template<> struct hash<ghobject_t> { size_t operator()(const ghobject_t &r) const { static rjhash<uint64_t> RJ; static hash<hobject_t> HO; size_t hash = HO(r.hobj); hash = RJ(hash ^ r.generation); hash = hash ^ r.shard_id.id; return hash; } }; } // namespace std std::ostream& operator<<(std::ostream& out, const ghobject_t& o); #if FMT_VERSION >= 90000 template <> struct fmt::formatter<ghobject_t> : fmt::ostream_formatter {}; #endif extern int cmp(const ghobject_t& l, const ghobject_t& r); #endif
13,472
25.110465
83
h
null
ceph-main/src/common/hobject_fmt.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #pragma once /** * \file fmtlib formatters for some hobject.h classes */ #include <fmt/format.h> #include <fmt/ranges.h> #include "common/hobject.h" #include "include/object_fmt.h" #include "msg/msg_fmt.h" // \todo reimplement static inline void append_out_escaped(const std::string& in, std::string* out) { for (auto i = in.cbegin(); i != in.cend(); ++i) { if (*i == '%' || *i == ':' || *i == '/' || *i < 32 || *i >= 127) { char buf[4]; snprintf(buf, sizeof(buf), "%%%02x", (int)(unsigned char)*i); out->append(buf); } else { out->push_back(*i); } } } template <> struct fmt::formatter<hobject_t> { constexpr auto parse(format_parse_context& ctx) { return ctx.begin(); } template <typename FormatContext> auto format(const hobject_t& ho, FormatContext& ctx) { if (ho == hobject_t{}) { return fmt::format_to(ctx.out(), "MIN"); } if (ho.is_max()) { return fmt::format_to(ctx.out(), "MAX"); } std::string v; append_out_escaped(ho.nspace, &v); v.push_back(':'); append_out_escaped(ho.get_key(), &v); v.push_back(':'); append_out_escaped(ho.oid.name, &v); return fmt::format_to(ctx.out(), "{}:{:08x}:{}:{}", static_cast<uint64_t>(ho.pool), ho.get_bitwise_key_u32(), v, ho.snap); } };
1,398
24.907407
88
h
null
ceph-main/src/common/hostname.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab /* * Ceph - scalable distributed file system * * Copyright (C) 2004-2006 Sage Weil <sage@newdream.net> * * This is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License version 2.1, as published by the Free Software * Foundation. See file COPYING. * */ #ifndef CEPH_COMMON_HOSTNAME_H #define CEPH_COMMON_HOSTNAME_H #include <string> extern std::string ceph_get_hostname(); extern std::string ceph_get_short_hostname(); #endif
601
25.173913
70
h
null
ceph-main/src/common/inline_variant.h
// -*- mode:C++; tab-width:8; c-basic-offset:4; indent-tabs-mode:t -*- // vim: ts=8 sw=4 smarttab /* * Copied from: * https://github.com/exclipy/inline_variant_visitor/blob/master/inline_variant.hpp */ #ifndef INLINE_VARIANT_H #define INLINE_VARIANT_H #include <boost/function_types/function_arity.hpp> #include <boost/fusion/algorithm/transformation/transform.hpp> #include <boost/mpl/contains.hpp> #include <boost/mpl/map.hpp> #include <boost/mpl/vector.hpp> #include <boost/mpl/range_c.hpp> #include <boost/noncopyable.hpp> #include "function_signature.h" namespace detail { // A metafunction class for getting the argument type from a unary function or functor type struct function_arg_extractor { // Function is either a function type like void(int const&), or a functor - eg. a class with void operator(int) // Sets type to the argument type with the constness and referenceness stripped (eg. int) template <typename Function> struct apply { private: typedef typename boost::remove_const< typename boost::remove_reference<Function>::type >::type bare_type; typedef typename signature_of<bare_type>::type normalized_function_type; typedef typename boost::function_types::function_arity<normalized_function_type>::type arity; typedef typename boost::function_types::parameter_types<normalized_function_type>::type parameter_types; typedef typename boost::function_types::result_type<normalized_function_type>::type result_type; BOOST_STATIC_ASSERT_MSG((arity::value == 1), "make_visitor called with a non-unary function"); typedef typename boost::mpl::front<parameter_types>::type parameter_type; public: typedef typename boost::remove_const< typename boost::remove_reference<parameter_type>::type >::type type; }; }; struct make_pair { template <typename AType, typename Ind> struct apply { typedef boost::mpl::pair<AType, Ind> type; }; }; // A metafunction class that asserts the second argument is in Allowed, and returns void template<typename Allowed> struct check_in { template <typename 
Type1, typename Type2> struct apply { private: BOOST_STATIC_ASSERT_MSG((boost::mpl::contains<Allowed, typename boost::mpl::first<Type2>::type>::value), "make_visitor called with spurious handler functions"); public: typedef void type; }; }; template <typename Seq> struct as_map { private: struct insert_helper { template <typename M, typename P> struct apply { typedef typename boost::mpl::insert< M, P>::type type; }; }; public: typedef typename boost::mpl::fold<Seq, boost::mpl::map0<>, insert_helper>::type type; }; // A functor template suitable for passing into apply_visitor. The constructor accepts the list of handler functions, // which are then exposed through a set of operator()s template <typename Result, typename Variant, typename... Functions> struct generic_visitor : boost::static_visitor<Result>, boost::noncopyable { private: typedef generic_visitor<Result, Variant, Functions...> type; // Compute the function_map type typedef boost::mpl::vector<Functions...> function_types; typedef typename boost::mpl::transform<function_types, function_arg_extractor>::type arg_types; typedef typename boost::mpl::transform< arg_types, boost::mpl::range_c<int, 0, boost::mpl::size<arg_types>::value>, make_pair >::type pair_list; typedef typename as_map<pair_list>::type fmap; // Check that the argument types are unique BOOST_STATIC_ASSERT_MSG((boost::mpl::size<fmap>::value == boost::mpl::size<arg_types>::value), "make_visitor called with non-unique argument types for handler functions"); // Check that there aren't any argument types not in the variant types typedef typename boost::mpl::fold<fmap, void, check_in<typename Variant::types> >::type dummy; boost::fusion::vector<Functions...> fvec; template <typename T> Result apply_helper(const T& object, boost::mpl::true_) const { typedef typename boost::mpl::at<fmap, T>::type Ind; return boost::fusion::at<Ind>(fvec)(object); } template <typename T> Result apply_helper(const T& object, boost::mpl::false_) const { return Result(); 
} BOOST_MOVABLE_BUT_NOT_COPYABLE(generic_visitor) public: generic_visitor(BOOST_RV_REF(type) other) : fvec(boost::move(other.fvec)) { } generic_visitor(Functions&&... functions) : fvec(std::forward<Functions>(functions)...) { } template <typename T> Result operator()(const T& object) const { typedef typename boost::mpl::has_key<fmap, T>::type correct_key; BOOST_STATIC_ASSERT_MSG(correct_key::value, "make_visitor called without specifying handlers for all required types"); return apply_helper(object, correct_key()); } }; // A metafunction class for getting the return type of a function struct function_return_extractor { template <typename Function> struct apply : boost::function_types::result_type<typename signature_of<Function>::type> { }; }; // A metafunction class that asserts the two arguments are the same and returns the first one struct check_same { template <typename Type1, typename Type2> struct apply { private: BOOST_STATIC_ASSERT_MSG((boost::is_same<Type1, Type2>::value), "make_visitor called with functions of differing return types"); public: typedef Type1 type; }; }; // A metafunction for getting the required generic_visitor type for the set of Functions template <typename Variant, typename... 
Functions> struct get_generic_visitor { private: typedef boost::mpl::vector<Functions...> function_types; typedef typename boost::mpl::transform< function_types, boost::remove_const< boost::remove_reference<boost::mpl::_1> > >::type bare_function_types; typedef typename boost::mpl::transform<bare_function_types, function_return_extractor>::type return_types; public: // Set result_type to the return type of the first function typedef typename boost::mpl::front<return_types>::type result_type; typedef generic_visitor<result_type, Variant, Functions...> type; private: // Assert that every return type is the same as the first one typedef typename boost::mpl::fold<return_types, result_type, check_same>::type dummy; }; // Accepts a set of functions and returns an object suitable for apply_visitor template <typename Variant, typename... Functions> auto make_visitor(BOOST_RV_REF(Functions)... functions) -> typename detail::get_generic_visitor<Variant, Functions...>::type { return typename detail::get_generic_visitor<Variant, Functions...>::type(boost::forward<Functions>(functions)...); } } template <typename Variant, typename... Functions> auto match(Variant const& variant, BOOST_RV_REF(Functions)... functions) -> typename detail::get_generic_visitor<Variant, Functions...>::result_type { return boost::apply_visitor(detail::make_visitor<Variant>( boost::forward<Functions>(functions)...), variant); } #endif
7,222
33.070755
118
h
null
ceph-main/src/common/interval_map.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
 * Ceph - scalable distributed file system
 *
 * Copyright (C) 2016 Red Hat
 *
 * This is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License version 2.1, as published by the Free Software
 * Foundation.  See file COPYING.
 *
 */

#ifndef INTERVAL_MAP_H
#define INTERVAL_MAP_H

#include "include/interval_set.h"
#include <initializer_list>
// NOTE(review): erase() uses std::vector but <vector> is not included
// directly here — presumably pulled in transitively; confirm before
// relying on it.

template <typename K, typename V, typename S>
/**
 * interval_map
 *
 * Maps intervals to values.  Erasing or inserting over an existing
 * range will use S::operator() to split any overlapping existing
 * values.
 *
 * Surprisingly, boost/icl/interval_map doesn't seem to be appropriate
 * for this use case.  The aggregation concept seems to assume
 * commutativity, which doesn't work if we want more recent insertions
 * to overwrite previous ones.
 *
 * The splitter policy S (as used below) must provide:
 *   - V split(K offset, K len, V &v): a length-len slice of v starting
 *     at offset
 *   - bool can_merge(const V&, const V&): whether two adjacent values
 *     may be coalesced
 *   - V merge(V&&, V&&): coalesce two adjacent values
 *   - K length(const V&): logical length of a value
 */
class interval_map {
  S s;  // splitter/merger policy (stateless helper)
  using map = std::map<K, std::pair<K, V> >;
  using mapiter = typename std::map<K, std::pair<K, V> >::iterator;
  using cmapiter = typename std::map<K, std::pair<K, V> >::const_iterator;
  // Underlying representation: offset -> (length, value); intervals
  // never overlap.
  map m;
  // Return [fst, lst): all intervals overlapping [off, off+len).
  std::pair<mapiter, mapiter> get_range(K off, K len) {
    // fst is first iterator with end after off (may be end)
    auto fst = m.upper_bound(off);
    if (fst != m.begin())
      --fst;
    if (fst != m.end() && off >= (fst->first + fst->second.first))
      ++fst;

    // lst is first iterator with start after off + len (may be end)
    auto lst = m.lower_bound(off + len);
    return std::make_pair(fst, lst);
  }
  // const overload of the above.
  std::pair<cmapiter, cmapiter> get_range(K off, K len) const {
    // fst is first iterator with end after off (may be end)
    auto fst = m.upper_bound(off);
    if (fst != m.begin())
      --fst;
    if (fst != m.end() && off >= (fst->first + fst->second.first))
      ++fst;

    // lst is first iterator with start after off + len (may be end)
    auto lst = m.lower_bound(off + len);
    return std::make_pair(fst, lst);
  }
  // Attempt to coalesce *niter with its immediate predecessor and
  // successor when they are exactly adjacent and S::can_merge allows.
  void try_merge(mapiter niter) {
    if (niter != m.begin()) {
      auto prev = niter;
      prev--;
      if (prev->first + prev->second.first == niter->first &&
          s.can_merge(prev->second.second, niter->second.second)) {
        V n = s.merge(
          std::move(prev->second.second),
          std::move(niter->second.second));
        K off = prev->first;
        K len = niter->first + niter->second.first - off;
        niter++;
        // erase both source intervals, reinsert the merged one
        m.erase(prev, niter);
        auto p = m.insert(
          std::make_pair(
            off,
            std::make_pair(len, std::move(n))));
        ceph_assert(p.second);
        niter = p.first;
      }
    }
    auto next = niter;
    next++;
    if (next != m.end() &&
        niter->first + niter->second.first == next->first &&
        s.can_merge(niter->second.second, next->second.second)) {
      V n = s.merge(
        std::move(niter->second.second),
        std::move(next->second.second));
      K off = niter->first;
      K len = next->first + next->second.first - off;
      next++;
      m.erase(niter, next);
      auto p = m.insert(
        std::make_pair(
          off,
          std::make_pair(len, std::move(n))));
      ceph_assert(p.second);
    }
  }
public:
  interval_map() = default;
  // Construct from {offset -> (length, value)} literals.
  interval_map(std::initializer_list<typename map::value_type> l) {
    for (auto& v : l) {
      insert(v.first, v.second.first, v.second.second);
    }
  }

  // Return a new map restricted to [off, off+len); boundary intervals
  // are trimmed via S::split.
  interval_map intersect(K off, K len) const {
    interval_map ret;
    auto limits = get_range(off, len);
    for (auto i = limits.first; i != limits.second; ++i) {
      K o = i->first;
      K l = i->second.first;
      V v = i->second.second;
      if (o < off) {
        // trim the head overhang
        V p = v;
        l -= (off - o);
        v = s.split(off - o, l, p);
        o = off;
      }
      if ((o + l) > (off + len)) {
        // trim the tail overhang
        V p = v;
        l -= (o + l) - (off + len);
        v = s.split(0, l, p);
      }
      ret.insert(o, l, v);
    }
    return ret;
  }
  void clear() {
    m.clear();
  }
  // Remove [off, off+len); partially covered intervals are split and
  // the surviving pieces reinserted.
  void erase(K off, K len) {
    if (len == 0)
      return;
    auto range = get_range(off, len);
    std::vector<
      std::pair<
        K,
        std::pair<K, V> >> to_insert;
    for (auto i = range.first; i != range.second; ++i) {
      if (i->first < off) {
        // keep the piece before the erased range
        to_insert.emplace_back(
          std::make_pair(
            i->first,
            std::make_pair(
              off - i->first,
              s.split(0, off - i->first, i->second.second))));
      }
      if ((off + len) < (i->first + i->second.first)) {
        // keep the piece after the erased range
        K nlen = (i->first + i->second.first) - (off + len);
        to_insert.emplace_back(
          std::make_pair(
            off + len,
            std::make_pair(
              nlen,
              s.split(i->second.first - nlen, nlen, i->second.second))));
      }
    }
    m.erase(range.first, range.second);
    m.insert(to_insert.begin(), to_insert.end());
  }
  // Insert [off, off+len) -> v, overwriting any overlap; adjacent
  // compatible intervals are coalesced afterwards.
  void insert(K off, K len, V &&v) {
    ceph_assert(len > 0);
    ceph_assert(len == s.length(v));
    erase(off, len);
    auto p = m.insert(make_pair(off, std::make_pair(len, std::forward<V>(v))));
    ceph_assert(p.second);
    try_merge(p.first);
  }
  // Drain `other` into this map (move semantics, other ends up empty).
  void insert(interval_map &&other) {
    for (auto i = other.m.begin();
         i != other.m.end();
         other.m.erase(i++)) {
      insert(i->first, i->second.first, std::move(i->second.second));
    }
  }
  // Copying overload of insert(off, len, v).
  void insert(K off, K len, const V &v) {
    ceph_assert(len > 0);
    ceph_assert(len == s.length(v));
    erase(off, len);
    auto p = m.insert(make_pair(off, std::make_pair(len, v)));
    ceph_assert(p.second);
    try_merge(p.first);
  }
  // Copy every interval of `other` into this map.
  void insert(const interval_map &other) {
    for (auto &&i: other) {
      insert(i.get_off(), i.get_len(), i.get_val());
    }
  }
  bool empty() const {
    return m.empty();
  }
  // Project the mapped intervals into a plain interval_set (values
  // dropped).
  interval_set<K> get_interval_set() const {
    interval_set<K> ret;
    for (auto &&i: *this) {
      ret.insert(i.get_off(), i.get_len());
    }
    return ret;
  }
  // Bidirectional iterator exposing (off, len, val) accessors; derefs
  // to itself so range-for works with the get_*() interface.
  class const_iterator {
    cmapiter it;
    const_iterator(cmapiter &&it) : it(std::move(it)) {}
    const_iterator(const cmapiter &it)
      : it(it) {}

    friend class interval_map;
  public:
    const_iterator(const const_iterator &) = default;
    const_iterator &operator=(const const_iterator &) = default;

    const_iterator &operator++() {
      ++it;
      return *this;
    }
    const_iterator operator++(int) {
      return const_iterator(it++);
    }
    const_iterator &operator--() {
      --it;
      return *this;
    }
    const_iterator operator--(int) {
      return const_iterator(it--);
    }
    bool operator==(const const_iterator &rhs) const {
      return it == rhs.it;
    }
    bool operator!=(const const_iterator &rhs) const {
      return it != rhs.it;
    }
    K get_off() const {
      return it->first;
    }
    K get_len() const {
      return it->second.first;
    }
    const V &get_val() const {
      return it->second.second;
    }
    const_iterator &operator*() {
      return *this;
    }
  };
  const_iterator begin() const {
    return const_iterator(m.begin());
  }
  const_iterator end() const {
    return const_iterator(m.end());
  }
  // Iterator pair over all intervals overlapping [off, off+len).
  std::pair<const_iterator, const_iterator> get_containing_range(
    K off,
    K len) const {
    auto rng = get_range(off, len);
    return std::make_pair(const_iterator(rng.first), const_iterator(rng.second));
  }
  // Number of distinct stored intervals (extents).
  unsigned ext_count() const {
    return m.size();
  }
  bool operator==(const interval_map &rhs) const {
    return m == rhs.m;
  }
  // Human-readable dump: {off~len(length), ...}.
  std::ostream &print(std::ostream &out) const {
    bool first = true;
    out << "{";
    for (auto &&i: *this) {
      if (first) {
        first = false;
      } else {
        out << ",";
      }
      out << i.get_off() << "~" << i.get_len() << "("
          << s.length(i.get_val()) << ")";
    }
    return out << "}";
  }
};

template <typename K, typename V, typename S>
std::ostream &operator<<(std::ostream &out, const interval_map<K, V, S> &m) {
  return m.print(out);
}

#endif
7,720
25.624138
81
h
null
ceph-main/src/common/intrusive_lru.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#pragma once

#include <boost/intrusive_ptr.hpp>
#include <boost/intrusive/set.hpp>
#include <boost/intrusive/list.hpp>

namespace ceph::common {

/**
 * intrusive_lru: lru implementation with embedded map and list hook
 *
 * Elements will be stored in an intrusive set. Once an element is no longer
 * referenced it will remain in the set. The unreferenced elements will be
 * evicted from the set once the set size exceeds the `lru_target_size`.
 * Referenced elements will not be evicted as this is a registery with
 * extra caching capabilities.
 *
 * Note, this implementation currently is entirely thread-unsafe.
 */

// Config bundle: K = lookup key, V = stored value type (must derive from
// intrusive_lru_base), VToK = functor extracting the key from a value.
template <typename K, typename V, typename VToK>
struct intrusive_lru_config {
  using key_type = K;
  using value_type = V;
  using key_of_value = VToK;
};

template <typename Config>
class intrusive_lru;

template <typename Config>
class intrusive_lru_base;

template <typename Config>
void intrusive_ptr_add_ref(intrusive_lru_base<Config> *p);
template <typename Config>
void intrusive_ptr_release(intrusive_lru_base<Config> *p);

// Base class a cached value must inherit from; carries the intrusive
// hooks, the refcount, and the back-pointer used as "is referenced" flag.
template <typename Config>
class intrusive_lru_base {
  unsigned use_count = 0;

  // lru points to the corresponding intrusive_lru
  // which will be set to null if its use_count
  // is zero (aka unreferenced).
  intrusive_lru<Config> *lru = nullptr;

public:
  bool is_referenced() const {
    return static_cast<bool>(lru);
  }
  bool is_unreferenced() const {
    return !is_referenced();
  }
  boost::intrusive::set_member_hook<> set_hook;     // membership in lru_set
  boost::intrusive::list_member_hook<> list_hook;   // membership in unreferenced_list

  using Ref = boost::intrusive_ptr<typename Config::value_type>;
  using lru_t = intrusive_lru<Config>;

  friend intrusive_lru<Config>;
  friend void intrusive_ptr_add_ref<>(intrusive_lru_base<Config> *);
  friend void intrusive_ptr_release<>(intrusive_lru_base<Config> *);

  virtual ~intrusive_lru_base() {}
};

template <typename Config>
class intrusive_lru {
  using base_t = intrusive_lru_base<Config>;
  using K = typename Config::key_type;
  using T = typename Config::value_type;
  using TRef = typename base_t::Ref;

  using lru_set_option_t = boost::intrusive::member_hook<
    base_t,
    boost::intrusive::set_member_hook<>,
    &base_t::set_hook>;

  using VToK = typename Config::key_of_value;

  // Adapts the user's key-of-value functor (declared on T) to work on
  // base_t, which is what the intrusive set stores.
  struct VToKWrapped {
    using type = typename VToK::type;
    const type &operator()(const base_t &obc) {
      return VToK()(static_cast<const T&>(obc));
    }
  };

  using lru_set_t = boost::intrusive::set<
    base_t,
    lru_set_option_t,
    boost::intrusive::key_of_value<VToKWrapped>
    >;
  // All elements, referenced or not, keyed by VToK.
  lru_set_t lru_set;

  using lru_list_t = boost::intrusive::list<
    base_t,
    boost::intrusive::member_hook<
      base_t,
      boost::intrusive::list_member_hook<>,
      &base_t::list_hook>>;
  // Eviction candidates only (use_count == 0), in insertion order;
  // front() is evicted first.
  lru_list_t unreferenced_list;

  size_t lru_target_size = 0;

  // when the lru_set exceeds its target size, evict
  // only unreferenced elements from it (if any).
  void evict() {
    while (!unreferenced_list.empty() &&
           lru_set.size() > lru_target_size) {
      auto &evict_target = unreferenced_list.front();
      assert(evict_target.is_unreferenced());
      unreferenced_list.pop_front();
      lru_set.erase_and_dispose(
        lru_set.iterator_to(evict_target),
        [](auto *p) { delete p; }
      );
    }
  }

  // access an existing element in the lru_set.
  // mark as referenced if necessary.
  void access(base_t &b) {
    if (b.is_referenced())
      return;
    unreferenced_list.erase(lru_list_t::s_iterator_to(b));
    b.lru = this;
  }

  // insert a new element to the lru_set.
  // attempt to evict if possible.
  void insert(base_t &b) {
    assert(b.is_unreferenced());
    lru_set.insert(b);
    b.lru = this;
    evict();
  }

  // an element in the lru_set has no users,
  // mark it as unreferenced and try to evict.
  void mark_as_unreferenced(base_t &b) {
    assert(b.is_referenced());
    unreferenced_list.push_back(b);
    b.lru = nullptr;
    evict();
  }

public:
  /**
   * Returns the TRef corresponding to k if it exists or
   * creates it otherwise.  Return is:
   * std::pair(reference_to_val, found)
   */
  std::pair<TRef, bool> get_or_create(const K &k) {
    typename lru_set_t::insert_commit_data icd;
    auto [iter, missing] = lru_set.insert_check(
      k,
      icd);
    if (missing) {
      // two-phase insert avoids a second lookup after construction
      auto ret = new T(k);
      lru_set.insert_commit(*ret, icd);
      insert(*ret);
      return {TRef(ret), false};
    } else {
      access(*iter);
      return {TRef(static_cast<T*>(&*iter)), true};
    }
  }

  /*
   * Clears unreferenced elements from the lru set [from, to]
   */
  void clear_range(
    const K& from,
    const K& to) {
    auto from_iter = lru_set.lower_bound(from);
    auto to_iter = lru_set.upper_bound(to);
    for (auto i = from_iter; i != to_iter; ) {
      if (!(*i).lru) {
        // unreferenced: drop from both containers and free
        unreferenced_list.erase(lru_list_t::s_iterator_to(*i));
        i = lru_set.erase_and_dispose(i, [](auto *p)
          { delete p; } );
      } else {
        i++;
      }
    }
  }

  // Visit every element (marking each as referenced for the duration
  // of the TRef handed to f).
  template <class F>
  void for_each(F&& f) {
    for (auto& v : lru_set) {
      access(v);
      f(TRef{static_cast<T*>(&v)});
    }
  }

  /**
   * Returns the TRef corresponding to k if it exists or
   * nullptr otherwise.
   */
  TRef get(const K &k) {
    if (auto iter = lru_set.find(k); iter != std::end(lru_set)) {
      access(*iter);
      return TRef(static_cast<T*>(&*iter));
    } else {
      return nullptr;
    }
  }

  void set_target_size(size_t target_size) {
    lru_target_size = target_size;
    evict();
  }

  // Destructor shrinks the target to 0 so all unreferenced elements are
  // freed; referenced elements would leak (callers must drop refs first).
  ~intrusive_lru() {
    set_target_size(0);
  }

  friend void intrusive_ptr_add_ref<>(intrusive_lru_base<Config> *);
  friend void intrusive_ptr_release<>(intrusive_lru_base<Config> *);
};

// boost::intrusive_ptr support: taking a ref requires the element to be
// live in an lru (p->lru non-null).
template <typename Config>
void intrusive_ptr_add_ref(intrusive_lru_base<Config> *p) {
  assert(p);
  assert(p->lru);
  p->use_count++;
}

// Dropping the last ref hands the element back to the lru as an
// eviction candidate.
template <typename Config>
void intrusive_ptr_release(intrusive_lru_base<Config> *p) {
  assert(p);
  assert(p->use_count > 0);
  --p->use_count;
  if (p->use_count == 0) {
    p->lru->mark_as_unreferenced(*p);
  }
}

}
6,208
24.342857
76
h
null
ceph-main/src/common/iso_8601.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #ifndef CEPH_COMMON_ISO_8601_H #define CEPH_COMMON_ISO_8601_H #include <string_view> #include <boost/optional.hpp> #include "common/ceph_time.h" namespace ceph { // Here, we support the W3C profile of ISO 8601 with the following // restrictions: // - Subsecond resolution is supported to nanosecond // granularity. Any number of digits between 1 and 9 may be // specified after the decimal point. // - All times must be UTC. // - All times must be representable as a sixty-four bit count of // nanoseconds since the epoch. // - Partial times are handled thus: // * If there are no subseconds, they are assumed to be zero. // * If there are no seconds, they are assumed to be zero. // * If there are no minutes, they are assumed to be zero. // * If there is no time, it is assumed to midnight. // * If there is no day, it is assumed to be the first. // * If there is no month, it is assumed to be January. // // If a date is invalid, boost::none is returned. boost::optional<ceph::real_time> from_iso_8601( std::string_view s, const bool ws_terminates = true) noexcept; enum class iso_8601_format { Y, YM, YMD, YMDh, YMDhm, YMDhms, YMDhmsn }; std::string to_iso_8601(const ceph::real_time t, const iso_8601_format f = iso_8601_format::YMDhmsn, std::string_view date_separator = "-", std::string_view time_separator = ":") noexcept; static inline std::string to_iso_8601_no_separators(const ceph::real_time t, const iso_8601_format f = iso_8601_format::YMDhmsn) noexcept { return to_iso_8601(t, f, "", ""); } } #endif
1,803
33.037736
103
h
null
ceph-main/src/common/likely.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab /* * Ceph - scalable distributed file system * * Copyright (C) 2010 Dreamhost * * This is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License version 2.1, as published by the Free Software * Foundation. See file COPYING. * */ #ifndef CEPH_LIKELY_DOT_H #define CEPH_LIKELY_DOT_H /* * Likely / Unlikely macros */ #ifndef likely #define likely(x) __builtin_expect((x),1) #endif #ifndef unlikely #define unlikely(x) __builtin_expect((x),0) #endif #ifndef expect #define expect(x, hint) __builtin_expect((x),(hint)) #endif #endif
714
21.34375
70
h
null
ceph-main/src/common/lockdep.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab /* * Ceph - scalable distributed file system * * Copyright (C) 2008-2011 New Dream Network * * This is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License version 2.1, as published by the Free Software * Foundation. See file COPYING. * */ #ifndef CEPH_LOCKDEP_H #define CEPH_LOCKDEP_H #include "include/common_fwd.h" #ifdef CEPH_DEBUG_MUTEX extern bool g_lockdep; extern void lockdep_register_ceph_context(CephContext *cct); extern void lockdep_unregister_ceph_context(CephContext *cct); // lockdep tracks dependencies between multiple and different instances // of locks within a class denoted by `n`. // Caller is obliged to guarantee name uniqueness. extern int lockdep_register(const char *n); extern void lockdep_unregister(int id); extern int lockdep_will_lock(const char *n, int id, bool force_backtrace=false, bool recursive=false); extern int lockdep_locked(const char *n, int id, bool force_backtrace=false); extern int lockdep_will_unlock(const char *n, int id); extern int lockdep_dump_locks(); #else static constexpr bool g_lockdep = false; #define lockdep_register(...) 0 #define lockdep_unregister(...) #define lockdep_will_lock(...) 0 #define lockdep_locked(...) 0 #define lockdep_will_unlock(...) 0 #endif // CEPH_DEBUG_MUTEX #endif
1,442
27.86
79
h
null
ceph-main/src/common/lru_map.h
#ifndef CEPH_LRU_MAP_H #define CEPH_LRU_MAP_H #include "common/ceph_mutex.h" template <class K, class V> class lru_map { struct entry { V value; typename std::list<K>::iterator lru_iter; }; std::map<K, entry> entries; std::list<K> entries_lru; ceph::mutex lock = ceph::make_mutex("lru_map::lock"); size_t max; public: class UpdateContext { public: virtual ~UpdateContext() {} /* update should return true if object is updated */ virtual bool update(V *v) = 0; }; bool _find(const K& key, V *value, UpdateContext *ctx); void _add(const K& key, V& value); public: lru_map(int _max) : max(_max) {} virtual ~lru_map() {} bool find(const K& key, V& value); /* * find_and_update() * * - will return true if object is found * - if ctx is set will return true if object is found and updated */ bool find_and_update(const K& key, V *value, UpdateContext *ctx); void add(const K& key, V& value); void erase(const K& key); }; template <class K, class V> bool lru_map<K, V>::_find(const K& key, V *value, UpdateContext *ctx) { typename std::map<K, entry>::iterator iter = entries.find(key); if (iter == entries.end()) { return false; } entry& e = iter->second; entries_lru.erase(e.lru_iter); bool r = true; if (ctx) r = ctx->update(&e.value); if (value) *value = e.value; entries_lru.push_front(key); e.lru_iter = entries_lru.begin(); return r; } template <class K, class V> bool lru_map<K, V>::find(const K& key, V& value) { std::lock_guard l(lock); return _find(key, &value, NULL); } template <class K, class V> bool lru_map<K, V>::find_and_update(const K& key, V *value, UpdateContext *ctx) { std::lock_guard l(lock); return _find(key, value, ctx); } template <class K, class V> void lru_map<K, V>::_add(const K& key, V& value) { typename std::map<K, entry>::iterator iter = entries.find(key); if (iter != entries.end()) { entry& e = iter->second; entries_lru.erase(e.lru_iter); } entries_lru.push_front(key); entry& e = entries[key]; e.value = value; e.lru_iter = entries_lru.begin(); while (entries.size() 
> max) { typename std::list<K>::reverse_iterator riter = entries_lru.rbegin(); iter = entries.find(*riter); // ceph_assert(iter != entries.end()); entries.erase(iter); entries_lru.pop_back(); } } template <class K, class V> void lru_map<K, V>::add(const K& key, V& value) { std::lock_guard l(lock); _add(key, value); } template <class K, class V> void lru_map<K, V>::erase(const K& key) { std::lock_guard l(lock); typename std::map<K, entry>::iterator iter = entries.find(key); if (iter == entries.end()) return; entry& e = iter->second; entries_lru.erase(e.lru_iter); entries.erase(iter); } #endif
2,813
20.157895
79
h
null
ceph-main/src/common/mClockPriorityQueue.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
 * Ceph - scalable distributed file system
 *
 * Copyright (C) 2016 Red Hat Inc.
 *
 * This is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License version 2.1, as published by the Free Software
 * Foundation.  See file COPYING.
 *
 */

#pragma once

#include <functional>
#include <map>
#include <list>
#include <cmath>

#include "common/Formatter.h"
#include "common/OpQueue.h"

#include "dmclock/src/dmclock_server.h"

// the following is done to unclobber _ASSERT_H so it returns to the
// way ceph likes it
#include "include/ceph_assert.h"

namespace ceph {

  namespace dmc = crimson::dmclock;

  // OpQueue implementation backed by a dmClock pull queue, plus:
  //  - high_queue: strict-priority items that bypass dmClock entirely
  //    (served highest priority first);
  //  - queue_front: items re-queued at the front, served before dmClock
  //    items (see comment at the member below).
  template <typename T, typename K>
  class mClockQueue : public OpQueue <T, K> {

    using priority_t = unsigned;
    using cost_t = unsigned;

    typedef std::list<std::pair<cost_t, T> > ListPairs;

    // Remove from *l every item for which f returns true.  Iterates
    // back-to-front; note f receives the item by rvalue reference and
    // is expected to consume it only when it returns true.
    static void filter_list_pairs(ListPairs *l,
				  std::function<bool (T&&)> f) {
      for (typename ListPairs::iterator i = l->end();
	   i != l->begin();
	   /* no inc */
	) {
	auto next = i;
	--next;
	if (f(std::move(next->second))) {
	  l->erase(next);
	} else {
	  i = next;
	}
      }
    }

    // One strict-priority bucket: per-client FIFO lists served
    // round-robin via the `cur` cursor.  The token-bucket members are
    // part of the SubQueue interface but are not consumed by
    // mClockQueue itself here.
    struct SubQueue {
    private:
      typedef std::map<K, ListPairs> Classes;
      // client-class to ordered queue
      Classes q;

      unsigned tokens, max_tokens;

      // round-robin cursor over q; always kept valid (wraps to begin()).
      typename Classes::iterator cur;

    public:

      SubQueue(const SubQueue &other)
	: q(other.q),
	  tokens(other.tokens),
	  max_tokens(other.max_tokens),
	  cur(q.begin()) {}

      SubQueue()
	: tokens(0),
	  max_tokens(0),
	  cur(q.begin()) {}

      void set_max_tokens(unsigned mt) {
	max_tokens = mt;
      }

      unsigned get_max_tokens() const {
	return max_tokens;
      }

      unsigned num_tokens() const {
	return tokens;
      }

      // add tokens, clamped to max_tokens
      void put_tokens(unsigned t) {
	tokens += t;
	if (tokens > max_tokens) {
	  tokens = max_tokens;
	}
      }

      // remove tokens, clamped at zero
      void take_tokens(unsigned t) {
	if (tokens > t) {
	  tokens -= t;
	} else {
	  tokens = 0;
	}
      }

      void enqueue(K cl, cost_t cost, T&& item) {
	q[cl].emplace_back(cost, std::move(item));
	if (cur == q.end())
	  cur = q.begin();
      }

      void enqueue_front(K cl, cost_t cost, T&& item) {
	q[cl].emplace_front(cost, std::move(item));
	if (cur == q.end())
	  cur = q.begin();
      }

      const std::pair<cost_t, T>& front() const {
	ceph_assert(!(q.empty()));
	ceph_assert(cur != q.end());
	return cur->second.front();
      }

      std::pair<cost_t, T>& front() {
	ceph_assert(!(q.empty()));
	ceph_assert(cur != q.end());
	return cur->second.front();
      }

      // pop from the current client's list and advance the round-robin
      // cursor; empty client lists are removed.
      void pop_front() {
	ceph_assert(!(q.empty()));
	ceph_assert(cur != q.end());
	cur->second.pop_front();
	if (cur->second.empty()) {
	  auto i = cur;
	  ++cur;
	  q.erase(i);
	} else {
	  ++cur;
	}
	if (cur == q.end()) {
	  cur = q.begin();
	}
      }

      // O(#clients) item count
      unsigned get_size_slow() const {
	unsigned count = 0;
	for (const auto& cls : q) {
	  count += cls.second.size();
	}
	return count;
      }

      bool empty() const {
	return q.empty();
      }

      void remove_by_filter(std::function<bool (T&&)> f) {
	for (typename Classes::iterator i = q.begin();
	     i != q.end();
	     /* no-inc */) {
	  filter_list_pairs(&(i->second), f);
	  if (i->second.empty()) {
	    // keep the cursor valid while erasing its element
	    if (cur == i) {
	      ++cur;
	    }
	    i = q.erase(i);
	  } else {
	    ++i;
	  }
	}
	if (cur == q.end()) cur = q.begin();
      }

      // Remove all items of client k; if out != nullptr the items are
      // prepended to *out preserving their original order.
      void remove_by_class(K k, std::list<T> *out) {
	typename Classes::iterator i = q.find(k);
	if (i == q.end()) {
	  return;
	}
	if (i == cur) {
	  ++cur;
	}
	if (out) {
	  for (auto j = i->second.rbegin(); j != i->second.rend(); ++j) {
	    out->push_front(std::move(j->second));
	  }
	}
	q.erase(i);
	if (cur == q.end()) cur = q.begin();
      }

      void dump(ceph::Formatter *f) const {
	f->dump_int("size", get_size_slow());
	f->dump_int("num_keys", q.size());
      }
    };

    using SubQueues = std::map<priority_t, SubQueue>;

    // strict-priority buckets, served from highest key downwards
    SubQueues high_queue;

    using Queue = dmc::PullPriorityQueue<K,T,false>;

    // dmClock-scheduled "normal" queue
    Queue queue;

    // when enqueue_front is called, rather than try to re-calc tags
    // to put in mClock priority queue, we'll just keep a separate
    // list from which we dequeue items first, and only when it's
    // empty do we use queue.
    std::list<std::pair<K,T>> queue_front;

  public:

    mClockQueue(
      const typename Queue::ClientInfoFunc& info_func,
      double anticipation_timeout = 0.0) :
      queue(info_func, dmc::AtLimit::Allow, anticipation_timeout)
    {
      // empty
    }

    unsigned get_size_slow() const {
      unsigned total = 0;
      total += queue_front.size();
      total += queue.request_count();
      for (auto i = high_queue.cbegin(); i != high_queue.cend(); ++i) {
	ceph_assert(i->second.get_size_slow());
	total += i->second.get_size_slow();
      }
      return total;
    }

    // be sure to do things in reverse priority order and push_front
    // to the list so items end up on list in front-to-back priority
    // order
    void remove_by_filter(std::function<bool (T&&)> filter_accum) {
      queue.remove_by_req_filter([&] (std::unique_ptr<T>&& r) {
	  return filter_accum(std::move(*r));
	}, true);

      // walk queue_front back-to-front via reverse iterators; erase
      // needs the matching forward iterator, hence the base() dance.
      for (auto i = queue_front.rbegin(); i != queue_front.rend(); /* no-inc */) {
	if (filter_accum(std::move(i->second))) {
	  i = decltype(i){ queue_front.erase(std::next(i).base()) };
	} else {
	  ++i;
	}
      }

      for (typename SubQueues::iterator i = high_queue.begin();
	   i != high_queue.end();
	   /* no-inc */ ) {
	i->second.remove_by_filter(filter_accum);
	if (i->second.empty()) {
	  i = high_queue.erase(i);
	} else {
	  ++i;
	}
      }
    }

    // Remove every item belonging to client k from all three internal
    // queues; surviving items are optionally collected into *out in
    // front-to-back priority order.
    void remove_by_class(K k, std::list<T> *out = nullptr) override final {
      if (out) {
	queue.remove_by_client(k,
			       true,
			       [&out] (std::unique_ptr<T>&& t) {
				 out->push_front(std::move(*t));
			       });
      } else {
	queue.remove_by_client(k, true);
      }

      for (auto i = queue_front.rbegin(); i != queue_front.rend(); /* no-inc */) {
	if (k == i->first) {
	  if (nullptr != out) out->push_front(std::move(i->second));
	  i = decltype(i){ queue_front.erase(std::next(i).base()) };
	} else {
	  ++i;
	}
      }

      for (auto i = high_queue.begin(); i != high_queue.end(); /* no-inc */) {
	i->second.remove_by_class(k, out);
	if (i->second.empty()) {
	  i = high_queue.erase(i);
	} else {
	  ++i;
	}
      }
    }

    void enqueue_strict(K cl, unsigned priority, T&& item) override final {
      high_queue[priority].enqueue(cl, 1, std::move(item));
    }

    void enqueue_strict_front(K cl, unsigned priority, T&& item) override final {
      high_queue[priority].enqueue_front(cl, 1, std::move(item));
    }

    void enqueue(K cl, unsigned priority, unsigned cost, T&& item) override final {
      // priority is ignored
      queue.add_request(std::move(item), cl, cost);
    }

    void enqueue_front(K cl,
		       unsigned priority,
		       unsigned cost,
		       T&& item) override final {
      queue_front.emplace_front(std::pair<K,T>(cl, std::move(item)));
    }

    bool empty() const override final {
      return queue.empty() && high_queue.empty() && queue_front.empty();
    }

    // Serve order: strict-priority (highest bucket first), then
    // re-queued-front items, then the dmClock queue.
    T dequeue() override final {
      ceph_assert(!empty());

      if (!high_queue.empty()) {
	T ret = std::move(high_queue.rbegin()->second.front().second);
	high_queue.rbegin()->second.pop_front();
	if (high_queue.rbegin()->second.empty()) {
	  high_queue.erase(high_queue.rbegin()->first);
	}
	return ret;
      }

      if (!queue_front.empty()) {
	T ret = std::move(queue_front.front().second);
	queue_front.pop_front();
	return ret;
      }

      auto pr = queue.pull_request();
      ceph_assert(pr.is_retn());
      auto& retn = pr.get_retn();
      return std::move(*(retn.request));
    }

    void dump(ceph::Formatter *f) const override final {
      f->open_array_section("high_queues");
      for (typename SubQueues::const_iterator p = high_queue.begin();
	   p != high_queue.end();
	   ++p) {
	f->open_object_section("subqueue");
	f->dump_int("priority", p->first);
	p->second.dump(f);
	f->close_section();
      }
      f->close_section();

      f->open_object_section("queue_front");
      f->dump_int("size", queue_front.size());
      f->close_section();

      f->open_object_section("queue");
      f->dump_int("size", queue.request_count());
      f->close_section();
    } // dump

    void print(std::ostream &os) const final {
      os << "mClockPriorityQueue";
    }
  };

} // namespace ceph
8,645
22.367568
83
h
null
ceph-main/src/common/mime.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab /* * Ceph - scalable distributed file system * * Copyright (C) 2011 New Dream Network * * This is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License version 2.1, as published by the Free Software * Foundation. See file COPYING. * */ #ifndef CEPH_COMMON_MIME_H #define CEPH_COMMON_MIME_H #ifdef __cplusplus extern "C" { #endif /* Encode a buffer as quoted-printable. * * The input is a null-terminated string. * The output is a null-terminated string representing the input encoded as * a MIME quoted-printable. * * Returns the length of the buffer we would need to do the encoding. * If we don't have enough buffer space, the output will be truncated. * * You may call mime_encode_as_qp(input, NULL, 0) to find the size of the * buffer you will need. */ signed int mime_encode_as_qp(const char *input, char *output, int outlen); /* Decode a quoted-printable buffer. * * The input is a null-terminated string encoded as a MIME quoted-printable. * The output is a null-terminated string representing the input decoded. * * Returns a negative error code if the input is not a valid quoted-printable * buffer. * Returns the length of the buffer we would need to do the encoding. * If we don't have enough buffer space, the output will be truncated. * * You may call mime_decode_as_qp(input, NULL, 0) to find the size of the * buffer you will need. The output will never be longer than the input for * this function. */ signed int mime_decode_from_qp(const char *input, char *output, int outlen); #ifdef __cplusplus } #endif #endif
1,732
29.403509
77
h
null
ceph-main/src/common/module.c
/*
 * Ceph - scalable distributed file system
 *
 * Copyright (C) 2014 Inktank Storage, Inc.
 *
 * This is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License version 2.1, as published by the Free Software
 * Foundation.  See file COPYING.
 *
 */

#include "acconfig.h"
#include "include/compat.h"
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#if defined(__FreeBSD__)
#include <sys/wait.h>
#endif

#ifndef _WIN32
/*
 * TODO: Switch to libkmod when we abandon older platforms.  The APIs
 * we want are:
 *
 * - kmod_module_new_from_name() for obtaining handles;
 * - kmod_module_probe_insert_module() for module_load();
 * - kmod_module_get_info(), kmod_module_info_get_{key,value}() for
 *   module_has_param().
 */

/*
 * Return command's exit status or -1 on error.
 */
static int run_command(const char *command)
{
	int status;

	status = system(command);
	if (status >= 0 && WIFEXITED(status))
		return WEXITSTATUS(status);

	if (status < 0) {
		char error_buf[80];
		char* errp = ceph_strerror_r(errno, error_buf, sizeof(error_buf));
		fprintf(stderr, "couldn't run '%s': %s\n", command, errp);
	} else if (WIFSIGNALED(status)) {
		fprintf(stderr, "'%s' killed by signal %d\n", command,
			WTERMSIG(status));
	} else {
		fprintf(stderr, "weird status from '%s': %d\n", command,
			status);
	}

	return -1;
}

/*
 * Check whether kernel module `module` exposes a parameter named
 * `param` (shells out to modinfo | grep).  Returns nonzero if the
 * parameter exists, 0 otherwise.
 */
int module_has_param(const char *module, const char *param)
{
	char command[128];
	int n;

	n = snprintf(command, sizeof(command),
		     "/sbin/modinfo -F parm %s | /bin/grep -q ^%s:",
		     module, param);
	/*
	 * Fix: previously a too-long module/param silently ran a
	 * truncated (wrong) shell command.  Fail safely instead.
	 */
	if (n < 0 || n >= (int)sizeof(command)) {
		fprintf(stderr, "module_has_param: name too long: %s.%s\n",
			module, param);
		return 0;
	}

	return run_command(command) == 0;
}

/*
 * Load kernel module `module` via modprobe with optional `options`
 * (may be NULL).  Returns modprobe's exit status, or -1 on error.
 */
int module_load(const char *module, const char *options)
{
	char command[128];
	int n;

	n = snprintf(command, sizeof(command), "/sbin/modprobe %s %s",
		     module, (options ? options : ""));
	/* see truncation note in module_has_param() above */
	if (n < 0 || n >= (int)sizeof(command)) {
		fprintf(stderr, "module_load: command too long for: %s\n",
			module);
		return -1;
	}

	return run_command(command);
}

#else

// We're stubbing out those functions, for now.
int module_has_param(const char *module, const char *param)
{
	return -1;
}

int module_load(const char *module, const char *options)
{
	return -1;
}

#endif /* _WIN32 */
2,084
20.71875
69
c
null
ceph-main/src/common/mutex_debug.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
 * Ceph - scalable distributed file system
 *
 * Copyright (C) 2004-2006 Sage Weil <sage@newdream.net>
 *
 * This is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License version 2.1, as published by the Free Software
 * Foundation.  See file COPYING.
 *
 */

#ifndef CEPH_COMMON_MUTEX_DEBUG_H
#define CEPH_COMMON_MUTEX_DEBUG_H

#include <atomic>
#include <system_error>
#include <thread>

#include <pthread.h>

#include "include/ceph_assert.h"
#include "include/common_fwd.h"

#include "ceph_time.h"
#include "likely.h"
#include "lockdep.h"

namespace ceph {
namespace mutex_debug_detail {

// Non-template bookkeeping shared by both debug-mutex variants:
// lockdep registration, owner tracking, and recursion count.
class mutex_debugging_base
{
protected:
  std::string group;  // lockdep "class" name for this mutex
  int id = -1;        // lockdep id (from lockdep_register)
  bool lockdep;       // track this mutex using lockdep_*
  bool backtrace;     // gather backtrace on lock acquisition

  std::atomic<int> nlock = 0;          // recursion/lock depth
  std::thread::id locked_by = {};      // current owner (if locked)

  bool _enable_lockdep() const {
    return lockdep && g_lockdep;
  }
  void _register();
  void _will_lock(bool recursive=false); // about to lock
  void _locked();                        // just locked
  void _will_unlock();                   // about to unlock

  mutex_debugging_base(std::string group, bool ld = true, bool bt = false);
  ~mutex_debugging_base();

public:
  bool is_locked() const {
    return (nlock > 0);
  }
  bool is_locked_by_me() const {
    return nlock.load(std::memory_order_acquire) > 0 && locked_by == std::this_thread::get_id();
  }
  operator bool() const {
    return is_locked_by_me();
  }
};

// Since this is a /debugging/ mutex just define it in terms of the
// pthread error check mutex.
template<bool Recursive>
class mutex_debug_impl : public mutex_debugging_base
{
private:
  pthread_mutex_t m;

  // Initialize the pthread mutex as RECURSIVE or ERRORCHECK depending
  // on the template parameter.
  void _init() {
    pthread_mutexattr_t a;
    pthread_mutexattr_init(&a);
    int r;
    if (recursive)
      r = pthread_mutexattr_settype(&a, PTHREAD_MUTEX_RECURSIVE);
    else
      r = pthread_mutexattr_settype(&a, PTHREAD_MUTEX_ERRORCHECK);
    ceph_assert(r == 0);
    r = pthread_mutex_init(&m, &a);
    ceph_assert(r == 0);
  }

  // Lockdep cannot model recursive locking, so it is disabled for the
  // recursive variant and when the caller opts out per-call.
  bool enable_lockdep(bool no_lockdep) const {
    if (recursive) {
      return false;
    } else if (no_lockdep) {
      return false;
    } else {
      return _enable_lockdep();
    }
  }

public:
  static constexpr bool recursive = Recursive;

  mutex_debug_impl(std::string group, bool ld = true, bool bt = false)
    : mutex_debugging_base(group, ld, bt)
  {
    _init();
  }

  // Mutex is Destructible
  ~mutex_debug_impl() {
    int r = pthread_mutex_destroy(&m);
    ceph_assert(r == 0);
  }

  // Mutex concept is non-Copyable
  mutex_debug_impl(const mutex_debug_impl&) = delete;
  mutex_debug_impl& operator =(const mutex_debug_impl&) = delete;

  // Mutex concept is non-Movable
  mutex_debug_impl(mutex_debug_impl&&) = delete;
  mutex_debug_impl& operator =(mutex_debug_impl&&) = delete;

  void lock_impl() {
    int r = pthread_mutex_lock(&m);
    // Allowed error codes for Mutex concept
    if (unlikely(r == EPERM ||
		 r == EDEADLK ||
		 r == EBUSY)) {
      throw std::system_error(r, std::generic_category());
    }
    ceph_assert(r == 0);
  }

  void unlock_impl() noexcept {
    int r = pthread_mutex_unlock(&m);
    ceph_assert(r == 0);
  }

  bool try_lock_impl() {
    int r = pthread_mutex_trylock(&m);
    switch (r) {
    case 0:
      return true;
    case EBUSY:
      return false;
    default:
      throw std::system_error(r, std::generic_category());
    }
  }
  pthread_mutex_t* native_handle() {
    return &m;
  }

  // Record ownership after a successful acquisition.
  void _post_lock() {
    if (!recursive)
      ceph_assert(nlock == 0);
    locked_by = std::this_thread::get_id();
    nlock.fetch_add(1, std::memory_order_release);
  }

  // Validate and clear ownership before releasing.
  void _pre_unlock() {
    if (recursive) {
      ceph_assert(nlock > 0);
    } else {
      ceph_assert(nlock == 1);
    }
    ceph_assert(locked_by == std::this_thread::get_id());
    if (nlock == 1)
      locked_by = std::thread::id();
    nlock.fetch_sub(1, std::memory_order_release);
  }

  bool try_lock(bool no_lockdep = false) {
    bool locked = try_lock_impl();
    if (locked) {
      if (enable_lockdep(no_lockdep))
	_locked();
      _post_lock();
    }
    return locked;
  }

  void lock(bool no_lockdep = false) {
    if (enable_lockdep(no_lockdep))
      _will_lock(recursive);

    // try_lock first so lockdep's _locked() isn't double-reported on
    // the fast path
    if (try_lock(no_lockdep))
      return;

    lock_impl();
    if (enable_lockdep(no_lockdep))
      _locked();
    _post_lock();
  }

  void unlock(bool no_lockdep = false) {
    _pre_unlock();
    if (enable_lockdep(no_lockdep))
      _will_unlock();
    unlock_impl();
  }

};

} // namespace mutex_debug_detail

// Convenience aliases: error-checking vs recursive debug mutex.
typedef mutex_debug_detail::mutex_debug_impl<false> mutex_debug;
typedef mutex_debug_detail::mutex_debug_impl<true> mutex_recursive_debug;

} // namespace ceph

#endif
4,859
22.142857
96
h
null
ceph-main/src/common/numa.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #pragma once #include <include/compat.h> #include <sched.h> #include <ostream> #include <set> int parse_cpu_set_list(const char *s, size_t *cpu_set_size, cpu_set_t *cpu_set); std::string cpu_set_to_str_list(size_t cpu_set_size, const cpu_set_t *cpu_set); std::set<int> cpu_set_to_set(size_t cpu_set_size, const cpu_set_t *cpu_set); int get_numa_node_cpu_set(int node, size_t *cpu_set_size, cpu_set_t *cpu_set); int set_cpu_affinity_all_threads(size_t cpu_set_size, cpu_set_t *cpu_set);
634
24.4
70
h
null
ceph-main/src/common/obj_bencher.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab /* * Ceph - scalable distributed file system * * Copyright (C) 2009 Sage Weil <sage@newdream.net> * * This is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License version 2.1, as published by the Free Software * Foundation. See file COPYING. * */ #ifndef CEPH_OBJ_BENCHER_H #define CEPH_OBJ_BENCHER_H #include "common/ceph_context.h" #include "common/Formatter.h" #include "ceph_time.h" #include <cfloat> using ceph::mono_clock; struct bench_interval_data { double min_bandwidth = DBL_MAX; double max_bandwidth = 0; double avg_bandwidth = 0; int bandwidth_cycles = 0; double bandwidth_diff_sum = 0; int min_iops = INT_MAX; int max_iops = 0; double avg_iops = 0; int iops_cycles = 0; double iops_diff_sum = 0; }; struct bench_data { bool done; //is the benchmark is done uint64_t object_size; //the size of the objects uint64_t op_size; // the size of the read/write ops bool hints; // same as object_size for write tests int in_flight; //number of reads/writes being waited on int started; int finished; double min_latency; double max_latency; double avg_latency; struct bench_interval_data idata; // data that is updated by time intervals and not by events double latency_diff_sum; std::chrono::duration<double> cur_latency; //latency of last completed transaction - in seconds by default mono_time start_time; //start time for benchmark - use the monotonic clock as we'll measure the passage of time char *object_contents; //pointer to the contents written to each object }; const int OP_WRITE = 1; const int OP_SEQ_READ = 2; const int OP_RAND_READ = 3; // Object is composed of <oid,namespace> typedef std::pair<std::string, std::string> Object; class ObjBencher { bool show_time; Formatter *formatter = NULL; std::ostream *outstream = NULL; public: CephContext *cct; protected: ceph::mutex lock = 
ceph::make_mutex("ObjBencher::lock"); static void *status_printer(void *bencher); struct bench_data data; int fetch_bench_metadata(const std::string& metadata_file, uint64_t* op_size, uint64_t* object_size, int* num_ops, int* num_objects, int* prev_pid); int write_bench(int secondsToRun, int concurrentios, const std::string& run_name_meta, unsigned max_objects, int prev_pid); int seq_read_bench(int secondsToRun, int num_ops, int num_objects, int concurrentios, int writePid, bool no_verify=false); int rand_read_bench(int secondsToRun, int num_ops, int num_objects, int concurrentios, int writePid, bool no_verify=false); int clean_up(int num_objects, int prevPid, int concurrentios); bool more_objects_matching_prefix(const std::string& prefix, std::list<Object>* name); virtual int completions_init(int concurrentios) = 0; virtual void completions_done() = 0; virtual int create_completion(int i, void (*cb)(void *, void*), void *arg) = 0; virtual void release_completion(int slot) = 0; virtual bool completion_is_done(int slot) = 0; virtual int completion_wait(int slot) = 0; virtual int completion_ret(int slot) = 0; virtual int aio_read(const std::string& oid, int slot, bufferlist *pbl, size_t len, size_t offset) = 0; virtual int aio_write(const std::string& oid, int slot, bufferlist& bl, size_t len, size_t offset) = 0; virtual int aio_remove(const std::string& oid, int slot) = 0; virtual int sync_read(const std::string& oid, bufferlist& bl, size_t len) = 0; virtual int sync_write(const std::string& oid, bufferlist& bl, size_t len) = 0; virtual int sync_remove(const std::string& oid) = 0; virtual bool get_objects(std::list< std::pair<std::string, std::string> >* objects, int num) = 0; virtual void set_namespace(const std::string&) {} std::ostream& out(std::ostream& os); std::ostream& out(std::ostream& os, utime_t& t); public: explicit ObjBencher(CephContext *cct_) : show_time(false), cct(cct_), data() {} virtual ~ObjBencher() {} int aio_bench( int operation, int 
secondsToRun, int concurrentios, uint64_t op_size, uint64_t object_size, unsigned max_objects, bool cleanup, bool hints, const std::string& run_name, bool reuse_bench, bool no_verify=false); int clean_up(const std::string& prefix, int concurrentios, const std::string& run_name); void set_show_time(bool dt) { show_time = dt; } void set_formatter(Formatter *f) { formatter = f; } void set_outstream(std::ostream& os) { outstream = &os; } int clean_up_slow(const std::string& prefix, int concurrentios); }; #endif
4,670
34.386364
125
h
null
ceph-main/src/common/openssl_opts_handler.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab /* * Ceph - scalable distributed file system * * Copyright (c) 2020 Huawei Technologies Co., Ltd. * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * */ #ifndef CEPH_OPENSSL_OPTS_HANDLER_H #define CEPH_OPENSSL_OPTS_HANDLER_H namespace ceph { namespace crypto { void init_openssl_engine_once(); } } #endif
633
24.36
70
h
null
ceph-main/src/common/options.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #pragma once #include <chrono> #include <string> #include <variant> #include <vector> #include "include/str_list.h" #include "msg/msg_types.h" #include "include/uuid.h" struct Option { enum type_t { TYPE_UINT = 0, TYPE_INT = 1, TYPE_STR = 2, TYPE_FLOAT = 3, TYPE_BOOL = 4, TYPE_ADDR = 5, TYPE_ADDRVEC = 6, TYPE_UUID = 7, TYPE_SIZE = 8, TYPE_SECS = 9, TYPE_MILLISECS = 10, }; static const char *type_to_c_type_str(type_t t) { switch (t) { case TYPE_UINT: return "uint64_t"; case TYPE_INT: return "int64_t"; case TYPE_STR: return "std::string"; case TYPE_FLOAT: return "double"; case TYPE_BOOL: return "bool"; case TYPE_ADDR: return "entity_addr_t"; case TYPE_ADDRVEC: return "entity_addrvec_t"; case TYPE_UUID: return "uuid_d"; case TYPE_SIZE: return "uint64_t"; case TYPE_SECS: return "secs"; case TYPE_MILLISECS: return "millisecs"; default: return "unknown"; } } static const char *type_to_str(type_t t) { switch (t) { case TYPE_UINT: return "uint"; case TYPE_INT: return "int"; case TYPE_STR: return "str"; case TYPE_FLOAT: return "float"; case TYPE_BOOL: return "bool"; case TYPE_ADDR: return "addr"; case TYPE_ADDRVEC: return "addrvec"; case TYPE_UUID: return "uuid"; case TYPE_SIZE: return "size"; case TYPE_SECS: return "secs"; case TYPE_MILLISECS: return "millisecs"; default: return "unknown"; } } static int str_to_type(const std::string& s) { if (s == "uint") { return TYPE_UINT; } if (s == "int") { return TYPE_INT; } if (s == "str") { return TYPE_STR; } if (s == "float") { return TYPE_FLOAT; } if (s == "bool") { return TYPE_BOOL; } if (s == "addr") { return TYPE_ADDR; } if (s == "addrvec") { return TYPE_ADDRVEC; } if (s == "uuid") { return TYPE_UUID; } if (s == "size") { return TYPE_SIZE; } if (s == "secs") { return TYPE_SECS; } if (s == "millisecs") { return TYPE_MILLISECS; } return -1; } /** * Basic: for users, configures some externally visible functional aspect * Advanced: 
for users, configures some internal behaviour * Development: not for users. May be dangerous, may not be documented. */ enum level_t { LEVEL_BASIC = 0, LEVEL_ADVANCED = 1, LEVEL_DEV = 2, LEVEL_UNKNOWN = 3, }; static const char *level_to_str(level_t l) { switch (l) { case LEVEL_BASIC: return "basic"; case LEVEL_ADVANCED: return "advanced"; case LEVEL_DEV: return "dev"; default: return "unknown"; } } enum flag_t { FLAG_RUNTIME = 0x1, ///< option can be changed at runtime FLAG_NO_MON_UPDATE = 0x2, ///< option cannot be changed via mon config FLAG_STARTUP = 0x4, ///< option can only take effect at startup FLAG_CLUSTER_CREATE = 0x8, ///< option only has effect at cluster creation FLAG_CREATE = 0x10, ///< option only has effect at daemon creation FLAG_MGR = 0x20, ///< option is a mgr module option FLAG_MINIMAL_CONF = 0x40, ///< option should go in a minimal ceph.conf }; struct size_t { std::uint64_t value; operator uint64_t() const { return static_cast<uint64_t>(value); } bool operator==(const size_t& rhs) const { return value == rhs.value; } }; using value_t = std::variant< std::monostate, std::string, uint64_t, int64_t, double, bool, entity_addr_t, entity_addrvec_t, std::chrono::seconds, std::chrono::milliseconds, size_t, uuid_d>; const std::string name; const type_t type; const level_t level; std::string desc; std::string long_desc; unsigned flags = 0; int subsys = -1; // if >= 0, we are a subsys debug level value_t value; value_t daemon_value; static std::string to_str(const value_t& v); // Items like mon, osd, rgw, rbd, ceph-fuse. This is advisory metadata // for presentation layers (like web dashboards, or generated docs), so that // they know which options to display where. // Additionally: "common" for settings that exist in any Ceph code. Do // not use common for settings that are just shared some places: for those // places, list them. std::vector<const char*> services; // Topics like: // "service": a catchall for the boring stuff like log/asok paths. 
// "network" // "performance": a setting that may need adjustment depending on // environment/workload to get best performance. std::vector<const char*> tags; std::vector<const char*> see_also; value_t min, max; std::vector<const char*> enum_allowed; /** * Return nonzero and set second argument to error string if the * value is invalid. * * These callbacks are more than just validators, as they can also * modify the value as it passes through. */ typedef std::function<int(std::string *, std::string *)> validator_fn_t; validator_fn_t validator; Option(std::string const &name, type_t t, level_t l) : name(name), type(t), level(l) { // While value_t is nullable (via std::monostate), we don't ever // want it set that way in an Option instance: within an instance, // the type of ::value should always match the declared type. switch (type) { case TYPE_INT: value = int64_t(0); break; case TYPE_UINT: value = uint64_t(0); break; case TYPE_STR: value = std::string(""); break; case TYPE_FLOAT: value = 0.0; break; case TYPE_BOOL: value = false; break; case TYPE_ADDR: value = entity_addr_t(); break; case TYPE_ADDRVEC: value = entity_addrvec_t(); break; case TYPE_UUID: value = uuid_d(); break; case TYPE_SIZE: value = size_t{0}; break; case TYPE_SECS: value = std::chrono::seconds{0}; break; case TYPE_MILLISECS: value = std::chrono::milliseconds{0}; break; default: ceph_abort(); } } void dump_value(const char *field_name, const value_t &v, ceph::Formatter *f) const; // Validate and potentially modify incoming string value int pre_validate(std::string *new_value, std::string *err) const; // Validate properly typed value against bounds int validate(const Option::value_t &new_value, std::string *err) const; // const char * must be explicit to avoid it being treated as an int Option& set_value(value_t& v, const char *new_value) { v = std::string(new_value); return *this; } // bool is an integer, but we don't think so. teach it the hard way. 
template<typename T> using is_not_integer_t = std::enable_if_t<!std::is_integral_v<T> || std::is_same_v<T, bool>, int>; template<typename T> using is_integer_t = std::enable_if_t<std::is_integral_v<T> && !std::is_same_v<T, bool>, int>; template<typename T, typename = is_not_integer_t<T>> Option& set_value(value_t& v, const T& new_value) { v = new_value; return *this; } // For potentially ambiguous types, inspect Option::type and // do some casting. This is necessary to make sure that setting // a float option to "0" actually sets the double part of variant. template<typename T, typename = is_integer_t<T>> Option& set_value(value_t& v, T new_value) { switch (type) { case TYPE_INT: v = int64_t(new_value); break; case TYPE_UINT: v = uint64_t(new_value); break; case TYPE_FLOAT: v = double(new_value); break; case TYPE_BOOL: v = bool(new_value); break; case TYPE_SIZE: v = size_t{static_cast<std::uint64_t>(new_value)}; break; case TYPE_SECS: v = std::chrono::seconds{new_value}; break; case TYPE_MILLISECS: v = std::chrono::milliseconds{new_value}; break; default: std::cerr << "Bad type in set_value: " << name << ": " << typeid(T).name() << std::endl; ceph_abort(); } return *this; } /// parse and validate a string input int parse_value( const std::string& raw_val, value_t *out, std::string *error_message, std::string *normalized_value=nullptr) const; template<typename T> Option& set_default(const T& v) { return set_value(value, v); } template<typename T> Option& set_daemon_default(const T& v) { return set_value(daemon_value, v); } Option& add_tag(const char* tag) { tags.push_back(tag); return *this; } Option& add_tag(const std::initializer_list<const char*>& ts) { tags.insert(tags.end(), ts); return *this; } Option& add_service(const char* service) { services.push_back(service); return *this; } Option& add_service(const std::initializer_list<const char*>& ss) { services.insert(services.end(), ss); return *this; } Option& add_see_also(const char* t) { see_also.push_back(t); 
return *this; } Option& add_see_also(const std::initializer_list<const char*>& ts) { see_also.insert(see_also.end(), ts); return *this; } Option& set_description(const char* new_desc) { desc = new_desc; return *this; } Option& set_long_description(const char* new_desc) { long_desc = new_desc; return *this; } template<typename T> Option& set_min(const T& mi) { set_value(min, mi); return *this; } template<typename T> Option& set_min_max(const T& mi, const T& ma) { set_value(min, mi); set_value(max, ma); return *this; } Option& set_enum_allowed(const std::vector<const char*>& allowed) { enum_allowed = allowed; return *this; } Option &set_flag(flag_t f) { flags |= f; return *this; } Option &set_flags(flag_t f) { flags |= f; return *this; } Option &set_validator(const validator_fn_t &validator_) { validator = validator_; return *this; } Option &set_subsys(int s) { subsys = s; return *this; } void dump(ceph::Formatter *f) const; void print(std::ostream *out) const; bool has_flag(flag_t f) const { return flags & f; } /** * A crude indicator of whether the value may be * modified safely at runtime -- should be replaced * with proper locking! 
*/ bool can_update_at_runtime() const { return (has_flag(FLAG_RUNTIME) || (!has_flag(FLAG_MGR) && (type == TYPE_BOOL || type == TYPE_INT || type == TYPE_UINT || type == TYPE_FLOAT || type == TYPE_SIZE || type == TYPE_SECS || type == TYPE_MILLISECS))) && !has_flag(FLAG_STARTUP) && !has_flag(FLAG_CLUSTER_CREATE) && !has_flag(FLAG_CREATE); } }; constexpr unsigned long long operator"" _min (unsigned long long min) { return min * 60; } constexpr unsigned long long operator"" _hr (unsigned long long hr) { return hr * 60 * 60; } constexpr unsigned long long operator"" _day (unsigned long long day) { return day * 24 * 60 * 60; } constexpr unsigned long long operator"" _K (unsigned long long n) { return n << 10; } constexpr unsigned long long operator"" _M (unsigned long long n) { return n << 20; } constexpr unsigned long long operator"" _G (unsigned long long n) { return n << 30; } constexpr unsigned long long operator"" _T (unsigned long long n) { return n << 40; } extern const std::vector<Option> ceph_options;
11,502
26.065882
86
h
null
ceph-main/src/common/ostream_temp.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #pragma once #include <sstream> typedef enum { CLOG_DEBUG = 0, CLOG_INFO = 1, CLOG_SEC = 2, CLOG_WARN = 3, CLOG_ERROR = 4, CLOG_UNKNOWN = -1, } clog_type; class OstreamTemp { public: class OstreamTempSink { public: virtual void do_log(clog_type prio, std::stringstream& ss) = 0; virtual ~OstreamTempSink() {} }; OstreamTemp(clog_type type_, OstreamTempSink *parent_); OstreamTemp(OstreamTemp &&rhs) = default; ~OstreamTemp(); template<typename T> std::ostream& operator<<(const T& rhs) { return ss << rhs; } private: clog_type type; OstreamTempSink *parent; std::stringstream ss; }; class LoggerSinkSet : public OstreamTemp::OstreamTempSink { public: virtual void info(std::stringstream &s) = 0; virtual void warn(std::stringstream &s) = 0; virtual void error(std::stringstream &s) = 0; virtual void sec(std::stringstream &s) = 0; virtual void debug(std::stringstream &s) = 0; virtual OstreamTemp info() = 0; virtual OstreamTemp warn() = 0; virtual OstreamTemp error() = 0; virtual OstreamTemp sec() = 0; virtual OstreamTemp debug() = 0; virtual void do_log(clog_type prio, std::stringstream& ss) = 0; virtual void do_log(clog_type prio, const std::string& ss) = 0; virtual ~LoggerSinkSet() {}; };
1,379
23.210526
70
h
null
ceph-main/src/common/perf_counters.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab /* * Ceph - scalable distributed file system * * Copyright (C) 2011 New Dream Network * Copyright (C) 2017 OVH * * This is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License version 2.1, as published by the Free Software * Foundation. See file COPYING. * */ #ifndef CEPH_COMMON_PERF_COUNTERS_H #define CEPH_COMMON_PERF_COUNTERS_H #include <string> #include <vector> #include <memory> #include <atomic> #include <cstdint> #include "common/perf_histogram.h" #include "include/utime.h" #include "include/common_fwd.h" #include "common/ceph_mutex.h" #include "common/ceph_time.h" namespace TOPNSPC::common { class CephContext; class PerfCountersBuilder; class PerfCounters; } enum perfcounter_type_d : uint8_t { PERFCOUNTER_NONE = 0, PERFCOUNTER_TIME = 0x1, // float (measuring seconds) PERFCOUNTER_U64 = 0x2, // integer (note: either TIME or U64 *must* be set) PERFCOUNTER_LONGRUNAVG = 0x4, // paired counter + sum (time) PERFCOUNTER_COUNTER = 0x8, // counter (vs gauge) PERFCOUNTER_HISTOGRAM = 0x10, // histogram (vector) of values }; enum unit_t : uint8_t { UNIT_BYTES, UNIT_NONE }; /* Class for constructing a PerfCounters object. * * This class performs some validation that the parameters we have supplied are * correct in create_perf_counters(). * * In the future, we will probably get rid of the first/last arguments, since * PerfCountersBuilder can deduce them itself. */ namespace TOPNSPC::common { class PerfCountersBuilder { public: PerfCountersBuilder(CephContext *cct, const std::string &name, int first, int last); ~PerfCountersBuilder(); // prio values: higher is better, and higher values get included in // 'ceph daemonperf' (and similar) results. // Use of priorities enables us to add large numbers of counters // internally without necessarily overwhelming consumers. 
enum { PRIO_CRITICAL = 10, // 'interesting' is the default threshold for `daemonperf` output PRIO_INTERESTING = 8, // `useful` is the default threshold for transmission to ceph-mgr // and inclusion in prometheus/influxdb plugin output PRIO_USEFUL = 5, PRIO_UNINTERESTING = 2, PRIO_DEBUGONLY = 0, }; void add_u64(int key, const char *name, const char *description=NULL, const char *nick = NULL, int prio=0, int unit=UNIT_NONE); void add_u64_counter(int key, const char *name, const char *description=NULL, const char *nick = NULL, int prio=0, int unit=UNIT_NONE); void add_u64_avg(int key, const char *name, const char *description=NULL, const char *nick = NULL, int prio=0, int unit=UNIT_NONE); void add_time(int key, const char *name, const char *description=NULL, const char *nick = NULL, int prio=0); void add_time_avg(int key, const char *name, const char *description=NULL, const char *nick = NULL, int prio=0); void add_u64_counter_histogram( int key, const char* name, PerfHistogramCommon::axis_config_d x_axis_config, PerfHistogramCommon::axis_config_d y_axis_config, const char *description=NULL, const char* nick = NULL, int prio=0, int unit=UNIT_NONE); void set_prio_default(int prio_) { prio_default = prio_; } PerfCounters* create_perf_counters(); private: PerfCountersBuilder(const PerfCountersBuilder &rhs); PerfCountersBuilder& operator=(const PerfCountersBuilder &rhs); void add_impl(int idx, const char *name, const char *description, const char *nick, int prio, int ty, int unit=UNIT_NONE, std::unique_ptr<PerfHistogram<>> histogram = nullptr); PerfCounters *m_perf_counters; int prio_default = 0; }; /* * A PerfCounters object is usually associated with a single subsystem. * It contains counters which we modify to track performance and throughput * over time. 
* * PerfCounters can track several different types of values: * 1) integer values & counters * 2) floating-point values & counters * 3) floating-point averages * 4) 2D histograms of quantized value pairs * * The difference between values, counters and histograms is in how they are initialized * and accessed. For a counter, use the inc(counter, amount) function (note * that amount defaults to 1 if you don't set it). For a value, use the * set(index, value) function. For histogram use the hinc(value1, value2) function. * (For time, use the tinc and tset variants.) * * If for some reason you would like to reset your counters, you can do so using * the set functions even if they are counters, and you can also * increment your values if for some reason you wish to. * * For the time average, it returns the current value and * the "avgcount" member when read off. avgcount is incremented when you call * tinc. Calling tset on an average is an error and will assert out. */ class PerfCounters { public: /** Represents a PerfCounters data element. 
*/ struct perf_counter_data_any_d { perf_counter_data_any_d() : name(NULL), description(NULL), nick(NULL), type(PERFCOUNTER_NONE), unit(UNIT_NONE) {} perf_counter_data_any_d(const perf_counter_data_any_d& other) : name(other.name), description(other.description), nick(other.nick), type(other.type), unit(other.unit), u64(other.u64.load()) { auto a = other.read_avg(); u64 = a.first; avgcount = a.second; avgcount2 = a.second; if (other.histogram) { histogram.reset(new PerfHistogram<>(*other.histogram)); } } const char *name; const char *description; const char *nick; uint8_t prio = 0; enum perfcounter_type_d type; enum unit_t unit; std::atomic<uint64_t> u64 = { 0 }; std::atomic<uint64_t> avgcount = { 0 }; std::atomic<uint64_t> avgcount2 = { 0 }; std::unique_ptr<PerfHistogram<>> histogram; void reset() { if (type != PERFCOUNTER_U64) { u64 = 0; avgcount = 0; avgcount2 = 0; } if (histogram) { histogram->reset(); } } // read <sum, count> safely by making sure the post- and pre-count // are identical; in other words the whole loop needs to be run // without any intervening calls to inc, set, or tinc. 
std::pair<uint64_t,uint64_t> read_avg() const { uint64_t sum, count; do { count = avgcount2; sum = u64; } while (avgcount != count); return { sum, count }; } }; template <typename T> struct avg_tracker { std::pair<uint64_t, T> last; std::pair<uint64_t, T> cur; avg_tracker() : last(0, 0), cur(0, 0) {} T current_avg() const { if (cur.first == last.first) return 0; return (cur.second - last.second) / (cur.first - last.first); } void consume_next(const std::pair<uint64_t, T> &next) { last = cur; cur = next; } }; ~PerfCounters(); void inc(int idx, uint64_t v = 1); void dec(int idx, uint64_t v = 1); void set(int idx, uint64_t v); uint64_t get(int idx) const; void tset(int idx, utime_t v); void tinc(int idx, utime_t v); void tinc(int idx, ceph::timespan v); utime_t tget(int idx) const; void hinc(int idx, int64_t x, int64_t y); void reset(); void dump_formatted(ceph::Formatter *f, bool schema, bool dump_labeled, const std::string &counter = "") const { dump_formatted_generic(f, schema, false, dump_labeled, counter); } void dump_formatted_histograms(ceph::Formatter *f, bool schema, const std::string &counter = "") const { dump_formatted_generic(f, schema, true, false, counter); } std::pair<uint64_t, uint64_t> get_tavg_ns(int idx) const; const std::string& get_name() const; void set_name(std::string s) { m_name = s; } /// adjust priority values by some value void set_prio_adjust(int p) { prio_adjust = p; } int get_adjusted_priority(int p) const { return std::max(std::min(p + prio_adjust, (int)PerfCountersBuilder::PRIO_CRITICAL), 0); } private: PerfCounters(CephContext *cct, const std::string &name, int lower_bound, int upper_bound); PerfCounters(const PerfCounters &rhs); PerfCounters& operator=(const PerfCounters &rhs); void dump_formatted_generic(ceph::Formatter *f, bool schema, bool histograms, bool dump_labeled, const std::string &counter = "") const; typedef std::vector<perf_counter_data_any_d> perf_counter_data_vec_t; CephContext *m_cct; int m_lower_bound; int 
m_upper_bound; std::string m_name; int prio_adjust = 0; #if !defined(WITH_SEASTAR) || defined(WITH_ALIEN) const std::string m_lock_name; /** Protects m_data */ ceph::mutex m_lock; #endif perf_counter_data_vec_t m_data; friend class PerfCountersBuilder; friend class PerfCountersCollectionImpl; }; class SortPerfCountersByName { public: bool operator()(const PerfCounters* lhs, const PerfCounters* rhs) const { return (lhs->get_name() < rhs->get_name()); } }; typedef std::set <PerfCounters*, SortPerfCountersByName> perf_counters_set_t; /* * PerfCountersCollectionImp manages PerfCounters objects for a Ceph process. */ class PerfCountersCollectionImpl { public: PerfCountersCollectionImpl(); ~PerfCountersCollectionImpl(); void add(PerfCounters *l); void remove(PerfCounters *l); void clear(); bool reset(const std::string &name); void dump_formatted(ceph::Formatter *f, bool schema, bool dump_labeled, const std::string &logger = "", const std::string &counter = "") const { dump_formatted_generic(f, schema, false, dump_labeled, logger, counter); } void dump_formatted_histograms(ceph::Formatter *f, bool schema, const std::string &logger = "", const std::string &counter = "") const { dump_formatted_generic(f, schema, true, false, logger, counter); } // A reference to a perf_counter_data_any_d, with an accompanying // pointer to the enclosing PerfCounters, in order that the consumer // can see the prio_adjust class PerfCounterRef { public: PerfCounters::perf_counter_data_any_d *data; PerfCounters *perf_counters; }; typedef std::map<std::string, PerfCounterRef> CounterMap; void with_counters(std::function<void(const CounterMap &)>) const; private: void dump_formatted_generic(ceph::Formatter *f, bool schema, bool histograms, bool dump_labeled, const std::string &logger = "", const std::string &counter = "") const; perf_counters_set_t m_loggers; CounterMap by_path; }; class PerfGuard { const ceph::real_clock::time_point start; PerfCounters* const counters; const int event; public: 
PerfGuard(PerfCounters* const counters, const int event) : start(ceph::real_clock::now()), counters(counters), event(event) { } ~PerfGuard() { counters->tinc(event, ceph::real_clock::now() - start); } }; } #endif
11,376
28.550649
96
h
null
ceph-main/src/common/perf_counters_collection.h
#pragma once #include "common/perf_counters.h" #include "common/ceph_mutex.h" #include "include/common_fwd.h" namespace ceph::common { class PerfCountersCollection { CephContext *m_cct; /** Protects perf_impl->m_loggers */ mutable ceph::mutex m_lock; PerfCountersCollectionImpl perf_impl; public: PerfCountersCollection(CephContext *cct); ~PerfCountersCollection(); void add(PerfCounters *l); void remove(PerfCounters *l); void clear(); bool reset(const std::string &name); void dump_formatted(ceph::Formatter *f, bool schema, bool dump_labeled, const std::string &logger = "", const std::string &counter = ""); void dump_formatted_histograms(ceph::Formatter *f, bool schema, const std::string &logger = "", const std::string &counter = ""); void with_counters(std::function<void(const PerfCountersCollectionImpl::CounterMap &)>) const; friend class PerfCountersCollectionTest; }; class PerfCountersDeleter { CephContext* cct; public: PerfCountersDeleter() noexcept : cct(nullptr) {} PerfCountersDeleter(CephContext* cct) noexcept : cct(cct) {} void operator()(PerfCounters* p) noexcept; }; } using PerfCountersRef = std::unique_ptr<ceph::common::PerfCounters, ceph::common::PerfCountersDeleter>;
1,351
29.044444
103
h
null
ceph-main/src/common/perf_counters_key.h
#pragma once

// Utilities for building and parsing the flat string keys used to identify
// labeled perf counters ("name\0key1\0val1\0key2\0val2\0").

#include <optional>
#include <string>
#include <utility>

namespace ceph::perf_counters {

/// A key/value pair representing a perf counter label
using label_pair = std::pair<std::string_view, std::string_view>;


/// \brief Construct a key for a perf counter and set of labels.
///
/// Returns a string of the form "counter_name\0key1\0val1\0key2\0val2\0",
/// where label pairs are sorted by key with duplicates removed.
///
/// This string representation avoids extra memory allocations associated
/// with map<string, string>. It also supports the hashing and comparison
/// operators required for use as a key in unordered and ordered containers.
///
/// Example:
/// \code
/// std::string key = key_create("counter_name", {
///     {"key1", "val1"}, {"key2", "val2"}
///   });
/// \endcode
template <std::size_t Count>
std::string key_create(std::string_view counter_name,
                       label_pair (&&labels)[Count]);

/// \brief Construct a key for a perf counter without labels.
/// \overload
std::string key_create(std::string_view counter_name);

/// \brief Insert additional labels into an existing key.
///
/// This returns a new string without modifying the input. The returned
/// string has labels in sorted order and no duplicate keys.
template <std::size_t Count>
std::string key_insert(std::string_view key,
                       label_pair (&&labels)[Count]);

/// \brief Return the counter name for a given key.
std::string_view key_name(std::string_view key);


/// A forward iterator over label_pairs encoded in a key
class label_iterator {
 public:
  using base_iterator = const char*;
  using difference_type = std::ptrdiff_t;
  using value_type = label_pair;
  using pointer = const value_type*;
  using reference = const value_type&;

  label_iterator() = default;
  label_iterator(base_iterator begin, base_iterator end);

  label_iterator& operator++();
  label_iterator operator++(int);

  reference operator*() const { return state->label; }
  pointer operator->() const { return &state->label; }

  auto operator<=>(const label_iterator& rhs) const = default;

 private:
  struct iterator_state {
    base_iterator pos; // end of current label
    base_iterator end; // end of buffer
    label_pair label; // current label

    auto operator<=>(const iterator_state& rhs) const = default;
  };
  // an empty state represents a past-the-end iterator
  std::optional<iterator_state> state;

  // find the next two delimiters and construct the label string views
  static void advance(std::optional<iterator_state>& s);

  // try to parse the first label pair
  static auto make_state(base_iterator begin, base_iterator end)
      -> std::optional<iterator_state>;
};

/// A sorted range of label_pairs
class label_range {
  std::string_view buffer;
 public:
  using iterator = label_iterator;
  using const_iterator = label_iterator;

  label_range(std::string_view buffer) : buffer(buffer) {}

  const_iterator begin() const { return {buffer.begin(), buffer.end()}; }
  const_iterator cbegin() const { return {buffer.begin(), buffer.end()}; }

  const_iterator end() const { return {}; }
  const_iterator cend() const { return {}; }
};

/// \brief Return the sorted range of label_pairs for a given key.
///
/// Example:
/// \code
/// for (label_pair label : key_labels(key)) {
///   std::cout << label.first << ":" << label.second << std::endl;
/// }
/// \endcode
label_range key_labels(std::string_view key);


// Non-template implementations, defined out-of-line; the templates above
// only forward to these so that the sorting/dedup logic is compiled once.
namespace detail {

std::string create(std::string_view counter_name,
                   label_pair* begin, label_pair* end);

std::string insert(const char* begin1, const char* end1,
                   label_pair* begin2, label_pair* end2);

} // namespace detail

template <std::size_t Count>
std::string key_create(std::string_view counter_name,
                       label_pair (&&labels)[Count])
{
  return detail::create(counter_name, std::begin(labels), std::end(labels));
}

template <std::size_t Count>
std::string key_insert(std::string_view key,
                       label_pair (&&labels)[Count])
{
  return detail::insert(key.begin(), key.end(),
                        std::begin(labels), std::end(labels));
}

} // namespace ceph::perf_counters
4,245
29.328571
76
h
null
ceph-main/src/common/perf_histogram.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
 * Ceph - scalable distributed file system
 *
 * Copyright (C) 2017 OVH
 *
 * This is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License version 2.1, as published by the Free Software
 * Foundation. See file COPYING.
 *
 */

#ifndef CEPH_COMMON_PERF_HISTOGRAM_H
#define CEPH_COMMON_PERF_HISTOGRAM_H

#include <array>
#include <atomic>
#include <memory>

#include "common/Formatter.h"
#include "include/int_types.h"
#include "include/ceph_assert.h"

class PerfHistogramCommon {
public:
  enum scale_type_d : uint8_t {
    SCALE_LINEAR = 1, ///< equally sized buckets
    SCALE_LOG2 = 2,   ///< bucket widths grow by powers of two
  };

  /// Configuration of a single histogram axis: name, quantization scale,
  /// lowest tracked value, bucket width unit and bucket count.
  struct axis_config_d {
    const char *m_name = nullptr;
    scale_type_d m_scale_type = SCALE_LINEAR;
    int64_t m_min = 0;
    int64_t m_quant_size = 0;
    int32_t m_buckets = 0;
    axis_config_d() = default;
    axis_config_d(const char* name,
		  scale_type_d scale_type,
		  int64_t min,
		  int64_t quant_size,
		  int32_t buckets)
      : m_name(name),
	m_scale_type(scale_type),
	m_min(min),
	m_quant_size(quant_size),
	m_buckets(buckets)
    {}
  };

protected:
  /// Dump configuration of one axis to a formatter
  static void dump_formatted_axis(ceph::Formatter *f, const axis_config_d &ac);

  /// Quantize given value and convert to bucket number on given axis
  static int64_t get_bucket_for_axis(int64_t value, const axis_config_d &ac);

  /// Calculate inclusive ranges of axis values for each bucket on that axis
  static std::vector<std::pair<int64_t, int64_t>> get_axis_bucket_ranges(
      const axis_config_d &ac);
};

/// PerfHistogram does trace a histogram of input values. It's an extended
/// version of a standard histogram which does trace characteristics of a single
/// one value only. In this implementation, values can be traced in multiple
/// dimensions - i.e. we can create a histogram of input request size (first
/// dimension) and processing latency (second dimension). Creating standard
/// histogram out of such multidimensional one is trivial and requires summing
/// values across dimensions we're not interested in.
template <int DIM = 2>
class PerfHistogram : public PerfHistogramCommon {
public:
  /// Initialize new histogram object
  PerfHistogram(std::initializer_list<axis_config_d> axes_config) {
    ceph_assert(axes_config.size() == DIM &&
                "Invalid number of axis configuration objects");

    int i = 0;
    for (const auto &ac : axes_config) {
      ceph_assertf(ac.m_buckets > 0, "Must have at least one bucket on axis");
      ceph_assertf(ac.m_quant_size > 0,
                   "Quantization unit must be non-zero positive integer value");
      m_axes_config[i++] = ac;
    }

    // value-initialized (zeroed) flat array of atomic counters
    m_rawData.reset(new std::atomic<uint64_t>[get_raw_size()] {});
  }

  /// Copy from other histogram object
  /// (counters are loaded one by one, so the copy is not an atomic
  /// snapshot if `other` is concurrently updated)
  PerfHistogram(const PerfHistogram &other)
      : m_axes_config(other.m_axes_config) {
    int64_t size = get_raw_size();
    m_rawData.reset(new std::atomic<uint64_t>[size] {});
    for (int64_t i = 0; i < size; i++) {
      m_rawData[i] = other.m_rawData[i].load();
    }
  }

  /// Set all histogram values to 0
  void reset() {
    auto size = get_raw_size();
    for (auto i = size; --i >= 0;) {
      m_rawData[i] = 0;
    }
  }

  /// Increase counter for given axis values by one
  template <typename... T>
  void inc(T... axis) {
    auto index = get_raw_index_for_value(axis...);
    m_rawData[index]++;
  }

  /// Increase counter for given axis buckets by one
  template <typename... T>
  void inc_bucket(T... bucket) {
    auto index = get_raw_index_for_bucket(bucket...);
    m_rawData[index]++;
  }

  /// Read value from given bucket
  template <typename... T>
  uint64_t read_bucket(T... bucket) const {
    auto index = get_raw_index_for_bucket(bucket...);
    return m_rawData[index];
  }

  /// Dump data to a Formatter object
  void dump_formatted(ceph::Formatter *f) const {
    // Dump axes configuration
    f->open_array_section("axes");
    for (auto &ac : m_axes_config) {
      dump_formatted_axis(f, ac);
    }
    f->close_section();

    // Dump histogram values
    dump_formatted_values(f);
  }

protected:
  /// Raw data stored as linear space, internal indexes are calculated on
  /// demand.
  std::unique_ptr<std::atomic<uint64_t>[]> m_rawData;

  /// Configuration of axes
  std::array<axis_config_d, DIM> m_axes_config;

  /// Dump histogram counters to a formatter
  void dump_formatted_values(ceph::Formatter *f) const {
    visit_values([f](int) { f->open_array_section("values"); },
                 [f](int64_t value) { f->dump_unsigned("value", value); },
                 [f](int) { f->close_section(); });
  }

  /// Get number of all histogram counters
  /// (product of bucket counts across all axes)
  int64_t get_raw_size() {
    int64_t ret = 1;
    for (const auto &ac : m_axes_config) {
      ret *= ac.m_buckets;
    }
    return ret;
  }

  /// Calculate m_rawData index from axis values
  template <typename... T>
  int64_t get_raw_index_for_value(T... axes) const {
    static_assert(sizeof...(T) == DIM, "Incorrect number of arguments");
    return get_raw_index_internal<0>(get_bucket_for_axis, 0, axes...);
  }

  /// Calculate m_rawData index from axis bucket numbers
  template <typename... T>
  int64_t get_raw_index_for_bucket(T... buckets) const {
    static_assert(sizeof...(T) == DIM, "Incorrect number of arguments");
    return get_raw_index_internal<0>(
        [](int64_t bucket, const axis_config_d &ac) {
          ceph_assertf(bucket >= 0, "Bucket index can not be negative");
          ceph_assertf(bucket < ac.m_buckets, "Bucket index too large");
          return bucket;
        },
        0, buckets...);
  }

  // Recursively fold one axis per call: buckets are laid out row-major,
  // so each level computes startIndex * buckets + bucket.
  template <int level = 0, typename F, typename... T>
  int64_t get_raw_index_internal(F bucket_evaluator, int64_t startIndex,
                                 int64_t value, T... tail) const {
    static_assert(level + 1 + sizeof...(T) == DIM,
                  "Internal consistency check");
    auto &ac = m_axes_config[level];
    auto bucket = bucket_evaluator(value, ac);
    return get_raw_index_internal<level + 1>(
        bucket_evaluator, ac.m_buckets * startIndex + bucket, tail...);
  }

  // Recursion terminator: all axes folded into startIndex.
  template <int level, typename F>
  int64_t get_raw_index_internal(F, int64_t startIndex) const {
    static_assert(level == DIM, "Internal consistency check");
    return startIndex;
  }

  /// Visit all histogram counters, call onDimensionEnter / onDimensionLeave
  /// when starting / finishing traversal on given axis, call onValue when
  /// dumping raw histogram counter value.
  template <typename FDE, typename FV, typename FDL>
  void visit_values(FDE onDimensionEnter, FV onValue, FDL onDimensionLeave,
                    int level = 0, int startIndex = 0) const {
    if (level == DIM) {
      onValue(m_rawData[startIndex]);
      return;
    }

    onDimensionEnter(level);
    auto &ac = m_axes_config[level];
    startIndex *= ac.m_buckets;
    for (int32_t i = 0; i < ac.m_buckets; ++i, ++startIndex) {
      visit_values(onDimensionEnter, onValue, onDimensionLeave, level + 1,
                   startIndex);
    }
    onDimensionLeave(level);
  }
};

#endif
7,206
30.609649
80
h
null
ceph-main/src/common/pick_address.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#ifndef CEPH_PICK_ADDRESS_H
#define CEPH_PICK_ADDRESS_H

#include <string>
#include <list>

#include "include/common_fwd.h"

struct entity_addr_t;
class entity_addrvec_t;

// Bit flags controlling which addresses pick_addresses() selects:
// which network (public/cluster), which messenger protocol version,
// which IP family, and family preference / default-port handling.
#define CEPH_PICK_ADDRESS_PUBLIC           0x01
#define CEPH_PICK_ADDRESS_CLUSTER          0x02
#define CEPH_PICK_ADDRESS_MSGR1            0x04
#define CEPH_PICK_ADDRESS_MSGR2            0x08
#define CEPH_PICK_ADDRESS_IPV4             0x10
#define CEPH_PICK_ADDRESS_IPV6             0x20
#define CEPH_PICK_ADDRESS_PREFER_IPV4      0x40
#define CEPH_PICK_ADDRESS_DEFAULT_MON_PORTS 0x80
#define CEPH_PICK_ADDRESS_PUBLIC_BIND      0x100

#ifndef WITH_SEASTAR
/*
  Pick addresses based on subnets if needed.

  If an address is not explicitly given, and a list of subnets is
  given, find an assigned IP address in the subnets and set that.

  cluster_addr is set based on cluster_network, public_addr is set
  based on public_network.

  cluster_network and public_network are a list of ip/prefix pairs.

  All IP addresses assigned to all local network interfaces are potential
  matches.

  If multiple IP addresses match the subnet, one of them will be
  picked, effectively randomly.

  This function will exit on error.
 */
void pick_addresses(CephContext *cct, int needs);
#endif // !WITH_SEASTAR

int pick_addresses(CephContext *cct, unsigned flags, entity_addrvec_t *addrs,
		   int preferred_numa_node = -1);
int pick_addresses(CephContext *cct, unsigned flags, struct ifaddrs *ifa,
		   entity_addrvec_t *addrs,
		   int preferred_numa_node = -1);

/**
 * Find a network interface whose address matches the address/netmask
 * in `network`.
 */
std::string pick_iface(CephContext *cct, const struct sockaddr_storage &network);

/**
 * check for a locally configured address
 *
 * check if any of the listed addresses is configured on the local host.
 *
 * @param cct context
 * @param ls list of addresses
 * @param match [out] pointer to match, if an item in @a ls is found configured
 * locally.
 */
bool have_local_addr(CephContext *cct, const std::list<entity_addr_t>& ls,
		     entity_addr_t *match);

/**
 * filter the addresses in @c ifa with specified interfaces, networks and IPv
 *
 * @param cct
 * @param ifa a list of network interface addresses to be filtered
 * @param ipv bitmask of CEPH_PICK_ADDRESS_IPV4 and CEPH_PICK_ADDRESS_IPV6.
 *        it is used to filter the @c networks
 * @param networks a comma separated list of networks as the allow list. only
 *        the addresses in the specified networks are allowed. all addresses
 *        are accepted if it is empty.
 * @param interfaces a comma separated list of interfaces for the allow list.
 *        all addresses are accepted if it is empty
 * @param numa_node NOTE(review): previous doc described a non-existent
 *        "exclude_lo_iface" parameter; presumably a value >= 0 restricts
 *        matches to interfaces on that NUMA node -- confirm against the
 *        implementation.
 */
const struct sockaddr *find_ip_in_subnet_list(
  CephContext *cct,
  const struct ifaddrs *ifa,
  unsigned ipv,
  const std::string &networks,
  const std::string &interfaces,
  int numa_node=-1);

int get_iface_numa_node(
  const std::string& iface,
  int *node);

#endif
3,094
30.262626
97
h
null
ceph-main/src/common/ppc-opcode.h
/*
 * Copyright (C) 2015 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of either:
 *
 *  a) the GNU General Public License as published by the Free Software
 *     Foundation; either version 2 of the License, or (at your option)
 *     any later version, or
 *  b) the Apache License, Version 2.0
 */
/* Copyright (C) 2017 International Business Machines Corp.
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

/*
 * Encodings of PPC64/VSX register fields and a few instruction words,
 * for emitting instructions (e.g. from .S files) that older assemblers
 * do not know about.
 *
 * Note: this header previously contained a second, byte-identical copy of
 * the guarded block below.  The duplicate was dead code -- the include
 * guard (__OPCODES_H) was already defined, so the preprocessor always
 * skipped it -- and has been removed; both copyright notices are retained
 * above.
 */
#ifndef __OPCODES_H
#define __OPCODES_H

/* 5-bit RA/RB general-register fields of an instruction word */
#define __PPC_RA(a)	(((a) & 0x1f) << 16)
#define __PPC_RB(b)	(((b) & 0x1f) << 11)
/* 6-bit VSX register fields: low 5 bits in the field, 6th bit split out */
#define __PPC_XA(a)	((((a) & 0x1f) << 16) | (((a) & 0x20) >> 3))
#define __PPC_XB(b)	((((b) & 0x1f) << 11) | (((b) & 0x20) >> 4))
#define __PPC_XS(s)	((((s) & 0x1f) << 21) | (((s) & 0x20) >> 5))
#define __PPC_XT(s)	__PPC_XS(s)
/* Combine register fields into XX3/XX1-form operand encodings */
#define VSX_XX3(t, a, b)	(__PPC_XT(t) | __PPC_XA(a) | __PPC_XB(b))
#define VSX_XX1(s, a, b)	(__PPC_XS(s) | __PPC_RA(a) | __PPC_RB(b))

/* Base opcodes of the emitted instructions */
#define PPC_INST_VPMSUMW	0x10000488
#define PPC_INST_VPMSUMD	0x100004c8
#define PPC_INST_MFVSRD		0x7c000066
#define PPC_INST_MTVSRD		0x7c000166

/* Emit the raw instruction word via the assembler's .long directive */
#define VPMSUMW(t, a, b)	.long PPC_INST_VPMSUMW | VSX_XX3((t), a, b)
#define VPMSUMD(t, a, b)	.long PPC_INST_VPMSUMD | VSX_XX3((t), a, b)
#define MFVRD(a, t)		.long PPC_INST_MFVSRD | VSX_XX1((t)+32, a, 0)
#define MTVRD(t, a)		.long PPC_INST_MTVSRD | VSX_XX1((t)+32, a, 0)

#endif
2,567
37.909091
71
h
null
ceph-main/src/common/pretty_binary.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#pragma once

#include <string>

// Render a binary string in a human-readable mixed form: printable runs
// are quoted ('like this', with an embedded ' doubled as an escape) and
// non-printable runs are emitted as uppercase hex (0xABCD).  A switch from
// hex back to quoted text only happens when at least three consecutive
// printable bytes follow, to avoid flip-flopping on isolated printable
// bytes inside binary data.
template<typename S>
static std::string pretty_binary_string(const S& bin)
{
  std::string out;
  if (bin.empty())
    return out;
  out.reserve(bin.length() * 3);
  const auto is_text = [](unsigned char ch) -> bool {
    return ch >= 32 && ch <= 126;
  };
  const auto put_hex = [&out](unsigned char ch) {
    constexpr char digits[] = "0123456789ABCDEF";
    out.push_back(digits[ch >> 4]);
    out.push_back(digits[ch & 0x0f]);
  };
  // prologue: open in whichever mode fits the first byte
  bool quoted = is_text(bin[0]);
  out.append(quoted ? "'" : "0x");
  for (size_t pos = 0; pos < bin.length(); ++pos) {
    const unsigned char ch = bin[pos];
    if (quoted && !is_text(ch)) {
      // close the quoted run and start a hex run
      out.append("'0x");
      quoted = false;
    } else if (!quoted && pos + 2 < bin.length() &&
	       is_text(bin[pos]) && is_text(bin[pos + 1]) &&
	       is_text(bin[pos + 2])) {
      // three printable bytes ahead: reopen a quoted run
      out.push_back('\'');
      quoted = true;
    }
    if (quoted) {
      if (ch == '\'')
	out.push_back('\'');  // escape embedded quote by doubling it
      out.push_back(static_cast<char>(ch));
    } else {
      put_hex(ch);
    }
  }
  // epilogue: close a still-open quoted run
  if (quoted)
    out.push_back('\'');
  return out;
}

std::string pretty_binary_string_reverse(const std::string& pretty);
1,561
21.970588
74
h
null
ceph-main/src/common/random_string.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab ft=cpp /* * Ceph - scalable distributed file system * * Copyright (C) 2004-2009 Sage Weil <sage@newdream.net> * Copyright (C) 2015 Yehuda Sadeh <yehuda@redhat.com> * * This is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License version 2.1, as published by the Free Software * Foundation. See file COPYING. * */ #pragma once #include <string> #include "include/common_fwd.h" /* size should be the required string size + 1 */ int gen_rand_base64(CephContext *cct, char *dest, size_t size); void gen_rand_alphanumeric(CephContext *cct, char *dest, size_t size); void gen_rand_alphanumeric_lower(CephContext *cct, char *dest, size_t size); void gen_rand_alphanumeric_upper(CephContext *cct, char *dest, size_t size); void gen_rand_alphanumeric_no_underscore(CephContext *cct, char *dest, size_t size); void gen_rand_alphanumeric_plain(CephContext *cct, char *dest, size_t size); // returns a std::string with 'size' random characters std::string gen_rand_alphanumeric(CephContext *cct, size_t size); std::string gen_rand_alphanumeric_lower(CephContext *cct, size_t size); std::string gen_rand_alphanumeric_upper(CephContext *cct, size_t size); std::string gen_rand_alphanumeric_no_underscore(CephContext *cct, size_t size); std::string gen_rand_alphanumeric_plain(CephContext *cct, size_t size);
1,473
39.944444
84
h
null
ceph-main/src/common/ref.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #ifndef COMMON_REF_H #define COMMON_REF_H #include <boost/intrusive_ptr.hpp> namespace ceph { template<typename T> using ref_t = boost::intrusive_ptr<T>; template<typename T> using cref_t = boost::intrusive_ptr<const T>; template<class T, class U> ref_t<T> ref_cast(const ref_t<U>& r) noexcept { return static_cast<T*>(r.get()); } template<class T, class U> ref_t<T> ref_cast(ref_t<U>&& r) noexcept { return {static_cast<T*>(r.detach()), false}; } template<class T, class U> cref_t<T> ref_cast(const cref_t<U>& r) noexcept { return static_cast<const T*>(r.get()); } template<class T, typename... Args> ceph::ref_t<T> make_ref(Args&&... args) { return {new T(std::forward<Args>(args)...), false}; } } // Friends cannot be partial specializations: https://en.cppreference.com/w/cpp/language/friend #define FRIEND_MAKE_REF(C) \ template<class T, typename... Args> friend ceph::ref_t<T> ceph::make_ref(Args&&... args) #endif
1,032
28.514286
95
h
null
ceph-main/src/common/reverse.c
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab /* * Ceph - scalable distributed file system * * Copyright (C) 2004-2006 Sage Weil <sage@newdream.net> * * This is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License version 2.1, as published by the Free Software * Foundation. See file COPYING. * */ #include "reverse.h" uint32_t reverse_bits(uint32_t v) { if (v == 0) return v; /* reverse bits * swap odd and even bits */ v = ((v >> 1) & 0x55555555) | ((v & 0x55555555) << 1); /* swap consecutive pairs */ v = ((v >> 2) & 0x33333333) | ((v & 0x33333333) << 2); /* swap nibbles ... */ v = ((v >> 4) & 0x0F0F0F0F) | ((v & 0x0F0F0F0F) << 4); /* swap bytes */ v = ((v >> 8) & 0x00FF00FF) | ((v & 0x00FF00FF) << 8); /* swap 2-byte long pairs */ v = ( v >> 16 ) | ( v << 16); return v; } uint32_t reverse_nibbles(uint32_t retval) { /* reverse nibbles */ retval = ((retval & 0x0f0f0f0f) << 4) | ((retval & 0xf0f0f0f0) >> 4); retval = ((retval & 0x00ff00ff) << 8) | ((retval & 0xff00ff00) >> 8); retval = ((retval & 0x0000ffff) << 16) | ((retval & 0xffff0000) >> 16); return retval; }
1,311
29.511628
89
c
null
ceph-main/src/common/reverse.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab /* * Ceph - scalable distributed file system * * Copyright (C) 2004-2006 Sage Weil <sage@newdream.net> * * This is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License version 2.1, as published by the Free Software * Foundation. See file COPYING. * */ #ifndef __CEPH_OS_REVERSE_H #define __CEPH_OS_REVERSE_H #include "include/int_types.h" #ifdef __cplusplus extern "C" { #endif extern uint32_t reverse_bits(uint32_t v); extern uint32_t reverse_nibbles(uint32_t retval); #ifdef __cplusplus } #endif #endif
691
20.625
71
h
null
ceph-main/src/common/run_cmd.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab /* * Ceph - scalable distributed file system * * Copyright (C) 2011 New Dream Network * * This is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License version 2.1, as published by the Free Software * Foundation. See file COPYING. * */ #ifndef CEPH_COMMON_RUN_CMD_H #define CEPH_COMMON_RUN_CMD_H #include <string> // // Fork a command and run it. The shell will not be invoked and shell // expansions will not be done. // This function takes a variable number of arguments. The last argument must // be NULL. // // Example: // run_cmd("rm", "-rf", "foo", NULL) // // Returns an empty string on success, and an error string otherwise. // std::string run_cmd(const char *cmd, ...); #endif
865
24.470588
77
h
null
ceph-main/src/common/safe_io.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
 * Ceph - scalable distributed file system
 *
 * Copyright (C) 2011 New Dream Network
 *
 * This is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License version 2.1, as published by the Free Software
 * Foundation. See file COPYING.
 *
 */

#ifndef CEPH_SAFE_IO
#define CEPH_SAFE_IO

#include "common/compiler_extensions.h"
#include <sys/types.h>

#ifdef __cplusplus
extern "C" {
#endif

  /*
   * Safe functions wrapping the raw read() and write() libc functions.
   * These retry on EINTR, and on error return -errno instead of returning
   * -1 and setting errno).
   *
   * On Windows, only recv/send work with sockets.
   */
  ssize_t safe_read(int fd, void *buf, size_t count)
    WARN_UNUSED_RESULT;
  ssize_t safe_write(int fd, const void *buf, size_t count)
    WARN_UNUSED_RESULT;
  ssize_t safe_recv(int fd, void *buf, size_t count)
    WARN_UNUSED_RESULT;
  ssize_t safe_send(int fd, const void *buf, size_t count)
    WARN_UNUSED_RESULT;
  ssize_t safe_pread(int fd, void *buf, size_t count, off_t offset)
    WARN_UNUSED_RESULT;
  ssize_t safe_pwrite(int fd, const void *buf, size_t count, off_t offset)
    WARN_UNUSED_RESULT;

#ifdef CEPH_HAVE_SPLICE
  /*
   * Similar to the above (non-exact version) and below (exact version).
   * See splice(2) for parameter descriptions.
   */
  ssize_t safe_splice(int fd_in, off_t *off_in, int fd_out, off_t *off_out,
		      size_t len, unsigned int flags)
    WARN_UNUSED_RESULT;
  ssize_t safe_splice_exact(int fd_in, off_t *off_in, int fd_out,
			    off_t *off_out, size_t len, unsigned int flags)
    WARN_UNUSED_RESULT;
#endif

  /*
   * Same as the above functions, but return -EDOM unless exactly the
   * requested number of bytes can be read.
   */
  ssize_t safe_read_exact(int fd, void *buf, size_t count)
    WARN_UNUSED_RESULT;
  ssize_t safe_recv_exact(int fd, void *buf, size_t count)
    WARN_UNUSED_RESULT;
  ssize_t safe_pread_exact(int fd, void *buf, size_t count, off_t offset)
    WARN_UNUSED_RESULT;

  /*
   * Safe functions to read and write an entire file.
   */
  /* Write 'vallen' bytes of 'val' to <base>/<file>, created with 'mode'.
   * NOTE(review): presumably implemented via a temporary file + rename for
   * atomicity -- confirm in the implementation before relying on it. */
  int safe_write_file(const char *base, const char *file,
		      const char *val, size_t vallen,
		      unsigned mode);
  /* Read up to 'vallen' bytes of <base>/<file> into 'val'. */
  int safe_read_file(const char *base, const char *file,
		     char *val, size_t vallen);

#ifdef __cplusplus
}
#endif

#endif
2,482
28.915663
79
h
null
ceph-main/src/common/scrub_types.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#ifndef CEPH_SCRUB_TYPES_H
#define CEPH_SCRUB_TYPES_H

#include "osd/osd_types.h"

// wrappers around scrub types to offer the necessary bits other than
// the minimal set that the librados interface requires.
//
// Each wrapper derives from a plain librados struct and adds only member
// functions (encode/decode and error-flag setters); the reinterpret_cast
// decode shims below rely on the wrappers adding no data members.
struct object_id_wrapper : public librados::object_id_t {
  explicit object_id_wrapper(const hobject_t& hoid)
    : object_id_t{hoid.oid.name, hoid.nspace, hoid.get_key(), hoid.snap}
  {}
  void encode(ceph::buffer::list& bl) const;
  void decode(ceph::buffer::list::const_iterator& bl);
};

WRITE_CLASS_ENCODER(object_id_wrapper)

namespace librados {
inline void decode(object_id_t& obj, ceph::buffer::list::const_iterator& bp) {
  reinterpret_cast<object_id_wrapper&>(obj).decode(bp);
}
}

struct osd_shard_wrapper : public librados::osd_shard_t {
  void encode(ceph::buffer::list& bl) const;
  void decode(ceph::buffer::list::const_iterator& bp);
};

WRITE_CLASS_ENCODER(osd_shard_wrapper)

namespace librados {
  inline void decode(librados::osd_shard_t& shard, ceph::buffer::list::const_iterator& bp) {
    reinterpret_cast<osd_shard_wrapper&>(shard).decode(bp);
  }
}

// Per-shard scrub findings; the setters below OR the matching err_t flag
// into the inherited 'errors' bitmask.
struct shard_info_wrapper : public librados::shard_info_t {
public:
  shard_info_wrapper() = default;
  explicit shard_info_wrapper(const ScrubMap::object& object) {
    set_object(object);
  }
  void set_object(const ScrubMap::object& object);
  void set_missing() {
    errors |= err_t::SHARD_MISSING;
  }
  void set_omap_digest_mismatch_info() {
    errors |= err_t::OMAP_DIGEST_MISMATCH_INFO;
  }
  void set_size_mismatch_info() {
    errors |= err_t::SIZE_MISMATCH_INFO;
  }
  void set_data_digest_mismatch_info() {
    errors |= err_t::DATA_DIGEST_MISMATCH_INFO;
  }
  void set_read_error() {
    errors |= err_t::SHARD_READ_ERR;
  }
  void set_stat_error() {
    errors |= err_t::SHARD_STAT_ERR;
  }
  void set_ec_hash_mismatch() {
    errors |= err_t::SHARD_EC_HASH_MISMATCH;
  }
  void set_ec_size_mismatch() {
    errors |= err_t::SHARD_EC_SIZE_MISMATCH;
  }
  void set_info_missing() {
    errors |= err_t::INFO_MISSING;
  }
  void set_info_corrupted() {
    errors |= err_t::INFO_CORRUPTED;
  }
  void set_snapset_missing() {
    errors |= err_t::SNAPSET_MISSING;
  }
  void set_snapset_corrupted() {
    errors |= err_t::SNAPSET_CORRUPTED;
  }
  void set_obj_size_info_mismatch() {
    errors |= err_t::OBJ_SIZE_INFO_MISMATCH;
  }
  void set_hinfo_missing() {
    errors |= err_t::HINFO_MISSING;
  }
  void set_hinfo_corrupted() {
    errors |= err_t::HINFO_CORRUPTED;
  }
  // true iff DATA_DIGEST_MISMATCH_INFO is the only error recorded
  bool only_data_digest_mismatch_info() const {
    return errors == err_t::DATA_DIGEST_MISMATCH_INFO;
  }
  void clear_data_digest_mismatch_info() {
    errors &= ~err_t::DATA_DIGEST_MISMATCH_INFO;
  }
  void encode(ceph::buffer::list& bl) const;
  void decode(ceph::buffer::list::const_iterator& bp);
};

WRITE_CLASS_ENCODER(shard_info_wrapper)

namespace librados {
  inline void decode(librados::shard_info_t& shard,
		     ceph::buffer::list::const_iterator& bp) {
    reinterpret_cast<shard_info_wrapper&>(shard).decode(bp);
  }
}

// Object-level inconsistency report; setters OR obj_err_t flags into
// the inherited 'errors' bitmask.
struct inconsistent_obj_wrapper : librados::inconsistent_obj_t {
  explicit inconsistent_obj_wrapper(const hobject_t& hoid);

  void set_object_info_inconsistency() {
    errors |= obj_err_t::OBJECT_INFO_INCONSISTENCY;
  }
  void set_omap_digest_mismatch() {
    errors |= obj_err_t::OMAP_DIGEST_MISMATCH;
  }
  void set_data_digest_mismatch() {
    errors |= obj_err_t::DATA_DIGEST_MISMATCH;
  }
  void set_size_mismatch() {
    errors |= obj_err_t::SIZE_MISMATCH;
  }
  void set_attr_value_mismatch() {
    errors |= obj_err_t::ATTR_VALUE_MISMATCH;
  }
  void set_attr_name_mismatch() {
    errors |= obj_err_t::ATTR_NAME_MISMATCH;
  }
  void set_snapset_inconsistency() {
    errors |= obj_err_t::SNAPSET_INCONSISTENCY;
  }
  void set_hinfo_inconsistency() {
    errors |= obj_err_t::HINFO_INCONSISTENCY;
  }
  void set_size_too_large() {
    errors |= obj_err_t::SIZE_TOO_LARGE;
  }
  void add_shard(const pg_shard_t& pgs, const shard_info_wrapper& shard);
  void set_auth_missing(const hobject_t& hoid,
                        const std::map<pg_shard_t, ScrubMap>&,
			std::map<pg_shard_t, shard_info_wrapper>&,
			int &shallow_errors, int &deep_errors,
			const pg_shard_t &primary);
  void set_version(uint64_t ver) { version = ver; }
  void encode(ceph::buffer::list& bl) const;
  void decode(ceph::buffer::list::const_iterator& bp);
};

WRITE_CLASS_ENCODER(inconsistent_obj_wrapper)

// NOTE(review): unlike the sibling decode() shims, this one is at global
// scope rather than in namespace librados -- confirm this is intentional.
inline void decode(librados::inconsistent_obj_t& obj,
		   ceph::buffer::list::const_iterator& bp) {
  reinterpret_cast<inconsistent_obj_wrapper&>(obj).decode(bp);
}

// Snapset-level inconsistency report for a head object.
struct inconsistent_snapset_wrapper : public librados::inconsistent_snapset_t {
  inconsistent_snapset_wrapper() = default;
  explicit inconsistent_snapset_wrapper(const hobject_t& head);
  void set_headless();
  // soid claims that it is a head or a snapdir, but its SS_ATTR
  // is missing.
  void set_snapset_missing();
  void set_info_missing();
  void set_snapset_corrupted();
  void set_info_corrupted();
  // snapset with missing clone
  void set_clone_missing(snapid_t);
  // Clones that are there
  void set_clone(snapid_t);
  // the snapset is not consistent with itself
  void set_snapset_error();
  void set_size_mismatch();

  void encode(ceph::buffer::list& bl) const;
  void decode(ceph::buffer::list::const_iterator& bp);
};

WRITE_CLASS_ENCODER(inconsistent_snapset_wrapper)

namespace librados {
  inline void decode(librados::inconsistent_snapset_t& snapset,
		     ceph::buffer::list::const_iterator& bp) {
    reinterpret_cast<inconsistent_snapset_wrapper&>(snapset).decode(bp);
  }
}

// Request payload for the scrub "list inconsistencies" class operation.
struct scrub_ls_arg_t {
  uint32_t interval;            // scrub interval (epoch) the caller saw
  uint32_t get_snapsets;        // nonzero: list snapsets, else objects
  librados::object_id_t start_after;  // resume cursor
  uint64_t max_return;          // max entries per reply

  void encode(ceph::buffer::list& bl) const;
  void decode(ceph::buffer::list::const_iterator& bl);
};

WRITE_CLASS_ENCODER(scrub_ls_arg_t);

// Reply payload: the interval the results belong to plus encoded entries.
struct scrub_ls_result_t {
  epoch_t interval;
  std::vector<ceph::buffer::list> vals;

  void encode(ceph::buffer::list& bl) const;
  void decode(ceph::buffer::list::const_iterator& bl);
};

WRITE_CLASS_ENCODER(scrub_ls_result_t);

#endif
6,175
28.270142
92
h
null
ceph-main/src/common/secret.c
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab /* * Ceph - scalable distributed file system * * Copyright (C) 2011 New Dream Network * * This is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License version 2.1, as published by the Free Software * Foundation. See file COPYING. * */ #include <string.h> #include <stdio.h> #include <unistd.h> #include <errno.h> #include <fcntl.h> #include <keyutils.h> #include "include/compat.h" #include "common/armor.h" #include "common/safe_io.h" int read_secret_from_file(const char *filename, char *secret, size_t max_len) { char *end; int fd; int len; fd = open(filename, O_RDONLY); if (fd < 0) { perror("unable to read secretfile"); return -errno; } len = safe_read(fd, secret, max_len); if (len <= 0) { perror("unable to read secret from file"); close(fd); return len ? len : -ENODATA; } end = secret; while (end < secret + len && *end && *end != '\n' && *end != '\r') end++; *end = '\0'; close(fd); return 0; } int set_kernel_secret(const char *secret, const char *key_name) { /* try to submit key to kernel via the keys api */ key_serial_t serial; int ret; int secret_len = strlen(secret); char payload[((secret_len * 3) / 4) + 4]; if (!secret_len) { fprintf(stderr, "secret is empty.\n"); return -EINVAL; } ret = ceph_unarmor(payload, payload+sizeof(payload), secret, secret+secret_len); if (ret < 0) { char error_buf[80]; fprintf(stderr, "secret is not valid base64: %s.\n", ceph_strerror_r(-ret, error_buf, sizeof(error_buf))); return ret; } serial = add_key("ceph", key_name, payload, ret, KEY_SPEC_PROCESS_KEYRING); if (serial == -1) { ret = -errno; } return ret; } int is_kernel_secret(const char *key_name) { key_serial_t serial; serial = request_key("ceph", key_name, NULL, KEY_SPEC_USER_KEYRING); return serial != -1; }
2,024
22.275862
82
c
null
ceph-main/src/common/shunique_lock.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #ifndef CEPH_COMMON_SHUNIQUE_LOCK_H #define CEPH_COMMON_SHUNIQUE_LOCK_H #include <mutex> #include <shared_mutex> #include <system_error> namespace ceph { // This is a 'lock' class in the style of shared_lock and // unique_lock. Like shared_mutex it implements both Lockable and // SharedLockable. // My rationale is thus: one of the advantages of unique_lock is that // I can pass a thread of execution's control of a lock around as a // parameter. So that methods further down the call stack can unlock // it, do something, relock it, and have the lock state be known by // the caller afterward, explicitly. The shared_lock class offers a // similar advantage to shared_lock, but each class is one or the // other. In Objecter we have calls that in most cases need /a/ lock // on the shared mutex, and whether it's shared or exclusive doesn't // matter. In some circumstances they may drop the shared lock and // reacquire an exclusive one. This could be handled by passing both a // shared and unique lock down the call stack. This is vexacious and // shameful. // Wanting to avoid heaping shame and vexation upon myself, I threw // this class together. // This class makes no attempt to support atomic upgrade or // downgrade. I don't want either. Matt has convinced me that if you // think you want them you've usually made a mistake somewhere. It is // exactly and only a reification of the state held on a shared mutex. /// Acquire unique ownership of the mutex. struct acquire_unique_t { }; /// Acquire shared ownership of the mutex. 
struct acquire_shared_t { }; constexpr acquire_unique_t acquire_unique { }; constexpr acquire_shared_t acquire_shared { }; template<typename Mutex> class shunique_lock { public: typedef Mutex mutex_type; typedef std::unique_lock<Mutex> unique_lock_type; typedef std::shared_lock<Mutex> shared_lock_type; shunique_lock() noexcept : m(nullptr), o(ownership::none) { } // We do not provide a default locking/try_locking constructor that // takes only the mutex, since it is not clear whether to take it // shared or unique. We explicitly require the use of lock_deferred // to prevent Nasty Surprises. shunique_lock(mutex_type& m, std::defer_lock_t) noexcept : m(&m), o(ownership::none) { } shunique_lock(mutex_type& m, acquire_unique_t) : m(&m), o(ownership::none) { lock(); } shunique_lock(mutex_type& m, acquire_shared_t) : m(&m), o(ownership::none) { lock_shared(); } template<typename AcquireType> shunique_lock(mutex_type& m, AcquireType at, std::try_to_lock_t) : m(&m), o(ownership::none) { try_lock(at); } shunique_lock(mutex_type& m, acquire_unique_t, std::adopt_lock_t) : m(&m), o(ownership::unique) { // You'd better actually have a lock, or I will find you and I // will hunt you down. 
} shunique_lock(mutex_type& m, acquire_shared_t, std::adopt_lock_t) : m(&m), o(ownership::shared) { } template<typename AcquireType, typename Clock, typename Duration> shunique_lock(mutex_type& m, AcquireType at, const std::chrono::time_point<Clock, Duration>& t) : m(&m), o(ownership::none) { try_lock_until(at, t); } template<typename AcquireType, typename Rep, typename Period> shunique_lock(mutex_type& m, AcquireType at, const std::chrono::duration<Rep, Period>& dur) : m(&m), o(ownership::none) { try_lock_for(at, dur); } ~shunique_lock() { switch (o) { case ownership::none: return; case ownership::unique: m->unlock(); break; case ownership::shared: m->unlock_shared(); break; } } shunique_lock(shunique_lock const&) = delete; shunique_lock& operator=(shunique_lock const&) = delete; shunique_lock(shunique_lock&& l) noexcept : shunique_lock() { swap(l); } shunique_lock(unique_lock_type&& l) noexcept { if (l.owns_lock()) o = ownership::unique; else o = ownership::none; m = l.release(); } shunique_lock(shared_lock_type&& l) noexcept { if (l.owns_lock()) o = ownership::shared; else o = ownership::none; m = l.release(); } shunique_lock& operator=(shunique_lock&& l) noexcept { shunique_lock(std::move(l)).swap(*this); return *this; } shunique_lock& operator=(unique_lock_type&& l) noexcept { shunique_lock(std::move(l)).swap(*this); return *this; } shunique_lock& operator=(shared_lock_type&& l) noexcept { shunique_lock(std::move(l)).swap(*this); return *this; } void lock() { lockable(); m->lock(); o = ownership::unique; } void lock_shared() { lockable(); m->lock_shared(); o = ownership::shared; } void lock(ceph::acquire_unique_t) { lock(); } void lock(ceph::acquire_shared_t) { lock_shared(); } bool try_lock() { lockable(); if (m->try_lock()) { o = ownership::unique; return true; } return false; } bool try_lock_shared() { lockable(); if (m->try_lock_shared()) { o = ownership::shared; return true; } return false; } bool try_lock(ceph::acquire_unique_t) { return try_lock(); } 
bool try_lock(ceph::acquire_shared_t) { return try_lock_shared(); } template<typename Rep, typename Period> bool try_lock_for(const std::chrono::duration<Rep, Period>& dur) { lockable(); if (m->try_lock_for(dur)) { o = ownership::unique; return true; } return false; } template<typename Rep, typename Period> bool try_lock_shared_for(const std::chrono::duration<Rep, Period>& dur) { lockable(); if (m->try_lock_shared_for(dur)) { o = ownership::shared; return true; } return false; } template<typename Rep, typename Period> bool try_lock_for(ceph::acquire_unique_t, const std::chrono::duration<Rep, Period>& dur) { return try_lock_for(dur); } template<typename Rep, typename Period> bool try_lock_for(ceph::acquire_shared_t, const std::chrono::duration<Rep, Period>& dur) { return try_lock_shared_for(dur); } template<typename Clock, typename Duration> bool try_lock_until(const std::chrono::time_point<Clock, Duration>& time) { lockable(); if (m->try_lock_until(time)) { o = ownership::unique; return true; } return false; } template<typename Clock, typename Duration> bool try_lock_shared_until(const std::chrono::time_point<Clock, Duration>& time) { lockable(); if (m->try_lock_shared_until(time)) { o = ownership::shared; return true; } return false; } template<typename Clock, typename Duration> bool try_lock_until(ceph::acquire_unique_t, const std::chrono::time_point<Clock, Duration>& time) { return try_lock_until(time); } template<typename Clock, typename Duration> bool try_lock_until(ceph::acquire_shared_t, const std::chrono::time_point<Clock, Duration>& time) { return try_lock_shared_until(time); } // Only have a single unlock method. Otherwise we'd be building an // Acme lock class suitable only for ravenous coyotes desparate to // devour a road runner. It would be bad. It would be disgusting. It // would be infelicitous as heck. 
It would leave our developers in a // state of seeming safety unaware of the yawning chasm of failure // that had opened beneath their feet that would soon transition // into a sickening realization of the error they made and a brief // moment of blinking self pity before their program hurled itself // into undefined behaviour and plummeted up the stack with core // dumps trailing behind it. void unlock() { switch (o) { case ownership::none: throw std::system_error((int)std::errc::resource_deadlock_would_occur, std::generic_category()); break; case ownership::unique: m->unlock(); break; case ownership::shared: m->unlock_shared(); break; } o = ownership::none; } // Setters void swap(shunique_lock& u) noexcept { std::swap(m, u.m); std::swap(o, u.o); } mutex_type* release() noexcept { o = ownership::none; mutex_type* tm = m; m = nullptr; return tm; } // Ideally I'd rather make a move constructor for std::unique_lock // that took a shunique_lock, but obviously I can't. unique_lock_type release_to_unique() { if (o == ownership::unique) { o = ownership::none; unique_lock_type tu(*m, std::adopt_lock); m = nullptr; return tu; } else if (o == ownership::none) { unique_lock_type tu(*m, std::defer_lock); m = nullptr; return tu; } else if (m == nullptr) { return unique_lock_type(); } throw std::system_error((int)std::errc::operation_not_permitted, std::generic_category()); } shared_lock_type release_to_shared() { if (o == ownership::shared) { o = ownership::none; shared_lock_type ts(*m, std::adopt_lock); m = nullptr; return ts; } else if (o == ownership::none) { shared_lock_type ts(*m, std::defer_lock); m = nullptr; return ts; } else if (m == nullptr) { return shared_lock_type(); } throw std::system_error((int)std::errc::operation_not_permitted, std::generic_category()); return shared_lock_type(); } // Getters // Note that this returns true if the lock UNIQUE, it will return // false for shared bool owns_lock() const noexcept { return o == ownership::unique; } bool 
owns_lock_shared() const noexcept { return o == ownership::shared; } // If you want to make sure you have a lock of some sort on the // mutex, just treat as a bool. explicit operator bool() const noexcept { return o != ownership::none; } mutex_type* mutex() const noexcept { return m; } private: void lockable() const { if (m == nullptr) throw std::system_error((int)std::errc::operation_not_permitted, std::generic_category()); if (o != ownership::none) throw std::system_error((int)std::errc::resource_deadlock_would_occur, std::generic_category()); } mutex_type* m; enum struct ownership : uint8_t { none, unique, shared }; ownership o; }; } // namespace ceph namespace std { template<typename Mutex> void swap(ceph::shunique_lock<Mutex> sh1, ceph::shunique_lock<Mutex> sha) { sh1.swap(sha); } } // namespace std #endif // CEPH_COMMON_SHUNIQUE_LOCK_H
10,608
25.926396
77
h
null
ceph-main/src/common/signal.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab /* * Ceph - scalable distributed file system * * Copyright (C) 2011 New Dream Network * * This is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License version 2.1, as published by the Free Software * Foundation. See file COPYING. * */ #ifndef CEPH_COMMON_SIGNAL_H #define CEPH_COMMON_SIGNAL_H #include <signal.h> #include <string> // Returns a string showing the set of blocked signals for the calling thread. // Other threads may have a different set (this is per-thread thing). extern std::string signal_mask_to_str(); // Block a list of signals. If siglist == NULL, blocks all signals. // If not, the list is terminated with a 0 element. // // On success, stores the old set of blocked signals in // old_sigset. On failure, stores an invalid set of blocked signals in // old_sigset. extern void block_signals(const int *siglist, sigset_t *old_sigset); // Restore the set of blocked signals. Will not restore an invalid set of // blocked signals. extern void restore_sigset(const sigset_t *old_sigset); // Unblock all signals. On success, stores the old set of blocked signals in // old_sigset. On failure, stores an invalid set of blocked signals in // old_sigset. extern void unblock_all_signals(sigset_t *old_sigset); #endif
1,410
31.813953
78
h
null
ceph-main/src/common/snap_types.h
#ifndef __CEPH_SNAP_TYPES_H #define __CEPH_SNAP_TYPES_H #include "include/types.h" #include "include/utime.h" #include "include/fs_types.h" namespace ceph { class Formatter; } struct SnapRealmInfo { mutable ceph_mds_snap_realm h; std::vector<snapid_t> my_snaps; std::vector<snapid_t> prior_parent_snaps; // before parent_since SnapRealmInfo() { // FIPS zeroization audit 20191115: this memset is not security related. memset(&h, 0, sizeof(h)); } SnapRealmInfo(inodeno_t ino_, snapid_t created_, snapid_t seq_, snapid_t current_parent_since_) { // FIPS zeroization audit 20191115: this memset is not security related. memset(&h, 0, sizeof(h)); h.ino = ino_; h.created = created_; h.seq = seq_; h.parent_since = current_parent_since_; } inodeno_t ino() const { return inodeno_t(h.ino); } inodeno_t parent() const { return inodeno_t(h.parent); } snapid_t seq() const { return snapid_t(h.seq); } snapid_t parent_since() const { return snapid_t(h.parent_since); } snapid_t created() const { return snapid_t(h.created); } void encode(ceph::buffer::list& bl) const; void decode(ceph::buffer::list::const_iterator& bl); void dump(ceph::Formatter *f) const; static void generate_test_instances(std::list<SnapRealmInfo*>& o); }; WRITE_CLASS_ENCODER(SnapRealmInfo) // "new* snap realm info - carries additional metadata (last modified, // change_attr) and is version encoded. struct SnapRealmInfoNew { SnapRealmInfo info; utime_t last_modified; uint64_t change_attr; SnapRealmInfoNew() { } SnapRealmInfoNew(const SnapRealmInfo &info_, utime_t last_modified_, uint64_t change_attr_) { // FIPS zeroization audit 20191115: this memset is not security related. 
info = info_; last_modified = last_modified_; change_attr = change_attr_; } inodeno_t ino() const { return inodeno_t(info.h.ino); } inodeno_t parent() const { return inodeno_t(info.h.parent); } snapid_t seq() const { return snapid_t(info.h.seq); } snapid_t parent_since() const { return snapid_t(info.h.parent_since); } snapid_t created() const { return snapid_t(info.h.created); } void encode(ceph::buffer::list& bl) const; void decode(ceph::buffer::list::const_iterator& bl); void dump(ceph::Formatter *f) const; static void generate_test_instances(std::list<SnapRealmInfoNew*>& o); }; WRITE_CLASS_ENCODER(SnapRealmInfoNew) struct SnapContext { snapid_t seq; // 'time' stamp std::vector<snapid_t> snaps; // existent snaps, in descending order SnapContext() {} SnapContext(snapid_t s, const std::vector<snapid_t>& v) : seq(s), snaps(v) {} bool is_valid() const; void clear() { seq = 0; snaps.clear(); } bool empty() const { return seq == 0; } void encode(ceph::buffer::list& bl) const { using ceph::encode; encode(seq, bl); encode(snaps, bl); } void decode(ceph::buffer::list::const_iterator& bl) { using ceph::decode; decode(seq, bl); decode(snaps, bl); } void dump(ceph::Formatter *f) const; static void generate_test_instances(std::list<SnapContext*>& o); }; WRITE_CLASS_ENCODER(SnapContext) inline std::ostream& operator<<(std::ostream& out, const SnapContext& snapc) { return out << snapc.seq << "=" << snapc.snaps; } #if FMT_VERSION >= 90000 template <> struct fmt::formatter<SnapContext> : fmt::ostream_formatter {}; #endif #endif
3,395
29.321429
99
h
null
ceph-main/src/common/split.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab ft=cpp
/*
 * Ceph - scalable distributed file system
 *
 * Copyright (C) 2019 Red Hat, Inc.
 *
 * This is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License version 2.1, as published by the Free Software
 * Foundation.  See file COPYING.
 *
 */

#pragma once

#include <cstddef>
#include <iterator>
#include <string_view>

namespace ceph {

// a forward iterator over the parts of a split string
class spliterator {
  std::string_view str; // full string
  std::string_view delims; // delimiters

  using size_type = std::string_view::size_type;
  size_type pos = 0; // start position of current part
  std::string_view part; // view of current part

  // return the next part after the given position
  std::string_view next(size_type end) {
    pos = str.find_first_not_of(delims, end);
    if (pos == str.npos) {
      return {};
    }
    return str.substr(pos, str.find_first_of(delims, pos) - pos);
  }

 public:
  // types required by std::iterator_traits
  // use the conventional signed type for iterator distances
  using difference_type = std::ptrdiff_t;
  using value_type = std::string_view;
  using pointer = const value_type*;
  using reference = const value_type&;
  using iterator_category = std::forward_iterator_tag;

  spliterator() = default;
  spliterator(std::string_view str, std::string_view delims)
    : str(str), delims(delims), pos(0), part(next(0)) {}

  spliterator& operator++() {
    part = next(pos + part.size());
    return *this;
  }
  spliterator operator++(int) {
    spliterator tmp = *this;
    part = next(pos + part.size());
    return tmp;
  }

  reference operator*() const { return part; }
  pointer operator->() const { return &part; }

  friend bool operator==(const spliterator& lhs, const spliterator& rhs) {
    return lhs.part.data() == rhs.part.data()
        && lhs.part.size() == rhs.part.size();
  }
  friend bool operator!=(const spliterator& lhs, const spliterator& rhs) {
    return lhs.part.data() != rhs.part.data()
        || lhs.part.size() != rhs.part.size();
  }
};

// represents an immutable range of split string parts
//
// ranged-for loop example:
//
//   for (std::string_view s : split(input)) {
//     ...
//
// container initialization example:
//
//   auto parts = split(input);
//
//   std::vector<std::string> strings;
//   strings.assign(parts.begin(), parts.end());
//
class split {
  std::string_view str; // full string
  std::string_view delims; // delimiters
 public:
  split(std::string_view str, std::string_view delims = ";,= \t\n")
    : str(str), delims(delims) {}

  using iterator = spliterator;
  using const_iterator = spliterator;

  iterator begin() const { return {str, delims}; }
  const_iterator cbegin() const { return {str, delims}; }
  iterator end() const { return {}; }
  const_iterator cend() const { return {}; }
};

} // namespace ceph
2,899
25.851852
74
h
null
ceph-main/src/common/static_ptr.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab /* * Ceph - scalable distributed file system * * Copyright (C) 2017 Red Hat, Inc. * * This is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License version 2.1, as published by the Free Software * Foundation. See file COPYING. * */ #pragma once #include <cstddef> #include <utility> #include <type_traits> namespace ceph { // `static_ptr` // =========== // // It would be really nice if polymorphism didn't require a bunch of // mucking about with the heap. So let's build something where we // don't have to do that. // namespace _mem { // This, an operator function, is one of the canonical ways to do type // erasure in C++ so long as all operations can be done with subsets // of the same arguments (which is not true for function type erasure) // it's a pretty good one. enum class op { move, destroy, size }; template<typename T> static std::size_t op_fun(op oper, void* p1, void* p2) { auto me = static_cast<T*>(p1); switch (oper) { case op::move: new (p2) T(std::move(*me)); break; case op::destroy: me->~T(); break; case op::size: return sizeof(T); } return 0; } } // The thing itself! // // The default value for Size may be wrong in almost all cases. You // can change it to your heart's content. The upside is that you'll // just get a compile error and you can bump it up. // // I *recommend* having a size constant in header files (or perhaps a // using declaration, e.g. // ``` // using StaticFoo = static_ptr<Foo, sizeof(Blah)>` // ``` // in some header file that can be used multiple places) so that when // you create a new derived class with a larger size, you only have to // change it in one place. // template<typename Base, std::size_t Size = sizeof(Base)> class static_ptr { template<typename U, std::size_t S> friend class static_ptr; // Refuse to be set to anything with whose type we are // incompatible. 
Also never try to eat anything bigger than you are. // template<typename T, std::size_t S> constexpr static int create_ward() noexcept { static_assert(std::is_void_v<Base> || std::is_base_of_v<Base, std::decay_t<T>>, "Value to store must be a derivative of the base."); static_assert(S <= Size, "Value too large."); static_assert(std::is_void_v<Base> || !std::is_const<Base>{} || std::is_const_v<T>, "Cannot assign const pointer to non-const pointer."); return 0; } // Here we can store anything that has the same signature, which is // relevant to the multiple-versions for move/copy support that I // mentioned above. // size_t (*operate)(_mem::op, void*, void*); // This is mutable so that get and the dereference operators can be // const. Since we're modeling a pointer, we should preserve the // difference in semantics between a pointer-to-const and a const // pointer. // mutable typename std::aligned_storage<Size>::type buf; public: using element_type = Base; using pointer = Base*; // Empty static_ptr() noexcept : operate(nullptr) {} static_ptr(std::nullptr_t) noexcept : operate(nullptr) {} static_ptr& operator =(std::nullptr_t) noexcept { reset(); return *this; } ~static_ptr() noexcept { reset(); } // Since other pointer-ish types have it void reset() noexcept { if (operate) { operate(_mem::op::destroy, &buf, nullptr); operate = nullptr; } } // Set from another static pointer. 
// // Since the templated versions don't count for overriding the defaults static_ptr(static_ptr&& rhs) noexcept(std::is_nothrow_move_constructible_v<Base>) : operate(rhs.operate) { if (operate) { operate(_mem::op::move, &rhs.buf, &buf); } } template<typename U, std::size_t S> static_ptr(static_ptr<U, S>&& rhs) noexcept(std::is_nothrow_move_constructible_v<U>) : operate(rhs.operate) { create_ward<U, S>(); if (operate) { operate(_mem::op::move, &rhs.buf, &buf); } } static_ptr& operator =(static_ptr&& rhs) noexcept(std::is_nothrow_move_constructible_v<Base>) { reset(); if (rhs) { operate = rhs.operate; operate(_mem::op::move, &rhs.buf, &buf); } return *this; } template<typename U, std::size_t S> static_ptr& operator =(static_ptr<U, S>&& rhs) noexcept(std::is_nothrow_move_constructible_v<U>) { create_ward<U, S>(); reset(); if (rhs) { operate = rhs.operate; operate(_mem::op::move, &rhs.buf, &buf); } return *this; } bool operator ==(std::nullptr_t) const { return !operate; } // In-place construction! // // This is basically what you want, and I didn't include value // construction because in-place construction renders it // unnecessary. Also it doesn't fit the pointer idiom as well. // template<typename T, typename... Args> static_ptr(std::in_place_type_t<T>, Args&& ...args) noexcept(std::is_nothrow_constructible_v<T, Args...>) : operate(&_mem::op_fun<T>){ static_assert((!std::is_nothrow_copy_constructible_v<Base> || std::is_nothrow_copy_constructible_v<T>) && (!std::is_nothrow_move_constructible_v<Base> || std::is_nothrow_move_constructible_v<T>), "If declared type of static_ptr is nothrow " "move/copy constructible, then any " "type assigned to it must be as well. 
" "You can use reinterpret_pointer_cast " "to get around this limit, but don't " "come crying to me when the C++ " "runtime calls terminate()."); create_ward<T, sizeof(T)>(); new (&buf) T(std::forward<Args>(args)...); } // I occasionally get tempted to make an overload of the assignment // operator that takes a tuple as its right-hand side to provide // arguments. // template<typename T, typename... Args> void emplace(Args&& ...args) noexcept(std::is_nothrow_constructible_v<T, Args...>) { create_ward<T, sizeof(T)>(); reset(); operate = &_mem::op_fun<T>; new (&buf) T(std::forward<Args>(args)...); } // Access! Base* get() const noexcept { return operate ? reinterpret_cast<Base*>(&buf) : nullptr; } template<typename U = Base> std::enable_if_t<!std::is_void_v<U>, Base*> operator->() const noexcept { return get(); } template<typename U = Base> std::enable_if_t<!std::is_void_v<U>, Base&> operator *() const noexcept { return *get(); } operator bool() const noexcept { return !!operate; } // Big wall of friendship // template<typename U, std::size_t Z, typename T, std::size_t S> friend static_ptr<U, Z> static_pointer_cast(const static_ptr<T, S>& p); template<typename U, std::size_t Z, typename T, std::size_t S> friend static_ptr<U, Z> static_pointer_cast(static_ptr<T, S>&& p); template<typename U, std::size_t Z, typename T, std::size_t S> friend static_ptr<U, Z> dynamic_pointer_cast(const static_ptr<T, S>& p); template<typename U, std::size_t Z, typename T, std::size_t S> friend static_ptr<U, Z> dynamic_pointer_cast(static_ptr<T, S>&& p); template<typename U, std::size_t Z, typename T, std::size_t S> friend static_ptr<U, Z> const_pointer_cast(const static_ptr<T, S>& p); template<typename U, std::size_t Z, typename T, std::size_t S> friend static_ptr<U, Z> const_pointer_cast(static_ptr<T, S>&& p); template<typename U, std::size_t Z, typename T, std::size_t S> friend static_ptr<U, Z> reinterpret_pointer_cast(const static_ptr<T, S>& p); template<typename U, std::size_t Z, 
typename T, std::size_t S> friend static_ptr<U, Z> reinterpret_pointer_cast(static_ptr<T, S>&& p); template<typename U, std::size_t Z, typename T, std::size_t S> friend static_ptr<U, Z> resize_pointer_cast(const static_ptr<T, S>& p); template<typename U, std::size_t Z, typename T, std::size_t S> friend static_ptr<U, Z> resize_pointer_cast(static_ptr<T, S>&& p); }; // These are all modeled after the same ones for shared pointer. // // Also I'm annoyed that the standard library doesn't have // *_pointer_cast overloads for a move-only unique pointer. It's a // nice idiom. Having to release and reconstruct is obnoxious. // template<typename U, std::size_t Z, typename T, std::size_t S> static_ptr<U, Z> static_pointer_cast(static_ptr<T, S>&& p) { static_assert(Z >= S, "Value too large."); static_ptr<U, Z> r; if (static_cast<U*>(p.get())) { p.operate(_mem::op::move, &p.buf, &r.buf); r.operate = p.operate; } return r; } // Here the conditional is actually important and ensures we have the // same behavior as dynamic_cast. // template<typename U, std::size_t Z, typename T, std::size_t S> static_ptr<U, Z> dynamic_pointer_cast(static_ptr<T, S>&& p) { static_assert(Z >= S, "Value too large."); static_ptr<U, Z> r; if (dynamic_cast<U*>(p.get())) { p.operate(_mem::op::move, &p.buf, &r.buf); r.operate = p.operate; } return r; } template<typename U, std::size_t Z, typename T, std::size_t S> static_ptr<U, Z> const_pointer_cast(static_ptr<T, S>&& p) { static_assert(Z >= S, "Value too large."); static_ptr<U, Z> r; if (const_cast<U*>(p.get())) { p.operate(_mem::op::move, &p.buf, &r.buf); r.operate = p.operate; } return r; } // I'm not sure if anyone will ever use this. I can imagine situations // where they might. It works, though! 
// template<typename U, std::size_t Z, typename T, std::size_t S> static_ptr<U, Z> reinterpret_pointer_cast(static_ptr<T, S>&& p) { static_assert(Z >= S, "Value too large."); static_ptr<U, Z> r; p.operate(_mem::op::move, &p.buf, &r.buf); r.operate = p.operate; return r; } // This is the only way to move from a bigger static pointer into a // smaller static pointer. The size of the total data stored in the // pointer is checked at runtime and if the destination size is large // enough, we copy it over. // // I follow cast semantics. Since this is a pointer-like type, it // returns a null value rather than throwing. template<typename U, std::size_t Z, typename T, std::size_t S> static_ptr<U, Z> resize_pointer_cast(static_ptr<T, S>&& p) { static_assert(std::is_same_v<U, T>, "resize_pointer_cast only changes size, not type."); static_ptr<U, Z> r; if (Z >= p.operate(_mem::op::size, &p.buf, nullptr)) { p.operate(_mem::op::move, &p.buf, &r.buf); r.operate = p.operate; } return r; } // Since `make_unique` and `make_shared` exist, we should follow their // lead. // template<typename Base, typename Derived = Base, std::size_t Size = sizeof(Derived), typename... Args> static_ptr<Base, Size> make_static(Args&& ...args) { return { std::in_place_type<Derived>, std::forward<Args>(args)... }; } }
10,890
30.845029
81
h
null
ceph-main/src/common/strescape.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
 * Ceph - scalable distributed file system
 *
 * Copyright (C) 2021 Red Hat, Inc.
 *
 * This is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License version 2.1, as published by the Free Software
 * Foundation.  See file COPYING.
 *
 */

#ifndef CEPH_STRESCAPE_H
#define CEPH_STRESCAPE_H

#include <algorithm>
#include <ostream>
#include <string>
#include <string_view>

#include <ctype.h>

// Render a (possibly binary) byte string printably: every byte that is
// not alphanumeric or punctuation becomes '.'.  If maxlen is nonzero and
// the input is at least maxlen bytes, the output is truncated (with a
// minimum budget of 8 bytes) and suffixed with "...".
inline std::string binstrprint(std::string_view sv, size_t maxlen=0)
{
  std::string s;
  if (maxlen == 0 || sv.size() < maxlen) {
    s = std::string(sv);
  } else {
    maxlen = std::max<size_t>(8, maxlen);
    s = std::string(sv.substr(0, maxlen-3)) + "...";
  }
  std::replace_if(s.begin(), s.end(), [](char c) {
      // cast through unsigned char: passing a negative char (bytes >=
      // 0x80 where char is signed) to isalnum()/ispunct() is undefined
      // behavior
      auto b = static_cast<unsigned char>(c);
      return !(isalnum(b) || ispunct(b));
    }, '.');
  return s;
}

#endif
923
23.315789
94
h
null
ceph-main/src/common/strtol.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
 * Ceph - scalable distributed file system
 *
 * Copyright (C) 2011 New Dream Network
 *
 * This is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License version 2.1, as published by the Free Software
 * Foundation.  See file COPYING.
 *
 */

#ifndef CEPH_COMMON_STRTOL_H
#define CEPH_COMMON_STRTOL_H

#include <charconv>
#include <cinttypes>
#include <cstdlib>
#include <optional>
#include <string>
#include <string_view>
#include <system_error>
#include <type_traits>

namespace ceph {
// Thin wrappers around std::from_chars.
//
// Unlike strtol and friends these do not require NUL-terminated input,
// and they expose a friendly string_view-in / optional-out interface.
// (GCC < 11 lacks the floating point from_chars overloads, hence the
// integral-only constraint.)

// Parse all of 's' as a number in the given base.  Returns the value on
// success; returns an empty optional on malformed input OR on trailing
// characters.
template<typename T>
auto parse(std::string_view s, int base = 10)
  -> std::enable_if_t<std::is_integral_v<T>, std::optional<T>>
{
  T value;
  const char* const first = s.data();
  const char* const last = first + s.size();
  const auto res = std::from_chars(first, last, value, base);
  if (res.ec != std::errc{} || res.ptr != last) {
    return std::nullopt;
  }
  return value;
}

// Like parse(), but trailing characters are allowed: on success the
// parsed prefix is trimmed from 's', which is set to an empty view when
// the number consumed the whole string.
template<typename T>
auto consume(std::string_view& s, int base = 10)
  -> std::enable_if_t<std::is_integral_v<T>, std::optional<T>>
{
  T value;
  const char* const first = s.data();
  const char* const last = first + s.size();
  const auto res = std::from_chars(first, last, value, base);
  if (res.ec != std::errc{}) {
    return std::nullopt;
  }
  if (res.ptr == last) {
    s = std::string_view{};
  } else {
    s.remove_prefix(res.ptr - first);
  }
  return value;
}
} // namespace ceph

bool strict_strtob(const char* str, std::string *err);

long long strict_strtoll(std::string_view str, int base, std::string *err);

int strict_strtol(std::string_view str, int base, std::string *err);

double strict_strtod(std::string_view str, std::string *err);

float strict_strtof(std::string_view str, std::string *err);

uint64_t strict_iecstrtoll(std::string_view str, std::string *err);

template<typename T>
T strict_iec_cast(std::string_view str, std::string *err);

template<typename T>
T strict_si_cast(std::string_view str, std::string *err);

/* On enter buf points to the end of the buffer, e.g. where the least
 * significant digit of the input number will be printed. Returns pointer to
 * where the most significant digit were printed, including zero padding.
 * Does NOT add zero at the end of buffer, this is responsibility of the caller.
 */
template<typename T, const unsigned base = 10, const unsigned width = 1>
static inline char* ritoa(T u, char *buf)
{
  static_assert(std::is_unsigned_v<T>, "signed types are not supported");
  static_assert(base <= 16, "extend character map below to support higher bases");
  constexpr const char* digit_chars = "0123456789abcdef";
  unsigned emitted = 0;
  // emit digits least-significant first, walking backwards through buf
  for (; u != 0; u /= base) {
    *--buf = digit_chars[u % base];
    ++emitted;
  }
  // left-pad with zeros up to the requested minimum width
  while (emitted++ < width) {
    *--buf = '0';
  }
  return buf;
}

#endif
3,318
28.371681
82
h
null
ceph-main/src/common/subsys.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
 * Ceph - scalable distributed file system
 *
 * Copyright (C) 2004-2006 Sage Weil <sage@newdream.net>
 *
 * This is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License version 2.1, as published by the Free Software
 * Foundation.  See file COPYING.
 *
 */

/**
 * This header describes the subsystems (each one gets a "--debug-<subsystem>"
 * log verbosity setting), along with their default verbosities.
 *
 * This is an X-macro table: it is #included repeatedly with different
 * definitions of SUBSYS()/DEFAULT_SUBSYS().  The two numeric arguments
 * become the subsystem's default log_level and gather_level (see the
 * ceph_subsys_item_t consumers in common/subsys_types.h).  There is no
 * include guard on purpose.
 */

DEFAULT_SUBSYS(0, 5)
SUBSYS(lockdep, 0, 1)
SUBSYS(context, 0, 1)
SUBSYS(crush, 1, 1)
SUBSYS(mds, 1, 5)
SUBSYS(mds_balancer, 1, 5)
SUBSYS(mds_locker, 1, 5)
SUBSYS(mds_log, 1, 5)
SUBSYS(mds_log_expire, 1, 5)
SUBSYS(mds_migrator, 1, 5)
SUBSYS(buffer, 0, 1)
SUBSYS(timer, 0, 1)
SUBSYS(filer, 0, 1)
SUBSYS(striper, 0, 1)
SUBSYS(objecter, 0, 1)
SUBSYS(rados, 0, 5)
SUBSYS(rbd, 0, 5)
SUBSYS(rbd_mirror, 0, 5)
SUBSYS(rbd_replay, 0, 5)
SUBSYS(rbd_pwl, 0, 5)
SUBSYS(journaler, 0, 5)
SUBSYS(objectcacher, 0, 5)
SUBSYS(immutable_obj_cache, 0, 5)
SUBSYS(client, 0, 5)
SUBSYS(osd, 1, 5)
SUBSYS(optracker, 0, 5)
SUBSYS(objclass, 0, 5)
SUBSYS(filestore, 1, 3)
SUBSYS(journal, 1, 3)
SUBSYS(ms, 0, 0)
SUBSYS(mon, 1, 5)
SUBSYS(monc, 0, 10)
SUBSYS(paxos, 1, 5)
SUBSYS(tp, 0, 5)
SUBSYS(auth, 1, 5)
SUBSYS(crypto, 1, 5)
SUBSYS(finisher, 1, 1)
SUBSYS(reserver, 1, 1)
SUBSYS(heartbeatmap, 1, 5)
SUBSYS(perfcounter, 1, 5)
SUBSYS(rgw, 1, 5)                 // log level for the Rados gateway
SUBSYS(rgw_sync, 1, 5)
SUBSYS(rgw_datacache, 1, 5)
SUBSYS(rgw_access, 1, 5)
SUBSYS(rgw_dbstore, 1, 5)
SUBSYS(rgw_flight, 1, 5)
SUBSYS(javaclient, 1, 5)
SUBSYS(asok, 1, 5)
SUBSYS(throttle, 1, 1)
SUBSYS(refs, 0, 0)
SUBSYS(compressor, 1, 5)
SUBSYS(bluestore, 1, 5)
SUBSYS(bluefs, 1, 5)
SUBSYS(bdev, 1, 3)
SUBSYS(kstore, 1, 5)
SUBSYS(rocksdb, 4, 5)
SUBSYS(fuse, 1, 5)
SUBSYS(mgr, 2, 5)
SUBSYS(mgrc, 1, 5)
SUBSYS(dpdk, 1, 5)
SUBSYS(eventtrace, 1, 5)
SUBSYS(prioritycache, 1, 5)
SUBSYS(test, 0, 5)
SUBSYS(cephfs_mirror, 0, 5)
SUBSYS(cephsqlite, 0, 5)
SUBSYS(seastore, 0, 5)            // logs above seastore tm
SUBSYS(seastore_onode, 0, 5)
SUBSYS(seastore_odata, 0, 5)
SUBSYS(seastore_omap, 0, 5)
SUBSYS(seastore_tm, 0, 5)         // logs below seastore tm
SUBSYS(seastore_t, 0, 5)
SUBSYS(seastore_cleaner, 0, 5)
SUBSYS(seastore_epm, 0, 5)
SUBSYS(seastore_lba, 0, 5)
SUBSYS(seastore_fixedkv_tree, 0, 5)
SUBSYS(seastore_cache, 0, 5)
SUBSYS(seastore_journal, 0, 5)
SUBSYS(seastore_device, 0, 5)
SUBSYS(seastore_backref, 0, 5)
SUBSYS(alienstore, 0, 5)
SUBSYS(mclock, 1, 5)
SUBSYS(cyanstore, 0, 5)
SUBSYS(ceph_exporter, 1, 5)
SUBSYS(memstore, 1, 5)
// *********************************************************************
// Developers should update /doc/rados/troubleshooting/log-and-debug.rst
// when adding or removing a subsystem accordingly.
// *********************************************************************
2,947
25.8
78
h
null
ceph-main/src/common/subsys_types.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
 * Ceph - scalable distributed file system
 *
 * Copyright (C) 2018 Red Hat Inc.
 *
 * This is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License version 2.1, as published by the Free Software
 * Foundation.  See file COPYING.
 *
 */

#ifndef CEPH_SUBSYS_TYPES_H
#define CEPH_SUBSYS_TYPES_H

#include <algorithm>
#include <array>
#include <cstdint>

// One enumerator per logging subsystem, generated by expanding the
// X-macro table in common/subsys.h.  The default subsystem comes first
// (its SUBSYS expansion is empty here) and ceph_subsys_max marks the
// count.
enum ceph_subsys_id_t {
  ceph_subsys_,   // default
#define SUBSYS(name, log, gather) \
  ceph_subsys_##name,
#define DEFAULT_SUBSYS(log, gather)
#include "common/subsys.h"
#undef SUBSYS
#undef DEFAULT_SUBSYS
  ceph_subsys_max
};

// Number of subsystems, usable in constant expressions (array sizes).
constexpr static std::size_t ceph_subsys_get_num() {
  return static_cast<std::size_t>(ceph_subsys_max);
}

// Per-subsystem metadata: name plus the two default verbosity levels
// from the subsys.h table.
struct ceph_subsys_item_t {
  const char* name;
  uint8_t log_level;
  uint8_t gather_level;
};

// Materialize the whole subsys.h table as a constexpr array, indexed by
// ceph_subsys_id_t.  The DEFAULT_SUBSYS row is named "none".
constexpr static std::array<ceph_subsys_item_t, ceph_subsys_get_num()>
ceph_subsys_get_as_array() {
#define SUBSYS(name, log, gather) \
  ceph_subsys_item_t{ #name, log, gather },
#define DEFAULT_SUBSYS(log, gather) \
  ceph_subsys_item_t{ "none", log, gather },

  return {
#include "common/subsys.h"
  };
#undef SUBSYS
#undef DEFAULT_SUBSYS
}

// Highest of the two default levels for subsystem 'subidx'.
constexpr static std::uint8_t
ceph_subsys_get_max_default_level(const std::size_t subidx) {
  const auto item = ceph_subsys_get_as_array()[subidx];
  return std::max(item.log_level, item.gather_level);
}

// Compile time-capable version of std::strlen. Resorting to own
// implementation only because C++17 doesn't mandate constexpr
// on the standard one.
constexpr static std::size_t strlen_ct(const char* const s) {
  std::size_t l = 0;
  while (s[l] != '\0') {
    ++l;
  }
  return l;
}

// Length of the longest subsystem name, computed at compile time
// (useful for aligned log formatting).
constexpr static std::size_t ceph_subsys_max_name_length() {
  return std::max({
#define SUBSYS(name, log, gather) \
  strlen_ct(#name),
#define DEFAULT_SUBSYS(log, gather) \
  strlen_ct("none"),
#include "common/subsys.h"
#undef SUBSYS
#undef DEFAULT_SUBSYS
  });
}

#endif // CEPH_SUBSYS_TYPES_H
2,094
22.806818
70
h
null
ceph-main/src/common/sync_filesystem.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab /* * Ceph - scalable distributed file system * * Copyright (C) 2011 New Dream Network * * This is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License version 2.1, as published by the Free Software * Foundation. See file COPYING. * */ #ifndef CEPH_SYNC_FILESYSTEM_H #define CEPH_SYNC_FILESYSTEM_H #include <unistd.h> #if defined(__linux__) #include <sys/ioctl.h> #include <syscall.h> #include "os/fs/btrfs_ioctl.h" #endif inline int sync_filesystem(int fd) { /* On Linux, newer versions of glibc have a function called syncfs that * performs a sync on only one filesystem. If we don't have this call, we * have to fall back on sync(), which synchronizes every filesystem on the * computer. */ #ifdef HAVE_SYS_SYNCFS if (syncfs(fd) == 0) return 0; #elif defined(SYS_syncfs) if (syscall(SYS_syncfs, fd) == 0) return 0; #elif defined(__NR_syncfs) if (syscall(__NR_syncfs, fd) == 0) return 0; #endif #if defined(HAVE_SYS_SYNCFS) || defined(SYS_syncfs) || defined(__NR_syncfs) else if (errno == ENOSYS) { sync(); return 0; } else { return -errno; } #else sync(); return 0; #endif } #endif
1,314
22.070175
76
h
null
ceph-main/src/common/tracer.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#pragma once

#include "acconfig.h"
#include "include/buffer.h"

// Two builds of the same tracing interface: with HAVE_JAEGER the types
// are aliases/wrappers over OpenTelemetry, without it they are inert
// stubs so call sites compile unchanged and optimize away.
#ifdef HAVE_JAEGER
#include "opentelemetry/trace/provider.h"

using jspan = opentelemetry::nostd::shared_ptr<opentelemetry::trace::Span>;
using jspan_context = opentelemetry::trace::SpanContext;
using jspan_attribute = opentelemetry::common::AttributeValue;

namespace tracing {

class Tracer {
 private:
  const static opentelemetry::nostd::shared_ptr<opentelemetry::trace::Tracer> noop_tracer;
  const static jspan noop_span;
  opentelemetry::nostd::shared_ptr<opentelemetry::trace::Tracer> tracer;

 public:
  Tracer() = default;
  Tracer(opentelemetry::nostd::string_view service_name);

  void init(opentelemetry::nostd::string_view service_name);

  bool is_enabled() const;
  // creates and returns a new span with `trace_name`
  // this span represents a trace, since it has no parent.
  jspan start_trace(opentelemetry::nostd::string_view trace_name);
  // creates and returns a new span with `trace_name`
  // if false is given to `trace_is_enabled` param, noop span will be returned
  jspan start_trace(opentelemetry::nostd::string_view trace_name, bool trace_is_enabled);
  // creates and returns a new span with `span_name` which parent span is `parent_span'
  jspan add_span(opentelemetry::nostd::string_view span_name, const jspan& parent_span);
  // creates and return a new span with `span_name`
  // the span is added to the trace which it's context is `parent_ctx`.
  // parent_ctx contains the required information of the trace.
  jspan add_span(opentelemetry::nostd::string_view span_name, const jspan_context& parent_ctx);
};

// Serialize/deserialize a span context so a trace can cross the wire.
void encode(const jspan_context& span, ceph::buffer::list& bl, uint64_t f = 0);
void decode(jspan_context& span_ctx, ceph::buffer::list::const_iterator& bl);
} // namespace tracing

#else  // !HAVE_JAEGER

#include <string_view>

// Swallow-anything attribute value stand-in for
// opentelemetry::common::AttributeValue.
class Value {
 public:
  template <typename T> Value(T val) {}
};

using jspan_attribute = Value;

// Stub for opentelemetry::trace::SpanContext.
struct jspan_context {
  jspan_context() {}
  jspan_context(bool sampled_flag, bool is_remote) {}
};

// No-op span: mirrors the subset of the opentelemetry Span interface
// that Ceph call sites use.
struct span_stub {
  jspan_context _ctx;
  template <typename T>
  void SetAttribute(std::string_view key, const T& value) const noexcept {}
  void AddEvent(std::string_view) {}
  void AddEvent(std::string_view, std::initializer_list<std::pair<std::string_view, jspan_attribute>> fields) {}
  template <typename T> void AddEvent(std::string_view name, const T& fields = {}) {}
  const jspan_context& GetContext() { return _ctx; }
  void UpdateName(std::string_view) {}
  bool IsRecording() { return false; }
};

// Smart-pointer-like wrapper so `span->AddEvent(...)` and `if (span)`
// compile; always evaluates to false.
class jspan {
  span_stub span;
public:
  span_stub& operator*() { return span; }
  const span_stub& operator*() const { return span; }
  span_stub* operator->() { return &span; }
  const span_stub* operator->() const { return &span; }
  operator bool() const { return false; }
};

namespace tracing {

// No-op tracer matching the HAVE_JAEGER interface above.
struct Tracer {
  bool is_enabled() const { return false; }
  jspan start_trace(std::string_view, bool enabled = true) { return {}; }
  jspan add_span(std::string_view, const jspan&) { return {}; }
  jspan add_span(std::string_view span_name, const jspan_context& parent_ctx) { return {}; }
  void init(std::string_view service_name) {}
};

// Wire format stubs: encode/decode nothing when tracing is compiled out.
inline void encode(const jspan_context& span, bufferlist& bl, uint64_t f=0) {}
inline void decode(jspan_context& span_ctx, ceph::buffer::list::const_iterator& bl) {}
}

#endif // !HAVE_JAEGER
3,503
31.444444
112
h
null
ceph-main/src/common/utf8.c
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab /* * Ceph - scalable distributed file system * * Copyright (C) 2011 New Dream Network * * This is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License version 2.1, as published by the Free Software * Foundation. See file COPYING. * */ #include "common/utf8.h" #include <string.h> /* * http://www.unicode.org/versions/Unicode6.0.0/ch03.pdf - page 94 * * Table 3-7. Well-Formed UTF-8 Byte Sequences * * +--------------------+------------+-------------+------------+-------------+ * | Code Points | First Byte | Second Byte | Third Byte | Fourth Byte | * +--------------------+------------+-------------+------------+-------------+ * | U+0000..U+007F | 00..7F | | | | * +--------------------+------------+-------------+------------+-------------+ * | U+0080..U+07FF | C2..DF | 80..BF | | | * +--------------------+------------+-------------+------------+-------------+ * | U+0800..U+0FFF | E0 | A0..BF | 80..BF | | * +--------------------+------------+-------------+------------+-------------+ * | U+1000..U+CFFF | E1..EC | 80..BF | 80..BF | | * +--------------------+------------+-------------+------------+-------------+ * | U+D000..U+D7FF | ED | 80..9F | 80..BF | | * +--------------------+------------+-------------+------------+-------------+ * | U+E000..U+FFFF | EE..EF | 80..BF | 80..BF | | * +--------------------+------------+-------------+------------+-------------+ * | U+10000..U+3FFFF | F0 | 90..BF | 80..BF | 80..BF | * +--------------------+------------+-------------+------------+-------------+ * | U+40000..U+FFFFF | F1..F3 | 80..BF | 80..BF | 80..BF | * +--------------------+------------+-------------+------------+-------------+ * | U+100000..U+10FFFF | F4 | 80..8F | 80..BF | 80..BF | * +--------------------+------------+-------------+------------+-------------+ */ static int high_bits_set(int c) { int ret = 0; while 
(1) { if ((c & 0x80) != 0x080) break; c <<= 1; ++ret; } return ret; } /* Encode a 31-bit UTF8 code point to 'buf'. * Assumes buf is of size MAX_UTF8_SZ * Returns -1 on failure; number of bytes in the encoded value otherwise. */ int encode_utf8(unsigned long u, unsigned char *buf) { /* Unroll loop for common code points */ if (u <= 0x0000007F) { buf[0] = u; return 1; } else if (u <= 0x000007FF) { buf[0] = 0xC0 | (u >> 6); buf[1] = 0x80 | (u & 0x3F); return 2; } else if (u <= 0x0000FFFF) { buf[0] = 0xE0 | (u >> 12); buf[1] = 0x80 | ((u >> 6) & 0x3F); buf[2] = 0x80 | (u & 0x3F); return 3; } else if (u <= 0x001FFFFF) { buf[0] = 0xF0 | (u >> 18); buf[1] = 0x80 | ((u >> 12) & 0x3F); buf[2] = 0x80 | ((u >> 6) & 0x3F); buf[3] = 0x80 | (u & 0x3F); return 4; } else { /* Rare/illegal code points */ if (u <= 0x03FFFFFF) { for (int i = 4; i >= 1; --i) { buf[i] = 0x80 | (u & 0x3F); u >>= 6; } buf[0] = 0xF8 | u; return 5; } else if (u <= 0x7FFFFFFF) { for (int i = 5; i >= 1; --i) { buf[i] = 0x80 | (u & 0x3F); u >>= 6; } buf[0] = 0xFC | u; return 6; } return -1; } } /* * Decode a UTF8 character from an array of bytes. Return character code. * Upon error, return INVALID_UTF8_CHAR. */ unsigned long decode_utf8(unsigned char *buf, int nbytes) { unsigned long code; int i, j; if (nbytes <= 0) return INVALID_UTF8_CHAR; if (nbytes == 1) { if (buf[0] >= 0x80) return INVALID_UTF8_CHAR; return buf[0]; } i = high_bits_set(buf[0]); if (i != nbytes) return INVALID_UTF8_CHAR; code = buf[0] & (0xff >> i); for (j = 1; j < nbytes; ++j) { if ((buf[j] & 0xc0) != 0x80) return INVALID_UTF8_CHAR; code = (code << 6) | (buf[j] & 0x3f); } // Check for invalid code points if (code == 0xFFFE) return INVALID_UTF8_CHAR; if (code == 0xFFFF) return INVALID_UTF8_CHAR; if (code >= 0xD800 && code <= 0xDFFF) return INVALID_UTF8_CHAR; return code; } int check_utf8(const char *buf, int len) { /* * "char" is "signed" on x86 but "unsigned" on aarch64 by default. 
* Below code depends on signed/unsigned comparisons, define an * unsigned buffer explicitly to fix the gap. */ const unsigned char *bufu = (const unsigned char *)buf; int err_pos = 1; while (len) { int nbytes; unsigned char byte1 = bufu[0]; /* 00..7F */ if (byte1 <= 0x7F) { nbytes = 1; /* C2..DF, 80..BF */ } else if (len >= 2 && byte1 >= 0xC2 && byte1 <= 0xDF && (signed char)bufu[1] <= (signed char)0xBF) { nbytes = 2; } else if (len >= 3) { unsigned char byte2 = bufu[1]; /* Is byte2, byte3 between 0x80 ~ 0xBF */ int byte2_ok = (signed char)byte2 <= (signed char)0xBF; int byte3_ok = (signed char)bufu[2] <= (signed char)0xBF; if (byte2_ok && byte3_ok && /* E0, A0..BF, 80..BF */ ((byte1 == 0xE0 && byte2 >= 0xA0) || /* E1..EC, 80..BF, 80..BF */ (byte1 >= 0xE1 && byte1 <= 0xEC) || /* ED, 80..9F, 80..BF */ (byte1 == 0xED && byte2 <= 0x9F) || /* EE..EF, 80..BF, 80..BF */ (byte1 >= 0xEE && byte1 <= 0xEF))) { nbytes = 3; } else if (len >= 4) { /* Is byte4 between 0x80 ~ 0xBF */ int byte4_ok = (signed char)bufu[3] <= (signed char)0xBF; if (byte2_ok && byte3_ok && byte4_ok && /* F0, 90..BF, 80..BF, 80..BF */ ((byte1 == 0xF0 && byte2 >= 0x90) || /* F1..F3, 80..BF, 80..BF, 80..BF */ (byte1 >= 0xF1 && byte1 <= 0xF3) || /* F4, 80..8F, 80..BF, 80..BF */ (byte1 == 0xF4 && byte2 <= 0x8F))) { nbytes = 4; } else { return err_pos; } } else { return err_pos; } } else { return err_pos; } len -= nbytes; err_pos += nbytes; bufu += nbytes; } return 0; } int check_utf8_cstr(const char *buf) { return check_utf8(buf, strlen(buf)); } int is_control_character(int c) { return (((c != 0) && (c < 0x20)) || (c == 0x7f)); } int check_for_control_characters(const char *buf, int len) { int i; for (i = 0; i < len; ++i) { if (is_control_character((int)(unsigned char)buf[i])) { return i + 1; } } return 0; } int check_for_control_characters_cstr(const char *buf) { return check_for_control_characters(buf, strlen(buf)); }
6,560
26.800847
79
c
null
ceph-main/src/common/utf8.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab /* * Ceph - scalable distributed file system * * Copyright (C) 2011 New Dream Network * * This is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License version 2.1, as published by the Free Software * Foundation. See file COPYING. * */ #ifndef CEPH_COMMON_UTF8_H #define CEPH_COMMON_UTF8_H #define MAX_UTF8_SZ 6 #define INVALID_UTF8_CHAR 0xfffffffful #ifdef __cplusplus extern "C" { #endif /* Checks if a buffer is valid UTF-8. * Returns 0 if it is, and one plus the offset of the first invalid byte * if it is not. */ int check_utf8(const char *buf, int len); /* Checks if a null-terminated string is valid UTF-8. * Returns 0 if it is, and one plus the offset of the first invalid byte * if it is not. */ int check_utf8_cstr(const char *buf); /* Returns true if 'ch' is a control character. * We do count newline as a control character, but not NULL. */ int is_control_character(int ch); /* Checks if a buffer contains control characters. */ int check_for_control_characters(const char *buf, int len); /* Checks if a null-terminated string contains control characters. */ int check_for_control_characters_cstr(const char *buf); /* Encode a 31-bit UTF8 code point to 'buf'. * Assumes buf is of size MAX_UTF8_SZ * Returns -1 on failure; number of bytes in the encoded value otherwise. */ int encode_utf8(unsigned long u, unsigned char *buf); /* * Decode a UTF8 character from an array of bytes. Return character code. * Upon error, return INVALID_UTF8_CHAR. */ unsigned long decode_utf8(unsigned char *buf, int nbytes); #ifdef __cplusplus } #endif #endif
1,754
25.19403
73
h
null
ceph-main/src/common/valgrind.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #ifndef CEPH_VALGRIND_H #define CEPH_VALGRIND_H #include "acconfig.h" #if defined(HAVE_VALGRIND_HELGRIND_H) && !defined(NDEBUG) #include <valgrind/helgrind.h> #else #define ANNOTATE_HAPPENS_AFTER(x) (void)0 #define ANNOTATE_HAPPENS_BEFORE_FORGET_ALL(x) (void)0 #define ANNOTATE_HAPPENS_BEFORE(x) (void)0 #define ANNOTATE_BENIGN_RACE_SIZED(address, size, description) (void)0 #endif #endif // CEPH_VALGRIND_H
544
26.25
72
h
null
ceph-main/src/common/version.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab /* * Ceph - scalable distributed file system * * Copyright (C) 2011 New Dream Network * * This is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License version 2.1, as published by the Free Software * Foundation. See file COPYING. * */ #ifndef CEPH_COMMON_VERSION_H #define CEPH_COMMON_VERSION_H #include <string> // Return a string describing the Ceph version const char *ceph_version_to_str(); // Return a string with the Ceph release const char *ceph_release_to_str(void); // Return a string describing the git version const char *git_version_to_str(void); // Return a formatted string describing the ceph and git versions std::string const pretty_version_to_str(void); // Release type ("dev", "rc", or "stable") const char *ceph_release_type(void); #endif
940
25.138889
70
h
null
ceph-main/src/common/weighted_shuffle.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#pragma once

#include <algorithm>
#include <iterator>
#include <random>

// Weighted random shuffle: permutes [first, last) so that at each
// position the next element is chosen from the remaining ones with
// probability proportional to its weight in [weight_first, weight_last).
// The weight range is permuted alongside the element range so the
// pairing stays consistent.
//
// Iterative rewrite of the original tail recursion: behavior (the
// sequence of distribution draws and swaps) is identical, but the O(n)
// recursion depth -- a stack-overflow risk for large ranges -- is gone.
template <class RandomIt, class DistIt, class URBG>
void weighted_shuffle(RandomIt first, RandomIt last,
		      DistIt weight_first, DistIt weight_last,
		      URBG &&g)
{
  for (; first != last; ++first, ++weight_first) {
    // Draw an index into the remaining weights; index 0 keeps the
    // current element in place, anything else swaps it (and its
    // weight) into the current slot.
    std::discrete_distribution d{weight_first, weight_last};
    if (auto n = d(g); n > 0) {
      std::iter_swap(first, std::next(first, n));
      std::iter_swap(weight_first, std::next(weight_first, n));
    }
  }
}
692
25.653846
79
h
null
ceph-main/src/common/zipkin_trace.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#ifndef COMMON_ZIPKIN_TRACE_H
#define COMMON_ZIPKIN_TRACE_H

#include "acconfig.h"
#include "include/encoding.h"

#ifdef WITH_BLKIN
#include <ztracer.hpp>
#else // !WITH_BLKIN

// add stubs for noop Trace and Endpoint

// match the "real" struct
struct blkin_trace_info {
    int64_t trace_id;
    int64_t span_id;
    int64_t parent_span_id;
};

namespace ZTracer {
static inline int ztrace_init() { return 0; }

// No-op stand-in for the blkin Endpoint; accepts and discards all data.
class Endpoint {
 public:
  Endpoint(const char *name) {}
  Endpoint(const char *ip, int port, const char *name) {}

  void copy_ip(const std::string &newip) {}
  void copy_name(const std::string &newname) {}
  void copy_address_from(const Endpoint *endpoint) {}
  void share_address_from(const Endpoint *endpoint) {}
  void set_port(int p) {}
};

// No-op stand-in for the blkin Trace; never valid, records nothing.
class Trace {
 public:
  Trace() {}
  Trace(const char *name, const Endpoint *ep, const Trace *parent = NULL) {}
  Trace(const char *name, const Endpoint *ep,
        const blkin_trace_info *i, bool child=false) {}

  bool valid() const { return false; }
  operator bool() const { return false; }

  int init(const char *name, const Endpoint *ep, const Trace *parent = NULL) {
    return 0;
  }
  int init(const char *name, const Endpoint *ep,
           const blkin_trace_info *i, bool child=false) {
    return 0;
  }

  void copy_name(const std::string &newname) {}

  const blkin_trace_info* get_info() const { return NULL; }
  void set_info(const blkin_trace_info *i) {}

  void keyval(const char *key, const char *val) const {}
  void keyval(const char *key, int64_t val) const {}
  void keyval(const char *key, const char *val, const Endpoint *ep) const {}
  void keyval(const char *key, int64_t val, const Endpoint *ep) const {}

  void event(const char *event) const {}
  void event(const char *event, const Endpoint *ep) const {}
};
} // namespace ZTracer
#endif // !WITH_BLKIN

// Wire format for passing trace info between daemons; identical in both
// build flavors so mixed clusters interoperate.
static inline void encode(const blkin_trace_info& b, ceph::buffer::list& bl)
{
  using ceph::encode;
  encode(b.trace_id, bl);
  encode(b.span_id, bl);
  encode(b.parent_span_id, bl);
}

static inline void decode(blkin_trace_info& b, ceph::buffer::list::const_iterator& p)
{
  using ceph::decode;
  decode(b.trace_id, p);
  decode(b.span_id, p);
  decode(b.parent_span_id, p);
}

#endif // COMMON_ZIPKIN_TRACE_H
2,365
24.170213
85
h
null
ceph-main/src/common/async/bind_handler.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
 * Ceph - scalable distributed file system
 *
 * Copyright (C) 2018 Red Hat
 *
 * This is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License version 2.1, as published by the Free Software
 * Foundation.  See file COPYING.
 *
 */

#ifndef CEPH_ASYNC_BIND_HANDLER_H
#define CEPH_ASYNC_BIND_HANDLER_H

#include <tuple>
#include <boost/asio.hpp>

namespace ceph::async {

/**
 * A bound completion handler for use with boost::asio.
 *
 * A completion handler wrapper that allows a tuple of arguments to be forwarded
 * to the original Handler. This is intended for use with boost::asio functions
 * like defer(), dispatch() and post() which expect handlers which are callable
 * with no arguments.
 *
 * The original Handler's associated allocator and executor are maintained.
 *
 * @see bind_handler
 */
template <typename Handler, typename Tuple>
struct CompletionHandler {
  Handler handler;
  Tuple args;

  CompletionHandler(Handler&& handler, Tuple&& args)
    : handler(std::move(handler)),
      args(std::move(args))
  {}

  // lvalue/const/rvalue call overloads; the rvalue one moves the
  // handler and its bound arguments into the invocation.
  void operator()() & {
    std::apply(handler, args);
  }
  void operator()() const & {
    std::apply(handler, args);
  }
  void operator()() && {
    std::apply(std::move(handler), std::move(args));
  }

  // forward the wrapped handler's associated allocator
  using allocator_type = boost::asio::associated_allocator_t<Handler>;
  allocator_type get_allocator() const noexcept {
    return boost::asio::get_associated_allocator(handler);
  }
};

} // namespace ceph::async

namespace boost::asio {

// specialize boost::asio::associated_executor<> for CompletionHandler
// so the wrapper also forwards the wrapped handler's executor
template <typename Handler, typename Tuple, typename Executor>
struct associated_executor<ceph::async::CompletionHandler<Handler, Tuple>, Executor> {
  using type = boost::asio::associated_executor_t<Handler, Executor>;

  static type get(const ceph::async::CompletionHandler<Handler, Tuple>& handler,
                  const Executor& ex = Executor()) noexcept {
    return boost::asio::get_associated_executor(handler.handler, ex);
  }
};

} // namespace boost::asio

namespace ceph::async {

/**
 * Returns a wrapped completion handler with bound arguments.
 *
 * Binds the given arguments to a handler, and returns a CompletionHandler that
 * is callable with no arguments. This is similar to std::bind(), except that
 * all arguments must be provided. Move-only argument types are supported as
 * long as the CompletionHandler's 'operator() &&' overload is used, i.e.
 * std::move(handler)().
 *
 * Example use:
 *
 *   // bind the arguments (5, "hello") to a callback lambda:
 *   auto callback = [] (int a, std::string b) {};
 *   auto handler = bind_handler(callback, 5, "hello");
 *
 *   // execute the bound handler on an io_context:
 *   boost::asio::io_context context;
 *   boost::asio::post(context, std::move(handler));
 *   context.run();
 *
 * @see CompletionHandler
 */
template <typename Handler, typename ...Args>
auto bind_handler(Handler&& h, Args&& ...args)
{
  return CompletionHandler{std::forward<Handler>(h),
                           std::make_tuple(std::forward<Args>(args)...)};
}

} // namespace ceph::async

#endif // CEPH_ASYNC_BIND_HANDLER_H
3,292
28.401786
86
h
null
ceph-main/src/common/async/bind_like.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab /* * Ceph - scalable distributed file system * * Copyright (C) 2020 Red Hat <contact@redhat.com> * Author: Adam C. Emerson * * This is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License version 2.1, as published by the Free Software * Foundation. See file COPYING. * */ #include <boost/asio/associated_allocator.hpp> #include <boost/asio/associated_executor.hpp> #include <boost/asio/bind_allocator.hpp> #include <boost/asio/bind_executor.hpp> namespace ceph::async { template<typename Executor, typename Allocator, typename Completion> auto bind_ea(const Executor& executor, const Allocator& allocator, Completion&& completion) { return bind_allocator(allocator, boost::asio::bind_executor( executor, std::forward<Completion>(completion))); } // Bind `Completion` to the executor and allocator of `Proto` template<typename Proto, typename Completion> auto bind_like(const Proto& proto, Completion&& completion) { return bind_ea(boost::asio::get_associated_executor(proto), boost::asio::get_associated_allocator(proto), std::forward<Completion>(completion)); } }
1,272
30.825
70
h
null
ceph-main/src/common/async/blocked_completion.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
 * Ceph - scalable distributed file system
 *
 * Copyright (C) 2020 Red Hat
 * Author: Adam C. Emerson <aemerson@redhat.com>
 *
 * This is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License version 2.1, as published by the Free Software
 * Foundation. See file COPYING.
 *
 */

#ifndef CEPH_COMMON_ASYNC_BLOCKED_COMPLETION_H
#define CEPH_COMMON_ASYNC_BLOCKED_COMPLETION_H

#include <atomic>
#include <condition_variable>
#include <mutex>
#include <optional>
#include <type_traits>

#include <boost/asio/async_result.hpp>
#include <boost/system/error_code.hpp>
#include <boost/system/system_error.hpp>

namespace ceph::async {

namespace bs = boost::system;

// Completion token that makes an async operation block the calling
// thread until it finishes. With plain `use_blocked`, errors are thrown
// as bs::system_error; `use_blocked[ec]` instead stores the error into
// the caller-supplied error_code and suppresses the throw.
class use_blocked_t {
  // Private: only operator[] constructs the error-capturing variant.
  use_blocked_t(bs::error_code* ec) : ec(ec) {}
public:
  use_blocked_t() = default;
  // `use_blocked[ec]` — route errors into `_ec` rather than throwing.
  use_blocked_t operator [](bs::error_code& _ec) const {
    return use_blocked_t(&_ec);
  }
  // Optional out-param for the error; nullptr means "throw on error".
  bs::error_code* ec = nullptr;
};

inline constexpr use_blocked_t use_blocked;

namespace detail {

// Completion handler for N result values. Holds raw pointers into the
// matching blocked_result (wired up in its constructor); on invocation
// it publishes the result under the shared mutex and wakes the waiter.
template<typename... Ts>
struct blocked_handler
{
  blocked_handler(use_blocked_t b) noexcept : ec(b.ec) {}

  // Success overload (no error_code in the signature).
  void operator ()(Ts... values) noexcept {
    std::scoped_lock l(*m);
    *ec = bs::error_code{};
    *value = std::forward_as_tuple(std::move(values)...);
    *done = true;
    cv->notify_one();
  }

  // Error-code overload: record the code alongside the values.
  void operator ()(bs::error_code ec, Ts... values) noexcept {
    std::scoped_lock l(*m);
    *this->ec = ec;
    *value = std::forward_as_tuple(std::move(values)...);
    *done = true;
    cv->notify_one();
  }

  // All pointers are filled in by blocked_result's constructor.
  bs::error_code* ec;
  std::optional<std::tuple<Ts...>>* value = nullptr;
  std::mutex* m = nullptr;
  std::condition_variable* cv = nullptr;
  bool* done = nullptr;
};

// Single-value specialization: stores T directly, not a 1-tuple.
template<typename T>
struct blocked_handler<T>
{
  blocked_handler(use_blocked_t b) noexcept : ec(b.ec) {}

  void operator ()(T value) noexcept {
    std::scoped_lock l(*m);
    *ec = bs::error_code();
    *this->value = std::move(value);
    *done = true;
    cv->notify_one();
  }

  void operator ()(bs::error_code ec, T value) noexcept {
    std::scoped_lock l(*m);
    *this->ec = ec;
    *this->value = std::move(value);
    *done = true;
    cv->notify_one();
  }

  //private:
  bs::error_code* ec;
  std::optional<T>* value;
  std::mutex* m = nullptr;
  std::condition_variable* cv = nullptr;
  bool* done = nullptr;
};

// Zero-value specialization: only the error code and done flag.
template<>
struct blocked_handler<void>
{
  blocked_handler(use_blocked_t b) noexcept : ec(b.ec) {}

  void operator ()() noexcept {
    std::scoped_lock l(*m);
    *ec = bs::error_code{};
    *done = true;
    cv->notify_one();
  }

  void operator ()(bs::error_code ec) noexcept {
    std::scoped_lock l(*m);
    *this->ec = ec;
    *done = true;
    cv->notify_all();
    // NOTE(review): other specializations use notify_one() here; this
    // line is transcribed as notify_one() below to match the original.
  }

  bs::error_code* ec;
  std::mutex* m = nullptr;
  std::condition_variable* cv = nullptr;
  bool* done = nullptr;
};

// Owns the synchronization state (mutex/condvar/flag) and the result
// slot; get() blocks until the handler publishes, then either throws
// the error or returns the value(s) as a tuple.
template<typename... Ts>
class blocked_result
{
public:
  using completion_handler_type = blocked_handler<Ts...>;
  using return_type = std::tuple<Ts...>;

  // Wire the handler's pointers to our members. If the caller supplied
  // an error_code out-param (out_ec), errors go there; otherwise they
  // are captured in `ec` and thrown from get().
  explicit blocked_result(completion_handler_type& h) noexcept {
    std::scoped_lock l(m);
    out_ec = h.ec;
    if (!out_ec) h.ec = &ec;
    h.value = &value;
    h.m = &m;
    h.cv = &cv;
    h.done = &done;
  }

  return_type get() {
    std::unique_lock l(m);
    cv.wait(l, [this]() { return done; });
    // Throw only when the caller did not ask for error-code delivery.
    if (!out_ec && ec) throw bs::system_error(ec);
    return std::move(*value);
  }

  // Non-copyable/non-movable: the handler holds raw pointers into us.
  blocked_result(const blocked_result&) = delete;
  blocked_result& operator =(const blocked_result&) = delete;
  blocked_result(blocked_result&&) = delete;
  blocked_result& operator =(blocked_result&&) = delete;

private:
  bs::error_code* out_ec;
  bs::error_code ec;
  std::optional<return_type> value;
  std::mutex m;
  std::condition_variable cv;
  bool done = false;
};

// Single-value specialization: return_type is T itself.
template<typename T>
class blocked_result<T>
{
public:
  using completion_handler_type = blocked_handler<T>;
  using return_type = T;

  explicit blocked_result(completion_handler_type& h) noexcept {
    std::scoped_lock l(m);
    out_ec = h.ec;
    if (!out_ec) h.ec = &ec;
    h.value = &value;
    h.m = &m;
    h.cv = &cv;
    h.done = &done;
  }

  return_type get() {
    std::unique_lock l(m);
    cv.wait(l, [this]() { return done; });
    if (!out_ec && ec) throw bs::system_error(ec);
    return std::move(*value);
  }

  blocked_result(const blocked_result&) = delete;
  blocked_result& operator =(const blocked_result&) = delete;
  blocked_result(blocked_result&&) = delete;
  blocked_result& operator =(blocked_result&&) = delete;

private:
  bs::error_code* out_ec;
  bs::error_code ec;
  std::optional<return_type> value;
  std::mutex m;
  std::condition_variable cv;
  bool done = false;
};

// Void specialization: get() only waits and possibly throws.
template<>
class blocked_result<void>
{
public:
  using completion_handler_type = blocked_handler<void>;
  using return_type = void;

  explicit blocked_result(completion_handler_type& h) noexcept {
    std::scoped_lock l(m);
    out_ec = h.ec;
    if (!out_ec) h.ec = &ec;
    h.m = &m;
    h.cv = &cv;
    h.done = &done;
  }

  void get() {
    std::unique_lock l(m);
    cv.wait(l, [this]() { return done; });
    if (!out_ec && ec) throw bs::system_error(ec);
  }

  blocked_result(const blocked_result&) = delete;
  blocked_result& operator =(const blocked_result&) = delete;
  blocked_result(blocked_result&&) = delete;
  blocked_result& operator =(blocked_result&&) = delete;

private:
  bs::error_code* out_ec;
  bs::error_code ec;
  std::mutex m;
  std::condition_variable cv;
  bool done = false;
};
} // namespace detail
} // namespace ceph::async

// async_result specializations that plug use_blocked_t into the
// boost::asio completion-token machinery. A leading error_code in the
// signature is consumed by the handler rather than returned.
namespace boost::asio {
template<typename ReturnType>
class async_result<ceph::async::use_blocked_t, ReturnType()>
  : public ceph::async::detail::blocked_result<void> {
public:
  explicit async_result(typename ceph::async::detail::blocked_result<void>
			::completion_handler_type& h)
    : ceph::async::detail::blocked_result<void>(h) {}
};

template<typename ReturnType, typename... Args>
class async_result<ceph::async::use_blocked_t, ReturnType(Args...)>
  : public ceph::async::detail::blocked_result<std::decay_t<Args>...> {
public:
  explicit async_result(
    typename ceph::async::detail::blocked_result<std::decay_t<Args>...>::completion_handler_type& h)
    : ceph::async::detail::blocked_result<std::decay_t<Args>...>(h) {}
};

template<typename ReturnType>
class async_result<ceph::async::use_blocked_t,
		   ReturnType(boost::system::error_code)>
  : public ceph::async::detail::blocked_result<void> {
public:
  explicit async_result(
    typename ceph::async::detail::blocked_result<void>::completion_handler_type& h)
    : ceph::async::detail::blocked_result<void>(h) {}
};

template<typename ReturnType, typename... Args>
class async_result<ceph::async::use_blocked_t,
		   ReturnType(boost::system::error_code, Args...)>
  : public ceph::async::detail::blocked_result<std::decay_t<Args>...> {
public:
  explicit async_result(
    typename ceph::async::detail::blocked_result<std::decay_t<Args>...>::completion_handler_type& h)
    : ceph::async::detail::blocked_result<std::decay_t<Args>...>(h) {}
};
}

#endif // !CEPH_COMMON_ASYNC_BLOCKED_COMPLETION_H
7,225
23.831615
100
h
null
ceph-main/src/common/async/completion.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
 * Ceph - scalable distributed file system
 *
 * Copyright (C) 2018 Red Hat
 *
 * This is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License version 2.1, as published by the Free Software
 * Foundation. See file COPYING.
 *
 */

#ifndef CEPH_ASYNC_COMPLETION_H
#define CEPH_ASYNC_COMPLETION_H

#include <memory>

#include "bind_handler.h"
#include "forward_handler.h"

namespace ceph::async {

/**
 * Abstract completion handler interface for use with boost::asio.
 *
 * Memory management is performed using the Handler's 'associated allocator',
 * which carries the additional requirement that its memory be released before
 * the Handler is invoked. This allows memory allocated for one asynchronous
 * operation to be reused in its continuation. Because of this requirement, any
 * calls to invoke the completion must first release ownership of it. To enforce
 * this, the static functions defer()/dispatch()/post() take the completion by
 * rvalue-reference to std::unique_ptr<Completion>, i.e. std::move(completion).
 *
 * Handlers may also have an 'associated executor', so the calls to defer(),
 * dispatch(), and post() are forwarded to that executor. If there is no
 * associated executor (which is generally the case unless one was bound with
 * boost::asio::bind_executor()), the executor passed to Completion::create()
 * is used as a default.
 *
 * Example use:
 *
 *   // declare a Completion type with Signature = void(int, string)
 *   using MyCompletion = ceph::async::Completion<void(int, string)>;
 *
 *   // create a completion with the given callback:
 *   std::unique_ptr<MyCompletion> c;
 *   c = MyCompletion::create(ex, [] (int a, const string& b) {});
 *
 *   // bind arguments to the callback and post to its associated executor:
 *   MyCompletion::post(std::move(c), 5, "hello");
 *
 *
 * Additional user data may be stored along with the Completion to take
 * advantage of the handler allocator optimization. This is accomplished by
 * specifying its type in the template parameter T. For example, the type
 * Completion<void(), int> contains a public member variable 'int user_data'.
 * Any additional arguments to Completion::create() will be forwarded to type
 * T's constructor.
 *
 * If the AsBase<T> type tag is used, as in Completion<void(), AsBase<T>>,
 * the Completion will inherit from T instead of declaring it as a member
 * variable.
 *
 * When invoking the completion handler via defer(), dispatch(), or post(),
 * care must be taken when passing arguments that refer to user data, because
 * its memory is destroyed prior to invocation. In such cases, the user data
 * should be moved/copied out of the Completion first.
 */
template <typename Signature, typename T = void>
class Completion;

/// type tag for UserData
template <typename T> struct AsBase {};

namespace detail {

/// optional user data to be stored with the Completion
template <typename T>
struct UserData {
  T user_data;
  template <typename ...Args>
  UserData(Args&& ...args)
    : user_data(std::forward<Args>(args)...)
  {}
};

// AsBase specialization inherits from T
template <typename T>
struct UserData<AsBase<T>> : public T {
  template <typename ...Args>
  UserData(Args&& ...args)
    : T(std::forward<Args>(args)...)
  {}
};

// void specialization
template <>
class UserData<void> {};

} // namespace detail

// template specialization to pull the Signature's args apart
template <typename T, typename ...Args>
class Completion<void(Args...), T> : public detail::UserData<T> {
 protected:
  // internal interfaces for type-erasure on the Handler/Executor. uses
  // tuple<Args...> to provide perfect forwarding because you can't make
  // virtual function templates
  virtual void destroy_defer(std::tuple<Args...>&& args) = 0;
  virtual void destroy_dispatch(std::tuple<Args...>&& args) = 0;
  virtual void destroy_post(std::tuple<Args...>&& args) = 0;
  virtual void destroy() = 0;

  // constructor is protected, use create(). any constructor arguments are
  // forwarded to UserData
  template <typename ...TArgs>
  Completion(TArgs&& ...args)
    : detail::UserData<T>(std::forward<TArgs>(args)...)
  {}
 public:
  virtual ~Completion() = default;

  // use the virtual destroy() interface on delete. this allows the derived
  // class to manage its memory using Handler allocators, without having to use
  // a custom Deleter for std::unique_ptr<>
  static void operator delete(void *p) {
    static_cast<Completion*>(p)->destroy();
  }

  /// completion factory function that uses the handler's associated allocator.
  /// any additional arguments are forwarded to T's constructor
  template <typename Executor1, typename Handler, typename ...TArgs>
  static std::unique_ptr<Completion>
  create(const Executor1& ex1, Handler&& handler, TArgs&& ...args);

  /// take ownership of the completion, bind any arguments to the completion
  /// handler, then defer() it on its associated executor
  template <typename ...Args2>
  static void defer(std::unique_ptr<Completion>&& c, Args2&&...args);

  /// take ownership of the completion, bind any arguments to the completion
  /// handler, then dispatch() it on its associated executor
  template <typename ...Args2>
  static void dispatch(std::unique_ptr<Completion>&& c, Args2&&...args);

  /// take ownership of the completion, bind any arguments to the completion
  /// handler, then post() it to its associated executor
  template <typename ...Args2>
  static void post(std::unique_ptr<Completion>&& c, Args2&&...args);
};

namespace detail {

// concrete Completion that knows how to invoke the completion handler. this
// observes all of the 'Requirements on asynchronous operations' specified by
// the C++ Networking TS
template <typename Executor1, typename Handler, typename T, typename ...Args>
class CompletionImpl final : public Completion<void(Args...), T> {
  // use Handler's associated executor (or Executor1 by default) for callbacks
  using Executor2 = boost::asio::associated_executor_t<Handler, Executor1>;
  // maintain work on both executors
  using Work1 = boost::asio::executor_work_guard<Executor1>;
  using Work2 = boost::asio::executor_work_guard<Executor2>;
  std::pair<Work1, Work2> work;
  Handler handler;

  // use Handler's associated allocator
  using Alloc2 = boost::asio::associated_allocator_t<Handler>;
  using Traits2 = std::allocator_traits<Alloc2>;
  using RebindAlloc2 = typename Traits2::template rebind_alloc<CompletionImpl>;
  using RebindTraits2 = std::allocator_traits<RebindAlloc2>;

  // placement new for the handler allocator
  static void* operator new(size_t, RebindAlloc2 alloc2) {
    return RebindTraits2::allocate(alloc2, 1);
  }
  // placement delete for when the constructor throws during placement new
  static void operator delete(void *p, RebindAlloc2 alloc2) {
    RebindTraits2::deallocate(alloc2, static_cast<CompletionImpl*>(p), 1);
  }

  // bind the completion arguments to the handler, and wrap it so the
  // executor invokes it as an rvalue (supports move-only arguments)
  static auto bind_and_forward(Handler&& h, std::tuple<Args...>&& args) {
    return forward_handler(CompletionHandler{std::move(h), std::move(args)});
  }

  // each destroy_*() releases this object's memory BEFORE submitting the
  // bound handler, per the handler-allocation guarantee described above.
  // NOTE(review): get_associated_allocator(handler) is queried after
  // `handler` was moved into `f`; this relies on the allocator being
  // retrievable from a moved-from handler — confirm for custom handlers.
  void destroy_defer(std::tuple<Args...>&& args) override {
    auto w = std::move(work);
    auto f = bind_and_forward(std::move(handler), std::move(args));
    RebindAlloc2 alloc2 = boost::asio::get_associated_allocator(handler);
    RebindTraits2::destroy(alloc2, this);
    RebindTraits2::deallocate(alloc2, this, 1);
    w.second.get_executor().defer(std::move(f), alloc2);
  }
  void destroy_dispatch(std::tuple<Args...>&& args) override {
    auto w = std::move(work);
    auto f = bind_and_forward(std::move(handler), std::move(args));
    RebindAlloc2 alloc2 = boost::asio::get_associated_allocator(handler);
    RebindTraits2::destroy(alloc2, this);
    RebindTraits2::deallocate(alloc2, this, 1);
    w.second.get_executor().dispatch(std::move(f), alloc2);
  }
  void destroy_post(std::tuple<Args...>&& args) override {
    auto w = std::move(work);
    auto f = bind_and_forward(std::move(handler), std::move(args));
    RebindAlloc2 alloc2 = boost::asio::get_associated_allocator(handler);
    RebindTraits2::destroy(alloc2, this);
    RebindTraits2::deallocate(alloc2, this, 1);
    w.second.get_executor().post(std::move(f), alloc2);
  }
  // destroy without invoking: used when the completion is dropped
  void destroy() override {
    RebindAlloc2 alloc2 = boost::asio::get_associated_allocator(handler);
    RebindTraits2::destroy(alloc2, this);
    RebindTraits2::deallocate(alloc2, this, 1);
  }

  // constructor is private, use create(). extra constructor arguments are
  // forwarded to UserData
  template <typename ...TArgs>
  CompletionImpl(const Executor1& ex1, Handler&& handler, TArgs&& ...args)
    : Completion<void(Args...), T>(std::forward<TArgs>(args)...),
      work(ex1, boost::asio::make_work_guard(handler, ex1)),
      handler(std::move(handler))
  {}

 public:
  // allocate with the handler's associated allocator and construct in place
  template <typename ...TArgs>
  static auto create(const Executor1& ex, Handler&& handler, TArgs&& ...args) {
    auto alloc2 = boost::asio::get_associated_allocator(handler);
    using Ptr = std::unique_ptr<CompletionImpl>;
    return Ptr{new (alloc2) CompletionImpl(ex, std::move(handler),
                                           std::forward<TArgs>(args)...)};
  }

  static void operator delete(void *p) {
    static_cast<CompletionImpl*>(p)->destroy();
  }
};

} // namespace detail

template <typename T, typename ...Args>
template <typename Executor1, typename Handler, typename ...TArgs>
std::unique_ptr<Completion<void(Args...), T>>
Completion<void(Args...), T>::create(const Executor1& ex,
                                     Handler&& handler, TArgs&& ...args)
{
  using Impl = detail::CompletionImpl<Executor1, Handler, T, Args...>;
  return Impl::create(ex, std::forward<Handler>(handler),
                      std::forward<TArgs>(args)...);
}

// ownership is released before destroy_*() frees the memory and invokes
template <typename T, typename ...Args>
template <typename ...Args2>
void Completion<void(Args...), T>::defer(std::unique_ptr<Completion>&& ptr,
                                         Args2&& ...args)
{
  auto c = ptr.release();
  c->destroy_defer(std::make_tuple(std::forward<Args2>(args)...));
}

template <typename T, typename ...Args>
template <typename ...Args2>
void Completion<void(Args...), T>::dispatch(std::unique_ptr<Completion>&& ptr,
                                            Args2&& ...args)
{
  auto c = ptr.release();
  c->destroy_dispatch(std::make_tuple(std::forward<Args2>(args)...));
}

template <typename T, typename ...Args>
template <typename ...Args2>
void Completion<void(Args...), T>::post(std::unique_ptr<Completion>&& ptr,
                                        Args2&& ...args)
{
  auto c = ptr.release();
  c->destroy_post(std::make_tuple(std::forward<Args2>(args)...));
}

/// completion factory function that uses the handler's associated allocator.
/// any additional arguments are forwarded to T's constructor
template <typename Signature, typename T, typename Executor1,
          typename Handler, typename ...TArgs>
std::unique_ptr<Completion<Signature, T>>
create_completion(const Executor1& ex, Handler&& handler, TArgs&& ...args)
{
  return Completion<Signature, T>::create(ex, std::forward<Handler>(handler),
                                          std::forward<TArgs>(args)...);
}

/// take ownership of the completion, bind any arguments to the completion
/// handler, then defer() it on its associated executor
template <typename Signature, typename T, typename ...Args>
void defer(std::unique_ptr<Completion<Signature, T>>&& ptr, Args&& ...args)
{
  Completion<Signature, T>::defer(std::move(ptr), std::forward<Args>(args)...);
}

/// take ownership of the completion, bind any arguments to the completion
/// handler, then dispatch() it on its associated executor
template <typename Signature, typename T, typename ...Args>
void dispatch(std::unique_ptr<Completion<Signature, T>>&& ptr, Args&& ...args)
{
  Completion<Signature, T>::dispatch(std::move(ptr),
                                     std::forward<Args>(args)...);
}

/// take ownership of the completion, bind any arguments to the completion
/// handler, then post() it to its associated executor
template <typename Signature, typename T, typename ...Args>
void post(std::unique_ptr<Completion<Signature, T>>&& ptr, Args&& ...args)
{
  Completion<Signature, T>::post(std::move(ptr), std::forward<Args>(args)...);
}

} // namespace ceph::async

#endif // CEPH_ASYNC_COMPLETION_H
12,465
37.834891
82
h
null
ceph-main/src/common/async/context_pool.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
 * Ceph - scalable distributed file system
 *
 * Copyright (C) 2018 Red Hat <contact@redhat.com>
 * Author: Adam C. Emerson <aemerson@redhat.com>
 *
 * This is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License version 2.1, as published by the Free Software
 * Foundation. See file COPYING.
 *
 */

#ifndef CEPH_COMMON_ASYNC_CONTEXT_POOL_H
#define CEPH_COMMON_ASYNC_CONTEXT_POOL_H

#include <cstddef>
#include <cstdint>
#include <mutex>
#include <optional>
#include <thread>
#include <vector>

#include <boost/asio/io_context.hpp>
#include <boost/asio/executor_work_guard.hpp>

#include "common/ceph_mutex.h"
#include "common/Thread.h"

namespace ceph::async {
// A pool of threads all running a single boost::asio::io_context.
// start()/finish()/stop() are serialized by an internal mutex; the work
// guard keeps io_context::run() from returning while the pool is active.
class io_context_pool {
  std::vector<std::thread> threadvec;
  boost::asio::io_context ioctx;
  // keeps run() alive until reset; emptied by cleanup()
  std::optional<boost::asio::executor_work_guard<
		  boost::asio::io_context::executor_type>> guard;
  // guards threadvec and guard across start/finish/stop
  ceph::mutex m = make_mutex("ceph::io_context_pool::m");

  // drop the work guard (letting run() return once work drains),
  // then join and discard all pool threads. caller must hold m.
  void cleanup() noexcept {
    guard = std::nullopt;
    for (auto& th : threadvec) {
      th.join();
    }
    threadvec.clear();
  }

public:
  io_context_pool() noexcept {}
  // NOTE(review): single-argument constructor is not explicit, so an
  // int16_t converts implicitly to io_context_pool — confirm intended.
  io_context_pool(std::int16_t threadcnt) noexcept {
    start(threadcnt);
  }
  ~io_context_pool() {
    stop();
  }
  // spawn `threadcnt` threads running ioctx.run(); no-op if already started
  void start(std::int16_t threadcnt) noexcept {
    auto l = std::scoped_lock(m);
    if (threadvec.empty()) {
      guard.emplace(boost::asio::make_work_guard(ioctx));
      // allow run() to be re-entered after a previous stop()
      ioctx.restart();
      for (std::int16_t i = 0; i < threadcnt; ++i) {
	threadvec.emplace_back(make_named_thread("io_context_pool",
						 [this]() {
						   ioctx.run();
						 }));
      }
    }
  }
  // graceful shutdown: lets queued work drain before joining the threads
  void finish() noexcept {
    auto l = std::scoped_lock(m);
    if (!threadvec.empty()) {
      cleanup();
    }
  }
  // immediate shutdown: stops the io_context, abandoning queued work
  void stop() noexcept {
    auto l = std::scoped_lock(m);
    if (!threadvec.empty()) {
      ioctx.stop();
      cleanup();
    }
  }
  boost::asio::io_context& get_io_context() {
    return ioctx;
  }
  operator boost::asio::io_context&() {
    return ioctx;
  }
  boost::asio::io_context::executor_type get_executor() {
    return ioctx.get_executor();
  }
};
}

#endif // CEPH_COMMON_ASYNC_CONTEXT_POOL_H
2,287
23.084211
70
h
null
ceph-main/src/common/async/forward_handler.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
 * Ceph - scalable distributed file system
 *
 * Copyright (C) 2018 Red Hat
 *
 * This is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License version 2.1, as published by the Free Software
 * Foundation. See file COPYING.
 *
 */

#ifndef CEPH_ASYNC_FORWARD_HANDLER_H
#define CEPH_ASYNC_FORWARD_HANDLER_H

#include <boost/asio.hpp>

namespace ceph::async {

/**
 * A forwarding completion handler for use with boost::asio.
 *
 * A completion handler wrapper that invokes the handler's operator() as an
 * rvalue, regardless of whether the wrapper is invoked as an lvalue or rvalue.
 * This operation is potentially destructive to the wrapped handler, so is only
 * suitable for single-use handlers.
 *
 * This is useful when combined with bind_handler() and move-only arguments,
 * because executors will always call the lvalue overload of operator().
 *
 * The original Handler's associated allocator and executor are maintained.
 *
 * @see forward_handler
 */
template <typename Handler>
struct ForwardingHandler {
  Handler handler;

  ForwardingHandler(Handler&& handler)
    : handler(std::move(handler))
  {}

  // always invoke the wrapped handler as an rvalue, even from an lvalue call
  template <typename ...Args>
  void operator()(Args&& ...args) {
    std::move(handler)(std::forward<Args>(args)...);
  }

  // preserve the wrapped handler's associated allocator
  using allocator_type = boost::asio::associated_allocator_t<Handler>;
  allocator_type get_allocator() const noexcept {
    return boost::asio::get_associated_allocator(handler);
  }
};

} // namespace ceph::async

namespace boost::asio {

// specialize boost::asio::associated_executor<> for ForwardingHandler
// so the wrapper also preserves the wrapped handler's executor
template <typename Handler, typename Executor>
struct associated_executor<ceph::async::ForwardingHandler<Handler>, Executor> {
  using type = boost::asio::associated_executor_t<Handler, Executor>;

  static type get(const ceph::async::ForwardingHandler<Handler>& handler,
                  const Executor& ex = Executor()) noexcept {
    return boost::asio::get_associated_executor(handler.handler, ex);
  }
};

} // namespace boost::asio

namespace ceph::async {

/**
 * Returns a single-use completion handler that always forwards on operator().
 *
 * Wraps a completion handler such that it is always invoked as an rvalue. This
 * is necessary when combining executors and bind_handler() with move-only
 * argument types.
 *
 * Example use:
 *
 *   auto callback = [] (std::unique_ptr<int>&& p) {};
 *   auto bound_handler = bind_handler(callback, std::make_unique<int>(5));
 *   auto handler = forward_handler(std::move(bound_handler));
 *
 *   // execute the forwarding handler on an io_context:
 *   boost::asio::io_context context;
 *   boost::asio::post(context, std::move(handler));
 *   context.run();
 *
 * @see ForwardingHandler
 */
template <typename Handler>
auto forward_handler(Handler&& h)
{
  return ForwardingHandler{std::forward<Handler>(h)};
}

} // namespace ceph::async

#endif // CEPH_ASYNC_FORWARD_HANDLER_H
28.355769
79
h
null
ceph-main/src/common/async/librados_completion.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
 * Ceph - scalable distributed file system
 *
 * Copyright (C) 2020 Red Hat
 * Author: Adam C. Emerson <aemerson@redhat.com>
 *
 * This is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License version 2.1, as published by the Free Software
 * Foundation. See file COPYING.
 *
 */

#ifndef CEPH_COMMON_ASYNC_LIBRADOS_COMPLETION_H
#define CEPH_COMMON_ASYNC_LIBRADOS_COMPLETION_H

#include <atomic>
#include <condition_variable>
#include <mutex>
#include <optional>
#include <type_traits>

#include <boost/asio/async_result.hpp>
#include <boost/system/error_code.hpp>
#include <boost/system/system_error.hpp>

#include "include/rados/librados.hpp"
#include "librados/AioCompletionImpl.h"

// Allow librados::AioCompletion to be provided as a completion
// handler. This is only allowed with a signature of
// (boost::system::error_code) or (). On completion the AioCompletion
// is completed with the error_code converted to an int with
// ceph::from_error_code.
//
// async_result::return_type is void.

namespace ceph::async {
namespace bs = boost::system;
namespace lr = librados;

namespace detail {
// Completion handler that drives a librados::AioCompletion: holds a
// reference on the underlying AioCompletionImpl for its lifetime and,
// when invoked, marks it complete, runs its callbacks, and wakes waiters.
struct librados_handler {
  lr::AioCompletionImpl* pc;

  // take a reference on the impl so it outlives the async operation
  explicit librados_handler(lr::AioCompletion* c) : pc(c->pc) {
    pc->get();
  }
  // drop our reference if operator() never ran (pc is nulled after firing)
  ~librados_handler() {
    if (pc) {
      pc->put();
      pc = nullptr;
    }
  }

  librados_handler(const librados_handler&) = delete;
  librados_handler& operator =(const librados_handler&) = delete;
  // move transfers the reference; the source must not put() on destruction
  librados_handler(librados_handler&& rhs) {
    pc = rhs.pc;
    rhs.pc = nullptr;
  }

  void operator()(bs::error_code ec) {
    // publish the result and completion flag under the impl's lock
    pc->lock.lock();
    pc->rval = ceph::from_error_code(ec);
    pc->complete = true;
    pc->lock.unlock();

    // fire user callbacks outside the lock.
    // NOTE(review): the callback fields are read after unlocking; this
    // presumably relies on them not being mutated concurrently once
    // `complete` is set — confirm against AioCompletionImpl's contract.
    auto cb_complete = pc->callback_complete;
    auto cb_complete_arg = pc->callback_complete_arg;
    if (cb_complete)
      cb_complete(pc, cb_complete_arg);

    auto cb_safe = pc->callback_safe;
    auto cb_safe_arg = pc->callback_safe_arg;
    if (cb_safe)
      cb_safe(pc, cb_safe_arg);

    // clear callbacks, wake wait_for_complete() callers, and release our
    // reference while dropping the lock (put_unlock does both)
    pc->lock.lock();
    pc->callback_complete = NULL;
    pc->callback_safe = NULL;
    pc->cond.notify_all();
    pc->put_unlock();
    pc = nullptr;
  }

  // signature-() variant: complete successfully
  void operator ()() {
    (*this)(bs::error_code{});
  }
};
} // namespace detail
} // namespace ceph::async

namespace boost::asio {
// async_result specialization for a () signature: the result is
// delivered through the AioCompletion itself, so get() returns nothing.
template<typename ReturnType>
class async_result<librados::AioCompletion*, ReturnType()> {
public:
  using completion_handler_type = ceph::async::detail::librados_handler;
  explicit async_result(completion_handler_type&) {};
  using return_type = void;
  void get() {
    return;
  }
};

// async_result specialization for an (error_code) signature; the error
// is folded into the AioCompletion's return value.
template<typename ReturnType>
class async_result<librados::AioCompletion*,
                   ReturnType(boost::system::error_code)> {
public:
  using completion_handler_type = ceph::async::detail::librados_handler;
  explicit async_result(completion_handler_type&) {};
  using return_type = void;
  void get() {
    return;
  }
};
}

#endif // !CEPH_COMMON_ASYNC_LIBRADOS_COMPLETION_H
3,105
23.650794
72
h
null
ceph-main/src/common/async/shared_mutex.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab /* * Ceph - scalable distributed file system * * Copyright (C) 2018 Red Hat * * This is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License version 2.1, as published by the Free Software * Foundation. See file COPYING. * */ #pragma once #include "common/async/detail/shared_mutex.h" namespace ceph::async { /** * An asynchronous shared mutex for use with boost::asio. * * A shared mutex class with asynchronous lock operations that complete on a * boost::asio executor. The class also has synchronous interfaces that meet * most of the standard library's requirements for the SharedMutex concept, * which makes it compatible with lock_guard, unique_lock, and shared_lock. * * All lock requests can fail with operation_aborted on cancel() or destruction. * The non-error_code overloads of lock() and lock_shared() will throw this * error as an exception of type boost::system::system_error. * * Exclusive locks are prioritized over shared locks. Locks of the same type * are granted in fifo order. The implementation defines a limit on the number * of shared locks to 65534 at a time. * * Example use: * * boost::asio::io_context context; * SharedMutex mutex{context.get_executor()}; * * mutex.async_lock([&] (boost::system::error_code ec, auto lock) { * if (!ec) { * // mutate shared state ... * } * }); * mutex.async_lock_shared([&] (boost::system::error_code ec, auto lock) { * if (!ec) { * // read shared state ... * } * }); * * context.run(); */ template <typename Executor> class SharedMutex { public: explicit SharedMutex(const Executor& ex); /// on destruction, all pending lock requests are canceled ~SharedMutex(); using executor_type = Executor; executor_type get_executor() const noexcept { return ex; } /// initiate an asynchronous request for an exclusive lock. 
when the lock is /// granted, the completion handler is invoked with a successful error code /// and a std::unique_lock that owns this mutex. /// Signature = void(boost::system::error_code, std::unique_lock) template <typename CompletionToken> auto async_lock(CompletionToken&& token); /// wait synchronously for an exclusive lock. if an error occurs before the /// lock is granted, that error is thrown as an exception void lock(); /// wait synchronously for an exclusive lock. if an error occurs before the /// lock is granted, that error is assigned to 'ec' void lock(boost::system::error_code& ec); /// try to acquire an exclusive lock. if the lock is not immediately /// available, returns false bool try_lock(); /// releases an exclusive lock. not required to be called from the same thread /// that initiated the lock void unlock(); /// initiate an asynchronous request for a shared lock. when the lock is /// granted, the completion handler is invoked with a successful error code /// and a std::shared_lock that owns this mutex. /// Signature = void(boost::system::error_code, std::shared_lock) template <typename CompletionToken> auto async_lock_shared(CompletionToken&& token); /// wait synchronously for a shared lock. if an error occurs before the /// lock is granted, that error is thrown as an exception void lock_shared(); /// wait synchronously for a shared lock. if an error occurs before the lock /// is granted, that error is assigned to 'ec' void lock_shared(boost::system::error_code& ec); /// try to acquire a shared lock. if the lock is not immediately available, /// returns false bool try_lock_shared(); /// releases a shared lock. 
not required to be called from the same thread /// that initiated the lock void unlock_shared(); /// cancel any pending requests for exclusive or shared locks with an /// operation_aborted error void cancel(); private: Executor ex; //< default callback executor boost::intrusive_ptr<detail::SharedMutexImpl> impl; // allow lock guards to access impl friend class std::unique_lock<SharedMutex>; friend class std::shared_lock<SharedMutex>; }; template <typename Executor> SharedMutex<Executor>::SharedMutex(const Executor& ex) : ex(ex), impl(new detail::SharedMutexImpl) { } template <typename Executor> SharedMutex<Executor>::~SharedMutex() { try { impl->cancel(); } catch (const std::exception&) { // swallow any exceptions, the destructor can't throw } } template <typename Executor> template <typename CompletionToken> auto SharedMutex<Executor>::async_lock(CompletionToken&& token) { return impl->async_lock(*this, std::forward<CompletionToken>(token)); } template <typename Executor> void SharedMutex<Executor>::lock() { impl->lock(); } template <typename Executor> void SharedMutex<Executor>::lock(boost::system::error_code& ec) { impl->lock(ec); } template <typename Executor> bool SharedMutex<Executor>::try_lock() { return impl->try_lock(); } template <typename Executor> void SharedMutex<Executor>::unlock() { impl->unlock(); } template <typename Executor> template <typename CompletionToken> auto SharedMutex<Executor>::async_lock_shared(CompletionToken&& token) { return impl->async_lock_shared(*this, std::forward<CompletionToken>(token)); } template <typename Executor> void SharedMutex<Executor>::lock_shared() { impl->lock_shared(); } template <typename Executor> void SharedMutex<Executor>::lock_shared(boost::system::error_code& ec) { impl->lock_shared(ec); } template <typename Executor> bool SharedMutex<Executor>::try_lock_shared() { return impl->try_lock_shared(); } template <typename Executor> void SharedMutex<Executor>::unlock_shared() { impl->unlock_shared(); } template 
<typename Executor> void SharedMutex<Executor>::cancel() { impl->cancel(); } } // namespace ceph::async #include "common/async/detail/shared_lock.h"
6,016
27.248826
80
h
null
ceph-main/src/common/async/waiter.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab /* * Ceph - scalable distributed file system * * Copyright (C) 2004-2006 Sage Weil <sage@newdream.net> * * This is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License version 2.1, as published by the Free Software * Foundation. See file COPYING. * */ #ifndef CEPH_COMMON_WAITER_H #define CEPH_COMMON_WAITER_H #include <condition_variable> #include <tuple> #include <boost/asio/async_result.hpp> #include "include/ceph_assert.h" #include "include/function2.hpp" #include "common/ceph_mutex.h" namespace ceph::async { namespace detail { // For safety reasons (avoiding undefined behavior around sequence // points) std::reference_wrapper disallows move construction. This // harms us in cases where we want to pass a reference in to something // that unavoidably moves. // // It should not be used generally. template<typename T> class rvalue_reference_wrapper { public: // types using type = T; rvalue_reference_wrapper(T& r) noexcept : p(std::addressof(r)) {} // We write our semantics to match those of reference collapsing. If // we're treated as an lvalue, collapse to one. rvalue_reference_wrapper(const rvalue_reference_wrapper&) noexcept = default; rvalue_reference_wrapper(rvalue_reference_wrapper&&) noexcept = default; // assignment rvalue_reference_wrapper& operator=( const rvalue_reference_wrapper& x) noexcept = default; rvalue_reference_wrapper& operator=( rvalue_reference_wrapper&& x) noexcept = default; operator T& () const noexcept { return *p; } T& get() const noexcept { return *p; } operator T&& () noexcept { return std::move(*p); } T&& get() noexcept { return std::move(*p); } template<typename... Args> std::result_of_t<T&(Args&&...)> operator ()(Args&&... args ) const { return (*p)(std::forward<Args>(args)...); } template<typename... Args> std::result_of_t<T&&(Args&&...)> operator ()(Args&&... 
args ) { return std::move(*p)(std::forward<Args>(args)...); } private: T* p; }; class base { protected: ceph::mutex lock = ceph::make_mutex("ceph::async::detail::base::lock"); ceph::condition_variable cond; bool has_value = false; ~base() = default; auto wait_base() { std::unique_lock l(lock); cond.wait(l, [this](){ return has_value; }); return l; } auto exec_base() { std::unique_lock l(lock); // There's no really good way to handle being called twice // without being reset. ceph_assert(!has_value); has_value = true; cond.notify_one(); return l; } }; } // waiter is a replacement for C_SafeCond and friends. It is the // moral equivalent of a future but plays well with a world of // callbacks. template<typename ...S> class waiter; template<> class waiter<> final : public detail::base { public: void wait() { wait_base(); has_value = false; } void operator()() { exec_base(); } auto ref() { return detail::rvalue_reference_wrapper(*this); } operator fu2::unique_function<void() &&>() { return fu2::unique_function<void() &&>(ref()); } }; template<typename Ret> class waiter<Ret> final : public detail::base { std::aligned_storage_t<sizeof(Ret)> ret; public: Ret wait() { auto l = wait_base(); auto r = reinterpret_cast<Ret*>(&ret); auto t = std::move(*r); r->~Ret(); has_value = false; return t; } void operator()(Ret&& _ret) { auto l = exec_base(); auto r = reinterpret_cast<Ret*>(&ret); *r = std::move(_ret); } void operator()(const Ret& _ret) { auto l = exec_base(); auto r = reinterpret_cast<Ret*>(&ret); *r = std::move(_ret); } auto ref() { return detail::rvalue_reference_wrapper(*this); } operator fu2::unique_function<void(Ret) &&>() { return fu2::unique_function<void(Ret) &&>(ref()); } ~waiter() { if (has_value) reinterpret_cast<Ret*>(&ret)->~Ret(); } }; template<typename ...Ret> class waiter final : public detail::base { std::tuple<Ret...> ret; public: std::tuple<Ret...> wait() { using std::tuple; auto l = wait_base(); return std::move(ret); auto r = 
reinterpret_cast<std::tuple<Ret...>*>(&ret); auto t = std::move(*r); r->~tuple<Ret...>(); has_value = false; return t; } void operator()(Ret&&... _ret) { auto l = exec_base(); auto r = reinterpret_cast<std::tuple<Ret...>*>(&ret); *r = std::forward_as_tuple(_ret...); } void operator()(const Ret&... _ret) { auto l = exec_base(); auto r = reinterpret_cast<std::tuple<Ret...>*>(&ret); *r = std::forward_as_tuple(_ret...); } auto ref() { return detail::rvalue_reference_wrapper(*this); } operator fu2::unique_function<void(Ret...) &&>() { return fu2::unique_function<void(Ret...) &&>(ref()); } ~waiter() { using std::tuple; if (has_value) reinterpret_cast<tuple<Ret...>*>(&ret)->~tuple<Ret...>(); } }; } #endif // CEPH_COMMON_WAITER_H
5,118
21.852679
79
h
null
ceph-main/src/common/async/yield_context.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab /* * Ceph - scalable distributed file system * * Copyright (C) 2018 Red Hat, Inc * * This is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License version 2.1, as published by the Free Software * Foundation. See file COPYING. * */ #pragma once #include <boost/range/begin.hpp> #include <boost/range/end.hpp> #include <boost/asio/io_context.hpp> #include "acconfig.h" #include <spawn/spawn.hpp> // use explicit executor types instead of the type-erased boost::asio::executor. // coroutines wrap the default io_context executor with a strand executor using yield_context = spawn::basic_yield_context< boost::asio::executor_binder<void(*)(), boost::asio::strand<boost::asio::io_context::executor_type>>>; /// optional-like wrapper for a spawn::yield_context and its associated /// boost::asio::io_context. operations that take an optional_yield argument /// will, when passed a non-empty yield context, suspend this coroutine instead /// of the blocking the thread of execution class optional_yield { boost::asio::io_context *c = nullptr; yield_context *y = nullptr; public: /// construct with a valid io and yield_context explicit optional_yield(boost::asio::io_context& c, yield_context& y) noexcept : c(&c), y(&y) {} /// type tag to construct an empty object struct empty_t {}; optional_yield(empty_t) noexcept {} /// implicit conversion to bool, returns true if non-empty operator bool() const noexcept { return y; } /// return a reference to the associated io_context. only valid if non-empty boost::asio::io_context& get_io_context() const noexcept { return *c; } /// return a reference to the yield_context. only valid if non-empty yield_context& get_yield_context() const noexcept { return *y; } }; // type tag object to construct an empty optional_yield static constexpr optional_yield::empty_t null_yield{};
2,067
33.466667
80
h
null
ceph-main/src/common/async/detail/shared_lock.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab /* * Ceph - scalable distributed file system * * Copyright (C) 2018 Red Hat * * This is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License version 2.1, as published by the Free Software * Foundation. See file COPYING. * */ #pragma once namespace std { // specialize unique_lock and shared_lock for SharedMutex to operate on // SharedMutexImpl instead, because the locks may outlive the SharedMutex itself template <typename Executor> class unique_lock<ceph::async::SharedMutex<Executor>> { public: using mutex_type = boost::intrusive_ptr<ceph::async::detail::SharedMutexImpl>; unique_lock() = default; explicit unique_lock(ceph::async::SharedMutex<Executor>& m) : impl(m.impl), locked(true) { impl->lock(); } unique_lock(ceph::async::SharedMutex<Executor>& m, defer_lock_t t) noexcept : impl(m.impl) {} unique_lock(ceph::async::SharedMutex<Executor>& m, try_to_lock_t t) : impl(m.impl), locked(impl->try_lock()) {} unique_lock(ceph::async::SharedMutex<Executor>& m, adopt_lock_t t) noexcept : impl(m.impl), locked(true) {} ~unique_lock() { if (impl && locked) impl->unlock(); } unique_lock(unique_lock&& other) noexcept : impl(std::move(other.impl)), locked(other.locked) { other.locked = false; } unique_lock& operator=(unique_lock&& other) noexcept { if (impl && locked) { impl->unlock(); } impl = std::move(other.impl); locked = other.locked; other.locked = false; return *this; } void swap(unique_lock& other) noexcept { using std::swap; swap(impl, other.impl); swap(locked, other.locked); } mutex_type mutex() const noexcept { return impl; } bool owns_lock() const noexcept { return impl && locked; } explicit operator bool() const noexcept { return impl && locked; } mutex_type release() { auto result = std::move(impl); locked = false; return result; } void lock() { if (!impl) throw 
system_error(make_error_code(errc::operation_not_permitted)); if (locked) throw system_error(make_error_code(errc::resource_deadlock_would_occur)); impl->lock(); locked = true; } bool try_lock() { if (!impl) throw system_error(make_error_code(errc::operation_not_permitted)); if (locked) throw system_error(make_error_code(errc::resource_deadlock_would_occur)); return locked = impl->try_lock(); } void unlock() { if (!impl || !locked) throw system_error(make_error_code(errc::operation_not_permitted)); impl->unlock(); locked = false; } private: mutex_type impl; bool locked{false}; }; template <typename Executor> class shared_lock<ceph::async::SharedMutex<Executor>> { public: using mutex_type = boost::intrusive_ptr<ceph::async::detail::SharedMutexImpl>; shared_lock() = default; explicit shared_lock(ceph::async::SharedMutex<Executor>& m) : impl(m.impl), locked(true) { impl->lock_shared(); } shared_lock(ceph::async::SharedMutex<Executor>& m, defer_lock_t t) noexcept : impl(m.impl) {} shared_lock(ceph::async::SharedMutex<Executor>& m, try_to_lock_t t) : impl(m.impl), locked(impl->try_lock_shared()) {} shared_lock(ceph::async::SharedMutex<Executor>& m, adopt_lock_t t) noexcept : impl(m.impl), locked(true) {} ~shared_lock() { if (impl && locked) impl->unlock_shared(); } shared_lock(shared_lock&& other) noexcept : impl(std::move(other.impl)), locked(other.locked) { other.locked = false; } shared_lock& operator=(shared_lock&& other) noexcept { if (impl && locked) { impl->unlock_shared(); } impl = std::move(other.impl); locked = other.locked; other.locked = false; return *this; } void swap(shared_lock& other) noexcept { using std::swap; swap(impl, other.impl); swap(locked, other.locked); } mutex_type mutex() const noexcept { return impl; } bool owns_lock() const noexcept { return impl && locked; } explicit operator bool() const noexcept { return impl && locked; } mutex_type release() { auto result = std::move(impl); locked = false; return result; } void lock() { if (!impl) 
throw system_error(make_error_code(errc::operation_not_permitted)); if (locked) throw system_error(make_error_code(errc::resource_deadlock_would_occur)); impl->lock_shared(); locked = true; } bool try_lock() { if (!impl) throw system_error(make_error_code(errc::operation_not_permitted)); if (locked) throw system_error(make_error_code(errc::resource_deadlock_would_occur)); return locked = impl->try_lock_shared(); } void unlock() { if (!impl || !locked) throw system_error(make_error_code(errc::operation_not_permitted)); impl->unlock_shared(); locked = false; } private: mutex_type impl; bool locked{false}; }; } // namespace std
5,078
26.306452
80
h
null
ceph-main/src/common/async/detail/shared_mutex.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab /* * Ceph - scalable distributed file system * * Copyright (C) 2018 Red Hat * * This is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License version 2.1, as published by the Free Software * Foundation. See file COPYING. * */ #pragma once #include <condition_variable> #include <mutex> #include <optional> #include <shared_mutex> // for std::shared_lock #include <boost/smart_ptr/intrusive_ref_counter.hpp> #include <boost/intrusive_ptr.hpp> #include <boost/intrusive/list.hpp> #include "include/ceph_assert.h" #include "common/async/completion.h" namespace ceph::async::detail { struct LockRequest : public boost::intrusive::list_base_hook<> { virtual ~LockRequest() {} virtual void complete(boost::system::error_code ec) = 0; virtual void destroy() = 0; }; class SharedMutexImpl : public boost::intrusive_ref_counter<SharedMutexImpl> { public: ~SharedMutexImpl(); template <typename Mutex, typename CompletionToken> auto async_lock(Mutex& mtx, CompletionToken&& token); void lock(); void lock(boost::system::error_code& ec); bool try_lock(); void unlock(); template <typename Mutex, typename CompletionToken> auto async_lock_shared(Mutex& mtx, CompletionToken&& token); void lock_shared(); void lock_shared(boost::system::error_code& ec); bool try_lock_shared(); void unlock_shared(); void cancel(); private: using RequestList = boost::intrusive::list<LockRequest>; RequestList shared_queue; //< requests waiting on a shared lock RequestList exclusive_queue; //< requests waiting on an exclusive lock /// lock state encodes the number of shared lockers, or 'max' for exclusive using LockState = uint16_t; static constexpr LockState Unlocked = 0; static constexpr LockState Exclusive = std::numeric_limits<LockState>::max(); static constexpr LockState MaxShared = Exclusive - 1; LockState state = Unlocked; //< current lock state 
std::mutex mutex; //< protects lock state and wait queues void complete(RequestList&& requests, boost::system::error_code ec); }; // sync requests live on the stack and wait on a condition variable class SyncRequest : public LockRequest { std::condition_variable cond; std::optional<boost::system::error_code> ec; public: boost::system::error_code wait(std::unique_lock<std::mutex>& lock) { // return the error code once its been set cond.wait(lock, [this] { return ec; }); return *ec; } void complete(boost::system::error_code ec) override { this->ec = ec; cond.notify_one(); } void destroy() override { // nothing, SyncRequests live on the stack } }; // async requests use async::Completion to invoke a handler on its executor template <typename Mutex, template <typename> typename Lock> class AsyncRequest : public LockRequest { Mutex& mutex; //< mutex argument for lock guard public: explicit AsyncRequest(Mutex& mutex) : mutex(mutex) {} using Signature = void(boost::system::error_code, Lock<Mutex>); using LockCompletion = Completion<Signature, AsBase<AsyncRequest>>; void complete(boost::system::error_code ec) override { auto r = static_cast<LockCompletion*>(this); // pass ownership of ourselves to post(). on error, pass an empty lock post(std::unique_ptr<LockCompletion>{r}, ec, ec ? 
Lock{mutex, std::defer_lock} : Lock{mutex, std::adopt_lock}); } void destroy() override { delete static_cast<LockCompletion*>(this); } }; inline SharedMutexImpl::~SharedMutexImpl() { ceph_assert(state == Unlocked); ceph_assert(shared_queue.empty()); ceph_assert(exclusive_queue.empty()); } template <typename Mutex, typename CompletionToken> auto SharedMutexImpl::async_lock(Mutex& mtx, CompletionToken&& token) { using Request = AsyncRequest<Mutex, std::unique_lock>; using Signature = typename Request::Signature; boost::asio::async_completion<CompletionToken, Signature> init(token); auto& handler = init.completion_handler; auto ex1 = mtx.get_executor(); { std::lock_guard lock{mutex}; boost::system::error_code ec; if (state == Unlocked) { state = Exclusive; // post a successful completion auto ex2 = boost::asio::get_associated_executor(handler, ex1); auto alloc2 = boost::asio::get_associated_allocator(handler); auto b = bind_handler(std::move(handler), ec, std::unique_lock{mtx, std::adopt_lock}); ex2.post(forward_handler(std::move(b)), alloc2); } else { // create a request and add it to the exclusive list using LockCompletion = typename Request::LockCompletion; auto request = LockCompletion::create(ex1, std::move(handler), mtx); exclusive_queue.push_back(*request.release()); } } return init.result.get(); } inline void SharedMutexImpl::lock() { boost::system::error_code ec; lock(ec); if (ec) { throw boost::system::system_error(ec); } } void SharedMutexImpl::lock(boost::system::error_code& ec) { std::unique_lock lock{mutex}; if (state == Unlocked) { state = Exclusive; ec.clear(); } else { SyncRequest request; exclusive_queue.push_back(request); ec = request.wait(lock); } } inline bool SharedMutexImpl::try_lock() { std::lock_guard lock{mutex}; if (state == Unlocked) { state = Exclusive; return true; } return false; } void SharedMutexImpl::unlock() { RequestList granted; { std::lock_guard lock{mutex}; ceph_assert(state == Exclusive); if (!exclusive_queue.empty()) { // 
grant next exclusive lock auto& request = exclusive_queue.front(); exclusive_queue.pop_front(); granted.push_back(request); } else { // grant shared locks, if any state = shared_queue.size(); if (state > MaxShared) { state = MaxShared; auto end = std::next(shared_queue.begin(), MaxShared); granted.splice(granted.end(), shared_queue, shared_queue.begin(), end, MaxShared); } else { granted.splice(granted.end(), shared_queue); } } } complete(std::move(granted), boost::system::error_code{}); } template <typename Mutex, typename CompletionToken> auto SharedMutexImpl::async_lock_shared(Mutex& mtx, CompletionToken&& token) { using Request = AsyncRequest<Mutex, std::shared_lock>; using Signature = typename Request::Signature; boost::asio::async_completion<CompletionToken, Signature> init(token); auto& handler = init.completion_handler; auto ex1 = mtx.get_executor(); { std::lock_guard lock{mutex}; boost::system::error_code ec; if (exclusive_queue.empty() && state < MaxShared) { state++; auto ex2 = boost::asio::get_associated_executor(handler, ex1); auto alloc2 = boost::asio::get_associated_allocator(handler); auto b = bind_handler(std::move(handler), ec, std::shared_lock{mtx, std::adopt_lock}); ex2.post(forward_handler(std::move(b)), alloc2); } else { using LockCompletion = typename Request::LockCompletion; auto request = LockCompletion::create(ex1, std::move(handler), mtx); shared_queue.push_back(*request.release()); } } return init.result.get(); } inline void SharedMutexImpl::lock_shared() { boost::system::error_code ec; lock_shared(ec); if (ec) { throw boost::system::system_error(ec); } } void SharedMutexImpl::lock_shared(boost::system::error_code& ec) { std::unique_lock lock{mutex}; if (exclusive_queue.empty() && state < MaxShared) { state++; ec.clear(); } else { SyncRequest request; shared_queue.push_back(request); ec = request.wait(lock); } } inline bool SharedMutexImpl::try_lock_shared() { std::lock_guard lock{mutex}; if (exclusive_queue.empty() && state < MaxShared) 
{ state++; return true; } return false; } inline void SharedMutexImpl::unlock_shared() { std::lock_guard lock{mutex}; ceph_assert(state != Unlocked && state <= MaxShared); if (state == 1 && !exclusive_queue.empty()) { // grant next exclusive lock state = Exclusive; auto& request = exclusive_queue.front(); exclusive_queue.pop_front(); request.complete(boost::system::error_code{}); } else if (state == MaxShared && !shared_queue.empty() && exclusive_queue.empty()) { // grant next shared lock auto& request = shared_queue.front(); shared_queue.pop_front(); request.complete(boost::system::error_code{}); } else { state--; } } inline void SharedMutexImpl::cancel() { RequestList canceled; { std::lock_guard lock{mutex}; canceled.splice(canceled.end(), shared_queue); canceled.splice(canceled.end(), exclusive_queue); } complete(std::move(canceled), boost::asio::error::operation_aborted); } void SharedMutexImpl::complete(RequestList&& requests, boost::system::error_code ec) { while (!requests.empty()) { auto& request = requests.front(); requests.pop_front(); try { request.complete(ec); } catch (...) { // clean up any remaining completions and rethrow requests.clear_and_dispose([] (LockRequest *r) { r->destroy(); }); throw; } } } } // namespace ceph::async::detail
9,401
27.752294
79
h
null
ceph-main/src/common/detail/construct_suspended.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab /* * Ceph - scalable distributed file system * * Copyright (C) 2018 Red Hat <contact@redhat.com> * Author: Adam C. Emerson <aemerson@redhat.com> * * This is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License version 2.1, as published by the Free Software * Foundation. See file COPYING. * */ #ifndef CEPH_COMMON_DETAIL_CONSTRUCT_SUSPENDED_H #define CEPH_COMMON_DETAIL_CONSTRUCT_SUSPENDED_H namespace ceph { struct construct_suspended_t { }; inline constexpr construct_suspended_t construct_suspended { }; } #endif // CEPH_COMMON_DETAIL_CONSTRUCT_SUSPENDED_H
741
28.68
70
h
null
ceph-main/src/common/win32/registry.h
/* * Ceph - scalable distributed file system * * Copyright (C) 2019 SUSE LINUX GmbH * * This is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License version 2.1, as published by the Free Software * Foundation. See file COPYING. * */ #include "include/compat.h" #include "common/ceph_context.h" class RegistryKey { public: RegistryKey(CephContext *cct_, HKEY hRootKey, LPCTSTR strKey, bool create_value); ~RegistryKey(); static remove(CephContext *cct_, HKEY hRootKey, LPCTSTR strKey); int flush(); int set(LPCTSTR lpValue, DWORD data); int set(LPCTSTR lpValue, std::string data); int get(LPCTSTR lpValue, bool& value); int get(LPCTSTR lpValue, DWORD& value); int get(LPCTSTR lpValue, std::string& value); HKEY hKey = NULL; bool missingKey = false; private: CephContext *cct; };
886
21.74359
83
h
null
ceph-main/src/common/win32/service.h
/* * Ceph - scalable distributed file system * * Copyright (C) 2019 SUSE LINUX GmbH * * This is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License version 2.1, as published by the Free Software * Foundation. See file COPYING. * */ #include "include/compat.h" #include "common/ceph_context.h" class ServiceBase { public: ServiceBase(CephContext *cct_); virtual ~ServiceBase() {}; static int initialize(ServiceBase *service); protected: static void run(); static void control_handler(DWORD request); void shutdown(bool ignore_errors = false); void stop(); void set_status(DWORD current_state, DWORD exit_code = NO_ERROR); /* Subclasses should implement the following service hooks. */ virtual int run_hook() = 0; /* Invoked when the service is requested to stop. */ virtual int stop_hook() = 0; /* Invoked when the system is shutting down. */ virtual int shutdown_hook() = 0; CephContext *cct; private: /* A handle used when reporting the current status. */ SERVICE_STATUS_HANDLE hstatus; /* The current service status. */ SERVICE_STATUS status; /* singleton service instance */ static ServiceBase *s_service; };
1,240
23.82
67
h
null
ceph-main/src/compressor/CompressionPlugin.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab /* * Ceph distributed storage system * * Copyright (C) 2015 Mirantis, Inc. * * Author: Alyona Kiseleva <akiselyova@mirantis.com> * * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * */ #ifndef COMPRESSION_PLUGIN_H #define COMPRESSION_PLUGIN_H #include <iosfwd> #include <iostream> #include "common/PluginRegistry.h" #include "include/common_fwd.h" #include "Compressor.h" namespace ceph { class CompressionPlugin : public Plugin { public: TOPNSPC::CompressorRef compressor; explicit CompressionPlugin(CephContext *cct) : Plugin(cct) {} ~CompressionPlugin() override {} virtual int factory(TOPNSPC::CompressorRef *cs, std::ostream *ss) = 0; virtual const char* name() {return "CompressionPlugin";} }; } #endif
1,091
21.285714
71
h
null
ceph-main/src/compressor/QatAccel.h
/* * Ceph - scalable distributed file system * * Copyright (C) 2018 Intel Corporation * * Author: Qiaowei Ren <qiaowei.ren@intel.com> * * This is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License version 2.1, as published by the Free Software * Foundation. See file COPYING. * */ #ifndef CEPH_QATACCEL_H #define CEPH_QATACCEL_H #include <condition_variable> #include <memory> #include <mutex> #include <optional> #include <vector> #include "include/buffer.h" extern "C" struct QzSession_S; // typedef struct QzSession_S QzSession_T; struct QzSessionDeleter { void operator() (struct QzSession_S *session); }; class QatAccel { public: using session_ptr = std::unique_ptr<struct QzSession_S, QzSessionDeleter>; QatAccel(); ~QatAccel(); bool init(const std::string &alg); int compress(const bufferlist &in, bufferlist &out, std::optional<int32_t> &compressor_message); int decompress(const bufferlist &in, bufferlist &out, std::optional<int32_t> compressor_message); int decompress(bufferlist::const_iterator &p, size_t compressed_len, bufferlist &dst, std::optional<int32_t> compressor_message); private: // get a session from the pool or create a new one. returns null if session init fails session_ptr get_session(); friend struct cached_session_t; std::vector<session_ptr> sessions; std::mutex mutex; std::string alg_name; }; #endif
1,455
25.472727
131
h
null
ceph-main/src/compressor/brotli/CompressionPluginBrotli.h
/* * Ceph - scalable distributed file system * * Copyright (C) 2017 BI SHUN KE <aionshun@livemail.tw> * * This is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License version 2.1, as published by the Free Software * Foundation. See file COPYING. * */ #ifndef CEPH_COMPRESSION_PLUGIN_BROTLI_H #define CEPH_COMPRESSION_PLUGIN_BROTLI_H #include "ceph_ver.h" #include "compressor/CompressionPlugin.h" #include "BrotliCompressor.h" class CompressionPluginBrotli : public CompressionPlugin { public: explicit CompressionPluginBrotli(CephContext *cct) : CompressionPlugin(cct) {} virtual int factory(CompressorRef *cs, std::ostream *ss) { if (compressor == nullptr) { BrotliCompressor *interface = new BrotliCompressor(); compressor = CompressorRef(interface); } *cs = compressor; return 0; } }; #endif
916
23.783784
77
h
null
ceph-main/src/compressor/lz4/CompressionPluginLZ4.h
/* * Ceph - scalable distributed file system * * Copyright (C) 2017 XSKY Inc. * * Author: Haomai Wang <haomaiwang@gmail.com> * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * */ #ifndef CEPH_COMPRESSION_PLUGIN_LZ4_H #define CEPH_COMPRESSION_PLUGIN_LZ4_H // ----------------------------------------------------------------------------- #include "ceph_ver.h" #include "compressor/CompressionPlugin.h" #include "LZ4Compressor.h" // ----------------------------------------------------------------------------- class CompressionPluginLZ4 : public ceph::CompressionPlugin { public: explicit CompressionPluginLZ4(CephContext* cct) : CompressionPlugin(cct) {} int factory(CompressorRef *cs, std::ostream *ss) override { if (compressor == 0) { LZ4Compressor *interface = new LZ4Compressor(cct); compressor = CompressorRef(interface); } *cs = compressor; return 0; } }; #endif
1,138
26.119048
80
h
null
ceph-main/src/compressor/lz4/LZ4Compressor.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
 * Ceph - scalable distributed file system
 *
 * Copyright (C) 2017 Haomai Wang <haomaiwang@gmail.com>
 *
 * This is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License version 2.1, as published by the Free Software
 * Foundation.  See file COPYING.
 *
 */

#ifndef CEPH_LZ4COMPRESSOR_H
#define CEPH_LZ4COMPRESSOR_H

#include <optional>

#include <lz4.h>

#include "compressor/Compressor.h"
#include "include/buffer.h"
#include "include/encoding.h"
#include "common/config.h"


// LZ4 backend for the Compressor interface.
//
// On-wire framing produced by compress() and consumed by decompress():
//   u32 segment count, then per segment: compressed payload bytes are
//   appended, interleaved with (u32 uncompressed len, u32 compressed len)
//   headers — one segment per buffer fragment of the source bufferlist.
class LZ4Compressor : public Compressor {
 public:
  LZ4Compressor(CephContext* cct) : Compressor(COMP_ALG_LZ4, "lz4") {
#ifdef HAVE_QATZIP
    // Prefer QAT hardware offload when enabled in conf and initialization
    // for "lz4" succeeds; otherwise fall back to the software path below.
    if (cct->_conf->qat_compressor_enabled && qat_accel.init("lz4"))
      qat_enabled = true;
    else
      qat_enabled = false;
#endif
  }

  // Compress src into dst.  Returns 0 on success, -1 on an LZ4 error.
  // compressor_message is unused by this backend (software path).
  int compress(const ceph::buffer::list &src, ceph::buffer::list &dst, std::optional<int32_t> &compressor_message) override {
    // older versions of liblz4 introduce bit errors when compressing
    // fragmented buffers. this was fixed in lz4 commit
    // af127334670a5e7b710bbd6adb71aa7c3ef0cd72, which first
    // appeared in v1.8.2.
    //
    // workaround: rebuild if not contiguous.
    if (!src.is_contiguous()) {
      ceph::buffer::list new_src = src;
      new_src.rebuild();
      return compress(new_src, dst, compressor_message);
    }
#ifdef HAVE_QATZIP
    if (qat_enabled)
      return qat_accel.compress(src, dst, compressor_message);
#endif
    // LZ4_compressBound() gives the worst-case output size for the input.
    ceph::buffer::ptr outptr = ceph::buffer::create_small_page_aligned(
      LZ4_compressBound(src.length()));
    LZ4_stream_t lz4_stream;
    LZ4_resetStream(&lz4_stream);

    using ceph::encode;
    auto p = src.begin();
    size_t left = src.length();
    int pos = 0;
    const char *data;
    unsigned num = src.get_num_buffers();
    // frame header: number of segments to expect on decompress
    encode((uint32_t)num, dst);
    while (left) {
      // get_ptr_and_advance yields one contiguous fragment at a time
      uint32_t origin_len = p.get_ptr_and_advance(left, &data);
      // streaming mode keeps the dictionary across fragments
      int compressed_len = LZ4_compress_fast_continue(
	&lz4_stream, data, outptr.c_str()+pos, origin_len,
	outptr.length()-pos, 1);
      if (compressed_len <= 0)
	return -1;
      pos += compressed_len;
      left -= origin_len;
      // per-segment header: (uncompressed len, compressed len)
      encode(origin_len, dst);
      encode((uint32_t)compressed_len, dst);
    }
    ceph_assert(p.end());

    // append only the bytes actually produced
    dst.append(outptr, 0, pos);
    return 0;
  }

  // Convenience overload: decompress a whole bufferlist.
  int decompress(const ceph::buffer::list &src, ceph::buffer::list &dst, std::optional<int32_t> compressor_message) override {
#ifdef HAVE_QATZIP
    if (qat_enabled)
      return qat_accel.decompress(src, dst, compressor_message);
#endif
    auto i = std::cbegin(src);
    return decompress(i, src.length(), dst, compressor_message);
  }

  // Decompress compressed_len bytes starting at iterator p.
  // Returns 0 on success, -1 on an LZ4 decode error, -2 if a segment
  // decoded to an unexpected length.
  int decompress(ceph::buffer::list::const_iterator &p,
		 size_t compressed_len,
		 ceph::buffer::list &dst,
		 std::optional<int32_t> compressor_message) override {
#ifdef HAVE_QATZIP
    if (qat_enabled)
      return qat_accel.decompress(p, compressed_len, dst, compressor_message);
#endif
    using ceph::decode;
    uint32_t count;
    decode(count, p);
    // pair = (uncompressed len, compressed len) for each segment
    std::vector<std::pair<uint32_t, uint32_t> > compressed_pairs(count);
    uint32_t total_origin = 0;
    for (auto& [dst_size, src_size] : compressed_pairs) {
      decode(dst_size, p);
      decode(src_size, p);
      total_origin += dst_size;
    }
    // subtract the frame header + per-segment headers we just consumed
    compressed_len -= (sizeof(uint32_t) + sizeof(uint32_t) * count * 2);

    ceph::buffer::ptr dstptr(total_origin);
    LZ4_streamDecode_t lz4_stream_decode;
    LZ4_setStreamDecode(&lz4_stream_decode, nullptr, 0);

    // LZ4 streaming decode needs the compressed payload contiguous;
    // copy it out only if the current fragment doesn't already hold it all.
    ceph::buffer::ptr cur_ptr = p.get_current_ptr();
    ceph::buffer::ptr *ptr = &cur_ptr;
    std::optional<ceph::buffer::ptr> data_holder;
    if (compressed_len != cur_ptr.length()) {
      data_holder.emplace(compressed_len);
      p.copy_deep(compressed_len, *data_holder);
      ptr = &*data_holder;
    }

    char *c_in = ptr->c_str();
    char *c_out = dstptr.c_str();
    for (unsigned i = 0; i < count; ++i) {
      int r = LZ4_decompress_safe_continue(
          &lz4_stream_decode, c_in, c_out, compressed_pairs[i].second, compressed_pairs[i].first);
      if (r == (int)compressed_pairs[i].first) {
	c_in += compressed_pairs[i].second;
	c_out += compressed_pairs[i].first;
      } else if (r < 0) {
	return -1;
      } else {
	// decoded, but to a length that contradicts the segment header
	return -2;
      }
    }
    dst.push_back(std::move(dstptr));
    return 0;
  }
};

#endif
4,494
29.371622
126
h
null
ceph-main/src/compressor/snappy/CompressionPluginSnappy.h
/* * Ceph - scalable distributed file system * * Copyright (C) 2015 Mirantis, Inc. * * Author: Alyona Kiseleva <akiselyova@mirantis.com> * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * */ #ifndef CEPH_COMPRESSION_PLUGIN_SNAPPY_H #define CEPH_COMPRESSION_PLUGIN_SNAPPY_H // ----------------------------------------------------------------------------- #include "compressor/CompressionPlugin.h" #include "SnappyCompressor.h" // ----------------------------------------------------------------------------- class CompressionPluginSnappy : public ceph::CompressionPlugin { public: explicit CompressionPluginSnappy(CephContext* cct) : CompressionPlugin(cct) {} int factory(CompressorRef *cs, std::ostream *ss) override { if (compressor == 0) { SnappyCompressor *interface = new SnappyCompressor(cct); compressor = CompressorRef(interface); } *cs = compressor; return 0; } }; #endif
1,173
26.302326
80
h
null
ceph-main/src/compressor/snappy/SnappyCompressor.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
/*
 * Ceph - scalable distributed file system
 *
 * Copyright (C) 2015 Haomai Wang <haomaiwang@gmail.com>
 *
 * This is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License version 2.1, as published by the Free Software
 * Foundation.  See file COPYING.
 *
 */

#ifndef CEPH_SNAPPYCOMPRESSOR_H
#define CEPH_SNAPPYCOMPRESSOR_H

#include <snappy.h>
#include <snappy-sinksource.h>
#include "common/config.h"
#include "compressor/Compressor.h"
#include "include/buffer.h"

// Adapter exposing a (possibly fragmented) ceph::bufferlist as a
// snappy::Source, so snappy can stream over it without a flattening copy.
class CEPH_BUFFER_API BufferlistSource : public snappy::Source {
  ceph::bufferlist::const_iterator pb;  // read cursor into the bufferlist
  size_t remaining;                     // bytes this source will still yield

 public:
  explicit BufferlistSource(ceph::bufferlist::const_iterator _pb, size_t _input_len)
    : pb(_pb),
      remaining(_input_len) {
    // never promise more than the iterator can actually deliver
    remaining = std::min(remaining, (size_t)pb.get_remaining());
  }
  size_t Available() const override {
    return remaining;
  }
  // Return a pointer to the next contiguous run of bytes; *len is how many
  // are contiguous.  Peeking must not advance pb, hence the local copy.
  const char *Peek(size_t *len) override {
    const char *data = NULL;
    *len = 0;
    size_t avail = Available();
    if (avail) {
      auto ptmp = pb;
      *len = ptmp.get_ptr_and_advance(avail, &data);
    }
    return data;
  }
  void Skip(size_t n) override {
    ceph_assert(n <= remaining);
    pb += n;
    remaining -= n;
  }

  // Current position; used to advance the caller's iterator after a
  // successful decompress.
  ceph::bufferlist::const_iterator get_pos() const {
    return pb;
  }
};

// Snappy backend for the Compressor interface.
class SnappyCompressor : public Compressor {
 public:
  SnappyCompressor(CephContext* cct) : Compressor(COMP_ALG_SNAPPY, "snappy") {
#ifdef HAVE_QATZIP
    // Prefer QAT hardware offload when enabled in conf and initialization
    // for "snappy" succeeds; otherwise use the software path below.
    if (cct->_conf->qat_compressor_enabled && qat_accel.init("snappy"))
      qat_enabled = true;
    else
      qat_enabled = false;
#endif
  }

  // Compress src into dst.  Returns 0 (snappy compression cannot fail
  // given a sufficiently sized output, which MaxCompressedLength provides).
  int compress(const ceph::bufferlist &src, ceph::bufferlist &dst, std::optional<int32_t> &compressor_message) override {
#ifdef HAVE_QATZIP
    if (qat_enabled)
      return qat_accel.compress(src, dst, compressor_message);
#endif
    BufferlistSource source(const_cast<ceph::bufferlist&>(src).begin(), src.length());
    // MaxCompressedLength() is the worst-case output size for this input
    ceph::bufferptr ptr = ceph::buffer::create_small_page_aligned(
      snappy::MaxCompressedLength(src.length()));
    snappy::UncheckedByteArraySink sink(ptr.c_str());
    snappy::Compress(&source, &sink);
    // CurrentDestination() - start = bytes actually written
    dst.append(ptr, 0, sink.CurrentDestination() - ptr.c_str());
    return 0;
  }

  // Convenience overload: decompress a whole bufferlist.
  int decompress(const ceph::bufferlist &src, ceph::bufferlist &dst, std::optional<int32_t> compressor_message) override {
#ifdef HAVE_QATZIP
    if (qat_enabled)
      return qat_accel.decompress(src, dst, compressor_message);
#endif
    auto i = src.begin();
    return decompress(i, src.length(), dst, compressor_message);
  }

  // Decompress compressed_len bytes starting at iterator p.
  // Returns 0 on success, -1 if the length header is unreadable,
  // -2 if the payload is corrupt.
  int decompress(ceph::bufferlist::const_iterator &p,
		 size_t compressed_len,
		 ceph::bufferlist &dst,
		 std::optional<int32_t> compressor_message) override {
#ifdef HAVE_QATZIP
    if (qat_enabled)
      return qat_accel.decompress(p, compressed_len, dst, compressor_message);
#endif
    // first pass reads only the embedded uncompressed-length header
    BufferlistSource source_1(p, compressed_len);
    uint32_t res_len = 0;
    if (!snappy::GetUncompressedLength(&source_1, &res_len)) {
      return -1;
    }
    // second pass does the actual decode from the original position
    BufferlistSource source_2(p, compressed_len);
    ceph::bufferptr ptr(res_len);
    if (snappy::RawUncompress(&source_2, ptr.c_str())) {
      p = source_2.get_pos();
      dst.append(ptr);
      return 0;
    }
    return -2;
  }
};

#endif
3,386
27.948718
122
h
null
ceph-main/src/compressor/zlib/CompressionPluginZlib.h
/* * Ceph - scalable distributed file system * * Copyright (C) 2015 Mirantis, Inc. * * Author: Alyona Kiseleva <akiselyova@mirantis.com> * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * */ #ifndef CEPH_COMPRESSION_PLUGIN_ZLIB_H #define CEPH_COMPRESSION_PLUGIN_ZLIB_H // ----------------------------------------------------------------------------- #include "arch/probe.h" #include "arch/intel.h" #include "arch/arm.h" #include "common/ceph_context.h" #include "compressor/CompressionPlugin.h" #include "ZlibCompressor.h" // ----------------------------------------------------------------------------- class CompressionPluginZlib : public ceph::CompressionPlugin { public: bool has_isal = false; explicit CompressionPluginZlib(CephContext *cct) : CompressionPlugin(cct) {} int factory(CompressorRef *cs, std::ostream *ss) override { bool isal = false; #if defined(__i386__) || defined(__x86_64__) // other arches or lack of support result in isal = false if (cct->_conf->compressor_zlib_isal) { ceph_arch_probe(); isal = (ceph_arch_intel_pclmul && ceph_arch_intel_sse41); } #elif defined(__aarch64__) if (cct->_conf->compressor_zlib_isal) { ceph_arch_probe(); isal = (ceph_arch_aarch64_pmull && ceph_arch_neon); } #endif if (compressor == 0 || has_isal != isal) { compressor = std::make_shared<ZlibCompressor>(cct, isal); has_isal = isal; } *cs = compressor; return 0; } }; #endif
1,726
27.311475
80
h
null
ceph-main/src/compressor/zstd/CompressionPluginZstd.h
/* * Ceph - scalable distributed file system * * Copyright (C) 2015 Mirantis, Inc. * * Author: Alyona Kiseleva <akiselyova@mirantis.com> * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * */ #ifndef CEPH_COMPRESSION_PLUGIN_ZSTD_H #define CEPH_COMPRESSION_PLUGIN_ZSTD_H // ----------------------------------------------------------------------------- #include "ceph_ver.h" #include "compressor/CompressionPlugin.h" #include "ZstdCompressor.h" // ----------------------------------------------------------------------------- class CompressionPluginZstd : public ceph::CompressionPlugin { public: explicit CompressionPluginZstd(CephContext* cct) : CompressionPlugin(cct) {} int factory(CompressorRef *cs, std::ostream *ss) override { if (compressor == 0) { ZstdCompressor *interface = new ZstdCompressor(cct); compressor = CompressorRef(interface); } *cs = compressor; return 0; } }; #endif
1,181
25.863636
80
h
null
ceph-main/src/compressor/zstd/ZstdCompressor.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab /* * Ceph - scalable distributed file system * * Copyright (C) 2015 Haomai Wang <haomaiwang@gmail.com> * * This is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License version 2.1, as published by the Free Software * Foundation. See file COPYING. * */ #ifndef CEPH_ZSTDCOMPRESSOR_H #define CEPH_ZSTDCOMPRESSOR_H #define ZSTD_STATIC_LINKING_ONLY #include "zstd/lib/zstd.h" #include "include/buffer.h" #include "include/encoding.h" #include "compressor/Compressor.h" class ZstdCompressor : public Compressor { public: ZstdCompressor(CephContext *cct) : Compressor(COMP_ALG_ZSTD, "zstd"), cct(cct) {} int compress(const ceph::buffer::list &src, ceph::buffer::list &dst, std::optional<int32_t> &compressor_message) override { ZSTD_CStream *s = ZSTD_createCStream(); ZSTD_initCStream_srcSize(s, cct->_conf->compressor_zstd_level, src.length()); auto p = src.begin(); size_t left = src.length(); size_t const out_max = ZSTD_compressBound(left); ceph::buffer::ptr outptr = ceph::buffer::create_small_page_aligned(out_max); ZSTD_outBuffer_s outbuf; outbuf.dst = outptr.c_str(); outbuf.size = outptr.length(); outbuf.pos = 0; while (left) { ceph_assert(!p.end()); struct ZSTD_inBuffer_s inbuf; inbuf.pos = 0; inbuf.size = p.get_ptr_and_advance(left, (const char**)&inbuf.src); left -= inbuf.size; ZSTD_EndDirective const zed = (left==0) ? 
ZSTD_e_end : ZSTD_e_continue; size_t r = ZSTD_compressStream2(s, &outbuf, &inbuf, zed); if (ZSTD_isError(r)) { return -EINVAL; } } ceph_assert(p.end()); ZSTD_freeCStream(s); // prefix with decompressed length ceph::encode((uint32_t)src.length(), dst); dst.append(outptr, 0, outbuf.pos); return 0; } int decompress(const ceph::buffer::list &src, ceph::buffer::list &dst, std::optional<int32_t> compressor_message) override { auto i = std::cbegin(src); return decompress(i, src.length(), dst, compressor_message); } int decompress(ceph::buffer::list::const_iterator &p, size_t compressed_len, ceph::buffer::list &dst, std::optional<int32_t> compressor_message) override { if (compressed_len < 4) { return -1; } compressed_len -= 4; uint32_t dst_len; ceph::decode(dst_len, p); ceph::buffer::ptr dstptr(dst_len); ZSTD_outBuffer_s outbuf; outbuf.dst = dstptr.c_str(); outbuf.size = dstptr.length(); outbuf.pos = 0; ZSTD_DStream *s = ZSTD_createDStream(); ZSTD_initDStream(s); while (compressed_len > 0) { if (p.end()) { return -1; } ZSTD_inBuffer_s inbuf; inbuf.pos = 0; inbuf.size = p.get_ptr_and_advance(compressed_len, (const char**)&inbuf.src); ZSTD_decompressStream(s, &outbuf, &inbuf); compressed_len -= inbuf.size; } ZSTD_freeDStream(s); dst.append(dstptr, 0, outbuf.pos); return 0; } private: CephContext *const cct; }; #endif
3,132
28.009259
126
h
null
ceph-main/src/crimson/admin/admin_socket.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab
#pragma once

/**
  A Crimson-wise version of the src/common/admin_socket.h

  Note: assumed to be running on a single core.
*/
#include <map>
#include <string>
#include <string_view>

#include <seastar/core/future.hh>
#include <seastar/core/gate.hh>
#include <seastar/core/iostream.hh>
#include <seastar/core/shared_mutex.hh>
#include <seastar/core/shared_ptr.hh>
#include <seastar/net/api.hh>

#include "common/cmdparse.h"
#include "include/buffer.h"
#include "crimson/net/Fwd.h"

class MCommand;

namespace crimson::admin {

class AdminSocket;

// Outcome of executing an admin-socket / tell command: a status code,
// an optional error string, and the formatted output payload.
struct tell_result_t {
  int ret = 0;
  std::string err;
  ceph::bufferlist out;
  tell_result_t() = default;
  tell_result_t(int ret, std::string&& err);
  tell_result_t(int ret, std::string&& err, ceph::bufferlist&& out);
  /**
   * create a \c tell_result_t indicating the successful completion
   * of command
   *
   * \param formatter the content of formatter will be flushed to the
   *        output buffer
   */
  tell_result_t(std::unique_ptr<Formatter> formatter);
};

/**
 * An abstract class to be inherited by implementations of asock hooks
 */
class AdminSocketHook {
 public:
  AdminSocketHook(std::string_view prefix,
		  std::string_view desc,
		  std::string_view help) :
    prefix{prefix}, desc{desc}, help{help}
  {}
  /**
   * handle command defined by cmdmap
   *
   * \param cmdmap dictionary holding the named parameters
   * \param format the expected format of the output
   * \param input the binary input of the command
   * \pre \c cmdmap should be validated with \c desc
   * \retval an instance of \c tell_result_t
   * \note a negative \c ret should be set to indicate that the hook fails to
   *       fulfill the command either because of an invalid input or other
   *       failures. in that case, a brief reason of the failure should
   *       noted in \c err in the returned value
   */
  virtual seastar::future<tell_result_t> call(const cmdmap_t& cmdmap,
					      std::string_view format,
					      ceph::bufferlist&& input) const = 0;
  virtual ~AdminSocketHook() {}
  // identifying command string, expected call syntax, and help text
  const std::string_view prefix;
  const std::string_view desc;
  const std::string_view help;
};

// Single-core admin socket server: accepts connections on a UNIX socket,
// parses command lines, and dispatches them to registered hooks.
class AdminSocket : public seastar::enable_lw_shared_from_this<AdminSocket> {
 public:
  AdminSocket() = default;
  ~AdminSocket() = default;

  AdminSocket(const AdminSocket&) = delete;
  AdminSocket& operator=(const AdminSocket&) = delete;
  AdminSocket(AdminSocket&&) = delete;
  AdminSocket& operator=(AdminSocket&&) = delete;

  /**
   * create the async Seastar thread that handles asok commands arriving
   * over the socket.
   */
  seastar::future<> start(const std::string& path);

  seastar::future<> stop();

  /**
   * register an admin socket hook
   *
   * Commands (APIs) are registered under a command string. Incoming
   * commands are split by spaces and matched against the longest
   * registered command. For example, if 'foo' and 'foo bar' are
   * registered, and an incoming command is 'foo bar baz', it is
   * matched with 'foo bar', while 'foo fud' will match 'foo'.
   *
   * \param hook a hook which includes its identifying command string, the
   *        expected call syntax, and some help text.
   *
   * A note regarding the help text: if empty, command will not be
   * included in 'help' output.
   */
  void register_command(std::unique_ptr<AdminSocketHook>&& hook);

  /**
   * Registering the APIs that are served directly by the admin_socket server.
   */
  void register_admin_commands();

  /**
   * handle a command message by replying an MCommandReply with the same tid
   *
   * \param conn connection over which the incoming command message is received
   * \param m message carrying the command vector and optional input buffer
   */
  seastar::future<> handle_command(crimson::net::ConnectionRef conn,
				   boost::intrusive_ptr<MCommand> m);

 private:
  /**
   * the result of analyzing an incoming command, and locating it in
   * the registered APIs collection.
   */
  struct parsed_command_t {
    cmdmap_t params;
    std::string format;
    const AdminSocketHook& hook;
  };
  // and the shorthand:
  seastar::future<> handle_client(seastar::input_stream<char>& inp,
                                  seastar::output_stream<char>& out);

  seastar::future<> execute_line(std::string cmdline,
                                 seastar::output_stream<char>& out);

  seastar::future<> finalize_response(seastar::output_stream<char>& out,
                                      ceph::bufferlist&& msgs);

  seastar::future<tell_result_t> execute_command(const std::vector<std::string>& cmd,
						 ceph::bufferlist&& buf);

  // the accept loop task and its sockets; present only while running
  std::optional<seastar::future<>> task;
  std::optional<seastar::server_socket> server_sock;
  std::optional<seastar::connected_socket> connected_sock;

  /**
   * stopping incoming ASOK requests at shutdown
   */
  seastar::gate stop_gate;

  /**
   * parse the incoming command vector, find a registered hook by looking up by
   * its prefix, perform sanity checks on the parsed parameters with the hook's
   * command description
   *
   * \param cmd a vector of string which presents a command
   * \retval on success, a \c parsed_command_t is returned, tell_result_t with
   *         detailed error messages is returned otherwise
   */
  std::variant<parsed_command_t, tell_result_t>
  parse_cmd(const std::vector<std::string>& cmd);

  using hooks_t = std::map<std::string_view, std::unique_ptr<AdminSocketHook>>;
  hooks_t hooks;

 public:
  /**
   * iterator support
   */
  hooks_t::const_iterator begin() const {
    return hooks.cbegin();
  }
  hooks_t::const_iterator end() const {
    return hooks.cend();
  }
};

}  // namespace crimson::admin
5,795
29.829787
85
h
null
ceph-main/src/crimson/admin/osd_admin.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #pragma once #include <memory> namespace crimson::admin { class AdminSocketHook; class AssertAlwaysHook; class DumpMetricsHook; class DumpPGStateHistory; class DumpPerfCountersHook; class FlushPgStatsHook; class InjectDataErrorHook; class InjectMDataErrorHook; class OsdStatusHook; class SendBeaconHook; class DumpInFlightOpsHook; class DumpHistoricOpsHook; class DumpSlowestHistoricOpsHook; class DumpRecoveryReservationsHook; template<class Hook, class... Args> std::unique_ptr<AdminSocketHook> make_asok_hook(Args&&... args); } // namespace crimson::admin
664
21.931034
70
h
null
ceph-main/src/crimson/auth/AuthClient.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #pragma once #include <cstdint> #include <string> #include <tuple> #include <vector> #include "include/buffer_fwd.h" #include "crimson/net/Fwd.h" class CryptoKey; namespace crimson::auth { class error : public std::logic_error { public: using std::logic_error::logic_error; }; using method_t = uint32_t; // TODO: revisit interfaces for non-dummy implementations class AuthClient { public: virtual ~AuthClient() {} struct auth_request_t { method_t auth_method; std::vector<uint32_t> preferred_modes; ceph::bufferlist auth_bl; }; /// Build an authentication request to begin the handshake /// /// @throw auth::error if unable to build the request virtual auth_request_t get_auth_request(crimson::net::Connection &conn, AuthConnectionMeta &auth_meta) = 0; /// Handle server's request to continue the handshake /// /// @throw auth::error if unable to build the request virtual ceph::bufferlist handle_auth_reply_more( crimson::net::Connection &conn, AuthConnectionMeta &auth_meta, const ceph::bufferlist& bl) = 0; /// Handle server's indication that authentication succeeded /// /// @return 0 if authenticated, a negative number otherwise virtual int handle_auth_done( crimson::net::Connection &conn, AuthConnectionMeta &auth_meta, uint64_t global_id, uint32_t con_mode, const bufferlist& bl) = 0; /// Handle server's indication that the previous auth attempt failed /// /// @return 0 if will try next auth method, a negative number if we have no /// more options virtual int handle_auth_bad_method( crimson::net::Connection &conn, AuthConnectionMeta &auth_meta, uint32_t old_auth_method, int result, const std::vector<uint32_t>& allowed_methods, const std::vector<uint32_t>& allowed_modes) = 0; }; } // namespace crimson::auth
1,959
26.222222
77
h
null
ceph-main/src/crimson/auth/AuthServer.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #pragma once #include <cstdint> #include <utility> #include <vector> #include "crimson/net/Fwd.h" struct AuthAuthorizeHandler; namespace crimson::auth { class AuthServer { public: virtual ~AuthServer() {} // Get authentication methods and connection modes for the given peer type virtual std::pair<std::vector<uint32_t>, std::vector<uint32_t>> get_supported_auth_methods(int peer_type) = 0; // Get support connection modes for the given peer type and auth method virtual uint32_t pick_con_mode( int peer_type, uint32_t auth_method, const std::vector<uint32_t>& preferred_modes) = 0; // return an AuthAuthorizeHandler for the given peer type and auth method virtual AuthAuthorizeHandler* get_auth_authorize_handler( int peer_type, int auth_method) = 0; // Handle an authentication request on an incoming connection virtual int handle_auth_request( crimson::net::Connection &conn, AuthConnectionMeta &auth_meta, bool more, //< true if this is not the first part of the handshake uint32_t auth_method, const bufferlist& bl, uint64_t *p_peer_global_id, bufferlist *reply) = 0; }; } // namespace crimson::auth
1,288
28.976744
80
h
null
ceph-main/src/crimson/auth/DummyAuth.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #include "AuthClient.h" #include "AuthServer.h" namespace crimson::auth { class DummyAuthClientServer : public AuthClient, public AuthServer { public: DummyAuthClientServer() {} // client std::pair<std::vector<uint32_t>, std::vector<uint32_t>> get_supported_auth_methods(int peer_type) final { return {{CEPH_AUTH_NONE}, {CEPH_AUTH_NONE}}; } uint32_t pick_con_mode(int peer_type, uint32_t auth_method, const std::vector<uint32_t>& preferred_modes) final { ceph_assert(auth_method == CEPH_AUTH_NONE); ceph_assert(preferred_modes.size() && preferred_modes[0] == CEPH_CON_MODE_CRC); return CEPH_CON_MODE_CRC; } AuthAuthorizeHandler* get_auth_authorize_handler(int peer_type, int auth_method) final { return nullptr; } AuthClient::auth_request_t get_auth_request( crimson::net::Connection &conn, AuthConnectionMeta &auth_meta) override { return {CEPH_AUTH_NONE, {CEPH_CON_MODE_CRC}, {}}; } ceph::bufferlist handle_auth_reply_more( crimson::net::Connection &conn, AuthConnectionMeta &auth_meta, const bufferlist& bl) override { ceph_abort(); } int handle_auth_done( crimson::net::Connection &conn, AuthConnectionMeta &auth_meta, uint64_t global_id, uint32_t con_mode, const bufferlist& bl) override { return 0; } int handle_auth_bad_method( crimson::net::Connection &conn, AuthConnectionMeta &auth_meta, uint32_t old_auth_method, int result, const std::vector<uint32_t>& allowed_methods, const std::vector<uint32_t>& allowed_modes) override { ceph_abort(); } // server int handle_auth_request( crimson::net::Connection &conn, AuthConnectionMeta &auth_meta, bool more, uint32_t auth_method, const bufferlist& bl, uint64_t *p_peer_global_id, bufferlist *reply) override { return 1; } }; } // namespace crimson::auth
2,053
24.675
70
h
null
ceph-main/src/crimson/common/buffer_io.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #pragma once #include <seastar/core/future.hh> #include <seastar/core/file-types.hh> #include "include/buffer_fwd.h" namespace crimson { seastar::future<> write_file(ceph::buffer::list&& bl, seastar::sstring fn, seastar::file_permissions= // 0644 (seastar::file_permissions::user_read | seastar::file_permissions::user_write | seastar::file_permissions::group_read | seastar::file_permissions::others_read)); seastar::future<seastar::temporary_buffer<char>> read_file(const seastar::sstring fn); }
803
35.545455
75
h
null
ceph-main/src/crimson/common/condition_variable.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*- // vim: ts=8 sw=2 smarttab #pragma once #include <seastar/core/future.hh> #include <seastar/core/condition-variable.hh> #include <seastar/core/loop.hh> #include "crimson/common/interruptible_future.h" namespace crimson { class condition_variable : public seastar::condition_variable { public: template <typename Pred, typename Func> auto wait( Pred&& pred, Func&& action) noexcept { using func_result_t = std::invoke_result_t<Func>; using intr_errorator_t = typename func_result_t::interrupt_errorator_type; using intr_cond_t = typename func_result_t::interrupt_cond_type; using interruptor = crimson::interruptible::interruptor<intr_cond_t>; return interruptor::repeat( [this, pred=std::forward<Pred>(pred), action=std::forward<Func>(action)]() -> typename intr_errorator_t::template future<seastar::stop_iteration> { if (!pred()) { return seastar::condition_variable::wait().then([] { return seastar::make_ready_future< seastar::stop_iteration>(seastar::stop_iteration::no); }); } else { return action().si_then([] { return seastar::make_ready_future< seastar::stop_iteration>(seastar::stop_iteration::yes); }); } }); } }; } // namespace crimson
1,321
29.045455
78
h
null
ceph-main/src/crimson/common/config_proxy.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #pragma once #include <seastar/core/reactor.hh> #include <seastar/core/sharded.hh> #include "common/config.h" #include "common/config_obs.h" #include "common/config_obs_mgr.h" #include "common/errno.h" namespace ceph { class Formatter; } namespace crimson::common { // a facade for managing config. each shard has its own copy of ConfigProxy. // // In seastar-osd, there could be multiple instances of @c ConfigValues in a // single process, as we are using a variant of read-copy-update mechinary to // update the settings at runtime. class ConfigProxy : public seastar::peering_sharded_service<ConfigProxy> { using LocalConfigValues = seastar::lw_shared_ptr<ConfigValues>; seastar::foreign_ptr<LocalConfigValues> values; md_config_t* remote_config = nullptr; std::unique_ptr<md_config_t> local_config; using ConfigObserver = ceph::md_config_obs_impl<ConfigProxy>; ObserverMgr<ConfigObserver> obs_mgr; const md_config_t& get_config() const { return remote_config ? *remote_config : * local_config; } md_config_t& get_config() { return remote_config ? *remote_config : * local_config; } // apply changes to all shards // @param func a functor which accepts @c "ConfigValues&" template<typename Func> seastar::future<> do_change(Func&& func) { return container().invoke_on(values.get_owner_shard(), [func = std::move(func)](ConfigProxy& owner) { // apply the changes to a copy of the values auto new_values = seastar::make_lw_shared(*owner.values); new_values->changed.clear(); func(*new_values); // always apply the new settings synchronously on the owner shard, to // avoid racings with other do_change() calls in parallel. 
ObserverMgr<ConfigObserver>::rev_obs_map rev_obs; owner.values.reset(new_values); owner.obs_mgr.for_each_change(owner.values->changed, owner, [&rev_obs](ConfigObserver *obs, const std::string &key) { rev_obs[obs].insert(key); }, nullptr); for (auto& [obs, keys] : rev_obs) { obs->handle_conf_change(owner, keys); } return seastar::parallel_for_each(boost::irange(1u, seastar::smp::count), [&owner, new_values] (auto cpu) { return owner.container().invoke_on(cpu, [foreign_values = seastar::make_foreign(new_values)](ConfigProxy& proxy) mutable { proxy.values.reset(); proxy.values = std::move(foreign_values); ObserverMgr<ConfigObserver>::rev_obs_map rev_obs; proxy.obs_mgr.for_each_change(proxy.values->changed, proxy, [&rev_obs](ConfigObserver *obs, const std::string& key) { rev_obs[obs].insert(key); }, nullptr); for (auto& obs_keys : rev_obs) { obs_keys.first->handle_conf_change(proxy, obs_keys.second); } }); }).finally([new_values] { new_values->changed.clear(); }); }); } public: ConfigProxy(const EntityName& name, std::string_view cluster); const ConfigValues* operator->() const noexcept { return values.get(); } const ConfigValues get_config_values() { return *values.get(); } ConfigValues* operator->() noexcept { return values.get(); } // required by sharded<> seastar::future<> start(); seastar::future<> stop() { return seastar::make_ready_future<>(); } void add_observer(ConfigObserver* obs) { obs_mgr.add_observer(obs); } void remove_observer(ConfigObserver* obs) { obs_mgr.remove_observer(obs); } seastar::future<> rm_val(const std::string& key) { return do_change([key, this](ConfigValues& values) { auto ret = get_config().rm_val(values, key); if (ret < 0) { throw std::invalid_argument(cpp_strerror(ret)); } }); } seastar::future<> set_val(const std::string& key, const std::string& val) { return do_change([key, val, this](ConfigValues& values) { std::stringstream err; auto ret = get_config().set_val(values, obs_mgr, key, val, &err); if (ret < 0) { throw 
std::invalid_argument(err.str()); } }); } int get_val(std::string_view key, std::string *val) const { return get_config().get_val(*values, key, val); } template<typename T> const T get_val(std::string_view key) const { return get_config().template get_val<T>(*values, key); } int get_all_sections(std::vector<std::string>& sections) const { return get_config().get_all_sections(sections); } int get_val_from_conf_file(const std::vector<std::string>& sections, const std::string& key, std::string& out, bool expand_meta) const { return get_config().get_val_from_conf_file(*values, sections, key, out, expand_meta); } unsigned get_osd_pool_default_min_size(uint8_t size) const { return get_config().get_osd_pool_default_min_size(*values, size); } seastar::future<> set_mon_vals(const std::map<std::string,std::string,std::less<>>& kv) { return do_change([kv, this](ConfigValues& values) { get_config().set_mon_vals(nullptr, values, obs_mgr, kv, nullptr); }); } seastar::future<> inject_args(const std::string& s) { return do_change([s, this](ConfigValues& values) { std::stringstream err; if (get_config().injectargs(values, obs_mgr, s, &err)) { throw std::invalid_argument(err.str()); } }); } void show_config(ceph::Formatter* f) const; seastar::future<> parse_argv(std::vector<const char*>& argv) { // we could pass whatever is unparsed to seastar, but seastar::app_template // is used for driving the seastar application, and // crimson::common::ConfigProxy is not available until seastar engine is up // and running, so we have to feed the command line args to app_template // first, then pass them to ConfigProxy. 
return do_change([&argv, this](ConfigValues& values) { get_config().parse_argv(values, obs_mgr, argv, CONF_CMDLINE); }); } seastar::future<> parse_env() { return do_change([this](ConfigValues& values) { get_config().parse_env(CEPH_ENTITY_TYPE_OSD, values, obs_mgr); }); } seastar::future<> parse_config_files(const std::string& conf_files); using ShardedConfig = seastar::sharded<ConfigProxy>; private: static ShardedConfig sharded_conf; friend ConfigProxy& local_conf(); friend ShardedConfig& sharded_conf(); }; inline ConfigProxy& local_conf() { return ConfigProxy::sharded_conf.local(); } inline ConfigProxy::ShardedConfig& sharded_conf() { return ConfigProxy::sharded_conf; } template<typename T> const T get_conf(const std::string& key) { return local_conf().template get_val<T>(key); } }
7,066
32.023364
92
h
null
ceph-main/src/crimson/common/errorator-loop.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*- // vim: ts=8 sw=2 smarttab expandtab #pragma once #include <seastar/core/future.hh> #include "crimson/common/errorator.h" namespace crimson { template <class... AllowedErrors> class parallel_for_each_state final : private seastar::continuation_base<> { using future_t = typename errorator<AllowedErrors...>::template future<>; std::vector<future_t> _incomplete; seastar::promise<> _result; std::exception_ptr _ex; private: void wait_for_one() noexcept { while (!_incomplete.empty() && _incomplete.back().available()) { if (_incomplete.back().failed()) { _ex = _incomplete.back().get_exception(); } _incomplete.pop_back(); } if (!_incomplete.empty()) { seastar::internal::set_callback(std::move(_incomplete.back()), static_cast<continuation_base<>*>(this)); _incomplete.pop_back(); return; } if (__builtin_expect(bool(_ex), false)) { _result.set_exception(std::move(_ex)); } else { _result.set_value(); } delete this; } virtual void run_and_dispose() noexcept override { if (_state.failed()) { _ex = std::move(_state).get_exception(); } _state = {}; wait_for_one(); } task* waiting_task() noexcept override { return _result.waiting_task(); } public: parallel_for_each_state(size_t n) { _incomplete.reserve(n); } void add_future(future_t&& f) { _incomplete.push_back(std::move(f)); } future_t get_future() { auto ret = _result.get_future(); wait_for_one(); return ret; } }; template <typename Iterator, typename Func, typename... 
AllowedErrors> static inline typename errorator<AllowedErrors...>::template future<> parallel_for_each(Iterator first, Iterator last, Func&& func) noexcept { parallel_for_each_state<AllowedErrors...>* s = nullptr; // Process all elements, giving each future the following treatment: // - available, not failed: do nothing // - available, failed: collect exception in ex // - not available: collect in s (allocating it if needed) for (;first != last; ++first) { auto f = seastar::futurize_invoke(std::forward<Func>(func), *first); if (!f.available() || f.failed()) { if (!s) { using itraits = std::iterator_traits<Iterator>; auto n = (seastar::internal::iterator_range_estimate_vector_capacity( first, last, typename itraits::iterator_category()) + 1); s = new parallel_for_each_state<AllowedErrors...>(n); } s->add_future(std::move(f)); } } // If any futures were not available, hand off to parallel_for_each_state::start(). // Otherwise we can return a result immediately. if (s) { // s->get_future() takes ownership of s (and chains it to one of the futures it contains) // so this isn't a leak return s->get_future(); } return seastar::make_ready_future<>(); } } // namespace crimson
3,003
31.652174
93
h
null
ceph-main/src/crimson/common/exception.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #pragma once #include <exception> #include <seastar/core/future.hh> #include <seastar/core/future-util.hh> #include "crimson/common/log.h" #include "crimson/common/interruptible_future.h" namespace crimson::common { class interruption : public std::exception {}; class system_shutdown_exception final : public interruption{ public: const char* what() const noexcept final { return "system shutting down"; } }; class actingset_changed final : public interruption { public: actingset_changed(bool sp) : still_primary(sp) {} const char* what() const noexcept final { return "acting set changed"; } bool is_primary() const { return still_primary; } private: const bool still_primary; }; template<typename Func, typename... Args> inline seastar::future<> handle_system_shutdown(Func&& func, Args&&... args) { return seastar::futurize_invoke(std::forward<Func>(func), std::forward<Args>(args)...) .handle_exception([](std::exception_ptr eptr) { if (*eptr.__cxa_exception_type() == typeid(crimson::common::system_shutdown_exception)) { crimson::get_logger(ceph_subsys_osd).debug( "operation skipped, system shutdown"); return seastar::now(); } std::rethrow_exception(eptr); }); } }
1,342
23.418182
76
h
null
ceph-main/src/crimson/common/fixed_kv_node_layout.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #pragma once #include <algorithm> #include <iostream> #include <boost/iterator/counting_iterator.hpp> #include "include/byteorder.h" #include "crimson/common/layout.h" namespace crimson::common { template <typename T, bool is_const> struct maybe_const_t { }; template<typename T> struct maybe_const_t<T, true> { using type = const T*; }; template<typename T> struct maybe_const_t<T, false> { using type = T*; }; /** * FixedKVNodeLayout * * Reusable implementation of a fixed size block mapping * K -> V with internal representations KINT and VINT. * * Uses absl::container_internal::Layout for the actual memory layout. * * The primary interface exposed is centered on the iterator * and related methods. * * Also included are helpers for doing splits and merges as for a btree. */ template < size_t CAPACITY, typename Meta, typename MetaInt, typename K, typename KINT, typename V, typename VINT, bool VALIDATE_INVARIANTS=true> class FixedKVNodeLayout { char *buf = nullptr; using L = absl::container_internal::Layout<ceph_le32, MetaInt, KINT, VINT>; static constexpr L layout{1, 1, CAPACITY, CAPACITY}; public: template <bool is_const> struct iter_t { friend class FixedKVNodeLayout; using parent_t = typename maybe_const_t<FixedKVNodeLayout, is_const>::type; parent_t node; uint16_t offset = 0; iter_t() = default; iter_t( parent_t parent, uint16_t offset) : node(parent), offset(offset) {} iter_t(const iter_t &) noexcept = default; iter_t(iter_t &&) noexcept = default; template<bool is_const_ = is_const> iter_t(const iter_t<false>& it, std::enable_if_t<is_const_, int> = 0) : iter_t{it.node, it.offset} {} iter_t &operator=(const iter_t &) = default; iter_t &operator=(iter_t &&) = default; // Work nicely with for loops without requiring a nested type. 
using reference = iter_t&; iter_t &operator*() { return *this; } iter_t *operator->() { return this; } iter_t operator++(int) { auto ret = *this; ++offset; return ret; } iter_t &operator++() { ++offset; return *this; } iter_t operator--(int) { assert(offset > 0); auto ret = *this; --offset; return ret; } iter_t &operator--() { assert(offset > 0); --offset; return *this; } uint16_t operator-(const iter_t &rhs) const { assert(rhs.node == node); return offset - rhs.offset; } iter_t operator+(uint16_t off) const { return iter_t( node, offset + off); } iter_t operator-(uint16_t off) const { return iter_t( node, offset - off); } friend bool operator==(const iter_t &lhs, const iter_t &rhs) { assert(lhs.node == rhs.node); return lhs.offset == rhs.offset; } friend bool operator!=(const iter_t &lhs, const iter_t &rhs) { return !(lhs == rhs); } friend bool operator==(const iter_t<is_const> &lhs, const iter_t<!is_const> &rhs) { assert(lhs.node == rhs.node); return lhs.offset == rhs.offset; } friend bool operator!=(const iter_t<is_const> &lhs, const iter_t<!is_const> &rhs) { return !(lhs == rhs); } K get_key() const { return K(node->get_key_ptr()[offset]); } K get_next_key_or_max() const { auto next = *this + 1; if (next == node->end()) return std::numeric_limits<K>::max(); else return next->get_key(); } void set_val(V val) const { static_assert(!is_const); node->get_val_ptr()[offset] = VINT(val); } V get_val() const { return V(node->get_val_ptr()[offset]); }; bool contains(K addr) const { return (get_key() <= addr) && (get_next_key_or_max() > addr); } uint16_t get_offset() const { return offset; } private: void set_key(K _lb) const { static_assert(!is_const); KINT lb; lb = _lb; node->get_key_ptr()[offset] = lb; } typename maybe_const_t<char, is_const>::type get_key_ptr() const { return reinterpret_cast< typename maybe_const_t<char, is_const>::type>( node->get_key_ptr() + offset); } typename maybe_const_t<char, is_const>::type get_val_ptr() const { return reinterpret_cast< 
typename maybe_const_t<char, is_const>::type>( node->get_val_ptr() + offset); } }; using const_iterator = iter_t<true>; using iterator = iter_t<false>; struct delta_t { enum class op_t : uint8_t { INSERT, REMOVE, UPDATE, } op; KINT key; VINT val; void replay(FixedKVNodeLayout &l) { switch (op) { case op_t::INSERT: { l.insert(l.lower_bound(key), key, val); break; } case op_t::REMOVE: { auto iter = l.find(key); assert(iter != l.end()); l.remove(iter); break; } case op_t::UPDATE: { auto iter = l.find(key); assert(iter != l.end()); l.update(iter, val); break; } default: assert(0 == "Impossible"); } } bool operator==(const delta_t &rhs) const { return op == rhs.op && key == rhs.key && val == rhs.val; } }; public: class delta_buffer_t { std::vector<delta_t> buffer; public: bool empty() const { return buffer.empty(); } void insert( const K &key, const V &val) { KINT k; k = key; buffer.push_back( delta_t{ delta_t::op_t::INSERT, k, VINT(val) }); } void update( const K &key, const V &val) { KINT k; k = key; buffer.push_back( delta_t{ delta_t::op_t::UPDATE, k, VINT(val) }); } void remove(const K &key) { KINT k; k = key; buffer.push_back( delta_t{ delta_t::op_t::REMOVE, k, VINT() }); } void replay(FixedKVNodeLayout &node) { for (auto &i: buffer) { i.replay(node); } } size_t get_bytes() const { return buffer.size() * sizeof(delta_t); } void copy_out(char *out, size_t len) { assert(len == get_bytes()); ::memcpy(out, reinterpret_cast<const void *>(buffer.data()), get_bytes()); buffer.clear(); } void copy_in(const char *out, size_t len) { assert(empty()); assert(len % sizeof(delta_t) == 0); buffer = std::vector( reinterpret_cast<const delta_t*>(out), reinterpret_cast<const delta_t*>(out + len)); } bool operator==(const delta_buffer_t &rhs) const { return buffer == rhs.buffer; } }; void journal_insert( const_iterator _iter, const K &key, const V &val, delta_buffer_t *recorder) { auto iter = iterator(this, _iter.offset); if (recorder) { recorder->insert( key, val); } insert(iter, 
key, val); } void journal_update( const_iterator _iter, const V &val, delta_buffer_t *recorder) { auto iter = iterator(this, _iter.offset); if (recorder) { recorder->update(iter->get_key(), val); } update(iter, val); } void journal_replace( const_iterator _iter, const K &key, const V &val, delta_buffer_t *recorder) { auto iter = iterator(this, _iter.offset); if (recorder) { recorder->remove(iter->get_key()); recorder->insert(key, val); } replace(iter, key, val); } void journal_remove( const_iterator _iter, delta_buffer_t *recorder) { auto iter = iterator(this, _iter.offset); if (recorder) { recorder->remove(iter->get_key()); } remove(iter); } FixedKVNodeLayout(char *buf) : buf(buf) {} virtual ~FixedKVNodeLayout() = default; const_iterator begin() const { return const_iterator( this, 0); } const_iterator end() const { return const_iterator( this, get_size()); } iterator begin() { return iterator( this, 0); } iterator end() { return iterator( this, get_size()); } const_iterator iter_idx(uint16_t off) const { return const_iterator( this, off); } const_iterator find(K l) const { auto ret = begin(); for (; ret != end(); ++ret) { if (ret->get_key() == l) break; } return ret; } iterator find(K l) { const auto &tref = *this; return iterator(this, tref.find(l).offset); } const_iterator lower_bound(K l) const { auto it = std::lower_bound(boost::make_counting_iterator<uint16_t>(0), boost::make_counting_iterator<uint16_t>(get_size()), l, [this](uint16_t i, K key) { const_iterator iter(this, i); return iter->get_key() < key; }); return const_iterator(this, *it); } iterator lower_bound(K l) { const auto &tref = *this; return iterator(this, tref.lower_bound(l).offset); } const_iterator upper_bound(K l) const { auto it = std::upper_bound(boost::make_counting_iterator<uint16_t>(0), boost::make_counting_iterator<uint16_t>(get_size()), l, [this](K key, uint16_t i) { const_iterator iter(this, i); return key < iter->get_key(); }); return const_iterator(this, *it); } iterator 
upper_bound(K l) { const auto &tref = *this; return iterator(this, tref.upper_bound(l).offset); } const_iterator get_split_pivot() const { return iter_idx(get_size() / 2); } uint16_t get_size() const { return *layout.template Pointer<0>(buf); } /** * set_size * * Set size representation to match size */ void set_size(uint16_t size) { *layout.template Pointer<0>(buf) = size; } /** * get_meta/set_meta * * Enables stashing a templated type within the layout. * Cannot be modified after initial write as it is not represented * in delta_t */ Meta get_meta() const { MetaInt &metaint = *layout.template Pointer<1>(buf); return Meta(metaint); } void set_meta(const Meta &meta) { *layout.template Pointer<1>(buf) = MetaInt(meta); } constexpr static size_t get_capacity() { return CAPACITY; } bool operator==(const FixedKVNodeLayout &rhs) const { if (get_size() != rhs.get_size()) { return false; } auto iter = begin(); auto iter2 = rhs.begin(); while (iter != end()) { if (iter->get_key() != iter2->get_key() || iter->get_val() != iter2->get_val()) { return false; } iter++; iter2++; } return true; } /** * split_into * * Takes *this and splits its contents into left and right. */ K split_into( FixedKVNodeLayout &left, FixedKVNodeLayout &right) const { auto piviter = get_split_pivot(); left.copy_from_foreign(left.begin(), begin(), piviter); left.set_size(piviter - begin()); right.copy_from_foreign(right.begin(), piviter, end()); right.set_size(end() - piviter); auto [lmeta, rmeta] = get_meta().split_into(piviter->get_key()); left.set_meta(lmeta); right.set_meta(rmeta); return piviter->get_key(); } /** * merge_from * * Takes two nodes and copies their contents into *this. 
* * precondition: left.size() + right.size() < CAPACITY */ void merge_from( const FixedKVNodeLayout &left, const FixedKVNodeLayout &right) { copy_from_foreign( end(), left.begin(), left.end()); set_size(left.get_size()); copy_from_foreign( end(), right.begin(), right.end()); set_size(left.get_size() + right.get_size()); set_meta(Meta::merge_from(left.get_meta(), right.get_meta())); } /** * balance_into_new_nodes * * Takes the contents of left and right and copies them into * replacement_left and replacement_right such that in the * event that the number of elements is odd the extra goes to * the left side iff prefer_left. */ static K balance_into_new_nodes( const FixedKVNodeLayout &left, const FixedKVNodeLayout &right, bool prefer_left, FixedKVNodeLayout &replacement_left, FixedKVNodeLayout &replacement_right) { auto total = left.get_size() + right.get_size(); auto pivot_idx = (left.get_size() + right.get_size()) / 2; if (total % 2 && prefer_left) { pivot_idx++; } auto replacement_pivot = pivot_idx >= left.get_size() ? 
right.iter_idx(pivot_idx - left.get_size())->get_key() : left.iter_idx(pivot_idx)->get_key(); if (pivot_idx < left.get_size()) { replacement_left.copy_from_foreign( replacement_left.end(), left.begin(), left.iter_idx(pivot_idx)); replacement_left.set_size(pivot_idx); replacement_right.copy_from_foreign( replacement_right.end(), left.iter_idx(pivot_idx), left.end()); replacement_right.set_size(left.get_size() - pivot_idx); replacement_right.copy_from_foreign( replacement_right.end(), right.begin(), right.end()); replacement_right.set_size(total - pivot_idx); } else { replacement_left.copy_from_foreign( replacement_left.end(), left.begin(), left.end()); replacement_left.set_size(left.get_size()); replacement_left.copy_from_foreign( replacement_left.end(), right.begin(), right.iter_idx(pivot_idx - left.get_size())); replacement_left.set_size(pivot_idx); replacement_right.copy_from_foreign( replacement_right.end(), right.iter_idx(pivot_idx - left.get_size()), right.end()); replacement_right.set_size(total - pivot_idx); } auto [lmeta, rmeta] = Meta::rebalance( left.get_meta(), right.get_meta(), replacement_pivot); replacement_left.set_meta(lmeta); replacement_right.set_meta(rmeta); return replacement_pivot; } private: void insert( iterator iter, const K &key, const V &val) { if (VALIDATE_INVARIANTS) { if (iter != begin()) { assert((iter - 1)->get_key() < key); } if (iter != end()) { assert(iter->get_key() > key); } assert(get_size() < CAPACITY); } copy_from_local(iter + 1, iter, end()); iter->set_key(key); iter->set_val(val); set_size(get_size() + 1); } void update( iterator iter, V val) { assert(iter != end()); iter->set_val(val); } void replace( iterator iter, const K &key, const V &val) { assert(iter != end()); if (VALIDATE_INVARIANTS) { if (iter != begin()) { assert((iter - 1)->get_key() < key); } if ((iter + 1) != end()) { assert((iter + 1)->get_key() > key); } } iter->set_key(key); iter->set_val(val); } void remove(iterator iter) { assert(iter != end()); 
copy_from_local(iter, iter + 1, end()); set_size(get_size() - 1); } /** * get_key_ptr * * Get pointer to start of key array */ KINT *get_key_ptr() { return layout.template Pointer<2>(buf); } const KINT *get_key_ptr() const { return layout.template Pointer<2>(buf); } /** * get_val_ptr * * Get pointer to start of val array */ VINT *get_val_ptr() { return layout.template Pointer<3>(buf); } const VINT *get_val_ptr() const { return layout.template Pointer<3>(buf); } /** * node_resolve/unresolve_vals * * If the representation for values depends in some way on the * node in which they are located, users may implement * resolve/unresolve to enable copy_from_foreign to handle that * transition. */ virtual void node_resolve_vals(iterator from, iterator to) const {} virtual void node_unresolve_vals(iterator from, iterator to) const {} /** * copy_from_foreign * * Copies entries from [from_src, to_src) to tgt. * * tgt and from_src must be from different nodes. * from_src and to_src must be from the same node. */ static void copy_from_foreign( iterator tgt, const_iterator from_src, const_iterator to_src) { assert(tgt->node != from_src->node); assert(to_src->node == from_src->node); memcpy( tgt->get_val_ptr(), from_src->get_val_ptr(), to_src->get_val_ptr() - from_src->get_val_ptr()); memcpy( tgt->get_key_ptr(), from_src->get_key_ptr(), to_src->get_key_ptr() - from_src->get_key_ptr()); from_src->node->node_resolve_vals(tgt, tgt + (to_src - from_src)); tgt->node->node_unresolve_vals(tgt, tgt + (to_src - from_src)); } /** * copy_from_local * * Copies entries from [from_src, to_src) to tgt. * * tgt, from_src, and to_src must be from the same node. 
*/ static void copy_from_local( iterator tgt, iterator from_src, iterator to_src) { assert(tgt->node == from_src->node); assert(to_src->node == from_src->node); memmove( tgt->get_val_ptr(), from_src->get_val_ptr(), to_src->get_val_ptr() - from_src->get_val_ptr()); memmove( tgt->get_key_ptr(), from_src->get_key_ptr(), to_src->get_key_ptr() - from_src->get_key_ptr()); } }; }
17,059
22.337893
87
h
null
ceph-main/src/crimson/common/gated.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #pragma once #include <seastar/core/gate.hh> #include <seastar/core/future.hh> #include <seastar/core/future-util.hh> #include "crimson/common/exception.h" #include "crimson/common/log.h" #include "include/ceph_assert.h" namespace crimson::common { class Gated { public: static seastar::logger& gated_logger() { return crimson::get_logger(ceph_subsys_osd); } template <typename Func, typename T> inline void dispatch_in_background(const char* what, T& who, Func&& func) { (void) dispatch(what, who, func); } template <typename Func, typename T> inline seastar::future<> dispatch(const char* what, T& who, Func&& func) { return seastar::with_gate(pending_dispatch, std::forward<Func>(func) ).handle_exception([what, &who] (std::exception_ptr eptr) { if (*eptr.__cxa_exception_type() == typeid(system_shutdown_exception)) { gated_logger().debug( "{}, {} skipped, system shutdown", who, what); return; } try { std::rethrow_exception(eptr); } catch (std::exception& e) { gated_logger().error( "{} dispatch() {} caught exception: {}", who, what, e.what()); } assert(*eptr.__cxa_exception_type() == typeid(seastar::gate_closed_exception)); }); } seastar::future<> close() { return pending_dispatch.close(); } bool is_closed() const { return pending_dispatch.is_closed(); } private: seastar::gate pending_dispatch; }; }// namespace crimson::common
1,558
26.839286
78
h
null
ceph-main/src/crimson/common/local_shared_foreign_ptr.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #pragma once #include <boost/intrusive_ptr.hpp> #include <boost/smart_ptr/intrusive_ref_counter.hpp> #include <seastar/core/smp.hh> #include <seastar/core/future.hh> #include <seastar/core/sharded.hh> namespace crimson { /** * local_shared_foreign_ptr * * See seastar/include/seastar/core/sharded.hh:foreign_ptr * * seastar::foreign_ptr wraps a smart ptr by proxying the copy() and destructor * operations back to the original core. This works well except that copy() * requires a cross-core call. We need a smart_ptr which allows cross-core * caching of (for example) OSDMaps, but we want to avoid the overhead inherent * in incrementing the source smart_ptr on every copy. Thus, * local_shared_foreign_ptr maintains a core-local foreign_ptr back to the * original core instance with core-local ref counting. */ template <typename PtrType> class local_shared_foreign_ptr { using element_type = typename std::pointer_traits<PtrType>::element_type; using pointer = element_type*; seastar::lw_shared_ptr<seastar::foreign_ptr<PtrType>> ptr; /// Wraps a pointer object and remembers the current core. local_shared_foreign_ptr(seastar::foreign_ptr<PtrType> &&fptr) : ptr(fptr ? seastar::make_lw_shared(std::move(fptr)) : nullptr) { assert(!ptr || (ptr && *ptr)); } template <typename T> friend local_shared_foreign_ptr<T> make_local_shared_foreign( seastar::foreign_ptr<T> &&); public: /// Constructs a null local_shared_foreign_ptr<>. local_shared_foreign_ptr() = default; /// Constructs a null local_shared_foreign_ptr<>. local_shared_foreign_ptr(std::nullptr_t) : local_shared_foreign_ptr() {} /// Moves a local_shared_foreign_ptr<> to another object. 
local_shared_foreign_ptr(local_shared_foreign_ptr&& other) = default; /// Copies a local_shared_foreign_ptr<> local_shared_foreign_ptr(const local_shared_foreign_ptr &other) = default; /// Releases reference to ptr eventually releasing the contained foreign_ptr ~local_shared_foreign_ptr() = default; /// Creates a copy of this foreign ptr. Only works if the stored ptr is copyable. seastar::future<seastar::foreign_ptr<PtrType>> get_foreign() const noexcept { assert(!ptr || (ptr && *ptr)); return ptr ? ptr->copy() : seastar::make_ready_future<seastar::foreign_ptr<PtrType>>(nullptr); } /// Accesses the wrapped object. element_type& operator*() const noexcept { assert(ptr && *ptr); return **ptr; } /// Accesses the wrapped object. element_type* operator->() const noexcept { assert(ptr && *ptr); return &**ptr; } /// Access the raw pointer to the wrapped object. pointer get() const noexcept { assert(!ptr || (ptr && *ptr)); return ptr ? ptr->get() : nullptr; } /// Return the owner-shard of the contained foreign_ptr. unsigned get_owner_shard() const noexcept { assert(!ptr || (ptr && *ptr)); return ptr ? ptr->get_owner_shard() : seastar::this_shard_id(); } /// Checks whether the wrapped pointer is non-null. operator bool() const noexcept { assert(!ptr || (ptr && *ptr)); return static_cast<bool>(ptr); } /// Move-assigns a \c local_shared_foreign_ptr<>. local_shared_foreign_ptr& operator=(local_shared_foreign_ptr&& other) noexcept { ptr = std::move(other.ptr); return *this; } /// Copy-assigns a \c local_shared_foreign_ptr<>. local_shared_foreign_ptr& operator=(const local_shared_foreign_ptr& other) noexcept { ptr = other.ptr; return *this; } /// Reset the containing ptr void reset() noexcept { assert(!ptr || (ptr && *ptr)); ptr = nullptr; } }; /// Wraps a smart_ptr T in a local_shared_foreign_ptr<>. 
template <typename T> local_shared_foreign_ptr<T> make_local_shared_foreign( seastar::foreign_ptr<T> &&ptr) { return local_shared_foreign_ptr<T>(std::move(ptr)); } /// Wraps ptr in a local_shared_foreign_ptr<>. template <typename T> local_shared_foreign_ptr<T> make_local_shared_foreign(T &&ptr) { return make_local_shared_foreign<T>( ptr ? seastar::make_foreign(std::forward<T>(ptr)) : nullptr); } template <typename T, typename U> inline bool operator==(const local_shared_foreign_ptr<T> &x, const local_shared_foreign_ptr<U> &y) { return x.get() == y.get(); } template <typename T> inline bool operator==(const local_shared_foreign_ptr<T> &x, std::nullptr_t) { return x.get() == nullptr; } template <typename T> inline bool operator==(std::nullptr_t, const local_shared_foreign_ptr<T>& y) { return nullptr == y.get(); } template <typename T, typename U> inline bool operator!=(const local_shared_foreign_ptr<T> &x, const local_shared_foreign_ptr<U> &y) { return x.get() != y.get(); } template <typename T> inline bool operator!=(const local_shared_foreign_ptr<T> &x, std::nullptr_t) { return x.get() != nullptr; } template <typename T> inline bool operator!=(std::nullptr_t, const local_shared_foreign_ptr<T>& y) { return nullptr != y.get(); } template <typename T, typename U> inline bool operator<(const local_shared_foreign_ptr<T> &x, const local_shared_foreign_ptr<U> &y) { return x.get() < y.get(); } template <typename T> inline bool operator<(const local_shared_foreign_ptr<T> &x, std::nullptr_t) { return x.get() < nullptr; } template <typename T> inline bool operator<(std::nullptr_t, const local_shared_foreign_ptr<T>& y) { return nullptr < y.get(); } template <typename T, typename U> inline bool operator<=(const local_shared_foreign_ptr<T> &x, const local_shared_foreign_ptr<U> &y) { return x.get() <= y.get(); } template <typename T> inline bool operator<=(const local_shared_foreign_ptr<T> &x, std::nullptr_t) { return x.get() <= nullptr; } template <typename T> inline bool 
operator<=(std::nullptr_t, const local_shared_foreign_ptr<T>& y) { return nullptr <= y.get(); } template <typename T, typename U> inline bool operator>(const local_shared_foreign_ptr<T> &x, const local_shared_foreign_ptr<U> &y) { return x.get() > y.get(); } template <typename T> inline bool operator>(const local_shared_foreign_ptr<T> &x, std::nullptr_t) { return x.get() > nullptr; } template <typename T> inline bool operator>(std::nullptr_t, const local_shared_foreign_ptr<T>& y) { return nullptr > y.get(); } template <typename T, typename U> inline bool operator>=(const local_shared_foreign_ptr<T> &x, const local_shared_foreign_ptr<U> &y) { return x.get() >= y.get(); } template <typename T> inline bool operator>=(const local_shared_foreign_ptr<T> &x, std::nullptr_t) { return x.get() >= nullptr; } template <typename T> inline bool operator>=(std::nullptr_t, const local_shared_foreign_ptr<T>& y) { return nullptr >= y.get(); } } namespace std { template <typename T> struct hash<crimson::local_shared_foreign_ptr<T>> : private hash<typename std::pointer_traits<T>::element_type *> { size_t operator()(const crimson::local_shared_foreign_ptr<T>& p) const { return hash<typename std::pointer_traits<T>::element_type *>::operator()(p.get()); } }; } namespace seastar { template<typename T> struct is_smart_ptr<crimson::local_shared_foreign_ptr<T>> : std::true_type {}; }
7,380
29.004065
87
h
null
ceph-main/src/crimson/common/log.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #pragma once #include <fmt/format.h> #include <seastar/util/log.hh> #include "common/subsys_types.h" namespace crimson { seastar::logger& get_logger(int subsys); static inline seastar::log_level to_log_level(int level) { if (level < 0) { return seastar::log_level::error; } else if (level < 1) { return seastar::log_level::warn; } else if (level <= 5) { return seastar::log_level::info; } else if (level <= 20) { return seastar::log_level::debug; } else { return seastar::log_level::trace; } } } /* Logging convenience macros * * The intention here is to standardize prefixing log lines with the function name * and a context prefix (like the operator<< for the PG). Place * * SET_SUBSYS(osd); * * at the top of the file to declare the log lines within the file as being (in this case) * in the osd subsys. At the beginning of each method/function, add * * LOG_PREFIX(Class::method_name) * * to set the FNAME symbol to Class::method_name. In order to use the log macros * within lambdas, capture FNAME by value. * * Log lines can then be declared using the appropriate macro below. */ #define SET_SUBSYS(subname_) static constexpr auto SOURCE_SUBSYS = ceph_subsys_##subname_ #define LOCAL_LOGGER crimson::get_logger(SOURCE_SUBSYS) #define LOGGER(subname_) crimson::get_logger(ceph_subsys_##subname_) #define LOG_PREFIX(x) constexpr auto FNAME = #x #define LOG(level_, MSG, ...) \ LOCAL_LOGGER.log(level_, "{}: " MSG, FNAME , ##__VA_ARGS__) #define SUBLOG(subname_, level_, MSG, ...) \ LOGGER(subname_).log(level_, "{}: " MSG, FNAME , ##__VA_ARGS__) #define TRACE(...) LOG(seastar::log_level::trace, __VA_ARGS__) #define SUBTRACE(subname_, ...) SUBLOG(subname_, seastar::log_level::trace, __VA_ARGS__) #define DEBUG(...) LOG(seastar::log_level::debug, __VA_ARGS__) #define SUBDEBUG(subname_, ...) SUBLOG(subname_, seastar::log_level::debug, __VA_ARGS__) #define INFO(...) 
LOG(seastar::log_level::info, __VA_ARGS__) #define SUBINFO(subname_, ...) SUBLOG(subname_, seastar::log_level::info, __VA_ARGS__) #define WARN(...) LOG(seastar::log_level::warn, __VA_ARGS__) #define SUBWARN(subname_, ...) SUBLOG(subname_, seastar::log_level::warn, __VA_ARGS__) #define ERROR(...) LOG(seastar::log_level::error, __VA_ARGS__) #define SUBERROR(subname_, ...) SUBLOG(subname_, seastar::log_level::error, __VA_ARGS__) // *DPP macros are intended to take DoutPrefixProvider implementations, but anything with // an operator<< will work as a prefix #define SUBLOGDPP(subname_, level_, MSG, dpp, ...) \ LOGGER(subname_).log(level_, "{} {}: " MSG, dpp, FNAME , ##__VA_ARGS__) #define SUBTRACEDPP(subname_, ...) SUBLOGDPP(subname_, seastar::log_level::trace, __VA_ARGS__) #define SUBDEBUGDPP(subname_, ...) SUBLOGDPP(subname_, seastar::log_level::debug, __VA_ARGS__) #define SUBINFODPP(subname_, ...) SUBLOGDPP(subname_, seastar::log_level::info, __VA_ARGS__) #define SUBWARNDPP(subname_, ...) SUBLOGDPP(subname_, seastar::log_level::warn, __VA_ARGS__) #define SUBERRORDPP(subname_, ...) SUBLOGDPP(subname_, seastar::log_level::error, __VA_ARGS__) #define LOGDPP(level_, MSG, dpp, ...) \ LOCAL_LOGGER.log(level_, "{} {}: " MSG, dpp, FNAME , ##__VA_ARGS__) #define TRACEDPP(...) LOGDPP(seastar::log_level::trace, __VA_ARGS__) #define DEBUGDPP(...) LOGDPP(seastar::log_level::debug, __VA_ARGS__) #define INFODPP(...) LOGDPP(seastar::log_level::info, __VA_ARGS__) #define WARNDPP(...) LOGDPP(seastar::log_level::warn, __VA_ARGS__) #define ERRORDPP(...) LOGDPP(seastar::log_level::error, __VA_ARGS__)
3,647
39.988764
94
h
null
ceph-main/src/crimson/common/logclient.h
#ifndef CEPH_LOGCLIENT_H #define CEPH_LOGCLIENT_H #include "common/LogEntry.h" #include "common/ostream_temp.h" #include "common/ref.h" #include "include/health.h" #include "crimson/net/Fwd.h" #include <seastar/core/future.hh> #include <seastar/core/gate.hh> #include <seastar/core/lowres_clock.hh> #include <seastar/core/shared_ptr.hh> #include <seastar/core/timer.hh> class LogClient; class MLog; class MLogAck; class Message; struct uuid_d; struct Connection; class LogChannel; namespace ceph { namespace logging { class Graylog; } } template<typename Message> using Ref = boost::intrusive_ptr<Message>; namespace crimson::net { class Messenger; } enum class log_flushing_t { NO_FLUSH, FLUSH }; int parse_log_client_options(CephContext *cct, std::map<std::string,std::string> &log_to_monitors, std::map<std::string,std::string> &log_to_syslog, std::map<std::string,std::string> &log_channels, std::map<std::string,std::string> &log_prios, std::map<std::string,std::string> &log_to_graylog, std::map<std::string,std::string> &log_to_graylog_host, std::map<std::string,std::string> &log_to_graylog_port, uuid_d &fsid, std::string &host); /** Manage where we output to and at which priority * * Not to be confused with the LogClient, which is the almighty coordinator * of channels. We just deal with the boring part of the logging: send to * syslog, send to file, generate LogEntry and queue it for the LogClient. * * Past queueing the LogEntry, the LogChannel is done with the whole thing. * LogClient will deal with sending and handling of LogEntries. */ class LogChannel : public LoggerSinkSet { public: LogChannel(LogClient *lc, const std::string &channel); LogChannel(LogClient *lc, const std::string &channel, const std::string &facility, const std::string &prio); OstreamTemp debug() { return OstreamTemp(CLOG_DEBUG, this); } void debug(std::stringstream &s) final { do_log(CLOG_DEBUG, s); } /** * Convenience function mapping health status to * the appropriate cluster log severity. 
*/ OstreamTemp health(health_status_t health) { switch(health) { case HEALTH_OK: return info(); case HEALTH_WARN: return warn(); case HEALTH_ERR: return error(); default: // Invalid health_status_t value ceph_abort(); } } OstreamTemp info() final { return OstreamTemp(CLOG_INFO, this); } void info(std::stringstream &s) final { do_log(CLOG_INFO, s); } OstreamTemp warn() final { return OstreamTemp(CLOG_WARN, this); } void warn(std::stringstream &s) final { do_log(CLOG_WARN, s); } OstreamTemp error() final { return OstreamTemp(CLOG_ERROR, this); } void error(std::stringstream &s) final { do_log(CLOG_ERROR, s); } OstreamTemp sec() final { return OstreamTemp(CLOG_SEC, this); } void sec(std::stringstream &s) final { do_log(CLOG_SEC, s); } void set_log_to_monitors(bool v); void set_log_to_syslog(bool v) { log_to_syslog = v; } void set_log_channel(const std::string& v) { log_channel = v; } void set_log_prio(const std::string& v) { log_prio = v; } void set_syslog_facility(const std::string& v) { syslog_facility = v; } const std::string& get_log_prio() const { return log_prio; } const std::string& get_log_channel() const { return log_channel; } const std::string& get_syslog_facility() const { return syslog_facility; } bool must_log_to_syslog() const { return log_to_syslog; } /** * Do we want to log to syslog? * * @return true if log_to_syslog is true and both channel and prio * are not empty; false otherwise. */ bool do_log_to_syslog() { return must_log_to_syslog() && !log_prio.empty() && !log_channel.empty(); } bool must_log_to_monitors() { return log_to_monitors; } bool do_log_to_graylog() { return (graylog != nullptr); } using Ref = seastar::lw_shared_ptr<LogChannel>; /** * update config values from parsed k/v std::map for each config option * * Pick out the relevant value based on our channel. 
*/ void update_config(std::map<std::string,std::string> &log_to_monitors, std::map<std::string,std::string> &log_to_syslog, std::map<std::string,std::string> &log_channels, std::map<std::string,std::string> &log_prios, std::map<std::string,std::string> &log_to_graylog, std::map<std::string,std::string> &log_to_graylog_host, std::map<std::string,std::string> &log_to_graylog_port, uuid_d &fsid, std::string &host); void do_log(clog_type prio, std::stringstream& ss) final; void do_log(clog_type prio, const std::string& s) final; private: LogClient *parent; std::string log_channel; std::string log_prio; std::string syslog_facility; bool log_to_syslog; bool log_to_monitors; seastar::shared_ptr<ceph::logging::Graylog> graylog; }; using LogChannelRef = LogChannel::Ref; class LogClient { public: enum logclient_flag_t { NO_FLAGS = 0, FLAG_MON = 0x1, }; LogClient(crimson::net::Messenger *m, logclient_flag_t flags); virtual ~LogClient() = default; seastar::future<> handle_log_ack(Ref<MLogAck> m); MessageURef get_mon_log_message(log_flushing_t flush_flag); bool are_pending() const; LogChannelRef create_channel() { return create_channel(CLOG_CHANNEL_DEFAULT); } LogChannelRef create_channel(const std::string& name); void destroy_channel(const std::string& name) { channels.erase(name); } void shutdown() { channels.clear(); } uint64_t get_next_seq(); entity_addrvec_t get_myaddrs() const; const EntityName& get_myname() const; entity_name_t get_myrank(); version_t queue(LogEntry &entry); void reset(); seastar::future<> set_fsid(const uuid_d& fsid); private: MessageURef _get_mon_log_message(); crimson::net::Messenger *messenger; bool is_mon; version_t last_log_sent; version_t last_log; std::deque<LogEntry> log_queue; std::map<std::string, LogChannelRef> channels; uuid_d m_fsid; }; #endif
6,177
25.515021
76
h
null
ceph-main/src/crimson/common/operation.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*- // vim: ts=8 sw=2 smarttab expandtab #pragma once #include <algorithm> #include <array> #include <set> #include <vector> #include <boost/core/demangle.hpp> #include <boost/intrusive/list.hpp> #include <boost/intrusive_ptr.hpp> #include <boost/smart_ptr/intrusive_ref_counter.hpp> #include <seastar/core/shared_mutex.hh> #include <seastar/core/future.hh> #include <seastar/core/timer.hh> #include <seastar/core/lowres_clock.hh> #include <seastar/core/future-util.hh> #include "include/ceph_assert.h" #include "include/utime.h" #include "common/Clock.h" #include "common/Formatter.h" #include "crimson/common/interruptible_future.h" #include "crimson/common/smp_helpers.h" #include "crimson/common/log.h" namespace ceph { class Formatter; } namespace crimson { using registry_hook_t = boost::intrusive::list_member_hook< boost::intrusive::link_mode<boost::intrusive::auto_unlink>>; class Operation; class Blocker; namespace detail { void dump_time_event(const char* name, const utime_t& timestamp, ceph::Formatter* f); void dump_blocking_event(const char* name, const utime_t& timestamp, const Blocker* blocker, ceph::Formatter* f); } // namespace detail /** * Provides an interface for dumping diagnostic information about * why a particular op is not making progress. */ class Blocker { public: void dump(ceph::Formatter *f) const; virtual ~Blocker() = default; private: virtual void dump_detail(ceph::Formatter *f) const = 0; virtual const char *get_type_name() const = 0; }; // the main template. by default an operation has no extenral // event handler (the empty tuple). specializing the template // allows to define backends on per-operation-type manner. // NOTE: basically this could be a function but C++ disallows // differentiating return type among specializations. 
template <class T> struct EventBackendRegistry { template <typename...> static constexpr bool always_false = false; static std::tuple<> get_backends() { static_assert(always_false<T>, "Registry specialization not found"); return {}; } }; template <class T> struct Event { T* that() { return static_cast<T*>(this); } const T* that() const { return static_cast<const T*>(this); } template <class OpT, class... Args> void trigger(OpT&& op, Args&&... args) { that()->internal_backend.handle(*that(), std::forward<OpT>(op), std::forward<Args>(args)...); // let's call `handle()` for concrete event type from each single // of our backends. the order in the registry matters. std::apply([&, //args=std::forward_as_tuple(std::forward<Args>(args)...), this] (auto... backend) { (..., backend.handle(*that(), std::forward<OpT>(op), std::forward<Args>(args)...)); }, EventBackendRegistry<std::decay_t<OpT>>::get_backends()); } }; // simplest event type for recording things like beginning or end // of TrackableOperation's life. template <class T> struct TimeEvent : Event<T> { struct Backend { // `T` is passed solely to let implementations to discriminate // basing on the type-of-event. virtual void handle(T&, const Operation&) = 0; }; // for the sake of dumping ops-in-flight. struct InternalBackend final : Backend { void handle(T&, const Operation&) override { timestamp = ceph_clock_now(); } utime_t timestamp; } internal_backend; void dump(ceph::Formatter *f) const { auto demangled_name = boost::core::demangle(typeid(T).name()); detail::dump_time_event( demangled_name.c_str(), internal_backend.timestamp, f); } auto get_timestamp() const { return internal_backend.timestamp; } }; template <typename T> class BlockerT : public Blocker { public: struct BlockingEvent : Event<typename T::BlockingEvent> { using Blocker = std::decay_t<T>; struct Backend { // `T` is based solely to let implementations to discriminate // basing on the type-of-event. 
virtual void handle(typename T::BlockingEvent&, const Operation&, const T&) = 0; }; struct InternalBackend : Backend { void handle(typename T::BlockingEvent&, const Operation&, const T& blocker) override { this->timestamp = ceph_clock_now(); this->blocker = &blocker; } utime_t timestamp; const T* blocker; } internal_backend; // we don't want to make any BlockerT to be aware and coupled with // an operation. to not templatize an entire path from an op to // a blocker, type erasuring is used. struct TriggerI { TriggerI(BlockingEvent& event) : event(event) {} template <class FutureT> auto maybe_record_blocking(FutureT&& fut, const T& blocker) { if (!fut.available()) { // a full blown call via vtable. that's the cost for templatization // avoidance. anyway, most of the things actually have the type // knowledge. record_blocking(blocker); return std::forward<FutureT>(fut).finally( [&event=this->event, &blocker] () mutable { // beware trigger instance may be already dead when this // is executed! record_unblocking(event, blocker); }); } return std::forward<FutureT>(fut); } virtual ~TriggerI() = default; protected: // it's for the sake of erasing the OpT type virtual void record_blocking(const T& blocker) = 0; static void record_unblocking(BlockingEvent& event, const T& blocker) { assert(event.internal_backend.blocker == &blocker); event.internal_backend.blocker = nullptr; } BlockingEvent& event; }; template <class OpT> struct Trigger : TriggerI { Trigger(BlockingEvent& event, const OpT& op) : TriggerI(event), op(op) {} template <class FutureT> auto maybe_record_blocking(FutureT&& fut, const T& blocker) { if (!fut.available()) { // no need for the dynamic dispatch! if we're lucky, a compiler // should collapse all these abstractions into a bunch of movs. 
this->Trigger::record_blocking(blocker); return std::forward<FutureT>(fut).finally( [&event=this->event, &blocker] () mutable { Trigger::record_unblocking(event, blocker); }); } return std::forward<FutureT>(fut); } const OpT &get_op() { return op; } protected: void record_blocking(const T& blocker) override { this->event.trigger(op, blocker); } const OpT& op; }; void dump(ceph::Formatter *f) const { auto demangled_name = boost::core::demangle(typeid(T).name()); detail::dump_blocking_event( demangled_name.c_str(), internal_backend.timestamp, internal_backend.blocker, f); } }; virtual ~BlockerT() = default; template <class TriggerT, class... Args> decltype(auto) track_blocking(TriggerT&& trigger, Args&&... args) { return std::forward<TriggerT>(trigger).maybe_record_blocking( std::forward<Args>(args)..., static_cast<const T&>(*this)); } private: const char *get_type_name() const final { return static_cast<const T*>(this)->type_name; } }; template <class T> struct AggregateBlockingEvent { struct TriggerI { protected: struct TriggerContainerI { virtual typename T::TriggerI& get_trigger() = 0; virtual ~TriggerContainerI() = default; }; using TriggerContainerIRef = std::unique_ptr<TriggerContainerI>; virtual TriggerContainerIRef create_part_trigger() = 0; public: template <class FutureT> auto maybe_record_blocking(FutureT&& fut, const typename T::Blocker& blocker) { // AggregateBlockingEvent is supposed to be used on relatively cold // paths (recovery), so we don't need to worry about the dynamic // polymothps / dynamic memory's overhead. 
auto tcont = create_part_trigger(); return tcont->get_trigger().maybe_record_blocking( std::move(fut), blocker ).finally([tcont=std::move(tcont)] {}); } virtual ~TriggerI() = default; }; template <class OpT> struct Trigger final : TriggerI { Trigger(AggregateBlockingEvent& event, const OpT& op) : event(event), op(op) {} class TriggerContainer final : public TriggerI::TriggerContainerI { AggregateBlockingEvent& event; typename decltype(event.events)::iterator iter; typename T::template Trigger<OpT> trigger; typename T::TriggerI &get_trigger() final { return trigger; } public: TriggerContainer(AggregateBlockingEvent& _event, const OpT& op) : event(_event), iter(event.events.emplace(event.events.end())), trigger(*iter, op) {} ~TriggerContainer() final { event.events.erase(iter); } }; protected: typename TriggerI::TriggerContainerIRef create_part_trigger() final { return std::make_unique<TriggerContainer>(event, op); } private: AggregateBlockingEvent& event; const OpT& op; }; private: std::list<T> events; template <class OpT> friend class Trigger; }; /** * Common base for all crimson-osd operations. Mainly provides * an interface for registering ops in flight and dumping * diagnostic information. 
*/ class Operation : public boost::intrusive_ref_counter< Operation, boost::thread_unsafe_counter> { public: using id_t = uint64_t; static constexpr id_t NULL_ID = std::numeric_limits<uint64_t>::max(); id_t get_id() const { return id; } static constexpr bool is_trackable = false; virtual unsigned get_type() const = 0; virtual const char *get_type_name() const = 0; virtual void print(std::ostream &) const = 0; void dump(ceph::Formatter *f) const; void dump_brief(ceph::Formatter *f) const; virtual ~Operation() = default; private: virtual void dump_detail(ceph::Formatter *f) const = 0; registry_hook_t registry_hook; id_t id = 0; void set_id(id_t in_id) { id = in_id; } friend class OperationRegistryI; template <size_t> friend class OperationRegistryT; }; using OperationRef = boost::intrusive_ptr<Operation>; std::ostream &operator<<(std::ostream &, const Operation &op); /** * Maintains a set of lists of all active ops. */ class OperationRegistryI { using op_list_member_option = boost::intrusive::member_hook< Operation, registry_hook_t, &Operation::registry_hook >; friend class Operation; seastar::timer<seastar::lowres_clock> shutdown_timer; seastar::promise<> shutdown; protected: virtual void do_register(Operation *op) = 0; virtual bool registries_empty() const = 0; virtual void do_stop() = 0; public: using op_list = boost::intrusive::list< Operation, op_list_member_option, boost::intrusive::constant_time_size<false>>; template <typename T, typename... Args> auto create_operation(Args&&... 
args) { boost::intrusive_ptr<T> op = new T(std::forward<Args>(args)...); do_register(&*op); return op; } seastar::future<> stop() { crimson::get_logger(ceph_subsys_osd).info("OperationRegistryI::{}", __func__); do_stop(); shutdown_timer.set_callback([this] { if (registries_empty()) { shutdown.set_value(); shutdown_timer.cancel(); } }); shutdown_timer.arm_periodic( std::chrono::milliseconds(100/*TODO: use option instead*/)); return shutdown.get_future(); } }; template <size_t NUM_REGISTRIES> class OperationRegistryT : public OperationRegistryI { Operation::id_t next_id = 0; std::array< op_list, NUM_REGISTRIES > registries; protected: void do_register(Operation *op) final { const auto op_type = op->get_type(); registries[op_type].push_back(*op); op->set_id(++next_id); } bool registries_empty() const final { return std::all_of(registries.begin(), registries.end(), [](auto& opl) { return opl.empty(); }); } protected: OperationRegistryT(core_id_t core) // Use core to initialize upper 8 bits of counters to ensure that // ids generated by different cores are disjoint : next_id(static_cast<id_t>(core) << (std::numeric_limits<id_t>::digits - 8)) {} template <size_t REGISTRY_INDEX> const op_list& get_registry() const { static_assert( REGISTRY_INDEX < std::tuple_size<decltype(registries)>::value); return registries[REGISTRY_INDEX]; } template <size_t REGISTRY_INDEX> op_list& get_registry() { static_assert( REGISTRY_INDEX < std::tuple_size<decltype(registries)>::value); return registries[REGISTRY_INDEX]; } public: /// Iterate over live ops template <typename F> void for_each_op(F &&f) const { for (const auto &registry: registries) { for (const auto &op: registry) { std::invoke(f, op); } } } /// Removes op from registry void remove_from_registry(Operation &op) { const auto op_type = op.get_type(); registries[op_type].erase(op_list::s_iterator_to(op)); } /// Adds op to registry void add_to_registry(Operation &op) { const auto op_type = op.get_type(); 
registries[op_type].push_back(op); } }; class PipelineExitBarrierI { public: using Ref = std::unique_ptr<PipelineExitBarrierI>; /// Waits for exit barrier virtual std::optional<seastar::future<>> wait() = 0; /// Releases pipeline stage, can only be called after wait virtual void exit() = 0; /// Releases pipeline resources without waiting on barrier virtual void cancel() = 0; /// Must ensure that resources are released, likely by calling cancel() virtual ~PipelineExitBarrierI() {} }; template <class T> class PipelineStageIT : public BlockerT<T> { const core_id_t core = seastar::this_shard_id(); public: core_id_t get_core() const { return core; } template <class... Args> decltype(auto) enter(Args&&... args) { return static_cast<T*>(this)->enter(std::forward<Args>(args)...); } }; class PipelineHandle { PipelineExitBarrierI::Ref barrier; std::optional<seastar::future<>> wait_barrier() { return barrier ? barrier->wait() : std::nullopt; } public: PipelineHandle() = default; PipelineHandle(const PipelineHandle&) = delete; PipelineHandle(PipelineHandle&&) = default; PipelineHandle &operator=(const PipelineHandle&) = delete; PipelineHandle &operator=(PipelineHandle&&) = default; /** * Returns a future which unblocks when the handle has entered the passed * OrderedPipelinePhase. If already in a phase, enter will also release * that phase after placing itself in the queue for the next one to preserve * ordering. 
*/ template <typename OpT, typename T> seastar::future<> enter(T &stage, typename T::BlockingEvent::template Trigger<OpT>&& t) { ceph_assert(stage.get_core() == seastar::this_shard_id()); auto wait_fut = wait_barrier(); if (wait_fut.has_value()) { return wait_fut.value().then([this, &stage, t=std::move(t)] () mutable { auto fut = t.maybe_record_blocking(stage.enter(t), stage); exit(); return std::move(fut).then( [this, t=std::move(t)](auto &&barrier_ref) mutable { barrier = std::move(barrier_ref); return seastar::now(); }); }); } else { auto fut = t.maybe_record_blocking(stage.enter(t), stage); exit(); return std::move(fut).then( [this, t=std::move(t)](auto &&barrier_ref) mutable { barrier = std::move(barrier_ref); return seastar::now(); }); } } /** * Completes pending exit barrier without entering a new one. */ seastar::future<> complete() { auto ret = wait_barrier(); barrier.reset(); return ret ? std::move(ret.value()) : seastar::now(); } /** * Exits current phase, skips exit barrier, should only be used for op * failure. Permitting the handle to be destructed as the same effect. */ void exit() { barrier.reset(); } }; /** * Ensures that at most one op may consider itself in the phase at a time. * Ops will see enter() unblock in the order in which they tried to enter * the phase. entering (though not necessarily waiting for the future to * resolve) a new phase prior to exiting the previous one will ensure that * the op ordering is preserved. 
*/ template <class T> class OrderedExclusivePhaseT : public PipelineStageIT<T> { void dump_detail(ceph::Formatter *f) const final { f->dump_unsigned("waiting", waiting); if (held_by != Operation::NULL_ID) { f->dump_unsigned("held_by_operation_id", held_by); } } class ExitBarrier final : public PipelineExitBarrierI { OrderedExclusivePhaseT *phase; Operation::id_t op_id; public: ExitBarrier(OrderedExclusivePhaseT *phase, Operation::id_t id) : phase(phase), op_id(id) {} std::optional<seastar::future<>> wait() final { return std::nullopt; } void exit() final { if (phase) { auto *p = phase; auto id = op_id; phase = nullptr; std::ignore = seastar::smp::submit_to( p->get_core(), [p, id] { p->exit(id); }); } } void cancel() final { exit(); } ~ExitBarrier() final { cancel(); } }; void exit(Operation::id_t op_id) { clear_held_by(op_id); mutex.unlock(); } public: template <class TriggerT> seastar::future<PipelineExitBarrierI::Ref> enter(TriggerT& t) { waiting++; return mutex.lock().then([this, op_id=t.get_op().get_id()] { ceph_assert_always(waiting > 0); --waiting; set_held_by(op_id); return PipelineExitBarrierI::Ref(new ExitBarrier{this, op_id}); }); } private: void set_held_by(Operation::id_t id) { ceph_assert_always(held_by == Operation::NULL_ID); held_by = id; } void clear_held_by(Operation::id_t id) { ceph_assert_always(held_by == id); held_by = Operation::NULL_ID; } unsigned waiting = 0; seastar::shared_mutex mutex; Operation::id_t held_by = Operation::NULL_ID; }; /** * Permits multiple ops to inhabit the stage concurrently, but ensures that * they will proceed to the next stage in the order in which they called * enter. 
*/ template <class T> class OrderedConcurrentPhaseT : public PipelineStageIT<T> { using base_t = PipelineStageIT<T>; public: struct BlockingEvent : base_t::BlockingEvent { using base_t::BlockingEvent::BlockingEvent; struct ExitBarrierEvent : TimeEvent<ExitBarrierEvent> {}; template <class OpT> struct Trigger : base_t::BlockingEvent::template Trigger<OpT> { using base_t::BlockingEvent::template Trigger<OpT>::Trigger; template <class FutureT> decltype(auto) maybe_record_exit_barrier(FutureT&& fut) { if (!fut.available()) { exit_barrier_event.trigger(this->op); } return std::forward<FutureT>(fut); } ExitBarrierEvent exit_barrier_event; }; }; private: void dump_detail(ceph::Formatter *f) const final {} template <class TriggerT> class ExitBarrier final : public PipelineExitBarrierI { OrderedConcurrentPhaseT *phase; std::optional<seastar::future<>> barrier; TriggerT trigger; public: ExitBarrier( OrderedConcurrentPhaseT *phase, seastar::future<> &&barrier, TriggerT& trigger) : phase(phase), barrier(std::move(barrier)), trigger(trigger) {} std::optional<seastar::future<>> wait() final { assert(phase); assert(barrier); auto ret = std::move(*barrier); barrier = std::nullopt; return trigger.maybe_record_exit_barrier(std::move(ret)); } void exit() final { if (barrier) { static_cast<void>( std::move(*barrier).then([phase=this->phase] { phase->mutex.unlock(); })); barrier = std::nullopt; phase = nullptr; } if (phase) { std::ignore = seastar::smp::submit_to( phase->get_core(), [this] { phase->mutex.unlock(); phase = nullptr; }); } } void cancel() final { exit(); } ~ExitBarrier() final { cancel(); } }; public: template <class TriggerT> seastar::future<PipelineExitBarrierI::Ref> enter(TriggerT& t) { return seastar::make_ready_future<PipelineExitBarrierI::Ref>( new ExitBarrier<TriggerT>{this, mutex.lock(), t}); } private: seastar::shared_mutex mutex; }; /** * Imposes no ordering or exclusivity at all. Ops enter without constraint and * may exit in any order. 
Useful mainly for informational purposes between * stages with constraints. */ template <class T> class UnorderedStageT : public PipelineStageIT<T> { void dump_detail(ceph::Formatter *f) const final {} class ExitBarrier final : public PipelineExitBarrierI { public: ExitBarrier() = default; std::optional<seastar::future<>> wait() final { return std::nullopt; } void exit() final {} void cancel() final {} ~ExitBarrier() final {} }; public: template <class... IgnoreArgs> seastar::future<PipelineExitBarrierI::Ref> enter(IgnoreArgs&&...) { return seastar::make_ready_future<PipelineExitBarrierI::Ref>( new ExitBarrier); } }; } #if FMT_VERSION >= 90000 template <> struct fmt::formatter<crimson::Operation> : fmt::ostream_formatter {}; #endif
21,287
26.397683
89
h
null
ceph-main/src/crimson/common/perf_counters_collection.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- // vim: ts=8 sw=2 smarttab #pragma once #include "common/perf_counters.h" #include "include/common_fwd.h" #include <seastar/core/sharded.hh> using crimson::common::PerfCountersCollectionImpl; namespace crimson::common { class PerfCountersCollection: public seastar::sharded<PerfCountersCollection> { using ShardedPerfCountersCollection = seastar::sharded<PerfCountersCollection>; private: std::unique_ptr<PerfCountersCollectionImpl> perf_collection; static ShardedPerfCountersCollection sharded_perf_coll; friend PerfCountersCollection& local_perf_coll(); friend ShardedPerfCountersCollection& sharded_perf_coll(); public: PerfCountersCollection(); ~PerfCountersCollection(); PerfCountersCollectionImpl* get_perf_collection(); void dump_formatted(ceph::Formatter *f, bool schema, bool dump_labeled, const std::string &logger = "", const std::string &counter = ""); }; inline PerfCountersCollection::ShardedPerfCountersCollection& sharded_perf_coll(){ return PerfCountersCollection::sharded_perf_coll; } inline PerfCountersCollection& local_perf_coll() { return PerfCountersCollection::sharded_perf_coll.local(); } class PerfCountersDeleter { CephContext* cct; public: PerfCountersDeleter() noexcept : cct(nullptr) {} PerfCountersDeleter(CephContext* cct) noexcept : cct(cct) {} void operator()(PerfCounters* p) noexcept; }; } using PerfCountersRef = std::unique_ptr<crimson::common::PerfCounters, crimson::common::PerfCountersDeleter>;
1,591
30.84
109
h
null
ceph-main/src/crimson/common/shared_lru.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#pragma once

#include <memory>
#include <optional>
#include <boost/smart_ptr/local_shared_ptr.hpp>
#include <boost/smart_ptr/weak_ptr.hpp>

// NOTE(review): weak_refs below uses std::map, which is only visible here
// transitively through simple_lru.h -- consider including <map> directly.
#include "simple_lru.h"

/// SharedLRU does its best to cache objects. It not only tracks the objects
/// in its LRU cache with strong references, it also tracks objects with
/// weak_ptr even if the cache does not hold any strong references to them, so
/// that it can return the objects after they are evicted, as long as they've
/// ever been cached and have not been destroyed yet.
template<class K, class V>
class SharedLRU {
  using shared_ptr_t = boost::local_shared_ptr<V>;
  using weak_ptr_t = boost::weak_ptr<V>;
  using value_type = std::pair<K, shared_ptr_t>;

  // strong references: pins the most recently used objects.
  // weak_refs is already ordered, and we don't use accessors like
  // LRUCache::lower_bound(), so unordered LRUCache would suffice.
  SimpleLRU<K, shared_ptr_t, false> cache;
  // every object ever handed out and not yet destroyed, ordered so the
  // lower_bound/upper_bound accessors below can walk it by key
  std::map<K, std::pair<weak_ptr_t, V*>> weak_refs;

  // Deleter installed on every shared_ptr created here: drops the
  // weak_refs entry when the last reference goes away, then frees.
  struct Deleter {
    SharedLRU<K,V>* cache;
    const K key;
    void operator()(V* ptr) {
      cache->_erase_weak(key);
      delete ptr;
    }
  };
  void _erase_weak(const K& key) {
    weak_refs.erase(key);
  }
public:
  SharedLRU(size_t max_size = 20)
    : cache{max_size}
  {}
  ~SharedLRU() {
    cache.clear();
    // initially, we were assuming that no pointer obtained from SharedLRU
    // can outlive the lru itself. However, since going with the interruption
    // concept for handling shutdowns, this is no longer valid.
    weak_refs.clear();
  }
  /**
   * Returns a reference to the given key, and perform an insertion if such
   * key does not already exist; in that case the value is
   * default-constructed.
   */
  shared_ptr_t operator[](const K& key);
  /**
   * Returns true iff there are no live references left to anything that has
   * been in the cache.
   */
  bool empty() const {
    return weak_refs.empty();
  }
  /// number of entries currently pinned by the strong-reference LRU
  size_t size() const {
    return cache.size();
  }
  size_t capacity() const {
    return cache.capacity();
  }
  /***
   * Inserts a key if not present, or bumps it to the front of the LRU if
   * it is, and then gives you a reference to the value. If the key already
   * existed, the value you tried to insert is discarded (the unique_ptr
   * argument releases it on return).
   *
   * @param key The key to insert
   * @param value The value that goes with the key
   * @return A reference to the map's value for the given key
   */
  shared_ptr_t insert(const K& key, std::unique_ptr<V> value);
  // clear all strong reference from the lru.
  void clear() {
    cache.clear();
  }
  /// Look up key; returns the cached object (re-pinning it in the LRU) or
  /// a null pointer if it was never cached or has been destroyed.
  shared_ptr_t find(const K& key);
  // return the last element that is not greater than key
  shared_ptr_t lower_bound(const K& key);
  // return the first element that is greater than key
  std::optional<value_type> upper_bound(const K& key);

  void erase(const K& key) {
    cache.erase(key);
    _erase_weak(key);
  }
};

template<class K, class V>
typename SharedLRU<K,V>::shared_ptr_t
SharedLRU<K,V>::insert(const K& key, std::unique_ptr<V> value)
{
  shared_ptr_t val;
  // reuse the existing object if anything still holds a reference to it
  if (auto found = weak_refs.find(key); found != weak_refs.end()) {
    val = found->second.first.lock();
  }
  if (!val) {
    // take ownership from the unique_ptr; Deleter unregisters the weak
    // ref once the last shared_ptr goes away
    val.reset(value.release(), Deleter{this, key});
    weak_refs.emplace(key, std::make_pair(val, val.get()));
  }
  cache.insert(key, val);
  return val;
}

template<class K, class V>
typename SharedLRU<K,V>::shared_ptr_t
SharedLRU<K,V>::operator[](const K& key)
{
  if (auto found = cache.find(key); found) {
    return *found;
  }
  shared_ptr_t val;
  if (auto found = weak_refs.find(key); found != weak_refs.end()) {
    val = found->second.first.lock();
  }
  if (!val) {
    // neither cached nor alive anywhere: default-construct a fresh value
    val.reset(new V{}, Deleter{this, key});
    weak_refs.emplace(key, std::make_pair(val, val.get()));
  }
  cache.insert(key, val);
  return val;
}

template<class K, class V>
typename SharedLRU<K,V>::shared_ptr_t
SharedLRU<K,V>::find(const K& key)
{
  if (auto found = cache.find(key); found) {
    return *found;
  }
  shared_ptr_t val;
  if (auto found = weak_refs.find(key); found != weak_refs.end()) {
    val = found->second.first.lock();
  }
  if (val) {
    // revived from a weak ref: pin it in the LRU again
    cache.insert(key, val);
  }
  return val;
}

// NOTE(review): despite the comment on the declaration, when an element
// >= key exists this returns the *first* such element (std::map
// lower_bound semantics), falling back to the greatest element only when
// none exists; it also re-pins the value under the queried `key`, not
// under found->first -- confirm both against callers.
template<class K, class V>
typename SharedLRU<K,V>::shared_ptr_t
SharedLRU<K,V>::lower_bound(const K& key)
{
  if (weak_refs.empty()) {
    return {};
  }
  auto found = weak_refs.lower_bound(key);
  if (found == weak_refs.end()) {
    --found;
  }
  if (auto val = found->second.first.lock(); val) {
    cache.insert(key, val);
    return val;
  } else {
    return {};
  }
}

template<class K, class V>
std::optional<typename SharedLRU<K,V>::value_type>
SharedLRU<K,V>::upper_bound(const K& key)
{
  // walk past entries whose object has already been destroyed (their
  // weak refs no longer lock) until a live one is found
  for (auto found = weak_refs.upper_bound(key);
       found != weak_refs.end();
       ++found) {
    if (auto val = found->second.first.lock(); val) {
      return std::make_pair(found->first, val);
    }
  }
  return std::nullopt;
}
5,024
26.762431
81
h
null
ceph-main/src/crimson/common/simple_lru.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#pragma once

#include <list>
#include <map>
#include <optional>
#include <type_traits>
#include <unordered_map>

/// A minimal LRU cache.  Lookups and inserts bump the entry to the front
/// of a recency list; once `max_size` entries are held, inserting a new
/// key evicts the least recently used one.  `Ordered` selects the backing
/// map (std::map vs std::unordered_map) and enables lower_bound().
template <class Key, class Value, bool Ordered>
class SimpleLRU {
  static_assert(std::is_default_constructible_v<Value>);
  using list_type = std::list<Key>;
  template<class K, class V>
  using map_t = std::conditional_t<Ordered,
                                   std::map<K, V>,
                                   std::unordered_map<K, V>>;
  // each mapped value carries an iterator back into the recency list so
  // the entry can be re-spliced to the front in O(1)
  using map_type = map_t<Key, std::pair<Value, typename list_type::iterator>>;
  list_type lru;
  map_type cache;
  const size_t max_size;

public:
  SimpleLRU(size_t size = 20)
    : max_size(size)
  {
    // std::map has no bucket-count constructor, so only pre-size the
    // unordered variant.  (The previous `cache(size)` member init failed
    // to compile whenever SimpleLRU<..., true> was instantiated.)
    if constexpr (!Ordered) {
      cache.reserve(size);
    }
  }
  size_t size() const {
    return cache.size();
  }
  size_t capacity() const {
    return max_size;
  }
  using insert_return_type = std::pair<Value, bool>;
  /// Insert key->value unless key is already present.
  /// @return {stored value, whether the key already existed}
  insert_return_type insert(const Key& key, Value value);
  /// Look up key; on a hit the entry becomes most-recently-used.
  std::optional<Value> find(const Key& key);
  /// Ordered-only: value of the first element whose key is not less than
  /// `key` (bumped in the LRU), or nullopt.  The previous declaration
  /// returned std::optional<std::enable_if<Ordered, Value>>, which could
  /// never compile when actually called; the SFINAE now constrains the
  /// member template instead.
  template <bool O = Ordered, typename = std::enable_if_t<O>>
  std::optional<Value> lower_bound(const Key& key) {
    if (auto found = cache.lower_bound(key); found != cache.end()) {
      return _lru_add(found);
    } else {
      return {};
    }
  }
  void erase(const Key& key);
  void clear();
private:
  // bump the item to the front of the lru list
  Value _lru_add(typename map_type::iterator found);
  // evict the last element of most recently used list
  void _evict();
};

template <class Key, class Value, bool Ordered>
typename SimpleLRU<Key,Value,Ordered>::insert_return_type
SimpleLRU<Key,Value,Ordered>::insert(const Key& key, Value value)
{
  if constexpr(Ordered) {
    auto found = cache.lower_bound(key);
    if (found != cache.end() && found->first == key) {
      // already exists: keep the stored value, report existed=true
      return {found->second.first, true};
    } else {
      if (size() >= capacity()) {
        _evict();
      }
      lru.push_front(key);
      // use lower_bound as hint to save the lookup
      cache.emplace_hint(found, key, std::make_pair(value, lru.begin()));
      return {std::move(value), false};
    }
  } else {
    // cache is not ordered
    auto found = cache.find(key);
    if (found != cache.end()) {
      // already exists: keep the stored value, report existed=true
      return {found->second.first, true};
    } else {
      if (size() >= capacity()) {
        _evict();
      }
      lru.push_front(key);
      cache.emplace(key, std::make_pair(value, lru.begin()));
      return {std::move(value), false};
    }
  }
}

template <class Key, class Value, bool Ordered>
std::optional<Value> SimpleLRU<Key,Value,Ordered>::find(const Key& key)
{
  if (auto found = cache.find(key); found != cache.end()) {
    return _lru_add(found);
  } else {
    return {};
  }
}

template <class Key, class Value, bool Ordered>
void SimpleLRU<Key,Value,Ordered>::clear()
{
  lru.clear();
  cache.clear();
}

template <class Key, class Value, bool Ordered>
void SimpleLRU<Key,Value,Ordered>::erase(const Key& key)
{
  if (auto found = cache.find(key); found != cache.end()) {
    lru.erase(found->second.second);
    cache.erase(found);
  }
}

template <class Key, class Value, bool Ordered>
Value SimpleLRU<Key,Value,Ordered>::_lru_add(
  typename SimpleLRU<Key,Value,Ordered>::map_type::iterator found)
{
  auto& [value, in_lru] = found->second;
  if (in_lru != lru.begin()){
    // move item to the front
    lru.splice(lru.begin(), lru, in_lru);
  }
  // the item is already at the front
  return value;
}

template <class Key, class Value, bool Ordered>
void SimpleLRU<Key,Value,Ordered>::_evict()
{
  // evict the last element of most recently used list
  auto last = --lru.end();
  cache.erase(*last);
  lru.erase(last);
}
3,782
25.640845
78
h
null
ceph-main/src/crimson/common/smp_helpers.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*-
// vim: ts=8 sw=2 smarttab

#pragma once

#include <limits>

#include <seastar/core/smp.hh>

#include "crimson/common/errorator.h"
#include "crimson/common/utility.h"

namespace crimson {

using core_id_t = seastar::shard_id;
static constexpr core_id_t NULL_CORE = std::numeric_limits<core_id_t>::max();

/**
 * submit_to
 *
 * Errorator-aware wrapper around seastar::smp::submit_to: when f returns
 * an errorated future it is lowered to a plain future (to_base()) for the
 * cross-core hop and rewrapped into the original errorated type on return;
 * plain futures are forwarded unchanged.
 */
auto submit_to(core_id_t core, auto &&f) {
  using ret_type = decltype(f());
  if constexpr (is_errorated_future_v<ret_type>) {
    auto ret = seastar::smp::submit_to(
      core,
      [f=std::move(f)]() mutable {
        return f().to_base();
      });
    return ret_type(std::move(ret));
  } else {
    return seastar::smp::submit_to(core, std::move(f));
  }
}

/// Invoke (obj.*method)(args...) on the given core.  The arguments are
/// packed into a tuple captured by the submitted lambda so they stay
/// alive across the cross-core submission.
template <typename Obj, typename Method, typename... Args>
auto proxy_method_on_core(
  core_id_t core, Obj &obj, Method method, Args&&... args) {
  return crimson::submit_to(
    core,
    [&obj, method,
     arg_tuple=std::make_tuple(std::forward<Args>(args)...)]() mutable {
      return apply_method_to_tuple(obj, method, std::move(arg_tuple));
    });
}

/**
 * reactor_map_seq
 *
 * Invokes f on each reactor sequentially.  Caller may assume that
 * f will not be invoked concurrently on multiple cores.
 */
template <typename F>
auto reactor_map_seq(F &&f) {
  using ret_type = decltype(f());
  // Both branches perform the same sequential per-core walk; the
  // errorated branch additionally rewraps the plain future into ret_type.
  // The inner [&f] capture refers to the per-core lambda's copy of f,
  // which do_for_each keeps alive until the iteration completes.
  if constexpr (is_errorated_future_v<ret_type>) {
    auto ret = crimson::do_for_each(
      seastar::smp::all_cpus().begin(),
      seastar::smp::all_cpus().end(),
      [f=std::move(f)](auto core) mutable {
        return seastar::smp::submit_to(
          core,
          [&f] {
            return std::invoke(f);
          });
      });
    return ret_type(ret);
  } else {
    return seastar::do_for_each(
      seastar::smp::all_cpus().begin(),
      seastar::smp::all_cpus().end(),
      [f=std::move(f)](auto core) mutable {
        return seastar::smp::submit_to(
          core,
          [&f] {
            return std::invoke(f);
          });
      });
  }
}

/**
 * sharded_map_seq
 *
 * Invokes f on each shard of t sequentially.  Caller may assume that
 * f will not be invoked concurrently on multiple cores.
 */
template <typename T, typename F>
auto sharded_map_seq(T &t, F &&f) {
  return reactor_map_seq(
    [&t, f=std::forward<F>(f)]() mutable {
      // t.local() resolves to the calling core's shard on each hop
      return std::invoke(f, t.local());
    });
}

}
2,286
23.591398
77
h
null
ceph-main/src/crimson/common/throttle.h
// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:nil -*- // vim: ts=8 sw=2 smarttab #pragma once #include <seastar/core/condition-variable.hh> // pull seastar::timer<...>::timer definitions. FIX SEASTAR or reactor.hh // is obligatory and should be included everywhere? #include <seastar/core/reactor.hh> #include "common/ThrottleInterface.h" namespace crimson::common { class Throttle final : public ThrottleInterface { size_t max = 0; size_t count = 0; size_t pending = 0; // we cannot change the "count" of seastar::semaphore after it is created, // so use condition_variable instead. seastar::condition_variable on_free_slots; public: explicit Throttle(size_t m) : max(m) {} int64_t take(int64_t c = 1) override; int64_t put(int64_t c = 1) override; seastar::future<> get(size_t c); size_t get_current() const { return count; } size_t get_max() const { return max; } size_t get_pending() const { return pending; } void reset_max(size_t m); private: bool _should_wait(size_t c) const; }; } // namespace crimson::common
1,097
23.954545
76
h